python_code
stringlengths 0
1.8M
| repo_name
stringclasses 7
values | file_path
stringlengths 5
99
|
---|---|---|
// SPDX-License-Identifier: (GPL-2.0 OR MPL-1.1)
/*
*
* Functions that talk to the USB variant of the Intersil hfa384x MAC
*
* Copyright (C) 1999 AbsoluteValue Systems, Inc. All Rights Reserved.
* --------------------------------------------------------------------
*
* linux-wlan
*
* --------------------------------------------------------------------
*
* Inquiries regarding the linux-wlan Open Source project can be
* made directly to:
*
* AbsoluteValue Systems Inc.
* [email protected]
* http://www.linux-wlan.com
*
* --------------------------------------------------------------------
*
* Portions of the development of this software were funded by
* Intersil Corporation as part of PRISM(R) chipset product development.
*
* --------------------------------------------------------------------
*
* This file implements functions that correspond to the prism2/hfa384x
* 802.11 MAC hardware and firmware host interface.
*
* The functions can be considered to represent several levels of
* abstraction. The lowest level functions are simply C-callable wrappers
* around the register accesses. The next higher level represents C-callable
* prism2 API functions that match the Intersil documentation as closely
* as is reasonable. The next higher layer implements common sequences
* of invocations of the API layer (e.g. write to bap, followed by cmd).
*
* Common sequences:
* hfa384x_drvr_xxx Highest level abstractions provided by the
* hfa384x code. They are driver defined wrappers
* for common sequences. These functions generally
* use the services of the lower levels.
*
* hfa384x_drvr_xxxconfig An example of the drvr level abstraction. These
* functions are wrappers for the RID get/set
* sequence. They call copy_[to|from]_bap() and
* cmd_access(). These functions operate on the
* RIDs and buffers without validation. The caller
* is responsible for that.
*
* API wrapper functions:
* hfa384x_cmd_xxx functions that provide access to the f/w commands.
* The function arguments correspond to each command
* argument, even command arguments that get packed
* into single registers. These functions _just_
* issue the command by setting the cmd/parm regs
* & reading the status/resp regs. Additional
* activities required to fully use a command
* (read/write from/to bap, get/set int status etc.)
* are implemented separately. Think of these as
* C-callable prism2 commands.
*
* Lowest Layer Functions:
* hfa384x_docmd_xxx These functions implement the sequence required
* to issue any prism2 command. Primarily used by the
* hfa384x_cmd_xxx functions.
*
* hfa384x_bap_xxx BAP read/write access functions.
* Note: we usually use BAP0 for non-interrupt context
* and BAP1 for interrupt context.
*
* hfa384x_dl_xxx download related functions.
*
* Driver State Issues:
* Note that there are two pairs of functions that manage the
* 'initialized' and 'running' states of the hw/MAC combo. The four
* functions are create(), destroy(), start(), and stop(). create()
* sets up the data structures required to support the hfa384x_*
* functions and destroy() cleans them up. The start() function gets
* the actual hardware running and enables the interrupts. The stop()
* function shuts the hardware down. The sequence should be:
* create()
* start()
* .
* . Do interesting things w/ the hardware
* .
* stop()
* destroy()
*
* Note that destroy() can be called without calling stop() first.
* --------------------------------------------------------------------
*/
#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/sched.h>
#include <linux/types.h>
#include <linux/slab.h>
#include <linux/wireless.h>
#include <linux/netdevice.h>
#include <linux/timer.h>
#include <linux/io.h>
#include <linux/delay.h>
#include <asm/byteorder.h>
#include <linux/bitops.h>
#include <linux/list.h>
#include <linux/usb.h>
#include <linux/byteorder/generic.h>
#include "p80211types.h"
#include "p80211hdr.h"
#include "p80211mgmt.h"
#include "p80211conv.h"
#include "p80211msg.h"
#include "p80211netdev.h"
#include "p80211req.h"
#include "p80211metadef.h"
#include "p80211metastruct.h"
#include "hfa384x.h"
#include "prism2mgmt.h"
/* Completion mode for a CTLX exchange: DOWAIT blocks the caller until
 * the exchange finishes; DOASYNC returns immediately and the result is
 * delivered through the cmdcb/usercb callbacks instead.
 */
enum cmd_mode {
DOWAIT = 0,
DOASYNC
};
/* Throttle timer interval: HZ/8 jiffies (1/8 second). */
#define THROTTLE_JIFFIES (HZ / 8)
/* Compatibility placeholders for URB flags that no longer exist in the
 * USB core; defined as 0 so any remaining uses are no-ops.
 */
#define URB_ASYNC_UNLINK 0
#define USB_QUEUE_BULK 0
/* Round (a) up to the next multiple of 64. */
#define ROUNDUP64(a) (((a) + 63) & ~63)
#ifdef DEBUG_USB
static void dbprint_urb(struct urb *urb);
#endif
static void hfa384x_int_rxmonitor(struct wlandevice *wlandev,
struct hfa384x_usb_rxfrm *rxfrm);
static void hfa384x_usb_defer(struct work_struct *data);
static int submit_rx_urb(struct hfa384x *hw, gfp_t flags);
static int submit_tx_urb(struct hfa384x *hw, struct urb *tx_urb, gfp_t flags);
/*---------------------------------------------------*/
/* Callbacks */
static void hfa384x_usbout_callback(struct urb *urb);
static void hfa384x_ctlxout_callback(struct urb *urb);
static void hfa384x_usbin_callback(struct urb *urb);
static void
hfa384x_usbin_txcompl(struct wlandevice *wlandev, union hfa384x_usbin *usbin);
static void hfa384x_usbin_rx(struct wlandevice *wlandev, struct sk_buff *skb);
static void hfa384x_usbin_info(struct wlandevice *wlandev,
union hfa384x_usbin *usbin);
static void hfa384x_usbin_ctlx(struct hfa384x *hw, union hfa384x_usbin *usbin,
int urb_status);
/*---------------------------------------------------*/
/* Functions to support the prism2 usb command queue */
static void hfa384x_usbctlxq_run(struct hfa384x *hw);
static void hfa384x_usbctlx_reqtimerfn(struct timer_list *t);
static void hfa384x_usbctlx_resptimerfn(struct timer_list *t);
static void hfa384x_usb_throttlefn(struct timer_list *t);
static void hfa384x_usbctlx_completion_task(struct work_struct *work);
static void hfa384x_usbctlx_reaper_task(struct work_struct *work);
static int hfa384x_usbctlx_submit(struct hfa384x *hw,
struct hfa384x_usbctlx *ctlx);
static void unlocked_usbctlx_complete(struct hfa384x *hw,
struct hfa384x_usbctlx *ctlx);
struct usbctlx_completor {
int (*complete)(struct usbctlx_completor *completor);
};
static int
hfa384x_usbctlx_complete_sync(struct hfa384x *hw,
struct hfa384x_usbctlx *ctlx,
struct usbctlx_completor *completor);
static int
unlocked_usbctlx_cancel_async(struct hfa384x *hw, struct hfa384x_usbctlx *ctlx);
static void hfa384x_cb_status(struct hfa384x *hw,
const struct hfa384x_usbctlx *ctlx);
static int
usbctlx_get_status(const struct hfa384x_usb_statusresp *cmdresp,
struct hfa384x_cmdresult *result);
static void
usbctlx_get_rridresult(const struct hfa384x_usb_rridresp *rridresp,
struct hfa384x_rridresult *result);
/*---------------------------------------------------*/
/* Low level req/resp CTLX formatters and submitters */
static inline int
hfa384x_docmd(struct hfa384x *hw,
struct hfa384x_metacmd *cmd);
static int
hfa384x_dorrid(struct hfa384x *hw,
enum cmd_mode mode,
u16 rid,
void *riddata,
unsigned int riddatalen,
ctlx_cmdcb_t cmdcb, ctlx_usercb_t usercb, void *usercb_data);
static int
hfa384x_dowrid(struct hfa384x *hw,
enum cmd_mode mode,
u16 rid,
void *riddata,
unsigned int riddatalen,
ctlx_cmdcb_t cmdcb, ctlx_usercb_t usercb, void *usercb_data);
static int
hfa384x_dormem(struct hfa384x *hw,
u16 page,
u16 offset,
void *data,
unsigned int len);
static int
hfa384x_dowmem(struct hfa384x *hw,
u16 page,
u16 offset,
void *data,
unsigned int len);
static int hfa384x_isgood_pdrcode(u16 pdrcode);
/*
 * ctlxstr - map an enum ctlx_state value to a human-readable string.
 *
 * The table order must match the declaration order of enum ctlx_state
 * in hfa384x.h.  No range check is performed, so callers must pass a
 * valid state.  (Also drops the stray ';' that followed the function
 * definition.)
 */
static inline const char *ctlxstr(enum ctlx_state s)
{
	static const char * const ctlx_str[] = {
		"Initial state",
		"Complete",
		"Request failed",
		"Request pending",
		"Request packet submitted",
		"Request packet completed",
		"Response packet completed"
	};

	return ctlx_str[s];
}
/*
 * get_active_ctlx - return the CTLX at the head of the active queue.
 *
 * Callers must hold hw->ctlxq.lock and must already know the active
 * list is non-empty; an empty list would yield a bogus pointer.
 */
static inline struct hfa384x_usbctlx *get_active_ctlx(struct hfa384x *hw)
{
	/* list_first_entry() expresses "head of the list" directly
	 * instead of open-coding it through ->next.
	 */
	return list_first_entry(&hw->ctlxq.active,
				struct hfa384x_usbctlx, list);
}
#ifdef DEBUG_USB
/*
 * dbprint_urb - dump the interesting fields of a URB to the debug log.
 *
 * Debug-only helper, compiled under DEBUG_USB.  Pointer fields are
 * printed with %p: the old casts to unsigned int truncated addresses
 * on 64-bit systems and were a format/argument mismatch.  'static'
 * added to match the forward declaration above.
 */
static void dbprint_urb(struct urb *urb)
{
	pr_debug("urb->pipe=0x%08x\n", urb->pipe);
	pr_debug("urb->status=0x%08x\n", urb->status);
	pr_debug("urb->transfer_flags=0x%08x\n", urb->transfer_flags);
	pr_debug("urb->transfer_buffer=%p\n", urb->transfer_buffer);
	pr_debug("urb->transfer_buffer_length=0x%08x\n",
		 urb->transfer_buffer_length);
	pr_debug("urb->actual_length=0x%08x\n", urb->actual_length);
	pr_debug("urb->setup_packet(ctl)=%p\n", urb->setup_packet);
	pr_debug("urb->start_frame(iso/irq)=0x%08x\n", urb->start_frame);
	pr_debug("urb->interval(irq)=0x%08x\n", urb->interval);
	pr_debug("urb->error_count(iso)=0x%08x\n", urb->error_count);
	pr_debug("urb->context=%p\n", urb->context);
	pr_debug("urb->complete=%p\n", urb->complete);
}
#endif
/*----------------------------------------------------------------
* submit_rx_urb
*
* Listen for input data on the BULK-IN pipe. If the pipe has
* stalled then schedule it to be reset.
*
* Arguments:
* hw device struct
* memflags memory allocation flags
*
* Returns:
* error code from submission
*
* Call context:
* Any
*----------------------------------------------------------------
*/
static int submit_rx_urb(struct hfa384x *hw, gfp_t memflags)
{
struct sk_buff *skb;
int result;
/* The skb is both the URB transfer buffer and the buffer handed to
 * the RX completion path via hw->rx_urb_skb.
 */
skb = dev_alloc_skb(sizeof(union hfa384x_usbin));
if (!skb) {
result = -ENOMEM;
goto done;
}
/* Post the IN urb */
usb_fill_bulk_urb(&hw->rx_urb, hw->usb,
hw->endp_in,
skb->data, sizeof(union hfa384x_usbin),
hfa384x_usbin_callback, hw->wlandev);
hw->rx_urb_skb = skb;
/* Default to "device gone" unless we actually get to submit. */
result = -ENOLINK;
if (!hw->wlandev->hwremoved &&
!test_bit(WORK_RX_HALT, &hw->usb_flags)) {
result = usb_submit_urb(&hw->rx_urb, memflags);
/* Check whether we need to reset the RX pipe */
if (result == -EPIPE) {
netdev_warn(hw->wlandev->netdev,
"%s rx pipe stalled: requesting reset\n",
hw->wlandev->netdev->name);
/* Defer the endpoint reset to process context
 * (hfa384x_usb_defer); schedule only once.
 */
if (!test_and_set_bit(WORK_RX_HALT, &hw->usb_flags))
schedule_work(&hw->usb_work);
}
}
/* Don't leak memory if anything should go wrong */
if (result != 0) {
dev_kfree_skb(skb);
hw->rx_urb_skb = NULL;
}
done:
return result;
}
/*----------------------------------------------------------------
* submit_tx_urb
*
* Prepares and submits the URB of transmitted data. If the
* submission fails then it will schedule the output pipe to
* be reset.
*
* Arguments:
* hw device struct
* tx_urb URB of data for transmission
* memflags memory allocation flags
*
* Returns:
* error code from submission
*
* Call context:
* Any
*----------------------------------------------------------------
*/
static int submit_tx_urb(struct hfa384x *hw, struct urb *tx_urb, gfp_t memflags)
{
struct net_device *netdev = hw->wlandev->netdev;
int result;
/* Default to "device gone" unless we actually get to submit. */
result = -ENOLINK;
if (netif_running(netdev)) {
if (!hw->wlandev->hwremoved &&
!test_bit(WORK_TX_HALT, &hw->usb_flags)) {
result = usb_submit_urb(tx_urb, memflags);
/* Test whether we need to reset the TX pipe */
if (result == -EPIPE) {
netdev_warn(hw->wlandev->netdev,
"%s tx pipe stalled: requesting reset\n",
netdev->name);
set_bit(WORK_TX_HALT, &hw->usb_flags);
schedule_work(&hw->usb_work);
} else if (result == 0) {
/* Accepted for transmission: throttle the net
 * queue (re-woken via the WORK_TX_RESUME
 * handling in hfa384x_usb_defer).
 */
netif_stop_queue(netdev);
}
}
}
return result;
}
/*----------------------------------------------------------------
* hfa384x_usb_defer
*
* There are some things that the USB stack cannot do while
* in interrupt context, so we arrange this function to run
* in process context.
*
* Arguments:
* hw device structure
*
* Returns:
* nothing
*
* Call context:
* process (by design)
*----------------------------------------------------------------
*/
static void hfa384x_usb_defer(struct work_struct *data)
{
struct hfa384x *hw = container_of(data, struct hfa384x, usb_work);
struct net_device *netdev = hw->wlandev->netdev;
/* Don't bother trying to reset anything if the plug
 * has been pulled ...
 */
if (hw->wlandev->hwremoved)
return;
/* Reception has stopped: try to reset the input pipe */
if (test_bit(WORK_RX_HALT, &hw->usb_flags)) {
int ret;
usb_kill_urb(&hw->rx_urb); /* Cannot be holding spinlock! */
ret = usb_clear_halt(hw->usb, hw->endp_in);
if (ret != 0) {
netdev_err(hw->wlandev->netdev,
"Failed to clear rx pipe for %s: err=%d\n",
netdev->name, ret);
} else {
netdev_info(hw->wlandev->netdev, "%s rx pipe reset complete.\n",
netdev->name);
/* HALT cleared; mark RX for resubmission below. */
clear_bit(WORK_RX_HALT, &hw->usb_flags);
set_bit(WORK_RX_RESUME, &hw->usb_flags);
}
}
/* Resume receiving data back from the device. */
if (test_bit(WORK_RX_RESUME, &hw->usb_flags)) {
int ret;
ret = submit_rx_urb(hw, GFP_KERNEL);
if (ret != 0) {
/* RESUME bit left set: the next run of this work
 * item will retry the submission.
 */
netdev_err(hw->wlandev->netdev,
"Failed to resume %s rx pipe.\n",
netdev->name);
} else {
clear_bit(WORK_RX_RESUME, &hw->usb_flags);
}
}
/* Transmission has stopped: try to reset the output pipe */
if (test_bit(WORK_TX_HALT, &hw->usb_flags)) {
int ret;
usb_kill_urb(&hw->tx_urb);
ret = usb_clear_halt(hw->usb, hw->endp_out);
if (ret != 0) {
netdev_err(hw->wlandev->netdev,
"Failed to clear tx pipe for %s: err=%d\n",
netdev->name, ret);
} else {
netdev_info(hw->wlandev->netdev, "%s tx pipe reset complete.\n",
netdev->name);
clear_bit(WORK_TX_HALT, &hw->usb_flags);
set_bit(WORK_TX_RESUME, &hw->usb_flags);
/* Stopping the BULK-OUT pipe also blocked
 * us from sending any more CTLX URBs, so
 * we need to re-run our queue ...
 */
hfa384x_usbctlxq_run(hw);
}
}
/* Resume transmitting. */
if (test_and_clear_bit(WORK_TX_RESUME, &hw->usb_flags))
netif_wake_queue(hw->wlandev->netdev);
}
/*----------------------------------------------------------------
* hfa384x_create
*
* Sets up the struct hfa384x data structure for use. Note this
* does _not_ initialize the actual hardware, just the data structures
* we use to keep track of its state.
*
* Arguments:
* hw device structure
* usb usb_device structure of the attached USB device
*
* Returns:
* nothing
*
* Side effects:
*
* Call context:
* process
*----------------------------------------------------------------
*/
void hfa384x_create(struct hfa384x *hw, struct usb_device *usb)
{
hw->usb = usb;
/* Set up the waitq */
init_waitqueue_head(&hw->cmdq);
/* Initialize the command queue */
spin_lock_init(&hw->ctlxq.lock);
INIT_LIST_HEAD(&hw->ctlxq.pending);
INIT_LIST_HEAD(&hw->ctlxq.active);
INIT_LIST_HEAD(&hw->ctlxq.completing);
INIT_LIST_HEAD(&hw->ctlxq.reapable);
/* Initialize the authentication queue */
skb_queue_head_init(&hw->authq);
/* Deferred-work handlers that run in process context. */
INIT_WORK(&hw->reaper_bh, hfa384x_usbctlx_reaper_task);
INIT_WORK(&hw->completion_bh, hfa384x_usbctlx_completion_task);
INIT_WORK(&hw->link_bh, prism2sta_processing_defer);
INIT_WORK(&hw->usb_work, hfa384x_usb_defer);
/* CTLX request/response timeout timers and the endpoint throttle. */
timer_setup(&hw->throttle, hfa384x_usb_throttlefn, 0);
timer_setup(&hw->resptimer, hfa384x_usbctlx_resptimerfn, 0);
timer_setup(&hw->reqtimer, hfa384x_usbctlx_reqtimerfn, 0);
/* Pre-initialize the fixed URBs used for RX, TX and CTLX traffic. */
usb_init_urb(&hw->rx_urb);
usb_init_urb(&hw->tx_urb);
usb_init_urb(&hw->ctlx_urb);
hw->link_status = HFA384x_LINK_NOTCONNECTED;
hw->state = HFA384x_STATE_INIT;
INIT_WORK(&hw->commsqual_bh, prism2sta_commsqual_defer);
timer_setup(&hw->commsqual_timer, prism2sta_commsqual_timer, 0);
}
/*----------------------------------------------------------------
* hfa384x_destroy
*
* Partner to hfa384x_create(). This function cleans up the hw
* structure so that it can be freed by the caller using a simple
* kfree. Currently, this function is just a placeholder. If, at some
* point in the future, an hw in the 'shutdown' state requires a 'deep'
* kfree, this is where it should be done. Note that if this function
* is called on a _running_ hw structure, the drvr_stop() function is
* called.
*
* Arguments:
* hw device structure
*
* Returns:
* nothing, this function is not allowed to fail.
*
* Side effects:
*
* Call context:
* process
*----------------------------------------------------------------
*/
void hfa384x_destroy(struct hfa384x *hw)
{
struct sk_buff *skb;
/* A running MAC must be stopped before its state is torn down. */
if (hw->state == HFA384x_STATE_RUNNING)
hfa384x_drvr_stop(hw);
hw->state = HFA384x_STATE_PREINIT;
kfree(hw->scanresults);
hw->scanresults = NULL;
/* Now to clean out the auth queue */
while ((skb = skb_dequeue(&hw->authq)))
dev_kfree_skb(skb);
}
/*
 * usbctlx_alloc - allocate and minimally initialize a CTLX structure.
 *
 * Returns a zeroed CTLX with its completion initialized, or NULL on
 * allocation failure.  Uses GFP_KERNEL: the old in_interrupt() test is
 * a deprecated pattern, and CTLX allocation happens on the command
 * submission paths, which run in process context (NOTE(review):
 * confirm no atomic-context caller remains before backporting).
 */
static struct hfa384x_usbctlx *usbctlx_alloc(void)
{
	struct hfa384x_usbctlx *ctlx;

	ctlx = kzalloc(sizeof(*ctlx), GFP_KERNEL);
	if (ctlx)
		init_completion(&ctlx->done);

	return ctlx;
}
static int
usbctlx_get_status(const struct hfa384x_usb_statusresp *cmdresp,
struct hfa384x_cmdresult *result)
{
result->status = le16_to_cpu(cmdresp->status);
result->resp0 = le16_to_cpu(cmdresp->resp0);
result->resp1 = le16_to_cpu(cmdresp->resp1);
result->resp2 = le16_to_cpu(cmdresp->resp2);
pr_debug("cmdresult:status=0x%04x resp0=0x%04x resp1=0x%04x resp2=0x%04x\n",
result->status, result->resp0, result->resp1, result->resp2);
return result->status & HFA384x_STATUS_RESULT;
}
static void
usbctlx_get_rridresult(const struct hfa384x_usb_rridresp *rridresp,
struct hfa384x_rridresult *result)
{
result->rid = le16_to_cpu(rridresp->rid);
result->riddata = rridresp->data;
result->riddata_len = ((le16_to_cpu(rridresp->frmlen) - 1) * 2);
}
/*----------------------------------------------------------------
* Completor object:
* This completor must be passed to hfa384x_usbctlx_complete_sync()
* when processing a CTLX that returns a struct hfa384x_cmdresult structure.
*----------------------------------------------------------------
*/
/* State for completing a synchronous command CTLX: where the raw
 * firmware response lives and where the decoded result is written.
 * The completor functions recover this container from the embedded
 * 'head' member.
 */
struct usbctlx_cmd_completor {
struct usbctlx_completor head;
const struct hfa384x_usb_statusresp *cmdresp;
struct hfa384x_cmdresult *result;
};
static inline int usbctlx_cmd_completor_fn(struct usbctlx_completor *head)
{
struct usbctlx_cmd_completor *complete;
complete = (struct usbctlx_cmd_completor *)head;
return usbctlx_get_status(complete->cmdresp, complete->result);
}
/*
 * init_cmd_completor - prepare a command completor for use.
 *
 * Points the completor at the raw response and the result destination,
 * installs the handler, and returns the generic completor head for
 * hfa384x_usbctlx_complete_sync().
 */
static inline struct usbctlx_completor *
init_cmd_completor(struct usbctlx_cmd_completor *completor,
		   const struct hfa384x_usb_statusresp *cmdresp,
		   struct hfa384x_cmdresult *result)
{
	completor->cmdresp = cmdresp;
	completor->result = result;
	completor->head.complete = usbctlx_cmd_completor_fn;
	return &completor->head;
}
/*----------------------------------------------------------------
* Completor object:
* This completor must be passed to hfa384x_usbctlx_complete_sync()
* when processing a CTLX that reads a RID.
*----------------------------------------------------------------
*/
/* State for completing a synchronous read-RID CTLX: the raw firmware
 * response plus the caller's destination buffer and expected length.
 */
struct usbctlx_rrid_completor {
struct usbctlx_completor head;
const struct hfa384x_usb_rridresp *rridresp;
void *riddata;
unsigned int riddatalen;
};
static int usbctlx_rrid_completor_fn(struct usbctlx_completor *head)
{
struct usbctlx_rrid_completor *complete;
struct hfa384x_rridresult rridresult;
complete = (struct usbctlx_rrid_completor *)head;
usbctlx_get_rridresult(complete->rridresp, &rridresult);
/* Validate the length, note body len calculation in bytes */
if (rridresult.riddata_len != complete->riddatalen) {
pr_warn("RID len mismatch, rid=0x%04x hlen=%d fwlen=%d\n",
rridresult.rid,
complete->riddatalen, rridresult.riddata_len);
return -ENODATA;
}
memcpy(complete->riddata, rridresult.riddata, complete->riddatalen);
return 0;
}
/*
 * init_rrid_completor - prepare a read-RID completor for use.
 *
 * Records the raw response pointer and the caller's buffer/length,
 * installs the handler, and returns the generic completor head.
 */
static inline struct usbctlx_completor *
init_rrid_completor(struct usbctlx_rrid_completor *completor,
		    const struct hfa384x_usb_rridresp *rridresp,
		    void *riddata,
		    unsigned int riddatalen)
{
	completor->riddata = riddata;
	completor->riddatalen = riddatalen;
	completor->rridresp = rridresp;
	completor->head.complete = usbctlx_rrid_completor_fn;
	return &completor->head;
}
/*----------------------------------------------------------------
* Completor object:
* Interprets the results of a synchronous RID-write
*----------------------------------------------------------------
*/
#define init_wrid_completor init_cmd_completor
/*----------------------------------------------------------------
* Completor object:
* Interprets the results of a synchronous memory-write
*----------------------------------------------------------------
*/
#define init_wmem_completor init_cmd_completor
/*----------------------------------------------------------------
* Completor object:
* Interprets the results of a synchronous memory-read
*----------------------------------------------------------------
*/
/* State for completing a synchronous memory-read CTLX: the raw
 * firmware response plus the caller's destination buffer and length.
 */
struct usbctlx_rmem_completor {
struct usbctlx_completor head;
const struct hfa384x_usb_rmemresp *rmemresp;
void *data;
unsigned int len;
};
static int usbctlx_rmem_completor_fn(struct usbctlx_completor *head)
{
struct usbctlx_rmem_completor *complete =
(struct usbctlx_rmem_completor *)head;
pr_debug("rmemresp:len=%d\n", complete->rmemresp->frmlen);
memcpy(complete->data, complete->rmemresp->data, complete->len);
return 0;
}
/*
 * init_rmem_completor - prepare a memory-read completor for use.
 *
 * Records the raw response pointer and the caller's buffer/length,
 * installs the handler, and returns the generic completor head.
 */
static inline struct usbctlx_completor *
init_rmem_completor(struct usbctlx_rmem_completor *completor,
		    struct hfa384x_usb_rmemresp *rmemresp,
		    void *data,
		    unsigned int len)
{
	completor->data = data;
	completor->len = len;
	completor->rmemresp = rmemresp;
	completor->head.complete = usbctlx_rmem_completor_fn;
	return &completor->head;
}
/*----------------------------------------------------------------
* hfa384x_cb_status
*
* Ctlx_complete handler for async CMD type control exchanges.
* mark the hw struct as such.
*
* Note: If the handling is changed here, it should probably be
* changed in docmd as well.
*
* Arguments:
* hw hw struct
* ctlx completed CTLX
*
* Returns:
* nothing
*
* Side effects:
*
* Call context:
* interrupt
*----------------------------------------------------------------
*/
static void hfa384x_cb_status(struct hfa384x *hw,
			      const struct hfa384x_usbctlx *ctlx)
{
	struct hfa384x_cmdresult cmdresult;

	/* Without a user callback there is nobody to report to. */
	if (!ctlx->usercb)
		return;

	if (ctlx->state == CTLX_COMPLETE) {
		usbctlx_get_status(&ctlx->inbuf.cmdresp, &cmdresult);
	} else {
		/* The exchange never completed: synthesize a command
		 * error result for the callback.
		 */
		memset(&cmdresult, 0, sizeof(cmdresult));
		cmdresult.status = HFA384x_STATUS_RESULT_SET(HFA384x_CMD_ERR);
	}

	ctlx->usercb(hw, &cmdresult, ctlx->usercb_data);
}
/*----------------------------------------------------------------
* hfa384x_cmd_initialize
*
* Issues the initialize command and sets the hw->state based
* on the result.
*
* Arguments:
* hw device structure
*
* Returns:
* 0 success
* >0 f/w reported error - f/w status code
* <0 driver reported error
*
* Side effects:
*
* Call context:
* process
*----------------------------------------------------------------
*/
int hfa384x_cmd_initialize(struct hfa384x *hw)
{
int result = 0;
int i;
struct hfa384x_metacmd cmd;
cmd.cmd = HFA384x_CMDCODE_INIT;
cmd.parm0 = 0;
cmd.parm1 = 0;
cmd.parm2 = 0;
result = hfa384x_docmd(hw, &cmd);
pr_debug("cmdresp.init: status=0x%04x, resp0=0x%04x, resp1=0x%04x, resp2=0x%04x\n",
cmd.result.status,
cmd.result.resp0, cmd.result.resp1, cmd.result.resp2);
if (result == 0) {
for (i = 0; i < HFA384x_NUMPORTS_MAX; i++)
hw->port_enabled[i] = 0;
}
hw->link_status = HFA384x_LINK_NOTCONNECTED;
return result;
}
/*----------------------------------------------------------------
* hfa384x_cmd_disable
*
* Issues the disable command to stop communications on one of
* the MACs 'ports'.
*
* Arguments:
* hw device structure
* macport MAC port number (host order)
*
* Returns:
* 0 success
* >0 f/w reported failure - f/w status code
* <0 driver reported error (timeout|bad arg)
*
* Side effects:
*
* Call context:
* process
*----------------------------------------------------------------
*/
/*
 * hfa384x_cmd_disable - issue the f/w Disable command for one MAC port.
 *
 * Returns 0 on success, >0 for a firmware-reported status code, <0
 * for a driver error.
 */
int hfa384x_cmd_disable(struct hfa384x *hw, u16 macport)
{
	struct hfa384x_metacmd cmd = {
		.cmd = HFA384x_CMD_CMDCODE_SET(HFA384x_CMDCODE_DISABLE) |
		       HFA384x_CMD_MACPORT_SET(macport),
		.parm0 = 0,
		.parm1 = 0,
		.parm2 = 0,
	};

	return hfa384x_docmd(hw, &cmd);
}
/*----------------------------------------------------------------
* hfa384x_cmd_enable
*
* Issues the enable command to enable communications on one of
* the MACs 'ports'.
*
* Arguments:
* hw device structure
* macport MAC port number
*
* Returns:
* 0 success
* >0 f/w reported failure - f/w status code
* <0 driver reported error (timeout|bad arg)
*
* Side effects:
*
* Call context:
* process
*----------------------------------------------------------------
*/
/*
 * hfa384x_cmd_enable - issue the f/w Enable command for one MAC port.
 *
 * Returns 0 on success, >0 for a firmware-reported status code, <0
 * for a driver error.
 */
int hfa384x_cmd_enable(struct hfa384x *hw, u16 macport)
{
	struct hfa384x_metacmd cmd = {
		.cmd = HFA384x_CMD_CMDCODE_SET(HFA384x_CMDCODE_ENABLE) |
		       HFA384x_CMD_MACPORT_SET(macport),
		.parm0 = 0,
		.parm1 = 0,
		.parm2 = 0,
	};

	return hfa384x_docmd(hw, &cmd);
}
/*----------------------------------------------------------------
* hfa384x_cmd_monitor
*
* Enables the 'monitor mode' of the MAC. Here's the description of
* monitor mode that I've received thus far:
*
* "The "monitor mode" of operation is that the MAC passes all
* frames for which the PLCP checks are correct. All received
* MPDUs are passed to the host with MAC Port = 7, with a
* receive status of good, FCS error, or undecryptable. Passing
* certain MPDUs is a violation of the 802.11 standard, but useful
* for a debugging tool." Normal communication is not possible
* while monitor mode is enabled.
*
* Arguments:
* hw device structure
* enable a code (0x0b|0x0f) that enables/disables
* monitor mode. (host order)
*
* Returns:
* 0 success
* >0 f/w reported failure - f/w status code
* <0 driver reported error (timeout|bad arg)
*
* Side effects:
*
* Call context:
* process
*----------------------------------------------------------------
*/
/*
 * hfa384x_cmd_monitor - issue the f/w Monitor command.
 *
 * 'enable' carries the firmware enable/disable code (0x0b or 0x0f per
 * the block comment above).  Returns 0 on success, >0 for a
 * firmware-reported status code, <0 for a driver error.
 */
int hfa384x_cmd_monitor(struct hfa384x *hw, u16 enable)
{
	struct hfa384x_metacmd cmd = {
		.cmd = HFA384x_CMD_CMDCODE_SET(HFA384x_CMDCODE_MONITOR) |
		       HFA384x_CMD_AINFO_SET(enable),
		.parm0 = 0,
		.parm1 = 0,
		.parm2 = 0,
	};

	return hfa384x_docmd(hw, &cmd);
}
/*----------------------------------------------------------------
* hfa384x_cmd_download
*
* Sets the controls for the MAC controller code/data download
* process. The arguments set the mode and address associated
* with a download. Note that the aux registers should be enabled
* prior to setting one of the download enable modes.
*
* Arguments:
* hw device structure
* mode 0 - Disable programming and begin code exec
* 1 - Enable volatile mem programming
* 2 - Enable non-volatile mem programming
* 3 - Program non-volatile section from NV download
* buffer.
* (host order)
* lowaddr
* highaddr For mode 1, sets the high & low order bits of
* the "destination address". This address will be
* the execution start address when download is
* subsequently disabled.
* For mode 2, sets the high & low order bits of
* the destination in NV ram.
* For modes 0 & 3, should be zero. (host order)
* NOTE: these are CMD format.
* codelen Length of the data to write in mode 2,
* zero otherwise. (host order)
*
* Returns:
* 0 success
* >0 f/w reported failure - f/w status code
* <0 driver reported error (timeout|bad arg)
*
* Side effects:
*
* Call context:
* process
*----------------------------------------------------------------
*/
/*
 * hfa384x_cmd_download - issue the f/w Download command.
 *
 * Packs the programming mode into the command word and passes the
 * low/high destination address and code length as parameters.
 * Returns 0 on success, >0 for a firmware-reported status code, <0
 * for a driver error.
 */
int hfa384x_cmd_download(struct hfa384x *hw, u16 mode, u16 lowaddr,
			 u16 highaddr, u16 codelen)
{
	struct hfa384x_metacmd cmd = {
		.cmd = HFA384x_CMD_CMDCODE_SET(HFA384x_CMDCODE_DOWNLD) |
		       HFA384x_CMD_PROGMODE_SET(mode),
		.parm0 = lowaddr,
		.parm1 = highaddr,
		.parm2 = codelen,
	};

	pr_debug("mode=%d, lowaddr=0x%04x, highaddr=0x%04x, codelen=%d\n",
		 mode, lowaddr, highaddr, codelen);

	return hfa384x_docmd(hw, &cmd);
}
/*----------------------------------------------------------------
* hfa384x_corereset
*
* Perform a reset of the hfa38xx MAC core. We assume that the hw
* structure is in its "created" state. That is, it is initialized
* with proper values. Note that if a reset is done after the
* device has been active for awhile, the caller might have to clean
* up some leftover cruft in the hw structure.
*
* Arguments:
* hw device structure
* holdtime how long (in ms) to hold the reset
* settletime how long (in ms) to wait after releasing
* the reset
*
* Returns:
* 0 on success, or the negative error returned by usb_reset_device()
*
* Side effects:
*
* Call context:
* process
*----------------------------------------------------------------
*/
int hfa384x_corereset(struct hfa384x *hw, int holdtime,
int settletime, int genesis)
{
int result;
/* USB variant: the core is reset by resetting the whole USB device,
 * so holdtime, settletime and genesis are unused here — presumably
 * kept for interface compatibility; confirm against callers.
 */
result = usb_reset_device(hw->usb);
if (result < 0) {
netdev_err(hw->wlandev->netdev, "usb_reset_device() failed, result=%d.\n",
result);
}
return result;
}
/*----------------------------------------------------------------
* hfa384x_usbctlx_complete_sync
*
* Waits for a synchronous CTLX object to complete,
* and then handles the response.
*
* Arguments:
* hw device structure
* ctlx CTLX ptr
* completor functor object to decide what to
* do with the CTLX's result.
*
* Returns:
* 0 Success
* -ERESTARTSYS Interrupted by a signal
* -EIO CTLX failed
* -ENODEV Adapter was unplugged
* ??? Result from completor
*
* Side effects:
*
* Call context:
* process
*----------------------------------------------------------------
*/
static int hfa384x_usbctlx_complete_sync(struct hfa384x *hw,
struct hfa384x_usbctlx *ctlx,
struct usbctlx_completor *completor)
{
unsigned long flags;
int result;
/* Sleep until the CTLX completes or a signal arrives; result is
 * -ERESTARTSYS in the signal case.
 */
result = wait_for_completion_interruptible(&ctlx->done);
spin_lock_irqsave(&hw->ctlxq.lock, flags);
/*
 * We can only handle the CTLX if the USB disconnect
 * function has not run yet ...
 */
cleanup:
if (hw->wlandev->hwremoved) {
spin_unlock_irqrestore(&hw->ctlxq.lock, flags);
result = -ENODEV;
} else if (result != 0) {
int runqueue = 0;
/*
 * We were probably interrupted, so delete
 * this CTLX asynchronously, kill the timers
 * and the URB, and then start the next
 * pending CTLX.
 *
 * NOTE: We can only delete the timers and
 * the URB if this CTLX is active.
 */
if (ctlx == get_active_ctlx(hw)) {
/* Drop the lock: del_timer_sync() and usb_kill_urb()
 * wait for their handlers and must not be called with
 * the queue lock held.
 */
spin_unlock_irqrestore(&hw->ctlxq.lock, flags);
del_timer_sync(&hw->reqtimer);
del_timer_sync(&hw->resptimer);
hw->req_timer_done = 1;
hw->resp_timer_done = 1;
usb_kill_urb(&hw->ctlx_urb);
spin_lock_irqsave(&hw->ctlxq.lock, flags);
runqueue = 1;
/*
 * This scenario is so unlikely that I'm
 * happy with a grubby "goto" solution ...
 */
if (hw->wlandev->hwremoved)
goto cleanup;
}
/*
 * The completion task will send this CTLX
 * to the reaper the next time it runs. We
 * are no longer in a hurry.
 */
ctlx->reapable = 1;
ctlx->state = CTLX_REQ_FAILED;
list_move_tail(&ctlx->list, &hw->ctlxq.completing);
spin_unlock_irqrestore(&hw->ctlxq.lock, flags);
if (runqueue)
hfa384x_usbctlxq_run(hw);
} else {
if (ctlx->state == CTLX_COMPLETE) {
result = completor->complete(completor);
} else {
netdev_warn(hw->wlandev->netdev, "CTLX[%d] error: state(%s)\n",
le16_to_cpu(ctlx->outbuf.type),
ctlxstr(ctlx->state));
result = -EIO;
}
/* The CTLX is finished with: free it here rather than
 * handing it to the reaper.
 */
list_del(&ctlx->list);
spin_unlock_irqrestore(&hw->ctlxq.lock, flags);
kfree(ctlx);
}
return result;
}
/*----------------------------------------------------------------
* hfa384x_docmd
*
* Constructs a command CTLX and submits it.
*
* NOTE: Any changes to the 'post-submit' code in this function
* need to be carried over to hfa384x_cbcmd() since the handling
* is virtually identical.
*
* Arguments:
* hw device structure
* cmd cmd structure. Includes all arguments and result
* data points. All in host order. in host order
*
* Returns:
* 0 success
* -EIO CTLX failure
* -ERESTARTSYS Awakened on signal
* >0 command indicated error, Status and Resp0-2 are
* in hw structure.
*
* Side effects:
*
*
* Call context:
* process
*----------------------------------------------------------------
*/
static inline int
hfa384x_docmd(struct hfa384x *hw,
struct hfa384x_metacmd *cmd)
{
int result;
struct hfa384x_usbctlx *ctlx;
ctlx = usbctlx_alloc();
if (!ctlx) {
result = -ENOMEM;
goto done;
}
/* Initialize the command */
/* Build the little-endian USB command request from the host-order
 * metacmd fields.
 */
ctlx->outbuf.cmdreq.type = cpu_to_le16(HFA384x_USB_CMDREQ);
ctlx->outbuf.cmdreq.cmd = cpu_to_le16(cmd->cmd);
ctlx->outbuf.cmdreq.parm0 = cpu_to_le16(cmd->parm0);
ctlx->outbuf.cmdreq.parm1 = cpu_to_le16(cmd->parm1);
ctlx->outbuf.cmdreq.parm2 = cpu_to_le16(cmd->parm2);
ctlx->outbufsize = sizeof(ctlx->outbuf.cmdreq);
pr_debug("cmdreq: cmd=0x%04x parm0=0x%04x parm1=0x%04x parm2=0x%04x\n",
cmd->cmd, cmd->parm0, cmd->parm1, cmd->parm2);
/* Always a synchronous (waiting) exchange: no callbacks. */
ctlx->reapable = DOWAIT;
ctlx->cmdcb = NULL;
ctlx->usercb = NULL;
ctlx->usercb_data = NULL;
result = hfa384x_usbctlx_submit(hw, ctlx);
if (result != 0) {
/* Never queued, so we still own the CTLX: free it here. */
kfree(ctlx);
} else {
struct usbctlx_cmd_completor cmd_completor;
struct usbctlx_completor *completor;
/* Wait for completion; the completor decodes the firmware
 * status/resp words into cmd->result.
 */
completor = init_cmd_completor(&cmd_completor,
&ctlx->inbuf.cmdresp,
&cmd->result);
result = hfa384x_usbctlx_complete_sync(hw, ctlx, completor);
}
done:
return result;
}
/*----------------------------------------------------------------
* hfa384x_dorrid
*
* Constructs a read rid CTLX and issues it.
*
* NOTE: Any changes to the 'post-submit' code in this function
* need to be carried over to hfa384x_cbrrid() since the handling
* is virtually identical.
*
* Arguments:
* hw device structure
* mode DOWAIT or DOASYNC
* rid Read RID number (host order)
* riddata Caller supplied buffer that MAC formatted RID.data
* record will be written to for DOWAIT calls. Should
* be NULL for DOASYNC calls.
* riddatalen Buffer length for DOWAIT calls. Zero for DOASYNC calls.
* cmdcb command callback for async calls, NULL for DOWAIT calls
* usercb user callback for async calls, NULL for DOWAIT calls
* usercb_data user supplied data pointer for async calls, NULL
* for DOWAIT calls
*
* Returns:
* 0 success
* -EIO CTLX failure
* -ERESTARTSYS Awakened on signal
* -ENODATA riddatalen != macdatalen
* >0 command indicated error, Status and Resp0-2 are
* in hw structure.
*
* Side effects:
*
* Call context:
* interrupt (DOASYNC)
* process (DOWAIT or DOASYNC)
*----------------------------------------------------------------
*/
static int
hfa384x_dorrid(struct hfa384x *hw,
	       enum cmd_mode mode,
	       u16 rid,
	       void *riddata,
	       unsigned int riddatalen,
	       ctlx_cmdcb_t cmdcb, ctlx_usercb_t usercb, void *usercb_data)
{
	int result;
	struct hfa384x_usbctlx *ctlx;

	ctlx = usbctlx_alloc();
	if (!ctlx) {
		result = -ENOMEM;
		goto done;
	}

	/* Initialize the command: the read request carries only the
	 * RID number; frmlen covers just that word.
	 */
	ctlx->outbuf.rridreq.type = cpu_to_le16(HFA384x_USB_RRIDREQ);
	ctlx->outbuf.rridreq.frmlen =
	    cpu_to_le16(sizeof(ctlx->outbuf.rridreq.rid));
	ctlx->outbuf.rridreq.rid = cpu_to_le16(rid);
	ctlx->outbufsize = sizeof(ctlx->outbuf.rridreq);

	ctlx->reapable = mode;
	ctlx->cmdcb = cmdcb;
	ctlx->usercb = usercb;
	ctlx->usercb_data = usercb_data;

	/* Submit the CTLX */
	result = hfa384x_usbctlx_submit(hw, ctlx);
	if (result != 0) {
		/* Never queued -- we still own the CTLX and must free it */
		kfree(ctlx);
	} else if (mode == DOWAIT) {
		/* Synchronous caller: block until the response arrives
		 * and the completor copies the record into riddata.
		 * In DOASYNC mode the cmdcb/usercb callbacks take over
		 * and we return right after submission.
		 */
		struct usbctlx_rrid_completor completor;

		result =
		    hfa384x_usbctlx_complete_sync(hw, ctlx,
						  init_rrid_completor
						  (&completor,
						   &ctlx->inbuf.rridresp,
						   riddata, riddatalen));
	}

done:
	return result;
}
/*----------------------------------------------------------------
* hfa384x_dowrid
*
* Constructs a write rid CTLX and issues it.
*
* NOTE: Any changes to the 'post-submit' code in this function
* need to be carried over to hfa384x_cbwrid() since the handling
* is virtually identical.
*
* Arguments:
* hw device structure
* enum cmd_mode DOWAIT or DOASYNC
* rid RID code
* riddata Data portion of RID formatted for MAC
* riddatalen Length of the data portion in bytes
* cmdcb command callback for async calls, NULL for DOWAIT calls
* usercb user callback for async calls, NULL for DOWAIT calls
* usercb_data user supplied data pointer for async calls
*
* Returns:
* 0 success
* -ETIMEDOUT timed out waiting for register ready or
* command completion
* >0 command indicated error, Status and Resp0-2 are
* in hw structure.
*
* Side effects:
*
* Call context:
* interrupt (DOASYNC)
* process (DOWAIT or DOASYNC)
*----------------------------------------------------------------
*/
static int
hfa384x_dowrid(struct hfa384x *hw,
	       enum cmd_mode mode,
	       u16 rid,
	       void *riddata,
	       unsigned int riddatalen,
	       ctlx_cmdcb_t cmdcb, ctlx_usercb_t usercb, void *usercb_data)
{
	int result;
	struct hfa384x_usbctlx *ctlx;

	ctlx = usbctlx_alloc();
	if (!ctlx) {
		result = -ENOMEM;
		goto done;
	}

	/* Initialize the command */
	ctlx->outbuf.wridreq.type = cpu_to_le16(HFA384x_USB_WRIDREQ);
	/* frmlen is expressed in 16-bit words: the rid word plus the
	 * data, with the +1 rounding a byte-odd payload up to a whole
	 * word.
	 */
	ctlx->outbuf.wridreq.frmlen = cpu_to_le16((sizeof
						   (ctlx->outbuf.wridreq.rid) +
						   riddatalen + 1) / 2);
	ctlx->outbuf.wridreq.rid = cpu_to_le16(rid);
	memcpy(ctlx->outbuf.wridreq.data, riddata, riddatalen);

	/* Only transmit the bytes actually used: fixed header fields
	 * plus the payload.
	 */
	ctlx->outbufsize = sizeof(ctlx->outbuf.wridreq.type) +
	    sizeof(ctlx->outbuf.wridreq.frmlen) +
	    sizeof(ctlx->outbuf.wridreq.rid) + riddatalen;

	ctlx->reapable = mode;
	ctlx->cmdcb = cmdcb;
	ctlx->usercb = usercb;
	ctlx->usercb_data = usercb_data;

	/* Submit the CTLX */
	result = hfa384x_usbctlx_submit(hw, ctlx);
	if (result != 0) {
		/* Never queued -- we still own the CTLX and must free it */
		kfree(ctlx);
	} else if (mode == DOWAIT) {
		/* Synchronous caller: block until the write is acked.
		 * In DOASYNC mode the callbacks take over and we return
		 * immediately after submission.
		 */
		struct usbctlx_cmd_completor completor;
		struct hfa384x_cmdresult wridresult;

		result = hfa384x_usbctlx_complete_sync(hw,
						       ctlx,
						       init_wrid_completor
						       (&completor,
							&ctlx->inbuf.wridresp,
							&wridresult));
	}

done:
	return result;
}
/*----------------------------------------------------------------
* hfa384x_dormem
*
* Constructs a readmem CTLX and issues it.
*
* NOTE: Any changes to the 'post-submit' code in this function
* need to be carried over to hfa384x_cbrmem() since the handling
* is virtually identical.
*
* Arguments:
* hw device structure
* page MAC address space page (CMD format)
* offset MAC address space offset
* data Ptr to data buffer to receive read
* len Length of the data to read (max == 2048)
*
* Returns:
* 0 success
* -ETIMEDOUT timed out waiting for register ready or
* command completion
* >0 command indicated error, Status and Resp0-2 are
* in hw structure.
*
* Side effects:
*
* Call context:
* process (DOWAIT)
*----------------------------------------------------------------
*/
static int
hfa384x_dormem(struct hfa384x *hw,
	       u16 page,
	       u16 offset,
	       void *data,
	       unsigned int len)
{
	int result;
	struct hfa384x_usbctlx *ctlx;

	ctlx = usbctlx_alloc();
	if (!ctlx) {
		result = -ENOMEM;
		goto done;
	}

	/* Initialize the command */
	ctlx->outbuf.rmemreq.type = cpu_to_le16(HFA384x_USB_RMEMREQ);
	/* frmlen covers the offset and page words plus the requested
	 * payload length.
	 */
	ctlx->outbuf.rmemreq.frmlen =
	    cpu_to_le16(sizeof(ctlx->outbuf.rmemreq.offset) +
			sizeof(ctlx->outbuf.rmemreq.page) + len);
	ctlx->outbuf.rmemreq.offset = cpu_to_le16(offset);
	ctlx->outbuf.rmemreq.page = cpu_to_le16(page);
	ctlx->outbufsize = sizeof(ctlx->outbuf.rmemreq);

	/* BUGFIX: these fields are __le16 wire-order values; convert
	 * back to host order for the trace so the output is correct on
	 * big-endian hosts too.
	 */
	pr_debug("type=0x%04x frmlen=%d offset=0x%04x page=0x%04x\n",
		 le16_to_cpu(ctlx->outbuf.rmemreq.type),
		 le16_to_cpu(ctlx->outbuf.rmemreq.frmlen),
		 le16_to_cpu(ctlx->outbuf.rmemreq.offset),
		 le16_to_cpu(ctlx->outbuf.rmemreq.page));

	pr_debug("pktsize=%zd\n", ROUNDUP64(sizeof(ctlx->outbuf.rmemreq)));

	/* Always a blocking call; no async callbacks */
	ctlx->reapable = DOWAIT;
	ctlx->cmdcb = NULL;
	ctlx->usercb = NULL;
	ctlx->usercb_data = NULL;

	result = hfa384x_usbctlx_submit(hw, ctlx);
	if (result != 0) {
		/* Never queued -- we still own the CTLX and must free it */
		kfree(ctlx);
	} else {
		/* Block until the response arrives; the completor copies
		 * the memory contents into the caller's buffer.
		 */
		struct usbctlx_rmem_completor completor;

		result =
		    hfa384x_usbctlx_complete_sync(hw, ctlx,
						  init_rmem_completor
						  (&completor,
						   &ctlx->inbuf.rmemresp, data,
						   len));
	}

done:
	return result;
}
/*----------------------------------------------------------------
* hfa384x_dowmem
*
* Constructs a writemem CTLX and issues it.
*
* NOTE: Any changes to the 'post-submit' code in this function
* need to be carried over to hfa384x_cbwmem() since the handling
* is virtually identical.
*
* Arguments:
* hw device structure
* page MAC address space page (CMD format)
* offset MAC address space offset
* data Ptr to data buffer containing write data
* len Length of the data to read (max == 2048)
*
* Returns:
* 0 success
* -ETIMEDOUT timed out waiting for register ready or
* command completion
* >0 command indicated error, Status and Resp0-2 are
* in hw structure.
*
* Side effects:
*
* Call context:
* interrupt (DOWAIT)
* process (DOWAIT)
*----------------------------------------------------------------
*/
static int
hfa384x_dowmem(struct hfa384x *hw,
	       u16 page,
	       u16 offset,
	       void *data,
	       unsigned int len)
{
	int result;
	struct hfa384x_usbctlx *ctlx;

	pr_debug("page=0x%04x offset=0x%04x len=%d\n", page, offset, len);

	ctlx = usbctlx_alloc();
	if (!ctlx) {
		result = -ENOMEM;
		goto done;
	}

	/* Initialize the command */
	ctlx->outbuf.wmemreq.type = cpu_to_le16(HFA384x_USB_WMEMREQ);
	/* frmlen covers the offset and page words plus the payload */
	ctlx->outbuf.wmemreq.frmlen =
	    cpu_to_le16(sizeof(ctlx->outbuf.wmemreq.offset) +
			sizeof(ctlx->outbuf.wmemreq.page) + len);
	ctlx->outbuf.wmemreq.offset = cpu_to_le16(offset);
	ctlx->outbuf.wmemreq.page = cpu_to_le16(page);
	memcpy(ctlx->outbuf.wmemreq.data, data, len);

	/* Only transmit the bytes actually used: fixed header fields
	 * plus the payload.
	 */
	ctlx->outbufsize = sizeof(ctlx->outbuf.wmemreq.type) +
	    sizeof(ctlx->outbuf.wmemreq.frmlen) +
	    sizeof(ctlx->outbuf.wmemreq.offset) +
	    sizeof(ctlx->outbuf.wmemreq.page) + len;

	/* Always a blocking call; no async callbacks */
	ctlx->reapable = DOWAIT;
	ctlx->cmdcb = NULL;
	ctlx->usercb = NULL;
	ctlx->usercb_data = NULL;

	result = hfa384x_usbctlx_submit(hw, ctlx);
	if (result != 0) {
		/* Never queued -- we still own the CTLX and must free it */
		kfree(ctlx);
	} else {
		/* Block until the write is acked; only the command status
		 * is of interest, copied into the local wmemresult.
		 */
		struct usbctlx_cmd_completor completor;
		struct hfa384x_cmdresult wmemresult;

		result = hfa384x_usbctlx_complete_sync(hw,
						       ctlx,
						       init_wmem_completor
						       (&completor,
							&ctlx->inbuf.wmemresp,
							&wmemresult));
	}

done:
	return result;
}
/*----------------------------------------------------------------
* hfa384x_drvr_disable
*
* Issues the disable command to stop communications on one of
* the MACs 'ports'. Only macport 0 is valid for stations.
* APs may also disable macports 1-6. Only ports that have been
* previously enabled may be disabled.
*
* Arguments:
* hw device structure
* macport MAC port number (host order)
*
* Returns:
* 0 success
* >0 f/w reported failure - f/w status code
* <0 driver reported error (timeout|bad arg)
*
* Side effects:
*
* Call context:
* process
*----------------------------------------------------------------
*/
int hfa384x_drvr_disable(struct hfa384x *hw, u16 macport)
{
	int result;

	/* Stations may only touch macport 0; APs may use any port up to
	 * HFA384x_PORTID_MAX.  The port must currently be enabled before
	 * it can be disabled.
	 */
	if ((!hw->isap && macport != 0) ||
	    (hw->isap && !(macport <= HFA384x_PORTID_MAX)) ||
	    !(hw->port_enabled[macport]))
		return -EINVAL;

	result = hfa384x_cmd_disable(hw, macport);
	if (result == 0)
		hw->port_enabled[macport] = 0;

	return result;
}
/*----------------------------------------------------------------
* hfa384x_drvr_enable
*
* Issues the enable command to enable communications on one of
* the MACs 'ports'. Only macport 0 is valid for stations.
* APs may also enable macports 1-6. Only ports that are currently
* disabled may be enabled.
*
* Arguments:
* hw device structure
* macport MAC port number
*
* Returns:
* 0 success
* >0 f/w reported failure - f/w status code
* <0 driver reported error (timeout|bad arg)
*
* Side effects:
*
* Call context:
* process
*----------------------------------------------------------------
*/
int hfa384x_drvr_enable(struct hfa384x *hw, u16 macport)
{
	int result;

	/* Stations may only touch macport 0; APs may use any port up to
	 * HFA384x_PORTID_MAX.  The port must currently be disabled before
	 * it can be enabled.
	 */
	if ((!hw->isap && macport != 0) ||
	    (hw->isap && !(macport <= HFA384x_PORTID_MAX)) ||
	    (hw->port_enabled[macport]))
		return -EINVAL;

	result = hfa384x_cmd_enable(hw, macport);
	if (result == 0)
		hw->port_enabled[macport] = 1;

	return result;
}
/*----------------------------------------------------------------
* hfa384x_drvr_flashdl_enable
*
* Begins the flash download state. Checks to see that we're not
* already in a download state and that a port isn't enabled.
* Sets the download state and retrieves the flash download
* buffer location, buffer size, and timeout length.
*
* Arguments:
* hw device structure
*
* Returns:
* 0 success
* >0 f/w reported error - f/w status code
* <0 driver reported error
*
* Side effects:
*
* Call context:
* process
*----------------------------------------------------------------
*/
int hfa384x_drvr_flashdl_enable(struct hfa384x *hw)
{
	int result = 0;
	int i;

	/* Check that a port isn't active */
	/* NOTE(review): this loop stops at HFA384x_PORTID_MAX while
	 * hfa384x_drvr_stop() clears HFA384x_NUMPORTS_MAX entries --
	 * confirm the intended bound.
	 */
	for (i = 0; i < HFA384x_PORTID_MAX; i++) {
		if (hw->port_enabled[i]) {
			pr_debug("called when port enabled.\n");
			return -EINVAL;
		}
	}

	/* Check that we're not already in a download state */
	if (hw->dlstate != HFA384x_DLSTATE_DISABLED)
		return -EINVAL;

	/* Retrieve the buffer loc&size and timeout */
	result = hfa384x_drvr_getconfig(hw, HFA384x_RID_DOWNLOADBUFFER,
					&hw->bufinfo, sizeof(hw->bufinfo));
	if (result)
		return result;

	/* RID data arrives in little-endian wire order; convert the
	 * fields in place to host order before using them.
	 */
	le16_to_cpus(&hw->bufinfo.page);
	le16_to_cpus(&hw->bufinfo.offset);
	le16_to_cpus(&hw->bufinfo.len);
	result = hfa384x_drvr_getconfig16(hw, HFA384x_RID_MAXLOADTIME,
					  &hw->dltimeout);
	if (result)
		return result;

	le16_to_cpus(&hw->dltimeout);

	pr_debug("flashdl_enable\n");

	hw->dlstate = HFA384x_DLSTATE_FLASHENABLED;

	return result;
}
/*----------------------------------------------------------------
* hfa384x_drvr_flashdl_disable
*
* Ends the flash download state. Note that this will cause the MAC
* firmware to restart.
*
* Arguments:
* hw device structure
*
* Returns:
* 0 success
* >0 f/w reported error - f/w status code
* <0 driver reported error
*
* Side effects:
*
* Call context:
* process
*----------------------------------------------------------------
*/
int hfa384x_drvr_flashdl_disable(struct hfa384x *hw)
{
	/* Check that we're already in the download state */
	if (hw->dlstate != HFA384x_DLSTATE_FLASHENABLED)
		return -EINVAL;

	/* BUGFIX: this trace used to say "flashdl_enable" -- a copy-paste
	 * leftover from hfa384x_drvr_flashdl_enable() that made debug
	 * logs misleading.
	 */
	pr_debug("flashdl_disable\n");

	/* There isn't much we can do at this point, so I don't */
	/* bother w/ the return value */
	hfa384x_cmd_download(hw, HFA384x_PROGMODE_DISABLE, 0, 0, 0);
	hw->dlstate = HFA384x_DLSTATE_DISABLED;

	return 0;
}
/*----------------------------------------------------------------
* hfa384x_drvr_flashdl_write
*
* Performs a FLASH download of a chunk of data. First checks to see
* that we're in the FLASH download state, then sets the download
* mode, uses the aux functions to 1) copy the data to the flash
* buffer, 2) sets the download 'write flash' mode, 3) readback and
* compare. Lather, rinse, repeat as many times as necessary to get
* all the given data into flash.
* When all data has been written using this function (possibly
* repeatedly), call drvr_flashdl_disable() to end the download state
* and restart the MAC.
*
* Arguments:
* hw device structure
* daddr Card address to write to. (host order)
* buf Ptr to data to write.
* len Length of data (host order).
*
* Returns:
* 0 success
* >0 f/w reported error - f/w status code
* <0 driver reported error
*
* Side effects:
*
* Call context:
* process
*----------------------------------------------------------------
*/
int hfa384x_drvr_flashdl_write(struct hfa384x *hw, u32 daddr,
			       void *buf, u32 len)
{
	int result = 0;
	u32 dlbufaddr;
	int nburns;
	u32 burnlen;
	u32 burndaddr;
	u16 burnlo;
	u16 burnhi;
	int nwrites;
	u8 *writebuf;
	u16 writepage;
	u16 writeoffset;
	u32 writelen;
	int i;
	int j;

	pr_debug("daddr=0x%08x len=%d\n", daddr, len);

	/* Check that we're in the flash download state */
	if (hw->dlstate != HFA384x_DLSTATE_FLASHENABLED)
		return -EINVAL;

	netdev_info(hw->wlandev->netdev,
		    "Download %d bytes to flash @0x%06x\n", len, daddr);

	/* Convert to flat address for arithmetic */
	/* NOTE: dlbuffer RID stores the address in AUX format */
	dlbufaddr =
	    HFA384x_ADDR_AUX_MKFLAT(hw->bufinfo.page, hw->bufinfo.offset);
	pr_debug("dlbuf.page=0x%04x dlbuf.offset=0x%04x dlbufaddr=0x%08x\n",
		 hw->bufinfo.page, hw->bufinfo.offset, dlbufaddr);

	/* Calculations to determine how many fills of the dlbuffer to do
	 * and how many USB wmemreq's to do for each fill. At this point
	 * in time, the dlbuffer size and the wmemreq size are the same.
	 * Therefore, nwrites should always be 1. The extra complexity
	 * here is a hedge against future changes.
	 */

	/* Figure out how many times to do the flash programming */
	nburns = len / hw->bufinfo.len;
	nburns += (len % hw->bufinfo.len) ? 1 : 0;

	/* For each flash program cycle, how many USB wmemreq's are needed? */
	nwrites = hw->bufinfo.len / HFA384x_USB_RWMEM_MAXLEN;
	nwrites += (hw->bufinfo.len % HFA384x_USB_RWMEM_MAXLEN) ? 1 : 0;

	/* For each burn */
	for (i = 0; i < nburns; i++) {
		/* Get the dest address and len */
		burnlen = (len - (hw->bufinfo.len * i)) > hw->bufinfo.len ?
		    hw->bufinfo.len : (len - (hw->bufinfo.len * i));
		burndaddr = daddr + (hw->bufinfo.len * i);
		burnlo = HFA384x_ADDR_CMD_MKOFF(burndaddr);
		burnhi = HFA384x_ADDR_CMD_MKPAGE(burndaddr);

		netdev_info(hw->wlandev->netdev, "Writing %d bytes to flash @0x%06x\n",
			    burnlen, burndaddr);

		/* Set the download mode */
		result = hfa384x_cmd_download(hw, HFA384x_PROGMODE_NV,
					      burnlo, burnhi, burnlen);
		if (result) {
			netdev_err(hw->wlandev->netdev,
				   "download(NV,lo=%x,hi=%x,len=%x) cmd failed, result=%d. Aborting d/l\n",
				   burnlo, burnhi, burnlen, result);
			goto exit_proc;
		}

		/* copy the data to the flash download buffer */
		for (j = 0; j < nwrites; j++) {
			writebuf = buf +
			    (i * hw->bufinfo.len) +
			    (j * HFA384x_USB_RWMEM_MAXLEN);

			writepage = HFA384x_ADDR_CMD_MKPAGE(dlbufaddr +
						(j * HFA384x_USB_RWMEM_MAXLEN));
			writeoffset = HFA384x_ADDR_CMD_MKOFF(dlbufaddr +
						(j * HFA384x_USB_RWMEM_MAXLEN));

			/* NOTE(review): if nwrites were ever > 1 and the
			 * final burn is short, this subtraction could
			 * underflow; currently nwrites == 1 so the case is
			 * unreachable -- confirm before changing buffer
			 * sizes.
			 */
			writelen = burnlen - (j * HFA384x_USB_RWMEM_MAXLEN);
			writelen = writelen > HFA384x_USB_RWMEM_MAXLEN ?
			    HFA384x_USB_RWMEM_MAXLEN : writelen;

			result = hfa384x_dowmem(hw,
						writepage,
						writeoffset,
						writebuf, writelen);
			/* BUGFIX: this result used to be ignored; a failed
			 * buffer fill would still be burned to flash,
			 * programming garbage.  Abort the download instead.
			 */
			if (result) {
				netdev_err(hw->wlandev->netdev,
					   "wmem(page=%x,off=%x,len=%x) failed, result=%d. Aborting d/l\n",
					   writepage, writeoffset,
					   writelen, result);
				goto exit_proc;
			}
		}

		/* set the download 'write flash' mode */
		result = hfa384x_cmd_download(hw,
					      HFA384x_PROGMODE_NVWRITE,
					      0, 0, 0);
		if (result) {
			netdev_err(hw->wlandev->netdev,
				   "download(NVWRITE,lo=%x,hi=%x,len=%x) cmd failed, result=%d. Aborting d/l\n",
				   burnlo, burnhi, burnlen, result);
			goto exit_proc;
		}

		/* TODO: We really should do a readback and compare. */
	}

exit_proc:
	/* Leave the firmware in the 'post-prog' mode. flashdl_disable will */
	/* actually disable programming mode. Remember, that will cause the */
	/* firmware to effectively reset itself. */

	return result;
}
/*----------------------------------------------------------------
* hfa384x_drvr_getconfig
*
* Performs the sequence necessary to read a config/info item.
*
* Arguments:
* hw device structure
* rid config/info record id (host order)
* buf host side record buffer. Upon return it will
* contain the body portion of the record (minus the
* RID and len).
* len buffer length (in bytes, should match record length)
*
* Returns:
* 0 success
* >0 f/w reported error - f/w status code
* <0 driver reported error
* -ENODATA length mismatch between argument and retrieved
* record.
*
* Side effects:
*
* Call context:
* process
*----------------------------------------------------------------
*/
int hfa384x_drvr_getconfig(struct hfa384x *hw, u16 rid, void *buf, u16 len)
{
	/* Synchronous RID read: block until the MAC returns the record.
	 * No callbacks are used in DOWAIT mode.
	 */
	return hfa384x_dorrid(hw, DOWAIT, rid, buf, len,
			      NULL, NULL, NULL);
}
/*----------------------------------------------------------------
* hfa384x_drvr_setconfig_async
*
* Performs the sequence necessary to write a config/info item.
*
* Arguments:
* hw device structure
* rid config/info record id (in host order)
* buf host side record buffer
* len buffer length (in bytes)
* usercb completion callback
* usercb_data completion callback argument
*
* Returns:
* 0 success
* >0 f/w reported error - f/w status code
* <0 driver reported error
*
* Side effects:
*
* Call context:
* process
*----------------------------------------------------------------
*/
int
hfa384x_drvr_setconfig_async(struct hfa384x *hw,
			     u16 rid,
			     void *buf,
			     u16 len, ctlx_usercb_t usercb, void *usercb_data)
{
	/* Asynchronous RID write: returns as soon as the CTLX is
	 * submitted.  hfa384x_cb_status relays the command status to
	 * the caller's usercb when the exchange finishes.
	 */
	return hfa384x_dowrid(hw, DOASYNC, rid, buf, len,
			      hfa384x_cb_status, usercb, usercb_data);
}
/*----------------------------------------------------------------
* hfa384x_drvr_ramdl_disable
*
* Ends the ram download state.
*
* Arguments:
* hw device structure
*
* Returns:
* 0 success
* >0 f/w reported error - f/w status code
* <0 driver reported error
*
* Side effects:
*
* Call context:
* process
*----------------------------------------------------------------
*/
int hfa384x_drvr_ramdl_disable(struct hfa384x *hw)
{
	/* Only legal while a RAM download is actually in progress */
	if (hw->dlstate != HFA384x_DLSTATE_RAMENABLED)
		return -EINVAL;

	pr_debug("ramdl_disable()\n");

	/* Best effort: nothing useful can be done with a failure here,
	 * so the return value of the download command is ignored.
	 */
	hfa384x_cmd_download(hw, HFA384x_PROGMODE_DISABLE, 0, 0, 0);
	hw->dlstate = HFA384x_DLSTATE_DISABLED;

	return 0;
}
/*----------------------------------------------------------------
* hfa384x_drvr_ramdl_enable
*
* Begins the ram download state. Checks to see that we're not
* already in a download state and that a port isn't enabled.
* Sets the download state and calls cmd_download with the
* ENABLE_VOLATILE subcommand and the exeaddr argument.
*
* Arguments:
* hw device structure
* exeaddr the card execution address that will be
* jumped to when ramdl_disable() is called
* (host order).
*
* Returns:
* 0 success
* >0 f/w reported error - f/w status code
* <0 driver reported error
*
* Side effects:
*
* Call context:
* process
*----------------------------------------------------------------
*/
int hfa384x_drvr_ramdl_enable(struct hfa384x *hw, u32 exeaddr)
{
	u16 offs;
	u16 page;
	int result;
	int i;

	/* Refuse if any macport is still running */
	for (i = 0; i < HFA384x_PORTID_MAX; i++) {
		if (hw->port_enabled[i]) {
			netdev_err(hw->wlandev->netdev,
				   "Can't download with a macport enabled.\n");
			return -EINVAL;
		}
	}

	/* Refuse if some download (flash or ram) is already underway */
	if (hw->dlstate != HFA384x_DLSTATE_DISABLED) {
		netdev_err(hw->wlandev->netdev,
			   "Download state not disabled.\n");
		return -EINVAL;
	}

	pr_debug("ramdl_enable, exeaddr=0x%08x\n", exeaddr);

	/* Split the flat execution address into the CMD-format
	 * offset/page pair and issue download(RAM).
	 */
	offs = HFA384x_ADDR_CMD_MKOFF(exeaddr);
	page = HFA384x_ADDR_CMD_MKPAGE(exeaddr);
	result = hfa384x_cmd_download(hw, HFA384x_PROGMODE_RAM,
				      offs, page, 0);

	if (result == 0)
		hw->dlstate = HFA384x_DLSTATE_RAMENABLED;
	else
		pr_debug("cmd_download(0x%04x, 0x%04x) failed, result=%d.\n",
			 offs, page, result);

	return result;
}
/*----------------------------------------------------------------
* hfa384x_drvr_ramdl_write
*
* Performs a RAM download of a chunk of data. First checks to see
* that we're in the RAM download state, then uses the [read|write]mem USB
* commands to 1) copy the data, 2) readback and compare. The download
* state is unaffected. When all data has been written using
* this function, call drvr_ramdl_disable() to end the download state
* and restart the MAC.
*
* Arguments:
* hw device structure
* daddr Card address to write to. (host order)
* buf Ptr to data to write.
* len Length of data (host order).
*
* Returns:
* 0 success
* >0 f/w reported error - f/w status code
* <0 driver reported error
*
* Side effects:
*
* Call context:
* process
*----------------------------------------------------------------
*/
int hfa384x_drvr_ramdl_write(struct hfa384x *hw, u32 daddr, void *buf, u32 len)
{
	int result = 0;
	u8 *src = buf;
	u32 addr = daddr;
	u32 remaining = len;

	/* Only valid while the RAM download state is active */
	if (hw->dlstate != HFA384x_DLSTATE_RAMENABLED)
		return -EINVAL;

	netdev_info(hw->wlandev->netdev, "Writing %d bytes to ram @0x%06x\n",
		    len, daddr);

	/* Push the image down in HFA384x_USB_RWMEM_MAXLEN sized chunks,
	 * each as one blocking wmem CTLX.
	 */
	while (remaining > 0) {
		u16 chunk = remaining > HFA384x_USB_RWMEM_MAXLEN ?
			    HFA384x_USB_RWMEM_MAXLEN : remaining;

		result = hfa384x_dowmem(hw,
					HFA384x_ADDR_CMD_MKPAGE(addr),
					HFA384x_ADDR_CMD_MKOFF(addr),
					src, chunk);
		if (result)
			break;

		src += chunk;
		addr += chunk;
		remaining -= chunk;

		/* TODO: We really should have a readback. */
	}

	return result;
}
/*----------------------------------------------------------------
* hfa384x_drvr_readpda
*
* Performs the sequence to read the PDA space. Note there is no
* drvr_writepda() function. Writing a PDA is
* generally implemented by a calling component via calls to
* cmd_download and writing to the flash download buffer via the
* aux regs.
*
* Arguments:
* hw device structure
* buf buffer to store PDA in
* len buffer length
*
* Returns:
* 0 success
* >0 f/w reported error - f/w status code
* <0 driver reported error
* -ETIMEDOUT timeout waiting for the cmd regs to become
* available, or waiting for the control reg
* to indicate the Aux port is enabled.
* -ENODATA the buffer does NOT contain a valid PDA.
* Either the card PDA is bad, or the auxdata
* reads are giving us garbage.
*
*
* Side effects:
*
* Call context:
* process or non-card interrupt.
*----------------------------------------------------------------
*/
int hfa384x_drvr_readpda(struct hfa384x *hw, void *buf, unsigned int len)
{
	int result = 0;
	__le16 *pda = buf;	/* word-aligned view of the raw PDA image */
	int pdaok = 0;
	int morepdrs = 1;
	int currpdr = 0;	/* word offset of the current pdr */
	size_t i;
	u16 pdrlen;		/* pdr length in bytes, host order */
	u16 pdrcode;		/* pdr code, host order */
	u16 currpage;
	u16 curroffset;
	/* Candidate card addresses where the PDA may live, tried in order */
	struct pdaloc {
		u32 cardaddr;
		u16 auxctl;
	} pdaloc[] = {
		{
		HFA3842_PDA_BASE, 0}, {
		HFA3841_PDA_BASE, 0}, {
		HFA3841_PDA_BOGUS_BASE, 0}
	};

	/* Read the pda from each known address. */
	for (i = 0; i < ARRAY_SIZE(pdaloc); i++) {
		/* Make address */
		currpage = HFA384x_ADDR_CMD_MKPAGE(pdaloc[i].cardaddr);
		curroffset = HFA384x_ADDR_CMD_MKOFF(pdaloc[i].cardaddr);

		/* units of bytes */
		result = hfa384x_dormem(hw, currpage, curroffset, buf,
					len);

		if (result) {
			netdev_warn(hw->wlandev->netdev,
				    "Read from index %zd failed, continuing\n",
				    i);
			continue;
		}

		/* Test for garbage */
		pdaok = 1;	/* initially assume good */
		morepdrs = 1;
		/* Walk the PDR chain to sanity-check the buffer: each
		 * record starts with a length word (in 16-bit words, not
		 * counting the length word itself) followed by a code
		 * word.  A bad length or code marks this candidate as
		 * garbage and we move on to the next address.
		 */
		while (pdaok && morepdrs) {
			pdrlen = le16_to_cpu(pda[currpdr]) * 2;
			pdrcode = le16_to_cpu(pda[currpdr + 1]);
			/* Test the record length */
			if (pdrlen > HFA384x_PDR_LEN_MAX || pdrlen == 0) {
				netdev_err(hw->wlandev->netdev,
					   "pdrlen invalid=%d\n", pdrlen);
				pdaok = 0;
				break;
			}
			/* Test the code */
			if (!hfa384x_isgood_pdrcode(pdrcode)) {
				netdev_err(hw->wlandev->netdev, "pdrcode invalid=%d\n",
					   pdrcode);
				pdaok = 0;
				break;
			}
			/* Test for completion */
			if (pdrcode == HFA384x_PDR_END_OF_PDA)
				morepdrs = 0;

			/* Move to the next pdr (if necessary) */
			if (morepdrs) {
				/* note the access to pda[], need words here */
				currpdr += le16_to_cpu(pda[currpdr]) + 1;
			}
		}
		if (pdaok) {
			netdev_info(hw->wlandev->netdev,
				    "PDA Read from 0x%08x in %s space.\n",
				    pdaloc[i].cardaddr,
				    pdaloc[i].auxctl == 0 ? "EXTDS" :
				    pdaloc[i].auxctl == 1 ? "NV" :
				    pdaloc[i].auxctl == 2 ? "PHY" :
				    pdaloc[i].auxctl == 3 ? "ICSRAM" :
				    "<bogus auxctl>");
			break;
		}
	}
	result = pdaok ? 0 : -ENODATA;

	if (result)
		pr_debug("Failure: pda is not okay\n");

	return result;
}
/*----------------------------------------------------------------
* hfa384x_drvr_setconfig
*
* Performs the sequence necessary to write a config/info item.
*
* Arguments:
* hw device structure
* rid config/info record id (in host order)
* buf host side record buffer
* len buffer length (in bytes)
*
* Returns:
* 0 success
* >0 f/w reported error - f/w status code
* <0 driver reported error
*
* Side effects:
*
* Call context:
* process
*----------------------------------------------------------------
*/
int hfa384x_drvr_setconfig(struct hfa384x *hw, u16 rid, void *buf, u16 len)
{
	/* Synchronous RID write: block until the MAC acks the record.
	 * No callbacks are used in DOWAIT mode.
	 */
	return hfa384x_dowrid(hw, DOWAIT, rid, buf, len,
			      NULL, NULL, NULL);
}
/*----------------------------------------------------------------
* hfa384x_drvr_start
*
* Issues the MAC initialize command, sets up some data structures,
* and enables the interrupts. After this function completes, the
* low-level stuff should be ready for any/all commands.
*
* Arguments:
* hw device structure
* Returns:
* 0 success
* >0 f/w reported error - f/w status code
* <0 driver reported error
*
* Side effects:
*
* Call context:
* process
*----------------------------------------------------------------
*/
int hfa384x_drvr_start(struct hfa384x *hw)
{
	int result, result1, result2;
	u16 status;

	might_sleep();

	/* Clear endpoint stalls - but only do this if the endpoint
	 * is showing a stall status. Some prism2 cards seem to behave
	 * badly if a clear_halt is called when the endpoint is already
	 * ok
	 */
	result =
	    usb_get_std_status(hw->usb, USB_RECIP_ENDPOINT, hw->endp_in,
			       &status);
	if (result < 0) {
		netdev_err(hw->wlandev->netdev, "Cannot get bulk in endpoint status.\n");
		goto done;
	}
	/* status == 1 means the endpoint halt feature is set */
	if ((status == 1) && usb_clear_halt(hw->usb, hw->endp_in))
		netdev_err(hw->wlandev->netdev, "Failed to reset bulk in endpoint.\n");

	result =
	    usb_get_std_status(hw->usb, USB_RECIP_ENDPOINT, hw->endp_out,
			       &status);
	if (result < 0) {
		netdev_err(hw->wlandev->netdev, "Cannot get bulk out endpoint status.\n");
		goto done;
	}
	if ((status == 1) && usb_clear_halt(hw->usb, hw->endp_out))
		netdev_err(hw->wlandev->netdev, "Failed to reset bulk out endpoint.\n");

	/* Synchronous unlink, in case we're trying to restart the driver */
	usb_kill_urb(&hw->rx_urb);

	/* Post the IN urb */
	result = submit_rx_urb(hw, GFP_KERNEL);
	if (result != 0) {
		netdev_err(hw->wlandev->netdev,
			   "Fatal, failed to submit RX URB, result=%d\n",
			   result);
		goto done;
	}

	/* Call initialize twice, with a 1 second sleep in between.
	 * This is a nasty work-around since many prism2 cards seem to
	 * need time to settle after an init from cold. The second
	 * call to initialize in theory is not necessary - but we call
	 * it anyway as a double insurance policy:
	 * 1) If the first init should fail, the second may well succeed
	 * and the card can still be used
	 * 2) It helps ensure all is well with the card after the first
	 * init and settle time.
	 */
	result1 = hfa384x_cmd_initialize(hw);
	msleep(1000);
	result = hfa384x_cmd_initialize(hw);
	result2 = result;
	if (result1 != 0) {
		if (result2 != 0) {
			/* Both attempts failed: tear down the RX URB we
			 * posted above and report the failure.
			 */
			netdev_err(hw->wlandev->netdev,
				   "cmd_initialize() failed on two attempts, results %d and %d\n",
				   result1, result2);
			usb_kill_urb(&hw->rx_urb);
			goto done;
		} else {
			pr_debug("First cmd_initialize() failed (result %d),\n",
				 result1);
			pr_debug("but second attempt succeeded. All should be ok\n");
		}
	} else if (result2 != 0) {
		/* First init worked, second didn't: proceed anyway but
		 * warn, since the card is most likely usable.
		 */
		netdev_warn(hw->wlandev->netdev, "First cmd_initialize() succeeded, but second attempt failed (result=%d)\n",
			    result2);
		netdev_warn(hw->wlandev->netdev,
			    "Most likely the card will be functional\n");
		goto done;
	}

	hw->state = HFA384x_STATE_RUNNING;

done:
	return result;
}
/*----------------------------------------------------------------
* hfa384x_drvr_stop
*
* Shuts down the MAC to the point where it is safe to unload the
* driver. Any subsystem that may be holding a data or function
* ptr into the driver must be cleared/deinitialized.
*
* Arguments:
* hw device structure
* Returns:
* 0 success
* >0 f/w reported error - f/w status code
* <0 driver reported error
*
* Side effects:
*
* Call context:
* process
*----------------------------------------------------------------
*/
int hfa384x_drvr_stop(struct hfa384x *hw)
{
	int i;

	might_sleep();

	/* There's no need for spinlocks here. The USB "disconnect"
	 * function sets this "removed" flag and then calls us.
	 */
	if (!hw->wlandev->hwremoved) {
		/* Call initialize to leave the MAC in its 'reset' state */
		hfa384x_cmd_initialize(hw);

		/* Cancel the rxurb */
		usb_kill_urb(&hw->rx_urb);
	}

	hw->link_status = HFA384x_LINK_NOTCONNECTED;
	hw->state = HFA384x_STATE_INIT;

	/* Stop the periodic comms-quality poll; waits for a running
	 * timer callback to finish before returning.
	 */
	del_timer_sync(&hw->commsqual_timer);

	/* Clear all the port status */
	for (i = 0; i < HFA384x_NUMPORTS_MAX; i++)
		hw->port_enabled[i] = 0;

	return 0;
}
/*----------------------------------------------------------------
* hfa384x_drvr_txframe
*
* Takes a frame from prism2sta and queues it for transmission.
*
* Arguments:
* hw device structure
* skb packet buffer struct. Contains an 802.11
* data frame.
* p80211_hdr points to the 802.11 header for the packet.
* Returns:
* 0 Success and more buffs available
* 1 Success but no more buffs
* 2 Allocation failure
* 4 Buffer full or queue busy
*
* Side effects:
*
* Call context:
* interrupt
*----------------------------------------------------------------
*/
int hfa384x_drvr_txframe(struct hfa384x *hw, struct sk_buff *skb,
			 struct p80211_hdr *p80211_hdr,
			 struct p80211_metawep *p80211_wep)
{
	int usbpktlen = sizeof(struct hfa384x_tx_frame);
	int result;
	int ret;
	char *ptr;

	/* Single TX URB: if it is still in flight we cannot queue
	 * another frame.
	 * NOTE(review): the function header documents return codes
	 * 0/1/2/4 but this path returns 3 -- confirm which is intended.
	 */
	if (hw->tx_urb.status == -EINPROGRESS) {
		netdev_warn(hw->wlandev->netdev, "TX URB already in use\n");
		result = 3;
		goto exit;
	}

	/* Build Tx frame structure */
	/* Set up the control field */
	memset(&hw->txbuff.txfrm.desc, 0, sizeof(hw->txbuff.txfrm.desc));

	/* Setup the usb type field */
	hw->txbuff.type = cpu_to_le16(HFA384x_USB_TXFRM);

	/* Set up the sw_support field to identify this frame */
	hw->txbuff.txfrm.desc.sw_support = 0x0123;

	/* Tx complete and Tx exception disable per dleach. Might be causing
	 * buf depletion
	 */
	/* #define DOEXC SLP -- doboth breaks horribly under load, doexc less so. */
#if defined(DOBOTH)
	hw->txbuff.txfrm.desc.tx_control =
	    HFA384x_TX_MACPORT_SET(0) | HFA384x_TX_STRUCTYPE_SET(1) |
	    HFA384x_TX_TXEX_SET(1) | HFA384x_TX_TXOK_SET(1);
#elif defined(DOEXC)
	hw->txbuff.txfrm.desc.tx_control =
	    HFA384x_TX_MACPORT_SET(0) | HFA384x_TX_STRUCTYPE_SET(1) |
	    HFA384x_TX_TXEX_SET(1) | HFA384x_TX_TXOK_SET(0);
#else
	hw->txbuff.txfrm.desc.tx_control =
	    HFA384x_TX_MACPORT_SET(0) | HFA384x_TX_STRUCTYPE_SET(1) |
	    HFA384x_TX_TXEX_SET(0) | HFA384x_TX_TXOK_SET(0);
#endif
	/* Convert the assembled control word to wire (LE) order in place */
	cpu_to_le16s(&hw->txbuff.txfrm.desc.tx_control);

	/* copy the header over to the txdesc */
	hw->txbuff.txfrm.desc.hdr = *p80211_hdr;

	/* if we're using host WEP, increase size by IV+ICV */
	if (p80211_wep->data) {
		hw->txbuff.txfrm.desc.data_len = cpu_to_le16(skb->len + 8);
		usbpktlen += 8;
	} else {
		hw->txbuff.txfrm.desc.data_len = cpu_to_le16(skb->len);
	}

	usbpktlen += skb->len;

	/* copy over the WEP IV if we are using host WEP */
	ptr = hw->txbuff.txfrm.data;
	if (p80211_wep->data) {
		memcpy(ptr, p80211_wep->iv, sizeof(p80211_wep->iv));
		ptr += sizeof(p80211_wep->iv);
		/* host-WEP payload comes from p80211_wep->data, not the
		 * skb; frame layout is IV | payload | ICV
		 */
		memcpy(ptr, p80211_wep->data, skb->len);
	} else {
		memcpy(ptr, skb->data, skb->len);
	}
	/* copy over the packet data */
	ptr += skb->len;

	/* copy over the WEP ICV if we are using host WEP */
	if (p80211_wep->data)
		memcpy(ptr, p80211_wep->icv, sizeof(p80211_wep->icv));

	/* Send the USB packet */
	usb_fill_bulk_urb(&hw->tx_urb, hw->usb,
			  hw->endp_out,
			  &hw->txbuff, ROUNDUP64(usbpktlen),
			  hfa384x_usbout_callback, hw->wlandev);
	hw->tx_urb.transfer_flags |= USB_QUEUE_BULK;

	result = 1;
	ret = submit_tx_urb(hw, &hw->tx_urb, GFP_ATOMIC);
	if (ret != 0) {
		netdev_err(hw->wlandev->netdev,
			   "submit_tx_urb() failed, error=%d\n", ret);
		result = 3;
	}

exit:
	return result;
}
/* Recover from a transmit timeout by requesting a halt/reset of both
 * the TX and RX USB pipes, unless the adapter has been unplugged.
 * Scheduling the USB worker is skipped if both halts were already
 * pending.
 */
void hfa384x_tx_timeout(struct wlandevice *wlandev)
{
	struct hfa384x *hw = wlandev->priv;
	unsigned long flags;

	spin_lock_irqsave(&hw->ctlxq.lock, flags);
	if (!hw->wlandev->hwremoved) {
		int need_work = 0;

		/* Both bits must be set, so do not short-circuit */
		if (!test_and_set_bit(WORK_TX_HALT, &hw->usb_flags))
			need_work = 1;
		if (!test_and_set_bit(WORK_RX_HALT, &hw->usb_flags))
			need_work = 1;
		if (need_work)
			schedule_work(&hw->usb_work);
	}
	spin_unlock_irqrestore(&hw->ctlxq.lock, flags);
}
/*----------------------------------------------------------------
* hfa384x_usbctlx_reaper_task
*
* Deferred work callback to delete dead CTLX objects
*
* Arguments:
* work contains ptr to a struct hfa384x
*
* Returns:
*
* Call context:
* Task
*----------------------------------------------------------------
*/
/* Work handler that frees every CTLX parked on the "reapable" list. */
static void hfa384x_usbctlx_reaper_task(struct work_struct *work)
{
	struct hfa384x *hw = container_of(work, struct hfa384x, reaper_bh);
	struct hfa384x_usbctlx *victim, *next;
	unsigned long irq_flags;

	spin_lock_irqsave(&hw->ctlxq.lock, irq_flags);
	/* The disconnect path guarantees this list is empty once the
	 * adapter has been unplugged, so anything found here is ours
	 * to release.
	 */
	list_for_each_entry_safe(victim, next, &hw->ctlxq.reapable, list) {
		list_del(&victim->list);
		kfree(victim);
	}
	spin_unlock_irqrestore(&hw->ctlxq.lock, irq_flags);
}
/*----------------------------------------------------------------
* hfa384x_usbctlx_completion_task
*
* Deferred work callback to call completion handlers for returned CTLXs
*
* Arguments:
* work contains ptr to a struct hfa384x
*
* Returns:
* Nothing
*
* Call context:
* Task
*----------------------------------------------------------------
*/
static void hfa384x_usbctlx_completion_task(struct work_struct *work)
{
	struct hfa384x *hw = container_of(work, struct hfa384x, completion_bh);
	struct hfa384x_usbctlx *ctlx, *temp;
	unsigned long flags;
	int reap = 0;
	spin_lock_irqsave(&hw->ctlxq.lock, flags);
	/* This list is guaranteed to be empty if someone
	 * has unplugged the adapter ...
	 */
	list_for_each_entry_safe(ctlx, temp, &hw->ctlxq.completing, list) {
		/* Call the completion function that this
		 * command was assigned, assuming it has one.
		 */
		if (ctlx->cmdcb) {
			/* Drop the queue lock around the callback; the
			 * callback may itself need the lock or sleep.
			 */
			spin_unlock_irqrestore(&hw->ctlxq.lock, flags);
			ctlx->cmdcb(hw, ctlx);
			spin_lock_irqsave(&hw->ctlxq.lock, flags);
			/* Make sure we don't try and complete
			 * this CTLX more than once!
			 */
			ctlx->cmdcb = NULL;
			/* Did someone yank the adapter out
			 * while our list was (briefly) unlocked?
			 */
			if (hw->wlandev->hwremoved) {
				reap = 0;
				break;
			}
		}
		/*
		 * "Reapable" CTLXs are ones which don't have any
		 * threads waiting for them to die. Hence they must
		 * be delivered to The Reaper!
		 */
		if (ctlx->reapable) {
			/* Move the CTLX off the "completing" list (hopefully)
			 * on to the "reapable" list where the reaper task
			 * can find it. And "reapable" means that this CTLX
			 * isn't sitting on a wait-queue somewhere.
			 */
			list_move_tail(&ctlx->list, &hw->ctlxq.reapable);
			reap = 1;
		}
		/* Wake any thread blocked on this CTLX's completion */
		complete(&ctlx->done);
	}
	spin_unlock_irqrestore(&hw->ctlxq.lock, flags);
	/* Only kick the reaper if something was actually moved */
	if (reap)
		schedule_work(&hw->reaper_bh);
}
/*----------------------------------------------------------------
* unlocked_usbctlx_cancel_async
*
* Mark the CTLX dead asynchronously, and ensure that the
* next command on the queue is run afterwards.
*
* Arguments:
* hw ptr to the struct hfa384x structure
* ctlx ptr to a CTLX structure
*
* Returns:
* 0 the CTLX's URB is inactive
* -EINPROGRESS the URB is currently being unlinked
*
* Call context:
* Either process or interrupt, but presumably interrupt
*----------------------------------------------------------------
*/
/* NOTE(review): all callers in this file invoke this function with
 * hw->ctlxq.lock held (hence the "unlocked_" prefix).
 */
static int unlocked_usbctlx_cancel_async(struct hfa384x *hw,
					 struct hfa384x_usbctlx *ctlx)
{
	int ret;
	/*
	 * Try to delete the URB containing our request packet.
	 * If we succeed, then its completion handler will be
	 * called with a status of -ECONNRESET.
	 */
	hw->ctlx_urb.transfer_flags |= URB_ASYNC_UNLINK;
	ret = usb_unlink_urb(&hw->ctlx_urb);
	if (ret != -EINPROGRESS) {
		/*
		 * The OUT URB had either already completed
		 * or was still in the pending queue, so the
		 * URB's completion function will not be called.
		 * We will have to complete the CTLX ourselves.
		 */
		ctlx->state = CTLX_REQ_FAILED;
		unlocked_usbctlx_complete(hw, ctlx);
		ret = 0;
	}
	/* 0: CTLX completed here; -EINPROGRESS: URB callback will finish it */
	return ret;
}
/*----------------------------------------------------------------
* unlocked_usbctlx_complete
*
* A CTLX has completed. It may have been successful, it may not
* have been. At this point, the CTLX should be quiescent. The URBs
* aren't active and the timers should have been stopped.
*
* The CTLX is migrated to the "completing" queue, and the completing
* work is scheduled.
*
* Arguments:
* hw ptr to a struct hfa384x structure
* ctlx ptr to a ctlx structure
*
* Returns:
* nothing
*
* Side effects:
*
* Call context:
* Either, assume interrupt
*----------------------------------------------------------------
*/
/* Retire a finished CTLX from the "active" queue.
 *
 * The CTLX is moved onto the "completing" list and the completion work
 * is scheduled. If the CTLX is not in one of the two valid terminal
 * states (CTLX_COMPLETE or CTLX_REQ_FAILED), an error is logged.
 * Caller holds hw->ctlxq.lock.
 */
static void unlocked_usbctlx_complete(struct hfa384x *hw,
				      struct hfa384x_usbctlx *ctlx)
{
	list_move_tail(&ctlx->list, &hw->ctlxq.completing);
	schedule_work(&hw->completion_bh);

	if (ctlx->state != CTLX_COMPLETE && ctlx->state != CTLX_REQ_FAILED)
		netdev_err(hw->wlandev->netdev, "CTLX[%d] not in a terminating state(%s)\n",
			   le16_to_cpu(ctlx->outbuf.type),
			   ctlxstr(ctlx->state));
}
/*----------------------------------------------------------------
* hfa384x_usbctlxq_run
*
* Checks to see if the head item is running. If not, starts it.
*
* Arguments:
* hw ptr to struct hfa384x
*
* Returns:
* nothing
*
* Side effects:
*
* Call context:
* any
*----------------------------------------------------------------
*/
static void hfa384x_usbctlxq_run(struct hfa384x *hw)
{
	unsigned long flags;
	/* acquire lock */
	spin_lock_irqsave(&hw->ctlxq.lock, flags);
	/* Only one active CTLX at any one time, because there's no
	 * other (reliable) way to match the response URB to the
	 * correct CTLX.
	 *
	 * Don't touch any of these CTLXs if the hardware
	 * has been removed or the USB subsystem is stalled.
	 */
	if (!list_empty(&hw->ctlxq.active) ||
	    test_bit(WORK_TX_HALT, &hw->usb_flags) || hw->wlandev->hwremoved)
		goto unlock;
	/* Loop until a CTLX is successfully submitted, a recoverable
	 * error stops the queue, or the pending list drains.
	 */
	while (!list_empty(&hw->ctlxq.pending)) {
		struct hfa384x_usbctlx *head;
		int result;
		/* This is the first pending command */
		head = list_entry(hw->ctlxq.pending.next,
				  struct hfa384x_usbctlx, list);
		/* We need to split this off to avoid a race condition */
		list_move_tail(&head->list, &hw->ctlxq.active);
		/* Fill the out packet */
		usb_fill_bulk_urb(&hw->ctlx_urb, hw->usb,
				  hw->endp_out,
				  &head->outbuf, ROUNDUP64(head->outbufsize),
				  hfa384x_ctlxout_callback, hw);
		hw->ctlx_urb.transfer_flags |= USB_QUEUE_BULK;
		/* Now submit the URB and update the CTLX's state */
		result = usb_submit_urb(&hw->ctlx_urb, GFP_ATOMIC);
		if (result == 0) {
			/* This CTLX is now running on the active queue */
			head->state = CTLX_REQ_SUBMITTED;
			/* Start the OUT wait timer */
			hw->req_timer_done = 0;
			hw->reqtimer.expires = jiffies + HZ;
			add_timer(&hw->reqtimer);
			/* Start the IN wait timer */
			hw->resp_timer_done = 0;
			hw->resptimer.expires = jiffies + 2 * HZ;
			add_timer(&hw->resptimer);
			break;
		}
		if (result == -EPIPE) {
			/* The OUT pipe needs resetting, so put
			 * this CTLX back in the "pending" queue
			 * and schedule a reset ...
			 */
			netdev_warn(hw->wlandev->netdev,
				    "%s tx pipe stalled: requesting reset\n",
				    hw->wlandev->netdev->name);
			list_move(&head->list, &hw->ctlxq.pending);
			set_bit(WORK_TX_HALT, &hw->usb_flags);
			schedule_work(&hw->usb_work);
			break;
		}
		if (result == -ESHUTDOWN) {
			netdev_warn(hw->wlandev->netdev, "%s urb shutdown!\n",
				    hw->wlandev->netdev->name);
			break;
		}
		/* Any other error: fail this CTLX and try the next
		 * pending one on the following loop iteration.
		 */
		netdev_err(hw->wlandev->netdev, "Failed to submit CTLX[%d]: error=%d\n",
			   le16_to_cpu(head->outbuf.type), result);
		unlocked_usbctlx_complete(hw, head);
	} /* while */
unlock:
	spin_unlock_irqrestore(&hw->ctlxq.lock, flags);
}
/*----------------------------------------------------------------
* hfa384x_usbin_callback
*
* Callback for URBs on the BULKIN endpoint.
*
* Arguments:
* urb ptr to the completed urb
*
* Returns:
* nothing
*
* Side effects:
*
* Call context:
* interrupt
*----------------------------------------------------------------
*/
static void hfa384x_usbin_callback(struct urb *urb)
{
	struct wlandevice *wlandev = urb->context;
	struct hfa384x *hw;
	union hfa384x_usbin *usbin;
	struct sk_buff *skb = NULL;
	int result;
	int urb_status;
	u16 type;
	/* What to do with this URB's payload, decided from urb->status */
	enum USBIN_ACTION {
		HANDLE,
		RESUBMIT,
		ABORT
	} action;
	if (!wlandev || !wlandev->netdev || wlandev->hwremoved)
		goto exit;
	hw = wlandev->priv;
	if (!hw)
		goto exit;
	/* Take ownership of the skb backing this URB's transfer buffer */
	skb = hw->rx_urb_skb;
	if (!skb || (skb->data != urb->transfer_buffer)) {
		WARN_ON(1);
		return;
	}
	hw->rx_urb_skb = NULL;
	/* Check for error conditions within the URB */
	switch (urb->status) {
	case 0:
		action = HANDLE;
		/* Check for short packet */
		if (urb->actual_length == 0) {
			wlandev->netdev->stats.rx_errors++;
			wlandev->netdev->stats.rx_length_errors++;
			action = RESUBMIT;
		}
		break;
	case -EPIPE:
		/* Stalled IN pipe: ask the worker to reset it */
		netdev_warn(hw->wlandev->netdev, "%s rx pipe stalled: requesting reset\n",
			    wlandev->netdev->name);
		if (!test_and_set_bit(WORK_RX_HALT, &hw->usb_flags))
			schedule_work(&hw->usb_work);
		wlandev->netdev->stats.rx_errors++;
		action = ABORT;
		break;
	case -EILSEQ:
	case -ETIMEDOUT:
	case -EPROTO:
		/* Transient bus errors: throttle RX for a while */
		if (!test_and_set_bit(THROTTLE_RX, &hw->usb_flags) &&
		    !timer_pending(&hw->throttle)) {
			mod_timer(&hw->throttle, jiffies + THROTTLE_JIFFIES);
		}
		wlandev->netdev->stats.rx_errors++;
		action = ABORT;
		break;
	case -EOVERFLOW:
		wlandev->netdev->stats.rx_over_errors++;
		action = RESUBMIT;
		break;
	case -ENODEV:
	case -ESHUTDOWN:
		pr_debug("status=%d, device removed.\n", urb->status);
		action = ABORT;
		break;
	case -ENOENT:
	case -ECONNRESET:
		pr_debug("status=%d, urb explicitly unlinked.\n", urb->status);
		action = ABORT;
		break;
	default:
		pr_debug("urb status=%d, transfer flags=0x%x\n",
			 urb->status, urb->transfer_flags);
		wlandev->netdev->stats.rx_errors++;
		action = RESUBMIT;
		break;
	}
	/* Save values from the RX URB before reposting overwrites it. */
	urb_status = urb->status;
	usbin = (union hfa384x_usbin *)urb->transfer_buffer;
	if (action != ABORT) {
		/* Repost the RX URB */
		result = submit_rx_urb(hw, GFP_ATOMIC);
		if (result != 0) {
			netdev_err(hw->wlandev->netdev,
				   "Fatal, failed to resubmit rx_urb. error=%d\n",
				   result);
		}
	}
	/* Handle any USB-IN packet */
	/* Note: the check of the sw_support field, the type field doesn't
	 * have bit 12 set like the docs suggest.
	 */
	type = le16_to_cpu(usbin->type);
	if (HFA384x_USB_ISRXFRM(type)) {
		if (action == HANDLE) {
			/* 0x0123 marks a frame we transmitted ourselves
			 * (see hfa384x_drvr_txframe), i.e. a TX complete.
			 */
			if (usbin->txfrm.desc.sw_support == 0x0123) {
				hfa384x_usbin_txcompl(wlandev, usbin);
			} else {
				skb_put(skb, sizeof(*usbin));
				hfa384x_usbin_rx(wlandev, skb);
				/* skb ownership passed on; don't free it */
				skb = NULL;
			}
		}
		goto exit;
	}
	if (HFA384x_USB_ISTXFRM(type)) {
		if (action == HANDLE)
			hfa384x_usbin_txcompl(wlandev, usbin);
		goto exit;
	}
	switch (type) {
	case HFA384x_USB_INFOFRM:
		if (action == ABORT)
			goto exit;
		if (action == HANDLE)
			hfa384x_usbin_info(wlandev, usbin);
		break;
	case HFA384x_USB_CMDRESP:
	case HFA384x_USB_WRIDRESP:
	case HFA384x_USB_RRIDRESP:
	case HFA384x_USB_WMEMRESP:
	case HFA384x_USB_RMEMRESP:
		/* ALWAYS, ALWAYS, ALWAYS handle this CTLX!!!! */
		hfa384x_usbin_ctlx(hw, usbin, urb_status);
		break;
	case HFA384x_USB_BUFAVAIL:
		pr_debug("Received BUFAVAIL packet, frmlen=%d\n",
			 usbin->bufavail.frmlen);
		break;
	case HFA384x_USB_ERROR:
		pr_debug("Received USB_ERROR packet, errortype=%d\n",
			 usbin->usberror.errortype);
		break;
	default:
		pr_debug("Unrecognized USBIN packet, type=%x, status=%d\n",
			 usbin->type, urb_status);
		break;
	} /* switch */
exit:
	/* Free the skb unless ownership was handed to the RX path */
	if (skb)
		dev_kfree_skb(skb);
}
/*----------------------------------------------------------------
* hfa384x_usbin_ctlx
*
* We've received a URB containing a Prism2 "response" message.
* This message needs to be matched up with a CTLX on the active
* queue and our state updated accordingly.
*
* Arguments:
* hw ptr to struct hfa384x
* usbin ptr to USB IN packet
* urb_status status of this Bulk-In URB
*
* Returns:
* nothing
*
* Side effects:
*
* Call context:
* interrupt
*----------------------------------------------------------------
*/
static void hfa384x_usbin_ctlx(struct hfa384x *hw, union hfa384x_usbin *usbin,
			       int urb_status)
{
	struct hfa384x_usbctlx *ctlx;
	int run_queue = 0;
	unsigned long flags;
retry:
	spin_lock_irqsave(&hw->ctlxq.lock, flags);
	/* There can be only one CTLX on the active queue
	 * at any one time, and this is the CTLX that the
	 * timers are waiting for.
	 */
	if (list_empty(&hw->ctlxq.active))
		goto unlock;
	/* Remove the "response timeout". It's possible that
	 * we are already too late, and that the timeout is
	 * already running. And that's just too bad for us,
	 * because we could lose our CTLX from the active
	 * queue here ...
	 */
	if (del_timer(&hw->resptimer) == 0) {
		if (hw->resp_timer_done == 0) {
			/* The timer handler is running right now; drop
			 * the lock so it can finish, then start over.
			 */
			spin_unlock_irqrestore(&hw->ctlxq.lock, flags);
			goto retry;
		}
	} else {
		hw->resp_timer_done = 1;
	}
	ctlx = get_active_ctlx(hw);
	if (urb_status != 0) {
		/*
		 * Bad CTLX, so get rid of it. But we only
		 * remove it from the active queue if we're no
		 * longer expecting the OUT URB to complete.
		 */
		if (unlocked_usbctlx_cancel_async(hw, ctlx) == 0)
			run_queue = 1;
	} else {
		/* Mask off bit 15 so request and response types compare */
		const __le16 intype = (usbin->type & ~cpu_to_le16(0x8000));
		/*
		 * Check that our message is what we're expecting ...
		 */
		if (ctlx->outbuf.type != intype) {
			netdev_warn(hw->wlandev->netdev,
				    "Expected IN[%d], received IN[%d] - ignored.\n",
				    le16_to_cpu(ctlx->outbuf.type),
				    le16_to_cpu(intype));
			goto unlock;
		}
		/* This URB has succeeded, so grab the data ... */
		memcpy(&ctlx->inbuf, usbin, sizeof(ctlx->inbuf));
		switch (ctlx->state) {
		case CTLX_REQ_SUBMITTED:
			/*
			 * We have received our response URB before
			 * our request has been acknowledged. Odd,
			 * but our OUT URB is still alive...
			 */
			pr_debug("Causality violation: please reboot Universe\n");
			ctlx->state = CTLX_RESP_COMPLETE;
			break;
		case CTLX_REQ_COMPLETE:
			/*
			 * This is the usual path: our request
			 * has already been acknowledged, and
			 * now we have received the reply too.
			 */
			ctlx->state = CTLX_COMPLETE;
			unlocked_usbctlx_complete(hw, ctlx);
			run_queue = 1;
			break;
		default:
			/*
			 * Throw this CTLX away ...
			 */
			netdev_err(hw->wlandev->netdev,
				   "Matched IN URB, CTLX[%d] in invalid state(%s). Discarded.\n",
				   le16_to_cpu(ctlx->outbuf.type),
				   ctlxstr(ctlx->state));
			if (unlocked_usbctlx_cancel_async(hw, ctlx) == 0)
				run_queue = 1;
			break;
		} /* switch */
	}
unlock:
	spin_unlock_irqrestore(&hw->ctlxq.lock, flags);
	/* Start the next pending CTLX outside the lock */
	if (run_queue)
		hfa384x_usbctlxq_run(hw);
}
/*----------------------------------------------------------------
* hfa384x_usbin_txcompl
*
* At this point we have the results of a previous transmit.
*
* Arguments:
* wlandev wlan device
* usbin ptr to the usb transfer buffer
*
* Returns:
* nothing
*
* Side effects:
*
* Call context:
* interrupt
*----------------------------------------------------------------
*/
/* Route a TX completion report to the success or exception handler. */
static void hfa384x_usbin_txcompl(struct wlandevice *wlandev,
				  union hfa384x_usbin *usbin)
{
	/* The "type" field doubles as the TX status word here */
	u16 status = le16_to_cpu(usbin->type);

	if (!HFA384x_TXSTATUS_ISERROR(status))
		prism2sta_ev_tx(wlandev, status);
	else
		prism2sta_ev_txexc(wlandev, status);
}
/*----------------------------------------------------------------
* hfa384x_usbin_rx
*
 * At this point we have successfully received an rx frame packet.
*
* Arguments:
* wlandev wlan device
* usbin ptr to the usb transfer buffer
*
* Returns:
* nothing
*
* Side effects:
*
* Call context:
* interrupt
*----------------------------------------------------------------
*/
static void hfa384x_usbin_rx(struct wlandevice *wlandev, struct sk_buff *skb)
{
	union hfa384x_usbin *usbin = (union hfa384x_usbin *)skb->data;
	struct hfa384x *hw = wlandev->priv;
	int hdrlen;
	struct p80211_rxmeta *rxmeta;
	u16 data_len;
	u16 fc;
	u16 status;
	/* Byte order convert once up front. */
	le16_to_cpus(&usbin->rxfrm.desc.status);
	le32_to_cpus(&usbin->rxfrm.desc.time);
	/* Now handle frame based on port# */
	status = HFA384x_RXSTATUS_MACPORT_GET(usbin->rxfrm.desc.status);
	switch (status) {
	case 0:
		/* Port 0: normal data path */
		fc = le16_to_cpu(usbin->rxfrm.desc.hdr.frame_control);
		/* If exclude and we receive an unencrypted, drop it */
		if ((wlandev->hostwep & HOSTWEP_EXCLUDEUNENCRYPTED) &&
		    !WLAN_GET_FC_ISWEP(fc)) {
			break;
		}
		data_len = le16_to_cpu(usbin->rxfrm.desc.data_len);
		/* How much header data do we have? */
		hdrlen = p80211_headerlen(fc);
		/* Pull off the descriptor */
		skb_pull(skb, sizeof(struct hfa384x_rx_frame));
		/* Now shunt the header block up against the data block
		 * with an "overlapping" copy
		 */
		memmove(skb_push(skb, hdrlen),
			&usbin->rxfrm.desc.hdr, hdrlen);
		skb->dev = wlandev->netdev;
		/* And set the frame length properly */
		skb_trim(skb, data_len + hdrlen);
		/* The prism2 series does not return the CRC */
		memset(skb_put(skb, WLAN_CRC_LEN), 0xff, WLAN_CRC_LEN);
		skb_reset_mac_header(skb);
		/* Attach the rxmeta, set some stuff */
		p80211skb_rxmeta_attach(wlandev, skb);
		rxmeta = p80211skb_rxmeta(skb);
		rxmeta->mactime = usbin->rxfrm.desc.time;
		rxmeta->rxrate = usbin->rxfrm.desc.rate;
		rxmeta->signal = usbin->rxfrm.desc.signal - hw->dbmadjust;
		rxmeta->noise = usbin->rxfrm.desc.silence - hw->dbmadjust;
		/* Hand the frame up the stack; skb ownership passes on */
		p80211netdev_rx(wlandev, skb);
		break;
	case 7:
		/* Port 7: monitor/sniffer mode */
		if (!HFA384x_RXSTATUS_ISFCSERR(usbin->rxfrm.desc.status)) {
			/* Copy to wlansnif skb */
			hfa384x_int_rxmonitor(wlandev, &usbin->rxfrm);
			dev_kfree_skb(skb);
		} else {
			/* NOTE(review): the FCS-error path neither frees
			 * nor forwards the skb here; presumably the caller
			 * retains ownership in this case -- verify.
			 */
			pr_debug("Received monitor frame: FCSerr set\n");
		}
		break;
	default:
		netdev_warn(hw->wlandev->netdev,
			    "Received frame on unsupported port=%d\n",
			    status);
		break;
	}
}
/*----------------------------------------------------------------
* hfa384x_int_rxmonitor
*
* Helper function for int_rx. Handles monitor frames.
* Note that this function allocates space for the FCS and sets it
* to 0xffffffff. The hfa384x doesn't give us the FCS value but the
* higher layers expect it. 0xffffffff is used as a flag to indicate
* the FCS is bogus.
*
* Arguments:
* wlandev wlan device structure
* rxfrm rx descriptor read from card in int_rx
*
* Returns:
* nothing
*
* Side effects:
* Allocates an skb and passes it up via the PF_PACKET interface.
* Call context:
* interrupt
*----------------------------------------------------------------
*/
static void hfa384x_int_rxmonitor(struct wlandevice *wlandev,
				  struct hfa384x_usb_rxfrm *rxfrm)
{
	struct hfa384x_rx_frame *rxdesc = &rxfrm->desc;
	unsigned int hdrlen = 0;
	unsigned int datalen = 0;
	unsigned int skblen = 0;
	u8 *datap;
	u16 fc;
	struct sk_buff *skb;
	struct hfa384x *hw = wlandev->priv;
	/* Remember the status, time, and data_len fields are in host order */
	/* Figure out how big the frame is */
	fc = le16_to_cpu(rxdesc->hdr.frame_control);
	hdrlen = p80211_headerlen(fc);
	datalen = le16_to_cpu(rxdesc->data_len);
	/* Allocate an ind message+framesize skb */
	skblen = sizeof(struct p80211_caphdr) + hdrlen + datalen + WLAN_CRC_LEN;
	/* sanity check the length */
	if (skblen >
	    (sizeof(struct p80211_caphdr) +
	     WLAN_HDR_A4_LEN + WLAN_DATA_MAXLEN + WLAN_CRC_LEN)) {
		pr_debug("overlen frm: len=%zd\n",
			 skblen - sizeof(struct p80211_caphdr));
		return;
	}
	skb = dev_alloc_skb(skblen);
	if (!skb)
		return;
	/* only prepend the prism header if in the right mode */
	if ((wlandev->netdev->type == ARPHRD_IEEE80211_PRISM) &&
	    (hw->sniffhdr != 0)) {
		struct p80211_caphdr *caphdr;
		/* The NEW header format! */
		datap = skb_put(skb, sizeof(struct p80211_caphdr));
		caphdr = (struct p80211_caphdr *)datap;
		caphdr->version = htonl(P80211CAPTURE_VERSION);
		caphdr->length = htonl(sizeof(struct p80211_caphdr));
		caphdr->mactime = __cpu_to_be64(rxdesc->time * 1000);
		caphdr->hosttime = __cpu_to_be64(jiffies);
		caphdr->phytype = htonl(4);	/* dss_dot11_b */
		caphdr->channel = htonl(hw->sniff_channel);
		caphdr->datarate = htonl(rxdesc->rate);
		caphdr->antenna = htonl(0);	/* unknown */
		caphdr->priority = htonl(0);	/* unknown */
		caphdr->ssi_type = htonl(3);	/* rssi_raw */
		caphdr->ssi_signal = htonl(rxdesc->signal);
		caphdr->ssi_noise = htonl(rxdesc->silence);
		caphdr->preamble = htonl(0);	/* unknown */
		caphdr->encoding = htonl(1);	/* cck */
	}
	/* Copy the 802.11 header to the skb
	 * (ctl frames may be less than a full header)
	 */
	skb_put_data(skb, &rxdesc->hdr.frame_control, hdrlen);
	/* If any, copy the data from the card to the skb */
	if (datalen > 0) {
		datap = skb_put_data(skb, rxfrm->data, datalen);
		/* check for unencrypted stuff if WEP bit set.
		 * (datap - hdrlen + 1) points at the second byte of the
		 * copied 802.11 header, i.e. frame_control's high byte.
		 */
		if (*(datap - hdrlen + 1) & 0x40)	/* wep set */
			if ((*(datap) == 0xaa) && (*(datap + 1) == 0xaa))
				/* clear wep; it's the 802.2 header! */
				*(datap - hdrlen + 1) &= 0xbf;
	}
	if (hw->sniff_fcs) {
		/* Set the FCS to the 0xffffffff "bogus" marker */
		datap = skb_put(skb, WLAN_CRC_LEN);
		memset(datap, 0xff, WLAN_CRC_LEN);
	}
	/* pass it back up; skb ownership passes to the netdev layer */
	p80211netdev_rx(wlandev, skb);
}
/*----------------------------------------------------------------
* hfa384x_usbin_info
*
 * At this point we have successfully received a Prism2 info frame.
*
* Arguments:
* wlandev wlan device
* usbin ptr to the usb transfer buffer
*
* Returns:
* nothing
*
* Side effects:
*
* Call context:
* interrupt
*----------------------------------------------------------------
*/
static void hfa384x_usbin_info(struct wlandevice *wlandev,
			       union hfa384x_usbin *usbin)
{
	/* Convert the frame length in place before the station layer
	 * reads it, then hand the info frame up.
	 */
	le16_to_cpus(&usbin->infofrm.info.framelen);
	prism2sta_ev_info(wlandev, &usbin->infofrm.info);
}
/*----------------------------------------------------------------
* hfa384x_usbout_callback
*
* Callback for URBs on the BULKOUT endpoint.
*
* Arguments:
* urb ptr to the completed urb
*
* Returns:
* nothing
*
* Side effects:
*
* Call context:
* interrupt
*----------------------------------------------------------------
*/
static void hfa384x_usbout_callback(struct urb *urb)
{
	struct wlandevice *wlandev = urb->context;
#ifdef DEBUG_USB
	dbprint_urb(urb);
#endif
	if (wlandev && wlandev->netdev) {
		switch (urb->status) {
		case 0:
			/* TX buffer delivered; tell the station layer
			 * another buffer is available.
			 */
			prism2sta_ev_alloc(wlandev);
			break;
		case -EPIPE: {
			/* Stalled OUT pipe: ask the worker to reset it */
			struct hfa384x *hw = wlandev->priv;

			netdev_warn(hw->wlandev->netdev,
				    "%s tx pipe stalled: requesting reset\n",
				    wlandev->netdev->name);
			if (!test_and_set_bit(WORK_TX_HALT, &hw->usb_flags))
				schedule_work(&hw->usb_work);
			wlandev->netdev->stats.tx_errors++;
			break;
		}
		case -EPROTO:
		case -ETIMEDOUT:
		case -EILSEQ: {
			/* Transient bus errors: throttle TX and stop the
			 * net queue until the throttle timer fires.
			 */
			struct hfa384x *hw = wlandev->priv;

			if (!test_and_set_bit(THROTTLE_TX, &hw->usb_flags) &&
			    !timer_pending(&hw->throttle)) {
				mod_timer(&hw->throttle,
					  jiffies + THROTTLE_JIFFIES);
			}
			wlandev->netdev->stats.tx_errors++;
			netif_stop_queue(wlandev->netdev);
			break;
		}
		case -ENOENT:
		case -ESHUTDOWN:
			/* Ignorable errors */
			break;
		default:
			netdev_info(wlandev->netdev, "unknown urb->status=%d\n",
				    urb->status);
			wlandev->netdev->stats.tx_errors++;
			break;
		}		/* switch */
	}
}
/*----------------------------------------------------------------
* hfa384x_ctlxout_callback
*
* Callback for control data on the BULKOUT endpoint.
*
* Arguments:
* urb ptr to the completed urb
*
* Returns:
* nothing
*
* Side effects:
*
* Call context:
* interrupt
*----------------------------------------------------------------
*/
static void hfa384x_ctlxout_callback(struct urb *urb)
{
	struct hfa384x *hw = urb->context;
	int delete_resptimer = 0;
	int timer_ok = 1;
	int run_queue = 0;
	struct hfa384x_usbctlx *ctlx;
	unsigned long flags;
	pr_debug("urb->status=%d\n", urb->status);
#ifdef DEBUG_USB
	dbprint_urb(urb);
#endif
	if ((urb->status == -ESHUTDOWN) ||
	    (urb->status == -ENODEV) || !hw)
		return;
retry:
	spin_lock_irqsave(&hw->ctlxq.lock, flags);
	/*
	 * Only one CTLX at a time on the "active" list, and
	 * none at all if we are unplugged. However, we can
	 * rely on the disconnect function to clean everything
	 * up if someone unplugged the adapter.
	 */
	if (list_empty(&hw->ctlxq.active)) {
		spin_unlock_irqrestore(&hw->ctlxq.lock, flags);
		return;
	}
	/*
	 * Having something on the "active" queue means
	 * that we have timers to worry about ...
	 */
	if (del_timer(&hw->reqtimer) == 0) {
		if (hw->req_timer_done == 0) {
			/*
			 * This timer was actually running while we
			 * were trying to delete it. Let it terminate
			 * gracefully instead.
			 */
			spin_unlock_irqrestore(&hw->ctlxq.lock, flags);
			goto retry;
		}
	} else {
		hw->req_timer_done = 1;
	}
	ctlx = get_active_ctlx(hw);
	if (urb->status == 0) {
		/* Request portion of a CTLX is successful */
		switch (ctlx->state) {
		case CTLX_REQ_SUBMITTED:
			/* This OUT-ACK received before IN */
			ctlx->state = CTLX_REQ_COMPLETE;
			break;
		case CTLX_RESP_COMPLETE:
			/* IN already received before this OUT-ACK,
			 * so this command must now be complete.
			 */
			ctlx->state = CTLX_COMPLETE;
			unlocked_usbctlx_complete(hw, ctlx);
			run_queue = 1;
			break;
		default:
			/* This is NOT a valid CTLX "success" state! */
			netdev_err(hw->wlandev->netdev,
				   "Illegal CTLX[%d] success state(%s, %d) in OUT URB\n",
				   le16_to_cpu(ctlx->outbuf.type),
				   ctlxstr(ctlx->state), urb->status);
			break;
		}		/* switch */
	} else {
		/* If the pipe has stalled then we need to reset it */
		if ((urb->status == -EPIPE) &&
		    !test_and_set_bit(WORK_TX_HALT, &hw->usb_flags)) {
			netdev_warn(hw->wlandev->netdev,
				    "%s tx pipe stalled: requesting reset\n",
				    hw->wlandev->netdev->name);
			schedule_work(&hw->usb_work);
		}
		/* If someone cancels the OUT URB then its status
		 * should be either -ECONNRESET or -ENOENT.
		 */
		ctlx->state = CTLX_REQ_FAILED;
		unlocked_usbctlx_complete(hw, ctlx);
		delete_resptimer = 1;
		run_queue = 1;
	}
delresp:
	if (delete_resptimer) {
		timer_ok = del_timer(&hw->resptimer);
		if (timer_ok != 0)
			hw->resp_timer_done = 1;
	}
	spin_unlock_irqrestore(&hw->ctlxq.lock, flags);
	/* If the response timer handler is running right now, wait for
	 * it by re-taking the lock and retrying the deletion.
	 */
	if (!timer_ok && (hw->resp_timer_done == 0)) {
		spin_lock_irqsave(&hw->ctlxq.lock, flags);
		goto delresp;
	}
	/* Kick off the next pending CTLX outside the lock */
	if (run_queue)
		hfa384x_usbctlxq_run(hw);
}
/*----------------------------------------------------------------
* hfa384x_usbctlx_reqtimerfn
*
* Timer response function for CTLX request timeouts. If this
* function is called, it means that the callback for the OUT
* URB containing a Prism2.x XXX_Request was never called.
*
* Arguments:
* data a ptr to the struct hfa384x
*
* Returns:
* nothing
*
* Side effects:
*
* Call context:
* interrupt
*----------------------------------------------------------------
*/
static void hfa384x_usbctlx_reqtimerfn(struct timer_list *t)
{
	struct hfa384x *hw = from_timer(hw, t, reqtimer);
	unsigned long flags;
	spin_lock_irqsave(&hw->ctlxq.lock, flags);
	/* Record that this timer has run, for the lock-free deletion
	 * handshake in hfa384x_ctlxout_callback().
	 */
	hw->req_timer_done = 1;
	/* Removing the hardware automatically empties
	 * the active list ...
	 */
	if (!list_empty(&hw->ctlxq.active)) {
		/*
		 * We must ensure that our URB is removed from
		 * the system, if it hasn't already expired.
		 */
		hw->ctlx_urb.transfer_flags |= URB_ASYNC_UNLINK;
		if (usb_unlink_urb(&hw->ctlx_urb) == -EINPROGRESS) {
			struct hfa384x_usbctlx *ctlx = get_active_ctlx(hw);
			ctlx->state = CTLX_REQ_FAILED;
			/* This URB was active, but has now been
			 * cancelled. It will now have a status of
			 * -ECONNRESET in the callback function.
			 *
			 * We are cancelling this CTLX, so we're
			 * not going to need to wait for a response.
			 * The URB's callback function will check
			 * that this timer is truly dead.
			 */
			if (del_timer(&hw->resptimer) != 0)
				hw->resp_timer_done = 1;
		}
	}
	spin_unlock_irqrestore(&hw->ctlxq.lock, flags);
}
/*----------------------------------------------------------------
* hfa384x_usbctlx_resptimerfn
*
* Timer response function for CTLX response timeouts. If this
* function is called, it means that the callback for the IN
* URB containing a Prism2.x XXX_Response was never called.
*
* Arguments:
* data a ptr to the struct hfa384x
*
* Returns:
* nothing
*
* Side effects:
*
* Call context:
* interrupt
*----------------------------------------------------------------
*/
static void hfa384x_usbctlx_resptimerfn(struct timer_list *t)
{
	struct hfa384x *hw = from_timer(hw, t, resptimer);
	unsigned long flags;
	spin_lock_irqsave(&hw->ctlxq.lock, flags);
	/* Record that this timer has run, for the lock-free deletion
	 * handshake in hfa384x_usbin_ctlx().
	 */
	hw->resp_timer_done = 1;
	/* The active list will be empty if the
	 * adapter has been unplugged ...
	 */
	if (!list_empty(&hw->ctlxq.active)) {
		struct hfa384x_usbctlx *ctlx = get_active_ctlx(hw);
		/* Cancel the CTLX; if it completed here (rather than in
		 * the URB callback), start the next one outside the lock.
		 */
		if (unlocked_usbctlx_cancel_async(hw, ctlx) == 0) {
			spin_unlock_irqrestore(&hw->ctlxq.lock, flags);
			hfa384x_usbctlxq_run(hw);
			return;
		}
	}
	spin_unlock_irqrestore(&hw->ctlxq.lock, flags);
}
/*----------------------------------------------------------------
* hfa384x_usb_throttlefn
*
*
* Arguments:
* data ptr to hw
*
* Returns:
* Nothing
*
* Side effects:
*
* Call context:
* Interrupt
*----------------------------------------------------------------
*/
/* Throttle timer expiry: convert any pending RX/TX throttle flags into
 * resume-work requests and wake the USB worker if at least one pipe
 * needs resuming.
 */
static void hfa384x_usb_throttlefn(struct timer_list *t)
{
	struct hfa384x *hw = from_timer(hw, t, throttle);
	unsigned long flags;
	int resume_work = 0;

	spin_lock_irqsave(&hw->ctlxq.lock, flags);
	pr_debug("flags=0x%lx\n", hw->usb_flags);
	if (!hw->wlandev->hwremoved) {
		/* Both throttle bits must be cleared, so each
		 * test_and_clear_bit() call is made unconditionally.
		 */
		if (test_and_clear_bit(THROTTLE_RX, &hw->usb_flags) &&
		    !test_and_set_bit(WORK_RX_RESUME, &hw->usb_flags))
			resume_work = 1;
		if (test_and_clear_bit(THROTTLE_TX, &hw->usb_flags) &&
		    !test_and_set_bit(WORK_TX_RESUME, &hw->usb_flags))
			resume_work = 1;
		if (resume_work)
			schedule_work(&hw->usb_work);
	}
	spin_unlock_irqrestore(&hw->ctlxq.lock, flags);
}
/*----------------------------------------------------------------
* hfa384x_usbctlx_submit
*
* Called from the doxxx functions to submit a CTLX to the queue
*
* Arguments:
* hw ptr to the hw struct
* ctlx ctlx structure to enqueue
*
* Returns:
* -ENODEV if the adapter is unplugged
* 0
*
* Side effects:
*
* Call context:
* process or interrupt
*----------------------------------------------------------------
*/
/* Enqueue a CTLX on the pending list and start the queue.
 *
 * Returns -ENODEV if the adapter has been unplugged, 0 otherwise.
 */
static int hfa384x_usbctlx_submit(struct hfa384x *hw,
				  struct hfa384x_usbctlx *ctlx)
{
	unsigned long flags;
	int ret = 0;

	spin_lock_irqsave(&hw->ctlxq.lock, flags);
	if (hw->wlandev->hwremoved) {
		ret = -ENODEV;
	} else {
		ctlx->state = CTLX_PENDING;
		list_add_tail(&ctlx->list, &hw->ctlxq.pending);
	}
	spin_unlock_irqrestore(&hw->ctlxq.lock, flags);

	if (ret == 0)
		hfa384x_usbctlxq_run(hw);
	return ret;
}
/*----------------------------------------------------------------
* hfa384x_isgood_pdrcore
*
* Quick check of PDR codes.
*
* Arguments:
* pdrcode PDR code number (host order)
*
* Returns:
* zero not good.
* one is good.
*
* Side effects:
*
* Call context:
*----------------------------------------------------------------
*/
/* Validate a PDR (PDA record) code.
 *
 * Returns 1 for every explicitly known code and for any unknown code
 * below 0x1000 (assumed harmless); returns 0 for unknown codes at or
 * above 0x1000.
 */
static int hfa384x_isgood_pdrcode(u16 pdrcode)
{
	switch (pdrcode) {
	case HFA384x_PDR_END_OF_PDA:
	case HFA384x_PDR_PCB_PARTNUM:
	case HFA384x_PDR_PDAVER:
	case HFA384x_PDR_NIC_SERIAL:
	case HFA384x_PDR_MKK_MEASUREMENTS:
	case HFA384x_PDR_NIC_RAMSIZE:
	case HFA384x_PDR_MFISUPRANGE:
	case HFA384x_PDR_CFISUPRANGE:
	case HFA384x_PDR_NICID:
	case HFA384x_PDR_MAC_ADDRESS:
	case HFA384x_PDR_REGDOMAIN:
	case HFA384x_PDR_ALLOWED_CHANNEL:
	case HFA384x_PDR_DEFAULT_CHANNEL:
	case HFA384x_PDR_TEMPTYPE:
	case HFA384x_PDR_IFR_SETTING:
	case HFA384x_PDR_RFR_SETTING:
	case HFA384x_PDR_HFA3861_BASELINE:
	case HFA384x_PDR_HFA3861_SHADOW:
	case HFA384x_PDR_HFA3861_IFRF:
	case HFA384x_PDR_HFA3861_CHCALSP:
	case HFA384x_PDR_HFA3861_CHCALI:
	case HFA384x_PDR_3842_NIC_CONFIG:
	case HFA384x_PDR_USB_ID:
	case HFA384x_PDR_PCI_ID:
	case HFA384x_PDR_PCI_IFCONF:
	case HFA384x_PDR_PCI_PMCONF:
	case HFA384x_PDR_RFENRGY:
	case HFA384x_PDR_HFA3861_MANF_TESTSP:
	case HFA384x_PDR_HFA3861_MANF_TESTI:
		/* Explicitly recognized code */
		return 1;
	default:
		break;
	}

	if (pdrcode < 0x1000) {
		/* Unknown but in the "assumed ok" range */
		pr_debug("Encountered unknown PDR#=0x%04x, assuming it's ok.\n",
			 pdrcode);
		return 1;
	}

	pr_debug("Encountered unknown PDR#=0x%04x, (>=0x1000), assuming it's bad.\n",
		 pdrcode);
	return 0;
}
| linux-master | drivers/staging/wlan-ng/hfa384x_usb.c |
// SPDX-License-Identifier: (GPL-2.0 OR MPL-1.1)
/*
*
* Ether/802.11 conversions and packet buffer routines
*
* Copyright (C) 1999 AbsoluteValue Systems, Inc. All Rights Reserved.
* --------------------------------------------------------------------
*
* linux-wlan
*
* --------------------------------------------------------------------
*
* Inquiries regarding the linux-wlan Open Source project can be
* made directly to:
*
* AbsoluteValue Systems Inc.
* [email protected]
* http://www.linux-wlan.com
*
* --------------------------------------------------------------------
*
* Portions of the development of this software were funded by
* Intersil Corporation as part of PRISM(R) chipset product development.
*
* --------------------------------------------------------------------
*
* This file defines the functions that perform Ethernet to/from
* 802.11 frame conversions.
*
* --------------------------------------------------------------------
*
*================================================================
*/
#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/sched.h>
#include <linux/types.h>
#include <linux/skbuff.h>
#include <linux/slab.h>
#include <linux/wireless.h>
#include <linux/netdevice.h>
#include <linux/etherdevice.h>
#include <linux/if_ether.h>
#include <linux/byteorder/generic.h>
#include <asm/byteorder.h>
#include "p80211types.h"
#include "p80211hdr.h"
#include "p80211conv.h"
#include "p80211mgmt.h"
#include "p80211msg.h"
#include "p80211netdev.h"
#include "p80211ioctl.h"
#include "p80211req.h"
static const u8 oui_rfc1042[] = { 0x00, 0x00, 0x00 };
static const u8 oui_8021h[] = { 0x00, 0x00, 0xf8 };
/*----------------------------------------------------------------
* p80211pb_ether_to_80211
*
* Uses the contents of the ether frame and the etherconv setting
* to build the elements of the 802.11 frame.
*
* We don't actually set
* up the frame header here. That's the MAC's job. We're only handling
* conversion of DIXII or 802.3+LLC frames to something that works
* with 802.11.
*
* Note -- 802.11 header is NOT part of the skb. Likewise, the 802.11
* FCS is also not present and will need to be added elsewhere.
*
* Arguments:
* ethconv Conversion type to perform
* skb skbuff containing the ether frame
* p80211_hdr 802.11 header
*
* Returns:
* 0 on success, non-zero otherwise
*
* Call context:
* May be called in interrupt or non-interrupt context
*----------------------------------------------------------------
*/
int skb_ether_to_p80211(struct wlandevice *wlandev, u32 ethconv,
			struct sk_buff *skb, struct p80211_hdr *p80211_hdr,
			struct p80211_metawep *p80211_wep)
{
	__le16 fc;
	u16 proto;
	struct wlan_ethhdr e_hdr;
	struct wlan_llc *e_llc;
	struct wlan_snap *e_snap;
	int foo;

	/* Validate the length BEFORE copying the ethernet header out of
	 * the skb; the previous order read sizeof(e_hdr) bytes from a
	 * possibly empty buffer.
	 */
	if (skb->len <= 0) {
		pr_debug("zero-length skb!\n");
		return 1;
	}
	/* NOTE(review): frames shorter than ETH_HLEN still copy trailing
	 * garbage into e_hdr here; consider rejecting skb->len < ETH_HLEN
	 * after confirming no caller relies on shorter ENCAP frames.
	 */
	memcpy(&e_hdr, skb->data, sizeof(e_hdr));

	if (ethconv == WLAN_ETHCONV_ENCAP) { /* simplest case */
		pr_debug("ENCAP len: %d\n", skb->len);
		/* here, we don't care what kind of ether frm. Just stick it */
		/*  in the 80211 payload */
		/* which is to say, leave the skb alone. */
	} else {
		/* step 1: classify ether frame, DIX or 802.3? */
		proto = ntohs(e_hdr.type);
		if (proto <= ETH_DATA_LEN) {
			pr_debug("802.3 len: %d\n", skb->len);
			/* codes <= 1500 reserved for 802.3 lengths */
			/* it's 802.3, pass ether payload unchanged,  */
			/* trim off ethernet header */
			skb_pull(skb, ETH_HLEN);
			/* leave off any PAD octets.  */
			skb_trim(skb, proto);
		} else {
			pr_debug("DIXII len: %d\n", skb->len);
			/* it's DIXII, time for some conversion */
			/* trim off ethernet header */
			skb_pull(skb, ETH_HLEN);
			/* tack on SNAP */
			e_snap = skb_push(skb, sizeof(struct wlan_snap));
			e_snap->type = htons(proto);
			if (ethconv == WLAN_ETHCONV_8021h &&
			    p80211_stt_findproto(proto)) {
				memcpy(e_snap->oui, oui_8021h,
				       WLAN_IEEE_OUI_LEN);
			} else {
				memcpy(e_snap->oui, oui_rfc1042,
				       WLAN_IEEE_OUI_LEN);
			}
			/* tack on llc */
			e_llc = skb_push(skb, sizeof(struct wlan_llc));
			e_llc->dsap = 0xAA;	/* SNAP, see IEEE 802 */
			e_llc->ssap = 0xAA;
			e_llc->ctl = 0x03;
		}
	}

	/* Set up the 802.11 header */
	/* It's a data frame */
	fc = cpu_to_le16(WLAN_SET_FC_FTYPE(WLAN_FTYPE_DATA) |
			 WLAN_SET_FC_FSTYPE(WLAN_FSTYPE_DATAONLY));

	switch (wlandev->macmode) {
	case WLAN_MACMODE_IBSS_STA:
		memcpy(p80211_hdr->address1, &e_hdr.daddr, ETH_ALEN);
		memcpy(p80211_hdr->address2, wlandev->netdev->dev_addr, ETH_ALEN);
		memcpy(p80211_hdr->address3, wlandev->bssid, ETH_ALEN);
		break;
	case WLAN_MACMODE_ESS_STA:
		fc |= cpu_to_le16(WLAN_SET_FC_TODS(1));
		memcpy(p80211_hdr->address1, wlandev->bssid, ETH_ALEN);
		memcpy(p80211_hdr->address2, wlandev->netdev->dev_addr, ETH_ALEN);
		memcpy(p80211_hdr->address3, &e_hdr.daddr, ETH_ALEN);
		break;
	case WLAN_MACMODE_ESS_AP:
		fc |= cpu_to_le16(WLAN_SET_FC_FROMDS(1));
		memcpy(p80211_hdr->address1, &e_hdr.daddr, ETH_ALEN);
		memcpy(p80211_hdr->address2, wlandev->bssid, ETH_ALEN);
		memcpy(p80211_hdr->address3, &e_hdr.saddr, ETH_ALEN);
		break;
	default:
		netdev_err(wlandev->netdev,
			   "Error: Converting eth to wlan in unknown mode.\n");
		return 1;
	}

	p80211_wep->data = NULL;

	if ((wlandev->hostwep & HOSTWEP_PRIVACYINVOKED) &&
	    (wlandev->hostwep & HOSTWEP_ENCRYPT)) {
		/* XXXX need to pick keynum other than default? */

		p80211_wep->data = kmalloc(skb->len, GFP_ATOMIC);
		if (!p80211_wep->data)
			return -ENOMEM;
		foo = wep_encrypt(wlandev, skb->data, p80211_wep->data,
				  skb->len,
				  wlandev->hostwep & HOSTWEP_DEFAULTKEY_MASK,
				  p80211_wep->iv, p80211_wep->icv);
		if (foo) {
			netdev_warn(wlandev->netdev,
				    "Host en-WEP failed, dropping frame (%d).\n",
				    foo);
			kfree(p80211_wep->data);
			/* don't hand the caller a dangling pointer */
			p80211_wep->data = NULL;
			return 2;
		}
		fc |= cpu_to_le16(WLAN_SET_FC_ISWEP(1));
	}

	/*      skb->nh.raw = skb->data; */

	p80211_hdr->frame_control = fc;
	p80211_hdr->duration_id = 0;
	p80211_hdr->sequence_control = 0;

	return 0;
}
/* jkriegl: from orinoco, modified */
static void orinoco_spy_gather(struct wlandevice *wlandev, char *mac,
			       struct p80211_rxmeta *rxmeta)
{
	int idx;

	/* Update the iwspy statistics of every watched station whose
	 * address matches the source MAC of this packet.
	 */
	for (idx = 0; idx < wlandev->spy_number; idx++) {
		if (memcmp(wlandev->spy_address[idx], mac, ETH_ALEN) != 0)
			continue;

		wlandev->spy_stat[idx].level = rxmeta->signal;
		wlandev->spy_stat[idx].noise = rxmeta->noise;
		if (rxmeta->signal > rxmeta->noise)
			wlandev->spy_stat[idx].qual =
				rxmeta->signal - rxmeta->noise;
		else
			wlandev->spy_stat[idx].qual = 0;
		wlandev->spy_stat[idx].updated = 0x7;
	}
}
/*----------------------------------------------------------------
* p80211pb_80211_to_ether
*
* Uses the contents of a received 802.11 frame and the etherconv
* setting to build an ether frame.
*
* This function extracts the src and dest address from the 802.11
* frame to use in the construction of the eth frame.
*
* Arguments:
* ethconv Conversion type to perform
* skb Packet buffer containing the 802.11 frame
*
* Returns:
* 0 on success, non-zero otherwise
*
* Call context:
* May be called in interrupt or non-interrupt context
*----------------------------------------------------------------
*/
int skb_p80211_to_ether(struct wlandevice *wlandev, u32 ethconv,
			struct sk_buff *skb)
{
	struct net_device *netdev = wlandev->netdev;
	u16 fc;
	unsigned int payload_length;
	unsigned int payload_offset;
	u8 daddr[ETH_ALEN];
	u8 saddr[ETH_ALEN];
	struct p80211_hdr *w_hdr;
	struct wlan_ethhdr *e_hdr;
	struct wlan_llc *e_llc;
	struct wlan_snap *e_snap;
	int foo;

	/* Reject frames too short to hold even an A3 header plus CRC;
	 * without this check the unsigned subtraction below wraps and
	 * payload_length becomes huge, defeating all later bounds tests.
	 */
	if (skb->len < WLAN_HDR_A3_LEN + WLAN_CRC_LEN) {
		netdev_err(netdev, "frame too short (%u)\n", skb->len);
		return 1;
	}

	payload_length = skb->len - WLAN_HDR_A3_LEN - WLAN_CRC_LEN;
	payload_offset = WLAN_HDR_A3_LEN;

	w_hdr = (struct p80211_hdr *)skb->data;

	/* setup some vars for convenience */
	fc = le16_to_cpu(w_hdr->frame_control);
	if ((WLAN_GET_FC_TODS(fc) == 0) && (WLAN_GET_FC_FROMDS(fc) == 0)) {
		/* IBSS: addr1 = DA, addr2 = SA */
		ether_addr_copy(daddr, w_hdr->address1);
		ether_addr_copy(saddr, w_hdr->address2);
	} else if ((WLAN_GET_FC_TODS(fc) == 0) &&
		   (WLAN_GET_FC_FROMDS(fc) == 1)) {
		/* FromDS: addr1 = DA, addr3 = SA */
		ether_addr_copy(daddr, w_hdr->address1);
		ether_addr_copy(saddr, w_hdr->address3);
	} else if ((WLAN_GET_FC_TODS(fc) == 1) &&
		   (WLAN_GET_FC_FROMDS(fc) == 0)) {
		/* ToDS: addr3 = DA, addr2 = SA */
		ether_addr_copy(daddr, w_hdr->address3);
		ether_addr_copy(saddr, w_hdr->address2);
	} else {
		/* WDS (ToDS and FromDS): 4-address header */
		payload_offset = WLAN_HDR_A4_LEN;
		if (payload_length < WLAN_HDR_A4_LEN - WLAN_HDR_A3_LEN) {
			netdev_err(netdev, "A4 frame too short!\n");
			return 1;
		}
		payload_length -= (WLAN_HDR_A4_LEN - WLAN_HDR_A3_LEN);
		ether_addr_copy(daddr, w_hdr->address3);
		ether_addr_copy(saddr, w_hdr->address4);
	}

	/* perform de-wep if necessary.. */
	if ((wlandev->hostwep & HOSTWEP_PRIVACYINVOKED) &&
	    WLAN_GET_FC_ISWEP(fc) &&
	    (wlandev->hostwep & HOSTWEP_DECRYPT)) {
		/* need at least IV (4) + ICV (4) beyond the header */
		if (payload_length <= 8) {
			netdev_err(netdev,
				   "WEP frame too short (%u).\n", skb->len);
			return 1;
		}
		foo = wep_decrypt(wlandev, skb->data + payload_offset + 4,
				  payload_length - 8, -1,
				  skb->data + payload_offset,
				  skb->data + payload_offset +
				  payload_length - 4);
		if (foo) {
			/* de-wep failed, drop skb. */
			pr_debug("Host de-WEP failed, dropping frame (%d).\n",
				 foo);
			wlandev->rx.decrypt_err++;
			return 2;
		}

		/* subtract the IV+ICV length off the payload */
		payload_length -= 8;
		/* chop off the IV */
		skb_pull(skb, 4);
		/* chop off the ICV. */
		skb_trim(skb, skb->len - 4);

		wlandev->rx.decrypt++;
	}

	e_hdr = (struct wlan_ethhdr *)(skb->data + payload_offset);

	e_llc = (struct wlan_llc *)(skb->data + payload_offset);
	e_snap =
	    (struct wlan_snap *)(skb->data + payload_offset +
		sizeof(struct wlan_llc));

	/* Test for the various encodings */
	if ((payload_length >= sizeof(struct wlan_ethhdr)) &&
	    (e_llc->dsap != 0xaa || e_llc->ssap != 0xaa) &&
	    ((!ether_addr_equal_unaligned(daddr, e_hdr->daddr)) ||
	     (!ether_addr_equal_unaligned(saddr, e_hdr->saddr)))) {
		pr_debug("802.3 ENCAP len: %d\n", payload_length);
		/* 802.3 Encapsulated */
		/* Test for an overlength frame */
		if (payload_length > (netdev->mtu + ETH_HLEN)) {
			/* A bogus length ethfrm has been encap'd. */
			/* Is someone trying an oflow attack? */
			netdev_err(netdev, "ENCAP frame too large (%d > %d)\n",
				   payload_length, netdev->mtu + ETH_HLEN);
			return 1;
		}

		/* Chop off the 802.11 header.  it's already sane. */
		skb_pull(skb, payload_offset);
		/* chop off the 802.11 CRC */
		skb_trim(skb, skb->len - WLAN_CRC_LEN);

	} else if ((payload_length >= sizeof(struct wlan_llc) +
		sizeof(struct wlan_snap)) &&
		(e_llc->dsap == 0xaa) &&
		(e_llc->ssap == 0xaa) &&
		(e_llc->ctl == 0x03) &&
		   (((memcmp(e_snap->oui, oui_rfc1042,
			     WLAN_IEEE_OUI_LEN) == 0) &&
		  (ethconv == WLAN_ETHCONV_8021h) &&
		  (p80211_stt_findproto(be16_to_cpu(e_snap->type)))) ||
		    (memcmp(e_snap->oui, oui_rfc1042, WLAN_IEEE_OUI_LEN) !=
			0))) {
		pr_debug("SNAP+RFC1042 len: %d\n", payload_length);
		/* it's a SNAP + RFC1042 frame && protocol is in STT */
		/* build 802.3 + RFC1042 */

		/* Test for an overlength frame */
		if (payload_length > netdev->mtu) {
			/* A bogus length ethfrm has been sent. */
			/* Is someone trying an oflow attack? */
			netdev_err(netdev, "SNAP frame too large (%d > %d)\n",
				   payload_length, netdev->mtu);
			return 1;
		}

		/* chop 802.11 header from skb. */
		skb_pull(skb, payload_offset);

		/* create 802.3 header at beginning of skb. */
		e_hdr = skb_push(skb, ETH_HLEN);
		ether_addr_copy(e_hdr->daddr, daddr);
		ether_addr_copy(e_hdr->saddr, saddr);
		e_hdr->type = htons(payload_length);

		/* chop off the 802.11 CRC */
		skb_trim(skb, skb->len - WLAN_CRC_LEN);

	} else if ((payload_length >= sizeof(struct wlan_llc) +
		sizeof(struct wlan_snap)) &&
		(e_llc->dsap == 0xaa) &&
		(e_llc->ssap == 0xaa) &&
		(e_llc->ctl == 0x03)) {
		pr_debug("802.1h/RFC1042 len: %d\n", payload_length);
		/* it's an 802.1h frame || (an RFC1042 && protocol not in STT)
		 * build a DIXII + RFC894
		 */

		/* Test for an overlength frame */
		if ((payload_length - sizeof(struct wlan_llc) -
			sizeof(struct wlan_snap))
			> netdev->mtu) {
			/* A bogus length ethfrm has been sent. */
			/* Is someone trying an oflow attack? */
			netdev_err(netdev, "DIXII frame too large (%ld > %d)\n",
				   (long)(payload_length -
					 sizeof(struct wlan_llc) -
					 sizeof(struct wlan_snap)), netdev->mtu);
			return 1;
		}

		/* chop 802.11 header from skb. */
		skb_pull(skb, payload_offset);

		/* chop llc header from skb. */
		skb_pull(skb, sizeof(struct wlan_llc));

		/* chop snap header from skb. */
		skb_pull(skb, sizeof(struct wlan_snap));

		/* create 802.3 header at beginning of skb. */
		e_hdr = skb_push(skb, ETH_HLEN);
		e_hdr->type = e_snap->type;
		ether_addr_copy(e_hdr->daddr, daddr);
		ether_addr_copy(e_hdr->saddr, saddr);

		/* chop off the 802.11 CRC */
		skb_trim(skb, skb->len - WLAN_CRC_LEN);
	} else {
		pr_debug("NON-ENCAP len: %d\n", payload_length);
		/* any NON-ENCAP */
		/* it's a generic 80211+LLC or IPX 'Raw 802.3' */
		/*  build an 802.3 frame */
		/* allocate space and setup hostbuf */

		/* Test for an overlength frame */
		if (payload_length > netdev->mtu) {
			/* A bogus length ethfrm has been sent. */
			/* Is someone trying an oflow attack? */
			netdev_err(netdev, "OTHER frame too large (%d > %d)\n",
				   payload_length, netdev->mtu);
			return 1;
		}

		/* Chop off the 802.11 header. */
		skb_pull(skb, payload_offset);

		/* create 802.3 header at beginning of skb. */
		e_hdr = skb_push(skb, ETH_HLEN);
		ether_addr_copy(e_hdr->daddr, daddr);
		ether_addr_copy(e_hdr->saddr, saddr);
		e_hdr->type = htons(payload_length);

		/* chop off the 802.11 CRC */
		skb_trim(skb, skb->len - WLAN_CRC_LEN);
	}

	/*
	 * Note that eth_type_trans() expects an skb w/ skb->data pointing
	 * at the MAC header, it then sets the following skb members:
	 * skb->mac_header,
	 * skb->data, and
	 * skb->pkt_type.
	 * It then _returns_ the value that _we're_ supposed to stuff in
	 * skb->protocol.  This is nuts.
	 */
	skb->protocol = eth_type_trans(skb, netdev);

	/* jkriegl: process signal and noise as set in hfa384x_int_rx() */
	/* jkriegl: only process signal/noise if requested by iwspy */
	if (wlandev->spy_number)
		orinoco_spy_gather(wlandev, eth_hdr(skb)->h_source,
				   p80211skb_rxmeta(skb));

	/* Free the metadata */
	p80211skb_rxmeta_detach(skb);

	return 0;
}
/*----------------------------------------------------------------
* p80211_stt_findproto
*
* Searches the 802.1h Selective Translation Table for a given
* protocol.
*
* Arguments:
* proto protocol number (in host order) to search for.
*
* Returns:
* 1 - if the table is empty or a match is found.
* 0 - if the table is non-empty and a match is not found.
*
* Call context:
* May be called in interrupt or non-interrupt context
*----------------------------------------------------------------
*/
int p80211_stt_findproto(u16 proto)
{
	/* Behave like the Zoom Win95 driver in 802.1h mode: the only
	 * protocol currently treated as present in the Selective
	 * Translation Table is AppleTalk AARP.
	 * TODO: If necessary, add an actual table search; we'll probably
	 * need this to match the CMAC's way of doing things.
	 * Need to do some testing to confirm.
	 */
	return proto == ETH_P_AARP;
}
/*----------------------------------------------------------------
* p80211skb_rxmeta_detach
*
* Disconnects the frmmeta and rxmeta from an skb.
*
* Arguments:
* wlandev The wlandev this skb belongs to.
* skb The skb we're attaching to.
*
* Returns:
* 0 on success, non-zero otherwise
*
* Call context:
* May be called in interrupt or non-interrupt context
*----------------------------------------------------------------
*/
void p80211skb_rxmeta_detach(struct sk_buff *skb)
{
	struct p80211_frmmeta *frmmeta;
	struct p80211_rxmeta *rxmeta;

	/* Guard against a missing skb, a missing/invalid frmmeta overlay,
	 * or a frmmeta without an attached rxmeta.
	 */
	if (!skb) {
		pr_debug("Called w/ null skb.\n");
		return;
	}

	frmmeta = p80211skb_frmmeta(skb);
	if (!frmmeta) {
		pr_debug("Called w/ bad frmmeta magic.\n");
		return;
	}

	rxmeta = frmmeta->rx;
	if (!rxmeta) {
		pr_debug("Called w/ bad rxmeta ptr.\n");
		return;
	}

	/* Release the rxmeta and wipe the frmmeta overlay in skb->cb. */
	kfree(rxmeta);
	memset(skb->cb, 0, sizeof(skb->cb));
}
/*----------------------------------------------------------------
* p80211skb_rxmeta_attach
*
* Allocates a p80211rxmeta structure, initializes it, and attaches
* it to an skb.
*
* Arguments:
* wlandev The wlandev this skb belongs to.
* skb The skb we're attaching to.
*
* Returns:
* 0 on success, non-zero otherwise
*
* Call context:
* May be called in interrupt or non-interrupt context
*----------------------------------------------------------------
*/
int p80211skb_rxmeta_attach(struct wlandevice *wlandev, struct sk_buff *skb)
{
	struct p80211_rxmeta *rxmeta;
	struct p80211_frmmeta *frmmeta;

	/* Refuse a second attach.  Note this reports success (0), which
	 * preserves the historical behavior callers rely on.
	 */
	if (p80211skb_rxmeta(skb)) {
		netdev_err(wlandev->netdev,
			   "%s: RXmeta already attached!\n", wlandev->name);
		return 0;
	}

	rxmeta = kzalloc(sizeof(*rxmeta), GFP_ATOMIC);
	if (!rxmeta)
		return 1;

	rxmeta->wlandev = wlandev;
	rxmeta->hosttime = jiffies;

	/* Overlay a frmmeta_t onto skb->cb */
	memset(skb->cb, 0, sizeof(struct p80211_frmmeta));
	frmmeta = (struct p80211_frmmeta *)skb->cb;
	frmmeta->magic = P80211_FRMMETA_MAGIC;
	frmmeta->rx = rxmeta;

	return 0;
}
/*----------------------------------------------------------------
* p80211skb_free
*
* Frees an entire p80211skb by checking and freeing the meta struct
* and then freeing the skb.
*
* Arguments:
* wlandev The wlandev this skb belongs to.
* skb The skb we're attaching to.
*
* Returns:
* 0 on success, non-zero otherwise
*
* Call context:
* May be called in interrupt or non-interrupt context
*----------------------------------------------------------------
*/
void p80211skb_free(struct wlandevice *wlandev, struct sk_buff *skb)
{
	struct p80211_frmmeta *frmmeta = p80211skb_frmmeta(skb);

	/* Detach and free the rxmeta first (if any), then free the skb. */
	if (frmmeta && frmmeta->rx)
		p80211skb_rxmeta_detach(skb);
	else
		netdev_err(wlandev->netdev,
			   "Freeing an skb (%p) w/ no frmmeta.\n", skb);

	dev_kfree_skb(skb);
}
| linux-master | drivers/staging/wlan-ng/p80211conv.c |
// SPDX-License-Identifier: (GPL-2.0 OR MPL-1.1)
/* from src/prism2/download/prism2dl.c
*
* utility for downloading prism2 images moved into kernelspace
*
* Copyright (C) 1999 AbsoluteValue Systems, Inc. All Rights Reserved.
* --------------------------------------------------------------------
*
* linux-wlan
*
* --------------------------------------------------------------------
*
* Inquiries regarding the linux-wlan Open Source project can be
* made directly to:
*
* AbsoluteValue Systems Inc.
* [email protected]
* http://www.linux-wlan.com
*
* --------------------------------------------------------------------
*
* Portions of the development of this software were funded by
* Intersil Corporation as part of PRISM(R) chipset product development.
*
* --------------------------------------------------------------------
*/
/*================================================================*/
/* System Includes */
#include <linux/ihex.h>
#include <linux/slab.h>
/*================================================================*/
/* Local Constants */
#define PRISM2_USB_FWFILE "prism2_ru.fw"
MODULE_FIRMWARE(PRISM2_USB_FWFILE);
#define S3DATA_MAX 5000
#define S3PLUG_MAX 200
#define S3CRC_MAX 200
#define S3INFO_MAX 50
#define S3ADDR_PLUG (0xff000000UL)
#define S3ADDR_CRC (0xff100000UL)
#define S3ADDR_INFO (0xff200000UL)
#define S3ADDR_START (0xff400000UL)
#define CHUNKS_MAX 100
#define WRITESIZE_MAX 4096
/*================================================================*/
/* Local Types */
struct s3datarec {
u32 len;
u32 addr;
u8 checksum;
u8 *data;
};
struct s3plugrec {
u32 itemcode;
u32 addr;
u32 len;
};
struct s3crcrec {
u32 addr;
u32 len;
unsigned int dowrite;
};
struct s3inforec {
u16 len;
u16 type;
union {
struct hfa384x_compident version;
struct hfa384x_caplevel compat;
u16 buildseq;
struct hfa384x_compident platform;
} info;
};
struct pda {
u8 buf[HFA384x_PDA_LEN_MAX];
struct hfa384x_pdrec *rec[HFA384x_PDA_RECS_MAX];
unsigned int nrec;
};
struct imgchunk {
u32 addr; /* start address */
u32 len; /* in bytes */
u16 crc; /* CRC value (if it falls at a chunk boundary) */
u8 *data;
};
/*================================================================*/
/* Local Static Definitions */
/*----------------------------------------------------------------*/
/* s-record image processing */
/* Data records */
static unsigned int ns3data;
static struct s3datarec *s3data;
/* Plug records */
static unsigned int ns3plug;
static struct s3plugrec s3plug[S3PLUG_MAX];
/* CRC records */
static unsigned int ns3crc;
static struct s3crcrec s3crc[S3CRC_MAX];
/* Info records */
static unsigned int ns3info;
static struct s3inforec s3info[S3INFO_MAX];
/* S7 record (there _better_ be only one) */
static u32 startaddr;
/* Load image chunks */
static unsigned int nfchunks;
static struct imgchunk fchunk[CHUNKS_MAX];
/* Note that for the following pdrec_t arrays, the len and code */
/* fields are stored in HOST byte order. The mkpdrlist() function */
/* does the conversion. */
/*----------------------------------------------------------------*/
/* PDA, built from [card|newfile]+[addfile1+addfile2...] */
static struct pda pda;
static struct hfa384x_compident nicid;
static struct hfa384x_caplevel rfid;
static struct hfa384x_caplevel macid;
static struct hfa384x_caplevel priid;
/*================================================================*/
/* Local Function Declarations */
static int prism2_fwapply(const struct ihex_binrec *rfptr,
struct wlandevice *wlandev);
static int read_fwfile(const struct ihex_binrec *rfptr);
static int mkimage(struct imgchunk *clist, unsigned int *ccnt);
static int read_cardpda(struct pda *pda, struct wlandevice *wlandev);
static int mkpdrlist(struct pda *pda);
static int plugimage(struct imgchunk *fchunk, unsigned int nfchunks,
struct s3plugrec *s3plug, unsigned int ns3plug,
struct pda *pda);
static int crcimage(struct imgchunk *fchunk, unsigned int nfchunks,
struct s3crcrec *s3crc, unsigned int ns3crc);
static int writeimage(struct wlandevice *wlandev, struct imgchunk *fchunk,
unsigned int nfchunks);
static void free_chunks(struct imgchunk *fchunk, unsigned int *nfchunks);
static void free_srecs(void);
static int validate_identity(void);
/*================================================================*/
/* Function Definitions */
/*----------------------------------------------------------------
* prism2_fwtry
*
* Try and get firmware into memory
*
* Arguments:
* udev usb device structure
* wlandev wlan device structure
*
* Returns:
* 0 - success
* ~0 - failure
*----------------------------------------------------------------
*/
static int prism2_fwtry(struct usb_device *udev, struct wlandevice *wlandev)
{
	const struct firmware *fw_entry = NULL;
	int rc;

	netdev_info(wlandev->netdev, "prism2_usb: Checking for firmware %s\n",
		    PRISM2_USB_FWFILE);

	/* Firmware is optional: if the image isn't present, the card can
	 * still be used with whatever is already loaded.
	 */
	rc = request_ihex_firmware(&fw_entry, PRISM2_USB_FWFILE, &udev->dev);
	if (rc != 0) {
		netdev_info(wlandev->netdev,
			    "prism2_usb: Firmware not available, but not essential\n");
		netdev_info(wlandev->netdev,
			    "prism2_usb: can continue to use card anyway.\n");
		return 1;
	}

	netdev_info(wlandev->netdev,
		    "prism2_usb: %s will be processed, size %zu\n",
		    PRISM2_USB_FWFILE, fw_entry->size);
	prism2_fwapply((const struct ihex_binrec *)fw_entry->data, wlandev);

	release_firmware(fw_entry);
	return 0;
}
/*----------------------------------------------------------------
* prism2_fwapply
*
* Apply the firmware loaded into memory
*
* Arguments:
* rfptr firmware image in kernel memory
* wlandev device
*
* Returns:
* 0 - success
* ~0 - failure
*----------------------------------------------------------------
*/
static int prism2_fwapply(const struct ihex_binrec *rfptr,
			  struct wlandevice *wlandev)
{
	signed int result = 0;
	struct p80211msg_dot11req_mibget getmsg;
	struct p80211itemd *item;
	u32 *data;

	/* Initialize the data structures */
	ns3data = 0;
	s3data = kcalloc(S3DATA_MAX, sizeof(*s3data), GFP_KERNEL);
	if (!s3data) {
		result = -ENOMEM;
		goto out;
	}

	ns3plug = 0;
	memset(s3plug, 0, sizeof(s3plug));
	ns3crc = 0;
	memset(s3crc, 0, sizeof(s3crc));
	ns3info = 0;
	memset(s3info, 0, sizeof(s3info));
	startaddr = 0;

	nfchunks = 0;
	memset(fchunk, 0, sizeof(fchunk));
	memset(&nicid, 0, sizeof(nicid));
	memset(&rfid, 0, sizeof(rfid));
	memset(&macid, 0, sizeof(macid));
	memset(&priid, 0, sizeof(priid));

	/* clear the pda and add an initial END record */
	memset(&pda, 0, sizeof(pda));
	pda.rec[0] = (struct hfa384x_pdrec *)pda.buf;
	pda.rec[0]->len = cpu_to_le16(2);	/* len in words */
	pda.rec[0]->code = cpu_to_le16(HFA384x_PDR_END_OF_PDA);
	pda.nrec = 1;

	/*-----------------------------------------------------*/
	/* Put card into fwload state */
	prism2sta_ifstate(wlandev, P80211ENUM_ifstate_fwload);

	/* Build the PDA we're going to use. */
	/* From here on, all error paths go through the cleanup label so
	 * that the s3data allocation above is never leaked ("goto out"
	 * skipped free_srecs() and leaked it).
	 */
	if (read_cardpda(&pda, wlandev)) {
		netdev_err(wlandev->netdev, "load_cardpda failed, exiting.\n");
		result = 1;
		goto free_chunks;
	}

	/* read the card's PRI-SUP */
	memset(&getmsg, 0, sizeof(getmsg));
	getmsg.msgcode = DIDMSG_DOT11REQ_MIBGET;
	getmsg.msglen = sizeof(getmsg);
	strscpy(getmsg.devname, wlandev->name, sizeof(getmsg.devname));

	getmsg.mibattribute.did = DIDMSG_DOT11REQ_MIBGET_MIBATTRIBUTE;
	getmsg.mibattribute.status = P80211ENUM_msgitem_status_data_ok;
	getmsg.resultcode.did = DIDMSG_DOT11REQ_MIBGET_RESULTCODE;
	getmsg.resultcode.status = P80211ENUM_msgitem_status_no_value;

	item = (struct p80211itemd *)getmsg.mibattribute.data;
	item->did = DIDMIB_P2_NIC_PRISUPRANGE;
	item->status = P80211ENUM_msgitem_status_no_value;

	data = (u32 *)item->data;

	/* DIDmsg_dot11req_mibget */
	prism2mgmt_mibset_mibget(wlandev, &getmsg);
	if (getmsg.resultcode.data != P80211ENUM_resultcode_success)
		netdev_err(wlandev->netdev, "Couldn't fetch PRI-SUP info\n");

	/* Already in host order */
	priid.role = *data++;
	priid.id = *data++;
	priid.variant = *data++;
	priid.bottom = *data++;
	priid.top = *data++;

	/* Read the S3 file */
	result = read_fwfile(rfptr);
	if (result) {
		netdev_err(wlandev->netdev,
			   "Failed to read the data exiting.\n");
		goto free_chunks;
	}

	result = validate_identity();
	if (result) {
		netdev_err(wlandev->netdev, "Incompatible firmware image.\n");
		goto free_chunks;
	}

	if (startaddr == 0x00000000) {
		netdev_err(wlandev->netdev,
			   "Can't RAM download a Flash image!\n");
		result = 1;
		goto free_chunks;
	}

	/* Make the image chunks */
	result = mkimage(fchunk, &nfchunks);
	if (result) {
		netdev_err(wlandev->netdev, "Failed to make image chunk.\n");
		goto free_chunks;
	}

	/* Do any plugging */
	result = plugimage(fchunk, nfchunks, s3plug, ns3plug, &pda);
	if (result) {
		netdev_err(wlandev->netdev, "Failed to plug data.\n");
		goto free_chunks;
	}

	/* Insert any CRCs */
	result = crcimage(fchunk, nfchunks, s3crc, ns3crc);
	if (result) {
		netdev_err(wlandev->netdev, "Failed to insert all CRCs\n");
		goto free_chunks;
	}

	/* Write the image */
	result = writeimage(wlandev, fchunk, nfchunks);
	if (result) {
		netdev_err(wlandev->netdev, "Failed to ramwrite image data.\n");
		goto free_chunks;
	}

	netdev_info(wlandev->netdev, "prism2_usb: firmware loading finished.\n");

free_chunks:
	/* clear any allocated memory (safe even when nfchunks == 0) */
	free_chunks(fchunk, &nfchunks);
	free_srecs();
out:
	return result;
}
/*----------------------------------------------------------------
* crcimage
*
* Adds a CRC16 in the two bytes prior to each block identified by
* an S3 CRC record. Currently, we don't actually do a CRC we just
* insert the value 0xC0DE in hfa384x order.
*
* Arguments:
* fchunk Array of image chunks
* nfchunks Number of image chunks
* s3crc Array of crc records
* ns3crc Number of crc records
*
* Returns:
* 0 success
* ~0 failure
*----------------------------------------------------------------
*/
static int crcimage(struct imgchunk *fchunk, unsigned int nfchunks,
		    struct s3crcrec *s3crc, unsigned int ns3crc)
{
	int r;
	int c;
	u32 crcstart;
	u32 cstart = 0;
	u32 cend;
	u8 *dst;

	for (r = 0; r < ns3crc; r++) {
		if (!s3crc[r].dowrite)
			continue;

		crcstart = s3crc[r].addr;

		/* Locate the chunk containing this CRC location.  Only the
		 * start address is matched: the len field of some crc
		 * records doesn't match the actual data, so a full
		 * address+len check isn't usable.  The "- 2" makes sure
		 * the chunk has room for the CRC value itself.
		 */
		for (c = 0; c < nfchunks; c++) {
			cstart = fchunk[c].addr;
			cend = fchunk[c].addr + fchunk[c].len;
			if (crcstart - 2 >= cstart && crcstart < cend)
				break;
		}
		if (c >= nfchunks) {
			pr_err("Failed to find chunk for crcrec[%d], addr=0x%06x len=%d , aborting crc.\n",
			       r, s3crc[r].addr, s3crc[r].len);
			return 1;
		}

		/* Stamp the 0xC0DE placeholder (byte order: 0xde, 0xc0)
		 * into the two bytes just before the block.
		 */
		pr_debug("Adding crc @ 0x%06x\n", s3crc[r].addr - 2);
		dst = fchunk[c].data + (crcstart - cstart - 2);
		dst[0] = 0xde;
		dst[1] = 0xc0;
	}
	return 0;
}
/*----------------------------------------------------------------
* free_chunks
*
* Clears the chunklist data structures in preparation for a new file.
*
* Arguments:
* none
*
* Returns:
* nothing
*----------------------------------------------------------------
*/
static void free_chunks(struct imgchunk *fchunk, unsigned int *nfchunks)
{
int i;
for (i = 0; i < *nfchunks; i++)
kfree(fchunk[i].data);
*nfchunks = 0;
memset(fchunk, 0, sizeof(*fchunk));
}
/*----------------------------------------------------------------
* free_srecs
*
* Clears the srec data structures in preparation for a new file.
*
* Arguments:
* none
*
* Returns:
* nothing
*----------------------------------------------------------------
*/
static void free_srecs(void)
{
ns3data = 0;
kfree(s3data);
ns3plug = 0;
memset(s3plug, 0, sizeof(s3plug));
ns3crc = 0;
memset(s3crc, 0, sizeof(s3crc));
ns3info = 0;
memset(s3info, 0, sizeof(s3info));
startaddr = 0;
}
/*----------------------------------------------------------------
* mkimage
*
* Scans the currently loaded set of S records for data residing
* in contiguous memory regions. Each contiguous region is then
* made into a 'chunk'. This function assumes that we're building
* a new chunk list. Assumes the s3data items are in sorted order.
*
* Arguments: none
*
* Returns:
* 0 - success
* ~0 - failure (probably an errno)
*----------------------------------------------------------------
*/
static int mkimage(struct imgchunk *clist, unsigned int *ccnt)
{
	int result = 0;
	int i;
	int j;
	int currchunk = 0;
	u32 nextaddr = 0;	/* address one past the end of the current chunk */
	u32 s3start;
	u32 s3end;
	u32 cstart = 0;
	u32 cend;
	u32 coffset;

	/* There may already be data in the chunklist */
	*ccnt = 0;

	/* Establish the location and size of each chunk.  Relies on the
	 * s3data records being in sorted address order (see the function
	 * header): contiguous records are coalesced into one chunk.
	 */
	for (i = 0; i < ns3data; i++) {
		if (s3data[i].addr == nextaddr) {
			/* existing chunk, grow it */
			clist[currchunk].len += s3data[i].len;
			nextaddr += s3data[i].len;
		} else {
			/* New chunk */
			(*ccnt)++;
			currchunk = *ccnt - 1;
			clist[currchunk].addr = s3data[i].addr;
			clist[currchunk].len = s3data[i].len;
			nextaddr = s3data[i].addr + s3data[i].len;
			/* Expand the chunk if there is a CRC record at */
			/* their beginning bound */
			/* (the CRC occupies the 2 bytes just before the
			 * block, so widen the chunk downward by 2 to make
			 * room for crcimage() to write it)
			 */
			for (j = 0; j < ns3crc; j++) {
				if (s3crc[j].dowrite &&
				    s3crc[j].addr == clist[currchunk].addr) {
					clist[currchunk].addr -= 2;
					clist[currchunk].len += 2;
				}
			}
		}
	}

	/* We're currently assuming there aren't any overlapping chunks */
	/*  if this proves false, we'll need to add code to coalesce. */

	/* Allocate buffer space for chunks */
	for (i = 0; i < *ccnt; i++) {
		clist[i].data = kzalloc(clist[i].len, GFP_KERNEL);
		if (!clist[i].data)
			return 1;

		pr_debug("chunk[%d]: addr=0x%06x len=%d\n",
			 i, clist[i].addr, clist[i].len);
	}

	/* Copy srec data to chunks.  Each s3data record must fall entirely
	 * within one of the chunks built above; anything else is an error.
	 */
	for (i = 0; i < ns3data; i++) {
		s3start = s3data[i].addr;
		s3end = s3start + s3data[i].len - 1;
		for (j = 0; j < *ccnt; j++) {
			cstart = clist[j].addr;
			cend = cstart + clist[j].len - 1;
			if (s3start >= cstart && s3end <= cend)
				break;
		}
		if (((unsigned int)j) >= (*ccnt)) {
			pr_err("s3rec(a=0x%06x,l=%d), no chunk match, exiting.\n",
			       s3start, s3data[i].len);
			return 1;
		}
		coffset = s3start - cstart;
		memcpy(clist[j].data + coffset, s3data[i].data, s3data[i].len);
	}
	return result;
}
/*----------------------------------------------------------------
* mkpdrlist
*
* Reads a raw PDA and builds an array of pdrec_t structures.
*
* Arguments:
* pda buffer containing raw PDA bytes
* pdrec ptr to an array of pdrec_t's. Will be filled on exit.
* nrec ptr to a variable that will contain the count of PDRs
*
* Returns:
* 0 - success
* ~0 - failure (probably an errno)
*----------------------------------------------------------------
*/
static int mkpdrlist(struct pda *pda)
{
	__le16 *pda16 = (__le16 *)pda->buf;
	int curroff;		/* in 'words' */

	pda->nrec = 0;
	curroff = 0;
	/* Walk the raw PDA word array.  At each record, pda16[curroff] is
	 * the record length and pda16[curroff + 1] is the record code;
	 * stop at the END_OF_PDA record or when the offset would run past
	 * the buffer.  While walking, cache the NICID/MFI/CFI ranges in
	 * host byte order into the module-level nicid/rfid/macid globals.
	 * NOTE(review): pda->rec[] is indexed without a bounds check
	 * against HFA384x_PDA_RECS_MAX -- confirm the PDA cannot contain
	 * more records than that.
	 */
	while (curroff < (HFA384x_PDA_LEN_MAX / 2 - 1) &&
	       le16_to_cpu(pda16[curroff + 1]) != HFA384x_PDR_END_OF_PDA) {
		pda->rec[pda->nrec] = (struct hfa384x_pdrec *)&pda16[curroff];

		if (le16_to_cpu(pda->rec[pda->nrec]->code) ==
		    HFA384x_PDR_NICID) {
			memcpy(&nicid, &pda->rec[pda->nrec]->data.nicid,
			       sizeof(nicid));
			le16_to_cpus(&nicid.id);
			le16_to_cpus(&nicid.variant);
			le16_to_cpus(&nicid.major);
			le16_to_cpus(&nicid.minor);
		}
		if (le16_to_cpu(pda->rec[pda->nrec]->code) ==
		    HFA384x_PDR_MFISUPRANGE) {
			memcpy(&rfid, &pda->rec[pda->nrec]->data.mfisuprange,
			       sizeof(rfid));
			le16_to_cpus(&rfid.id);
			le16_to_cpus(&rfid.variant);
			le16_to_cpus(&rfid.bottom);
			le16_to_cpus(&rfid.top);
		}
		if (le16_to_cpu(pda->rec[pda->nrec]->code) ==
		    HFA384x_PDR_CFISUPRANGE) {
			memcpy(&macid, &pda->rec[pda->nrec]->data.cfisuprange,
			       sizeof(macid));
			le16_to_cpus(&macid.id);
			le16_to_cpus(&macid.variant);
			le16_to_cpus(&macid.bottom);
			le16_to_cpus(&macid.top);
		}
		(pda->nrec)++;
		/* advance by the record's len word plus len data words */
		curroff += le16_to_cpu(pda16[curroff]) + 1;
	}
	if (curroff >= (HFA384x_PDA_LEN_MAX / 2 - 1)) {
		/* ran off the buffer without seeing END_OF_PDA */
		pr_err("no end record found or invalid lengths in PDR data, exiting. %x %d\n",
		       curroff, pda->nrec);
		return 1;
	}
	/* keep the END record itself as the final list entry */
	pda->rec[pda->nrec] = (struct hfa384x_pdrec *)&pda16[curroff];
	(pda->nrec)++;
	return 0;
}
/*----------------------------------------------------------------
* plugimage
*
* Plugs the given image using the given plug records from the given
* PDA and filename.
*
* Arguments:
* fchunk Array of image chunks
* nfchunks Number of image chunks
* s3plug Array of plug records
* ns3plug Number of plug records
* pda Current pda data
*
* Returns:
* 0 success
* ~0 failure
*----------------------------------------------------------------
*/
static int plugimage(struct imgchunk *fchunk, unsigned int nfchunks,
		     struct s3plugrec *s3plug, unsigned int ns3plug,
		     struct pda *pda)
{
	int result = 0;
	int i;			/* plug index */
	int j;			/* index of PDR or -1 if fname plug */
	int c;			/* chunk index */
	u32 pstart;
	u32 pend;
	u32 cstart = 0;
	u32 cend;
	u32 chunkoff;
	u8 *dest;

	/* for each plug record */
	for (i = 0; i < ns3plug; i++) {
		pstart = s3plug[i].addr;
		pend = s3plug[i].addr + s3plug[i].len;
		/* j stays -1 for the special "filename" plug record. */
		j = -1;
		/* find the matching PDR (or filename) */
		if (s3plug[i].itemcode != 0xffffffffUL) { /* not filename */
			for (j = 0; j < pda->nrec; j++) {
				if (s3plug[i].itemcode ==
				    le16_to_cpu(pda->rec[j]->code))
					break;
			}
		}
		/* A plug record without a matching PDR is only a warning;
		 * the firmware image carries usable defaults (see below).
		 */
		if (j >= pda->nrec && j != -1) { /* if no matching PDR, fail */
			pr_warn("warning: Failed to find PDR for plugrec 0x%04x.\n",
				s3plug[i].itemcode);
			continue; /* and move on to the next PDR */
			/* MSM: They swear that unless it's the MAC address,
			 * the serial number, or the TX calibration records,
			 * then there's reasonable defaults in the f/w
			 * image. Therefore, missing PDRs in the card
			 * should only be a warning, not fatal.
			 * TODO: add fatals for the PDRs mentioned above.
			 */
		}
		/* Validate plug len against PDR len */
		if (j != -1 && s3plug[i].len < le16_to_cpu(pda->rec[j]->len)) {
			pr_err("error: Plug vs. PDR len mismatch for plugrec 0x%04x, abort plugging.\n",
			       s3plug[i].itemcode);
			result = 1;
			continue;
		}
		/*
		 * Validate plug address against
		 * chunk data and identify chunk
		 */
		for (c = 0; c < nfchunks; c++) {
			cstart = fchunk[c].addr;
			cend = fchunk[c].addr + fchunk[c].len;
			/* plug range must lie wholly inside one chunk */
			if (pstart >= cstart && pend <= cend)
				break;
		}
		if (c >= nfchunks) {
			pr_err("error: Failed to find image chunk for plugrec 0x%04x.\n",
			       s3plug[i].itemcode);
			result = 1;
			continue;
		}
		/* Plug data */
		chunkoff = pstart - cstart;
		dest = fchunk[c].data + chunkoff;
		pr_debug("Plugging item 0x%04x @ 0x%06x, len=%d, cnum=%d coff=0x%06x\n",
			 s3plug[i].itemcode, pstart, s3plug[i].len,
			 c, chunkoff);
		if (j == -1) {	/* plug the filename */
			/* zero-fill first so short names are NUL padded */
			memset(dest, 0, s3plug[i].len);
			strncpy(dest, PRISM2_USB_FWFILE, s3plug[i].len - 1);
		} else {	/* plug a PDR */
			memcpy(dest, &pda->rec[j]->data, s3plug[i].len);
		}
	}
	return result;
}
/*----------------------------------------------------------------
* read_cardpda
*
* Sends the command for the driver to read the pda from the card
* named in the device variable. Upon success, the card pda is
* stored in the "cardpda" variables. Note that the pda structure
* is considered 'well formed' after this function. That means
* that the nrecs is valid, the rec array has been set up, and there's
* a valid PDAEND record in the raw PDA data.
*
* Arguments:
* pda pda structure
* wlandev device
*
* Returns:
* 0 - success
* ~0 - failure (probably an errno)
*----------------------------------------------------------------
*/
static int read_cardpda(struct pda *pda, struct wlandevice *wlandev)
{
	int result = 0;
	struct p80211msg_p2req_readpda *msg;

	msg = kzalloc(sizeof(*msg), GFP_KERNEL);
	if (!msg)
		return -ENOMEM;

	/* set up the msg */
	msg->msgcode = DIDMSG_P2REQ_READPDA;
	/* Fix: report the size of the whole message structure, not the
	 * size of the pointer (sizeof(msg) was only 4/8 bytes).
	 */
	msg->msglen = sizeof(*msg);
	strscpy(msg->devname, wlandev->name, sizeof(msg->devname));
	msg->pda.did = DIDMSG_P2REQ_READPDA_PDA;
	msg->pda.len = HFA384x_PDA_LEN_MAX;
	msg->pda.status = P80211ENUM_msgitem_status_no_value;
	msg->resultcode.did = DIDMSG_P2REQ_READPDA_RESULTCODE;
	msg->resultcode.len = sizeof(u32);
	msg->resultcode.status = P80211ENUM_msgitem_status_no_value;

	if (prism2mgmt_readpda(wlandev, msg) != 0) {
		/* prism2mgmt_readpda prints an errno if appropriate */
		result = -1;
	} else if (msg->resultcode.data == P80211ENUM_resultcode_success) {
		/* Copy the raw PDA out and build the PDR list from it. */
		memcpy(pda->buf, msg->pda.data, HFA384x_PDA_LEN_MAX);
		result = mkpdrlist(pda);
	} else {
		/* resultcode must've been something other than success */
		result = -1;
	}

	kfree(msg);
	return result;
}
/*----------------------------------------------------------------
* read_fwfile
*
* Reads the given fw file which should have been compiled from an srec
* file. Each record in the fw file will either be a plain data record,
* a start address record, or other records used for plugging.
*
* Note that data records are expected to be sorted into
* ascending address order in the fw file.
*
* Note also that the start address record, originally an S7 record in
* the srec file, is expected in the fw file to be like a data record but
* with a certain address to make it identifiable.
*
* Here's the SREC format that the fw should have come from:
* S[37]nnaaaaaaaaddd...dddcc
*
* nn - number of bytes starting with the address field
* aaaaaaaa - address in readable (or big endian) format
* dd....dd - 0-245 data bytes (two chars per byte)
* cc - checksum
*
* The S7 record's (there should be only one) address value gets
* converted to an S3 record with address of 0xff400000, with the
* start address being stored as a 4 byte data word. That address is
* the start execution address used for RAM downloads.
*
* The S3 records have a collection of subformats indicated by the
* value of aaaaaaaa:
* 0xff000000 - Plug record, data field format:
* xxxxxxxxaaaaaaaassssssss
* x - PDR code number (little endian)
* a - Address in load image to plug (little endian)
* s - Length of plug data area (little endian)
*
* 0xff100000 - CRC16 generation record, data field format:
* aaaaaaaassssssssbbbbbbbb
* a - Start address for CRC calculation (little endian)
* s - Length of data to calculate over (little endian)
* b - Boolean, true=write crc, false=don't write
*
* 0xff200000 - Info record, data field format:
* ssssttttdd..dd
* s - Size in words (little endian)
* t - Info type (little endian), see #defines and
* struct s3inforec for details about types.
* d - (s - 1) little endian words giving the contents of
* the given info type.
*
* 0xff400000 - Start address record, data field format:
* aaaaaaaa
* a - Address in load image to plug (little endian)
*
* Arguments:
* record firmware image (ihex record structure) in kernel memory
*
* Returns:
* 0 - success
* ~0 - failure (probably an errno)
*----------------------------------------------------------------
*/
static int read_fwfile(const struct ihex_binrec *record)
{
	int i;
	int rcnt = 0;
	u16 *tmpinfo;
	u16 *ptr16;
	u32 *ptr32, len, addr;

	pr_debug("Reading fw file ...\n");

	while (record) {
		rcnt++;
		/* ihex headers are big-endian on the wire. */
		len = be16_to_cpu(record->len);
		addr = be32_to_cpu(record->addr);

		/* Point into data for different word lengths */
		/* NOTE(review): the payload words below are dereferenced
		 * without le*_to_cpu conversion and may be unaligned;
		 * this presumably assumes a little-endian host that
		 * tolerates unaligned loads - confirm.
		 */
		ptr32 = (u32 *)record->data;
		ptr16 = (u16 *)record->data;

		/* parse what was an S3 srec and put it in the right array */
		switch (addr) {
		case S3ADDR_START:
			/* Single start-execution-address record. */
			startaddr = *ptr32;
			pr_debug("  S7 start addr, record=%d addr=0x%08x\n",
				 rcnt,
				 startaddr);
			break;
		case S3ADDR_PLUG:
			/* Plug record: itemcode, target addr, length. */
			s3plug[ns3plug].itemcode = *ptr32;
			s3plug[ns3plug].addr = *(ptr32 + 1);
			s3plug[ns3plug].len = *(ptr32 + 2);
			pr_debug("  S3 plugrec, record=%d itemcode=0x%08x addr=0x%08x len=%d\n",
				 rcnt,
				 s3plug[ns3plug].itemcode,
				 s3plug[ns3plug].addr,
				 s3plug[ns3plug].len);
			ns3plug++;
			if (ns3plug == S3PLUG_MAX) {
				pr_err("S3 plugrec limit reached - aborting\n");
				return 1;
			}
			break;
		case S3ADDR_CRC:
			/* CRC16 generation record: addr, length, do-write. */
			s3crc[ns3crc].addr = *ptr32;
			s3crc[ns3crc].len = *(ptr32 + 1);
			s3crc[ns3crc].dowrite = *(ptr32 + 2);
			pr_debug("  S3 crcrec, record=%d addr=0x%08x len=%d write=0x%08x\n",
				 rcnt,
				 s3crc[ns3crc].addr,
				 s3crc[ns3crc].len,
				 s3crc[ns3crc].dowrite);
			ns3crc++;
			if (ns3crc == S3CRC_MAX) {
				pr_err("S3 crcrec limit reached - aborting\n");
				return 1;
			}
			break;
		case S3ADDR_INFO:
			/* Info record: 16-bit length-in-words and type,
			 * followed by (len - 1) words of payload.
			 */
			s3info[ns3info].len = *ptr16;
			s3info[ns3info].type = *(ptr16 + 1);
			pr_debug("  S3 inforec, record=%d len=0x%04x type=0x%04x\n",
				 rcnt,
				 s3info[ns3info].len,
				 s3info[ns3info].type);
			/* Bound the payload before copying it. */
			if (((s3info[ns3info].len - 1) * sizeof(u16)) >
			    sizeof(s3info[ns3info].info)) {
				pr_err("S3 inforec length too long - aborting\n");
				return 1;
			}
			tmpinfo = (u16 *)&s3info[ns3info].info.version;
			pr_debug("            info=");
			for (i = 0; i < s3info[ns3info].len - 1; i++) {
				tmpinfo[i] = *(ptr16 + 2 + i);
				pr_debug("%04x ", tmpinfo[i]);
			}
			pr_debug("\n");
			ns3info++;
			if (ns3info == S3INFO_MAX) {
				pr_err("S3 inforec limit reached - aborting\n");
				return 1;
			}
			break;
		default:	/* Data record */
			/* Plain data: keep a pointer into the ihex record
			 * rather than copying the payload.
			 */
			s3data[ns3data].addr = addr;
			s3data[ns3data].len = len;
			s3data[ns3data].data = (uint8_t *)record->data;
			ns3data++;
			if (ns3data == S3DATA_MAX) {
				pr_err("S3 datarec limit reached - aborting\n");
				return 1;
			}
			break;
		}
		record = ihex_next_binrec(record);
	}
	return 0;
}
/*----------------------------------------------------------------
* writeimage
*
* Takes the chunks, builds p80211 messages and sends them down
* to the driver for writing to the card.
*
* Arguments:
* wlandev device
* fchunk Array of image chunks
* nfchunks Number of image chunks
*
* Returns:
* 0 success
* ~0 failure
*----------------------------------------------------------------
*/
static int writeimage(struct wlandevice *wlandev, struct imgchunk *fchunk,
unsigned int nfchunks)
{
int result = 0;
struct p80211msg_p2req_ramdl_state *rstmsg;
struct p80211msg_p2req_ramdl_write *rwrmsg;
u32 resultcode;
int i;
int j;
unsigned int nwrites;
u32 curroff;
u32 currlen;
u32 currdaddr;
rstmsg = kzalloc(sizeof(*rstmsg), GFP_KERNEL);
rwrmsg = kzalloc(sizeof(*rwrmsg), GFP_KERNEL);
if (!rstmsg || !rwrmsg) {
netdev_err(wlandev->netdev,
"%s: no memory for firmware download, aborting download\n",
__func__);
result = -ENOMEM;
goto free_result;
}
/* Initialize the messages */
strscpy(rstmsg->devname, wlandev->name, sizeof(rstmsg->devname));
rstmsg->msgcode = DIDMSG_P2REQ_RAMDL_STATE;
rstmsg->msglen = sizeof(*rstmsg);
rstmsg->enable.did = DIDMSG_P2REQ_RAMDL_STATE_ENABLE;
rstmsg->exeaddr.did = DIDMSG_P2REQ_RAMDL_STATE_EXEADDR;
rstmsg->resultcode.did = DIDMSG_P2REQ_RAMDL_STATE_RESULTCODE;
rstmsg->enable.status = P80211ENUM_msgitem_status_data_ok;
rstmsg->exeaddr.status = P80211ENUM_msgitem_status_data_ok;
rstmsg->resultcode.status = P80211ENUM_msgitem_status_no_value;
rstmsg->enable.len = sizeof(u32);
rstmsg->exeaddr.len = sizeof(u32);
rstmsg->resultcode.len = sizeof(u32);
strscpy(rwrmsg->devname, wlandev->name, sizeof(rwrmsg->devname));
rwrmsg->msgcode = DIDMSG_P2REQ_RAMDL_WRITE;
rwrmsg->msglen = sizeof(*rwrmsg);
rwrmsg->addr.did = DIDMSG_P2REQ_RAMDL_WRITE_ADDR;
rwrmsg->len.did = DIDMSG_P2REQ_RAMDL_WRITE_LEN;
rwrmsg->data.did = DIDMSG_P2REQ_RAMDL_WRITE_DATA;
rwrmsg->resultcode.did = DIDMSG_P2REQ_RAMDL_WRITE_RESULTCODE;
rwrmsg->addr.status = P80211ENUM_msgitem_status_data_ok;
rwrmsg->len.status = P80211ENUM_msgitem_status_data_ok;
rwrmsg->data.status = P80211ENUM_msgitem_status_data_ok;
rwrmsg->resultcode.status = P80211ENUM_msgitem_status_no_value;
rwrmsg->addr.len = sizeof(u32);
rwrmsg->len.len = sizeof(u32);
rwrmsg->data.len = WRITESIZE_MAX;
rwrmsg->resultcode.len = sizeof(u32);
/* Send xxx_state(enable) */
pr_debug("Sending dl_state(enable) message.\n");
rstmsg->enable.data = P80211ENUM_truth_true;
rstmsg->exeaddr.data = startaddr;
result = prism2mgmt_ramdl_state(wlandev, rstmsg);
if (result) {
netdev_err(wlandev->netdev,
"%s state enable failed w/ result=%d, aborting download\n",
__func__, result);
goto free_result;
}
resultcode = rstmsg->resultcode.data;
if (resultcode != P80211ENUM_resultcode_success) {
netdev_err(wlandev->netdev,
"%s()->xxxdl_state msg indicates failure, w/ resultcode=%d, aborting download.\n",
__func__, resultcode);
result = 1;
goto free_result;
}
/* Now, loop through the data chunks and send WRITESIZE_MAX data */
for (i = 0; i < nfchunks; i++) {
nwrites = fchunk[i].len / WRITESIZE_MAX;
nwrites += (fchunk[i].len % WRITESIZE_MAX) ? 1 : 0;
curroff = 0;
for (j = 0; j < nwrites; j++) {
/* TODO Move this to a separate function */
int lenleft = fchunk[i].len - (WRITESIZE_MAX * j);
if (fchunk[i].len > WRITESIZE_MAX)
currlen = WRITESIZE_MAX;
else
currlen = lenleft;
curroff = j * WRITESIZE_MAX;
currdaddr = fchunk[i].addr + curroff;
/* Setup the message */
rwrmsg->addr.data = currdaddr;
rwrmsg->len.data = currlen;
memcpy(rwrmsg->data.data,
fchunk[i].data + curroff, currlen);
/* Send flashdl_write(pda) */
pr_debug
("Sending xxxdl_write message addr=%06x len=%d.\n",
currdaddr, currlen);
result = prism2mgmt_ramdl_write(wlandev, rwrmsg);
/* Check the results */
if (result) {
netdev_err(wlandev->netdev,
"%s chunk write failed w/ result=%d, aborting download\n",
__func__, result);
goto free_result;
}
resultcode = rstmsg->resultcode.data;
if (resultcode != P80211ENUM_resultcode_success) {
pr_err("%s()->xxxdl_write msg indicates failure, w/ resultcode=%d, aborting download.\n",
__func__, resultcode);
result = 1;
goto free_result;
}
}
}
/* Send xxx_state(disable) */
pr_debug("Sending dl_state(disable) message.\n");
rstmsg->enable.data = P80211ENUM_truth_false;
rstmsg->exeaddr.data = 0;
result = prism2mgmt_ramdl_state(wlandev, rstmsg);
if (result) {
netdev_err(wlandev->netdev,
"%s state disable failed w/ result=%d, aborting download\n",
__func__, result);
goto free_result;
}
resultcode = rstmsg->resultcode.data;
if (resultcode != P80211ENUM_resultcode_success) {
netdev_err(wlandev->netdev,
"%s()->xxxdl_state msg indicates failure, w/ resultcode=%d, aborting download.\n",
__func__, resultcode);
result = 1;
goto free_result;
}
free_result:
kfree(rstmsg);
kfree(rwrmsg);
return result;
}
static int validate_identity(void)
{
	int i;
	int result = 1;		/* pessimistic: no matching platform seen */
	int trump = 0;		/* set once an S3 platform record matches NIC */

	pr_debug("NIC ID: %#x v%d.%d.%d\n",
		 nicid.id, nicid.major, nicid.minor, nicid.variant);
	pr_debug("MFI ID: %#x v%d %d->%d\n",
		 rfid.id, rfid.variant, rfid.bottom, rfid.top);
	pr_debug("CFI ID: %#x v%d %d->%d\n",
		 macid.id, macid.variant, macid.bottom, macid.top);
	pr_debug("PRI ID: %#x v%d %d->%d\n",
		 priid.id, priid.variant, priid.bottom, priid.top);

	for (i = 0; i < ns3info; i++) {
		switch (s3info[i].type) {
		case 1:
			/* firmware version record - informational only */
			pr_debug("Version:  ID %#x %d.%d.%d\n",
				 s3info[i].info.version.id,
				 s3info[i].info.version.major,
				 s3info[i].info.version.minor,
				 s3info[i].info.version.variant);
			break;
		case 2:
			/* compatibility-range record */
			pr_debug("Compat: Role %#x Id %#x v%d %d->%d\n",
				 s3info[i].info.compat.role,
				 s3info[i].info.compat.id,
				 s3info[i].info.compat.variant,
				 s3info[i].info.compat.bottom,
				 s3info[i].info.compat.top);

			/* MAC compat range */
			if ((s3info[i].info.compat.role == 1) &&
			    (s3info[i].info.compat.id == 2)) {
				if (s3info[i].info.compat.variant !=
				    macid.variant) {
					result = 2;
				}
			}

			/* PRI compat range */
			if ((s3info[i].info.compat.role == 1) &&
			    (s3info[i].info.compat.id == 3)) {
				if ((s3info[i].info.compat.bottom >
				     priid.top) ||
				    (s3info[i].info.compat.top <
				     priid.bottom)) {
					result = 3;
				}
			}
			/* SEC compat range */
			if ((s3info[i].info.compat.role == 1) &&
			    (s3info[i].info.compat.id == 4)) {
				/* FIXME: isn't something missing here? */
			}

			break;
		case 3:
			pr_debug("Seq: %#x\n", s3info[i].info.buildseq);

			break;
		case 4:
			/* platform record: the image only "trumps" if every
			 * identity field matches the NIC; each mismatching
			 * 'continue' skips the trump below and moves on to
			 * the next info record.
			 */
			pr_debug("Platform:  ID %#x %d.%d.%d\n",
				 s3info[i].info.version.id,
				 s3info[i].info.version.major,
				 s3info[i].info.version.minor,
				 s3info[i].info.version.variant);

			if (nicid.id != s3info[i].info.version.id)
				continue;
			if (nicid.major != s3info[i].info.version.major)
				continue;
			if (nicid.minor != s3info[i].info.version.minor)
				continue;
			/* variant must match too, except for NIC id 0x8008 */
			if ((nicid.variant != s3info[i].info.version.variant) &&
			    (nicid.id != 0x8008))
				continue;

			trump = 1;
			break;
		case 0x8001:
			pr_debug("name inforec len %d\n", s3info[i].len);

			break;
		default:
			pr_debug("Unknown inforec type %d\n", s3info[i].type);
		}
	}
	/* walk through */

	/* Accept only when a platform matched and no MAC-range mismatch
	 * (result == 2) was recorded above.
	 */
	if (trump && (result != 2))
		result = 0;
	return result;
}
| linux-master | drivers/staging/wlan-ng/prism2fw.c |
// SPDX-License-Identifier: (GPL-2.0 OR MPL-1.1)
/*
*
* Linux Kernel net device interface
*
* Copyright (C) 1999 AbsoluteValue Systems, Inc. All Rights Reserved.
* --------------------------------------------------------------------
*
* linux-wlan
*
* --------------------------------------------------------------------
*
* Inquiries regarding the linux-wlan Open Source project can be
* made directly to:
*
* AbsoluteValue Systems Inc.
* [email protected]
* http://www.linux-wlan.com
*
* --------------------------------------------------------------------
*
* Portions of the development of this software were funded by
* Intersil Corporation as part of PRISM(R) chipset product development.
*
* --------------------------------------------------------------------
*
* The functions required for a Linux network device are defined here.
*
* --------------------------------------------------------------------
*/
#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/sched.h>
#include <linux/types.h>
#include <linux/skbuff.h>
#include <linux/slab.h>
#include <linux/proc_fs.h>
#include <linux/interrupt.h>
#include <linux/netdevice.h>
#include <linux/kmod.h>
#include <linux/if_arp.h>
#include <linux/wireless.h>
#include <linux/sockios.h>
#include <linux/etherdevice.h>
#include <linux/if_ether.h>
#include <linux/byteorder/generic.h>
#include <linux/bitops.h>
#include <linux/uaccess.h>
#include <asm/byteorder.h>
#ifdef SIOCETHTOOL
#include <linux/ethtool.h>
#endif
#include <net/iw_handler.h>
#include <net/net_namespace.h>
#include <net/cfg80211.h>
#include "p80211types.h"
#include "p80211hdr.h"
#include "p80211conv.h"
#include "p80211mgmt.h"
#include "p80211msg.h"
#include "p80211netdev.h"
#include "p80211ioctl.h"
#include "p80211req.h"
#include "p80211metastruct.h"
#include "p80211metadef.h"
#include "cfg80211.c"
/* netdevice method functions */
static int p80211knetdev_init(struct net_device *netdev);
static int p80211knetdev_open(struct net_device *netdev);
static int p80211knetdev_stop(struct net_device *netdev);
static netdev_tx_t p80211knetdev_hard_start_xmit(struct sk_buff *skb,
						 struct net_device *netdev);
static void p80211knetdev_set_multicast_list(struct net_device *dev);
static int p80211knetdev_siocdevprivate(struct net_device *dev, struct ifreq *ifr,
					void __user *data, int cmd);
static int p80211knetdev_set_mac_address(struct net_device *dev, void *addr);
static void p80211knetdev_tx_timeout(struct net_device *netdev, unsigned int txqueue);
static int p80211_rx_typedrop(struct wlandevice *wlandev, u16 fc);

/* Transmit watchdog timeout in milliseconds (writable module parameter). */
int wlan_watchdog = 5000;
module_param(wlan_watchdog, int, 0644);
MODULE_PARM_DESC(wlan_watchdog, "transmit timeout in milliseconds");

/* Non-zero permits configuration writes via wireless extensions. */
int wlan_wext_write = 1;
module_param(wlan_wext_write, int, 0644);
MODULE_PARM_DESC(wlan_wext_write, "enable write wireless extensions");
/*----------------------------------------------------------------
* p80211knetdev_init
*
* Init method for a Linux netdevice. Called in response to
* register_netdev.
*
* Arguments:
* none
*
* Returns:
* nothing
*----------------------------------------------------------------
*/
static int p80211knetdev_init(struct net_device *netdev)
{
	/* register_netdev invokes this, but all of the probe/setup work
	 * was already performed by the MSD and the create_kdev function
	 * before registration, so nothing remains to do here.
	 */
	return 0;
}
/*----------------------------------------------------------------
* p80211knetdev_open
*
* Linux netdevice open method. Following a successful call here,
* the device is supposed to be ready for tx and rx. In our
* situation that may not be entirely true due to the state of the
* MAC below.
*
* Arguments:
* netdev Linux network device structure
*
* Returns:
* zero on success, non-zero otherwise
*----------------------------------------------------------------
*/
static int p80211knetdev_open(struct net_device *netdev)
{
	struct wlandevice *wlandev = netdev->ml_priv;
	int ret;

	/* The MSD must be fully running before the interface can open. */
	if (wlandev->msdstate != WLAN_MSD_RUNNING)
		return -ENODEV;

	/* Without an MSD open handler the device cannot come up yet. */
	if (!wlandev->open)
		return -EAGAIN;

	ret = wlandev->open(wlandev);
	if (ret == 0) {
		/* MSD is up: allow tx and mark the device open. */
		netif_start_queue(wlandev->netdev);
		wlandev->state = WLAN_DEVICE_OPEN;
	}

	return ret;
}
/*----------------------------------------------------------------
* p80211knetdev_stop
*
* Linux netdevice stop (close) method. Following this call,
* no frames should go up or down through this interface.
*
* Arguments:
* netdev Linux network device structure
*
* Returns:
* zero on success, non-zero otherwise
*----------------------------------------------------------------
*/
static int p80211knetdev_stop(struct net_device *netdev)
{
	struct wlandevice *wlandev = netdev->ml_priv;
	int ret = 0;

	/* Let the MSD shut down first if it provides a close handler;
	 * its status is what we report back to the caller.
	 */
	if (wlandev->close)
		ret = wlandev->close(wlandev);

	netif_stop_queue(wlandev->netdev);
	wlandev->state = WLAN_DEVICE_CLOSED;

	return ret;
}
/*----------------------------------------------------------------
* p80211netdev_rx
*
* Frame receive function called by the mac specific driver.
*
* Arguments:
* wlandev WLAN network device structure
* skb skbuff containing a full 802.11 frame.
* Returns:
* nothing
* Side effects:
*
*----------------------------------------------------------------
*/
void p80211netdev_rx(struct wlandevice *wlandev, struct sk_buff *skb)
{
	/* Enqueue for post-irq processing */
	/* The rx_bh tasklet (p80211netdev_rx_bh) drains nsd_rxq later,
	 * outside interrupt context.
	 */
	skb_queue_tail(&wlandev->nsd_rxq, skb);
	tasklet_schedule(&wlandev->rx_bh);
}
#define CONV_TO_ETHER_SKIPPED 0x01
#define CONV_TO_ETHER_FAILED 0x02
/**
* p80211_convert_to_ether - conversion from 802.11 frame to ethernet frame
* @wlandev: pointer to WLAN device
* @skb: pointer to socket buffer
*
* Returns: 0 if conversion succeeded
* CONV_TO_ETHER_FAILED if conversion failed
* CONV_TO_ETHER_SKIPPED if frame is ignored
*/
static int p80211_convert_to_ether(struct wlandevice *wlandev,
struct sk_buff *skb)
{
struct p80211_hdr *hdr;
hdr = (struct p80211_hdr *)skb->data;
if (p80211_rx_typedrop(wlandev, le16_to_cpu(hdr->frame_control)))
return CONV_TO_ETHER_SKIPPED;
/* perform mcast filtering: allow my local address through but reject
* anything else that isn't multicast
*/
if (wlandev->netdev->flags & IFF_ALLMULTI) {
if (!ether_addr_equal_unaligned(wlandev->netdev->dev_addr,
hdr->address1)) {
if (!is_multicast_ether_addr(hdr->address1))
return CONV_TO_ETHER_SKIPPED;
}
}
if (skb_p80211_to_ether(wlandev, wlandev->ethconv, skb) == 0) {
wlandev->netdev->stats.rx_packets++;
wlandev->netdev->stats.rx_bytes += skb->len;
netif_rx(skb);
return 0;
}
netdev_dbg(wlandev->netdev, "%s failed.\n", __func__);
return CONV_TO_ETHER_FAILED;
}
/**
* p80211netdev_rx_bh - deferred processing of all received frames
*
* @t: pointer to the tasklet associated with this handler
*/
static void p80211netdev_rx_bh(struct tasklet_struct *t)
{
	struct wlandevice *wlandev = from_tasklet(wlandev, t, rx_bh);
	struct net_device *dev = wlandev->netdev;
	struct sk_buff *skb;

	/* Drain everything the irq path queued for us. */
	while ((skb = skb_dequeue(&wlandev->nsd_rxq)) != NULL) {
		if (wlandev->state != WLAN_DEVICE_OPEN) {
			/* Interface is down: drop the frame. */
			dev_kfree_skb(skb);
			continue;
		}

		if (dev->type == ARPHRD_ETHER) {
			/* On success the skb is consumed by netif_rx();
			 * only failed/skipped conversions are freed here.
			 */
			if (p80211_convert_to_ether(wlandev, skb))
				dev_kfree_skb(skb);
			continue;
		}

		/* RAW frame; we shouldn't convert it */
		/* XXX Append the Prism Header here instead. */

		/* set up various data fields */
		skb->dev = dev;
		skb_reset_mac_header(skb);
		skb->ip_summed = CHECKSUM_NONE;
		skb->pkt_type = PACKET_OTHERHOST;
		skb->protocol = htons(ETH_P_80211_RAW);

		dev->stats.rx_packets++;
		dev->stats.rx_bytes += skb->len;
		netif_rx(skb);
	}
}
/*----------------------------------------------------------------
* p80211knetdev_hard_start_xmit
*
* Linux netdevice method for transmitting a frame.
*
* Arguments:
* skb Linux sk_buff containing the frame.
* netdev Linux netdevice.
*
* Side effects:
 * If the lower layers report that buffers are full, netdev->tbusy
 * will be set to prevent higher layers from sending more traffic.
*
* Note: If this function returns non-zero, higher layers retain
* ownership of the skb.
*
* Returns:
* zero on success, non-zero on failure.
*----------------------------------------------------------------
*/
static netdev_tx_t p80211knetdev_hard_start_xmit(struct sk_buff *skb,
						 struct net_device *netdev)
{
	int result = 0;
	int txresult;
	struct wlandevice *wlandev = netdev->ml_priv;
	struct p80211_hdr p80211_hdr;
	struct p80211_metawep p80211_wep;

	p80211_wep.data = NULL;

	if (!skb)
		return NETDEV_TX_OK;

	/* NOTE(review): the early 'result = 1' error paths below return a
	 * value that is neither NETDEV_TX_OK nor NETDEV_TX_BUSY, and the
	 * skb is not freed on them (see 'if (!result)' at the end) -
	 * looks questionable; confirm against the net core's
	 * expectations before changing.
	 */
	if (wlandev->state != WLAN_DEVICE_OPEN) {
		result = 1;
		goto failed;
	}

	memset(&p80211_hdr, 0, sizeof(p80211_hdr));
	memset(&p80211_wep, 0, sizeof(p80211_wep));

	if (netif_queue_stopped(netdev)) {
		netdev_dbg(netdev, "called when queue stopped.\n");
		result = 1;
		goto failed;
	}

	/* Stop the queue; it is re-woken below on a successful tx. */
	netif_stop_queue(netdev);

	/* Check to see that a valid mode is set */
	switch (wlandev->macmode) {
	case WLAN_MACMODE_IBSS_STA:
	case WLAN_MACMODE_ESS_STA:
	case WLAN_MACMODE_ESS_AP:
		break;
	default:
		/* Mode isn't set yet, just drop the frame
		 * and return success .
		 * TODO: we need a saner way to handle this
		 */
		if (be16_to_cpu(skb->protocol) != ETH_P_80211_RAW) {
			netif_start_queue(wlandev->netdev);
			netdev_notice(netdev, "Tx attempt prior to association, frame dropped.\n");
			netdev->stats.tx_dropped++;
			result = 0;
			goto failed;
		}
		break;
	}

	/* Check for raw transmits */
	/* Raw 802.11 frames require CAP_NET_ADMIN and already carry a
	 * full 802.11 header at the front of the skb.
	 */
	if (be16_to_cpu(skb->protocol) == ETH_P_80211_RAW) {
		if (!capable(CAP_NET_ADMIN)) {
			result = 1;
			goto failed;
		}
		/* move the header over */
		memcpy(&p80211_hdr, skb->data, sizeof(p80211_hdr));
		skb_pull(skb, sizeof(p80211_hdr));
	} else {
		/* Ethernet frame: convert to 802.11; may allocate a WEP
		 * buffer in p80211_wep.data, freed at 'failed:' below.
		 */
		if (skb_ether_to_p80211
		    (wlandev, wlandev->ethconv, skb, &p80211_hdr,
		     &p80211_wep) != 0) {
			/* convert failed */
			netdev_dbg(netdev, "ether_to_80211(%d) failed.\n",
				   wlandev->ethconv);
			result = 1;
			goto failed;
		}
	}
	if (!wlandev->txframe) {
		result = 1;
		goto failed;
	}

	netif_trans_update(netdev);

	netdev->stats.tx_packets++;
	/* count only the packet payload */
	netdev->stats.tx_bytes += skb->len;

	/* Hand the frame to the MSD; txresult encodes buffer state. */
	txresult = wlandev->txframe(wlandev, skb, &p80211_hdr, &p80211_wep);

	if (txresult == 0) {
		/* success and more buf */
		/* avail, re: hw_txdata */
		netif_wake_queue(wlandev->netdev);
		result = NETDEV_TX_OK;
	} else if (txresult == 1) {
		/* success, no more avail */
		netdev_dbg(netdev, "txframe success, no more bufs\n");
		/* netdev->tbusy = 1; don't set here, irqhdlr */
		/* may have already cleared it */
		result = NETDEV_TX_OK;
	} else if (txresult == 2) {
		/* alloc failure, drop frame */
		netdev_dbg(netdev, "txframe returned alloc_fail\n");
		result = NETDEV_TX_BUSY;
	} else {
		/* buffer full or queue busy, drop frame. */
		netdev_dbg(netdev, "txframe returned full or busy\n");
		result = NETDEV_TX_BUSY;
	}

failed:
	/* Free up the WEP buffer if it's not the same as the skb */
	if ((p80211_wep.data) && (p80211_wep.data != skb->data))
		kfree_sensitive(p80211_wep.data);

	/* we always free the skb here, never in a lower level. */
	if (!result)
		dev_kfree_skb(skb);

	return result;
}
/*----------------------------------------------------------------
* p80211knetdev_set_multicast_list
*
* Called from higher layers whenever there's a need to set/clear
* promiscuous mode or rewrite the multicast list.
*
* Arguments:
* none
*
* Returns:
* nothing
*----------------------------------------------------------------
*/
static void p80211knetdev_set_multicast_list(struct net_device *dev)
{
	struct wlandevice *wlandev = dev->ml_priv;

	/* TODO: real multicast support as well */
	if (!wlandev->set_multicast_list)
		return;

	wlandev->set_multicast_list(wlandev, dev);
}
/*----------------------------------------------------------------
* p80211knetdev_siocdevprivate
*
* Handle an ioctl call on one of our devices. Everything Linux
* ioctl specific is done here. Then we pass the contents of the
* ifr->data to the request message handler.
*
* Arguments:
* dev Linux kernel netdevice
* ifr Our private ioctl request structure, typed for the
* generic struct ifreq so we can use ptr to func
* w/o cast.
*
* Returns:
* zero on success, a negative errno on failure. Possible values:
* -ENETDOWN Device isn't up.
* -EBUSY cmd already in progress
* -ETIME p80211 cmd timed out (MSD may have its own timers)
* -EFAULT memory fault copying msg from user buffer
* -ENOMEM unable to allocate kernel msg buffer
* -EINVAL bad magic, it the cmd really for us?
 * -EINTR sleeping on cmd, awakened by signal, cmd cancelled.
*
* Call Context:
* Process thread (ioctl caller). TODO: SMP support may require
* locks.
*----------------------------------------------------------------
*/
static int p80211knetdev_siocdevprivate(struct net_device *dev,
					struct ifreq *ifr,
					void __user *data, int cmd)
{
	struct p80211ioctl_req *req = (struct p80211ioctl_req *)ifr;
	struct wlandevice *wlandev = dev->ml_priv;
	u8 *msgbuf;
	int ret;

	netdev_dbg(dev, "rx'd ioctl, cmd=%d, len=%d\n", cmd, req->len);

	if (in_compat_syscall())
		return -EOPNOTSUPP;

	/* Test the magic, assume ifr is good if it's there */
	if (req->magic != P80211_IOCTL_MAGIC)
		return -EINVAL;

	/* P80211_IFTEST is a presence probe only; nothing else to do. */
	if (cmd == P80211_IFTEST)
		return 0;

	if (cmd != P80211_IFREQ)
		return -EINVAL;

	/* Pull the request message into kernel space ... */
	msgbuf = memdup_user(data, req->len);
	if (IS_ERR(msgbuf))
		return PTR_ERR(msgbuf);

	/* ... run it, and copy the (possibly updated) message back. */
	ret = p80211req_dorequest(wlandev, msgbuf);
	if (ret == 0 && copy_to_user(data, msgbuf, req->len))
		ret = -EFAULT;

	kfree(msgbuf);

	/* If allocate,copyfrom or copyto fails, return errno */
	return ret;
}
/*----------------------------------------------------------------
* p80211knetdev_set_mac_address
*
* Handles the ioctl for changing the MACAddress of a netdevice
*
* references: linux/netdevice.h and drivers/net/net_init.c
*
* NOTE: [MSM] We only prevent address changes when the netdev is
* up. We don't control anything based on dot11 state. If the
* address is changed on a STA that's currently associated, you
* will probably lose the ability to send and receive data frames.
* Just be aware. Therefore, this should usually only be done
* prior to scan/join/auth/assoc.
*
* Arguments:
* dev netdevice struct
* addr the new MACAddress (a struct)
*
* Returns:
* zero on success, a negative errno on failure. Possible values:
 * -EBUSY device is busy (cmd not possible)
* -and errors returned by: p80211req_dorequest(..)
*
* by: Collin R. Mulliner <[email protected]>
*----------------------------------------------------------------
*/
static int p80211knetdev_set_mac_address(struct net_device *dev, void *addr)
{
	struct sockaddr *new_addr = addr;
	struct p80211msg_dot11req_mibset dot11req;
	struct p80211item_unk392 *mibattr;
	struct p80211item_pstr6 *macaddr;
	struct p80211item_uint32 *resultcode;
	int result;

	/* If we're running, we don't allow MAC address changes */
	if (netif_running(dev))
		return -EBUSY;

	/* Set up some convenience pointers. */
	/* macaddr aliases the mibattribute payload area. */
	mibattr = &dot11req.mibattribute;
	macaddr = (struct p80211item_pstr6 *)&mibattr->data;
	resultcode = &dot11req.resultcode;

	/* Set up a dot11req_mibset */
	memset(&dot11req, 0, sizeof(dot11req));
	dot11req.msgcode = DIDMSG_DOT11REQ_MIBSET;
	dot11req.msglen = sizeof(dot11req);
	memcpy(dot11req.devname,
	       ((struct wlandevice *)dev->ml_priv)->name,
	       WLAN_DEVNAMELEN_MAX - 1);

	/* Set up the mibattribute argument */
	mibattr->did = DIDMSG_DOT11REQ_MIBSET_MIBATTRIBUTE;
	mibattr->status = P80211ENUM_msgitem_status_data_ok;
	mibattr->len = sizeof(mibattr->data);

	/* Fill in the new MAC address as the dot11MACAddress MIB item. */
	macaddr->did = DIDMIB_DOT11MAC_OPERATIONTABLE_MACADDRESS;
	macaddr->status = P80211ENUM_msgitem_status_data_ok;
	macaddr->len = sizeof(macaddr->data);
	macaddr->data.len = ETH_ALEN;
	memcpy(&macaddr->data.data, new_addr->sa_data, ETH_ALEN);

	/* Set up the resultcode argument */
	resultcode->did = DIDMSG_DOT11REQ_MIBSET_RESULTCODE;
	resultcode->status = P80211ENUM_msgitem_status_no_value;
	resultcode->len = sizeof(resultcode->data);
	resultcode->data = 0;

	/* now fire the request */
	result = p80211req_dorequest(dev->ml_priv, (u8 *)&dot11req);

	/* If the request wasn't successful, report an error and don't
	 * change the netdev address
	 */
	if (result != 0 || resultcode->data != P80211ENUM_resultcode_success) {
		netdev_err(dev, "Low-level driver failed dot11req_mibset(dot11MACAddress).\n");
		result = -EADDRNOTAVAIL;
	} else {
		/* everything's ok, change the addr in netdev */
		eth_hw_addr_set(dev, new_addr->sa_data);
	}

	return result;
}
/* net_device callbacks for a p80211 WLAN netdevice. */
static const struct net_device_ops p80211_netdev_ops = {
	.ndo_init = p80211knetdev_init,
	.ndo_open = p80211knetdev_open,
	.ndo_stop = p80211knetdev_stop,
	.ndo_start_xmit = p80211knetdev_hard_start_xmit,
	.ndo_set_rx_mode = p80211knetdev_set_multicast_list,
	.ndo_siocdevprivate = p80211knetdev_siocdevprivate,
	.ndo_set_mac_address = p80211knetdev_set_mac_address,
	.ndo_tx_timeout = p80211knetdev_tx_timeout,
	.ndo_validate_addr = eth_validate_addr,
};
/*----------------------------------------------------------------
* wlan_setup
*
* Roughly matches the functionality of ether_setup. Here
* we set up any members of the wlandevice structure that are common
* to all devices. Additionally, we allocate a linux 'struct device'
* and perform the same setup as ether_setup.
*
* Note: It's important that the caller have setup the wlandev->name
* ptr prior to calling this function.
*
* Arguments:
* wlandev ptr to the wlandev structure for the
* interface.
* physdev ptr to usb device
* Returns:
* zero on success, non-zero otherwise.
* Call Context:
* Should be process thread. We'll assume it might be
* interrupt though. When we add support for statically
* compiled drivers, this function will be called in the
* context of the kernel startup code.
*----------------------------------------------------------------
*/
int wlan_setup(struct wlandevice *wlandev, struct device *physdev)
{
	struct net_device *netdev;
	struct wiphy *wiphy;
	struct wireless_dev *wdev;

	/* Common wlandev state. */
	wlandev->state = WLAN_DEVICE_CLOSED;
	wlandev->ethconv = WLAN_ETHCONV_8021h;
	wlandev->macmode = WLAN_MACMODE_NONE;

	/* rx queue and its bottom-half handler */
	skb_queue_head_init(&wlandev->nsd_rxq);
	tasklet_setup(&wlandev->rx_bh, p80211netdev_rx_bh);

	/* Allocate and initialize the wiphy. */
	wiphy = wlan_create_wiphy(physdev, wlandev);
	if (!wiphy) {
		dev_err(physdev, "Failed to alloc wiphy.\n");
		return 1;
	}

	/* Allocate the netdev, with a wireless_dev as its private area. */
	netdev = alloc_netdev(sizeof(struct wireless_dev), "wlan%d",
			      NET_NAME_UNKNOWN, ether_setup);
	if (!netdev) {
		dev_err(physdev, "Failed to alloc netdev.\n");
		wlan_free_wiphy(wiphy);
		return 1;
	}

	wlandev->netdev = netdev;
	netdev->ml_priv = wlandev;
	netdev->netdev_ops = &p80211_netdev_ops;

	wdev = netdev_priv(netdev);
	wdev->wiphy = wiphy;
	wdev->iftype = NL80211_IFTYPE_STATION;
	netdev->ieee80211_ptr = wdev;

	netdev->min_mtu = 68;
	/* 2312 is max 802.11 payload, 20 is overhead,
	 * (ether + llc + snap) and another 8 for wep.
	 */
	netdev->max_mtu = (2312 - 20 - 8);

	netif_stop_queue(netdev);
	netif_carrier_off(netdev);

	return 0;
}
/*----------------------------------------------------------------
* wlan_unsetup
*
* This function is paired with the wlan_setup routine. It should
* be called after unregister_wlandev. Basically, all it does is
* free the 'struct device' that's associated with the wlandev.
* We do it here because the 'struct device' isn't allocated
* explicitly in the driver code, it's done in wlan_setup. To
* do the free in the driver might seem like 'magic'.
*
* Arguments:
* wlandev ptr to the wlandev structure for the
* interface.
* Call Context:
* Should be process thread. We'll assume it might be
* interrupt though. When we add support for statically
* compiled drivers, this function will be called in the
* context of the kernel startup code.
*----------------------------------------------------------------
*/
void wlan_unsetup(struct wlandevice *wlandev)
{
	struct wireless_dev *wdev;

	/* Stop the rx bottom half before tearing the netdev down. */
	tasklet_kill(&wlandev->rx_bh);

	if (!wlandev->netdev)
		return;

	/* Release the wiphy first, then the netdev it hangs off. */
	wdev = netdev_priv(wlandev->netdev);
	if (wdev->wiphy)
		wlan_free_wiphy(wdev->wiphy);
	free_netdev(wlandev->netdev);
	wlandev->netdev = NULL;
}
/*----------------------------------------------------------------
* register_wlandev
*
* Roughly matches the functionality of register_netdev. This function
* is called after the driver has successfully probed and set up the
* resources for the device. It's now ready to become a named device
* in the Linux system.
*
* First we allocate a name for the device (if not already set), then
* we call the Linux function register_netdevice.
*
* Arguments:
* wlandev ptr to the wlandev structure for the
* interface.
* Returns:
* zero on success, non-zero otherwise.
* Call Context:
* Can be either interrupt or not.
*----------------------------------------------------------------
*/
/* Register the wlandev's netdevice with the kernel (assigns its name). */
int register_wlandev(struct wlandevice *wlandev)
{
	return register_netdev(wlandev->netdev);
}
/*----------------------------------------------------------------
* unregister_wlandev
*
* Roughly matches the functionality of unregister_netdev. This
* function is called to remove a named device from the system.
*
* First we tell linux that the device should no longer exist.
* Then we remove it from the list of known wlan devices.
*
* Arguments:
* wlandev ptr to the wlandev structure for the
* interface.
* Returns:
* zero on success, non-zero otherwise.
* Call Context:
* Can be either interrupt or not.
*----------------------------------------------------------------
*/
int unregister_wlandev(struct wlandevice *wlandev)
{
	struct sk_buff *skb;

	unregister_netdev(wlandev->netdev);

	/* Drain and free anything still sitting in the rx queue. */
	for (;;) {
		skb = skb_dequeue(&wlandev->nsd_rxq);
		if (!skb)
			break;
		dev_kfree_skb(skb);
	}

	return 0;
}
/*----------------------------------------------------------------
* p80211netdev_hwremoved
*
* Hardware removed notification. This function should be called
* immediately after an MSD has detected that the underlying hardware
* has been yanked out from under us. The primary things we need
* to do are:
* - Mark the wlandev
* - Prevent any further traffic from the knetdev i/f
* - Prevent any further requests from mgmt i/f
* - If there are any waitq'd mgmt requests or mgmt-frame exchanges,
* shut them down.
* - Call the MSD hwremoved function.
*
* The remainder of the cleanup will be handled by unregister().
* Our primary goal here is to prevent as much tickling of the MSD
* as possible since the MSD is already in a 'wounded' state.
*
* TODO: As new features are added, this function should be
* updated.
*
* Arguments:
* wlandev WLAN network device structure
* Returns:
* nothing
* Side effects:
*
* Call context:
* Usually interrupt.
*----------------------------------------------------------------
*/
void p80211netdev_hwremoved(struct wlandevice *wlandev)
{
	struct net_device *netdev = wlandev->netdev;

	/* Mark the device so later requests know the hardware is gone. */
	wlandev->hwremoved = 1;

	/* Only stop the tx queue if the interface was actually up. */
	if (wlandev->state == WLAN_DEVICE_OPEN)
		netif_stop_queue(netdev);

	netif_device_detach(netdev);
}
/*----------------------------------------------------------------
* p80211_rx_typedrop
*
* Classifies the frame, increments the appropriate counter, and
* returns 0|1|2 indicating whether the driver should handle, ignore, or
* drop the frame
*
* Arguments:
* wlandev wlan device structure
* fc frame control field
*
* Returns:
* zero if the frame should be handled by the driver,
* one if the frame should be ignored
* anything else means we drop it.
*
* Side effects:
*
* Call context:
* interrupt
*----------------------------------------------------------------
*/
static int p80211_rx_typedrop(struct wlandevice *wlandev, u16 fc)
{
	u16 ftype;
	u16 fstype;
	int drop = 0;	/* 0 = handle, 1 = ignore, 2 = drop (counted) */
	/* Classify frame, increment counter */
	ftype = WLAN_GET_FC_FTYPE(fc);
	fstype = WLAN_GET_FC_FSTYPE(fc);
	switch (ftype) {
	case WLAN_FTYPE_MGMT:
		/* In promiscuous/allmulti mode mgmt frames are passed up
		 * unclassified instead of being counted and dropped.
		 */
		if ((wlandev->netdev->flags & IFF_PROMISC) ||
		    (wlandev->netdev->flags & IFF_ALLMULTI)) {
			drop = 1;
			break;
		}
		netdev_dbg(wlandev->netdev, "rx'd mgmt:\n");
		wlandev->rx.mgmt++;
		/* Per-subtype management frame counters. */
		switch (fstype) {
		case WLAN_FSTYPE_ASSOCREQ:
			wlandev->rx.assocreq++;
			break;
		case WLAN_FSTYPE_ASSOCRESP:
			wlandev->rx.assocresp++;
			break;
		case WLAN_FSTYPE_REASSOCREQ:
			wlandev->rx.reassocreq++;
			break;
		case WLAN_FSTYPE_REASSOCRESP:
			wlandev->rx.reassocresp++;
			break;
		case WLAN_FSTYPE_PROBEREQ:
			wlandev->rx.probereq++;
			break;
		case WLAN_FSTYPE_PROBERESP:
			wlandev->rx.proberesp++;
			break;
		case WLAN_FSTYPE_BEACON:
			wlandev->rx.beacon++;
			break;
		case WLAN_FSTYPE_ATIM:
			wlandev->rx.atim++;
			break;
		case WLAN_FSTYPE_DISASSOC:
			wlandev->rx.disassoc++;
			break;
		case WLAN_FSTYPE_AUTHEN:
			wlandev->rx.authen++;
			break;
		case WLAN_FSTYPE_DEAUTHEN:
			wlandev->rx.deauthen++;
			break;
		default:
			wlandev->rx.mgmt_unknown++;
			break;
		}
		/* Mgmt frames are never delivered to the stack. */
		drop = 2;
		break;
	case WLAN_FTYPE_CTL:
		/* Same promiscuous/allmulti pass-through as for mgmt. */
		if ((wlandev->netdev->flags & IFF_PROMISC) ||
		    (wlandev->netdev->flags & IFF_ALLMULTI)) {
			drop = 1;
			break;
		}
		netdev_dbg(wlandev->netdev, "rx'd ctl:\n");
		wlandev->rx.ctl++;
		/* Per-subtype control frame counters. */
		switch (fstype) {
		case WLAN_FSTYPE_PSPOLL:
			wlandev->rx.pspoll++;
			break;
		case WLAN_FSTYPE_RTS:
			wlandev->rx.rts++;
			break;
		case WLAN_FSTYPE_CTS:
			wlandev->rx.cts++;
			break;
		case WLAN_FSTYPE_ACK:
			wlandev->rx.ack++;
			break;
		case WLAN_FSTYPE_CFEND:
			wlandev->rx.cfend++;
			break;
		case WLAN_FSTYPE_CFENDCFACK:
			wlandev->rx.cfendcfack++;
			break;
		default:
			wlandev->rx.ctl_unknown++;
			break;
		}
		/* Control frames are never delivered to the stack. */
		drop = 2;
		break;
	case WLAN_FTYPE_DATA:
		/* Data frames are counted but handled (drop stays 0). */
		wlandev->rx.data++;
		switch (fstype) {
		case WLAN_FSTYPE_DATAONLY:
			wlandev->rx.dataonly++;
			break;
		case WLAN_FSTYPE_DATA_CFACK:
			wlandev->rx.data_cfack++;
			break;
		case WLAN_FSTYPE_DATA_CFPOLL:
			wlandev->rx.data_cfpoll++;
			break;
		case WLAN_FSTYPE_DATA_CFACK_CFPOLL:
			wlandev->rx.data__cfack_cfpoll++;
			break;
		case WLAN_FSTYPE_NULL:
			netdev_dbg(wlandev->netdev, "rx'd data:null\n");
			wlandev->rx.null++;
			break;
		case WLAN_FSTYPE_CFACK:
			netdev_dbg(wlandev->netdev, "rx'd data:cfack\n");
			wlandev->rx.cfack++;
			break;
		case WLAN_FSTYPE_CFPOLL:
			netdev_dbg(wlandev->netdev, "rx'd data:cfpoll\n");
			wlandev->rx.cfpoll++;
			break;
		case WLAN_FSTYPE_CFACK_CFPOLL:
			netdev_dbg(wlandev->netdev, "rx'd data:cfack_cfpoll\n");
			wlandev->rx.cfack_cfpoll++;
			break;
		default:
			wlandev->rx.data_unknown++;
			break;
		}
		break;
	}
	return drop;
}
static void p80211knetdev_tx_timeout(struct net_device *netdev, unsigned int txqueue)
{
	struct wlandevice *wlandev = netdev->ml_priv;

	/* Without an MSD-supplied handler, just complain and restart
	 * the tx queue.
	 */
	if (!wlandev->tx_timeout) {
		netdev_warn(netdev, "Implement tx_timeout for %s\n",
			    wlandev->nsdname);
		netif_wake_queue(wlandev->netdev);
		return;
	}

	/* Delegate recovery to the MSD. */
	wlandev->tx_timeout(wlandev);
}
| linux-master | drivers/staging/wlan-ng/p80211netdev.c |
// SPDX-License-Identifier: GPL-2.0
#include "hfa384x_usb.c"
#include "prism2mgmt.c"
#include "prism2mib.c"
#include "prism2sta.c"
#include "prism2fw.c"
/* Shorthand for a usb_device_id entry carrying the product name. */
#define PRISM_DEV(vid, pid, name) \
	{ USB_DEVICE(vid, pid), \
	.driver_info = (unsigned long)name }
/* USB VID/PID table of known Prism2.x-based adapters this driver binds to. */
static const struct usb_device_id usb_prism_tbl[] = {
	PRISM_DEV(0x04bb, 0x0922, "IOData AirPort WN-B11/USBS"),
	PRISM_DEV(0x07aa, 0x0012, "Corega USB Wireless LAN Stick-11"),
	PRISM_DEV(0x09aa, 0x3642, "Prism2.x 11Mbps USB WLAN Adapter"),
	PRISM_DEV(0x1668, 0x0408, "Actiontec Prism2.5 11Mbps USB WLAN Adapter"),
	PRISM_DEV(0x1668, 0x0421, "Actiontec Prism2.5 11Mbps USB WLAN Adapter"),
	PRISM_DEV(0x1915, 0x2236, "Linksys WUSB11v3.0 11Mbps USB WLAN Adapter"),
	PRISM_DEV(0x066b, 0x2212, "Linksys WUSB11v2.5 11Mbps USB WLAN Adapter"),
	PRISM_DEV(0x066b, 0x2213, "Linksys WUSB12v1.1 11Mbps USB WLAN Adapter"),
	PRISM_DEV(0x0411, 0x0016, "Melco WLI-USB-S11 11Mbps WLAN Adapter"),
	PRISM_DEV(0x08de, 0x7a01, "PRISM25 USB IEEE 802.11 Mini Adapter"),
	PRISM_DEV(0x8086, 0x1111, "Intel PRO/Wireless 2011B USB LAN Adapter"),
	PRISM_DEV(0x0d8e, 0x7a01, "PRISM25 IEEE 802.11 Mini USB Adapter"),
	PRISM_DEV(0x045e, 0x006e, "Microsoft MN510 USB Wireless Adapter"),
	PRISM_DEV(0x0967, 0x0204, "Acer Warplink USB Adapter"),
	PRISM_DEV(0x0cde, 0x0002, "Z-Com 725/726 Prism2.5 USB/USB Integrated"),
	PRISM_DEV(0x0cde, 0x0005, "Z-Com Xl735 USB Wireless 802.11b Adapter"),
	PRISM_DEV(0x413c, 0x8100, "Dell TrueMobile 1180 USB Wireless Adapter"),
	PRISM_DEV(0x0b3b, 0x1601, "ALLNET 0193 11Mbps USB WLAN Adapter"),
	PRISM_DEV(0x0b3b, 0x1602, "ZyXEL ZyAIR B200 USB Wireless Adapter"),
	PRISM_DEV(0x0baf, 0x00eb, "USRobotics USR1120 USB Wireless Adapter"),
	PRISM_DEV(0x0411, 0x0027, "Melco WLI-USB-KS11G 11Mbps WLAN Adapter"),
	PRISM_DEV(0x04f1, 0x3009, "JVC MP-XP7250 Builtin USB WLAN Adapter"),
	PRISM_DEV(0x0846, 0x4110, "NetGear MA111"),
	PRISM_DEV(0x03f3, 0x0020, "Adaptec AWN-8020 USB WLAN Adapter"),
	PRISM_DEV(0x2821, 0x3300, "ASUS-WL140 / Hawking HighDB USB Wireless Adapter"),
	PRISM_DEV(0x2001, 0x3700, "DWL-122 USB Wireless Adapter"),
	PRISM_DEV(0x2001, 0x3702, "DWL-120 Rev F USB Wireless Adapter"),
	PRISM_DEV(0x50c2, 0x4013, "Averatec USB WLAN Adapter"),
	PRISM_DEV(0x2c02, 0x14ea, "Planex GW-US11H USB WLAN Adapter"),
	PRISM_DEV(0x124a, 0x168b, "Airvast PRISM3 USB WLAN Adapter"),
	PRISM_DEV(0x083a, 0x3503, "T-Sinus 111 USB WLAN Adapter"),
	PRISM_DEV(0x0411, 0x0044, "Melco WLI-USB-KB11 11Mbps WLAN Adapter"),
	PRISM_DEV(0x1668, 0x6106, "ROPEX FreeLan USB 802.11b Adapter"),
	PRISM_DEV(0x124a, 0x4017, "Pheenet WL-503IA USB 802.11b Adapter"),
	PRISM_DEV(0x0bb2, 0x0302, "Ambit Microsystems Corp."),
	PRISM_DEV(0x9016, 0x182d, "Sitecom WL-022 USB 802.11b Adapter"),
	PRISM_DEV(0x0543, 0x0f01,
		  "ViewSonic Airsync USB Adapter 11Mbps (Prism2.5)"),
	PRISM_DEV(0x067c, 0x1022,
		  "Siemens SpeedStream 1022 11Mbps USB WLAN Adapter"),
	PRISM_DEV(0x049f, 0x0033,
		  "Compaq/Intel W100 PRO/Wireless 11Mbps multiport WLAN Adapter"),
	{ } /* terminator */
};
MODULE_DEVICE_TABLE(usb, usb_prism_tbl);
/*
 * USB probe: validate the interface's endpoints, allocate the wlandev/hw
 * pair, wire up the hfa384x layer, optionally reset the MAC, load
 * firmware and register the netdevice.
 */
static int prism2sta_probe_usb(struct usb_interface *interface,
			       const struct usb_device_id *id)
{
	struct usb_device *dev;
	struct usb_endpoint_descriptor *bulk_in, *bulk_out;
	struct usb_host_interface *iface_desc = interface->cur_altsetting;
	struct wlandevice *wlandev = NULL;
	struct hfa384x *hw = NULL;
	int result = 0;
	/* Reject interfaces without the expected bulk-in/bulk-out pair. */
	result = usb_find_common_endpoints(iface_desc, &bulk_in, &bulk_out, NULL, NULL);
	if (result)
		goto failed;
	dev = interface_to_usbdev(interface);
	/* NOTE(review): create_wlan() presumably allocates both wlandev and
	 * its ->priv (hw); the failure paths below kfree() both — confirm.
	 */
	wlandev = create_wlan();
	if (!wlandev) {
		dev_err(&interface->dev, "Memory allocation failure.\n");
		result = -EIO;
		goto failed;
	}
	hw = wlandev->priv;
	if (wlan_setup(wlandev, &interface->dev) != 0) {
		dev_err(&interface->dev, "wlan_setup() failed.\n");
		result = -EIO;
		goto failed;
	}
	/* Initialize the hw data */
	hw->endp_in = usb_rcvbulkpipe(dev, bulk_in->bEndpointAddress);
	hw->endp_out = usb_sndbulkpipe(dev, bulk_out->bEndpointAddress);
	hfa384x_create(hw, dev);
	hw->wlandev = wlandev;
	/* Register the wlandev, this gets us a name and registers the
	 * linux netdevice.
	 */
	SET_NETDEV_DEV(wlandev->netdev, &interface->dev);
	/* Do a chip-level reset on the MAC */
	if (prism2_doreset) {
		result = hfa384x_corereset(hw,
					   prism2_reset_holdtime,
					   prism2_reset_settletime, 0);
		if (result != 0) {
			result = -EIO;
			dev_err(&interface->dev,
				"hfa384x_corereset() failed.\n");
			goto failed_reset;
		}
	}
	/* Hold a reference on the usb_device for the lifetime of the bind. */
	usb_get_dev(dev);
	wlandev->msdstate = WLAN_MSD_HWPRESENT;
	/* Try and load firmware, then enable card before we register */
	prism2_fwtry(dev, wlandev);
	prism2sta_ifstate(wlandev, P80211ENUM_ifstate_enable);
	if (register_wlandev(wlandev) != 0) {
		dev_err(&interface->dev, "register_wlandev() failed.\n");
		result = -EIO;
		goto failed_register;
	}
	goto done;
/* Error unwinding: drop in reverse order of what was set up above. */
failed_register:
	usb_put_dev(dev);
failed_reset:
	wlan_unsetup(wlandev);
failed:
	kfree(wlandev);
	kfree(hw);
	wlandev = NULL;
done:
	/* On success, stash wlandev; on failure this stores NULL. */
	usb_set_intfdata(interface, wlandev);
	return result;
}
/*
 * USB disconnect: mark the hardware as removed, cancel all outstanding
 * CTLX requests/URBs/timers/work, then unregister and free everything.
 * The teardown order matters: pending CTLXs are collected under the
 * queue lock first so nothing new can be issued, then completed so
 * sleepers wake up, and only then freed.
 */
static void prism2sta_disconnect_usb(struct usb_interface *interface)
{
	struct wlandevice *wlandev;
	wlandev = usb_get_intfdata(interface);
	if (wlandev) {
		LIST_HEAD(cleanlist);
		struct hfa384x_usbctlx *ctlx, *temp;
		unsigned long flags;
		struct hfa384x *hw = wlandev->priv;
		if (!hw)
			goto exit;
		/* Atomically mark the device gone and steal every queued
		 * CTLX so no new work can start.
		 */
		spin_lock_irqsave(&hw->ctlxq.lock, flags);
		p80211netdev_hwremoved(wlandev);
		list_splice_init(&hw->ctlxq.reapable, &cleanlist);
		list_splice_init(&hw->ctlxq.completing, &cleanlist);
		list_splice_init(&hw->ctlxq.pending, &cleanlist);
		list_splice_init(&hw->ctlxq.active, &cleanlist);
		spin_unlock_irqrestore(&hw->ctlxq.lock, flags);
		/* There's no hardware to shutdown, but the driver
		 * might have some tasks that must be stopped before
		 * we can tear everything down.
		 */
		prism2sta_ifstate(wlandev, P80211ENUM_ifstate_disable);
		timer_shutdown_sync(&hw->throttle);
		timer_shutdown_sync(&hw->reqtimer);
		timer_shutdown_sync(&hw->resptimer);
		/* Unlink all the URBs. This "removes the wheels"
		 * from the entire CTLX handling mechanism.
		 */
		usb_kill_urb(&hw->rx_urb);
		usb_kill_urb(&hw->tx_urb);
		usb_kill_urb(&hw->ctlx_urb);
		cancel_work_sync(&hw->completion_bh);
		cancel_work_sync(&hw->reaper_bh);
		cancel_work_sync(&hw->link_bh);
		cancel_work_sync(&hw->commsqual_bh);
		cancel_work_sync(&hw->usb_work);
		/* Now we complete any outstanding commands
		 * and tell everyone who is waiting for their
		 * responses that we have shut down.
		 */
		list_for_each_entry(ctlx, &cleanlist, list)
			complete(&ctlx->done);
		/* Give any outstanding synchronous commands
		 * a chance to complete. All they need to do
		 * is "wake up", so that's easy.
		 * (I'd like a better way to do this, really.)
		 */
		msleep(100);
		/* Now delete the CTLXs, because no-one else can now. */
		list_for_each_entry_safe(ctlx, temp, &cleanlist, list)
			kfree(ctlx);
		/* Unhook the wlandev */
		unregister_wlandev(wlandev);
		wlan_unsetup(wlandev);
		/* Drop the reference taken in probe, then free hw+wlandev. */
		usb_put_dev(hw->usb);
		hfa384x_destroy(hw);
		kfree(hw);
		kfree(wlandev);
	}
exit:
	usb_set_intfdata(interface, NULL);
}
#ifdef CONFIG_PM
static int prism2sta_suspend(struct usb_interface *interface,
			     pm_message_t message)
{
	struct wlandevice *wlandev = usb_get_intfdata(interface);
	struct hfa384x *hw;

	if (!wlandev)
		return -ENODEV;

	hw = wlandev->priv;
	if (!hw)
		return -ENODEV;

	/* Quiesce the interface, then cancel any in-flight USB traffic. */
	prism2sta_ifstate(wlandev, P80211ENUM_ifstate_disable);

	usb_kill_urb(&hw->rx_urb);
	usb_kill_urb(&hw->tx_urb);
	usb_kill_urb(&hw->ctlx_urb);

	return 0;
}
/* Resume: re-reset the MAC (if configured) and re-enable the interface. */
static int prism2sta_resume(struct usb_interface *interface)
{
	int result = 0;
	struct hfa384x *hw = NULL;
	struct wlandevice *wlandev;
	wlandev = usb_get_intfdata(interface);
	if (!wlandev)
		return -ENODEV;
	hw = wlandev->priv;
	if (!hw)
		return -ENODEV;
	/* Do a chip-level reset on the MAC */
	if (prism2_doreset) {
		result = hfa384x_corereset(hw,
					   prism2_reset_holdtime,
					   prism2_reset_settletime, 0);
		if (result != 0) {
			/* NOTE(review): this error path frees wlandev/hw but
			 * leaves the interface's intfdata pointing at the
			 * freed wlandev; a later disconnect would then
			 * touch freed memory — confirm and consider
			 * usb_set_intfdata(interface, NULL) here.
			 */
			unregister_wlandev(wlandev);
			hfa384x_destroy(hw);
			dev_err(&interface->dev, "hfa384x_corereset() failed.\n");
			kfree(wlandev);
			kfree(hw);
			wlandev = NULL;
			return -ENODEV;
		}
	}
	prism2sta_ifstate(wlandev, P80211ENUM_ifstate_enable);
	return 0;
}
#else
#define prism2sta_suspend NULL
#define prism2sta_resume NULL
#endif /* CONFIG_PM */
/* USB driver glue; module init/exit are generated by module_usb_driver(). */
static struct usb_driver prism2_usb_driver = {
	.name = "prism2_usb",
	.probe = prism2sta_probe_usb,
	.disconnect = prism2sta_disconnect_usb,
	.id_table = usb_prism_tbl,
	.suspend = prism2sta_suspend,
	.resume = prism2sta_resume,
	.reset_resume = prism2sta_resume,
	/* fops, minor? */
};
module_usb_driver(prism2_usb_driver);
| linux-master | drivers/staging/wlan-ng/prism2usb.c |
// SPDX-License-Identifier: (GPL-2.0 OR MPL-1.1)
/*
*
* Management request for mibset/mibget
*
* Copyright (C) 1999 AbsoluteValue Systems, Inc. All Rights Reserved.
* --------------------------------------------------------------------
*
* linux-wlan
*
* --------------------------------------------------------------------
*
* Inquiries regarding the linux-wlan Open Source project can be
* made directly to:
*
* AbsoluteValue Systems Inc.
* [email protected]
* http://www.linux-wlan.com
*
* --------------------------------------------------------------------
*
* Portions of the development of this software were funded by
* Intersil Corporation as part of PRISM(R) chipset product development.
*
* --------------------------------------------------------------------
*
* The functions in this file handle the mibset/mibget management
* functions.
*
* --------------------------------------------------------------------
*/
#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/minmax.h>
#include <linux/sched.h>
#include <linux/types.h>
#include <linux/wireless.h>
#include <linux/netdevice.h>
#include <linux/io.h>
#include <linux/delay.h>
#include <asm/byteorder.h>
#include <linux/usb.h>
#include <linux/bitops.h>
#include "p80211types.h"
#include "p80211hdr.h"
#include "p80211mgmt.h"
#include "p80211conv.h"
#include "p80211msg.h"
#include "p80211netdev.h"
#include "p80211metadef.h"
#include "p80211metastruct.h"
#include "hfa384x.h"
#include "prism2mgmt.h"
#define MIB_TMP_MAXLEN 200 /* Max length of RID record (in bytes). */
#define F_STA 0x1 /* MIB is supported on stations. */
#define F_READ 0x2 /* MIB may be read. */
#define F_WRITE 0x4 /* MIB may be written. */
/*
 * One entry of the MIB dispatch table (mibtab): maps a p80211 DID to
 * the handler that implements it, plus up to three handler-specific
 * parameters (typically the Prism2 RID and a length or bit mask).
 */
struct mibrec {
	u32 did;	/* p80211 DID this record implements */
	u16 flag;	/* F_STA / F_READ / F_WRITE access flags */
	u16 parm1;	/* handler-specific; usually the Prism2 RID */
	u16 parm2;	/* handler-specific; length or bit mask */
	u16 parm3;	/* handler-specific; unused by current handlers */
	int (*func)(struct mibrec *mib,
		    int isget,
		    struct wlandevice *wlandev,
		    struct hfa384x *hw,
		    struct p80211msg_dot11req_mibset *msg, void *data);
};
static int prism2mib_bytearea2pstr(struct mibrec *mib,
int isget,
struct wlandevice *wlandev,
struct hfa384x *hw,
struct p80211msg_dot11req_mibset *msg,
void *data);
static int prism2mib_uint32(struct mibrec *mib,
int isget,
struct wlandevice *wlandev,
struct hfa384x *hw,
struct p80211msg_dot11req_mibset *msg, void *data);
static int prism2mib_flag(struct mibrec *mib,
int isget,
struct wlandevice *wlandev,
struct hfa384x *hw,
struct p80211msg_dot11req_mibset *msg, void *data);
static int prism2mib_wepdefaultkey(struct mibrec *mib,
int isget,
struct wlandevice *wlandev,
struct hfa384x *hw,
struct p80211msg_dot11req_mibset *msg,
void *data);
static int prism2mib_privacyinvoked(struct mibrec *mib,
int isget,
struct wlandevice *wlandev,
struct hfa384x *hw,
struct p80211msg_dot11req_mibset *msg,
void *data);
static int
prism2mib_fragmentationthreshold(struct mibrec *mib,
int isget,
struct wlandevice *wlandev,
struct hfa384x *hw,
struct p80211msg_dot11req_mibset *msg,
void *data);
static int prism2mib_priv(struct mibrec *mib,
int isget,
struct wlandevice *wlandev,
struct hfa384x *hw,
struct p80211msg_dot11req_mibset *msg, void *data);
/*
 * MIB dispatch table. Looked up by DID in prism2mgmt_mibset_mibget();
 * the all-zero record at the end marks the table's end.
 */
static struct mibrec mibtab[] = {
	/* dot11smt MIB's */
	{didmib_dot11smt_wepdefaultkeystable_key(1),
	 F_STA | F_WRITE,
	 HFA384x_RID_CNFWEPDEFAULTKEY0, 0, 0,
	 prism2mib_wepdefaultkey},
	{didmib_dot11smt_wepdefaultkeystable_key(2),
	 F_STA | F_WRITE,
	 HFA384x_RID_CNFWEPDEFAULTKEY1, 0, 0,
	 prism2mib_wepdefaultkey},
	{didmib_dot11smt_wepdefaultkeystable_key(3),
	 F_STA | F_WRITE,
	 HFA384x_RID_CNFWEPDEFAULTKEY2, 0, 0,
	 prism2mib_wepdefaultkey},
	{didmib_dot11smt_wepdefaultkeystable_key(4),
	 F_STA | F_WRITE,
	 HFA384x_RID_CNFWEPDEFAULTKEY3, 0, 0,
	 prism2mib_wepdefaultkey},
	{DIDMIB_DOT11SMT_PRIVACYTABLE_PRIVACYINVOKED,
	 F_STA | F_READ | F_WRITE,
	 HFA384x_RID_CNFWEPFLAGS, HFA384x_WEPFLAGS_PRIVINVOKED, 0,
	 prism2mib_privacyinvoked},
	{DIDMIB_DOT11SMT_PRIVACYTABLE_WEPDEFAULTKEYID,
	 F_STA | F_READ | F_WRITE,
	 HFA384x_RID_CNFWEPDEFAULTKEYID, 0, 0,
	 prism2mib_uint32},
	{DIDMIB_DOT11SMT_PRIVACYTABLE_EXCLUDEUNENCRYPTED,
	 F_STA | F_READ | F_WRITE,
	 HFA384x_RID_CNFWEPFLAGS, HFA384x_WEPFLAGS_EXCLUDE, 0,
	 prism2mib_flag},
	/* dot11mac MIB's */
	{DIDMIB_DOT11MAC_OPERATIONTABLE_MACADDRESS,
	 F_STA | F_READ | F_WRITE,
	 HFA384x_RID_CNFOWNMACADDR, HFA384x_RID_CNFOWNMACADDR_LEN, 0,
	 prism2mib_bytearea2pstr},
	{DIDMIB_DOT11MAC_OPERATIONTABLE_RTSTHRESHOLD,
	 F_STA | F_READ | F_WRITE,
	 HFA384x_RID_RTSTHRESH, 0, 0,
	 prism2mib_uint32},
	{DIDMIB_DOT11MAC_OPERATIONTABLE_SHORTRETRYLIMIT,
	 F_STA | F_READ,
	 HFA384x_RID_SHORTRETRYLIMIT, 0, 0,
	 prism2mib_uint32},
	{DIDMIB_DOT11MAC_OPERATIONTABLE_LONGRETRYLIMIT,
	 F_STA | F_READ,
	 HFA384x_RID_LONGRETRYLIMIT, 0, 0,
	 prism2mib_uint32},
	{DIDMIB_DOT11MAC_OPERATIONTABLE_FRAGMENTATIONTHRESHOLD,
	 F_STA | F_READ | F_WRITE,
	 HFA384x_RID_FRAGTHRESH, 0, 0,
	 prism2mib_fragmentationthreshold},
	{DIDMIB_DOT11MAC_OPERATIONTABLE_MAXTRANSMITMSDULIFETIME,
	 F_STA | F_READ,
	 HFA384x_RID_MAXTXLIFETIME, 0, 0,
	 prism2mib_uint32},
	/* dot11phy MIB's */
	{DIDMIB_DOT11PHY_DSSSTABLE_CURRENTCHANNEL,
	 F_STA | F_READ,
	 HFA384x_RID_CURRENTCHANNEL, 0, 0,
	 prism2mib_uint32},
	{DIDMIB_DOT11PHY_TXPOWERTABLE_CURRENTTXPOWERLEVEL,
	 F_STA | F_READ | F_WRITE,
	 HFA384x_RID_TXPOWERMAX, 0, 0,
	 prism2mib_uint32},
	/* p2Static MIB's */
	{DIDMIB_P2_STATIC_CNFPORTTYPE,
	 F_STA | F_READ | F_WRITE,
	 HFA384x_RID_CNFPORTTYPE, 0, 0,
	 prism2mib_uint32},
	/* p2MAC MIB's */
	{DIDMIB_P2_MAC_CURRENTTXRATE,
	 F_STA | F_READ,
	 HFA384x_RID_CURRENTTXRATE, 0, 0,
	 prism2mib_uint32},
	/* And finally, lnx mibs */
	{DIDMIB_LNX_CONFIGTABLE_RSNAIE,
	 F_STA | F_READ | F_WRITE,
	 HFA384x_RID_CNFWPADATA, 0, 0,
	 prism2mib_priv},
	{0, 0, 0, 0, 0, NULL}
};
/*
* prism2mgmt_mibset_mibget
*
* Set the value of a mib item.
*
* Arguments:
* wlandev wlan device structure
* msgp ptr to msg buffer
*
* Returns:
* 0 success and done
* <0 success, but we're waiting for something to finish.
* >0 an error occurred while handling the message.
* Side effects:
*
* Call context:
* process thread (usually)
* interrupt
*/
int prism2mgmt_mibset_mibget(struct wlandevice *wlandev, void *msgp)
{
	struct hfa384x *hw = wlandev->priv;
	int result, isget;
	struct mibrec *mib;
	u16 which;
	struct p80211msg_dot11req_mibset *msg = msgp;
	struct p80211itemd *mibitem;
	/* Assume success; specific failures overwrite resultcode below. */
	msg->resultcode.status = P80211ENUM_msgitem_status_data_ok;
	msg->resultcode.data = P80211ENUM_resultcode_success;
	/*
	 ** Determine if this is an Access Point or a station.
	 */
	which = F_STA;
	/*
	 ** Find the MIB in the MIB table.  Note that a MIB may be in the
	 ** table twice...once for an AP and once for a station.  Make sure
	 ** to get the correct one.  Note that DID=0 marks the end of the
	 ** MIB table.
	 */
	mibitem = (struct p80211itemd *)msg->mibattribute.data;
	for (mib = mibtab; mib->did != 0; mib++)
		if (mib->did == mibitem->did && (mib->flag & which))
			break;
	if (mib->did == 0) {
		msg->resultcode.data = P80211ENUM_resultcode_not_supported;
		goto done;
	}
	/*
	 ** Determine if this is a "mibget" or a "mibset".  If this is a
	 ** "mibget", then make sure that the MIB may be read.  Otherwise,
	 ** this is a "mibset" so make sure that the MIB may be written.
	 */
	isget = (msg->msgcode == DIDMSG_DOT11REQ_MIBGET);
	if (isget) {
		if (!(mib->flag & F_READ)) {
			msg->resultcode.data =
			    P80211ENUM_resultcode_cant_get_writeonly_mib;
			goto done;
		}
	} else {
		if (!(mib->flag & F_WRITE)) {
			msg->resultcode.data =
			    P80211ENUM_resultcode_cant_set_readonly_mib;
			goto done;
		}
	}
	/*
	 ** Execute the MIB function.  If things worked okay, then make
	 ** sure that the MIB function also worked okay.  If so, and this
	 ** is a "mibget", then the status value must be set for both the
	 ** "mibattribute" parameter and the mib item within the data
	 ** portion of the "mibattribute".
	 */
	result = mib->func(mib, isget, wlandev, hw, msg, (void *)mibitem->data);
	if (msg->resultcode.data == P80211ENUM_resultcode_success) {
		if (result != 0) {
			pr_debug("get/set failure, result=%d\n", result);
			msg->resultcode.data =
			    P80211ENUM_resultcode_implementation_failure;
		} else {
			if (isget) {
				msg->mibattribute.status =
				    P80211ENUM_msgitem_status_data_ok;
				mibitem->status =
				    P80211ENUM_msgitem_status_data_ok;
			}
		}
	}
done:
	/* Errors are reported in-band via msg->resultcode, never as <0. */
	return 0;
}
/*
* prism2mib_bytearea2pstr
*
* Get/set pstr data to/from a byte area.
*
* MIB record parameters:
* parm1 Prism2 RID value.
* parm2 Number of bytes of RID data.
* parm3 Not used.
*
* Arguments:
* mib MIB record.
* isget MIBGET/MIBSET flag.
* wlandev wlan device structure.
* priv "priv" structure.
* hw "hw" structure.
* msg Message structure.
* data Data buffer.
*
* Returns:
* 0 - Success.
* ~0 - Error.
*
*/
static int prism2mib_bytearea2pstr(struct mibrec *mib,
int isget,
struct wlandevice *wlandev,
struct hfa384x *hw,
struct p80211msg_dot11req_mibset *msg,
void *data)
{
int result;
struct p80211pstrd *pstr = data;
u8 bytebuf[MIB_TMP_MAXLEN];
if (isget) {
result =
hfa384x_drvr_getconfig(hw, mib->parm1, bytebuf, mib->parm2);
prism2mgmt_bytearea2pstr(bytebuf, pstr, mib->parm2);
} else {
memset(bytebuf, 0, mib->parm2);
memcpy(bytebuf, pstr->data, pstr->len);
result =
hfa384x_drvr_setconfig(hw, mib->parm1, bytebuf, mib->parm2);
}
return result;
}
/*
* prism2mib_uint32
*
* Get/set uint32 data.
*
* MIB record parameters:
* parm1 Prism2 RID value.
* parm2 Not used.
* parm3 Not used.
*
* Arguments:
* mib MIB record.
* isget MIBGET/MIBSET flag.
* wlandev wlan device structure.
* priv "priv" structure.
* hw "hw" structure.
* msg Message structure.
* data Data buffer.
*
* Returns:
* 0 - Success.
* ~0 - Error.
*
*/
static int prism2mib_uint32(struct mibrec *mib,
			    int isget,
			    struct wlandevice *wlandev,
			    struct hfa384x *hw,
			    struct p80211msg_dot11req_mibset *msg, void *data)
{
	u32 *val = data;
	u8 bytebuf[MIB_TMP_MAXLEN];
	u16 *word = (u16 *)bytebuf;
	int result;

	if (!isget) {
		/* mibset: narrow the 32-bit item into the 16-bit RID. */
		*word = *val;
		return hfa384x_drvr_setconfig16(hw, mib->parm1, *word);
	}

	/* mibget: widen the 16-bit RID value into the 32-bit item. */
	result = hfa384x_drvr_getconfig16(hw, mib->parm1, word);
	*val = *word;
	return result;
}
/*
* prism2mib_flag
*
* Get/set a flag.
*
* MIB record parameters:
* parm1 Prism2 RID value.
* parm2 Bit to get/set.
* parm3 Not used.
*
* Arguments:
* mib MIB record.
* isget MIBGET/MIBSET flag.
* wlandev wlan device structure.
* priv "priv" structure.
* hw "hw" structure.
* msg Message structure.
* data Data buffer.
*
* Returns:
* 0 - Success.
* ~0 - Error.
*
*/
static int prism2mib_flag(struct mibrec *mib,
			  int isget,
			  struct wlandevice *wlandev,
			  struct hfa384x *hw,
			  struct p80211msg_dot11req_mibset *msg, void *data)
{
	u32 *val = data;
	u8 bytebuf[MIB_TMP_MAXLEN];
	u16 *word = (u16 *)bytebuf;
	u32 flags;
	int result;

	/* Both get and set start from the current flags word. */
	result = hfa384x_drvr_getconfig16(hw, mib->parm1, word);
	if (result != 0)
		return result;

	flags = *word;
	if (isget) {
		/* Report the selected bit as a p80211 truth value. */
		*val = (flags & mib->parm2) ?
		    P80211ENUM_truth_true : P80211ENUM_truth_false;
		return result;
	}

	/* Set or clear the bit, then write the word back. */
	if ((*val) == P80211ENUM_truth_true)
		flags |= mib->parm2;
	else
		flags &= ~mib->parm2;
	*word = flags;

	return hfa384x_drvr_setconfig16(hw, mib->parm1, *word);
}
/*
* prism2mib_wepdefaultkey
*
* Get/set WEP default keys.
*
* MIB record parameters:
* parm1 Prism2 RID value.
* parm2 Number of bytes of RID data.
* parm3 Not used.
*
* Arguments:
* mib MIB record.
* isget MIBGET/MIBSET flag.
* wlandev wlan device structure.
* priv "priv" structure.
* hw "hw" structure.
* msg Message structure.
* data Data buffer.
*
* Returns:
* 0 - Success.
* ~0 - Error.
*
*/
static int prism2mib_wepdefaultkey(struct mibrec *mib,
int isget,
struct wlandevice *wlandev,
struct hfa384x *hw,
struct p80211msg_dot11req_mibset *msg,
void *data)
{
int result;
struct p80211pstrd *pstr = data;
u8 bytebuf[MIB_TMP_MAXLEN];
u16 len;
if (isget) {
result = 0; /* Should never happen. */
} else {
len = (pstr->len > 5) ? HFA384x_RID_CNFWEP128DEFAULTKEY_LEN :
HFA384x_RID_CNFWEPDEFAULTKEY_LEN;
memset(bytebuf, 0, len);
memcpy(bytebuf, pstr->data, pstr->len);
result = hfa384x_drvr_setconfig(hw, mib->parm1, bytebuf, len);
}
return result;
}
/*
* prism2mib_privacyinvoked
*
* Get/set the dot11PrivacyInvoked value.
*
* MIB record parameters:
* parm1 Prism2 RID value.
* parm2 Bit value for PrivacyInvoked flag.
* parm3 Not used.
*
* Arguments:
* mib MIB record.
* isget MIBGET/MIBSET flag.
* wlandev wlan device structure.
* priv "priv" structure.
* hw "hw" structure.
* msg Message structure.
* data Data buffer.
*
* Returns:
* 0 - Success.
* ~0 - Error.
*
*/
static int prism2mib_privacyinvoked(struct mibrec *mib,
				    int isget,
				    struct wlandevice *wlandev,
				    struct hfa384x *hw,
				    struct p80211msg_dot11req_mibset *msg,
				    void *data)
{
	/*
	 * When host-based WEP decryption is active, fold the "disable
	 * firmware crypto" bits into the flag mask before delegating to
	 * the generic flag handler, so the firmware does not crypt
	 * frames the host is already handling.
	 *
	 * The original code re-tested HOSTWEP_DECRYPT inside an
	 * identical outer test; that redundant inner check is dropped.
	 * NOTE(review): the ENCRYPT bit is still only honoured when
	 * DECRYPT is also set, which looks suspicious but matches the
	 * historical behavior -- confirm before widening the gate.
	 */
	if (wlandev->hostwep & HOSTWEP_DECRYPT) {
		mib->parm2 |= HFA384x_WEPFLAGS_DISABLE_RXCRYPT;
		if (wlandev->hostwep & HOSTWEP_ENCRYPT)
			mib->parm2 |= HFA384x_WEPFLAGS_DISABLE_TXCRYPT;
	}
	return prism2mib_flag(mib, isget, wlandev, hw, msg, data);
}
/*
* prism2mib_fragmentationthreshold
*
* Get/set the fragmentation threshold.
*
* MIB record parameters:
* parm1 Prism2 RID value.
* parm2 Not used.
* parm3 Not used.
*
* Arguments:
* mib MIB record.
* isget MIBGET/MIBSET flag.
 *	wlandev		wlan device structure.
 *	hw		"hw" structure.
* msg Message structure.
* data Data buffer.
*
* Returns:
* 0 - Success.
* ~0 - Error.
*
*/
static int
prism2mib_fragmentationthreshold(struct mibrec *mib,
				 int isget,
				 struct wlandevice *wlandev,
				 struct hfa384x *hw,
				 struct p80211msg_dot11req_mibset *msg,
				 void *data)
{
	u32 *threshold = data;

	/* The firmware only accepts even fragmentation thresholds. */
	if (!isget && (*threshold & 1)) {
		netdev_warn(wlandev->netdev,
			    "Attempt to set odd number FragmentationThreshold\n");
		msg->resultcode.data =
		    P80211ENUM_resultcode_not_supported;
		return 0;
	}

	/* Otherwise it is a plain 32-bit MIB get/set. */
	return prism2mib_uint32(mib, isget, wlandev, hw, msg, data);
}
/*
* prism2mib_priv
*
* Get/set values in the "priv" data structure.
*
* MIB record parameters:
* parm1 Not used.
* parm2 Not used.
* parm3 Not used.
*
* Arguments:
* mib MIB record.
* isget MIBGET/MIBSET flag.
 *	wlandev		wlan device structure.
 *	hw		"hw" structure.
* msg Message structure.
* data Data buffer.
*
* Returns:
* 0 - Success.
* ~0 - Error.
*
*/
/*
 * Dispatch get/set of driver-private MIB items by DID.  Currently only
 * the Linux RSN (WPA) information element is handled; unknown DIDs are
 * logged and ignored.  Always returns 0.
 */
static int prism2mib_priv(struct mibrec *mib,
			  int isget,
			  struct wlandevice *wlandev,
			  struct hfa384x *hw,
			  struct p80211msg_dot11req_mibset *msg, void *data)
{
	struct p80211pstrd *pstr = data;
	switch (mib->did) {
	case DIDMIB_LNX_CONFIGTABLE_RSNAIE: {
		/*
		 * This can never work: wpa is on the stack
		 * and has no bytes allocated in wpa.data.
		 */
		struct hfa384x_wpa_data wpa;
		if (isget) {
			/* Read the RID, but report a zero-length string. */
			hfa384x_drvr_getconfig(hw,
					       HFA384x_RID_CNFWPADATA,
					       (u8 *)&wpa,
					       sizeof(wpa));
			pstr->len = 0;
		} else {
			/* Write an empty WPA IE to the firmware. */
			wpa.datalen = 0;
			hfa384x_drvr_setconfig(hw,
					       HFA384x_RID_CNFWPADATA,
					       (u8 *)&wpa,
					       sizeof(wpa));
		}
		break;
	}
	default:
		netdev_err(wlandev->netdev, "Unhandled DID 0x%08x\n", mib->did);
	}
	return 0;
}
/*
* prism2mgmt_pstr2bytestr
*
* Convert the pstr data in the WLAN message structure into an hfa384x
* byte string format.
*
* Arguments:
* bytestr hfa384x byte string data type
* pstr wlan message data
*
* Returns:
* Nothing
*
*/
void prism2mgmt_pstr2bytestr(struct hfa384x_bytestr *bytestr,
			     struct p80211pstrd *pstr)
{
	/* hfa384x byte strings carry a little-endian 16-bit length. */
	bytestr->len = cpu_to_le16((u16)(pstr->len));
	/* Caller must ensure bytestr->data can hold pstr->len bytes. */
	memcpy(bytestr->data, pstr->data, pstr->len);
}
/*
* prism2mgmt_bytestr2pstr
*
* Convert the data in an hfa384x byte string format into a
* pstr in the WLAN message.
*
* Arguments:
* bytestr hfa384x byte string data type
* msg wlan message
*
* Returns:
* Nothing
*
*/
void prism2mgmt_bytestr2pstr(struct hfa384x_bytestr *bytestr,
			     struct p80211pstrd *pstr)
{
	/* pstr lengths are 8-bit; lengths > 255 are truncated by the cast. */
	pstr->len = (u8)(le16_to_cpu(bytestr->len));
	memcpy(pstr->data, bytestr->data, pstr->len);
}
/*
* prism2mgmt_bytearea2pstr
*
* Convert the data in an hfa384x byte area format into a pstr
* in the WLAN message.
*
* Arguments:
* bytearea hfa384x byte area data type
* msg wlan message
*
* Returns:
* Nothing
*
*/
void prism2mgmt_bytearea2pstr(u8 *bytearea, struct p80211pstrd *pstr, int len)
{
	/* Raw byte areas carry no length field; the caller supplies len. */
	pstr->len = (u8)len;
	memcpy(pstr->data, bytearea, len);
}
| linux-master | drivers/staging/wlan-ng/prism2mib.c |
// SPDX-License-Identifier: (GPL-2.0 OR MPL-1.1)
/*
*
* WEP encode/decode for P80211.
*
* Copyright (C) 2002 AbsoluteValue Systems, Inc. All Rights Reserved.
* --------------------------------------------------------------------
*
* linux-wlan
*
* --------------------------------------------------------------------
*
* Inquiries regarding the linux-wlan Open Source project can be
* made directly to:
*
* AbsoluteValue Systems Inc.
* [email protected]
* http://www.linux-wlan.com
*
* --------------------------------------------------------------------
*
* Portions of the development of this software were funded by
* Intersil Corporation as part of PRISM(R) chipset product development.
*
* --------------------------------------------------------------------
*/
/*================================================================*/
/* System Includes */
#include <linux/crc32.h>
#include <linux/netdevice.h>
#include <linux/wireless.h>
#include <linux/random.h>
#include <linux/kernel.h>
#include "p80211hdr.h"
#include "p80211types.h"
#include "p80211msg.h"
#include "p80211conv.h"
#include "p80211netdev.h"
#define WEP_KEY(x) (((x) & 0xC0) >> 6)
/* keylen in bytes! */
int wep_change_key(struct wlandevice *wlandev, int keynum, u8 *key, int keylen)
{
	/*
	 * Install a WEP key into the given slot.  Reject a null key,
	 * an out-of-range length, or an invalid slot number.
	 * Returns 0 on success, -1 on any invalid argument.
	 */
	if (!key || keylen < 0 || keylen >= MAX_KEYLEN ||
	    keynum < 0 || keynum >= NUM_WEPKEYS)
		return -1;

	wlandev->wep_keylens[keynum] = keylen;
	memcpy(wlandev->wep_keys[keynum], key, keylen);

	return 0;
}
/*
* 4-byte IV at start of buffer, 4-byte ICV at end of buffer.
* if successful, buf start is payload begin, length -= 8;
*/
/*
 * Decrypt a WEP payload in place with RC4 and verify the trailing ICV.
 *
 * @buf: payload (IV and ICV already stripped by the caller)
 * @len: payload length in bytes
 * @key_override: key index to force, or negative to use the IV's index
 * @iv:  4-byte IV (first 3 bytes seed the RC4 key, byte 3 selects the key)
 * @icv: 4-byte received integrity check value
 *
 * Returns 0 on success; -1 empty payload, -2 bad key index, -3 empty
 * key, -(4|k<<4) on ICV byte-k mismatch.
 */
int wep_decrypt(struct wlandevice *wlandev, u8 *buf, u32 len, int key_override,
		u8 *iv, u8 *icv)
{
	u32 i, j, k, crc, keylen;
	u8 s[256], key[64], c_crc[4];
	u8 keyidx;
	/* Needs to be at least 8 bytes of payload */
	if (len <= 0)
		return -1;
	/* initialize the first bytes of the key from the IV */
	key[0] = iv[0];
	key[1] = iv[1];
	key[2] = iv[2];
	keyidx = WEP_KEY(iv[3]);
	if (key_override >= 0)
		keyidx = key_override;
	if (keyidx >= NUM_WEPKEYS)
		return -2;
	keylen = wlandev->wep_keylens[keyidx];
	if (keylen == 0)
		return -3;
	/* copy the rest of the key over from the designated key */
	memcpy(key + 3, wlandev->wep_keys[keyidx], keylen);
	keylen += 3;		/* add in IV bytes */
	/* set up the RC4 state (key-scheduling algorithm) */
	for (i = 0; i < 256; i++)
		s[i] = i;
	j = 0;
	for (i = 0; i < 256; i++) {
		j = (j + s[i] + key[i % keylen]) & 0xff;
		/*
		 * RC4 swaps the S-box ENTRIES s[i] and s[j].  The
		 * previous swap(i, j) exchanged the indices instead,
		 * corrupting both the key schedule and the loop counter.
		 */
		swap(s[i], s[j]);
	}
	/* Apply the RC4 to the data, update the CRC32 */
	i = 0;
	j = 0;
	for (k = 0; k < len; k++) {
		i = (i + 1) & 0xff;
		j = (j + s[i]) & 0xff;
		swap(s[i], s[j]);	/* swap S-box entries, not indices */
		buf[k] ^= s[(s[i] + s[j]) & 0xff];
	}
	crc = ~crc32_le(~0, buf, len);
	/* now let's check the crc */
	c_crc[0] = crc;
	c_crc[1] = crc >> 8;
	c_crc[2] = crc >> 16;
	c_crc[3] = crc >> 24;
	/* decrypt the received ICV with the next 4 keystream bytes */
	for (k = 0; k < 4; k++) {
		i = (i + 1) & 0xff;
		j = (j + s[i]) & 0xff;
		swap(s[i], s[j]);	/* swap S-box entries, not indices */
		if ((c_crc[k] ^ s[(s[i] + s[j]) & 0xff]) != icv[k])
			return -(4 | (k << 4));	/* ICV mismatch */
	}
	return 0;
}
/* encrypts in-place. */
/*
 * RC4-encrypt @len bytes from @buf into @dst (may alias for in-place
 * use), generating a fresh random IV and the encrypted 4-byte ICV.
 *
 * Returns 0 on success; -1 empty frame, -2 bad key slot, -3 empty key.
 */
int wep_encrypt(struct wlandevice *wlandev, u8 *buf,
		u8 *dst, u32 len, int keynum, u8 *iv, u8 *icv)
{
	u32 i, j, k, crc, keylen;
	u8 s[256], key[64];
	/* no point in WEPping an empty frame */
	if (len <= 0)
		return -1;
	/*
	 * we need to have a real key.. also guard against a negative
	 * slot, which would index wep_keylens[] out of bounds.
	 */
	if (keynum < 0 || keynum >= NUM_WEPKEYS)
		return -2;
	keylen = wlandev->wep_keylens[keynum];
	if (keylen == 0)
		return -3;
	/* use a random IV. And skip known weak ones. */
	get_random_bytes(iv, 3);
	while ((iv[1] == 0xff) && (iv[0] >= 3) && (iv[0] < keylen))
		get_random_bytes(iv, 3);
	iv[3] = (keynum & 0x03) << 6;	/* key index in the top two bits */
	key[0] = iv[0];
	key[1] = iv[1];
	key[2] = iv[2];
	/* copy the rest of the key over from the designated key */
	memcpy(key + 3, wlandev->wep_keys[keynum], keylen);
	keylen += 3;		/* add in IV bytes */
	/* set up the RC4 state (key-scheduling algorithm) */
	for (i = 0; i < 256; i++)
		s[i] = i;
	j = 0;
	for (i = 0; i < 256; i++) {
		j = (j + s[i] + key[i % keylen]) & 0xff;
		/*
		 * RC4 swaps the S-box ENTRIES s[i] and s[j].  The
		 * previous swap(i, j) exchanged the indices instead,
		 * corrupting both the key schedule and the loop counter.
		 */
		swap(s[i], s[j]);
	}
	/* Update CRC32 then apply RC4 to the data */
	i = 0;
	j = 0;
	for (k = 0; k < len; k++) {
		i = (i + 1) & 0xff;
		j = (j + s[i]) & 0xff;
		swap(s[i], s[j]);	/* swap S-box entries, not indices */
		dst[k] = buf[k] ^ s[(s[i] + s[j]) & 0xff];
	}
	crc = ~crc32_le(~0, buf, len);
	/* now let's encrypt the crc */
	icv[0] = crc;
	icv[1] = crc >> 8;
	icv[2] = crc >> 16;
	icv[3] = crc >> 24;
	for (k = 0; k < 4; k++) {
		i = (i + 1) & 0xff;
		j = (j + s[i]) & 0xff;
		swap(s[i], s[j]);	/* swap S-box entries, not indices */
		icv[k] ^= s[(s[i] + s[j]) & 0xff];
	}
	return 0;
}
| linux-master | drivers/staging/wlan-ng/p80211wep.c |
// SPDX-License-Identifier: GPL-2.0+
/*
* Driver for Realtek PCI-Express card reader
*
* Copyright(c) 2009-2013 Realtek Semiconductor Corp. All rights reserved.
*
* Author:
* Wei WANG ([email protected])
* Micky Ching ([email protected])
*/
#include <linux/blkdev.h>
#include <linux/kthread.h>
#include <linux/sched.h>
#include <linux/vmalloc.h>
#include "rtsx.h"
#include "ms.h"
static inline void ms_set_err_code(struct rtsx_chip *chip, u8 err_code)
{
	/* Record the most recent MemoryStick error for later inspection. */
	chip->ms_card.err_code = err_code;
}
static inline int ms_check_err_code(struct rtsx_chip *chip, u8 err_code)
{
	/* Nonzero when the last recorded MS error matches err_code. */
	return chip->ms_card.err_code == err_code;
}
static int ms_parse_err_code(struct rtsx_chip *chip)
{
	/* Every MS error currently maps to a generic failure status. */
	return STATUS_FAIL;
}
/*
 * Issue a single TPC (transfer-protocol command) through the ping-pong
 * buffer and wait for the transfer-end flag, then decode the link-level
 * status byte.  Returns STATUS_SUCCESS or the mapped error status.
 */
static int ms_transfer_tpc(struct rtsx_chip *chip, u8 trans_mode,
			   u8 tpc, u8 cnt, u8 cfg)
{
	struct ms_info *ms_card = &chip->ms_card;
	int retval;
	u8 *ptr;
	dev_dbg(rtsx_dev(chip), "%s: tpc = 0x%x\n", __func__, tpc);
	/* Queue the TPC, byte count, config, then kick off the transfer. */
	rtsx_init_cmd(chip);
	rtsx_add_cmd(chip, WRITE_REG_CMD, MS_TPC, 0xFF, tpc);
	rtsx_add_cmd(chip, WRITE_REG_CMD, MS_BYTE_CNT, 0xFF, cnt);
	rtsx_add_cmd(chip, WRITE_REG_CMD, MS_TRANS_CFG, 0xFF, cfg);
	rtsx_add_cmd(chip, WRITE_REG_CMD, CARD_DATA_SOURCE,
		     0x01, PINGPONG_BUFFER);
	rtsx_add_cmd(chip, WRITE_REG_CMD, MS_TRANSFER,
		     0xFF, MS_TRANSFER_START | trans_mode);
	rtsx_add_cmd(chip, CHECK_REG_CMD, MS_TRANSFER,
		     MS_TRANSFER_END, MS_TRANSFER_END);
	/* Read MS_TRANS_CFG back so the status lands in the cmd buffer. */
	rtsx_add_cmd(chip, READ_REG_CMD, MS_TRANS_CFG, 0, 0);
	retval = rtsx_send_cmd(chip, MS_CARD, 5000);
	if (retval < 0) {
		rtsx_clear_ms_error(chip);
		ms_set_err_code(chip, MS_TO_ERROR);
		return ms_parse_err_code(chip);
	}
	/* First byte of the returned cmd data is the MS_TRANS_CFG value. */
	ptr = rtsx_get_cmd_data(chip) + 1;
	if (!(tpc & 0x08)) {		/* Read Packet */
		if (*ptr & MS_CRC16_ERR) {
			ms_set_err_code(chip, MS_CRC16_ERROR);
			return ms_parse_err_code(chip);
		}
	} else {			/* Write Packet */
		if (CHK_MSPRO(ms_card) && !(*ptr & 0x80)) {
			if (*ptr & (MS_INT_ERR | MS_INT_CMDNK)) {
				ms_set_err_code(chip, MS_CMD_NK);
				return ms_parse_err_code(chip);
			}
		}
	}
	/* A ready timeout is fatal regardless of transfer direction. */
	if (*ptr & MS_RDY_TIMEOUT) {
		rtsx_clear_ms_error(chip);
		ms_set_err_code(chip, MS_TO_ERROR);
		return ms_parse_err_code(chip);
	}
	return STATUS_SUCCESS;
}
/*
 * DMA a multi-sector payload to/from the card.  Only the automatic
 * read/write transfer modes are supported; the direction and the error
 * code to record on failure follow from trans_mode.
 */
static int ms_transfer_data(struct rtsx_chip *chip, u8 trans_mode,
			    u8 tpc, u16 sec_cnt, u8 cfg, bool mode_2k,
			    int use_sg, void *buf, int buf_len)
{
	int retval;
	u8 val, err_code = 0;
	enum dma_data_direction dir;
	if (!buf || !buf_len)
		return STATUS_FAIL;
	if (trans_mode == MS_TM_AUTO_READ) {
		dir = DMA_FROM_DEVICE;
		err_code = MS_FLASH_READ_ERROR;
	} else if (trans_mode == MS_TM_AUTO_WRITE) {
		dir = DMA_TO_DEVICE;
		err_code = MS_FLASH_WRITE_ERROR;
	} else {
		return STATUS_FAIL;
	}
	/* Program TPC, sector count, config, and the 2KB-sector mode bit. */
	rtsx_init_cmd(chip);
	rtsx_add_cmd(chip, WRITE_REG_CMD, MS_TPC, 0xFF, tpc);
	rtsx_add_cmd(chip, WRITE_REG_CMD,
		     MS_SECTOR_CNT_H, 0xFF, (u8)(sec_cnt >> 8));
	rtsx_add_cmd(chip, WRITE_REG_CMD, MS_SECTOR_CNT_L, 0xFF, (u8)sec_cnt);
	rtsx_add_cmd(chip, WRITE_REG_CMD, MS_TRANS_CFG, 0xFF, cfg);
	if (mode_2k) {
		rtsx_add_cmd(chip, WRITE_REG_CMD,
			     MS_CFG, MS_2K_SECTOR_MODE, MS_2K_SECTOR_MODE);
	} else {
		rtsx_add_cmd(chip, WRITE_REG_CMD, MS_CFG, MS_2K_SECTOR_MODE, 0);
	}
	/* Sectors are 512 bytes on the wire. */
	trans_dma_enable(dir, chip, sec_cnt * 512, DMA_512);
	rtsx_add_cmd(chip, WRITE_REG_CMD,
		     MS_TRANSFER, 0xFF, MS_TRANSFER_START | trans_mode);
	rtsx_add_cmd(chip, CHECK_REG_CMD,
		     MS_TRANSFER, MS_TRANSFER_END, MS_TRANSFER_END);
	/* Fire the command queue, then run the DMA transfer itself. */
	rtsx_send_cmd_no_wait(chip);
	retval = rtsx_transfer_data(chip, MS_CARD, buf, buf_len,
				    use_sg, dir, chip->mspro_timeout);
	if (retval < 0) {
		ms_set_err_code(chip, err_code);
		if (retval == -ETIMEDOUT)
			retval = STATUS_TIMEDOUT;
		else
			retval = STATUS_FAIL;
		return retval;
	}
	/* Check the link status for card-reported errors after the DMA. */
	retval = rtsx_read_register(chip, MS_TRANS_CFG, &val);
	if (retval)
		return retval;
	if (val & (MS_INT_CMDNK | MS_INT_ERR | MS_CRC16_ERR | MS_RDY_TIMEOUT))
		return STATUS_FAIL;
	return STATUS_SUCCESS;
}
/*
 * Write cnt data bytes through the ping-pong buffer with the given TPC.
 * On a failed host command the MS_TRANS_CFG status is decoded to pick a
 * specific error code; any remaining failure is recorded as a timeout.
 */
static int ms_write_bytes(struct rtsx_chip *chip,
			  u8 tpc, u8 cnt, u8 cfg, u8 *data, int data_len)
{
	struct ms_info *ms_card = &chip->ms_card;
	int retval, i;
	if (!data || data_len < cnt)
		return STATUS_ERROR;
	/* Stage the payload bytes into the ping-pong buffer registers. */
	rtsx_init_cmd(chip);
	for (i = 0; i < cnt; i++) {
		rtsx_add_cmd(chip, WRITE_REG_CMD,
			     PPBUF_BASE2 + i, 0xFF, data[i]);
	}
	/* Pad odd-length payloads to an even byte count with 0xFF. */
	if (cnt % 2)
		rtsx_add_cmd(chip, WRITE_REG_CMD, PPBUF_BASE2 + i, 0xFF, 0xFF);
	rtsx_add_cmd(chip, WRITE_REG_CMD, MS_TPC, 0xFF, tpc);
	rtsx_add_cmd(chip, WRITE_REG_CMD, MS_BYTE_CNT, 0xFF, cnt);
	rtsx_add_cmd(chip, WRITE_REG_CMD, MS_TRANS_CFG, 0xFF, cfg);
	rtsx_add_cmd(chip, WRITE_REG_CMD, CARD_DATA_SOURCE,
		     0x01, PINGPONG_BUFFER);
	rtsx_add_cmd(chip, WRITE_REG_CMD,
		     MS_TRANSFER, 0xFF, MS_TRANSFER_START | MS_TM_WRITE_BYTES);
	rtsx_add_cmd(chip, CHECK_REG_CMD,
		     MS_TRANSFER, MS_TRANSFER_END, MS_TRANSFER_END);
	retval = rtsx_send_cmd(chip, MS_CARD, 5000);
	if (retval < 0) {
		u8 val = 0;
		rtsx_read_register(chip, MS_TRANS_CFG, &val);
		dev_dbg(rtsx_dev(chip), "MS_TRANS_CFG: 0x%02x\n", val);
		rtsx_clear_ms_error(chip);
		if (!(tpc & 0x08)) {
			if (val & MS_CRC16_ERR) {
				ms_set_err_code(chip, MS_CRC16_ERROR);
				return ms_parse_err_code(chip);
			}
		} else {
			if (CHK_MSPRO(ms_card) && !(val & 0x80)) {
				if (val & (MS_INT_ERR | MS_INT_CMDNK)) {
					ms_set_err_code(chip, MS_CMD_NK);
					return ms_parse_err_code(chip);
				}
			}
		}
		if (val & MS_RDY_TIMEOUT) {
			ms_set_err_code(chip, MS_TO_ERROR);
			return ms_parse_err_code(chip);
		}
		/* No specific status bit matched: report a generic timeout. */
		ms_set_err_code(chip, MS_TO_ERROR);
		return ms_parse_err_code(chip);
	}
	return STATUS_SUCCESS;
}
/*
 * Read cnt bytes from the card into data[] via the ping-pong buffer.
 * Failure handling mirrors ms_write_bytes(): the MS_TRANS_CFG status
 * picks a specific error code, with a generic timeout as fallback.
 */
static int ms_read_bytes(struct rtsx_chip *chip,
			 u8 tpc, u8 cnt, u8 cfg, u8 *data, int data_len)
{
	struct ms_info *ms_card = &chip->ms_card;
	int retval, i;
	u8 *ptr;
	if (!data)
		return STATUS_ERROR;
	rtsx_init_cmd(chip);
	rtsx_add_cmd(chip, WRITE_REG_CMD, MS_TPC, 0xFF, tpc);
	rtsx_add_cmd(chip, WRITE_REG_CMD, MS_BYTE_CNT, 0xFF, cnt);
	rtsx_add_cmd(chip, WRITE_REG_CMD, MS_TRANS_CFG, 0xFF, cfg);
	rtsx_add_cmd(chip, WRITE_REG_CMD, CARD_DATA_SOURCE,
		     0x01, PINGPONG_BUFFER);
	rtsx_add_cmd(chip, WRITE_REG_CMD, MS_TRANSFER, 0xFF,
		     MS_TRANSFER_START | MS_TM_READ_BYTES);
	rtsx_add_cmd(chip, CHECK_REG_CMD, MS_TRANSFER,
		     MS_TRANSFER_END, MS_TRANSFER_END);
	/*
	 * Queue reads of the ping-pong buffer.  NOTE(review): for odd
	 * data_len the final queued read targets PPBUF_BASE2 + data_len
	 * and register data_len - 1 is never read -- this looks like an
	 * even-alignment quirk of the ping-pong buffer, but confirm it
	 * against the hardware documentation before relying on byte
	 * data_len - 1 of the output.
	 */
	for (i = 0; i < data_len - 1; i++)
		rtsx_add_cmd(chip, READ_REG_CMD, PPBUF_BASE2 + i, 0, 0);
	if (data_len % 2)
		rtsx_add_cmd(chip, READ_REG_CMD, PPBUF_BASE2 + data_len, 0, 0);
	else
		rtsx_add_cmd(chip, READ_REG_CMD, PPBUF_BASE2 + data_len - 1,
			     0, 0);
	retval = rtsx_send_cmd(chip, MS_CARD, 5000);
	if (retval < 0) {
		u8 val = 0;
		rtsx_read_register(chip, MS_TRANS_CFG, &val);
		rtsx_clear_ms_error(chip);
		if (!(tpc & 0x08)) {
			if (val & MS_CRC16_ERR) {
				ms_set_err_code(chip, MS_CRC16_ERROR);
				return ms_parse_err_code(chip);
			}
		} else {
			if (CHK_MSPRO(ms_card) && !(val & 0x80)) {
				if (val & (MS_INT_ERR | MS_INT_CMDNK)) {
					ms_set_err_code(chip, MS_CMD_NK);
					return ms_parse_err_code(chip);
				}
			}
		}
		if (val & MS_RDY_TIMEOUT) {
			ms_set_err_code(chip, MS_TO_ERROR);
			return ms_parse_err_code(chip);
		}
		ms_set_err_code(chip, MS_TO_ERROR);
		return ms_parse_err_code(chip);
	}
	/* Copy the register values out of the returned command data. */
	ptr = rtsx_get_cmd_data(chip) + 1;
	for (i = 0; i < data_len; i++)
		data[i] = ptr[i];
	if (tpc == PRO_READ_SHORT_DATA && data_len == 8) {
		dev_dbg(rtsx_dev(chip), "Read format progress:\n");
		print_hex_dump_bytes(KBUILD_MODNAME ": ", DUMP_PREFIX_NONE, ptr,
				     cnt);
	}
	return STATUS_SUCCESS;
}
static int ms_set_rw_reg_addr(struct rtsx_chip *chip, u8 read_start,
			      u8 read_cnt, u8 write_start, u8 write_cnt)
{
	u8 cmd[4] = { read_start, read_cnt, write_start, write_cnt };
	int err, retry;

	/*
	 * Program the register window used by subsequent READ_REG /
	 * WRITE_REG TPCs, retrying on transient link errors.
	 */
	for (retry = 0; retry < MS_MAX_RETRY_COUNT; retry++) {
		err = ms_write_bytes(chip, SET_RW_REG_ADRS, 4,
				     NO_WAIT_INT, cmd, 4);
		if (err == STATUS_SUCCESS)
			return STATUS_SUCCESS;
		rtsx_clear_ms_error(chip);
	}

	return STATUS_FAIL;
}
static int ms_send_cmd(struct rtsx_chip *chip, u8 cmd, u8 cfg)
{
	/* Issue a single MS-Pro command byte via the SET_CMD TPC. */
	u8 data[2] = { cmd, 0 };

	return ms_write_bytes(chip, PRO_SET_CMD, 1, cfg, data, 1);
}
static int ms_set_init_para(struct rtsx_chip *chip)
{
	struct ms_info *ms_card = &chip->ms_card;
	int retval;

	/* Pick the card clock matching the negotiated bus mode. */
	if (CHK_HG8BIT(ms_card))
		ms_card->ms_clock = chip->asic_code ?
			chip->asic_ms_hg_clk : chip->fpga_ms_hg_clk;
	else if (CHK_MSPRO(ms_card) || CHK_MS4BIT(ms_card))
		ms_card->ms_clock = chip->asic_code ?
			chip->asic_ms_4bit_clk : chip->fpga_ms_4bit_clk;
	else
		ms_card->ms_clock = chip->asic_code ?
			chip->asic_ms_1bit_clk : chip->fpga_ms_1bit_clk;

	retval = switch_clock(chip, ms_card->ms_clock);
	if (retval != STATUS_SUCCESS)
		return STATUS_FAIL;

	/* Route the shared card interface to the MS slot. */
	retval = select_card(chip, MS_CARD);
	if (retval != STATUS_SUCCESS)
		return STATUS_FAIL;

	return STATUS_SUCCESS;
}
static int ms_switch_clock(struct rtsx_chip *chip)
{
	/* Re-select the MS slot, then restore its negotiated clock. */
	if (select_card(chip, MS_CARD) != STATUS_SUCCESS)
		return STATUS_FAIL;

	if (switch_clock(chip, chip->ms_card.ms_clock) != STATUS_SUCCESS)
		return STATUS_FAIL;

	return STATUS_SUCCESS;
}
/*
 * Park the MS pins: program per-chip pull-up/pull-down settings used
 * while the slot is inactive.  Register values are chip-specific
 * (RTS5208 vs RTS5288/Barossa QFN); other chips are left untouched.
 */
static int ms_pull_ctl_disable(struct rtsx_chip *chip)
{
	int retval;
	if (CHECK_PID(chip, 0x5208)) {
		retval = rtsx_write_register(chip, CARD_PULL_CTL1, 0xFF,
					     MS_D1_PD | MS_D2_PD | MS_CLK_PD |
					     MS_D6_PD);
		if (retval)
			return retval;
		retval = rtsx_write_register(chip, CARD_PULL_CTL2, 0xFF,
					     MS_D3_PD | MS_D0_PD | MS_BS_PD |
					     XD_D4_PD);
		if (retval)
			return retval;
		retval = rtsx_write_register(chip, CARD_PULL_CTL3, 0xFF,
					     MS_D7_PD | XD_CE_PD | XD_CLE_PD |
					     XD_CD_PU);
		if (retval)
			return retval;
		retval = rtsx_write_register(chip, CARD_PULL_CTL4, 0xFF,
					     XD_RDY_PD | SD_D3_PD | SD_D2_PD |
					     XD_ALE_PD);
		if (retval)
			return retval;
		retval = rtsx_write_register(chip, CARD_PULL_CTL5, 0xFF,
					     MS_INS_PU | SD_WP_PD | SD_CD_PU |
					     SD_CMD_PD);
		if (retval)
			return retval;
		retval = rtsx_write_register(chip, CARD_PULL_CTL6, 0xFF,
					     MS_D5_PD | MS_D4_PD);
		if (retval)
			return retval;
	} else if (CHECK_PID(chip, 0x5288)) {
		if (CHECK_BARO_PKG(chip, QFN)) {
			retval = rtsx_write_register(chip, CARD_PULL_CTL1,
						     0xFF, 0x55);
			if (retval)
				return retval;
			retval = rtsx_write_register(chip, CARD_PULL_CTL2,
						     0xFF, 0x55);
			if (retval)
				return retval;
			retval = rtsx_write_register(chip, CARD_PULL_CTL3,
						     0xFF, 0x4B);
			if (retval)
				return retval;
			retval = rtsx_write_register(chip, CARD_PULL_CTL4,
						     0xFF, 0x69);
			if (retval)
				return retval;
		}
	}
	return STATUS_SUCCESS;
}
/*
 * Activate the MS pins: queue the chip-specific pull-control settings
 * for an in-use slot (clock and bus-state lines released) and send them
 * as one command batch.
 */
static int ms_pull_ctl_enable(struct rtsx_chip *chip)
{
	int retval;
	rtsx_init_cmd(chip);
	if (CHECK_PID(chip, 0x5208)) {
		rtsx_add_cmd(chip, WRITE_REG_CMD, CARD_PULL_CTL1, 0xFF,
			     MS_D1_PD | MS_D2_PD | MS_CLK_NP | MS_D6_PD);
		rtsx_add_cmd(chip, WRITE_REG_CMD, CARD_PULL_CTL2, 0xFF,
			     MS_D3_PD | MS_D0_PD | MS_BS_NP | XD_D4_PD);
		rtsx_add_cmd(chip, WRITE_REG_CMD, CARD_PULL_CTL3, 0xFF,
			     MS_D7_PD | XD_CE_PD | XD_CLE_PD | XD_CD_PU);
		rtsx_add_cmd(chip, WRITE_REG_CMD, CARD_PULL_CTL4, 0xFF,
			     XD_RDY_PD | SD_D3_PD | SD_D2_PD | XD_ALE_PD);
		rtsx_add_cmd(chip, WRITE_REG_CMD, CARD_PULL_CTL5, 0xFF,
			     MS_INS_PU | SD_WP_PD | SD_CD_PU | SD_CMD_PD);
		rtsx_add_cmd(chip, WRITE_REG_CMD, CARD_PULL_CTL6, 0xFF,
			     MS_D5_PD | MS_D4_PD);
	} else if (CHECK_PID(chip, 0x5288)) {
		if (CHECK_BARO_PKG(chip, QFN)) {
			rtsx_add_cmd(chip, WRITE_REG_CMD,
				     CARD_PULL_CTL1, 0xFF, 0x55);
			rtsx_add_cmd(chip, WRITE_REG_CMD,
				     CARD_PULL_CTL2, 0xFF, 0x45);
			rtsx_add_cmd(chip, WRITE_REG_CMD,
				     CARD_PULL_CTL3, 0xFF, 0x4B);
			rtsx_add_cmd(chip, WRITE_REG_CMD,
				     CARD_PULL_CTL4, 0xFF, 0x29);
		}
	}
	retval = rtsx_send_cmd(chip, MS_CARD, 100);
	if (retval < 0)
		return STATUS_FAIL;
	return STATUS_SUCCESS;
}
/*
 * Power-cycle and reconfigure the MS slot ahead of card detection:
 * reset the per-card state, cycle power (unless fast mode), enable the
 * clock and pulls, check for over-current, and program the controller
 * for 1-bit serial mode with interrupts masked.
 */
static int ms_prepare_reset(struct rtsx_chip *chip)
{
	struct ms_info *ms_card = &chip->ms_card;
	int retval;
	u8 oc_mask = 0;
	/* Forget everything we knew about the previous card. */
	ms_card->ms_type = 0;
	ms_card->check_ms_flow = 0;
	ms_card->switch_8bit_fail = 0;
	ms_card->delay_write.delay_write_flag = 0;
	ms_card->pro_under_formatting = 0;
	retval = ms_power_off_card3v3(chip);
	if (retval != STATUS_SUCCESS)
		return STATUS_FAIL;
	if (!chip->ft2_fast_mode)
		wait_timeout(250);
	retval = enable_card_clock(chip, MS_CARD);
	if (retval != STATUS_SUCCESS)
		return STATUS_FAIL;
	if (chip->asic_code) {
		retval = ms_pull_ctl_enable(chip);
		if (retval != STATUS_SUCCESS)
			return STATUS_FAIL;
	} else {
		retval = rtsx_write_register(chip, FPGA_PULL_CTL,
					     FPGA_MS_PULL_CTL_BIT | 0x20, 0);
		if (retval)
			return retval;
	}
	if (!chip->ft2_fast_mode) {
		retval = card_power_on(chip, MS_CARD);
		if (retval != STATUS_SUCCESS)
			return STATUS_FAIL;
		wait_timeout(150);
#ifdef SUPPORT_OCP
		/* The OCP status bits to test depend on the LUN layout. */
		if (CHECK_LUN_MODE(chip, SD_MS_2LUN))
			oc_mask = MS_OC_NOW | MS_OC_EVER;
		else
			oc_mask = SD_OC_NOW | SD_OC_EVER;
		if (chip->ocp_stat & oc_mask) {
			dev_dbg(rtsx_dev(chip), "Over current, OCPSTAT is 0x%x\n",
				chip->ocp_stat);
			return STATUS_FAIL;
		}
#endif
	}
	retval = rtsx_write_register(chip, CARD_OE, MS_OUTPUT_EN,
				     MS_OUTPUT_EN);
	if (retval)
		return retval;
	/* ASIC samples on the rising edge, FPGA on the falling edge. */
	if (chip->asic_code) {
		retval = rtsx_write_register(chip, MS_CFG, 0xFF,
					     SAMPLE_TIME_RISING |
					     PUSH_TIME_DEFAULT |
					     NO_EXTEND_TOGGLE |
					     MS_BUS_WIDTH_1);
		if (retval)
			return retval;
	} else {
		retval = rtsx_write_register(chip, MS_CFG, 0xFF,
					     SAMPLE_TIME_FALLING |
					     PUSH_TIME_DEFAULT |
					     NO_EXTEND_TOGGLE |
					     MS_BUS_WIDTH_1);
		if (retval)
			return retval;
	}
	retval = rtsx_write_register(chip, MS_TRANS_CFG, 0xFF,
				     NO_WAIT_INT | NO_AUTO_READ_INT_REG);
	if (retval)
		return retval;
	/* Abort any in-flight transfer and clear latched errors. */
	retval = rtsx_write_register(chip, CARD_STOP, MS_STOP | MS_CLR_ERR,
				     MS_STOP | MS_CLR_ERR);
	if (retval)
		return retval;
	retval = ms_set_init_para(chip);
	if (retval != STATUS_SUCCESS)
		return STATUS_FAIL;
	return STATUS_SUCCESS;
}
/*
 * Read the card's status registers and classify the media: detect
 * MS-Pro vs legacy MS (legacy falls back via check_ms_flow), derive the
 * write-protect state from the class register, and flag HG (8-bit
 * capable) cards from the interface-mode register.
 */
static int ms_identify_media_type(struct rtsx_chip *chip, int switch_8bit_bus)
{
	struct ms_info *ms_card = &chip->ms_card;
	int retval, i;
	u8 val;
	/* Window the READ_REG TPC onto the 6-byte status register block. */
	retval = ms_set_rw_reg_addr(chip, PRO_STATUS_REG, 6, SYSTEM_PARAM, 1);
	if (retval != STATUS_SUCCESS)
		return STATUS_FAIL;
	for (i = 0; i < MS_MAX_RETRY_COUNT; i++) {
		retval = ms_transfer_tpc(chip, MS_TM_READ_BYTES, READ_REG,
					 6, NO_WAIT_INT);
		if (retval == STATUS_SUCCESS)
			break;
	}
	if (i == MS_MAX_RETRY_COUNT)
		return STATUS_FAIL;
	/* Type register: 0x01 = MS-Pro; anything else triggers MS flow. */
	retval = rtsx_read_register(chip, PPBUF_BASE2 + 2, &val);
	if (retval)
		return retval;
	dev_dbg(rtsx_dev(chip), "Type register: 0x%x\n", val);
	if (val != 0x01) {
		if (val != 0x02)
			ms_card->check_ms_flow = 1;
		return STATUS_FAIL;
	}
	retval = rtsx_read_register(chip, PPBUF_BASE2 + 4, &val);
	if (retval)
		return retval;
	dev_dbg(rtsx_dev(chip), "Category register: 0x%x\n", val);
	if (val != 0) {
		ms_card->check_ms_flow = 1;
		return STATUS_FAIL;
	}
	/* Class 0: normal card, honour its write-protect bit.
	 * Classes 1-3: treat as read-only.  Anything else: not MS-Pro.
	 */
	retval = rtsx_read_register(chip, PPBUF_BASE2 + 5, &val);
	if (retval)
		return retval;
	dev_dbg(rtsx_dev(chip), "Class register: 0x%x\n", val);
	if (val == 0) {
		retval = rtsx_read_register(chip, PPBUF_BASE2, &val);
		if (retval)
			return retval;
		if (val & WRT_PRTCT)
			chip->card_wp |= MS_CARD;
		else
			chip->card_wp &= ~MS_CARD;
	} else if ((val == 0x01) || (val == 0x02) || (val == 0x03)) {
		chip->card_wp |= MS_CARD;
	} else {
		ms_card->check_ms_flow = 1;
		return STATUS_FAIL;
	}
	ms_card->ms_type |= TYPE_MSPRO;
	/* IF-mode 7 marks an HG card when 8-bit switching is requested. */
	retval = rtsx_read_register(chip, PPBUF_BASE2 + 3, &val);
	if (retval)
		return retval;
	dev_dbg(rtsx_dev(chip), "IF Mode register: 0x%x\n", val);
	if (val == 0) {
		ms_card->ms_type &= 0x0F;
	} else if (val == 7) {
		if (switch_8bit_bus)
			ms_card->ms_type |= MS_HG;
		else
			ms_card->ms_type &= 0x0F;
	} else {
		return STATUS_FAIL;
	}
	return STATUS_SUCCESS;
}
/*
 * Poll the card's INT register until the controller-ready (CED) bit is
 * set, giving up after ~100 polls or if the card is removed.  A final
 * INT read checks for a command-NK error, which marks the card
 * write-protected rather than failing outright.
 */
static int ms_confirm_cpu_startup(struct rtsx_chip *chip)
{
	int retval, i, k;
	u8 val;
	/* Confirm CPU StartUp */
	k = 0;
	do {
		if (detect_card_cd(chip, MS_CARD) != STATUS_SUCCESS) {
			ms_set_err_code(chip, MS_NO_CARD);
			return STATUS_FAIL;
		}
		for (i = 0; i < MS_MAX_RETRY_COUNT; i++) {
			retval = ms_read_bytes(chip, GET_INT, 1,
					       NO_WAIT_INT, &val, 1);
			if (retval == STATUS_SUCCESS)
				break;
		}
		if (i == MS_MAX_RETRY_COUNT)
			return STATUS_FAIL;
		/* Cap the overall wait at roughly 100 x 100 ms. */
		if (k > 100)
			return STATUS_FAIL;
		k++;
		wait_timeout(100);
	} while (!(val & INT_REG_CED));
	for (i = 0; i < MS_MAX_RETRY_COUNT; i++) {
		retval = ms_read_bytes(chip, GET_INT, 1, NO_WAIT_INT, &val, 1);
		if (retval == STATUS_SUCCESS)
			break;
	}
	if (i == MS_MAX_RETRY_COUNT)
		return STATUS_FAIL;
	if (val & INT_REG_ERR) {
		/* CMDNK with ERR means write-protected, not broken. */
		if (val & INT_REG_CMDNK)
			chip->card_wp |= (MS_CARD);
		else
			return STATUS_FAIL;
	}
	/* -- end confirm CPU startup */
	return STATUS_SUCCESS;
}
/*
 * Ask the card to switch its interface to 4-bit parallel mode by
 * writing the system parameter register, retrying on link errors.
 */
static int ms_switch_parallel_bus(struct rtsx_chip *chip)
{
	int retval, i;
	u8 data[2];
	data[0] = PARALLEL_4BIT_IF;
	data[1] = 0;
	for (i = 0; i < MS_MAX_RETRY_COUNT; i++) {
		retval = ms_write_bytes(chip, WRITE_REG, 1, NO_WAIT_INT,
					data, 2);
		if (retval == STATUS_SUCCESS)
			break;
	}
	if (retval != STATUS_SUCCESS)
		return STATUS_FAIL;
	return STATUS_SUCCESS;
}
/*
 * Switch an HG card to the 8-bit parallel interface: tell the card,
 * reprogram the controller bus width, rerun clock setup, then probe the
 * link with GET_INT transfers.
 */
static int ms_switch_8bit_bus(struct rtsx_chip *chip)
{
	struct ms_info *ms_card = &chip->ms_card;
	int retval, i;
	u8 data[2];
	data[0] = PARALLEL_8BIT_IF;
	data[1] = 0;
	for (i = 0; i < MS_MAX_RETRY_COUNT; i++) {
		retval = ms_write_bytes(chip, WRITE_REG, 1,
					NO_WAIT_INT, data, 2);
		if (retval == STATUS_SUCCESS)
			break;
	}
	if (retval != STATUS_SUCCESS)
		return STATUS_FAIL;
	retval = rtsx_write_register(chip, MS_CFG, 0x98,
				     MS_BUS_WIDTH_8 | SAMPLE_TIME_FALLING);
	if (retval)
		return retval;
	ms_card->ms_type |= MS_8BIT;
	retval = ms_set_init_para(chip);
	if (retval != STATUS_SUCCESS)
		return STATUS_FAIL;
	/*
	 * Verification loop: every GET_INT must succeed; a single
	 * failure aborts (this is not a retry loop).
	 */
	for (i = 0; i < MS_MAX_RETRY_COUNT; i++) {
		retval = ms_transfer_tpc(chip, MS_TM_READ_BYTES, GET_INT,
					 1, NO_WAIT_INT);
		if (retval != STATUS_SUCCESS)
			return STATUS_FAIL;
	}
	return STATUS_SUCCESS;
}
/*
 * Full MS-Pro bring-up: up to 3 attempts at reset + identification +
 * CPU-startup + parallel-bus switch, then move the controller to 4-bit
 * mode and, for HG cards, optionally to the 8-bit bus.
 */
static int ms_pro_reset_flow(struct rtsx_chip *chip, int switch_8bit_bus)
{
	struct ms_info *ms_card = &chip->ms_card;
	int retval, i;
	for (i = 0; i < 3; i++) {
		retval = ms_prepare_reset(chip);
		if (retval != STATUS_SUCCESS)
			return STATUS_FAIL;
		retval = ms_identify_media_type(chip, switch_8bit_bus);
		if (retval != STATUS_SUCCESS)
			return STATUS_FAIL;
		retval = ms_confirm_cpu_startup(chip);
		if (retval != STATUS_SUCCESS)
			return STATUS_FAIL;
		retval = ms_switch_parallel_bus(chip);
		if (retval != STATUS_SUCCESS) {
			/* Only retry if the card is still present. */
			if (detect_card_cd(chip, MS_CARD) != STATUS_SUCCESS) {
				ms_set_err_code(chip, MS_NO_CARD);
				return STATUS_FAIL;
			}
			continue;
		} else {
			break;
		}
	}
	if (retval != STATUS_SUCCESS)
		return STATUS_FAIL;
	/* Switch MS-PRO into Parallel mode */
	retval = rtsx_write_register(chip, MS_CFG, 0x18, MS_BUS_WIDTH_4);
	if (retval)
		return retval;
	retval = rtsx_write_register(chip, MS_CFG, PUSH_TIME_ODD,
				     PUSH_TIME_ODD);
	if (retval)
		return retval;
	retval = ms_set_init_para(chip);
	if (retval != STATUS_SUCCESS)
		return STATUS_FAIL;
	/* If MSPro HG Card, We shall try to switch to 8-bit bus */
	if (CHK_MSHG(ms_card) && chip->support_ms_8bit && switch_8bit_bus) {
		retval = ms_switch_8bit_bus(chip);
		if (retval != STATUS_SUCCESS) {
			/* Remember the failure so the caller can retry 4-bit. */
			ms_card->switch_8bit_fail = 1;
			return STATUS_FAIL;
		}
	}
	return STATUS_SUCCESS;
}
#ifdef XC_POWERCLASS
/*
 * Request an MS-XC power-class change: write the desired mode into the
 * data-count register window, issue XC_CHG_POWER, and check the link
 * status for a card-side rejection.
 */
static int msxc_change_power(struct rtsx_chip *chip, u8 mode)
{
	int retval;
	u8 buf[6];
	/* Flush any pending delayed writes before touching power state. */
	ms_cleanup_work(chip);
	retval = ms_set_rw_reg_addr(chip, 0, 0, PRO_DATA_COUNT1, 6);
	if (retval != STATUS_SUCCESS)
		return STATUS_FAIL;
	buf[0] = 0;
	buf[1] = mode;
	buf[2] = 0;
	buf[3] = 0;
	buf[4] = 0;
	buf[5] = 0;
	retval = ms_write_bytes(chip, PRO_WRITE_REG, 6, NO_WAIT_INT, buf, 6);
	if (retval != STATUS_SUCCESS)
		return STATUS_FAIL;
	retval = ms_send_cmd(chip, XC_CHG_POWER, WAIT_INT);
	if (retval != STATUS_SUCCESS)
		return STATUS_FAIL;
	retval = rtsx_read_register(chip, MS_TRANS_CFG, buf);
	if (retval)
		return retval;
	/* CMDNK/ERR here means the card refused the power class. */
	if (buf[0] & (MS_INT_CMDNK | MS_INT_ERR))
		return STATUS_FAIL;
	return STATUS_SUCCESS;
}
#endif
/*
 * Read and parse the MS-Pro attribute area: fetch the 32 KB attribute
 * block, validate its signature, locate the System Information entry
 * (and, with SUPPORT_PCGL_1P18, the Model Name entry), then derive the
 * card class, write-protect state, and capacity.  Returns
 * STATUS_SUCCESS / STATUS_FAIL / STATUS_ERROR (allocation failure).
 */
static int ms_read_attribute_info(struct rtsx_chip *chip)
{
	struct ms_info *ms_card = &chip->ms_card;
	int retval, i;
	u8 val, *buf, class_code, device_type, sub_class, data[16];
	u16 total_blk = 0, blk_size = 0;
#ifdef SUPPORT_MSXC
	u32 xc_total_blk = 0, xc_blk_size = 0;
#endif
	u32 sys_info_addr = 0, sys_info_size;
#ifdef SUPPORT_PCGL_1P18
	u32 model_name_addr = 0, model_name_size;
	int found_sys_info = 0, found_model_name = 0;
#endif
	retval = ms_set_rw_reg_addr(chip, PRO_INT_REG, 2, PRO_SYSTEM_PARAM, 7);
	if (retval != STATUS_SUCCESS)
		return STATUS_FAIL;
	/* Program system parameters: current bus width, 0x40 sectors. */
	if (CHK_MS8BIT(ms_card))
		data[0] = PARALLEL_8BIT_IF;
	else
		data[0] = PARALLEL_4BIT_IF;
	data[1] = 0;
	data[2] = 0x40;
	data[3] = 0;
	data[4] = 0;
	data[5] = 0;
	data[6] = 0;
	data[7] = 0;
	for (i = 0; i < MS_MAX_RETRY_COUNT; i++) {
		retval = ms_write_bytes(chip, PRO_WRITE_REG, 7, NO_WAIT_INT,
					data, 8);
		if (retval == STATUS_SUCCESS)
			break;
	}
	if (retval != STATUS_SUCCESS)
		return STATUS_FAIL;
	/* 0x40 sectors x 512 bytes = the whole attribute area. */
	buf = kmalloc(64 * 512, GFP_KERNEL);
	if (!buf)
		return STATUS_ERROR;
	for (i = 0; i < MS_MAX_RETRY_COUNT; i++) {
		retval = ms_send_cmd(chip, PRO_READ_ATRB, WAIT_INT);
		if (retval != STATUS_SUCCESS)
			continue;
		retval = rtsx_read_register(chip, MS_TRANS_CFG, &val);
		if (retval != STATUS_SUCCESS) {
			kfree(buf);
			return STATUS_FAIL;
		}
		/* The card must be requesting a data burst (BREQ). */
		if (!(val & MS_INT_BREQ)) {
			kfree(buf);
			return STATUS_FAIL;
		}
		retval = ms_transfer_data(chip, MS_TM_AUTO_READ,
					  PRO_READ_LONG_DATA, 0x40, WAIT_INT,
					  0, 0, buf, 64 * 512);
		if (retval == STATUS_SUCCESS)
			break;
		rtsx_clear_ms_error(chip);
	}
	if (retval != STATUS_SUCCESS) {
		kfree(buf);
		return STATUS_FAIL;
	}
	/* Drain any extra sectors the card still wants to send. */
	i = 0;
	do {
		retval = rtsx_read_register(chip, MS_TRANS_CFG, &val);
		if (retval != STATUS_SUCCESS) {
			kfree(buf);
			return STATUS_FAIL;
		}
		if ((val & MS_INT_CED) || !(val & MS_INT_BREQ))
			break;
		retval = ms_transfer_tpc(chip, MS_TM_NORMAL_READ,
					 PRO_READ_LONG_DATA, 0, WAIT_INT);
		if (retval != STATUS_SUCCESS) {
			kfree(buf);
			return STATUS_FAIL;
		}
		i++;
	} while (i < 1024);
	if (buf[0] != 0xa5 && buf[1] != 0xc3) {
		/* Signature code is wrong */
		kfree(buf);
		return STATUS_FAIL;
	}
	/* buf[4] is the number of 12-byte entry descriptors (1..12). */
	if (buf[4] < 1 || buf[4] > 12) {
		kfree(buf);
		return STATUS_FAIL;
	}
	for (i = 0; i < buf[4]; i++) {
		int cur_addr_off = 16 + i * 12;
#ifdef SUPPORT_MSXC
		/* Entry id 0x10 = System Info, 0x13 = XC System Info. */
		if (buf[cur_addr_off + 8] == 0x10 ||
		    buf[cur_addr_off + 8] == 0x13) {
#else
		if (buf[cur_addr_off + 8] == 0x10) {
#endif
			sys_info_addr = ((u32)buf[cur_addr_off + 0] << 24) |
				((u32)buf[cur_addr_off + 1] << 16) |
				((u32)buf[cur_addr_off + 2] << 8) |
				buf[cur_addr_off + 3];
			sys_info_size = ((u32)buf[cur_addr_off + 4] << 24) |
				((u32)buf[cur_addr_off + 5] << 16) |
				((u32)buf[cur_addr_off + 6] << 8) |
				buf[cur_addr_off + 7];
			dev_dbg(rtsx_dev(chip), "sys_info_addr = 0x%x, sys_info_size = 0x%x\n",
				sys_info_addr, sys_info_size);
			/* System Info must be 96 bytes within the buffer. */
			if (sys_info_size != 96) {
				kfree(buf);
				return STATUS_FAIL;
			}
			if (sys_info_addr < 0x1A0) {
				kfree(buf);
				return STATUS_FAIL;
			}
			if ((sys_info_size + sys_info_addr) > 0x8000) {
				kfree(buf);
				return STATUS_FAIL;
			}
#ifdef SUPPORT_MSXC
			if (buf[cur_addr_off + 8] == 0x13)
				ms_card->ms_type |= MS_XC;
#endif
#ifdef SUPPORT_PCGL_1P18
			found_sys_info = 1;
#else
			break;
#endif
		}
#ifdef SUPPORT_PCGL_1P18
		/* Entry id 0x15 = Model Name (48 bytes). */
		if (buf[cur_addr_off + 8] == 0x15) {
			model_name_addr = ((u32)buf[cur_addr_off + 0] << 24) |
				((u32)buf[cur_addr_off + 1] << 16) |
				((u32)buf[cur_addr_off + 2] << 8) |
				buf[cur_addr_off + 3];
			model_name_size = ((u32)buf[cur_addr_off + 4] << 24) |
				((u32)buf[cur_addr_off + 5] << 16) |
				((u32)buf[cur_addr_off + 6] << 8) |
				buf[cur_addr_off + 7];
			dev_dbg(rtsx_dev(chip), "model_name_addr = 0x%x, model_name_size = 0x%x\n",
				model_name_addr, model_name_size);
			if (model_name_size != 48) {
				kfree(buf);
				return STATUS_FAIL;
			}
			if (model_name_addr < 0x1A0) {
				kfree(buf);
				return STATUS_FAIL;
			}
			if ((model_name_size + model_name_addr) > 0x8000) {
				kfree(buf);
				return STATUS_FAIL;
			}
			found_model_name = 1;
		}
		if (found_sys_info && found_model_name)
			break;
#endif
	}
	/* Loop ran to completion without finding what we need. */
	if (i == buf[4]) {
		kfree(buf);
		return STATUS_FAIL;
	}
	class_code = buf[sys_info_addr + 0];
	device_type = buf[sys_info_addr + 56];
	sub_class = buf[sys_info_addr + 46];
#ifdef SUPPORT_MSXC
	if (CHK_MSXC(ms_card)) {
		/* XC cards use 32-bit block count/size fields. */
		xc_total_blk = ((u32)buf[sys_info_addr + 6] << 24) |
			((u32)buf[sys_info_addr + 7] << 16) |
			((u32)buf[sys_info_addr + 8] << 8) |
			buf[sys_info_addr + 9];
		xc_blk_size = ((u32)buf[sys_info_addr + 32] << 24) |
			((u32)buf[sys_info_addr + 33] << 16) |
			((u32)buf[sys_info_addr + 34] << 8) |
			buf[sys_info_addr + 35];
		dev_dbg(rtsx_dev(chip), "xc_total_blk = 0x%x, xc_blk_size = 0x%x\n",
			xc_total_blk, xc_blk_size);
	} else {
		total_blk = ((u16)buf[sys_info_addr + 6] << 8) |
			buf[sys_info_addr + 7];
		blk_size = ((u16)buf[sys_info_addr + 2] << 8) |
			buf[sys_info_addr + 3];
		dev_dbg(rtsx_dev(chip), "total_blk = 0x%x, blk_size = 0x%x\n",
			total_blk, blk_size);
	}
#else
	total_blk = ((u16)buf[sys_info_addr + 6] << 8) | buf[sys_info_addr + 7];
	blk_size = ((u16)buf[sys_info_addr + 2] << 8) | buf[sys_info_addr + 3];
	dev_dbg(rtsx_dev(chip), "total_blk = 0x%x, blk_size = 0x%x\n",
		total_blk, blk_size);
#endif
	dev_dbg(rtsx_dev(chip), "class_code = 0x%x, device_type = 0x%x, sub_class = 0x%x\n",
		class_code, device_type, sub_class);
	/* Keep raw copies for later MIB/inquiry reporting. */
	memcpy(ms_card->raw_sys_info, buf + sys_info_addr, 96);
#ifdef SUPPORT_PCGL_1P18
	memcpy(ms_card->raw_model_name, buf + model_name_addr, 48);
#endif
	kfree(buf);
#ifdef SUPPORT_MSXC
	if (CHK_MSXC(ms_card)) {
		if (class_code != 0x03)
			return STATUS_FAIL;
	} else {
		if (class_code != 0x02)
			return STATUS_FAIL;
	}
#else
	if (class_code != 0x02)
		return STATUS_FAIL;
#endif
	/* Device types 1-3 are read-only variants; others are invalid. */
	if (device_type != 0x00) {
		if (device_type == 0x01 || device_type == 0x02 ||
		    device_type == 0x03) {
			chip->card_wp |= MS_CARD;
		} else {
			return STATUS_FAIL;
		}
	}
	if (sub_class & 0xC0)
		return STATUS_FAIL;
	dev_dbg(rtsx_dev(chip), "class_code: 0x%x, device_type: 0x%x, sub_class: 0x%x\n",
		class_code, device_type, sub_class);
#ifdef SUPPORT_MSXC
	if (CHK_MSXC(ms_card)) {
		chip->capacity[chip->card2lun[MS_CARD]] =
			ms_card->capacity = xc_total_blk * xc_blk_size;
	} else {
		chip->capacity[chip->card2lun[MS_CARD]] =
			ms_card->capacity = total_blk * blk_size;
	}
#else
	ms_card->capacity = total_blk * blk_size;
	chip->capacity[chip->card2lun[MS_CARD]] = ms_card->capacity;
#endif
	return STATUS_SUCCESS;
}
#ifdef SUPPORT_MAGIC_GATE
static int mg_set_tpc_para_sub(struct rtsx_chip *chip,
int type, u8 mg_entry_num);
#endif
/*
 * reset_ms_pro - reset/initialization sequence for an MS Pro card.
 *
 * Runs the 8-bit reset flow first and falls back to the 4-bit flow when
 * the card rejected the 8-bit switch, then parses the attribute area.
 * With XC_POWERCLASS enabled, the card is moved to the highest power
 * class both host and card support, stepping the class down and redoing
 * the whole reset on failure.
 *
 * Returns STATUS_SUCCESS or STATUS_FAIL.
 */
static int reset_ms_pro(struct rtsx_chip *chip)
{
	struct ms_info *ms_card = &chip->ms_card;
	int retval;
#ifdef XC_POWERCLASS
	u8 change_power_class;

	/* Pick the highest host-enabled power class (2 > 1 > 0). */
	if (chip->ms_power_class_en & 0x02)
		change_power_class = 2;
	else if (chip->ms_power_class_en & 0x01)
		change_power_class = 1;
	else
		change_power_class = 0;
#endif
#ifdef XC_POWERCLASS
retry:
#endif
	retval = ms_pro_reset_flow(chip, 1);
	if (retval != STATUS_SUCCESS) {
		if (ms_card->switch_8bit_fail) {
			/* 8-bit switch was refused; retry the flow in 4-bit mode. */
			retval = ms_pro_reset_flow(chip, 0);
			if (retval != STATUS_SUCCESS)
				return STATUS_FAIL;
		} else {
			return STATUS_FAIL;
		}
	}
	retval = ms_read_attribute_info(chip);
	if (retval != STATUS_SUCCESS)
		return STATUS_FAIL;
#ifdef XC_POWERCLASS
	/* HG 8-bit cards keep the default power class. */
	if (CHK_HG8BIT(ms_card))
		change_power_class = 0;
	if (change_power_class && CHK_MSXC(ms_card)) {
		u8 power_class_en = chip->ms_power_class_en;

		dev_dbg(rtsx_dev(chip), "power_class_en = 0x%x\n",
			power_class_en);
		dev_dbg(rtsx_dev(chip), "change_power_class = %d\n",
			change_power_class);
		if (change_power_class)
			power_class_en &= (1 << (change_power_class - 1));
		else
			power_class_en = 0;
		if (power_class_en) {
			/* Card advertises its supported power-class mode in
			 * byte 46 of the raw system info (bits 4:3).
			 */
			u8 power_class_mode =
				(ms_card->raw_sys_info[46] & 0x18) >> 3;
			dev_dbg(rtsx_dev(chip), "power_class_mode = 0x%x",
				power_class_mode);
			if (change_power_class > power_class_mode)
				change_power_class = power_class_mode;
			if (change_power_class) {
				retval = msxc_change_power(chip,
							   change_power_class);
				if (retval != STATUS_SUCCESS) {
					/* Step down one class and redo the reset. */
					change_power_class--;
					goto retry;
				}
			}
		}
	}
#endif
#ifdef SUPPORT_MAGIC_GATE
	retval = mg_set_tpc_para_sub(chip, 0, 0);
	if (retval != STATUS_SUCCESS)
		return STATUS_FAIL;
#endif
	if (CHK_HG8BIT(ms_card))
		chip->card_bus_width[chip->card2lun[MS_CARD]] = 8;
	else
		chip->card_bus_width[chip->card2lun[MS_CARD]] = 4;
	return STATUS_SUCCESS;
}
/*
 * ms_read_status_reg - fetch STATUS_REG0/1 and fail on uncorrectable errors.
 *
 * Points the register read window at STATUS_REG0, reads two status bytes,
 * and reports MS_FLASH_READ_ERROR if any uncorrectable-error bit
 * (STS_UCDT / STS_UCEX / STS_UCFG) is set in the second byte.
 */
static int ms_read_status_reg(struct rtsx_chip *chip)
{
	u8 status[2];
	int rc;

	rc = ms_set_rw_reg_addr(chip, STATUS_REG0, 2, 0, 0);
	if (rc != STATUS_SUCCESS)
		return STATUS_FAIL;

	rc = ms_read_bytes(chip, READ_REG, 2, NO_WAIT_INT, status, 2);
	if (rc != STATUS_SUCCESS)
		return STATUS_FAIL;

	/* Any uncorrectable data/extra/flag error makes the page unusable. */
	if (status[1] & (STS_UCDT | STS_UCEX | STS_UCFG)) {
		ms_set_err_code(chip, MS_FLASH_READ_ERROR);
		return STATUS_FAIL;
	}

	return STATUS_SUCCESS;
}
/*
 * ms_read_extra_data - read the extra (spare/overwrite) area of one page.
 * @block_addr: physical block number
 * @page_num:   page within the block
 * @buf:        optional destination buffer (may be NULL)
 * @buf_len:    destination size; at most MS_EXTRA_SIZE bytes are copied
 *
 * Programs the system/address registers, issues BLOCK_READ, checks the
 * INT register and then reads MS_EXTRA_SIZE bytes of extra data.
 * Returns STATUS_SUCCESS or STATUS_FAIL.
 */
static int ms_read_extra_data(struct rtsx_chip *chip,
			      u16 block_addr, u8 page_num, u8 *buf, int buf_len)
{
	struct ms_info *ms_card = &chip->ms_card;
	int retval, i;
	u8 val, data[10];

	retval = ms_set_rw_reg_addr(chip, OVERWRITE_FLAG, MS_EXTRA_SIZE,
				    SYSTEM_PARAM, 6);
	if (retval != STATUS_SUCCESS)
		return STATUS_FAIL;
	if (CHK_MS4BIT(ms_card)) {
		/* Parallel interface */
		data[0] = 0x88;
	} else {
		/* Serial interface */
		data[0] = 0x80;
	}
	data[1] = 0;
	data[2] = (u8)(block_addr >> 8);
	data[3] = (u8)block_addr;
	/* 0x40 here vs 0x20 in ms_read_page — presumably selects the
	 * extra-data area; confirm against the MS register spec.
	 */
	data[4] = 0x40;
	data[5] = page_num;
	/* Writing the register set may need several attempts. */
	for (i = 0; i < MS_MAX_RETRY_COUNT; i++) {
		retval = ms_write_bytes(chip, WRITE_REG, 6, NO_WAIT_INT,
					data, 6);
		if (retval == STATUS_SUCCESS)
			break;
	}
	if (i == MS_MAX_RETRY_COUNT)
		return STATUS_FAIL;
	ms_set_err_code(chip, MS_NO_ERROR);
	for (i = 0; i < MS_MAX_RETRY_COUNT; i++) {
		retval = ms_send_cmd(chip, BLOCK_READ, WAIT_INT);
		if (retval == STATUS_SUCCESS)
			break;
	}
	if (i == MS_MAX_RETRY_COUNT)
		return STATUS_FAIL;
	ms_set_err_code(chip, MS_NO_ERROR);
	retval = ms_read_bytes(chip, GET_INT, 1, NO_WAIT_INT, &val, 1);
	if (retval != STATUS_SUCCESS)
		return STATUS_FAIL;
	if (val & INT_REG_CMDNK) {
		ms_set_err_code(chip, MS_CMD_NK);
		return STATUS_FAIL;
	}
	if (val & INT_REG_CED) {
		if (val & INT_REG_ERR) {
			/* Error flagged: verify it is correctable and
			 * re-point the register window before reading.
			 */
			retval = ms_read_status_reg(chip);
			if (retval != STATUS_SUCCESS)
				return STATUS_FAIL;
			retval = ms_set_rw_reg_addr(chip, OVERWRITE_FLAG,
						    MS_EXTRA_SIZE, SYSTEM_PARAM,
						    6);
			if (retval != STATUS_SUCCESS)
				return STATUS_FAIL;
		}
	}
	retval = ms_read_bytes(chip, READ_REG, MS_EXTRA_SIZE, NO_WAIT_INT,
			       data, MS_EXTRA_SIZE);
	if (retval != STATUS_SUCCESS)
		return STATUS_FAIL;
	/* Copy out only as much as the caller asked for. */
	if (buf && buf_len) {
		if (buf_len > MS_EXTRA_SIZE)
			buf_len = MS_EXTRA_SIZE;
		memcpy(buf, data, buf_len);
	}
	return STATUS_SUCCESS;
}
/*
 * ms_write_extra_data - write the extra (spare/overwrite) area of one page.
 * @block_addr: physical block number
 * @page_num:   page within the block
 * @buf:        MS_EXTRA_SIZE bytes of extra data to write (required)
 * @buf_len:    must be at least MS_EXTRA_SIZE
 *
 * Loads the address registers plus the extra-data payload in one register
 * write, issues BLOCK_WRITE and validates the INT register response.
 */
static int ms_write_extra_data(struct rtsx_chip *chip, u16 block_addr,
			       u8 page_num, u8 *buf, int buf_len)
{
	struct ms_info *ms_card = &chip->ms_card;
	u8 regs[16], int_stat;
	int rc, idx;

	if (!buf || buf_len < MS_EXTRA_SIZE)
		return STATUS_FAIL;

	rc = ms_set_rw_reg_addr(chip, OVERWRITE_FLAG, MS_EXTRA_SIZE,
				SYSTEM_PARAM, 6 + MS_EXTRA_SIZE);
	if (rc != STATUS_SUCCESS)
		return STATUS_FAIL;

	/* 0x88 = parallel (4-bit) interface, 0x80 = serial. */
	regs[0] = CHK_MS4BIT(ms_card) ? 0x88 : 0x80;
	regs[1] = 0;
	regs[2] = (u8)(block_addr >> 8);
	regs[3] = (u8)block_addr;
	regs[4] = 0x40;
	regs[5] = page_num;
	for (idx = 0; idx < MS_EXTRA_SIZE; idx++)
		regs[6 + idx] = buf[idx];

	rc = ms_write_bytes(chip, WRITE_REG, (6 + MS_EXTRA_SIZE),
			    NO_WAIT_INT, regs, 16);
	if (rc != STATUS_SUCCESS)
		return STATUS_FAIL;

	rc = ms_send_cmd(chip, BLOCK_WRITE, WAIT_INT);
	if (rc != STATUS_SUCCESS)
		return STATUS_FAIL;

	ms_set_err_code(chip, MS_NO_ERROR);
	rc = ms_read_bytes(chip, GET_INT, 1, NO_WAIT_INT, &int_stat, 1);
	if (rc != STATUS_SUCCESS)
		return STATUS_FAIL;

	if (int_stat & INT_REG_CMDNK) {
		ms_set_err_code(chip, MS_CMD_NK);
		return STATUS_FAIL;
	}
	if ((int_stat & INT_REG_CED) && (int_stat & INT_REG_ERR)) {
		ms_set_err_code(chip, MS_FLASH_WRITE_ERROR);
		return STATUS_FAIL;
	}

	return STATUS_SUCCESS;
}
/*
 * ms_read_page - read one page of user data into the chip's page buffer.
 * @block_addr: physical block number
 * @page_num:   page within the block
 *
 * Sets up the address registers, issues BLOCK_READ, evaluates the INT
 * register, and finally pulls the page via READ_PAGE_DATA.  A read error
 * without BREQ, or a BREQ error condition, fails immediately; an error
 * whose status check fails is latched as MS_FLASH_WRITE_ERROR and
 * re-checked after the transfer so the page data is still drained.
 */
static int ms_read_page(struct rtsx_chip *chip, u16 block_addr, u8 page_num)
{
	struct ms_info *ms_card = &chip->ms_card;
	int retval;
	u8 val, data[6];

	retval = ms_set_rw_reg_addr(chip, OVERWRITE_FLAG, MS_EXTRA_SIZE,
				    SYSTEM_PARAM, 6);
	if (retval != STATUS_SUCCESS)
		return STATUS_FAIL;
	/* 0x88 = parallel (4-bit) interface, 0x80 = serial. */
	if (CHK_MS4BIT(ms_card))
		data[0] = 0x88;
	else
		data[0] = 0x80;
	data[1] = 0;
	data[2] = (u8)(block_addr >> 8);
	data[3] = (u8)block_addr;
	data[4] = 0x20;
	data[5] = page_num;
	retval = ms_write_bytes(chip, WRITE_REG, 6, NO_WAIT_INT, data, 6);
	if (retval != STATUS_SUCCESS)
		return STATUS_FAIL;
	retval = ms_send_cmd(chip, BLOCK_READ, WAIT_INT);
	if (retval != STATUS_SUCCESS)
		return STATUS_FAIL;
	ms_set_err_code(chip, MS_NO_ERROR);
	retval = ms_read_bytes(chip, GET_INT, 1, NO_WAIT_INT, &val, 1);
	if (retval != STATUS_SUCCESS)
		return STATUS_FAIL;
	if (val & INT_REG_CMDNK) {
		ms_set_err_code(chip, MS_CMD_NK);
		return STATUS_FAIL;
	}
	if (val & INT_REG_CED) {
		if (val & INT_REG_ERR) {
			/* Error without buffer request: nothing to drain. */
			if (!(val & INT_REG_BREQ)) {
				ms_set_err_code(chip, MS_FLASH_READ_ERROR);
				return STATUS_FAIL;
			}
			/* Latch an uncorrectable error, but still run the
			 * data transfer below; it is re-checked afterwards.
			 */
			retval = ms_read_status_reg(chip);
			if (retval != STATUS_SUCCESS)
				ms_set_err_code(chip, MS_FLASH_WRITE_ERROR);
		} else {
			if (!(val & INT_REG_BREQ)) {
				ms_set_err_code(chip, MS_BREQ_ERROR);
				return STATUS_FAIL;
			}
		}
	}
	retval = ms_transfer_tpc(chip, MS_TM_NORMAL_READ, READ_PAGE_DATA,
				 0, NO_WAIT_INT);
	if (retval != STATUS_SUCCESS)
		return STATUS_FAIL;
	/* Fail if the earlier status check latched an error. */
	if (ms_check_err_code(chip, MS_FLASH_WRITE_ERROR))
		return STATUS_FAIL;
	return STATUS_SUCCESS;
}
/*
 * ms_set_bad_block - mark a physical block as bad in its overwrite flag.
 * @phy_blk: physical block to retire
 *
 * Reads the current extra data so the existing overwrite flag can be kept
 * with its block-OK bit cleared, then writes the updated flag back via a
 * BLOCK_WRITE of the overwrite-flag register.
 */
static int ms_set_bad_block(struct rtsx_chip *chip, u16 phy_blk)
{
	struct ms_info *ms_card = &chip->ms_card;
	u8 cur_extra[MS_EXTRA_SIZE], regs[8], int_stat;
	int rc;

	rc = ms_read_extra_data(chip, phy_blk, 0, cur_extra, MS_EXTRA_SIZE);
	if (rc != STATUS_SUCCESS)
		return STATUS_FAIL;

	rc = ms_set_rw_reg_addr(chip, OVERWRITE_FLAG, MS_EXTRA_SIZE,
				SYSTEM_PARAM, 7);
	if (rc != STATUS_SUCCESS)
		return STATUS_FAIL;

	ms_set_err_code(chip, MS_NO_ERROR);

	/* 0x88 = parallel (4-bit) interface, 0x80 = serial. */
	regs[0] = CHK_MS4BIT(ms_card) ? 0x88 : 0x80;
	regs[1] = 0;
	regs[2] = (u8)(phy_blk >> 8);
	regs[3] = (u8)phy_blk;
	regs[4] = 0x80;
	regs[5] = 0;
	regs[6] = cur_extra[0] & 0x7F;	/* clear the block-OK bit */
	regs[7] = 0xFF;

	rc = ms_write_bytes(chip, WRITE_REG, 7, NO_WAIT_INT, regs, 7);
	if (rc != STATUS_SUCCESS)
		return STATUS_FAIL;

	rc = ms_send_cmd(chip, BLOCK_WRITE, WAIT_INT);
	if (rc != STATUS_SUCCESS)
		return STATUS_FAIL;

	ms_set_err_code(chip, MS_NO_ERROR);
	rc = ms_read_bytes(chip, GET_INT, 1, NO_WAIT_INT, &int_stat, 1);
	if (rc != STATUS_SUCCESS)
		return STATUS_FAIL;

	if (int_stat & INT_REG_CMDNK) {
		ms_set_err_code(chip, MS_CMD_NK);
		return STATUS_FAIL;
	}
	if ((int_stat & INT_REG_CED) && (int_stat & INT_REG_ERR)) {
		ms_set_err_code(chip, MS_FLASH_WRITE_ERROR);
		return STATUS_FAIL;
	}

	return STATUS_SUCCESS;
}
/*
 * ms_erase_block - erase one physical block.
 * @phy_blk: physical block to erase
 *
 * Loads the address registers, then issues BLOCK_ERASE.  A CMDNK response
 * is retried up to three more times; if the card still refuses, the block
 * is marked bad and the call fails.
 */
static int ms_erase_block(struct rtsx_chip *chip, u16 phy_blk)
{
	struct ms_info *ms_card = &chip->ms_card;
	u8 regs[6], int_stat;
	int rc, attempt;

	rc = ms_set_rw_reg_addr(chip, OVERWRITE_FLAG, MS_EXTRA_SIZE,
				SYSTEM_PARAM, 6);
	if (rc != STATUS_SUCCESS)
		return STATUS_FAIL;

	ms_set_err_code(chip, MS_NO_ERROR);

	/* 0x88 = parallel (4-bit) interface, 0x80 = serial. */
	regs[0] = CHK_MS4BIT(ms_card) ? 0x88 : 0x80;
	regs[1] = 0;
	regs[2] = (u8)(phy_blk >> 8);
	regs[3] = (u8)phy_blk;
	regs[4] = 0;
	regs[5] = 0;

	rc = ms_write_bytes(chip, WRITE_REG, 6, NO_WAIT_INT, regs, 6);
	if (rc != STATUS_SUCCESS)
		return STATUS_FAIL;

	for (attempt = 0; ; attempt++) {
		rc = ms_send_cmd(chip, BLOCK_ERASE, WAIT_INT);
		if (rc != STATUS_SUCCESS)
			return STATUS_FAIL;

		ms_set_err_code(chip, MS_NO_ERROR);
		rc = ms_read_bytes(chip, GET_INT, 1, NO_WAIT_INT,
				   &int_stat, 1);
		if (rc != STATUS_SUCCESS)
			return STATUS_FAIL;

		if (!(int_stat & INT_REG_CMDNK))
			break;

		/* Same retry budget as the original goto loop: the
		 * command is sent at most four times in total.
		 */
		if (attempt >= 3) {
			ms_set_err_code(chip, MS_CMD_NK);
			ms_set_bad_block(chip, phy_blk);
			return STATUS_FAIL;
		}
	}

	if ((int_stat & INT_REG_CED) && (int_stat & INT_REG_ERR)) {
		ms_set_err_code(chip, MS_FLASH_WRITE_ERROR);
		return STATUS_FAIL;
	}

	return STATUS_SUCCESS;
}
/*
 * ms_set_page_status - build an extra-data image flagging a page's status.
 * @log_blk:   logical block number recorded in the extra data
 * @type:      set_PS_NG (page NG) or any other value (page data error)
 * @extra:     MS_EXTRA_SIZE-byte buffer to fill (ignored if NULL/too small)
 * @extra_len: size of @extra
 *
 * In both cases the block-status bit stays 1 (block OK); only the
 * page-status bits differ.
 */
static void ms_set_page_status(u16 log_blk, u8 type, u8 *extra, int extra_len)
{
	if (!extra || extra_len < MS_EXTRA_SIZE)
		return;

	memset(extra, 0xFF, MS_EXTRA_SIZE);

	/* 0xB8: page NG, block OK; 0x98: page data error, block OK. */
	extra[0] = (type == set_PS_NG) ? 0xB8 : 0x98;

	/* Record the logical block address (big-endian). */
	extra[2] = (u8)(log_blk >> 8);
	extra[3] = (u8)log_blk;
}
/*
 * ms_init_page - stamp a page range of a block with fresh extra data.
 * @phy_blk:    physical block to initialize
 * @log_blk:    logical block number to record
 * @start_page: first page (inclusive)
 * @end_page:   last page (exclusive)
 *
 * Writes "block OK, page OK, data erased" extra data carrying @log_blk to
 * every page in [start_page, end_page), aborting if the card is removed.
 */
static int ms_init_page(struct rtsx_chip *chip, u16 phy_blk, u16 log_blk,
			u8 start_page, u8 end_page)
{
	u8 ow_data[MS_EXTRA_SIZE];
	u8 page;
	int rc;

	memset(ow_data, 0xff, MS_EXTRA_SIZE);
	ow_data[0] = 0xf8;	/* Block, page OK, data erased */
	ow_data[1] = 0xff;
	ow_data[2] = (u8)(log_blk >> 8);
	ow_data[3] = (u8)log_blk;

	for (page = start_page; page < end_page; page++) {
		if (detect_card_cd(chip, MS_CARD) != STATUS_SUCCESS) {
			ms_set_err_code(chip, MS_NO_CARD);
			return STATUS_FAIL;
		}

		rc = ms_write_extra_data(chip, phy_blk, page,
					 ow_data, MS_EXTRA_SIZE);
		if (rc != STATUS_SUCCESS)
			return STATUS_FAIL;
	}

	return STATUS_SUCCESS;
}
/*
 * ms_copy_page - copy a page range from one physical block to another.
 * @old_blk:    source physical block
 * @new_blk:    destination physical block
 * @log_blk:    logical block number to record in the destination extra data
 * @start_page: first page (inclusive)
 * @end_page:   last page (exclusive)
 *
 * For each page: read it from @old_blk through the chip page buffer, then
 * write it to @new_blk along with rebuilt extra data.  Uncorrectable read
 * errors mark the page NG on the source, flag the block bad, record a
 * data-error status on the destination page and continue with the next
 * page.  After page 0 is copied, the source block's page-0 overwrite flag
 * is cleared (0xEF) so the old block is no longer picked up as current.
 */
static int ms_copy_page(struct rtsx_chip *chip, u16 old_blk, u16 new_blk,
			u16 log_blk, u8 start_page, u8 end_page)
{
	struct ms_info *ms_card = &chip->ms_card;
	bool uncorrect_flag = false;
	int retval, rty_cnt;
	u8 extra[MS_EXTRA_SIZE], val, i, j, data[16];

	dev_dbg(rtsx_dev(chip), "Copy page from 0x%x to 0x%x, logical block is 0x%x\n",
		old_blk, new_blk, log_blk);
	dev_dbg(rtsx_dev(chip), "start_page = %d, end_page = %d\n",
		start_page, end_page);
	retval = ms_read_extra_data(chip, new_blk, 0, extra, MS_EXTRA_SIZE);
	if (retval != STATUS_SUCCESS)
		return STATUS_FAIL;
	retval = ms_read_status_reg(chip);
	if (retval != STATUS_SUCCESS)
		return STATUS_FAIL;
	retval = rtsx_read_register(chip, PPBUF_BASE2, &val);
	if (retval)
		return retval;
	/* Stale data left in the page buffer must be flushed first. */
	if (val & BUF_FULL) {
		retval = ms_send_cmd(chip, CLEAR_BUF, WAIT_INT);
		if (retval != STATUS_SUCCESS)
			return STATUS_FAIL;
		retval = ms_read_bytes(chip, GET_INT, 1, NO_WAIT_INT, &val, 1);
		if (retval != STATUS_SUCCESS)
			return STATUS_FAIL;
		if (!(val & INT_REG_CED)) {
			ms_set_err_code(chip, MS_FLASH_WRITE_ERROR);
			return STATUS_FAIL;
		}
	}
	for (i = start_page; i < end_page; i++) {
		if (detect_card_cd(chip, MS_CARD) != STATUS_SUCCESS) {
			ms_set_err_code(chip, MS_NO_CARD);
			return STATUS_FAIL;
		}
		retval = ms_read_extra_data(chip, old_blk, i, extra,
					    MS_EXTRA_SIZE);
		if (retval != STATUS_SUCCESS)
			return STATUS_FAIL;
		retval = ms_set_rw_reg_addr(chip, OVERWRITE_FLAG,
					    MS_EXTRA_SIZE, SYSTEM_PARAM, 6);
		if (retval != STATUS_SUCCESS)
			return STATUS_FAIL;
		ms_set_err_code(chip, MS_NO_ERROR);
		/* 0x88 = parallel (4-bit) interface, 0x80 = serial. */
		if (CHK_MS4BIT(ms_card))
			data[0] = 0x88;
		else
			data[0] = 0x80;
		data[1] = 0;
		data[2] = (u8)(old_blk >> 8);
		data[3] = (u8)old_blk;
		data[4] = 0x20;
		data[5] = i;
		retval = ms_write_bytes(chip, WRITE_REG, 6, NO_WAIT_INT,
					data, 6);
		if (retval != STATUS_SUCCESS)
			return STATUS_FAIL;
		retval = ms_send_cmd(chip, BLOCK_READ, WAIT_INT);
		if (retval != STATUS_SUCCESS)
			return STATUS_FAIL;
		ms_set_err_code(chip, MS_NO_ERROR);
		retval = ms_read_bytes(chip, GET_INT, 1, NO_WAIT_INT, &val, 1);
		if (retval != STATUS_SUCCESS)
			return STATUS_FAIL;
		if (val & INT_REG_CMDNK) {
			ms_set_err_code(chip, MS_CMD_NK);
			return STATUS_FAIL;
		}
		if (val & INT_REG_CED) {
			if (val & INT_REG_ERR) {
				/* Distinguish correctable from uncorrectable
				 * read errors via the status registers.
				 */
				retval = ms_read_status_reg(chip);
				if (retval != STATUS_SUCCESS) {
					uncorrect_flag = true;
					dev_dbg(rtsx_dev(chip), "Uncorrectable error\n");
				} else {
					uncorrect_flag = false;
				}
				/* Drain the page data in either case. */
				retval = ms_transfer_tpc(chip,
							 MS_TM_NORMAL_READ,
							 READ_PAGE_DATA,
							 0, NO_WAIT_INT);
				if (retval != STATUS_SUCCESS)
					return STATUS_FAIL;
				if (uncorrect_flag) {
					/* Mark the source page NG (page 0
					 * additionally clears the update bit),
					 * flag the block bad, record a data
					 * error on the destination page and
					 * skip the copy of this page.
					 */
					ms_set_page_status(log_blk, set_PS_NG,
							   extra,
							   MS_EXTRA_SIZE);
					if (i == 0)
						extra[0] &= 0xEF;
					ms_write_extra_data(chip, old_blk, i,
							    extra,
							    MS_EXTRA_SIZE);
					dev_dbg(rtsx_dev(chip), "page %d : extra[0] = 0x%x\n",
						i, extra[0]);
					MS_SET_BAD_BLOCK_FLG(ms_card);
					ms_set_page_status(log_blk,
							   set_PS_error, extra,
							   MS_EXTRA_SIZE);
					ms_write_extra_data(chip, new_blk, i,
							    extra,
							    MS_EXTRA_SIZE);
					continue;
				}
				/* Correctable: push the corrected page data
				 * back into the chip buffer, with retries.
				 */
				for (rty_cnt = 0; rty_cnt < MS_MAX_RETRY_COUNT;
				     rty_cnt++) {
					retval = ms_transfer_tpc(chip,
								 MS_TM_NORMAL_WRITE,
								 WRITE_PAGE_DATA,
								 0, NO_WAIT_INT);
					if (retval == STATUS_SUCCESS)
						break;
				}
				if (rty_cnt == MS_MAX_RETRY_COUNT)
					return STATUS_FAIL;
			}
			if (!(val & INT_REG_BREQ)) {
				ms_set_err_code(chip, MS_BREQ_ERROR);
				return STATUS_FAIL;
			}
		}
		/* Write phase: address registers plus rebuilt extra data. */
		retval = ms_set_rw_reg_addr(chip, OVERWRITE_FLAG, MS_EXTRA_SIZE,
					    SYSTEM_PARAM, (6 + MS_EXTRA_SIZE));
		if (retval != STATUS_SUCCESS)
			return STATUS_FAIL;
		ms_set_err_code(chip, MS_NO_ERROR);
		if (CHK_MS4BIT(ms_card))
			data[0] = 0x88;
		else
			data[0] = 0x80;
		data[1] = 0;
		data[2] = (u8)(new_blk >> 8);
		data[3] = (u8)new_blk;
		data[4] = 0x20;
		data[5] = i;
		/* Keep the source overwrite flag unless its page-status
		 * bits (0x60) are all set, in which case write fresh 0xF8.
		 */
		if ((extra[0] & 0x60) != 0x60)
			data[6] = extra[0];
		else
			data[6] = 0xF8;
		data[6 + 1] = 0xFF;
		data[6 + 2] = (u8)(log_blk >> 8);
		data[6 + 3] = (u8)log_blk;
		for (j = 4; j <= MS_EXTRA_SIZE; j++)
			data[6 + j] = 0xFF;
		retval = ms_write_bytes(chip, WRITE_REG, (6 + MS_EXTRA_SIZE),
					NO_WAIT_INT, data, 16);
		if (retval != STATUS_SUCCESS)
			return STATUS_FAIL;
		retval = ms_send_cmd(chip, BLOCK_WRITE, WAIT_INT);
		if (retval != STATUS_SUCCESS)
			return STATUS_FAIL;
		ms_set_err_code(chip, MS_NO_ERROR);
		retval = ms_read_bytes(chip, GET_INT, 1, NO_WAIT_INT, &val, 1);
		if (retval != STATUS_SUCCESS)
			return STATUS_FAIL;
		if (val & INT_REG_CMDNK) {
			ms_set_err_code(chip, MS_CMD_NK);
			return STATUS_FAIL;
		}
		if (val & INT_REG_CED) {
			if (val & INT_REG_ERR) {
				ms_set_err_code(chip, MS_FLASH_WRITE_ERROR);
				return STATUS_FAIL;
			}
		}
		/* After copying page 0, clear the old block's page-0
		 * update-status bit (0xEF) so it stops being current.
		 */
		if (i == 0) {
			retval = ms_set_rw_reg_addr(chip, OVERWRITE_FLAG,
						    MS_EXTRA_SIZE, SYSTEM_PARAM,
						    7);
			if (retval != STATUS_SUCCESS)
				return STATUS_FAIL;
			ms_set_err_code(chip, MS_NO_ERROR);
			if (CHK_MS4BIT(ms_card))
				data[0] = 0x88;
			else
				data[0] = 0x80;
			data[1] = 0;
			data[2] = (u8)(old_blk >> 8);
			data[3] = (u8)old_blk;
			data[4] = 0x80;
			data[5] = 0;
			data[6] = 0xEF;
			data[7] = 0xFF;
			retval = ms_write_bytes(chip, WRITE_REG, 7,
						NO_WAIT_INT, data, 8);
			if (retval != STATUS_SUCCESS)
				return STATUS_FAIL;
			retval = ms_send_cmd(chip, BLOCK_WRITE, WAIT_INT);
			if (retval != STATUS_SUCCESS)
				return STATUS_FAIL;
			ms_set_err_code(chip, MS_NO_ERROR);
			retval = ms_read_bytes(chip, GET_INT, 1,
					       NO_WAIT_INT, &val, 1);
			if (retval != STATUS_SUCCESS)
				return STATUS_FAIL;
			if (val & INT_REG_CMDNK) {
				ms_set_err_code(chip, MS_CMD_NK);
				return STATUS_FAIL;
			}
			if (val & INT_REG_CED) {
				if (val & INT_REG_ERR) {
					ms_set_err_code(chip,
							MS_FLASH_WRITE_ERROR);
					return STATUS_FAIL;
				}
			}
		}
	}
	return STATUS_SUCCESS;
}
/*
 * reset_ms - reset/initialization sequence for a legacy (non-Pro) MS card.
 *
 * Resets the card, locates the boot block by scanning the first
 * MAX_DEFECTIVE_BLOCK + 2 physical blocks, parses the boot block (system
 * info, block/page geometry, capacity), and switches the interface to
 * 4-bit parallel mode when the boot block says it is supported.  A bad
 * boot-block candidate restarts the scan from the next block (RE_SEARCH).
 *
 * Returns STATUS_SUCCESS, STATUS_FAIL, or a register-access error code.
 */
static int reset_ms(struct rtsx_chip *chip)
{
	struct ms_info *ms_card = &chip->ms_card;
	int retval;
	u16 i, reg_addr, block_size;
	u8 val, extra[MS_EXTRA_SIZE], j, *ptr;
#ifndef SUPPORT_MAGIC_GATE
	u16 eblock_cnt;
#endif
	retval = ms_prepare_reset(chip);
	if (retval != STATUS_SUCCESS)
		return STATUS_FAIL;
	ms_card->ms_type |= TYPE_MS;
	retval = ms_send_cmd(chip, MS_RESET, NO_WAIT_INT);
	if (retval != STATUS_SUCCESS)
		return STATUS_FAIL;
	retval = ms_read_status_reg(chip);
	if (retval != STATUS_SUCCESS)
		return STATUS_FAIL;
	retval = rtsx_read_register(chip, PPBUF_BASE2, &val);
	if (retval)
		return retval;
	/* Hardware write-protect switch state. */
	if (val & WRT_PRTCT)
		chip->card_wp |= MS_CARD;
	else
		chip->card_wp &= ~MS_CARD;
	i = 0;
RE_SEARCH:
	/* Search Boot Block */
	while (i < (MAX_DEFECTIVE_BLOCK + 2)) {
		if (detect_card_cd(chip, MS_CARD) != STATUS_SUCCESS) {
			ms_set_err_code(chip, MS_NO_CARD);
			return STATUS_FAIL;
		}
		retval = ms_read_extra_data(chip, i, 0, extra, MS_EXTRA_SIZE);
		if (retval != STATUS_SUCCESS) {
			i++;
			continue;
		}
		/* Boot block: block-OK set, boot-block bit cleared. */
		if (extra[0] & BLOCK_OK) {
			if (!(extra[1] & NOT_BOOT_BLOCK)) {
				ms_card->boot_block = i;
				break;
			}
		}
		i++;
	}
	if (i == (MAX_DEFECTIVE_BLOCK + 2)) {
		dev_dbg(rtsx_dev(chip), "No boot block found!");
		return STATUS_FAIL;
	}
	/* Verify the first three pages are readable; otherwise resume the
	 * search at the next block.
	 */
	for (j = 0; j < 3; j++) {
		retval = ms_read_page(chip, ms_card->boot_block, j);
		if (retval != STATUS_SUCCESS) {
			if (ms_check_err_code(chip, MS_FLASH_WRITE_ERROR)) {
				i = ms_card->boot_block + 1;
				ms_set_err_code(chip, MS_NO_ERROR);
				goto RE_SEARCH;
			}
		}
	}
	retval = ms_read_page(chip, ms_card->boot_block, 0);
	if (retval != STATUS_SUCCESS)
		return STATUS_FAIL;
	/* Read MS system information as sys_info */
	rtsx_init_cmd(chip);
	for (i = 0; i < 96; i++)
		rtsx_add_cmd(chip, READ_REG_CMD, PPBUF_BASE2 + 0x1A0 + i, 0, 0);
	retval = rtsx_send_cmd(chip, MS_CARD, 100);
	if (retval < 0)
		return STATUS_FAIL;
	ptr = rtsx_get_cmd_data(chip);
	memcpy(ms_card->raw_sys_info, ptr, 96);
	/* Read useful block contents */
	rtsx_init_cmd(chip);
	rtsx_add_cmd(chip, READ_REG_CMD, HEADER_ID0, 0, 0);
	rtsx_add_cmd(chip, READ_REG_CMD, HEADER_ID1, 0, 0);
	for (reg_addr = DISABLED_BLOCK0; reg_addr <= DISABLED_BLOCK3;
	     reg_addr++)
		rtsx_add_cmd(chip, READ_REG_CMD, reg_addr, 0, 0);
	for (reg_addr = BLOCK_SIZE_0; reg_addr <= PAGE_SIZE_1; reg_addr++)
		rtsx_add_cmd(chip, READ_REG_CMD, reg_addr, 0, 0);
	rtsx_add_cmd(chip, READ_REG_CMD, MS_device_type, 0, 0);
	rtsx_add_cmd(chip, READ_REG_CMD, MS_4bit_support, 0, 0);
	retval = rtsx_send_cmd(chip, MS_CARD, 100);
	if (retval < 0)
		return STATUS_FAIL;
	ptr = rtsx_get_cmd_data(chip);
	dev_dbg(rtsx_dev(chip), "Boot block data:\n");
	dev_dbg(rtsx_dev(chip), "%*ph\n", 16, ptr);
	/* Block ID error
	 * HEADER_ID0, HEADER_ID1
	 */
	if (ptr[0] != 0x00 || ptr[1] != 0x01) {
		i = ms_card->boot_block + 1;
		goto RE_SEARCH;
	}
	/* Page size error
	 * PAGE_SIZE_0, PAGE_SIZE_1
	 */
	if (ptr[12] != 0x02 || ptr[13] != 0x00) {
		i = ms_card->boot_block + 1;
		goto RE_SEARCH;
	}
	/* Device type 1/3 marks the medium read-only. */
	if (ptr[14] == 1 || ptr[14] == 3)
		chip->card_wp |= MS_CARD;
	/* BLOCK_SIZE_0, BLOCK_SIZE_1 */
	block_size = ((u16)ptr[6] << 8) | ptr[7];
	if (block_size == 0x0010) {
		/* Block size 16KB */
		ms_card->block_shift = 5;
		ms_card->page_off = 0x1F;
	} else if (block_size == 0x0008) {
		/* Block size 8KB */
		ms_card->block_shift = 4;
		ms_card->page_off = 0x0F;
	}
	/* BLOCK_COUNT_0, BLOCK_COUNT_1 */
	ms_card->total_block = ((u16)ptr[8] << 8) | ptr[9];
#ifdef SUPPORT_MAGIC_GATE
	/* With MagicGate, capacity is a fixed table keyed on the
	 * effective-block count byte and the block size.
	 */
	j = ptr[10];
	if (ms_card->block_shift == 4)	{ /* 4MB or 8MB */
		if (j < 2)	{ /* Effective block for 4MB: 0x1F0 */
			ms_card->capacity = 0x1EE0;
		} else	{ /* Effective block for 8MB: 0x3E0 */
			ms_card->capacity = 0x3DE0;
		}
	} else	{ /* 16MB, 32MB, 64MB or 128MB */
		if (j < 5)	{ /* Effective block for 16MB: 0x3E0 */
			ms_card->capacity = 0x7BC0;
		} else if (j < 0xA) { /* Effective block for 32MB: 0x7C0 */
			ms_card->capacity = 0xF7C0;
		} else if (j < 0x11) { /* Effective block for 64MB: 0xF80 */
			ms_card->capacity = 0x1EF80;
		} else	{ /* Effective block for 128MB: 0x1F00 */
			ms_card->capacity = 0x3DF00;
		}
	}
#else
	/* EBLOCK_COUNT_0, EBLOCK_COUNT_1 */
	eblock_cnt = ((u16)ptr[10] << 8) | ptr[11];
	ms_card->capacity = ((u32)eblock_cnt - 2) << ms_card->block_shift;
#endif
	chip->capacity[chip->card2lun[MS_CARD]] = ms_card->capacity;
	/* Switch I/F Mode */
	if (ptr[15]) {
		retval = ms_set_rw_reg_addr(chip, 0, 0, SYSTEM_PARAM, 1);
		if (retval != STATUS_SUCCESS)
			return STATUS_FAIL;
		retval = rtsx_write_register(chip, PPBUF_BASE2, 0xFF, 0x88);
		if (retval)
			return retval;
		retval = rtsx_write_register(chip, PPBUF_BASE2 + 1, 0xFF, 0);
		if (retval)
			return retval;
		retval = ms_transfer_tpc(chip, MS_TM_WRITE_BYTES, WRITE_REG, 1,
					 NO_WAIT_INT);
		if (retval != STATUS_SUCCESS)
			return STATUS_FAIL;
		retval = rtsx_write_register(chip, MS_CFG,
					     0x58 | MS_NO_CHECK_INT,
					     MS_BUS_WIDTH_4 |
					     PUSH_TIME_ODD |
					     MS_NO_CHECK_INT);
		if (retval)
			return retval;
		ms_card->ms_type |= MS_4BIT;
	}
	if (CHK_MS4BIT(ms_card))
		chip->card_bus_width[chip->card2lun[MS_CARD]] = 4;
	else
		chip->card_bus_width[chip->card2lun[MS_CARD]] = 1;
	return STATUS_SUCCESS;
}
/*
 * ms_init_l2p_tbl - allocate per-segment bookkeeping and load the defect
 * list from page 1 of the boot block.
 *
 * One zone_entry covers 512 physical blocks.  Per-segment L2P/free tables
 * are allocated lazily later in ms_build_l2p_tbl(); here only the defect
 * lists are populated.  On any failure the segment array is freed again.
 */
static int ms_init_l2p_tbl(struct rtsx_chip *chip)
{
	struct ms_info *ms_card = &chip->ms_card;
	int size, i, seg_no, retval;
	u16 defect_block, reg_addr;
	u8 val1, val2;

	ms_card->segment_cnt = ms_card->total_block >> 9;
	dev_dbg(rtsx_dev(chip), "ms_card->segment_cnt = %d\n",
		ms_card->segment_cnt);
	size = ms_card->segment_cnt * sizeof(struct zone_entry);
	ms_card->segment = vzalloc(size);
	if (!ms_card->segment)
		return STATUS_FAIL;
	/* Page 1 of the boot block holds the defective-block list. */
	retval = ms_read_page(chip, ms_card->boot_block, 1);
	if (retval != STATUS_SUCCESS)
		goto INIT_FAIL;
	reg_addr = PPBUF_BASE2;
	/* Entries are big-endian u16 block numbers, 0xFFFF-terminated. */
	for (i = 0; i < (((ms_card->total_block >> 9) * 10) + 1); i++) {
		int block_no;
		/* NOTE(review): rtsx_read_register is compared against
		 * STATUS_SUCCESS here but checked with plain `if (retval)`
		 * elsewhere in this file — confirm the two conventions
		 * agree before changing either form.
		 */
		retval = rtsx_read_register(chip, reg_addr++, &val1);
		if (retval != STATUS_SUCCESS)
			goto INIT_FAIL;
		retval = rtsx_read_register(chip, reg_addr++, &val2);
		if (retval != STATUS_SUCCESS)
			goto INIT_FAIL;
		defect_block = ((u16)val1 << 8) | val2;
		if (defect_block == 0xFFFF)
			break;
		seg_no = defect_block / 512;
		block_no = ms_card->segment[seg_no].disable_count++;
		ms_card->segment[seg_no].defect_list[block_no] = defect_block;
	}
	for (i = 0; i < ms_card->segment_cnt; i++) {
		ms_card->segment[i].build_flag = 0;
		ms_card->segment[i].l2p_table = NULL;
		ms_card->segment[i].free_table = NULL;
		ms_card->segment[i].get_index = 0;
		ms_card->segment[i].set_index = 0;
		ms_card->segment[i].unused_blk_cnt = 0;
		dev_dbg(rtsx_dev(chip), "defective block count of segment %d is %d\n",
			i, ms_card->segment[i].disable_count);
	}
	return STATUS_SUCCESS;
INIT_FAIL:
	vfree(ms_card->segment);
	ms_card->segment = NULL;
	return STATUS_FAIL;
}
/*
 * ms_get_l2p_tbl - look up the physical block mapped at a logical offset.
 *
 * Returns 0xFFFF (unmapped) when the segment array or the segment's L2P
 * table has not been allocated yet.
 */
static u16 ms_get_l2p_tbl(struct rtsx_chip *chip, int seg_no, u16 log_off)
{
	struct ms_info *ms_card = &chip->ms_card;
	struct zone_entry *seg;

	if (!ms_card->segment)
		return 0xFFFF;

	seg = &ms_card->segment[seg_no];
	if (!seg->l2p_table)
		return 0xFFFF;

	return seg->l2p_table[log_off];
}
/*
 * ms_set_l2p_tbl - record a logical-to-physical mapping.
 *
 * Silently does nothing when the segment array or the segment's L2P
 * table has not been allocated yet.
 */
static void ms_set_l2p_tbl(struct rtsx_chip *chip,
			   int seg_no, u16 log_off, u16 phy_blk)
{
	struct ms_info *ms_card = &chip->ms_card;
	struct zone_entry *seg;

	if (!ms_card->segment)
		return;

	seg = &ms_card->segment[seg_no];
	if (!seg->l2p_table)
		return;

	seg->l2p_table[log_off] = phy_blk;
}
/*
 * ms_set_unused_block - return a physical block to its segment's free pool.
 *
 * The free table is used as a ring buffer: the write index wraps at
 * MS_FREE_TABLE_CNT.
 */
static void ms_set_unused_block(struct rtsx_chip *chip, u16 phy_blk)
{
	struct ms_info *ms_card = &chip->ms_card;
	struct zone_entry *seg = &ms_card->segment[phy_blk >> 9];

	seg->free_table[seg->set_index] = phy_blk;
	if (++seg->set_index >= MS_FREE_TABLE_CNT)
		seg->set_index = 0;
	seg->unused_blk_cnt++;
}
/*
 * ms_get_unused_block - take a physical block from a segment's free pool.
 *
 * Returns 0xFFFF when the pool is empty.  The consumed slot is poisoned
 * with 0xFFFF and the read index wraps at MS_FREE_TABLE_CNT.
 */
static u16 ms_get_unused_block(struct rtsx_chip *chip, int seg_no)
{
	struct ms_info *ms_card = &chip->ms_card;
	struct zone_entry *seg = &ms_card->segment[seg_no];
	u16 blk;

	if (seg->unused_blk_cnt <= 0)
		return 0xFFFF;

	blk = seg->free_table[seg->get_index];
	seg->free_table[seg->get_index] = 0xFFFF;
	if (++seg->get_index >= MS_FREE_TABLE_CNT)
		seg->get_index = 0;
	seg->unused_blk_cnt--;

	return blk;
}
/*
 * First logical block number of each 512-block physical segment.
 * Segment 0 holds 494 logical blocks, every later segment holds 496;
 * ms_start_idx[n + 1] bounds the logical range of segment n.
 */
static const unsigned short ms_start_idx[] = {0, 494, 990, 1486, 1982, 2478,
					      2974, 3470, 3966, 4462, 4958,
					      5454, 5950, 6446, 6942, 7438,
					      7934};
/*
 * ms_arbitrate_l2p - resolve two physical blocks claiming one logical slot.
 * @phy_blk: newly found physical block
 * @log_off: logical offset within the segment
 * @us1:     update-status bit taken from @phy_blk's extra data
 * @us2:     update-status bit of the block currently in the L2P table
 *
 * The losing block is erased (unless the card is write-protected) and
 * pushed back to the free pool.  If the update bits differ, the block
 * with the cleared bit (us == 0) wins; on a tie the higher-numbered
 * block wins.  Always returns STATUS_SUCCESS.
 */
static int ms_arbitrate_l2p(struct rtsx_chip *chip, u16 phy_blk,
			    u16 log_off, u8 us1, u8 us2)
{
	struct ms_info *ms_card = &chip->ms_card;
	struct zone_entry *segment;
	int seg_no;
	u16 tmp_blk;

	seg_no = (int)phy_blk >> 9;
	segment = &ms_card->segment[seg_no];
	tmp_blk = segment->l2p_table[log_off];
	if (us1 != us2) {
		if (us1 == 0) {
			/* New block is the up-to-date one: replace mapping. */
			if (!(chip->card_wp & MS_CARD))
				ms_erase_block(chip, tmp_blk);
			ms_set_unused_block(chip, tmp_blk);
			segment->l2p_table[log_off] = phy_blk;
		} else {
			/* Existing mapping stays; recycle the new block. */
			if (!(chip->card_wp & MS_CARD))
				ms_erase_block(chip, phy_blk);
			ms_set_unused_block(chip, phy_blk);
		}
	} else {
		if (phy_blk < tmp_blk) {
			/* Tie: keep the higher-numbered block. */
			if (!(chip->card_wp & MS_CARD))
				ms_erase_block(chip, phy_blk);
			ms_set_unused_block(chip, phy_blk);
		} else {
			if (!(chip->card_wp & MS_CARD))
				ms_erase_block(chip, tmp_blk);
			ms_set_unused_block(chip, tmp_blk);
			segment->l2p_table[log_off] = phy_blk;
		}
	}
	return STATUS_SUCCESS;
}
/*
 * ms_build_l2p_tbl - build the logical-to-physical table for one segment.
 * @seg_no: segment index (0-based; segment 0 maps 494 logical blocks,
 *          later segments 496 — see ms_start_idx[])
 *
 * Scans every physical block of the segment, classifies it via its extra
 * data (defective, free, mapped, or conflicting with an existing mapping),
 * then fills unmapped logical slots from the free pool and, for segment 0,
 * relocates any data block numbered below the boot block.  The card is
 * write-protected when too few spare blocks remain.
 */
static int ms_build_l2p_tbl(struct rtsx_chip *chip, int seg_no)
{
	struct ms_info *ms_card = &chip->ms_card;
	struct zone_entry *segment;
	bool defect_flag;
	int retval, table_size, disable_cnt, i;
	u16 start, end, phy_blk, log_blk, tmp_blk, idx;
	u8 extra[MS_EXTRA_SIZE], us1, us2;

	dev_dbg(rtsx_dev(chip), "%s: %d\n", __func__, seg_no);
	if (!ms_card->segment) {
		retval = ms_init_l2p_tbl(chip);
		if (retval != STATUS_SUCCESS)
			return retval;
	}
	if (ms_card->segment[seg_no].build_flag) {
		dev_dbg(rtsx_dev(chip), "l2p table of segment %d has been built\n",
			seg_no);
		return STATUS_SUCCESS;
	}
	if (seg_no == 0)
		table_size = 494;
	else
		table_size = 496;
	segment = &ms_card->segment[seg_no];
	/* Lazily allocate the L2P and free tables (u16 entries). */
	if (!segment->l2p_table) {
		segment->l2p_table = vmalloc(array_size(table_size, 2));
		if (!segment->l2p_table)
			goto BUILD_FAIL;
	}
	memset((u8 *)(segment->l2p_table), 0xff, array_size(table_size, 2));
	if (!segment->free_table) {
		segment->free_table = vmalloc(array_size(MS_FREE_TABLE_CNT, 2));
		if (!segment->free_table)
			goto BUILD_FAIL;
	}
	memset((u8 *)(segment->free_table), 0xff, array_size(MS_FREE_TABLE_CNT, 2));
	/* Physical range covered by this segment: 512 blocks. */
	start = (u16)seg_no << 9;
	end = (u16)(seg_no + 1) << 9;
	disable_cnt = segment->disable_count;
	segment->get_index = 0;
	segment->set_index = 0;
	segment->unused_blk_cnt = 0;
	for (phy_blk = start; phy_blk < end; phy_blk++) {
		/* Skip blocks on the defect list. */
		if (disable_cnt) {
			defect_flag = false;
			for (i = 0; i < segment->disable_count; i++) {
				if (phy_blk == segment->defect_list[i]) {
					defect_flag = true;
					break;
				}
			}
			if (defect_flag) {
				disable_cnt--;
				continue;
			}
		}
		retval = ms_read_extra_data(chip, phy_blk, 0,
					    extra, MS_EXTRA_SIZE);
		if (retval != STATUS_SUCCESS) {
			dev_dbg(rtsx_dev(chip), "read extra data fail\n");
			ms_set_bad_block(chip, phy_blk);
			continue;
		}
		/* In the last segment, erase leftover translation-table
		 * blocks (unless write-protected).
		 */
		if (seg_no == ms_card->segment_cnt - 1) {
			if (!(extra[1] & NOT_TRANSLATION_TABLE)) {
				if (!(chip->card_wp & MS_CARD)) {
					retval = ms_erase_block(chip, phy_blk);
					if (retval != STATUS_SUCCESS)
						continue;
					extra[2] = 0xff;
					extra[3] = 0xff;
				}
			}
		}
		/* Skip bad blocks, boot blocks and pages marked not-OK. */
		if (!(extra[0] & BLOCK_OK))
			continue;
		if (!(extra[1] & NOT_BOOT_BLOCK))
			continue;
		if ((extra[0] & PAGE_OK) != PAGE_OK)
			continue;
		log_blk = ((u16)extra[2] << 8) | extra[3];
		/* Unmapped or out-of-segment logical address: recycle. */
		if (log_blk == 0xFFFF) {
			if (!(chip->card_wp & MS_CARD)) {
				retval = ms_erase_block(chip, phy_blk);
				if (retval != STATUS_SUCCESS)
					continue;
			}
			ms_set_unused_block(chip, phy_blk);
			continue;
		}
		if (log_blk < ms_start_idx[seg_no] ||
		    log_blk >= ms_start_idx[seg_no + 1]) {
			if (!(chip->card_wp & MS_CARD)) {
				retval = ms_erase_block(chip, phy_blk);
				if (retval != STATUS_SUCCESS)
					continue;
			}
			ms_set_unused_block(chip, phy_blk);
			continue;
		}
		idx = log_blk - ms_start_idx[seg_no];
		if (segment->l2p_table[idx] == 0xFFFF) {
			segment->l2p_table[idx] = phy_blk;
			continue;
		}
		/* Two blocks claim the same logical slot: arbitrate using
		 * the update-status bits of both blocks' extra data.
		 */
		us1 = extra[0] & 0x10;
		tmp_blk = segment->l2p_table[idx];
		retval = ms_read_extra_data(chip, tmp_blk, 0,
					    extra, MS_EXTRA_SIZE);
		if (retval != STATUS_SUCCESS)
			continue;
		us2 = extra[0] & 0x10;
		(void)ms_arbitrate_l2p(chip, phy_blk,
				       log_blk - ms_start_idx[seg_no], us1, us2);
	}
	segment->build_flag = 1;
	dev_dbg(rtsx_dev(chip), "unused block count: %d\n",
		segment->unused_blk_cnt);
	/* Logical Address Confirmation Process */
	if (seg_no == ms_card->segment_cnt - 1) {
		if (segment->unused_blk_cnt < 2)
			chip->card_wp |= MS_CARD;
	} else {
		if (segment->unused_blk_cnt < 1)
			chip->card_wp |= MS_CARD;
	}
	if (chip->card_wp & MS_CARD)
		return STATUS_SUCCESS;
	/* Assign spare blocks to any logical slot still unmapped. */
	for (log_blk = ms_start_idx[seg_no];
	     log_blk < ms_start_idx[seg_no + 1]; log_blk++) {
		idx = log_blk - ms_start_idx[seg_no];
		if (segment->l2p_table[idx] == 0xFFFF) {
			phy_blk = ms_get_unused_block(chip, seg_no);
			if (phy_blk == 0xFFFF) {
				chip->card_wp |= MS_CARD;
				return STATUS_SUCCESS;
			}
			retval = ms_init_page(chip, phy_blk, log_blk, 0, 1);
			if (retval != STATUS_SUCCESS)
				goto BUILD_FAIL;
			segment->l2p_table[idx] = phy_blk;
			if (seg_no == ms_card->segment_cnt - 1) {
				if (segment->unused_blk_cnt < 2) {
					chip->card_wp |= MS_CARD;
					return STATUS_SUCCESS;
				}
			} else {
				if (segment->unused_blk_cnt < 1) {
					chip->card_wp |= MS_CARD;
					return STATUS_SUCCESS;
				}
			}
		}
	}
	/* Make boot block be the first normal block */
	if (seg_no == 0) {
		for (log_blk = 0; log_blk < 494; log_blk++) {
			tmp_blk = segment->l2p_table[log_blk];
			if (tmp_blk < ms_card->boot_block) {
				dev_dbg(rtsx_dev(chip), "Boot block is not the first normal block.\n");
				if (chip->card_wp & MS_CARD)
					break;
				phy_blk = ms_get_unused_block(chip, 0);
				retval = ms_copy_page(chip, tmp_blk, phy_blk,
						      log_blk, 0,
						      ms_card->page_off + 1);
				if (retval != STATUS_SUCCESS)
					return STATUS_FAIL;
				segment->l2p_table[log_blk] = phy_blk;
				retval = ms_set_bad_block(chip, tmp_blk);
				if (retval != STATUS_SUCCESS)
					return STATUS_FAIL;
			}
		}
	}
	return STATUS_SUCCESS;
BUILD_FAIL:
	segment->build_flag = 0;
	vfree(segment->l2p_table);
	segment->l2p_table = NULL;
	vfree(segment->free_table);
	segment->free_table = NULL;
	return STATUS_FAIL;
}
/*
 * reset_ms_card - top-level reset entry point for an inserted MS/MS Pro card.
 *
 * Clears the per-card state, powers up clock and card selection, tries the
 * MS Pro reset first and falls back to the legacy flow when the card asks
 * for it.  For legacy cards, the L2P table of the last segment is built to
 * detect (and erase) a leftover translation-table block.
 *
 * Fix: the last-segment index used to be computed from
 * ms_card->total_block at function entry — i.e. from the *previous* card's
 * value (or 0 on first insertion, giving seg_no == -1 and an out-of-bounds
 * segment access in ms_build_l2p_tbl()).  total_block is only populated by
 * reset_ms(), so compute seg_no right before it is needed.
 *
 * Returns STATUS_SUCCESS or STATUS_FAIL.
 */
int reset_ms_card(struct rtsx_chip *chip)
{
	struct ms_info *ms_card = &chip->ms_card;
	int seg_no;
	int retval;
	memset(ms_card, 0, sizeof(struct ms_info));
	retval = enable_card_clock(chip, MS_CARD);
	if (retval != STATUS_SUCCESS)
		return STATUS_FAIL;
	retval = select_card(chip, MS_CARD);
	if (retval != STATUS_SUCCESS)
		return STATUS_FAIL;
	ms_card->ms_type = 0;
	retval = reset_ms_pro(chip);
	if (retval != STATUS_SUCCESS) {
		/* Not a Pro card (or Pro reset failed): try legacy flow
		 * only when the Pro reset asked for it.
		 */
		if (ms_card->check_ms_flow) {
			retval = reset_ms(chip);
			if (retval != STATUS_SUCCESS)
				return STATUS_FAIL;
		} else {
			return STATUS_FAIL;
		}
	}
	retval = ms_set_init_para(chip);
	if (retval != STATUS_SUCCESS)
		return STATUS_FAIL;
	if (!CHK_MSPRO(ms_card)) {
		/* Build table for the last segment,
		 * to check if L2P table block exists, erasing it.
		 * total_block is valid only after reset_ms() above.
		 */
		seg_no = ms_card->total_block / 512 - 1;
		retval = ms_build_l2p_tbl(chip, seg_no);
		if (retval != STATUS_SUCCESS)
			return STATUS_FAIL;
	}
	dev_dbg(rtsx_dev(chip), "ms_card->ms_type = 0x%x\n", ms_card->ms_type);
	return STATUS_SUCCESS;
}
/*
 * mspro_set_rw_cmd - send an MS Pro EX_SET_CMD with sector address/count.
 * @start_sec: first sector (32-bit, big-endian on the wire)
 * @sec_cnt:   number of sectors (16-bit, big-endian on the wire)
 * @cmd:       Pro command code (e.g. PRO_READ_DATA / PRO_WRITE_DATA)
 *
 * The 7-byte argument is retried up to MS_MAX_RETRY_COUNT times.
 */
static int mspro_set_rw_cmd(struct rtsx_chip *chip,
			    u32 start_sec, u16 sec_cnt, u8 cmd)
{
	u8 arg[8];
	int rc, attempt;

	arg[0] = cmd;
	arg[1] = (u8)(sec_cnt >> 8);
	arg[2] = (u8)sec_cnt;
	arg[3] = (u8)(start_sec >> 24);
	arg[4] = (u8)(start_sec >> 16);
	arg[5] = (u8)(start_sec >> 8);
	arg[6] = (u8)start_sec;
	arg[7] = 0;

	for (attempt = 0; attempt < MS_MAX_RETRY_COUNT; attempt++) {
		rc = ms_write_bytes(chip, PRO_EX_SET_CMD, 7,
				    WAIT_INT, arg, 8);
		if (rc == STATUS_SUCCESS)
			return STATUS_SUCCESS;
	}

	return STATUS_FAIL;
}
/*
 * mspro_stop_seq_mode - leave sequential (streaming) transfer mode.
 *
 * If sequential mode is active, switches the clock back, clears the
 * sequential-mode bookkeeping, sends PRO_STOP to the card and flushes
 * the chip's ring buffer.  Errors from the final commands are ignored,
 * matching best-effort teardown semantics.
 */
void mspro_stop_seq_mode(struct rtsx_chip *chip)
{
	struct ms_info *ms_card = &chip->ms_card;

	if (!ms_card->seq_mode)
		return;

	if (ms_switch_clock(chip) != STATUS_SUCCESS)
		return;

	ms_card->seq_mode = 0;
	ms_card->total_sec_cnt = 0;
	ms_send_cmd(chip, PRO_STOP, WAIT_INT);
	rtsx_write_register(chip, RBCTL, RB_FLUSH, RB_FLUSH);
}
/*
 * ms_auto_tune_clock - step the MS clock down after transfer trouble.
 *
 * On ASIC parts the clock value drops by 20 while above 30; on FPGA
 * parts it steps through the discrete ladder CLK_80 -> CLK_60 -> CLK_40.
 * The new clock is then applied via ms_switch_clock().
 */
static inline int ms_auto_tune_clock(struct rtsx_chip *chip)
{
	struct ms_info *ms_card = &chip->ms_card;

	if (chip->asic_code) {
		if (ms_card->ms_clock > 30)
			ms_card->ms_clock -= 20;
	} else if (ms_card->ms_clock == CLK_80) {
		ms_card->ms_clock = CLK_60;
	} else if (ms_card->ms_clock == CLK_60) {
		ms_card->ms_clock = CLK_40;
	}

	if (ms_switch_clock(chip) != STATUS_SUCCESS)
		return STATUS_FAIL;

	return STATUS_SUCCESS;
}
static int mspro_rw_multi_sector(struct scsi_cmnd *srb,
struct rtsx_chip *chip, u32 start_sector,
u16 sector_cnt)
{
struct ms_info *ms_card = &chip->ms_card;
bool mode_2k = false;
int retval;
u16 count;
u8 val, trans_mode, rw_tpc, rw_cmd;
ms_set_err_code(chip, MS_NO_ERROR);
ms_card->cleanup_counter = 0;
if (CHK_MSHG(ms_card)) {
if ((start_sector % 4) || (sector_cnt % 4)) {
if (srb->sc_data_direction == DMA_FROM_DEVICE) {
rw_tpc = PRO_READ_LONG_DATA;
rw_cmd = PRO_READ_DATA;
} else {
rw_tpc = PRO_WRITE_LONG_DATA;
rw_cmd = PRO_WRITE_DATA;
}
} else {
if (srb->sc_data_direction == DMA_FROM_DEVICE) {
rw_tpc = PRO_READ_QUAD_DATA;
rw_cmd = PRO_READ_2K_DATA;
} else {
rw_tpc = PRO_WRITE_QUAD_DATA;
rw_cmd = PRO_WRITE_2K_DATA;
}
mode_2k = true;
}
} else {
if (srb->sc_data_direction == DMA_FROM_DEVICE) {
rw_tpc = PRO_READ_LONG_DATA;
rw_cmd = PRO_READ_DATA;
} else {
rw_tpc = PRO_WRITE_LONG_DATA;
rw_cmd = PRO_WRITE_DATA;
}
}
retval = ms_switch_clock(chip);
if (retval != STATUS_SUCCESS)
return STATUS_FAIL;
if (srb->sc_data_direction == DMA_FROM_DEVICE)
trans_mode = MS_TM_AUTO_READ;
else
trans_mode = MS_TM_AUTO_WRITE;
retval = rtsx_read_register(chip, MS_TRANS_CFG, &val);
if (retval)
return retval;
if (ms_card->seq_mode) {
if (ms_card->pre_dir != srb->sc_data_direction ||
((ms_card->pre_sec_addr + ms_card->pre_sec_cnt) !=
start_sector) ||
(mode_2k && (ms_card->seq_mode & MODE_512_SEQ)) ||
(!mode_2k && (ms_card->seq_mode & MODE_2K_SEQ)) ||
!(val & MS_INT_BREQ) ||
((ms_card->total_sec_cnt + sector_cnt) > 0xFE00)) {
ms_card->seq_mode = 0;
ms_card->total_sec_cnt = 0;
if (val & MS_INT_BREQ) {
retval = ms_send_cmd(chip, PRO_STOP, WAIT_INT);
if (retval != STATUS_SUCCESS)
return STATUS_FAIL;
rtsx_write_register(chip, RBCTL, RB_FLUSH,
RB_FLUSH);
}
}
}
if (!ms_card->seq_mode) {
ms_card->total_sec_cnt = 0;
if (sector_cnt >= SEQ_START_CRITERIA) {
if ((ms_card->capacity - start_sector) > 0xFE00)
count = 0xFE00;
else
count = (u16)(ms_card->capacity - start_sector);
if (count > sector_cnt) {
if (mode_2k)
ms_card->seq_mode = MODE_2K_SEQ;
else
ms_card->seq_mode = MODE_512_SEQ;
}
} else {
count = sector_cnt;
}
retval = mspro_set_rw_cmd(chip, start_sector, count, rw_cmd);
if (retval != STATUS_SUCCESS) {
ms_card->seq_mode = 0;
return STATUS_FAIL;
}
}
retval = ms_transfer_data(chip, trans_mode, rw_tpc, sector_cnt,
WAIT_INT, mode_2k, scsi_sg_count(srb),
scsi_sglist(srb), scsi_bufflen(srb));
if (retval != STATUS_SUCCESS) {
ms_card->seq_mode = 0;
rtsx_read_register(chip, MS_TRANS_CFG, &val);
rtsx_clear_ms_error(chip);
if (detect_card_cd(chip, MS_CARD) != STATUS_SUCCESS) {
chip->rw_need_retry = 0;
dev_dbg(rtsx_dev(chip), "No card exist, exit %s\n",
__func__);
return STATUS_FAIL;
}
if (val & MS_INT_BREQ)
ms_send_cmd(chip, PRO_STOP, WAIT_INT);
if (val & (MS_CRC16_ERR | MS_RDY_TIMEOUT)) {
dev_dbg(rtsx_dev(chip), "MSPro CRC error, tune clock!\n");
chip->rw_need_retry = 1;
ms_auto_tune_clock(chip);
}
return retval;
}
if (ms_card->seq_mode) {
ms_card->pre_sec_addr = start_sector;
ms_card->pre_sec_cnt = sector_cnt;
ms_card->pre_dir = srb->sc_data_direction;
ms_card->total_sec_cnt += sector_cnt;
}
return STATUS_SUCCESS;
}
static int mspro_read_format_progress(struct rtsx_chip *chip,
const int short_data_len)
{
struct ms_info *ms_card = &chip->ms_card;
int retval, i;
u32 total_progress, cur_progress;
u8 cnt, tmp;
u8 data[8];
dev_dbg(rtsx_dev(chip), "%s, short_data_len = %d\n", __func__,
short_data_len);
retval = ms_switch_clock(chip);
if (retval != STATUS_SUCCESS) {
ms_card->format_status = FORMAT_FAIL;
return STATUS_FAIL;
}
retval = rtsx_read_register(chip, MS_TRANS_CFG, &tmp);
if (retval != STATUS_SUCCESS) {
ms_card->format_status = FORMAT_FAIL;
return STATUS_FAIL;
}
if (!(tmp & MS_INT_BREQ)) {
if ((tmp & (MS_INT_CED | MS_INT_BREQ | MS_INT_CMDNK |
MS_INT_ERR)) == MS_INT_CED) {
ms_card->format_status = FORMAT_SUCCESS;
return STATUS_SUCCESS;
}
ms_card->format_status = FORMAT_FAIL;
return STATUS_FAIL;
}
if (short_data_len >= 256)
cnt = 0;
else
cnt = (u8)short_data_len;
retval = rtsx_write_register(chip, MS_CFG, MS_NO_CHECK_INT,
MS_NO_CHECK_INT);
if (retval != STATUS_SUCCESS) {
ms_card->format_status = FORMAT_FAIL;
return STATUS_FAIL;
}
retval = ms_read_bytes(chip, PRO_READ_SHORT_DATA, cnt, WAIT_INT,
data, 8);
if (retval != STATUS_SUCCESS) {
ms_card->format_status = FORMAT_FAIL;
return STATUS_FAIL;
}
total_progress = (data[0] << 24) | (data[1] << 16) |
(data[2] << 8) | data[3];
cur_progress = (data[4] << 24) | (data[5] << 16) |
(data[6] << 8) | data[7];
dev_dbg(rtsx_dev(chip), "total_progress = %d, cur_progress = %d\n",
total_progress, cur_progress);
if (total_progress == 0) {
ms_card->progress = 0;
} else {
u64 ulltmp = (u64)cur_progress * (u64)65535;
do_div(ulltmp, total_progress);
ms_card->progress = (u16)ulltmp;
}
dev_dbg(rtsx_dev(chip), "progress = %d\n", ms_card->progress);
for (i = 0; i < 5000; i++) {
retval = rtsx_read_register(chip, MS_TRANS_CFG, &tmp);
if (retval != STATUS_SUCCESS) {
ms_card->format_status = FORMAT_FAIL;
return STATUS_FAIL;
}
if (tmp & (MS_INT_CED | MS_INT_CMDNK |
MS_INT_BREQ | MS_INT_ERR))
break;
wait_timeout(1);
}
retval = rtsx_write_register(chip, MS_CFG, MS_NO_CHECK_INT, 0);
if (retval != STATUS_SUCCESS) {
ms_card->format_status = FORMAT_FAIL;
return STATUS_FAIL;
}
if (i == 5000) {
ms_card->format_status = FORMAT_FAIL;
return STATUS_FAIL;
}
if (tmp & (MS_INT_CMDNK | MS_INT_ERR)) {
ms_card->format_status = FORMAT_FAIL;
return STATUS_FAIL;
}
if (tmp & MS_INT_CED) {
ms_card->format_status = FORMAT_SUCCESS;
ms_card->pro_under_formatting = 0;
} else if (tmp & MS_INT_BREQ) {
ms_card->format_status = FORMAT_IN_PROGRESS;
} else {
ms_card->format_status = FORMAT_FAIL;
ms_card->pro_under_formatting = 0;
return STATUS_FAIL;
}
return STATUS_SUCCESS;
}
void mspro_polling_format_status(struct rtsx_chip *chip)
{
struct ms_info *ms_card = &chip->ms_card;
int i;
if (ms_card->pro_under_formatting &&
(rtsx_get_stat(chip) != RTSX_STAT_SS)) {
rtsx_set_stat(chip, RTSX_STAT_RUN);
for (i = 0; i < 65535; i++) {
mspro_read_format_progress(chip, MS_SHORT_DATA_LEN);
if (ms_card->format_status != FORMAT_IN_PROGRESS)
break;
}
}
}
int mspro_format(struct scsi_cmnd *srb, struct rtsx_chip *chip,
int short_data_len, bool quick_format)
{
struct ms_info *ms_card = &chip->ms_card;
int retval, i;
u8 buf[8], tmp;
u16 para;
retval = ms_switch_clock(chip);
if (retval != STATUS_SUCCESS)
return STATUS_FAIL;
retval = ms_set_rw_reg_addr(chip, 0x00, 0x00, PRO_TPC_PARM, 0x01);
if (retval != STATUS_SUCCESS)
return STATUS_FAIL;
memset(buf, 0, 2);
switch (short_data_len) {
case 32:
buf[0] = 0;
break;
case 64:
buf[0] = 1;
break;
case 128:
buf[0] = 2;
break;
case 256:
default:
buf[0] = 3;
break;
}
for (i = 0; i < MS_MAX_RETRY_COUNT; i++) {
retval = ms_write_bytes(chip, PRO_WRITE_REG, 1,
NO_WAIT_INT, buf, 2);
if (retval == STATUS_SUCCESS)
break;
}
if (i == MS_MAX_RETRY_COUNT)
return STATUS_FAIL;
if (quick_format)
para = 0x0000;
else
para = 0x0001;
retval = mspro_set_rw_cmd(chip, 0, para, PRO_FORMAT);
if (retval != STATUS_SUCCESS)
return STATUS_FAIL;
retval = rtsx_read_register(chip, MS_TRANS_CFG, &tmp);
if (retval)
return retval;
if (tmp & (MS_INT_CMDNK | MS_INT_ERR))
return STATUS_FAIL;
if ((tmp & (MS_INT_BREQ | MS_INT_CED)) == MS_INT_BREQ) {
ms_card->pro_under_formatting = 1;
ms_card->progress = 0;
ms_card->format_status = FORMAT_IN_PROGRESS;
return STATUS_SUCCESS;
}
if (tmp & MS_INT_CED) {
ms_card->pro_under_formatting = 0;
ms_card->progress = 0;
ms_card->format_status = FORMAT_SUCCESS;
set_sense_type(chip, SCSI_LUN(srb), SENSE_TYPE_NO_SENSE);
return STATUS_SUCCESS;
}
return STATUS_FAIL;
}
static int ms_read_multiple_pages(struct rtsx_chip *chip, u16 phy_blk,
u16 log_blk, u8 start_page, u8 end_page,
u8 *buf, unsigned int *index,
unsigned int *offset)
{
struct ms_info *ms_card = &chip->ms_card;
int retval, i;
u8 extra[MS_EXTRA_SIZE], page_addr, val, trans_cfg, data[6];
u8 *ptr;
retval = ms_read_extra_data(chip, phy_blk, start_page,
extra, MS_EXTRA_SIZE);
if (retval == STATUS_SUCCESS) {
if ((extra[1] & 0x30) != 0x30) {
ms_set_err_code(chip, MS_FLASH_READ_ERROR);
return STATUS_FAIL;
}
}
retval = ms_set_rw_reg_addr(chip, OVERWRITE_FLAG, MS_EXTRA_SIZE,
SYSTEM_PARAM, 6);
if (retval != STATUS_SUCCESS)
return STATUS_FAIL;
if (CHK_MS4BIT(ms_card))
data[0] = 0x88;
else
data[0] = 0x80;
data[1] = 0;
data[2] = (u8)(phy_blk >> 8);
data[3] = (u8)phy_blk;
data[4] = 0;
data[5] = start_page;
for (i = 0; i < MS_MAX_RETRY_COUNT; i++) {
retval = ms_write_bytes(chip, WRITE_REG, 6, NO_WAIT_INT,
data, 6);
if (retval == STATUS_SUCCESS)
break;
}
if (i == MS_MAX_RETRY_COUNT)
return STATUS_FAIL;
ms_set_err_code(chip, MS_NO_ERROR);
retval = ms_send_cmd(chip, BLOCK_READ, WAIT_INT);
if (retval != STATUS_SUCCESS)
return STATUS_FAIL;
ptr = buf;
for (page_addr = start_page; page_addr < end_page; page_addr++) {
ms_set_err_code(chip, MS_NO_ERROR);
if (detect_card_cd(chip, MS_CARD) != STATUS_SUCCESS) {
ms_set_err_code(chip, MS_NO_CARD);
return STATUS_FAIL;
}
retval = ms_read_bytes(chip, GET_INT, 1, NO_WAIT_INT, &val, 1);
if (retval != STATUS_SUCCESS)
return STATUS_FAIL;
if (val & INT_REG_CMDNK) {
ms_set_err_code(chip, MS_CMD_NK);
return STATUS_FAIL;
}
if (val & INT_REG_ERR) {
if (val & INT_REG_BREQ) {
retval = ms_read_status_reg(chip);
if (retval != STATUS_SUCCESS) {
if (!(chip->card_wp & MS_CARD)) {
reset_ms(chip);
ms_set_page_status
(log_blk, set_PS_NG,
extra,
MS_EXTRA_SIZE);
ms_write_extra_data
(chip, phy_blk,
page_addr, extra,
MS_EXTRA_SIZE);
}
ms_set_err_code(chip,
MS_FLASH_READ_ERROR);
return STATUS_FAIL;
}
} else {
ms_set_err_code(chip, MS_FLASH_READ_ERROR);
return STATUS_FAIL;
}
} else {
if (!(val & INT_REG_BREQ)) {
ms_set_err_code(chip, MS_BREQ_ERROR);
return STATUS_FAIL;
}
}
if (page_addr == (end_page - 1)) {
if (!(val & INT_REG_CED)) {
retval = ms_send_cmd(chip, BLOCK_END, WAIT_INT);
if (retval != STATUS_SUCCESS)
return STATUS_FAIL;
}
retval = ms_read_bytes(chip, GET_INT, 1, NO_WAIT_INT,
&val, 1);
if (retval != STATUS_SUCCESS)
return STATUS_FAIL;
if (!(val & INT_REG_CED)) {
ms_set_err_code(chip, MS_FLASH_READ_ERROR);
return STATUS_FAIL;
}
trans_cfg = NO_WAIT_INT;
} else {
trans_cfg = WAIT_INT;
}
rtsx_init_cmd(chip);
rtsx_add_cmd(chip, WRITE_REG_CMD, MS_TPC, 0xFF, READ_PAGE_DATA);
rtsx_add_cmd(chip, WRITE_REG_CMD, MS_TRANS_CFG,
0xFF, trans_cfg);
rtsx_add_cmd(chip, WRITE_REG_CMD, CARD_DATA_SOURCE,
0x01, RING_BUFFER);
trans_dma_enable(DMA_FROM_DEVICE, chip, 512, DMA_512);
rtsx_add_cmd(chip, WRITE_REG_CMD, MS_TRANSFER, 0xFF,
MS_TRANSFER_START | MS_TM_NORMAL_READ);
rtsx_add_cmd(chip, CHECK_REG_CMD, MS_TRANSFER,
MS_TRANSFER_END, MS_TRANSFER_END);
rtsx_send_cmd_no_wait(chip);
retval = rtsx_transfer_data_partial(chip, MS_CARD, ptr, 512,
scsi_sg_count(chip->srb),
index, offset,
DMA_FROM_DEVICE,
chip->ms_timeout);
if (retval < 0) {
if (retval == -ETIMEDOUT) {
ms_set_err_code(chip, MS_TO_ERROR);
rtsx_clear_ms_error(chip);
return STATUS_TIMEDOUT;
}
retval = rtsx_read_register(chip, MS_TRANS_CFG, &val);
if (retval != STATUS_SUCCESS) {
ms_set_err_code(chip, MS_TO_ERROR);
rtsx_clear_ms_error(chip);
return STATUS_TIMEDOUT;
}
if (val & (MS_CRC16_ERR | MS_RDY_TIMEOUT)) {
ms_set_err_code(chip, MS_CRC16_ERROR);
rtsx_clear_ms_error(chip);
return STATUS_FAIL;
}
}
if (scsi_sg_count(chip->srb) == 0)
ptr += 512;
}
return STATUS_SUCCESS;
}
static int ms_write_multiple_pages(struct rtsx_chip *chip, u16 old_blk,
u16 new_blk, u16 log_blk, u8 start_page,
u8 end_page, u8 *buf, unsigned int *index,
unsigned int *offset)
{
struct ms_info *ms_card = &chip->ms_card;
int retval, i;
u8 page_addr, val, data[16];
u8 *ptr;
if (!start_page) {
retval = ms_set_rw_reg_addr(chip, OVERWRITE_FLAG, MS_EXTRA_SIZE,
SYSTEM_PARAM, 7);
if (retval != STATUS_SUCCESS)
return STATUS_FAIL;
if (CHK_MS4BIT(ms_card))
data[0] = 0x88;
else
data[0] = 0x80;
data[1] = 0;
data[2] = (u8)(old_blk >> 8);
data[3] = (u8)old_blk;
data[4] = 0x80;
data[5] = 0;
data[6] = 0xEF;
data[7] = 0xFF;
retval = ms_write_bytes(chip, WRITE_REG, 7, NO_WAIT_INT,
data, 8);
if (retval != STATUS_SUCCESS)
return STATUS_FAIL;
retval = ms_send_cmd(chip, BLOCK_WRITE, WAIT_INT);
if (retval != STATUS_SUCCESS)
return STATUS_FAIL;
ms_set_err_code(chip, MS_NO_ERROR);
retval = ms_transfer_tpc(chip, MS_TM_READ_BYTES, GET_INT, 1,
NO_WAIT_INT);
if (retval != STATUS_SUCCESS)
return STATUS_FAIL;
}
retval = ms_set_rw_reg_addr(chip, OVERWRITE_FLAG, MS_EXTRA_SIZE,
SYSTEM_PARAM, (6 + MS_EXTRA_SIZE));
if (retval != STATUS_SUCCESS)
return STATUS_FAIL;
ms_set_err_code(chip, MS_NO_ERROR);
if (CHK_MS4BIT(ms_card))
data[0] = 0x88;
else
data[0] = 0x80;
data[1] = 0;
data[2] = (u8)(new_blk >> 8);
data[3] = (u8)new_blk;
if ((end_page - start_page) == 1)
data[4] = 0x20;
else
data[4] = 0;
data[5] = start_page;
data[6] = 0xF8;
data[7] = 0xFF;
data[8] = (u8)(log_blk >> 8);
data[9] = (u8)log_blk;
for (i = 0x0A; i < 0x10; i++)
data[i] = 0xFF;
for (i = 0; i < MS_MAX_RETRY_COUNT; i++) {
retval = ms_write_bytes(chip, WRITE_REG, 6 + MS_EXTRA_SIZE,
NO_WAIT_INT, data, 16);
if (retval == STATUS_SUCCESS)
break;
}
if (i == MS_MAX_RETRY_COUNT)
return STATUS_FAIL;
for (i = 0; i < MS_MAX_RETRY_COUNT; i++) {
retval = ms_send_cmd(chip, BLOCK_WRITE, WAIT_INT);
if (retval == STATUS_SUCCESS)
break;
}
if (i == MS_MAX_RETRY_COUNT)
return STATUS_FAIL;
retval = ms_read_bytes(chip, GET_INT, 1, NO_WAIT_INT, &val, 1);
if (retval != STATUS_SUCCESS)
return STATUS_FAIL;
ptr = buf;
for (page_addr = start_page; page_addr < end_page; page_addr++) {
ms_set_err_code(chip, MS_NO_ERROR);
if (detect_card_cd(chip, MS_CARD) != STATUS_SUCCESS) {
ms_set_err_code(chip, MS_NO_CARD);
return STATUS_FAIL;
}
if (val & INT_REG_CMDNK) {
ms_set_err_code(chip, MS_CMD_NK);
return STATUS_FAIL;
}
if (val & INT_REG_ERR) {
ms_set_err_code(chip, MS_FLASH_WRITE_ERROR);
return STATUS_FAIL;
}
if (!(val & INT_REG_BREQ)) {
ms_set_err_code(chip, MS_BREQ_ERROR);
return STATUS_FAIL;
}
udelay(30);
rtsx_init_cmd(chip);
rtsx_add_cmd(chip, WRITE_REG_CMD, MS_TPC,
0xFF, WRITE_PAGE_DATA);
rtsx_add_cmd(chip, WRITE_REG_CMD, MS_TRANS_CFG,
0xFF, WAIT_INT);
rtsx_add_cmd(chip, WRITE_REG_CMD, CARD_DATA_SOURCE,
0x01, RING_BUFFER);
trans_dma_enable(DMA_TO_DEVICE, chip, 512, DMA_512);
rtsx_add_cmd(chip, WRITE_REG_CMD, MS_TRANSFER, 0xFF,
MS_TRANSFER_START | MS_TM_NORMAL_WRITE);
rtsx_add_cmd(chip, CHECK_REG_CMD, MS_TRANSFER,
MS_TRANSFER_END, MS_TRANSFER_END);
rtsx_send_cmd_no_wait(chip);
retval = rtsx_transfer_data_partial(chip, MS_CARD, ptr, 512,
scsi_sg_count(chip->srb),
index, offset,
DMA_TO_DEVICE,
chip->ms_timeout);
if (retval < 0) {
ms_set_err_code(chip, MS_TO_ERROR);
rtsx_clear_ms_error(chip);
if (retval == -ETIMEDOUT)
return STATUS_TIMEDOUT;
return STATUS_FAIL;
}
retval = ms_read_bytes(chip, GET_INT, 1, NO_WAIT_INT, &val, 1);
if (retval != STATUS_SUCCESS)
return STATUS_FAIL;
if ((end_page - start_page) == 1) {
if (!(val & INT_REG_CED)) {
ms_set_err_code(chip, MS_FLASH_WRITE_ERROR);
return STATUS_FAIL;
}
} else {
if (page_addr == (end_page - 1)) {
if (!(val & INT_REG_CED)) {
retval = ms_send_cmd(chip, BLOCK_END,
WAIT_INT);
if (retval != STATUS_SUCCESS)
return STATUS_FAIL;
}
retval = ms_read_bytes(chip, GET_INT, 1,
NO_WAIT_INT, &val, 1);
if (retval != STATUS_SUCCESS)
return STATUS_FAIL;
}
if (page_addr == (end_page - 1) ||
page_addr == ms_card->page_off) {
if (!(val & INT_REG_CED)) {
ms_set_err_code(chip,
MS_FLASH_WRITE_ERROR);
return STATUS_FAIL;
}
}
}
if (scsi_sg_count(chip->srb) == 0)
ptr += 512;
}
return STATUS_SUCCESS;
}
static int ms_finish_write(struct rtsx_chip *chip, u16 old_blk, u16 new_blk,
u16 log_blk, u8 page_off)
{
struct ms_info *ms_card = &chip->ms_card;
int retval, seg_no;
retval = ms_copy_page(chip, old_blk, new_blk, log_blk,
page_off, ms_card->page_off + 1);
if (retval != STATUS_SUCCESS)
return STATUS_FAIL;
seg_no = old_blk >> 9;
if (MS_TST_BAD_BLOCK_FLG(ms_card)) {
MS_CLR_BAD_BLOCK_FLG(ms_card);
ms_set_bad_block(chip, old_blk);
} else {
retval = ms_erase_block(chip, old_blk);
if (retval == STATUS_SUCCESS)
ms_set_unused_block(chip, old_blk);
}
ms_set_l2p_tbl(chip, seg_no, log_blk - ms_start_idx[seg_no], new_blk);
return STATUS_SUCCESS;
}
static int ms_prepare_write(struct rtsx_chip *chip, u16 old_blk, u16 new_blk,
u16 log_blk, u8 start_page)
{
int retval;
if (start_page) {
retval = ms_copy_page(chip, old_blk, new_blk, log_blk,
0, start_page);
if (retval != STATUS_SUCCESS)
return STATUS_FAIL;
}
return STATUS_SUCCESS;
}
#ifdef MS_DELAY_WRITE
int ms_delay_write(struct rtsx_chip *chip)
{
struct ms_info *ms_card = &chip->ms_card;
struct ms_delay_write_tag *delay_write = &ms_card->delay_write;
int retval;
if (delay_write->delay_write_flag) {
retval = ms_set_init_para(chip);
if (retval != STATUS_SUCCESS)
return STATUS_FAIL;
delay_write->delay_write_flag = 0;
retval = ms_finish_write(chip,
delay_write->old_phyblock,
delay_write->new_phyblock,
delay_write->logblock,
delay_write->pageoff);
if (retval != STATUS_SUCCESS)
return STATUS_FAIL;
}
return STATUS_SUCCESS;
}
#endif
static inline void ms_rw_fail(struct scsi_cmnd *srb, struct rtsx_chip *chip)
{
if (srb->sc_data_direction == DMA_FROM_DEVICE)
set_sense_type(chip, SCSI_LUN(srb),
SENSE_TYPE_MEDIA_UNRECOVER_READ_ERR);
else
set_sense_type(chip, SCSI_LUN(srb), SENSE_TYPE_MEDIA_WRITE_ERR);
}
static int ms_rw_multi_sector(struct scsi_cmnd *srb, struct rtsx_chip *chip,
u32 start_sector, u16 sector_cnt)
{
struct ms_info *ms_card = &chip->ms_card;
unsigned int lun = SCSI_LUN(srb);
int retval, seg_no;
unsigned int index = 0, offset = 0;
u16 old_blk = 0, new_blk = 0, log_blk, total_sec_cnt = sector_cnt;
u8 start_page, end_page = 0, page_cnt;
u8 *ptr;
#ifdef MS_DELAY_WRITE
struct ms_delay_write_tag *delay_write = &ms_card->delay_write;
#endif
ms_set_err_code(chip, MS_NO_ERROR);
ms_card->cleanup_counter = 0;
ptr = (u8 *)scsi_sglist(srb);
retval = ms_switch_clock(chip);
if (retval != STATUS_SUCCESS) {
ms_rw_fail(srb, chip);
return STATUS_FAIL;
}
log_blk = (u16)(start_sector >> ms_card->block_shift);
start_page = (u8)(start_sector & ms_card->page_off);
for (seg_no = 0; seg_no < ARRAY_SIZE(ms_start_idx) - 1; seg_no++) {
if (log_blk < ms_start_idx[seg_no + 1])
break;
}
if (ms_card->segment[seg_no].build_flag == 0) {
retval = ms_build_l2p_tbl(chip, seg_no);
if (retval != STATUS_SUCCESS) {
chip->card_fail |= MS_CARD;
set_sense_type(chip, lun, SENSE_TYPE_MEDIA_NOT_PRESENT);
return STATUS_FAIL;
}
}
if (srb->sc_data_direction == DMA_TO_DEVICE) {
#ifdef MS_DELAY_WRITE
if (delay_write->delay_write_flag &&
delay_write->logblock == log_blk &&
start_page > delay_write->pageoff) {
delay_write->delay_write_flag = 0;
retval = ms_copy_page(chip,
delay_write->old_phyblock,
delay_write->new_phyblock,
log_blk,
delay_write->pageoff, start_page);
if (retval != STATUS_SUCCESS) {
set_sense_type(chip, lun,
SENSE_TYPE_MEDIA_WRITE_ERR);
return STATUS_FAIL;
}
old_blk = delay_write->old_phyblock;
new_blk = delay_write->new_phyblock;
} else if (delay_write->delay_write_flag &&
(delay_write->logblock == log_blk) &&
(start_page == delay_write->pageoff)) {
delay_write->delay_write_flag = 0;
old_blk = delay_write->old_phyblock;
new_blk = delay_write->new_phyblock;
} else {
retval = ms_delay_write(chip);
if (retval != STATUS_SUCCESS) {
set_sense_type(chip, lun,
SENSE_TYPE_MEDIA_WRITE_ERR);
return STATUS_FAIL;
}
#endif
old_blk = ms_get_l2p_tbl
(chip, seg_no,
log_blk - ms_start_idx[seg_no]);
new_blk = ms_get_unused_block(chip, seg_no);
if (old_blk == 0xFFFF || new_blk == 0xFFFF) {
set_sense_type(chip, lun,
SENSE_TYPE_MEDIA_WRITE_ERR);
return STATUS_FAIL;
}
retval = ms_prepare_write(chip, old_blk, new_blk,
log_blk, start_page);
if (retval != STATUS_SUCCESS) {
if (detect_card_cd(chip, MS_CARD) !=
STATUS_SUCCESS) {
set_sense_type
(chip, lun,
SENSE_TYPE_MEDIA_NOT_PRESENT);
return STATUS_FAIL;
}
set_sense_type(chip, lun,
SENSE_TYPE_MEDIA_WRITE_ERR);
return STATUS_FAIL;
}
#ifdef MS_DELAY_WRITE
}
#endif
} else {
#ifdef MS_DELAY_WRITE
retval = ms_delay_write(chip);
if (retval != STATUS_SUCCESS) {
if (detect_card_cd(chip, MS_CARD) != STATUS_SUCCESS) {
set_sense_type(chip, lun,
SENSE_TYPE_MEDIA_NOT_PRESENT);
return STATUS_FAIL;
}
set_sense_type(chip, lun,
SENSE_TYPE_MEDIA_UNRECOVER_READ_ERR);
return STATUS_FAIL;
}
#endif
old_blk = ms_get_l2p_tbl(chip, seg_no,
log_blk - ms_start_idx[seg_no]);
if (old_blk == 0xFFFF) {
set_sense_type(chip, lun,
SENSE_TYPE_MEDIA_UNRECOVER_READ_ERR);
return STATUS_FAIL;
}
}
dev_dbg(rtsx_dev(chip), "seg_no = %d, old_blk = 0x%x, new_blk = 0x%x\n",
seg_no, old_blk, new_blk);
while (total_sec_cnt) {
if ((start_page + total_sec_cnt) > (ms_card->page_off + 1))
end_page = ms_card->page_off + 1;
else
end_page = start_page + (u8)total_sec_cnt;
page_cnt = end_page - start_page;
dev_dbg(rtsx_dev(chip), "start_page = %d, end_page = %d, page_cnt = %d\n",
start_page, end_page, page_cnt);
if (srb->sc_data_direction == DMA_FROM_DEVICE) {
retval = ms_read_multiple_pages(chip,
old_blk, log_blk,
start_page, end_page,
ptr, &index, &offset);
} else {
retval = ms_write_multiple_pages(chip, old_blk, new_blk,
log_blk, start_page,
end_page, ptr, &index,
&offset);
}
if (retval != STATUS_SUCCESS) {
toggle_gpio(chip, 1);
if (detect_card_cd(chip, MS_CARD) != STATUS_SUCCESS) {
set_sense_type(chip, lun,
SENSE_TYPE_MEDIA_NOT_PRESENT);
return STATUS_FAIL;
}
ms_rw_fail(srb, chip);
return STATUS_FAIL;
}
if (srb->sc_data_direction == DMA_TO_DEVICE) {
if (end_page == (ms_card->page_off + 1)) {
retval = ms_erase_block(chip, old_blk);
if (retval == STATUS_SUCCESS)
ms_set_unused_block(chip, old_blk);
ms_set_l2p_tbl(chip, seg_no,
log_blk - ms_start_idx[seg_no],
new_blk);
}
}
total_sec_cnt -= page_cnt;
if (scsi_sg_count(srb) == 0)
ptr += page_cnt * 512;
if (total_sec_cnt == 0)
break;
log_blk++;
for (seg_no = 0; seg_no < ARRAY_SIZE(ms_start_idx) - 1;
seg_no++) {
if (log_blk < ms_start_idx[seg_no + 1])
break;
}
if (ms_card->segment[seg_no].build_flag == 0) {
retval = ms_build_l2p_tbl(chip, seg_no);
if (retval != STATUS_SUCCESS) {
chip->card_fail |= MS_CARD;
set_sense_type(chip, lun,
SENSE_TYPE_MEDIA_NOT_PRESENT);
return STATUS_FAIL;
}
}
old_blk = ms_get_l2p_tbl(chip, seg_no,
log_blk - ms_start_idx[seg_no]);
if (old_blk == 0xFFFF) {
ms_rw_fail(srb, chip);
return STATUS_FAIL;
}
if (srb->sc_data_direction == DMA_TO_DEVICE) {
new_blk = ms_get_unused_block(chip, seg_no);
if (new_blk == 0xFFFF) {
ms_rw_fail(srb, chip);
return STATUS_FAIL;
}
}
dev_dbg(rtsx_dev(chip), "seg_no = %d, old_blk = 0x%x, new_blk = 0x%x\n",
seg_no, old_blk, new_blk);
start_page = 0;
}
if (srb->sc_data_direction == DMA_TO_DEVICE) {
if (end_page < (ms_card->page_off + 1)) {
#ifdef MS_DELAY_WRITE
delay_write->delay_write_flag = 1;
delay_write->old_phyblock = old_blk;
delay_write->new_phyblock = new_blk;
delay_write->logblock = log_blk;
delay_write->pageoff = end_page;
#else
retval = ms_finish_write(chip, old_blk, new_blk,
log_blk, end_page);
if (retval != STATUS_SUCCESS) {
if (detect_card_cd(chip, MS_CARD) !=
STATUS_SUCCESS) {
set_sense_type
(chip, lun,
SENSE_TYPE_MEDIA_NOT_PRESENT);
return STATUS_FAIL;
}
ms_rw_fail(srb, chip);
return STATUS_FAIL;
}
#endif
}
}
scsi_set_resid(srb, 0);
return STATUS_SUCCESS;
}
int ms_rw(struct scsi_cmnd *srb, struct rtsx_chip *chip,
u32 start_sector, u16 sector_cnt)
{
struct ms_info *ms_card = &chip->ms_card;
int retval;
if (CHK_MSPRO(ms_card))
retval = mspro_rw_multi_sector(srb, chip, start_sector,
sector_cnt);
else
retval = ms_rw_multi_sector(srb, chip, start_sector,
sector_cnt);
return retval;
}
void ms_free_l2p_tbl(struct rtsx_chip *chip)
{
struct ms_info *ms_card = &chip->ms_card;
int i = 0;
if (ms_card->segment) {
for (i = 0; i < ms_card->segment_cnt; i++) {
vfree(ms_card->segment[i].l2p_table);
ms_card->segment[i].l2p_table = NULL;
vfree(ms_card->segment[i].free_table);
ms_card->segment[i].free_table = NULL;
}
vfree(ms_card->segment);
ms_card->segment = NULL;
}
}
#ifdef SUPPORT_MAGIC_GATE
#ifdef READ_BYTES_WAIT_INT
static int ms_poll_int(struct rtsx_chip *chip)
{
int retval;
u8 val;
rtsx_init_cmd(chip);
rtsx_add_cmd(chip, CHECK_REG_CMD, MS_TRANS_CFG, MS_INT_CED, MS_INT_CED);
retval = rtsx_send_cmd(chip, MS_CARD, 5000);
if (retval != STATUS_SUCCESS)
return STATUS_FAIL;
val = *rtsx_get_cmd_data(chip);
if (val & MS_INT_ERR)
return STATUS_FAIL;
return STATUS_SUCCESS;
}
#endif
#ifdef MS_SAMPLE_INT_ERR
static int check_ms_err(struct rtsx_chip *chip)
{
int retval;
u8 val;
retval = rtsx_read_register(chip, MS_TRANSFER, &val);
if (retval != STATUS_SUCCESS)
return 1;
if (val & MS_TRANSFER_ERR)
return 1;
retval = rtsx_read_register(chip, MS_TRANS_CFG, &val);
if (retval != STATUS_SUCCESS)
return 1;
if (val & (MS_INT_ERR | MS_INT_CMDNK))
return 1;
return 0;
}
#else
static int check_ms_err(struct rtsx_chip *chip)
{
int retval;
u8 val;
retval = rtsx_read_register(chip, MS_TRANSFER, &val);
if (retval != STATUS_SUCCESS)
return 1;
if (val & MS_TRANSFER_ERR)
return 1;
return 0;
}
#endif
static int mg_send_ex_cmd(struct rtsx_chip *chip, u8 cmd, u8 entry_num)
{
int retval, i;
u8 data[8];
data[0] = cmd;
data[1] = 0;
data[2] = 0;
data[3] = 0;
data[4] = 0;
data[5] = 0;
data[6] = entry_num;
data[7] = 0;
for (i = 0; i < MS_MAX_RETRY_COUNT; i++) {
retval = ms_write_bytes(chip, PRO_EX_SET_CMD, 7, WAIT_INT,
data, 8);
if (retval == STATUS_SUCCESS)
break;
}
if (i == MS_MAX_RETRY_COUNT)
return STATUS_FAIL;
if (check_ms_err(chip)) {
rtsx_clear_ms_error(chip);
return STATUS_FAIL;
}
return STATUS_SUCCESS;
}
static int mg_set_tpc_para_sub(struct rtsx_chip *chip, int type,
u8 mg_entry_num)
{
int retval;
u8 buf[6];
if (type == 0)
retval = ms_set_rw_reg_addr(chip, 0, 0, PRO_TPC_PARM, 1);
else
retval = ms_set_rw_reg_addr(chip, 0, 0, PRO_DATA_COUNT1, 6);
if (retval != STATUS_SUCCESS)
return STATUS_FAIL;
buf[0] = 0;
buf[1] = 0;
if (type == 1) {
buf[2] = 0;
buf[3] = 0;
buf[4] = 0;
buf[5] = mg_entry_num;
}
retval = ms_write_bytes(chip, PRO_WRITE_REG, (type == 0) ? 1 : 6,
NO_WAIT_INT, buf, 6);
if (retval != STATUS_SUCCESS)
return STATUS_FAIL;
return STATUS_SUCCESS;
}
int mg_set_leaf_id(struct scsi_cmnd *srb, struct rtsx_chip *chip)
{
int retval;
int i;
unsigned int lun = SCSI_LUN(srb);
u8 buf1[32], buf2[12];
if (scsi_bufflen(srb) < 12) {
set_sense_type(chip, lun, SENSE_TYPE_MEDIA_INVALID_CMD_FIELD);
return STATUS_FAIL;
}
ms_cleanup_work(chip);
retval = ms_switch_clock(chip);
if (retval != STATUS_SUCCESS)
return STATUS_FAIL;
retval = mg_send_ex_cmd(chip, MG_SET_LID, 0);
if (retval != STATUS_SUCCESS) {
set_sense_type(chip, lun, SENSE_TYPE_MG_KEY_FAIL_NOT_ESTAB);
return STATUS_FAIL;
}
memset(buf1, 0, 32);
rtsx_stor_get_xfer_buf(buf2, min_t(int, 12, scsi_bufflen(srb)), srb);
for (i = 0; i < 8; i++)
buf1[8 + i] = buf2[4 + i];
retval = ms_write_bytes(chip, PRO_WRITE_SHORT_DATA, 32, WAIT_INT,
buf1, 32);
if (retval != STATUS_SUCCESS) {
set_sense_type(chip, lun, SENSE_TYPE_MG_KEY_FAIL_NOT_ESTAB);
return STATUS_FAIL;
}
if (check_ms_err(chip)) {
set_sense_type(chip, lun, SENSE_TYPE_MG_KEY_FAIL_NOT_ESTAB);
rtsx_clear_ms_error(chip);
return STATUS_FAIL;
}
return STATUS_SUCCESS;
}
int mg_get_local_EKB(struct scsi_cmnd *srb, struct rtsx_chip *chip)
{
int retval;
int bufflen;
unsigned int lun = SCSI_LUN(srb);
u8 *buf = NULL;
ms_cleanup_work(chip);
retval = ms_switch_clock(chip);
if (retval != STATUS_SUCCESS)
return STATUS_FAIL;
buf = kmalloc(1540, GFP_KERNEL);
if (!buf)
return STATUS_ERROR;
buf[0] = 0x04;
buf[1] = 0x1A;
buf[2] = 0x00;
buf[3] = 0x00;
retval = mg_send_ex_cmd(chip, MG_GET_LEKB, 0);
if (retval != STATUS_SUCCESS) {
set_sense_type(chip, lun, SENSE_TYPE_MG_KEY_FAIL_NOT_AUTHEN);
goto free_buffer;
}
retval = ms_transfer_data(chip, MS_TM_AUTO_READ, PRO_READ_LONG_DATA,
3, WAIT_INT, 0, 0, buf + 4, 1536);
if (retval != STATUS_SUCCESS) {
set_sense_type(chip, lun, SENSE_TYPE_MG_KEY_FAIL_NOT_AUTHEN);
rtsx_clear_ms_error(chip);
goto free_buffer;
}
if (check_ms_err(chip)) {
set_sense_type(chip, lun, SENSE_TYPE_MG_KEY_FAIL_NOT_AUTHEN);
rtsx_clear_ms_error(chip);
retval = STATUS_FAIL;
goto free_buffer;
}
bufflen = min_t(int, 1052, scsi_bufflen(srb));
rtsx_stor_set_xfer_buf(buf, bufflen, srb);
free_buffer:
kfree(buf);
return retval;
}
int mg_chg(struct scsi_cmnd *srb, struct rtsx_chip *chip)
{
struct ms_info *ms_card = &chip->ms_card;
int retval;
int bufflen;
int i;
unsigned int lun = SCSI_LUN(srb);
u8 buf[32];
ms_cleanup_work(chip);
retval = ms_switch_clock(chip);
if (retval != STATUS_SUCCESS)
return STATUS_FAIL;
retval = mg_send_ex_cmd(chip, MG_GET_ID, 0);
if (retval != STATUS_SUCCESS) {
set_sense_type(chip, lun, SENSE_TYPE_MG_INCOMPATIBLE_MEDIUM);
return STATUS_FAIL;
}
retval = ms_read_bytes(chip, PRO_READ_SHORT_DATA, 32, WAIT_INT,
buf, 32);
if (retval != STATUS_SUCCESS) {
set_sense_type(chip, lun, SENSE_TYPE_MG_INCOMPATIBLE_MEDIUM);
return STATUS_FAIL;
}
if (check_ms_err(chip)) {
set_sense_type(chip, lun, SENSE_TYPE_MG_INCOMPATIBLE_MEDIUM);
rtsx_clear_ms_error(chip);
return STATUS_FAIL;
}
memcpy(ms_card->magic_gate_id, buf, 16);
#ifdef READ_BYTES_WAIT_INT
retval = ms_poll_int(chip);
if (retval != STATUS_SUCCESS) {
set_sense_type(chip, lun, SENSE_TYPE_MG_INCOMPATIBLE_MEDIUM);
return STATUS_FAIL;
}
#endif
retval = mg_send_ex_cmd(chip, MG_SET_RD, 0);
if (retval != STATUS_SUCCESS) {
set_sense_type(chip, lun, SENSE_TYPE_MG_INCOMPATIBLE_MEDIUM);
return STATUS_FAIL;
}
bufflen = min_t(int, 12, scsi_bufflen(srb));
rtsx_stor_get_xfer_buf(buf, bufflen, srb);
for (i = 0; i < 8; i++)
buf[i] = buf[4 + i];
for (i = 0; i < 24; i++)
buf[8 + i] = 0;
retval = ms_write_bytes(chip, PRO_WRITE_SHORT_DATA,
32, WAIT_INT, buf, 32);
if (retval != STATUS_SUCCESS) {
set_sense_type(chip, lun, SENSE_TYPE_MG_INCOMPATIBLE_MEDIUM);
return STATUS_FAIL;
}
if (check_ms_err(chip)) {
set_sense_type(chip, lun, SENSE_TYPE_MG_INCOMPATIBLE_MEDIUM);
rtsx_clear_ms_error(chip);
return STATUS_FAIL;
}
ms_card->mg_auth = 0;
return STATUS_SUCCESS;
}
int mg_get_rsp_chg(struct scsi_cmnd *srb, struct rtsx_chip *chip)
{
struct ms_info *ms_card = &chip->ms_card;
int retval;
int bufflen;
unsigned int lun = SCSI_LUN(srb);
u8 buf1[32], buf2[36];
ms_cleanup_work(chip);
retval = ms_switch_clock(chip);
if (retval != STATUS_SUCCESS)
return STATUS_FAIL;
retval = mg_send_ex_cmd(chip, MG_MAKE_RMS, 0);
if (retval != STATUS_SUCCESS) {
set_sense_type(chip, lun, SENSE_TYPE_MG_KEY_FAIL_NOT_AUTHEN);
return STATUS_FAIL;
}
retval = ms_read_bytes(chip, PRO_READ_SHORT_DATA, 32, WAIT_INT,
buf1, 32);
if (retval != STATUS_SUCCESS) {
set_sense_type(chip, lun, SENSE_TYPE_MG_KEY_FAIL_NOT_AUTHEN);
return STATUS_FAIL;
}
if (check_ms_err(chip)) {
set_sense_type(chip, lun, SENSE_TYPE_MG_KEY_FAIL_NOT_AUTHEN);
rtsx_clear_ms_error(chip);
return STATUS_FAIL;
}
buf2[0] = 0x00;
buf2[1] = 0x22;
buf2[2] = 0x00;
buf2[3] = 0x00;
memcpy(buf2 + 4, ms_card->magic_gate_id, 16);
memcpy(buf2 + 20, buf1, 16);
bufflen = min_t(int, 36, scsi_bufflen(srb));
rtsx_stor_set_xfer_buf(buf2, bufflen, srb);
#ifdef READ_BYTES_WAIT_INT
retval = ms_poll_int(chip);
if (retval != STATUS_SUCCESS) {
set_sense_type(chip, lun, SENSE_TYPE_MG_KEY_FAIL_NOT_AUTHEN);
return STATUS_FAIL;
}
#endif
return STATUS_SUCCESS;
}
int mg_rsp(struct scsi_cmnd *srb, struct rtsx_chip *chip)
{
struct ms_info *ms_card = &chip->ms_card;
int retval;
int i;
int bufflen;
unsigned int lun = SCSI_LUN(srb);
u8 buf[32];
ms_cleanup_work(chip);
retval = ms_switch_clock(chip);
if (retval != STATUS_SUCCESS)
return STATUS_FAIL;
retval = mg_send_ex_cmd(chip, MG_MAKE_KSE, 0);
if (retval != STATUS_SUCCESS) {
set_sense_type(chip, lun, SENSE_TYPE_MG_KEY_FAIL_NOT_AUTHEN);
return STATUS_FAIL;
}
bufflen = min_t(int, 12, scsi_bufflen(srb));
rtsx_stor_get_xfer_buf(buf, bufflen, srb);
for (i = 0; i < 8; i++)
buf[i] = buf[4 + i];
for (i = 0; i < 24; i++)
buf[8 + i] = 0;
retval = ms_write_bytes(chip, PRO_WRITE_SHORT_DATA, 32, WAIT_INT,
buf, 32);
if (retval != STATUS_SUCCESS) {
set_sense_type(chip, lun, SENSE_TYPE_MG_KEY_FAIL_NOT_AUTHEN);
return STATUS_FAIL;
}
if (check_ms_err(chip)) {
set_sense_type(chip, lun, SENSE_TYPE_MG_KEY_FAIL_NOT_AUTHEN);
rtsx_clear_ms_error(chip);
return STATUS_FAIL;
}
ms_card->mg_auth = 1;
return STATUS_SUCCESS;
}
/*
 * mg_get_ICV - read the ICV (Integrity Check Value) block from the card.
 *
 * Builds a 1028-byte bounce buffer: a 4-byte header (0x04 0x02 0x00 0x00;
 * constants come from the MagicGate protocol - exact meaning not visible
 * here) followed by 1024 bytes auto-read from the card via
 * PRO_READ_LONG_DATA (2 x 512-byte units), then copies up to 1028 bytes
 * back to the SCSI transfer buffer.
 *
 * Returns STATUS_SUCCESS, STATUS_FAIL (sense set) or STATUS_ERROR (OOM).
 */
int mg_get_ICV(struct scsi_cmnd *srb, struct rtsx_chip *chip)
{
	struct ms_info *ms_card = &chip->ms_card;
	int retval;
	int bufflen;
	unsigned int lun = SCSI_LUN(srb);
	u8 *buf = NULL;

	ms_cleanup_work(chip);

	retval = ms_switch_clock(chip);
	if (retval != STATUS_SUCCESS)
		return STATUS_FAIL;

	buf = kmalloc(1028, GFP_KERNEL);
	if (!buf)
		return STATUS_ERROR;

	buf[0] = 0x04;
	buf[1] = 0x02;
	buf[2] = 0x00;
	buf[3] = 0x00;

	/* Select which ICV entry to read. */
	retval = mg_send_ex_cmd(chip, MG_GET_IBD, ms_card->mg_entry_num);
	if (retval != STATUS_SUCCESS) {
		set_sense_type(chip, lun, SENSE_TYPE_MEDIA_UNRECOVER_READ_ERR);
		goto free_buffer;
	}

	retval = ms_transfer_data(chip, MS_TM_AUTO_READ, PRO_READ_LONG_DATA,
				  2, WAIT_INT, 0, 0, buf + 4, 1024);
	if (retval != STATUS_SUCCESS) {
		set_sense_type(chip, lun, SENSE_TYPE_MEDIA_UNRECOVER_READ_ERR);
		rtsx_clear_ms_error(chip);
		goto free_buffer;
	}
	if (check_ms_err(chip)) {
		set_sense_type(chip, lun, SENSE_TYPE_MEDIA_UNRECOVER_READ_ERR);
		rtsx_clear_ms_error(chip);
		retval = STATUS_FAIL;
		goto free_buffer;
	}

	bufflen = min_t(int, 1028, scsi_bufflen(srb));
	rtsx_stor_set_xfer_buf(buf, bufflen, srb);

free_buffer:
	kfree(buf);
	return retval;
}
/*
 * mg_set_ICV - write the ICV (Integrity Check Value) block to the card.
 *
 * Pulls up to 1028 bytes (4-byte header + 1024-byte payload) from the
 * SCSI transfer buffer and writes the payload to the card.  Two
 * compile-time strategies exist:
 *   - MG_SET_ICV_SLOW: two 512-byte MS_TM_NORMAL_WRITE transfers
 *     through the ring buffer;
 *   - default: a single 1024-byte MS_TM_AUTO_WRITE transfer.
 * On failure before authentication (mg_auth == 0), bits 7:6 of buf[5]
 * select "key not established" vs. a plain write-error sense code.
 *
 * Returns STATUS_SUCCESS, STATUS_FAIL (sense set) or STATUS_ERROR (OOM).
 */
int mg_set_ICV(struct scsi_cmnd *srb, struct rtsx_chip *chip)
{
	struct ms_info *ms_card = &chip->ms_card;
	int retval;
	int bufflen;
#ifdef MG_SET_ICV_SLOW
	int i;
#endif
	unsigned int lun = SCSI_LUN(srb);
	u8 *buf = NULL;

	ms_cleanup_work(chip);

	retval = ms_switch_clock(chip);
	if (retval != STATUS_SUCCESS)
		return STATUS_FAIL;

	buf = kmalloc(1028, GFP_KERNEL);
	if (!buf)
		return STATUS_ERROR;

	bufflen = min_t(int, 1028, scsi_bufflen(srb));
	rtsx_stor_get_xfer_buf(buf, bufflen, srb);

	/* Select which ICV entry to write. */
	retval = mg_send_ex_cmd(chip, MG_SET_IBD, ms_card->mg_entry_num);
	if (retval != STATUS_SUCCESS) {
		if (ms_card->mg_auth == 0) {
			if ((buf[5] & 0xC0) != 0)
				set_sense_type
					(chip, lun,
					SENSE_TYPE_MG_KEY_FAIL_NOT_ESTAB);
			else
				set_sense_type(chip, lun,
					       SENSE_TYPE_MG_WRITE_ERR);
		} else {
			set_sense_type(chip, lun, SENSE_TYPE_MG_WRITE_ERR);
		}
		goto set_ICV_finish;
	}

#ifdef MG_SET_ICV_SLOW
	/* Push the 1024-byte payload as two 512-byte normal writes. */
	for (i = 0; i < 2; i++) {
		udelay(50);

		rtsx_init_cmd(chip);

		rtsx_add_cmd(chip, WRITE_REG_CMD, MS_TPC,
			     0xFF, PRO_WRITE_LONG_DATA);
		rtsx_add_cmd(chip, WRITE_REG_CMD, MS_TRANS_CFG, 0xFF, WAIT_INT);
		rtsx_add_cmd(chip, WRITE_REG_CMD, CARD_DATA_SOURCE,
			     0x01, RING_BUFFER);

		trans_dma_enable(DMA_TO_DEVICE, chip, 512, DMA_512);

		rtsx_add_cmd(chip, WRITE_REG_CMD, MS_TRANSFER, 0xFF,
			     MS_TRANSFER_START | MS_TM_NORMAL_WRITE);
		rtsx_add_cmd(chip, CHECK_REG_CMD, MS_TRANSFER,
			     MS_TRANSFER_END, MS_TRANSFER_END);

		rtsx_send_cmd_no_wait(chip);

		retval = rtsx_transfer_data(chip, MS_CARD, buf + 4 + i * 512,
					    512, 0, DMA_TO_DEVICE, 3000);
		if (retval < 0 || check_ms_err(chip)) {
			rtsx_clear_ms_error(chip);
			if (ms_card->mg_auth == 0) {
				if ((buf[5] & 0xC0) != 0)
					set_sense_type
					    (chip, lun,
					     SENSE_TYPE_MG_KEY_FAIL_NOT_ESTAB);
				else
					set_sense_type(chip, lun,
						       SENSE_TYPE_MG_WRITE_ERR);
			} else {
				set_sense_type(chip, lun,
					       SENSE_TYPE_MG_WRITE_ERR);
			}
			retval = STATUS_FAIL;
			goto set_ICV_finish;
		}
	}
#else
	/* Single DMA auto-write of the whole 1024-byte payload. */
	retval = ms_transfer_data(chip, MS_TM_AUTO_WRITE, PRO_WRITE_LONG_DATA,
				  2, WAIT_INT, 0, 0, buf + 4, 1024);
	if (retval != STATUS_SUCCESS || check_ms_err(chip)) {
		rtsx_clear_ms_error(chip);
		if (ms_card->mg_auth == 0) {
			if ((buf[5] & 0xC0) != 0)
				set_sense_type
					(chip, lun,
					SENSE_TYPE_MG_KEY_FAIL_NOT_ESTAB);
			else
				set_sense_type(chip, lun,
					       SENSE_TYPE_MG_WRITE_ERR);
		} else {
			set_sense_type(chip, lun, SENSE_TYPE_MG_WRITE_ERR);
		}
		goto set_ICV_finish;
	}
#endif

set_ICV_finish:
	kfree(buf);
	return retval;
}
#endif /* SUPPORT_MAGIC_GATE */
/*
 * ms_cleanup_work - quiesce pending Memory Stick state.
 *
 * MS Pro cards: stop an in-progress sequential transfer; for HG cards
 * also clear the 2K-sector mode bit.  Legacy (non-Pro) cards with
 * MS_DELAY_WRITE enabled: flush the pending delayed write.
 *
 * NOTE: the #ifdef'd "else if" pairs with the outer "if (CHK_MSPRO())",
 * so the delayed-write flush runs only for non-Pro cards (the extra
 * !CHK_MSPRO() test inside it is redundant in the else branch).
 */
void ms_cleanup_work(struct rtsx_chip *chip)
{
	struct ms_info *ms_card = &chip->ms_card;

	if (CHK_MSPRO(ms_card)) {
		if (ms_card->seq_mode) {
			dev_dbg(rtsx_dev(chip), "MS Pro: stop transmission\n");
			mspro_stop_seq_mode(chip);
			ms_card->cleanup_counter = 0;
		}
		if (CHK_MSHG(ms_card)) {
			rtsx_write_register(chip, MS_CFG,
					    MS_2K_SECTOR_MODE, 0x00);
		}
	}
#ifdef MS_DELAY_WRITE
	else if ((!CHK_MSPRO(ms_card)) &&
		 ms_card->delay_write.delay_write_flag) {
		dev_dbg(rtsx_dev(chip), "MS: delay write\n");
		ms_delay_write(chip);
		ms_card->cleanup_counter = 0;
	}
#endif
}
/*
 * ms_power_off_card3v3 - power down the Memory Stick socket.
 *
 * Sequence: gate the MS clock, disable the pad pull network (ASIC parts
 * via ms_pull_ctl_disable(), FPGA parts via FPGA_PULL_CTL), drop the
 * output enables, then cut card power unless ft2_fast_mode keeps it on.
 *
 * Returns STATUS_SUCCESS/STATUS_FAIL, or the raw rtsx_write_register()
 * error on a register-write failure; callers only compare against
 * STATUS_SUCCESS, so any non-zero value means failure.
 */
int ms_power_off_card3v3(struct rtsx_chip *chip)
{
	int retval;

	retval = disable_card_clock(chip, MS_CARD);
	if (retval != STATUS_SUCCESS)
		return STATUS_FAIL;

	if (chip->asic_code) {
		retval = ms_pull_ctl_disable(chip);
		if (retval != STATUS_SUCCESS)
			return STATUS_FAIL;
	} else {
		retval = rtsx_write_register(chip, FPGA_PULL_CTL,
					     FPGA_MS_PULL_CTL_BIT | 0x20,
					     FPGA_MS_PULL_CTL_BIT);
		if (retval)
			return retval;
	}
	retval = rtsx_write_register(chip, CARD_OE, MS_OUTPUT_EN, 0);
	if (retval)
		return retval;
	if (!chip->ft2_fast_mode) {
		retval = card_power_off(chip, MS_CARD);
		if (retval != STATUS_SUCCESS)
			return STATUS_FAIL;
	}

	return STATUS_SUCCESS;
}
/*
 * release_ms_card - forget the current Memory Stick and power it down.
 *
 * Clears the ready/fail/write-protect bits for the MS slot, frees the
 * logical-to-physical mapping table, wipes cached card identification
 * data, then powers the socket off.
 */
int release_ms_card(struct rtsx_chip *chip)
{
	struct ms_info *ms_card = &chip->ms_card;
	int retval;

#ifdef MS_DELAY_WRITE
	ms_card->delay_write.delay_write_flag = 0;
#endif
	ms_card->pro_under_formatting = 0;

	chip->card_ready &= ~MS_CARD;
	chip->card_fail &= ~MS_CARD;
	chip->card_wp &= ~MS_CARD;

	ms_free_l2p_tbl(chip);

	/* 96/48 presumably match the raw_* array sizes - TODO confirm. */
	memset(ms_card->raw_sys_info, 0, 96);
#ifdef SUPPORT_PCGL_1P18
	memset(ms_card->raw_model_name, 0, 48);
#endif

	retval = ms_power_off_card3v3(chip);
	if (retval != STATUS_SUCCESS)
		return STATUS_FAIL;

	return STATUS_SUCCESS;
}
| linux-master | drivers/staging/rts5208/ms.c |
// SPDX-License-Identifier: GPL-2.0+
/*
* Driver for Realtek PCI-Express card reader
*
* Copyright(c) 2009-2013 Realtek Semiconductor Corp. All rights reserved.
*
* Author:
* Wei WANG ([email protected])
* Micky Ching ([email protected])
*/
#include <linux/blkdev.h>
#include <linux/kthread.h>
#include <linux/sched.h>
#include <linux/workqueue.h>
#include "rtsx.h"
#include "ms.h"
#include "sd.h"
#include "xd.h"
MODULE_DESCRIPTION("Realtek PCI-Express card reader rts5208/rts5288 driver");
MODULE_LICENSE("GPL");

/* Module parameters; all writable at runtime through sysfs (mode 0644). */

static unsigned int delay_use = 1;
module_param(delay_use, uint, 0644);
MODULE_PARM_DESC(delay_use, "seconds to delay before using a new device");

static int ss_en;
module_param(ss_en, int, 0644);
MODULE_PARM_DESC(ss_en, "enable selective suspend");

static int ss_interval = 50;
module_param(ss_interval, int, 0644);
MODULE_PARM_DESC(ss_interval, "Interval to enter ss state in seconds");

static int auto_delink_en;
module_param(auto_delink_en, int, 0644);
MODULE_PARM_DESC(auto_delink_en, "enable auto delink");

static unsigned char aspm_l0s_l1_en;
module_param(aspm_l0s_l1_en, byte, 0644);
MODULE_PARM_DESC(aspm_l0s_l1_en, "enable device aspm");

static int msi_en;
module_param(msi_en, int, 0644);
MODULE_PARM_DESC(msi_en, "enable msi");

/* IRQ handler; defined further down in this file. */
static irqreturn_t rtsx_interrupt(int irq, void *dev_id);
/***********************************************************************
* Host functions
***********************************************************************/
/* Return a human-readable description of this emulated SCSI host. */
static const char *host_info(struct Scsi_Host *host)
{
	static const char info[] =
		"SCSI emulation for PCI-Express Mass Storage devices";

	return info;
}
/* Per-device hook run when the SCSI midlayer allocates a new device. */
static int slave_alloc(struct scsi_device *sdev)
{
	/*
	 * Set the INQUIRY transfer length to 36.  We don't use any of
	 * the extra data and many devices choke if asked for more or
	 * less than 36 bytes.
	 */
	sdev->inquiry_len = 36;
	return 0;
}
/*
 * Per-device hook run after INQUIRY, before the device is used.
 * NOTE(review): the comments below read like they were inherited from
 * usb-storage (bulk maxpacket); the 512-byte DMA alignment is kept here
 * regardless - confirm against the transport code if changing it.
 */
static int slave_configure(struct scsi_device *sdev)
{
	/*
	 * Scatter-gather buffers (all but the last) must have a length
	 * divisible by the bulk maxpacket size.  Otherwise a data packet
	 * would end up being short, causing a premature end to the data
	 * transfer.  Since high-speed bulk pipes have a maxpacket size
	 * of 512, we'll use that as the scsi device queue's DMA alignment
	 * mask.  Guaranteeing proper alignment of the first buffer will
	 * have the desired effect because, except at the beginning and
	 * the end, scatter-gather buffers follow page boundaries.
	 */
	blk_queue_dma_alignment(sdev->request_queue, (512 - 1));

	/* Set the SCSI level to at least 2.  We'll leave it at 3 if that's
	 * what is originally reported.  We need this to avoid confusing
	 * the SCSI layer with devices that report 0 or 1, but need 10-byte
	 * commands (ala ATAPI devices behind certain bridges, or devices
	 * which simply have broken INQUIRY data).
	 *
	 * NOTE: This means /dev/sg programs (ala cdrecord) will get the
	 * actual information.  This seems to be the preference for
	 * programs like that.
	 *
	 * NOTE: This also means that /proc/scsi/scsi and sysfs may report
	 * the actual value or the modified one, depending on where the
	 * data comes from.
	 */
	if (sdev->scsi_level < SCSI_2) {
		sdev->scsi_level = SCSI_2;
		sdev->sdev_target->scsi_level = SCSI_2;
	}

	return 0;
}
/***********************************************************************
* /proc/scsi/ functions
***********************************************************************/
/* we use this macro to help us write into the buffer */
/*
 * NOTE(review): no /proc read routine is visible in this chunk; SPRINTF
 * looks like a leftover from the removed proc_info interface - verify
 * before deleting.  Assumes `pos`, `buffer` and `length` in caller scope.
 */
#undef SPRINTF
#define SPRINTF(args...) \
	do { \
		if (pos < buffer + length) \
			pos += sprintf(pos, ## args); \
	} while (0)
/* queue a command */
/* This is always called with scsi_lock(host) held */
/*
 * Queue one SCSI command for the control thread.  Called with the host
 * lock held (see DEF_SCSI_QCMD below).  Exactly one command may be in
 * flight: a non-NULL chip->srb means the midlayer must retry.
 */
static int queuecommand_lck(struct scsi_cmnd *srb)
{
	void (*done)(struct scsi_cmnd *) = scsi_done;
	struct rtsx_dev *dev = host_to_rtsx(srb->device->host);
	struct rtsx_chip *chip = dev->chip;

	/* check for state-transition errors */
	if (chip->srb) {
		dev_err(&dev->pci->dev, "Error: chip->srb = %p\n",
			chip->srb);
		return SCSI_MLQUEUE_HOST_BUSY;
	}

	/* fail the command if we are disconnecting */
	if (rtsx_chk_stat(chip, RTSX_STAT_DISCONNECT)) {
		dev_info(&dev->pci->dev, "Fail command during disconnect\n");
		srb->result = DID_NO_CONNECT << 16;
		done(srb);
		return 0;
	}

	/* enqueue the command and wake up the control thread */
	chip->srb = srb;
	complete(&dev->cmnd_ready);

	return 0;
}
static DEF_SCSI_QCMD(queuecommand)
/***********************************************************************
* Error handling functions
***********************************************************************/
/* Command timeout and abort */
/*
 * eh_abort_handler: abort the in-flight command, if it is still ours.
 * Sets RTSX_STAT_ABORT under the host lock so the control thread sees
 * it, then waits for the control thread to signal completion.
 */
static int command_abort(struct scsi_cmnd *srb)
{
	struct Scsi_Host *host = srb->device->host;
	struct rtsx_dev *dev = host_to_rtsx(host);
	struct rtsx_chip *chip = dev->chip;

	scsi_lock(host);

	/* Is this command still active? */
	if (chip->srb != srb) {
		scsi_unlock(host);
		dev_info(&dev->pci->dev, "-- nothing to abort\n");
		return FAILED;
	}

	rtsx_set_stat(chip, RTSX_STAT_ABORT);

	scsi_unlock(host);

	/* Wait for the aborted command to finish */
	wait_for_completion(&dev->notify);

	return SUCCESS;
}
/*
* This invokes the transport reset mechanism to reset the state of the
* device
*/
/* eh_device_reset_handler: nothing to do, always report success. */
static int device_reset(struct scsi_cmnd *srb)
{
	return SUCCESS;
}
/*
* this defines our host template, with which we'll allocate hosts
*/
/* SCSI host template: one queued command at a time, fully emulated HBA. */
static const struct scsi_host_template rtsx_host_template = {
	/* basic userland interface stuff */
	.name = CR_DRIVER_NAME,
	.proc_name = CR_DRIVER_NAME,
	.info = host_info,

	/* command interface -- queued only */
	.queuecommand = queuecommand,

	/* error and abort handlers */
	.eh_abort_handler = command_abort,
	.eh_device_reset_handler = device_reset,

	/* queue commands only, only one command per LUN */
	.can_queue = 1,

	/* unknown initiator id */
	.this_id = -1,

	.slave_alloc = slave_alloc,
	.slave_configure = slave_configure,

	/* lots of sg segments can be handled */
	.sg_tablesize = SG_ALL,

	/* limit the total size of a transfer to 120 KB */
	.max_sectors = 240,

	/* emulated HBA */
	.emulated = 1,

	/* we do our own delay after a device or bus reset */
	.skip_settle_delay = 1,

	/* module management */
	.module = THIS_MODULE
};
/*
 * Request the PCI interrupt line: exclusive when MSI is in use, shared
 * for legacy INTx.  pci_intx() enables INTx only when MSI is off.
 * Returns 0 on success, -1 on failure.
 */
static int rtsx_acquire_irq(struct rtsx_dev *dev)
{
	struct rtsx_chip *chip = dev->chip;

	dev_info(&dev->pci->dev, "%s: chip->msi_en = %d, pci->irq = %d\n",
		 __func__, chip->msi_en, dev->pci->irq);

	if (request_irq(dev->pci->irq, rtsx_interrupt,
			chip->msi_en ? 0 : IRQF_SHARED,
			CR_DRIVER_NAME, dev)) {
		dev_err(&dev->pci->dev,
			"rtsx: unable to grab IRQ %d, disabling device\n",
			dev->pci->irq);
		return -1;
	}

	dev->irq = dev->pci->irq;
	pci_intx(dev->pci, !chip->msi_en);

	return 0;
}
/*
* power management
*/
/*
 * PM suspend callback: prepare the chip for S3, release the IRQ (and
 * MSI vectors) and arm device wakeup.  The IRQ is re-acquired in
 * rtsx_resume().
 */
static int __maybe_unused rtsx_suspend(struct device *dev_d)
{
	struct pci_dev *pci = to_pci_dev(dev_d);
	struct rtsx_dev *dev = pci_get_drvdata(pci);
	struct rtsx_chip *chip;

	if (!dev)
		return 0;

	/* lock the device pointers */
	mutex_lock(&dev->dev_mutex);

	chip = dev->chip;

	rtsx_do_before_power_down(chip, PM_S3);

	if (dev->irq >= 0) {
		free_irq(dev->irq, (void *)dev);
		dev->irq = -1;
	}

	if (chip->msi_en)
		pci_free_irq_vectors(pci);

	device_wakeup_enable(dev_d);

	/* unlock the device pointers */
	mutex_unlock(&dev->dev_mutex);

	return 0;
}
/*
 * PM resume callback: re-enable bus mastering, reallocate MSI vectors
 * if they were in use, re-acquire the IRQ, wake the chip out of the
 * sleep state and re-run chip initialization.
 */
static int __maybe_unused rtsx_resume(struct device *dev_d)
{
	struct pci_dev *pci = to_pci_dev(dev_d);
	struct rtsx_dev *dev = pci_get_drvdata(pci);
	struct rtsx_chip *chip;

	if (!dev)
		return 0;

	chip = dev->chip;

	/* lock the device pointers */
	mutex_lock(&dev->dev_mutex);

	pci_set_master(pci);

	if (chip->msi_en) {
		/* Fall back to INTx if MSI allocation fails. */
		if (pci_alloc_irq_vectors(pci, 1, 1, PCI_IRQ_MSI) < 0)
			chip->msi_en = 0;
	}

	if (rtsx_acquire_irq(dev) < 0) {
		/* unlock the device pointers */
		mutex_unlock(&dev->dev_mutex);
		return -EIO;
	}

	rtsx_write_register(chip, HOST_SLEEP_STATE, 0x03, 0x00);
	rtsx_init_chip(chip);

	/* unlock the device pointers */
	mutex_unlock(&dev->dev_mutex);

	return 0;
}
/*
 * Shutdown hook: quiesce the chip for S1, release the IRQ/MSI vectors
 * and disable the PCI device.  No locking: nothing else runs at
 * shutdown time.
 */
static void rtsx_shutdown(struct pci_dev *pci)
{
	struct rtsx_dev *dev = pci_get_drvdata(pci);
	struct rtsx_chip *chip;

	if (!dev)
		return;

	chip = dev->chip;

	rtsx_do_before_power_down(chip, PM_S1);

	if (dev->irq >= 0) {
		free_irq(dev->irq, (void *)dev);
		dev->irq = -1;
	}

	if (chip->msi_en)
		pci_free_irq_vectors(pci);

	pci_disable_device(pci);
}
/*
 * Main command-processing kthread.  Sleeps on dev->cmnd_ready until
 * queuecommand posts a command in chip->srb, validates it (direction,
 * target, LUN) and hands it to the transport.  chip->srb is always
 * cleared under the host lock before the loop repeats.  Exits on
 * signal or when RTSX_STAT_DISCONNECT is set.
 */
static int rtsx_control_thread(void *__dev)
{
	struct rtsx_dev *dev = __dev;
	struct rtsx_chip *chip = dev->chip;
	struct Scsi_Host *host = rtsx_to_host(dev);

	for (;;) {
		if (wait_for_completion_interruptible(&dev->cmnd_ready))
			break;

		/* lock the device pointers */
		mutex_lock(&dev->dev_mutex);

		/* if the device has disconnected, we are free to exit */
		if (rtsx_chk_stat(chip, RTSX_STAT_DISCONNECT)) {
			dev_info(&dev->pci->dev, "-- rtsx-control exiting\n");
			mutex_unlock(&dev->dev_mutex);
			break;
		}

		/* lock access to the state */
		scsi_lock(host);

		/* has the command aborted ? */
		if (rtsx_chk_stat(chip, RTSX_STAT_ABORT)) {
			chip->srb->result = DID_ABORT << 16;
			/* jumps past the unlock/dispatch to the tail below */
			goto skip_for_abort;
		}

		scsi_unlock(host);

		/* reject the command if the direction indicator
		 * is UNKNOWN
		 */
		if (chip->srb->sc_data_direction == DMA_BIDIRECTIONAL) {
			dev_err(&dev->pci->dev, "UNKNOWN data direction\n");
			chip->srb->result = DID_ERROR << 16;
		} else if (chip->srb->device->id) {
			/* reject if target != 0 or if LUN is higher than
			 * the maximum known LUN
			 */
			dev_err(&dev->pci->dev, "Bad target number (%d:%d)\n",
				chip->srb->device->id,
				(u8)chip->srb->device->lun);
			chip->srb->result = DID_BAD_TARGET << 16;
		} else if (chip->srb->device->lun > chip->max_lun) {
			dev_err(&dev->pci->dev, "Bad LUN (%d:%d)\n",
				chip->srb->device->id,
				(u8)chip->srb->device->lun);
			chip->srb->result = DID_BAD_TARGET << 16;
		} else {
			/* we've got a command, let's do it! */
			scsi_show_command(chip);
			rtsx_invoke_transport(chip->srb, chip);
		}

		/* lock access to the state */
		scsi_lock(host);

		/* did the command already complete because of a disconnect? */
		if (!chip->srb)
			; /* nothing to do */

		/* indicate that the command is done */
		else if (chip->srb->result != DID_ABORT << 16) {
			scsi_done(chip->srb);
		} else {
skip_for_abort:
			dev_err(&dev->pci->dev, "scsi command aborted\n");
		}

		/* an abort was requested: wake command_abort() and go idle */
		if (rtsx_chk_stat(chip, RTSX_STAT_ABORT)) {
			complete(&dev->notify);

			rtsx_set_stat(chip, RTSX_STAT_IDLE);
		}

		/* finished working on this command */
		chip->srb = NULL;
		scsi_unlock(host);

		/* unlock the device pointers */
		mutex_unlock(&dev->dev_mutex);
	} /* for (;;) */

	/* notify the exit routine that we're actually exiting now
	 *
	 * complete()/wait_for_completion() is similar to up()/down(),
	 * except that complete() is safe in the case where the structure
	 * is getting deleted in a parallel mode of execution (i.e. just
	 * after the down() -- that's necessary for the thread-shutdown
	 * case.
	 *
	 * kthread_complete_and_exit() goes even further than this --
	 * it is safe in the case that the thread of the caller is going away
	 * (not just the structure) -- this is necessary for the module-remove
	 * case.  This is important in preemption kernels, which transfer the
	 * flow of execution immediately upon a complete().
	 */
	kthread_complete_and_exit(&dev->control_exit, 0);
}
/*
 * Background polling kthread: after the initial SCSI scan settles,
 * runs mspro_polling_format_status() and rtsx_polling_func() every
 * POLLING_INTERVAL ms until RTSX_STAT_DISCONNECT is set.  The format
 * status poll deliberately runs outside dev_mutex.
 */
static int rtsx_polling_thread(void *__dev)
{
	struct rtsx_dev *dev = __dev;
	struct rtsx_chip *chip = dev->chip;
	struct sd_info *sd_card = &chip->sd_card;
	struct xd_info *xd_card = &chip->xd_card;
	struct ms_info *ms_card = &chip->ms_card;

	sd_card->cleanup_counter = 0;
	xd_card->cleanup_counter = 0;
	ms_card->cleanup_counter = 0;

	/* Wait until SCSI scan finished */
	wait_timeout((delay_use + 5) * 1000);

	for (;;) {
		set_current_state(TASK_INTERRUPTIBLE);
		schedule_timeout(msecs_to_jiffies(POLLING_INTERVAL));

		/* lock the device pointers */
		mutex_lock(&dev->dev_mutex);

		/* if the device has disconnected, we are free to exit */
		if (rtsx_chk_stat(chip, RTSX_STAT_DISCONNECT)) {
			dev_info(&dev->pci->dev, "-- rtsx-polling exiting\n");
			mutex_unlock(&dev->dev_mutex);
			break;
		}

		mutex_unlock(&dev->dev_mutex);

		mspro_polling_format_status(chip);

		/* lock the device pointers */
		mutex_lock(&dev->dev_mutex);

		rtsx_polling_func(chip);

		/* unlock the device pointers */
		mutex_unlock(&dev->dev_mutex);
	}

	kthread_complete_and_exit(&dev->polling_exit, 0);
}
/*
* interrupt handler
*/
/*
 * Hard-IRQ handler.  Reads/acks the interrupt status via
 * rtsx_pre_handle_interrupt() under dev->reg_lock, then completes
 * dev->done according to the transfer outcome.  An all-ones status
 * (0xFFFFFFFF) means the device is gone; it is reported as handled
 * so a shared line is not disabled.
 */
static irqreturn_t rtsx_interrupt(int irq, void *dev_id)
{
	struct rtsx_dev *dev = dev_id;
	struct rtsx_chip *chip;
	int retval;
	u32 status;

	if (dev)
		chip = dev->chip;
	else
		return IRQ_NONE;

	if (!chip)
		return IRQ_NONE;

	spin_lock(&dev->reg_lock);

	retval = rtsx_pre_handle_interrupt(chip);
	if (retval == STATUS_FAIL) {
		spin_unlock(&dev->reg_lock);
		if (chip->int_reg == 0xFFFFFFFF)
			return IRQ_HANDLED;
		return IRQ_NONE;
	}

	status = chip->int_reg;

	if (dev->check_card_cd) {
		if (!(dev->check_card_cd & status)) {
			/* card not exist, return TRANS_RESULT_FAIL */
			dev->trans_result = TRANS_RESULT_FAIL;
			if (dev->done)
				complete(dev->done);
			goto exit;
		}
	}

	if (status & (NEED_COMPLETE_INT | DELINK_INT)) {
		if (status & (TRANS_FAIL_INT | DELINK_INT)) {
			if (status & DELINK_INT)
				RTSX_SET_DELINK(chip);
			dev->trans_result = TRANS_RESULT_FAIL;
			if (dev->done)
				complete(dev->done);
		} else if (status & TRANS_OK_INT) {
			dev->trans_result = TRANS_RESULT_OK;
			if (dev->done)
				complete(dev->done);
		} else if (status & DATA_DONE_INT) {
			/* partial progress: only meaningful mid-SG transfer */
			dev->trans_result = TRANS_NOT_READY;
			if (dev->done && dev->trans_state == STATE_TRANS_SG)
				complete(dev->done);
		}
	}

exit:
	spin_unlock(&dev->reg_lock);
	return IRQ_HANDLED;
}
/* Release all our dynamic resources */
static void rtsx_release_resources(struct rtsx_dev *dev)
{
dev_info(&dev->pci->dev, "-- %s\n", __func__);
/* Tell the control thread to exit. The SCSI host must
* already have been removed so it won't try to queue
* any more commands.
*/
dev_info(&dev->pci->dev, "-- sending exit command to thread\n");
complete(&dev->cmnd_ready);
if (dev->ctl_thread)
wait_for_completion(&dev->control_exit);
if (dev->polling_thread)
wait_for_completion(&dev->polling_exit);
wait_timeout(200);
if (dev->rtsx_resv_buf) {
dev->chip->host_cmds_ptr = NULL;
dev->chip->host_sg_tbl_ptr = NULL;
}
if (dev->irq > 0)
free_irq(dev->irq, (void *)dev);
if (dev->chip->msi_en)
pci_free_irq_vectors(dev->pci);
if (dev->remap_addr)
iounmap(dev->remap_addr);
rtsx_release_chip(dev->chip);
kfree(dev->chip);
}
/*
* First stage of disconnect processing: stop all commands and remove
* the host
*/
/*
 * First stage of disconnect: mark the device disconnected, wait for the
 * scan thread, fail any still-pending command, then remove the SCSI
 * host.  rtsx_release_resources() performs the second stage.
 */
static void quiesce_and_remove_host(struct rtsx_dev *dev)
{
	struct Scsi_Host *host = rtsx_to_host(dev);
	struct rtsx_chip *chip = dev->chip;

	/*
	 * Prevent new transfers, stop the current command, and
	 * interrupt a SCSI-scan or device-reset delay
	 */
	mutex_lock(&dev->dev_mutex);
	scsi_lock(host);
	rtsx_set_stat(chip, RTSX_STAT_DISCONNECT);
	scsi_unlock(host);
	mutex_unlock(&dev->dev_mutex);
	wake_up(&dev->delay_wait);
	wait_for_completion(&dev->scanning_done);

	/* Wait some time to let other threads exist */
	wait_timeout(100);

	/*
	 * queuecommand won't accept any new commands and the control
	 * thread won't execute a previously-queued command.  If there
	 * is such a command pending, complete it with an error.
	 */
	mutex_lock(&dev->dev_mutex);
	if (chip->srb) {
		chip->srb->result = DID_NO_CONNECT << 16;
		scsi_lock(host);
		scsi_done(dev->chip->srb);
		chip->srb = NULL;
		scsi_unlock(host);
	}
	mutex_unlock(&dev->dev_mutex);

	/* Now we own no commands so it's safe to remove the SCSI host */
	scsi_remove_host(host);
}
/* Second stage of disconnect processing: deallocate all resources */
static void release_everything(struct rtsx_dev *dev)
{
rtsx_release_resources(dev);
/*
* Drop our reference to the host; the SCSI core will free it
* when the refcount becomes 0.
*/
scsi_host_put(rtsx_to_host(dev));
}
/* Thread to carry out delayed SCSI-device scanning */
/*
 * One-shot kthread that delays the SCSI scan by the `delay_use` module
 * parameter (interruptible early by a disconnect), then scans the host.
 * Signals dev->scanning_done on exit.
 */
static int rtsx_scan_thread(void *__dev)
{
	struct rtsx_dev *dev = __dev;
	struct rtsx_chip *chip = dev->chip;

	/* Wait for the timeout to expire or for a disconnect */
	if (delay_use > 0) {
		dev_info(&dev->pci->dev,
			 "%s: waiting for device to settle before scanning\n",
			 CR_DRIVER_NAME);
		wait_event_interruptible_timeout
			(dev->delay_wait,
			 rtsx_chk_stat(chip, RTSX_STAT_DISCONNECT),
			 delay_use * HZ);
	}

	/* If the device is still connected, perform the scanning */
	if (!rtsx_chk_stat(chip, RTSX_STAT_DISCONNECT)) {
		scsi_scan_host(rtsx_to_host(dev));
		dev_info(&dev->pci->dev, "%s: device scan complete\n",
			 CR_DRIVER_NAME);

		/* Should we unbind if no devices were detected? */
	}

	kthread_complete_and_exit(&dev->scanning_done, 0);
}
/*
 * Populate chip defaults before rtsx_init_chip().  Values are either
 * fixed driver policy or copied from the module parameters
 * (auto_delink_en, ss_en, ss_interval, aspm_l0s_l1_en, msi_en).
 */
static void rtsx_init_options(struct rtsx_chip *chip)
{
	/* Identification and general policy. */
	chip->vendor_id = chip->rtsx->pci->vendor;
	chip->product_id = chip->rtsx->pci->device;
	chip->adma_mode = 1;
	chip->lun_mc = 0;
	chip->driver_first_load = 1;
#ifdef HW_AUTO_SWITCH_SD_BUS
	chip->sdio_in_charge = 0;
#endif

	chip->mspro_formatter_enable = 1;
	chip->ignore_sd = 0;
	chip->use_hw_setting = 0;
	chip->lun_mode = DEFAULT_SINGLE;
	/* Module-parameter-driven power-management policy. */
	chip->auto_delink_en = auto_delink_en;
	chip->ss_en = ss_en;
	chip->ss_idle_period = ss_interval * 1000;
	chip->remote_wakeup_en = 0;
	chip->aspm_l0s_l1_en = aspm_l0s_l1_en;
	chip->dynamic_aspm = 1;
	/* Clock selections for FPGA (prototype) parts. */
	chip->fpga_sd_sdr104_clk = CLK_200;
	chip->fpga_sd_ddr50_clk = CLK_100;
	chip->fpga_sd_sdr50_clk = CLK_100;
	chip->fpga_sd_hs_clk = CLK_100;
	chip->fpga_mmc_52m_clk = CLK_80;
	chip->fpga_ms_hg_clk = CLK_80;
	chip->fpga_ms_4bit_clk = CLK_80;
	chip->fpga_ms_1bit_clk = CLK_40;
	/* Clock selections (MHz) for ASIC (production) parts. */
	chip->asic_sd_sdr104_clk = 203;
	chip->asic_sd_sdr50_clk = 98;
	chip->asic_sd_ddr50_clk = 98;
	chip->asic_sd_hs_clk = 98;
	chip->asic_mmc_52m_clk = 98;
	chip->asic_ms_hg_clk = 117;
	chip->asic_ms_4bit_clk = 78;
	chip->asic_ms_1bit_clk = 39;
	/* Spread-spectrum clocking depths per bus mode. */
	chip->ssc_depth_sd_sdr104 = SSC_DEPTH_2M;
	chip->ssc_depth_sd_sdr50 = SSC_DEPTH_2M;
	chip->ssc_depth_sd_ddr50 = SSC_DEPTH_1M;
	chip->ssc_depth_sd_hs = SSC_DEPTH_1M;
	chip->ssc_depth_mmc_52m = SSC_DEPTH_1M;
	chip->ssc_depth_ms_hg = SSC_DEPTH_1M;
	chip->ssc_depth_ms_4bit = SSC_DEPTH_512K;
	chip->ssc_depth_low_speed = SSC_DEPTH_512K;
	chip->ssc_en = 1;
	/* SD mode/current priority orderings (packed nibble lists). */
	chip->sd_speed_prior = 0x01040203;
	chip->sd_current_prior = 0x00010203;
	chip->sd_ctl = SD_PUSH_POINT_AUTO |
		       SD_SAMPLE_POINT_AUTO |
		       SUPPORT_MMC_DDR_MODE;
	chip->sd_ddr_tx_phase = 0;
	chip->mmc_ddr_tx_phase = 1;
	chip->sd_default_tx_phase = 15;
	chip->sd_default_rx_phase = 15;
	chip->pmos_pwr_on_interval = 200;
	chip->sd_voltage_switch_delay = 1000;
	chip->ms_power_class_en = 3;

	/* Over-current protection thresholds. */
	chip->sd_400mA_ocp_thd = 1;
	chip->sd_800mA_ocp_thd = 5;
	chip->ms_ocp_thd = 2;

	chip->card_drive_sel = 0x55;
	chip->sd30_drive_sel_1v8 = 0x03;
	chip->sd30_drive_sel_3v3 = 0x01;

	chip->do_delink_before_power_down = 1;
	chip->auto_power_down = 1;
	chip->polling_config = 0;

	chip->force_clkreq_0 = 1;
	chip->ft2_fast_mode = 0;

	chip->sdio_retry_cnt = 1;

	/* Per-media command timeouts in milliseconds. */
	chip->xd_timeout = 2000;
	chip->sd_timeout = 10000;
	chip->ms_timeout = 2000;
	chip->mspro_timeout = 15000;

	chip->power_down_in_ss = 1;

	chip->sdr104_en = 1;
	chip->sdr50_en = 1;
	chip->ddr50_en = 1;

	/* Delink state-machine step counts. */
	chip->delink_stage1_step = 100;
	chip->delink_stage2_step = 40;
	chip->delink_stage3_step = 20;

	chip->auto_delink_in_L1 = 1;
	chip->blink_led = 1;
	chip->msi_en = msi_en;
	chip->hp_watch_bios_hotplug = 0;
	chip->max_payload = 0;
	chip->phy_voltage = 0;

	chip->support_ms_8bit = 1;
	chip->s3_pwr_off_delay = 1000;
}
/*
 * PCI probe: enable the device, map BAR0, allocate the SCSI host plus
 * the private rtsx_dev/rtsx_chip, set up the coherent command/SG
 * buffer and the IRQ, then start the control, scan and polling
 * kthreads.  The error-unwind labels at the bottom release resources
 * in strict reverse order of acquisition.
 */
static int rtsx_probe(struct pci_dev *pci,
		      const struct pci_device_id *pci_id)
{
	struct Scsi_Host *host;
	struct rtsx_dev *dev;
	int err = 0;
	struct task_struct *th;

	dev_dbg(&pci->dev, "Realtek PCI-E card reader detected\n");

	err = pcim_enable_device(pci);
	if (err < 0) {
		dev_err(&pci->dev, "PCI enable device failed!\n");
		return err;
	}

	err = pci_request_regions(pci, CR_DRIVER_NAME);
	if (err < 0) {
		dev_err(&pci->dev, "PCI request regions for %s failed!\n",
			CR_DRIVER_NAME);
		return err;
	}

	/*
	 * Ask the SCSI layer to allocate a host structure, with extra
	 * space at the end for our private rtsx_dev structure.
	 */
	host = scsi_host_alloc(&rtsx_host_template, sizeof(*dev));
	if (!host) {
		dev_err(&pci->dev, "Unable to allocate the scsi host\n");
		err = -ENOMEM;
		goto scsi_host_alloc_fail;
	}

	dev = host_to_rtsx(host);
	memset(dev, 0, sizeof(struct rtsx_dev));

	dev->chip = kzalloc(sizeof(*dev->chip), GFP_KERNEL);
	if (!dev->chip) {
		err = -ENOMEM;
		goto chip_alloc_fail;
	}

	spin_lock_init(&dev->reg_lock);
	mutex_init(&dev->dev_mutex);
	init_completion(&dev->cmnd_ready);
	init_completion(&dev->control_exit);
	init_completion(&dev->polling_exit);
	init_completion(&dev->notify);
	init_completion(&dev->scanning_done);
	init_waitqueue_head(&dev->delay_wait);

	dev->pci = pci;
	dev->irq = -1;

	dev_info(&pci->dev, "Resource length: 0x%x\n",
		 (unsigned int)pci_resource_len(pci, 0));
	dev->addr = pci_resource_start(pci, 0);
	dev->remap_addr = ioremap(dev->addr, pci_resource_len(pci, 0));
	if (!dev->remap_addr) {
		dev_err(&pci->dev, "ioremap error\n");
		err = -ENXIO;
		goto ioremap_fail;
	}

	/*
	 * Using "unsigned long" cast here to eliminate gcc warning in
	 * 64-bit system
	 */
	dev_info(&pci->dev, "Original address: 0x%lx, remapped address: 0x%lx\n",
		 (unsigned long)(dev->addr), (unsigned long)(dev->remap_addr));

	/* Coherent buffer shared between host command table and SG table. */
	dev->rtsx_resv_buf = dmam_alloc_coherent(&pci->dev, RTSX_RESV_BUF_LEN,
						 &dev->rtsx_resv_buf_addr,
						 GFP_KERNEL);
	if (!dev->rtsx_resv_buf) {
		dev_err(&pci->dev, "alloc dma buffer fail\n");
		err = -ENXIO;
		goto dma_alloc_fail;
	}
	dev->chip->host_cmds_ptr = dev->rtsx_resv_buf;
	dev->chip->host_cmds_addr = dev->rtsx_resv_buf_addr;
	dev->chip->host_sg_tbl_ptr = dev->rtsx_resv_buf + HOST_CMDS_BUF_LEN;
	dev->chip->host_sg_tbl_addr = dev->rtsx_resv_buf_addr +
				      HOST_CMDS_BUF_LEN;

	dev->chip->rtsx = dev;

	rtsx_init_options(dev->chip);

	dev_info(&pci->dev, "pci->irq = %d\n", pci->irq);

	if (dev->chip->msi_en) {
		/* Fall back to INTx if MSI allocation fails. */
		if (pci_alloc_irq_vectors(pci, 1, 1, PCI_IRQ_MSI) < 0)
			dev->chip->msi_en = 0;
	}

	if (rtsx_acquire_irq(dev) < 0) {
		err = -EBUSY;
		goto irq_acquire_fail;
	}

	pci_set_master(pci);
	synchronize_irq(dev->irq);

	rtsx_init_chip(dev->chip);

	/*
	 * set the supported max_lun and max_id for the scsi host
	 * NOTE: the minimal value of max_id is 1
	 */
	host->max_id = 1;
	host->max_lun = dev->chip->max_lun;

	/* Start up our control thread */
	th = kthread_run(rtsx_control_thread, dev, CR_DRIVER_NAME);
	if (IS_ERR(th)) {
		dev_err(&pci->dev, "Unable to start control thread\n");
		err = PTR_ERR(th);
		goto control_thread_fail;
	}
	dev->ctl_thread = th;

	err = scsi_add_host(host, &pci->dev);
	if (err) {
		dev_err(&pci->dev, "Unable to add the scsi host\n");
		goto scsi_add_host_fail;
	}

	/* Start up the thread for delayed SCSI-device scanning */
	th = kthread_run(rtsx_scan_thread, dev, "rtsx-scan");
	if (IS_ERR(th)) {
		dev_err(&pci->dev, "Unable to start the device-scanning thread\n");
		complete(&dev->scanning_done);
		err = PTR_ERR(th);
		goto scan_thread_fail;
	}

	/* Start up the thread for polling thread */
	th = kthread_run(rtsx_polling_thread, dev, "rtsx-polling");
	if (IS_ERR(th)) {
		dev_err(&pci->dev, "Unable to start the device-polling thread\n");
		err = PTR_ERR(th);
		goto scan_thread_fail;
	}
	dev->polling_thread = th;

	pci_set_drvdata(pci, dev);

	return 0;

	/* We come here if there are any problems */
	/* Unwind in reverse order of acquisition. */
scan_thread_fail:
	quiesce_and_remove_host(dev);
scsi_add_host_fail:
	complete(&dev->cmnd_ready);
	wait_for_completion(&dev->control_exit);
control_thread_fail:
	free_irq(dev->irq, (void *)dev);
	rtsx_release_chip(dev->chip);
irq_acquire_fail:
	dev->chip->host_cmds_ptr = NULL;
	dev->chip->host_sg_tbl_ptr = NULL;
	if (dev->chip->msi_en)
		pci_free_irq_vectors(dev->pci);
dma_alloc_fail:
	iounmap(dev->remap_addr);
ioremap_fail:
	kfree(dev->chip);
chip_alloc_fail:
	dev_err(&pci->dev, "%s failed\n", __func__);
	scsi_host_put(host);
scsi_host_alloc_fail:
	pci_release_regions(pci);
	return err;
}
/* PCI remove: two-stage teardown (quiesce + release), then free BARs. */
static void rtsx_remove(struct pci_dev *pci)
{
	struct rtsx_dev *dev = pci_get_drvdata(pci);

	quiesce_and_remove_host(dev);
	release_everything(dev);
	pci_release_regions(pci);
}
/* PCI IDs */
/* PCI IDs: rts5208/rts5288, matched only when the class is "other". */
static const struct pci_device_id rtsx_ids[] = {
	{ PCI_DEVICE(PCI_VENDOR_ID_REALTEK, 0x5208),
	  PCI_CLASS_OTHERS << 16, 0xFF0000 },
	{ PCI_DEVICE(PCI_VENDOR_ID_REALTEK, 0x5288),
	  PCI_CLASS_OTHERS << 16, 0xFF0000 },
	{ 0, },
};

MODULE_DEVICE_TABLE(pci, rtsx_ids);
/* System sleep PM ops (suspend/resume defined above). */
static SIMPLE_DEV_PM_OPS(rtsx_pm_ops, rtsx_suspend, rtsx_resume);

/* pci_driver definition */
static struct pci_driver rtsx_driver = {
	.name = CR_DRIVER_NAME,
	.id_table = rtsx_ids,
	.probe = rtsx_probe,
	.remove = rtsx_remove,
	.driver.pm = &rtsx_pm_ops,
	.shutdown = rtsx_shutdown,
};

module_pci_driver(rtsx_driver);
| linux-master | drivers/staging/rts5208/rtsx.c |
// SPDX-License-Identifier: GPL-2.0+
/*
* Driver for Realtek PCI-Express card reader
*
* Copyright(c) 2009-2013 Realtek Semiconductor Corp. All rights reserved.
*
* Author:
* Wei WANG ([email protected])
* Micky Ching ([email protected])
*/
#include <linux/blkdev.h>
#include <linux/kthread.h>
#include <linux/sched.h>
#include "rtsx.h"
#include "sd.h"
#define SD_MAX_RETRY_COUNT	3

/*
 * SD controller register addresses.  These are file-scope variables
 * (not constants) because the mapping depends on the chip variant;
 * they are filled in by sd_init_reg_addr().  Entries left at 0 are
 * unused on this chip.
 */
static u16 REG_SD_CFG1;
static u16 REG_SD_CFG2;
static u16 REG_SD_CFG3;
static u16 REG_SD_STAT1;
static u16 REG_SD_STAT2;
static u16 REG_SD_BUS_STAT;
static u16 REG_SD_PAD_CTL;
static u16 REG_SD_SAMPLE_POINT_CTL;
static u16 REG_SD_PUSH_POINT_CTL;
static u16 REG_SD_CMD0;
static u16 REG_SD_CMD1;
static u16 REG_SD_CMD2;
static u16 REG_SD_CMD3;
static u16 REG_SD_CMD4;
static u16 REG_SD_CMD5;
static u16 REG_SD_BYTE_CNT_L;
static u16 REG_SD_BYTE_CNT_H;
static u16 REG_SD_BLOCK_CNT_L;
static u16 REG_SD_BLOCK_CNT_H;
static u16 REG_SD_TRANSFER;
static u16 REG_SD_VPCLK0_CTL;
static u16 REG_SD_VPCLK1_CTL;
static u16 REG_SD_DCMPS0_CTL;
static u16 REG_SD_DCMPS1_CTL;
/* Accumulate an SD error flag into the per-card error bitmask. */
static inline void sd_set_err_code(struct rtsx_chip *chip, u8 err_code)
{
	chip->sd_card.err_code |= err_code;
}
/* Reset the per-card SD error bitmask. */
static inline void sd_clr_err_code(struct rtsx_chip *chip)
{
	chip->sd_card.err_code = 0;
}
/* Test whether any of the bits in @err_code are currently recorded. */
static inline int sd_check_err_code(struct rtsx_chip *chip, u8 err_code)
{
	return chip->sd_card.err_code & err_code;
}
/*
 * Fill in the SD register-address table for this chip.  Registers the
 * rts5208 does not expose are left at 0.
 */
static void sd_init_reg_addr(struct rtsx_chip *chip)
{
	/* Configuration and status registers. */
	REG_SD_CFG1 = 0xFD31;
	REG_SD_TRANSFER = 0xFD32;
	REG_SD_CFG2 = 0xFD33;
	REG_SD_CFG3 = 0xFD3E;
	REG_SD_STAT1 = 0xFD30;

	/* Command registers. */
	REG_SD_CMD0 = 0xFD34;
	REG_SD_CMD1 = 0xFD35;
	REG_SD_CMD2 = 0xFD36;
	REG_SD_CMD3 = 0xFD37;
	REG_SD_CMD4 = 0xFD38;
	REG_SD_CMD5 = 0xFD5A;

	/* Byte/block counters. */
	REG_SD_BYTE_CNT_L = 0xFD39;
	REG_SD_BYTE_CNT_H = 0xFD3A;
	REG_SD_BLOCK_CNT_L = 0xFD3B;
	REG_SD_BLOCK_CNT_H = 0xFD3C;

	/* Not present on this chip. */
	REG_SD_STAT2 = 0;
	REG_SD_BUS_STAT = 0;
	REG_SD_PAD_CTL = 0;
	REG_SD_SAMPLE_POINT_CTL = 0;
	REG_SD_PUSH_POINT_CTL = 0;
	REG_SD_VPCLK0_CTL = 0;
	REG_SD_VPCLK1_CTL = 0;
	REG_SD_DCMPS0_CTL = 0;
	REG_SD_DCMPS1_CTL = 0;
}
/*
 * Sample DAT0 via REG_SD_STAT1.  A low DAT0 means the card is still
 * busy; record SD_BUSY and report STATUS_FAIL in that case.  Register
 * read errors are propagated unchanged.
 */
static int sd_check_data0_status(struct rtsx_chip *chip)
{
	u8 sd_stat;
	int ret;

	ret = rtsx_read_register(chip, REG_SD_STAT1, &sd_stat);
	if (ret)
		return ret;

	if (sd_stat & SD_DAT0_STATUS)
		return STATUS_SUCCESS;

	sd_set_err_code(chip, SD_BUSY);
	return STATUS_FAIL;
}
/*
 * sd_send_cmd_get_rsp - issue one SD/MMC command through the ping-pong
 * buffer and optionally copy the raw response back to the caller.
 *
 * @cmd_idx:  command index (the 0x40 transmission bit is added here)
 * @arg:      32-bit command argument
 * @rsp_type: SD_RSP_TYPE_* code; also carries flag bits such as
 *            SD_WAIT_BUSY_END and SD_NO_CHECK_CRC7
 * @rsp:      optional buffer for the response bytes (may be NULL)
 * @rsp_len:  number of bytes to copy into @rsp
 *
 * Returns STATUS_SUCCESS, STATUS_FAIL or STATUS_TIMEDOUT.  Commands
 * that fail with a CRC7 error are retried up to SD_MAX_RETRY_COUNT
 * times (except WRITE_MULTIPLE_BLOCK, which must not be re-sent).
 */
static int sd_send_cmd_get_rsp(struct rtsx_chip *chip, u8 cmd_idx,
			       u32 arg, u8 rsp_type, u8 *rsp, int rsp_len)
{
	struct sd_info *sd_card = &chip->sd_card;
	int retval;
	int timeout = 100;
	u16 reg_addr;
	u8 *ptr;
	/* Number of response bytes queued for readback (0, 5 or 16). */
	int stat_idx = 0;
	int rty_cnt = 0;
	sd_clr_err_code(chip);
	dev_dbg(rtsx_dev(chip), "SD/MMC CMD %d, arg = 0x%08x\n", cmd_idx, arg);
	/* R1b responses signal busy on DAT0; allow a much longer wait. */
	if (rsp_type == SD_RSP_TYPE_R1b)
		timeout = 3000;
RTY_SEND_CMD:
	/* Program index + 4 argument bytes, then fire the transfer. */
	rtsx_init_cmd(chip);
	rtsx_add_cmd(chip, WRITE_REG_CMD, REG_SD_CMD0, 0xFF, 0x40 | cmd_idx);
	rtsx_add_cmd(chip, WRITE_REG_CMD, REG_SD_CMD1, 0xFF, (u8)(arg >> 24));
	rtsx_add_cmd(chip, WRITE_REG_CMD, REG_SD_CMD2, 0xFF, (u8)(arg >> 16));
	rtsx_add_cmd(chip, WRITE_REG_CMD, REG_SD_CMD3, 0xFF, (u8)(arg >> 8));
	rtsx_add_cmd(chip, WRITE_REG_CMD, REG_SD_CMD4, 0xFF, (u8)arg);
	rtsx_add_cmd(chip, WRITE_REG_CMD, REG_SD_CFG2, 0xFF, rsp_type);
	rtsx_add_cmd(chip, WRITE_REG_CMD, CARD_DATA_SOURCE,
		     0x01, PINGPONG_BUFFER);
	rtsx_add_cmd(chip, WRITE_REG_CMD, REG_SD_TRANSFER,
		     0xFF, SD_TM_CMD_RSP | SD_TRANSFER_START);
	rtsx_add_cmd(chip, CHECK_REG_CMD, REG_SD_TRANSFER,
		     SD_TRANSFER_END | SD_STAT_IDLE, SD_TRANSFER_END |
		     SD_STAT_IDLE);
	if (rsp_type == SD_RSP_TYPE_R2) {
		/* 136-bit (R2) responses land in the ping-pong buffer. */
		for (reg_addr = PPBUF_BASE2; reg_addr < PPBUF_BASE2 + 16;
		     reg_addr++)
			rtsx_add_cmd(chip, READ_REG_CMD, reg_addr, 0, 0);
		stat_idx = 16;
	} else if (rsp_type != SD_RSP_TYPE_R0) {
		/* 48-bit responses are mirrored in the CMD registers. */
		for (reg_addr = REG_SD_CMD0; reg_addr <= REG_SD_CMD4;
		     reg_addr++)
			rtsx_add_cmd(chip, READ_REG_CMD, reg_addr, 0, 0);
		stat_idx = 5;
	}
	rtsx_add_cmd(chip, READ_REG_CMD, REG_SD_STAT1, 0, 0);
	retval = rtsx_send_cmd(chip, SD_CARD, timeout);
	if (retval < 0) {
		u8 val;
		rtsx_read_register(chip, REG_SD_STAT1, &val);
		dev_dbg(rtsx_dev(chip), "SD_STAT1: 0x%x\n", val);
		rtsx_read_register(chip, REG_SD_CFG3, &val);
		dev_dbg(rtsx_dev(chip), "SD_CFG3: 0x%x\n", val);
		if (retval == -ETIMEDOUT) {
			/*
			 * For busy-waiting responses, a timeout may just
			 * mean the card is still busy on DAT0.
			 */
			if (rsp_type & SD_WAIT_BUSY_END) {
				retval = sd_check_data0_status(chip);
				if (retval != STATUS_SUCCESS) {
					rtsx_clear_sd_error(chip);
					return retval;
				}
			} else {
				sd_set_err_code(chip, SD_TO_ERR);
			}
			retval = STATUS_TIMEDOUT;
		} else {
			retval = STATUS_FAIL;
		}
		rtsx_clear_sd_error(chip);
		return retval;
	}
	/* R0 (no response) commands are done once the transfer ends. */
	if (rsp_type == SD_RSP_TYPE_R0)
		return STATUS_SUCCESS;
	/* Skip the leading byte of the internal command result buffer. */
	ptr = rtsx_get_cmd_data(chip) + 1;
	/* Top two bits of the first response byte must be 0. */
	if ((ptr[0] & 0xC0) != 0) {
		sd_set_err_code(chip, SD_STS_ERR);
		return STATUS_FAIL;
	}
	/* ptr[stat_idx] is the SD_STAT1 byte queued after the response. */
	if (!(rsp_type & SD_NO_CHECK_CRC7)) {
		if (ptr[stat_idx] & SD_CRC7_ERR) {
			if (cmd_idx == WRITE_MULTIPLE_BLOCK) {
				sd_set_err_code(chip, SD_CRC_ERR);
				return STATUS_FAIL;
			}
			if (rty_cnt < SD_MAX_RETRY_COUNT) {
				wait_timeout(20);
				rty_cnt++;
				goto RTY_SEND_CMD;
			} else {
				sd_set_err_code(chip, SD_CRC_ERR);
				return STATUS_FAIL;
			}
		}
	}
	/* Inspect the card-status bits inside R1/R1b responses. */
	if (rsp_type == SD_RSP_TYPE_R1 || rsp_type == SD_RSP_TYPE_R1b) {
		if (cmd_idx != SEND_RELATIVE_ADDR &&
		    cmd_idx != SEND_IF_COND) {
			if (cmd_idx != STOP_TRANSMISSION) {
				if (ptr[1] & 0x80)
					return STATUS_FAIL;
			}
#ifdef SUPPORT_SD_LOCK
			if (ptr[1] & 0x7D) {
#else
			if (ptr[1] & 0x7F) {
#endif
				dev_dbg(rtsx_dev(chip), "ptr[1]: 0x%02x\n",
					ptr[1]);
				return STATUS_FAIL;
			}
			if (ptr[2] & 0xFF) {
				dev_dbg(rtsx_dev(chip), "ptr[2]: 0x%02x\n",
					ptr[2]);
				return STATUS_FAIL;
			}
			if (ptr[3] & 0x80) {
				dev_dbg(rtsx_dev(chip), "ptr[3]: 0x%02x\n",
					ptr[3]);
				return STATUS_FAIL;
			}
			/* READY_FOR_DATA bit mirrors the card buffer state. */
			if (ptr[3] & 0x01)
				sd_card->sd_data_buf_ready = 1;
			else
				sd_card->sd_data_buf_ready = 0;
		}
	}
	if (rsp && rsp_len)
		memcpy(rsp, ptr, rsp_len);
	return STATUS_SUCCESS;
}
/*
 * sd_read_data - run a data-read transfer through the ping-pong buffer.
 *
 * @trans_mode: SD_TM_* transfer mode (e.g. SD_TM_NORMAL_READ,
 *              SD_TM_AUTO_TUNING)
 * @cmd/@cmd_len: optional raw command bytes (at most 6 are programmed)
 * @byte_cnt/@blk_cnt: transfer geometry written to the counters
 * @buf/@buf_len: optional destination; limited to one 512-byte page
 *
 * Returns STATUS_SUCCESS or STATUS_FAIL.  On -ETIMEDOUT a SEND_STATUS
 * is issued to resynchronize the card state.
 */
static int sd_read_data(struct rtsx_chip *chip,
			u8 trans_mode, u8 *cmd, int cmd_len, u16 byte_cnt,
			u16 blk_cnt, u8 bus_width, u8 *buf, int buf_len,
			int timeout)
{
	struct sd_info *sd_card = &chip->sd_card;
	int retval;
	int i;
	sd_clr_err_code(chip);
	if (!buf)
		buf_len = 0;
	/* The ping-pong buffer only holds a single 512-byte page. */
	if (buf_len > 512)
		return STATUS_FAIL;
	rtsx_init_cmd(chip);
	if (cmd_len) {
		dev_dbg(rtsx_dev(chip), "SD/MMC CMD %d\n", cmd[0] - 0x40);
		for (i = 0; i < (min(cmd_len, 6)); i++)
			rtsx_add_cmd(chip, WRITE_REG_CMD, REG_SD_CMD0 + i,
				     0xFF, cmd[i]);
	}
	rtsx_add_cmd(chip, WRITE_REG_CMD, REG_SD_BYTE_CNT_L, 0xFF,
		     (u8)byte_cnt);
	rtsx_add_cmd(chip, WRITE_REG_CMD, REG_SD_BYTE_CNT_H, 0xFF,
		     (u8)(byte_cnt >> 8));
	rtsx_add_cmd(chip, WRITE_REG_CMD, REG_SD_BLOCK_CNT_L, 0xFF,
		     (u8)blk_cnt);
	rtsx_add_cmd(chip, WRITE_REG_CMD, REG_SD_BLOCK_CNT_H, 0xFF,
		     (u8)(blk_cnt >> 8));
	rtsx_add_cmd(chip, WRITE_REG_CMD, REG_SD_CFG1, 0x03, bus_width);
	rtsx_add_cmd(chip, WRITE_REG_CMD, REG_SD_CFG2, 0xFF,
		     SD_CALCULATE_CRC7 | SD_CHECK_CRC16 | SD_NO_WAIT_BUSY_END |
		     SD_CHECK_CRC7 | SD_RSP_LEN_6);
	/* Auto-tuning transfers keep the hardware's own data source. */
	if (trans_mode != SD_TM_AUTO_TUNING)
		rtsx_add_cmd(chip, WRITE_REG_CMD,
			     CARD_DATA_SOURCE, 0x01, PINGPONG_BUFFER);
	rtsx_add_cmd(chip, WRITE_REG_CMD, REG_SD_TRANSFER, 0xFF,
		     trans_mode | SD_TRANSFER_START);
	rtsx_add_cmd(chip, CHECK_REG_CMD, REG_SD_TRANSFER, SD_TRANSFER_END,
		     SD_TRANSFER_END);
	retval = rtsx_send_cmd(chip, SD_CARD, timeout);
	if (retval < 0) {
		if (retval == -ETIMEDOUT) {
			sd_send_cmd_get_rsp(chip, SEND_STATUS, sd_card->sd_addr,
					    SD_RSP_TYPE_R1, NULL, 0);
		}
		return STATUS_FAIL;
	}
	if (buf && buf_len) {
		retval = rtsx_read_ppbuf(chip, buf, buf_len);
		if (retval != STATUS_SUCCESS)
			return STATUS_FAIL;
	}
	return STATUS_SUCCESS;
}
/*
 * sd_write_data - run a data-write transfer through the ping-pong buffer.
 *
 * Mirror of sd_read_data(): the payload is staged into the ping-pong
 * buffer first, then the command/counter registers are programmed and
 * the transfer started.  Limited to one 512-byte page.
 *
 * Returns STATUS_SUCCESS or STATUS_FAIL; on -ETIMEDOUT a SEND_STATUS
 * is issued to resynchronize the card state.
 */
static int sd_write_data(struct rtsx_chip *chip, u8 trans_mode,
			 u8 *cmd, int cmd_len, u16 byte_cnt, u16 blk_cnt,
			 u8 bus_width, u8 *buf, int buf_len, int timeout)
{
	struct sd_info *sd_card = &chip->sd_card;
	int retval;
	int i;
	sd_clr_err_code(chip);
	if (!buf)
		buf_len = 0;
	if (buf_len > 512) {
		/* This function can't write data more than one page */
		return STATUS_FAIL;
	}
	/* Stage the payload before kicking off the transfer. */
	if (buf && buf_len) {
		retval = rtsx_write_ppbuf(chip, buf, buf_len);
		if (retval != STATUS_SUCCESS)
			return STATUS_FAIL;
	}
	rtsx_init_cmd(chip);
	if (cmd_len) {
		dev_dbg(rtsx_dev(chip), "SD/MMC CMD %d\n", cmd[0] - 0x40);
		for (i = 0; i < (min(cmd_len, 6)); i++) {
			rtsx_add_cmd(chip, WRITE_REG_CMD,
				     REG_SD_CMD0 + i, 0xFF, cmd[i]);
		}
	}
	rtsx_add_cmd(chip, WRITE_REG_CMD, REG_SD_BYTE_CNT_L, 0xFF,
		     (u8)byte_cnt);
	rtsx_add_cmd(chip, WRITE_REG_CMD, REG_SD_BYTE_CNT_H, 0xFF,
		     (u8)(byte_cnt >> 8));
	rtsx_add_cmd(chip, WRITE_REG_CMD, REG_SD_BLOCK_CNT_L, 0xFF,
		     (u8)blk_cnt);
	rtsx_add_cmd(chip, WRITE_REG_CMD, REG_SD_BLOCK_CNT_H, 0xFF,
		     (u8)(blk_cnt >> 8));
	rtsx_add_cmd(chip, WRITE_REG_CMD, REG_SD_CFG1, 0x03, bus_width);
	rtsx_add_cmd(chip, WRITE_REG_CMD, REG_SD_CFG2, 0xFF,
		     SD_CALCULATE_CRC7 | SD_CHECK_CRC16 | SD_NO_WAIT_BUSY_END |
		     SD_CHECK_CRC7 | SD_RSP_LEN_6);
	rtsx_add_cmd(chip, WRITE_REG_CMD, REG_SD_TRANSFER, 0xFF,
		     trans_mode | SD_TRANSFER_START);
	rtsx_add_cmd(chip, CHECK_REG_CMD, REG_SD_TRANSFER, SD_TRANSFER_END,
		     SD_TRANSFER_END);
	retval = rtsx_send_cmd(chip, SD_CARD, timeout);
	if (retval < 0) {
		if (retval == -ETIMEDOUT) {
			sd_send_cmd_get_rsp(chip, SEND_STATUS, sd_card->sd_addr,
					    SD_RSP_TYPE_R1, NULL, 0);
		}
		return STATUS_FAIL;
	}
	return STATUS_SUCCESS;
}
/*
 * sd_check_csd - read the CSD (CMD9) and derive clock and capacity.
 *
 * @check_wp: non-zero to also latch the write-protect bits into
 *            chip->card_wp.
 *
 * Retries the SEND_CSD up to 6 times, aborting early if the card is
 * removed.  Returns STATUS_SUCCESS or STATUS_FAIL.
 */
static int sd_check_csd(struct rtsx_chip *chip, char check_wp)
{
	struct sd_info *sd_card = &chip->sd_card;
	int retval;
	int i;
	u8 csd_ver, trans_speed;
	u8 rsp[16];
	for (i = 0; i < 6; i++) {
		if (detect_card_cd(chip, SD_CARD) != STATUS_SUCCESS) {
			sd_set_err_code(chip, SD_NO_CARD);
			return STATUS_FAIL;
		}
		retval = sd_send_cmd_get_rsp(chip, SEND_CSD, sd_card->sd_addr,
					     SD_RSP_TYPE_R2, rsp, 16);
		if (retval == STATUS_SUCCESS)
			break;
	}
	if (i == 6)
		return STATUS_FAIL;
	/*
	 * NOTE(review): only 15 bytes are copied into raw_csd while the
	 * debug print below dumps 16 — the last byte may be stale.
	 * Confirm raw_csd initialization elsewhere before relying on it.
	 */
	memcpy(sd_card->raw_csd, rsp + 1, 15);
	dev_dbg(rtsx_dev(chip), "CSD Response:\n");
	dev_dbg(rtsx_dev(chip), "%*ph\n", 16, sd_card->raw_csd);
	csd_ver = (rsp[1] & 0xc0) >> 6;
	dev_dbg(rtsx_dev(chip), "csd_ver = %d\n", csd_ver);
	/* TRAN_SPEED: low 3 bits = unit, upper 5 bits = time value. */
	trans_speed = rsp[4];
	if ((trans_speed & 0x07) == 0x02) {
		/* 10 Mbit/s unit; pick a host clock from the time value. */
		if ((trans_speed & 0xf8) >= 0x30) {
			if (chip->asic_code)
				sd_card->sd_clock = 47;
			else
				sd_card->sd_clock = CLK_50;
		} else if ((trans_speed & 0xf8) == 0x28) {
			if (chip->asic_code)
				sd_card->sd_clock = 39;
			else
				sd_card->sd_clock = CLK_40;
		} else if ((trans_speed & 0xf8) == 0x20) {
			if (chip->asic_code)
				sd_card->sd_clock = 29;
			else
				sd_card->sd_clock = CLK_30;
		} else if ((trans_speed & 0xf8) >= 0x10) {
			if (chip->asic_code)
				sd_card->sd_clock = 23;
			else
				sd_card->sd_clock = CLK_20;
		} else if ((trans_speed & 0x08) >= 0x08) {
			if (chip->asic_code)
				sd_card->sd_clock = 19;
			else
				sd_card->sd_clock = CLK_20;
		} else {
			return STATUS_FAIL;
		}
	} else {
		return STATUS_FAIL;
	}
	if (CHK_MMC_SECTOR_MODE(sd_card)) {
		/* Sector-mode MMC capacity comes from EXT_CSD, not here. */
		sd_card->capacity = 0;
	} else {
		if ((!CHK_SD_HCXC(sd_card)) || csd_ver == 0) {
			/* CSD v1: capacity = (C_SIZE+1) * mult * block len. */
			u8 blk_size, c_size_mult;
			u16 c_size;
			blk_size = rsp[6] & 0x0F;
			c_size = ((u16)(rsp[7] & 0x03) << 10)
				+ ((u16)rsp[8] << 2)
				+ ((u16)(rsp[9] & 0xC0) >> 6);
			c_size_mult = (u8)((rsp[10] & 0x03) << 1);
			c_size_mult += (rsp[11] & 0x80) >> 7;
			sd_card->capacity = (((u32)(c_size + 1)) *
					(1 << (c_size_mult + 2)))
				<< (blk_size - 9);
		} else {
			/* CSD v2 (SDHC/SDXC): (C_SIZE+1) * 512 KiB sectors. */
			u32 total_sector = 0;
			total_sector = (((u32)rsp[8] & 0x3f) << 16) |
				((u32)rsp[9] << 8) | (u32)rsp[10];
			sd_card->capacity = (total_sector + 1) << 10;
		}
	}
	if (check_wp) {
		/* PERM_WRITE_PROTECT | TMP_WRITE_PROTECT. */
		if (rsp[15] & 0x30)
			chip->card_wp |= SD_CARD;
		dev_dbg(rtsx_dev(chip), "CSD WP Status: 0x%x\n", rsp[15]);
	}
	return STATUS_SUCCESS;
}
static int sd_set_sample_push_timing(struct rtsx_chip *chip)
{
int retval;
struct sd_info *sd_card = &chip->sd_card;
u8 val = 0;
if ((chip->sd_ctl & SD_PUSH_POINT_CTL_MASK) == SD_PUSH_POINT_DELAY)
val |= 0x10;
if ((chip->sd_ctl & SD_SAMPLE_POINT_CTL_MASK) == SD_SAMPLE_POINT_AUTO) {
if (chip->asic_code) {
if (CHK_SD_HS(sd_card) || CHK_MMC_52M(sd_card)) {
if (val & 0x10)
val |= 0x04;
else
val |= 0x08;
}
} else {
if (val & 0x10)
val |= 0x04;
else
val |= 0x08;
}
} else if ((chip->sd_ctl & SD_SAMPLE_POINT_CTL_MASK) ==
SD_SAMPLE_POINT_DELAY) {
if (val & 0x10)
val |= 0x04;
else
val |= 0x08;
}
retval = rtsx_write_register(chip, REG_SD_CFG1, 0x1C, val);
if (retval)
return retval;
return STATUS_SUCCESS;
}
static void sd_choose_proper_clock(struct rtsx_chip *chip)
{
struct sd_info *sd_card = &chip->sd_card;
if (CHK_SD_SDR104(sd_card)) {
if (chip->asic_code)
sd_card->sd_clock = chip->asic_sd_sdr104_clk;
else
sd_card->sd_clock = chip->fpga_sd_sdr104_clk;
} else if (CHK_SD_DDR50(sd_card)) {
if (chip->asic_code)
sd_card->sd_clock = chip->asic_sd_ddr50_clk;
else
sd_card->sd_clock = chip->fpga_sd_ddr50_clk;
} else if (CHK_SD_SDR50(sd_card)) {
if (chip->asic_code)
sd_card->sd_clock = chip->asic_sd_sdr50_clk;
else
sd_card->sd_clock = chip->fpga_sd_sdr50_clk;
} else if (CHK_SD_HS(sd_card)) {
if (chip->asic_code)
sd_card->sd_clock = chip->asic_sd_hs_clk;
else
sd_card->sd_clock = chip->fpga_sd_hs_clk;
} else if (CHK_MMC_52M(sd_card) || CHK_MMC_DDR52(sd_card)) {
if (chip->asic_code)
sd_card->sd_clock = chip->asic_mmc_52m_clk;
else
sd_card->sd_clock = chip->fpga_mmc_52m_clk;
} else if (CHK_MMC_26M(sd_card)) {
if (chip->asic_code)
sd_card->sd_clock = 48;
else
sd_card->sd_clock = CLK_50;
}
}
/*
 * Program the clock-divider field (bits 5-6) of REG_SD_CFG1.
 * Unknown divider codes leave the field cleared (divide by 1).
 */
static int sd_set_clock_divider(struct rtsx_chip *chip, u8 clk_div)
{
	u8 val;
	int retval;

	switch (clk_div) {
	case SD_CLK_DIVIDE_128:
		val = 0x40;
		break;
	case SD_CLK_DIVIDE_256:
		val = 0x20;
		break;
	case SD_CLK_DIVIDE_0:
	default:
		val = 0x00;
		break;
	}

	retval = rtsx_write_register(chip, REG_SD_CFG1, 0x60, val);
	if (retval)
		return retval;

	return STATUS_SUCCESS;
}
/*
 * Apply the initial bus parameters: sample/push timing first, then
 * choose and switch to the appropriate card clock.
 */
static int sd_set_init_para(struct rtsx_chip *chip)
{
	struct sd_info *sd_card = &chip->sd_card;

	if (sd_set_sample_push_timing(chip) != STATUS_SUCCESS)
		return STATUS_FAIL;

	sd_choose_proper_clock(chip);

	if (switch_clock(chip, sd_card->sd_clock) != STATUS_SUCCESS)
		return STATUS_FAIL;

	return STATUS_SUCCESS;
}
/*
 * Select (CMD7 with the card's RCA, expects R1) or deselect (CMD7
 * with address 0, no response) the SD/MMC card.
 */
int sd_select_card(struct rtsx_chip *chip, int select)
{
	struct sd_info *sd_card = &chip->sd_card;
	u8 cmd_idx = select ? SELECT_CARD : DESELECT_CARD;
	u8 cmd_type = select ? SD_RSP_TYPE_R1 : SD_RSP_TYPE_R0;
	u32 addr = select ? sd_card->sd_addr : 0;
	int retval;

	retval = sd_send_cmd_get_rsp(chip, cmd_idx, addr, cmd_type, NULL, 0);
	if (retval != STATUS_SUCCESS)
		return STATUS_FAIL;

	return STATUS_SUCCESS;
}
#ifdef SUPPORT_SD_LOCK
/*
 * Refresh the cached lock state from the card status (CMD13).
 * Fails if the status cannot be read or the card reports that the
 * last lock/unlock operation failed.
 */
static int sd_update_lock_status(struct rtsx_chip *chip)
{
	struct sd_info *sd_card = &chip->sd_card;
	u8 rsp[5];
	int retval;

	retval = sd_send_cmd_get_rsp(chip, SEND_STATUS, sd_card->sd_addr,
				     SD_RSP_TYPE_R1, rsp, 5);
	if (retval != STATUS_SUCCESS)
		return STATUS_FAIL;

	/* rsp[1] bit 1 mirrors the card's CARD_IS_LOCKED status bit. */
	sd_card->sd_lock_status &= ~SD_LOCKED;
	if (rsp[1] & 0x02)
		sd_card->sd_lock_status |= SD_LOCKED;

	dev_dbg(rtsx_dev(chip), "sd_card->sd_lock_status = 0x%x\n",
		sd_card->sd_lock_status);

	/* rsp[1] bit 0 flags a failed lock/unlock operation. */
	if (rsp[1] & 0x01)
		return STATUS_FAIL;

	return STATUS_SUCCESS;
}
#endif
/*
 * Poll CMD13 until the card reaches @state with READY_FOR_DATA equal
 * to @data_ready, trying at most @polling_cnt times.
 */
static int sd_wait_state_data_ready(struct rtsx_chip *chip, u8 state,
				    u8 data_ready, int polling_cnt)
{
	struct sd_info *sd_card = &chip->sd_card;
	u8 rsp[5];
	int attempt = 0;

	while (attempt++ < polling_cnt) {
		int retval = sd_send_cmd_get_rsp(chip, SEND_STATUS,
						 sd_card->sd_addr,
						 SD_RSP_TYPE_R1, rsp, 5);
		if (retval != STATUS_SUCCESS)
			return STATUS_FAIL;

		/* rsp[3] bits 1-4: current state; bit 0: READY_FOR_DATA. */
		if ((rsp[3] & 0x1E) == state &&
		    (rsp[3] & 0x01) == data_ready)
			return STATUS_SUCCESS;
	}

	return STATUS_FAIL;
}
/*
 * Switch the SD pad bank between 3.3V and 1.8V signalling.  On ASIC
 * parts this goes through PHY register 0x08; on FPGA parts through
 * the SD_PAD_CTL register.
 */
static int sd_change_bank_voltage(struct rtsx_chip *chip, u8 voltage)
{
	int retval;

	switch (voltage) {
	case SD_IO_3V3:
		if (chip->asic_code) {
			retval = rtsx_write_phy_register(chip, 0x08,
							 0x4FC0 |
							 chip->phy_voltage);
			if (retval != STATUS_SUCCESS)
				return STATUS_FAIL;
		} else {
			retval = rtsx_write_register(chip, SD_PAD_CTL,
						     SD_IO_USING_1V8, 0);
			if (retval)
				return retval;
		}
		break;
	case SD_IO_1V8:
		if (chip->asic_code) {
			retval = rtsx_write_phy_register(chip, 0x08,
							 0x4C40 |
							 chip->phy_voltage);
			if (retval != STATUS_SUCCESS)
				return STATUS_FAIL;
		} else {
			retval = rtsx_write_register(chip, SD_PAD_CTL,
						     SD_IO_USING_1V8,
						     SD_IO_USING_1V8);
			if (retval)
				return retval;
		}
		break;
	default:
		return STATUS_FAIL;
	}

	return STATUS_SUCCESS;
}
/*
 * sd_voltage_switch - perform the CMD11 1.8V signalling switch sequence.
 *
 * Per the SD spec: send VOLTAGE_SWITCH while toggling the clock, verify
 * the card pulls CMD/DAT low, stop the clock, change the pad voltage,
 * restart the clock and verify all lines are released high.  On a
 * failed verification the clock is force-disabled and STATUS_FAIL
 * returned.
 */
static int sd_voltage_switch(struct rtsx_chip *chip)
{
	int retval;
	u8 stat;
	retval = rtsx_write_register(chip, SD_BUS_STAT,
				     SD_CLK_TOGGLE_EN | SD_CLK_FORCE_STOP,
				     SD_CLK_TOGGLE_EN);
	if (retval)
		return retval;
	retval = sd_send_cmd_get_rsp(chip, VOLTAGE_SWITCH, 0, SD_RSP_TYPE_R1,
				     NULL, 0);
	if (retval != STATUS_SUCCESS)
		return STATUS_FAIL;
	udelay(chip->sd_voltage_switch_delay);
	retval = rtsx_read_register(chip, SD_BUS_STAT, &stat);
	if (retval)
		return retval;
	/* After CMD11 the card must drive CMD and DAT[3:0] low. */
	if (stat & (SD_CMD_STATUS | SD_DAT3_STATUS | SD_DAT2_STATUS |
		    SD_DAT1_STATUS | SD_DAT0_STATUS)) {
		return STATUS_FAIL;
	}
	retval = rtsx_write_register(chip, SD_BUS_STAT, 0xFF,
				     SD_CLK_FORCE_STOP);
	if (retval)
		return retval;
	retval = sd_change_bank_voltage(chip, SD_IO_1V8);
	if (retval != STATUS_SUCCESS)
		return STATUS_FAIL;
	/* Allow the 1.8V regulator to settle before restarting the clock. */
	wait_timeout(50);
	retval = rtsx_write_register(chip, SD_BUS_STAT, 0xFF,
				     SD_CLK_TOGGLE_EN);
	if (retval)
		return retval;
	wait_timeout(10);
	retval = rtsx_read_register(chip, SD_BUS_STAT, &stat);
	if (retval)
		return retval;
	/* The card must now release CMD and DAT[3:0] back high. */
	if ((stat & (SD_CMD_STATUS | SD_DAT3_STATUS | SD_DAT2_STATUS |
		     SD_DAT1_STATUS | SD_DAT0_STATUS)) !=
			(SD_CMD_STATUS | SD_DAT3_STATUS | SD_DAT2_STATUS |
			 SD_DAT1_STATUS | SD_DAT0_STATUS)) {
		dev_dbg(rtsx_dev(chip), "SD_BUS_STAT: 0x%x\n", stat);
		rtsx_write_register(chip, SD_BUS_STAT, SD_CLK_TOGGLE_EN |
				    SD_CLK_FORCE_STOP, 0);
		rtsx_write_register(chip, CARD_CLK_EN, 0xFF, 0);
		return STATUS_FAIL;
	}
	retval = rtsx_write_register(chip, SD_BUS_STAT,
				     SD_CLK_TOGGLE_EN | SD_CLK_FORCE_STOP, 0);
	if (retval)
		return retval;
	return STATUS_SUCCESS;
}
/*
 * Pulse the reset bit of the RX or TX digital clock manager by
 * writing DCM_RESET|dir followed by dir alone to DCM_DRP_CTL.
 */
static int sd_reset_dcm(struct rtsx_chip *chip, u8 tune_dir)
{
	u8 dir = (tune_dir == TUNE_RX) ? DCM_RX : DCM_TX;
	int retval;

	retval = rtsx_write_register(chip, DCM_DRP_CTL, 0xFF,
				     DCM_RESET | dir);
	if (retval)
		return retval;

	retval = rtsx_write_register(chip, DCM_DRP_CTL, 0xFF, dir);
	if (retval)
		return retval;

	return STATUS_SUCCESS;
}
/*
 * sd_change_phase - move the RX or TX sampling clock to @sample_point.
 *
 * ASIC parts program the variable-phase register directly under
 * CHANGE_CLK.  FPGA parts additionally have to step the DCM through a
 * phase-compare cycle and verify the resulting phase; on any failure
 * the DCM is reset via sd_reset_dcm().
 *
 * Returns STATUS_SUCCESS, STATUS_FAIL, or a raw register-write error.
 */
static int sd_change_phase(struct rtsx_chip *chip, u8 sample_point, u8 tune_dir)
{
	struct sd_info *sd_card = &chip->sd_card;
	/* NOTE: locals intentionally shadow the register-name macros. */
	u16 SD_VP_CTL, SD_DCMPS_CTL;
	u8 val;
	int retval;
	bool ddr_rx = false;
	dev_dbg(rtsx_dev(chip), "%s (sample_point = %d, tune_dir = %d)\n",
		__func__, sample_point, tune_dir);
	if (tune_dir == TUNE_RX) {
		SD_VP_CTL = SD_VPRX_CTL;
		SD_DCMPS_CTL = SD_DCMPS_RX_CTL;
		if (CHK_SD_DDR50(sd_card))
			ddr_rx = true;
	} else {
		SD_VP_CTL = SD_VPTX_CTL;
		SD_DCMPS_CTL = SD_DCMPS_TX_CTL;
	}
	if (chip->asic_code) {
		/* ASIC: set phase and pulse PHASE_NOT_RESET under CHANGE_CLK. */
		retval = rtsx_write_register(chip, CLK_CTL, CHANGE_CLK,
					     CHANGE_CLK);
		if (retval)
			return retval;
		retval = rtsx_write_register(chip, SD_VP_CTL, 0x1F,
					     sample_point);
		if (retval)
			return retval;
		retval = rtsx_write_register(chip, SD_VPCLK0_CTL,
					     PHASE_NOT_RESET, 0);
		if (retval)
			return retval;
		retval = rtsx_write_register(chip, SD_VPCLK0_CTL,
					     PHASE_NOT_RESET, PHASE_NOT_RESET);
		if (retval)
			return retval;
		retval = rtsx_write_register(chip, CLK_CTL, CHANGE_CLK, 0);
		if (retval)
			return retval;
	} else {
		rtsx_read_register(chip, SD_VP_CTL, &val);
		dev_dbg(rtsx_dev(chip), "SD_VP_CTL: 0x%x\n", val);
		rtsx_read_register(chip, SD_DCMPS_CTL, &val);
		dev_dbg(rtsx_dev(chip), "SD_DCMPS_CTL: 0x%x\n", val);
		/* DDR RX phase changes use PHASE_CHANGE instead of CHANGE_CLK. */
		if (ddr_rx) {
			retval = rtsx_write_register(chip, SD_VP_CTL,
						     PHASE_CHANGE,
						     PHASE_CHANGE);
			if (retval)
				return retval;
			udelay(50);
			retval = rtsx_write_register(chip, SD_VP_CTL, 0xFF,
						     PHASE_CHANGE |
						     PHASE_NOT_RESET |
						     sample_point);
			if (retval)
				return retval;
		} else {
			retval = rtsx_write_register(chip, CLK_CTL,
						     CHANGE_CLK, CHANGE_CLK);
			if (retval)
				return retval;
			udelay(50);
			retval = rtsx_write_register(chip, SD_VP_CTL, 0xFF,
						     PHASE_NOT_RESET |
						     sample_point);
			if (retval)
				return retval;
		}
		udelay(100);
		/* Kick the phase-compare cycle and wait for completion. */
		rtsx_init_cmd(chip);
		rtsx_add_cmd(chip, WRITE_REG_CMD, SD_DCMPS_CTL, DCMPS_CHANGE,
			     DCMPS_CHANGE);
		rtsx_add_cmd(chip, CHECK_REG_CMD, SD_DCMPS_CTL,
			     DCMPS_CHANGE_DONE, DCMPS_CHANGE_DONE);
		retval = rtsx_send_cmd(chip, SD_CARD, 100);
		if (retval != STATUS_SUCCESS)
			goto fail;
		/* Verify the DCM actually settled at the requested phase. */
		val = *rtsx_get_cmd_data(chip);
		if (val & DCMPS_ERROR)
			goto fail;
		if ((val & DCMPS_CURRENT_PHASE) != sample_point)
			goto fail;
		retval = rtsx_write_register(chip, SD_DCMPS_CTL,
					     DCMPS_CHANGE, 0);
		if (retval)
			return retval;
		if (ddr_rx) {
			retval = rtsx_write_register(chip, SD_VP_CTL,
						     PHASE_CHANGE, 0);
			if (retval)
				return retval;
		} else {
			retval = rtsx_write_register(chip, CLK_CTL,
						     CHANGE_CLK, 0);
			if (retval)
				return retval;
		}
		udelay(50);
	}
	retval = rtsx_write_register(chip, SD_CFG1, SD_ASYNC_FIFO_NOT_RST, 0);
	if (retval)
		return retval;
	return STATUS_SUCCESS;
fail:
	rtsx_read_register(chip, SD_VP_CTL, &val);
	dev_dbg(rtsx_dev(chip), "SD_VP_CTL: 0x%x\n", val);
	rtsx_read_register(chip, SD_DCMPS_CTL, &val);
	dev_dbg(rtsx_dev(chip), "SD_DCMPS_CTL: 0x%x\n", val);
	/* Back out the pending change and reset the DCM before failing. */
	rtsx_write_register(chip, SD_DCMPS_CTL, DCMPS_CHANGE, 0);
	rtsx_write_register(chip, SD_VP_CTL, PHASE_CHANGE, 0);
	mdelay(10);
	sd_reset_dcm(chip, tune_dir);
	return STATUS_FAIL;
}
/*
 * Read the SCR register (ACMD51) and cache it in raw_scr.  Fails if
 * the transfer errors out or the SD_SPEC field of the SCR is zero.
 */
static int sd_check_spec(struct rtsx_chip *chip, u8 bus_width)
{
	struct sd_info *sd_card = &chip->sd_card;
	u8 cmd[5] = { 0x40 | SEND_SCR, 0, 0, 0, 0 };
	u8 buf[8];
	int retval;

	/* SEND_SCR is an application command: prefix it with APP_CMD. */
	retval = sd_send_cmd_get_rsp(chip, APP_CMD, sd_card->sd_addr,
				     SD_RSP_TYPE_R1, NULL, 0);
	if (retval != STATUS_SUCCESS)
		return STATUS_FAIL;

	retval = sd_read_data(chip, SD_TM_NORMAL_READ, cmd, 5, 8, 1, bus_width,
			      buf, 8, 250);
	if (retval != STATUS_SUCCESS) {
		rtsx_clear_sd_error(chip);
		return STATUS_FAIL;
	}

	memcpy(sd_card->raw_scr, buf, 8);

	/* Low nibble of the first SCR byte is the SD_SPEC version. */
	if ((buf[0] & 0x0F) == 0)
		return STATUS_FAIL;

	return STATUS_SUCCESS;
}
/*
 * sd_query_switch_result - validate a CMD6 switch-status block.
 *
 * Maps @func_group/@func_to_switch to the support-mask, switch-result
 * and busy-flag offsets inside the 64-byte status @buf, then checks
 * (for group 1) that the function is supported and was selected, and
 * (for status structure version 1) that the function is not busy.
 *
 * Returns STATUS_SUCCESS or STATUS_FAIL.
 */
static int sd_query_switch_result(struct rtsx_chip *chip, u8 func_group,
				  u8 func_to_switch, u8 *buf, int buf_len)
{
	u8 support_mask = 0, query_switch = 0, switch_busy = 0;
	int support_offset = 0, query_switch_offset = 0, check_busy_offset = 0;
	if (func_group == SD_FUNC_GROUP_1) {
		/* Group 1: access mode (HS / SDR50 / SDR104 / DDR50). */
		support_offset = FUNCTION_GROUP1_SUPPORT_OFFSET;
		query_switch_offset = FUNCTION_GROUP1_QUERY_SWITCH_OFFSET;
		check_busy_offset = FUNCTION_GROUP1_CHECK_BUSY_OFFSET;
		switch (func_to_switch) {
		case HS_SUPPORT:
			support_mask = HS_SUPPORT_MASK;
			query_switch = HS_QUERY_SWITCH_OK;
			switch_busy = HS_SWITCH_BUSY;
			break;
		case SDR50_SUPPORT:
			support_mask = SDR50_SUPPORT_MASK;
			query_switch = SDR50_QUERY_SWITCH_OK;
			switch_busy = SDR50_SWITCH_BUSY;
			break;
		case SDR104_SUPPORT:
			support_mask = SDR104_SUPPORT_MASK;
			query_switch = SDR104_QUERY_SWITCH_OK;
			switch_busy = SDR104_SWITCH_BUSY;
			break;
		case DDR50_SUPPORT:
			support_mask = DDR50_SUPPORT_MASK;
			query_switch = DDR50_QUERY_SWITCH_OK;
			switch_busy = DDR50_SWITCH_BUSY;
			break;
		default:
			return STATUS_FAIL;
		}
	} else if (func_group == SD_FUNC_GROUP_3) {
		/* Group 3: driver strength. */
		support_offset = FUNCTION_GROUP3_SUPPORT_OFFSET;
		query_switch_offset = FUNCTION_GROUP3_QUERY_SWITCH_OFFSET;
		check_busy_offset = FUNCTION_GROUP3_CHECK_BUSY_OFFSET;
		switch (func_to_switch) {
		case DRIVING_TYPE_A:
			support_mask = DRIVING_TYPE_A_MASK;
			query_switch = TYPE_A_QUERY_SWITCH_OK;
			switch_busy = TYPE_A_SWITCH_BUSY;
			break;
		case DRIVING_TYPE_C:
			support_mask = DRIVING_TYPE_C_MASK;
			query_switch = TYPE_C_QUERY_SWITCH_OK;
			switch_busy = TYPE_C_SWITCH_BUSY;
			break;
		case DRIVING_TYPE_D:
			support_mask = DRIVING_TYPE_D_MASK;
			query_switch = TYPE_D_QUERY_SWITCH_OK;
			switch_busy = TYPE_D_SWITCH_BUSY;
			break;
		default:
			return STATUS_FAIL;
		}
	} else if (func_group == SD_FUNC_GROUP_4) {
		/* Group 4: current limit. */
		support_offset = FUNCTION_GROUP4_SUPPORT_OFFSET;
		query_switch_offset = FUNCTION_GROUP4_QUERY_SWITCH_OFFSET;
		check_busy_offset = FUNCTION_GROUP4_CHECK_BUSY_OFFSET;
		switch (func_to_switch) {
		case CURRENT_LIMIT_400:
			support_mask = CURRENT_LIMIT_400_MASK;
			query_switch = CURRENT_LIMIT_400_QUERY_SWITCH_OK;
			switch_busy = CURRENT_LIMIT_400_SWITCH_BUSY;
			break;
		case CURRENT_LIMIT_600:
			support_mask = CURRENT_LIMIT_600_MASK;
			query_switch = CURRENT_LIMIT_600_QUERY_SWITCH_OK;
			switch_busy = CURRENT_LIMIT_600_SWITCH_BUSY;
			break;
		case CURRENT_LIMIT_800:
			support_mask = CURRENT_LIMIT_800_MASK;
			query_switch = CURRENT_LIMIT_800_QUERY_SWITCH_OK;
			switch_busy = CURRENT_LIMIT_800_SWITCH_BUSY;
			break;
		default:
			return STATUS_FAIL;
		}
	} else {
		return STATUS_FAIL;
	}
	/* Only group 1 checks support + selected-function nibble. */
	if (func_group == SD_FUNC_GROUP_1) {
		if (!(buf[support_offset] & support_mask) ||
		    ((buf[query_switch_offset] & 0x0F) != query_switch)) {
			return STATUS_FAIL;
		}
	}
	/* Check 'Busy Status' */
	if (buf[DATA_STRUCTURE_VER_OFFSET] == 0x01 &&
	    ((buf[check_busy_offset] & switch_busy) == switch_busy)) {
		return STATUS_FAIL;
	}
	return STATUS_SUCCESS;
}
/*
 * sd_check_switch_mode - issue CMD6 in check or switch mode.
 *
 * @mode: SD_CHECK_MODE or SD_SWITCH_MODE.  With func_group ==
 * NO_ARGUMENT the function masks for all four groups are cached from
 * the 64-byte status block; otherwise the block is validated via
 * sd_query_switch_result() and, for currents above 400mA, the OCP
 * threshold and power MOS strength are raised.
 *
 * Returns STATUS_SUCCESS, STATUS_FAIL, or a raw register-write error.
 */
static int sd_check_switch_mode(struct rtsx_chip *chip, u8 mode, u8 func_group,
				u8 func_to_switch, u8 bus_width)
{
	struct sd_info *sd_card = &chip->sd_card;
	int retval;
	u8 cmd[5], buf[64];
	dev_dbg(rtsx_dev(chip), "%s (mode = %d, func_group = %d, func_to_switch = %d)\n",
		__func__, mode, func_group, func_to_switch);
	/* Build the CMD6 argument: 0xF (no change) in unused nibbles. */
	cmd[0] = 0x40 | SWITCH;
	cmd[1] = mode;
	if (func_group == SD_FUNC_GROUP_1) {
		cmd[2] = 0xFF;
		cmd[3] = 0xFF;
		cmd[4] = 0xF0 + func_to_switch;
	} else if (func_group == SD_FUNC_GROUP_3) {
		cmd[2] = 0xFF;
		cmd[3] = 0xF0 + func_to_switch;
		cmd[4] = 0xFF;
	} else if (func_group == SD_FUNC_GROUP_4) {
		cmd[2] = 0xFF;
		cmd[3] = 0x0F + (func_to_switch << 4);
		cmd[4] = 0xFF;
	} else {
		/* Pure query: check mode, change nothing. */
		cmd[1] = SD_CHECK_MODE;
		cmd[2] = 0xFF;
		cmd[3] = 0xFF;
		cmd[4] = 0xFF;
	}
	retval = sd_read_data(chip, SD_TM_NORMAL_READ, cmd, 5, 64, 1, bus_width,
			      buf, 64, 250);
	if (retval != STATUS_SUCCESS) {
		rtsx_clear_sd_error(chip);
		return STATUS_FAIL;
	}
	dev_dbg(rtsx_dev(chip), "%*ph\n", 64, buf);
	if (func_group == NO_ARGUMENT) {
		/* Cache the per-group support masks from the status block. */
		sd_card->func_group1_mask = buf[0x0D];
		sd_card->func_group2_mask = buf[0x0B];
		sd_card->func_group3_mask = buf[0x09];
		sd_card->func_group4_mask = buf[0x07];
		dev_dbg(rtsx_dev(chip), "func_group1_mask = 0x%02x\n",
			buf[0x0D]);
		dev_dbg(rtsx_dev(chip), "func_group2_mask = 0x%02x\n",
			buf[0x0B]);
		dev_dbg(rtsx_dev(chip), "func_group3_mask = 0x%02x\n",
			buf[0x09]);
		dev_dbg(rtsx_dev(chip), "func_group4_mask = 0x%02x\n",
			buf[0x07]);
	} else {
		/* Maximum current consumption, check whether current is
		 * acceptable; bit[511:496] = 0x0000 means some error happened.
		 */
		u16 cc = ((u16)buf[0] << 8) | buf[1];
		dev_dbg(rtsx_dev(chip), "Maximum current consumption: %dmA\n",
			cc);
		if (cc == 0 || cc > 800)
			return STATUS_FAIL;
		retval = sd_query_switch_result(chip, func_group,
						func_to_switch, buf, 64);
		if (retval != STATUS_SUCCESS)
			return STATUS_FAIL;
		/* High-current modes need a higher OCP threshold. */
		if (cc > 400 || func_to_switch > CURRENT_LIMIT_400) {
			retval = rtsx_write_register(chip, OCPPARA2,
						     SD_OCP_THD_MASK,
						     chip->sd_800mA_ocp_thd);
			if (retval)
				return retval;
			retval = rtsx_write_register(chip, CARD_PWR_CTL,
						     PMOS_STRG_MASK,
						     PMOS_STRG_800mA);
			if (retval)
				return retval;
		}
	}
	return STATUS_SUCCESS;
}
/*
 * Step one level down in the retry ladder: the next slower access
 * mode (group 1) or the next lower current limit (group 4).  Values
 * already at the bottom, and other groups, are returned unchanged.
 */
static u8 downgrade_switch_mode(u8 func_group, u8 func_to_switch)
{
	switch (func_group) {
	case SD_FUNC_GROUP_1:
		if (func_to_switch > HS_SUPPORT)
			return func_to_switch - 1;
		break;
	case SD_FUNC_GROUP_4:
		if (func_to_switch > CURRENT_LIMIT_200)
			return func_to_switch - 1;
		break;
	default:
		break;
	}
	return func_to_switch;
}
/*
 * sd_check_switch - try to switch to @func_to_switch, downgrading on
 * failure.
 *
 * Up to three attempts: each round first queries (SD_CHECK_MODE) and,
 * if the card accepts, performs the switch (SD_SWITCH_MODE).  A CRC16
 * error during the switch aborts immediately; otherwise the target is
 * downgraded one level and retried.
 */
static int sd_check_switch(struct rtsx_chip *chip,
			   u8 func_group, u8 func_to_switch, u8 bus_width)
{
	int retval;
	int i;
	bool switch_good = false;
	for (i = 0; i < 3; i++) {
		if (detect_card_cd(chip, SD_CARD) != STATUS_SUCCESS) {
			sd_set_err_code(chip, SD_NO_CARD);
			return STATUS_FAIL;
		}
		retval = sd_check_switch_mode(chip, SD_CHECK_MODE, func_group,
					      func_to_switch, bus_width);
		if (retval == STATUS_SUCCESS) {
			u8 stat;
			retval = sd_check_switch_mode(chip, SD_SWITCH_MODE,
						      func_group,
						      func_to_switch,
						      bus_width);
			if (retval == STATUS_SUCCESS) {
				switch_good = true;
				break;
			}
			/* Distinguish CRC failure from a rejected switch. */
			retval = rtsx_read_register(chip, SD_STAT1, &stat);
			if (retval)
				return retval;
			if (stat & SD_CRC16_ERR) {
				dev_dbg(rtsx_dev(chip), "SD CRC16 error when switching mode\n");
				return STATUS_FAIL;
			}
		}
		func_to_switch = downgrade_switch_mode(func_group,
						       func_to_switch);
		wait_timeout(20);
	}
	if (!switch_good)
		return STATUS_FAIL;
	return STATUS_SUCCESS;
}
/*
 * sd_switch_function - negotiate access mode (group 1) and current
 * limit (group 4) with an SD card via CMD6.
 *
 * The access-mode preference order comes from chip->sd_speed_prior
 * (one candidate per byte); modes that previously failed are masked
 * out via sd_switch_fail.  After a successful mode switch the
 * matching SET_SD_* state bit is latched.  Current-limit preference
 * comes from chip->sd_current_prior and is only attempted for UHS
 * modes.  Returns STATUS_SUCCESS, STATUS_FAIL, or a raw error.
 */
static int sd_switch_function(struct rtsx_chip *chip, u8 bus_width)
{
	struct sd_info *sd_card = &chip->sd_card;
	int retval;
	int i;
	u8 func_to_switch = 0;
	/* Get supported functions */
	retval = sd_check_switch_mode(chip, SD_CHECK_MODE, NO_ARGUMENT,
				      NO_ARGUMENT, bus_width);
	if (retval != STATUS_SUCCESS)
		return STATUS_FAIL;
	/* Exclude modes that failed in a previous negotiation round. */
	sd_card->func_group1_mask &= ~(sd_card->sd_switch_fail);
	/* Function Group 1: Access Mode */
	for (i = 0; i < 4; i++) {
		/* Each byte of sd_speed_prior is one candidate mode. */
		switch ((u8)(chip->sd_speed_prior >> (i * 8))) {
		case SDR104_SUPPORT:
			if ((sd_card->func_group1_mask & SDR104_SUPPORT_MASK) &&
			    chip->sdr104_en) {
				func_to_switch = SDR104_SUPPORT;
			}
			break;
		case DDR50_SUPPORT:
			if ((sd_card->func_group1_mask & DDR50_SUPPORT_MASK) &&
			    chip->ddr50_en) {
				func_to_switch = DDR50_SUPPORT;
			}
			break;
		case SDR50_SUPPORT:
			if ((sd_card->func_group1_mask & SDR50_SUPPORT_MASK) &&
			    chip->sdr50_en) {
				func_to_switch = SDR50_SUPPORT;
			}
			break;
		case HS_SUPPORT:
			if (sd_card->func_group1_mask & HS_SUPPORT_MASK)
				func_to_switch = HS_SUPPORT;
			break;
		default:
			continue;
		}
		if (func_to_switch)
			break;
	}
	dev_dbg(rtsx_dev(chip), "SD_FUNC_GROUP_1: func_to_switch = 0x%02x",
		func_to_switch);
#ifdef SUPPORT_SD_LOCK
	if ((sd_card->sd_lock_status & SD_SDR_RST) &&
	    func_to_switch == DDR50_SUPPORT &&
	    (sd_card->func_group1_mask & SDR50_SUPPORT_MASK)) {
		func_to_switch = SDR50_SUPPORT;
		dev_dbg(rtsx_dev(chip), "Using SDR50 instead of DDR50 for SD Lock\n");
	}
#endif
	if (func_to_switch) {
		retval = sd_check_switch(chip, SD_FUNC_GROUP_1, func_to_switch,
					 bus_width);
		if (retval != STATUS_SUCCESS) {
			/* Remember this mode (and faster ones) as failed. */
			if (func_to_switch == SDR104_SUPPORT) {
				sd_card->sd_switch_fail = SDR104_SUPPORT_MASK;
			} else if (func_to_switch == DDR50_SUPPORT) {
				sd_card->sd_switch_fail = SDR104_SUPPORT_MASK |
					DDR50_SUPPORT_MASK;
			} else if (func_to_switch == SDR50_SUPPORT) {
				sd_card->sd_switch_fail = SDR104_SUPPORT_MASK |
					DDR50_SUPPORT_MASK | SDR50_SUPPORT_MASK;
			}
			return STATUS_FAIL;
		}
		if (func_to_switch == SDR104_SUPPORT)
			SET_SD_SDR104(sd_card);
		else if (func_to_switch == DDR50_SUPPORT)
			SET_SD_DDR50(sd_card);
		else if (func_to_switch == SDR50_SUPPORT)
			SET_SD_SDR50(sd_card);
		else
			SET_SD_HS(sd_card);
	}
	if (CHK_SD_DDR50(sd_card)) {
		/* DDR50 needs a different push point during negotiation. */
		retval = rtsx_write_register(chip, SD_PUSH_POINT_CTL, 0x06,
					     0x04);
		if (retval)
			return retval;
		retval = sd_set_sample_push_timing(chip);
		if (retval != STATUS_SUCCESS)
			return STATUS_FAIL;
	}
	if (!func_to_switch || func_to_switch == HS_SUPPORT) {
		/* Do not try to switch current limit if the card doesn't
		 * support UHS mode or we don't want it to support UHS mode
		 */
		return STATUS_SUCCESS;
	}
	/* Function Group 4: Current Limit */
	func_to_switch = 0xFF;
	for (i = 0; i < 4; i++) {
		switch ((u8)(chip->sd_current_prior >> (i * 8))) {
		case CURRENT_LIMIT_800:
			if (sd_card->func_group4_mask & CURRENT_LIMIT_800_MASK)
				func_to_switch = CURRENT_LIMIT_800;
			break;
		case CURRENT_LIMIT_600:
			if (sd_card->func_group4_mask & CURRENT_LIMIT_600_MASK)
				func_to_switch = CURRENT_LIMIT_600;
			break;
		case CURRENT_LIMIT_400:
			if (sd_card->func_group4_mask & CURRENT_LIMIT_400_MASK)
				func_to_switch = CURRENT_LIMIT_400;
			break;
		case CURRENT_LIMIT_200:
			if (sd_card->func_group4_mask & CURRENT_LIMIT_200_MASK)
				func_to_switch = CURRENT_LIMIT_200;
			break;
		default:
			continue;
		}
		if (func_to_switch != 0xFF)
			break;
	}
	dev_dbg(rtsx_dev(chip), "SD_FUNC_GROUP_4: func_to_switch = 0x%02x",
		func_to_switch);
	if (func_to_switch <= CURRENT_LIMIT_800) {
		/* A failed current-limit switch is only fatal on removal. */
		retval = sd_check_switch(chip, SD_FUNC_GROUP_4, func_to_switch,
					 bus_width);
		if (retval != STATUS_SUCCESS) {
			if (sd_check_err_code(chip, SD_NO_CARD))
				return STATUS_FAIL;
		}
		dev_dbg(rtsx_dev(chip), "Switch current limit finished! (%d)\n",
			retval);
	}
	if (CHK_SD_DDR50(sd_card)) {
		retval = rtsx_write_register(chip, SD_PUSH_POINT_CTL, 0x06, 0);
		if (retval)
			return retval;
	}
	return STATUS_SUCCESS;
}
/*
 * sd_wait_data_idle - poll SD_DATA_STATE until the data path is idle.
 *
 * Polls up to 100 times with a 100us delay between reads.  Returns
 * STATUS_SUCCESS once SD_DATA_IDLE is observed, the raw register-read
 * error if a read fails, or STATUS_TIMEDOUT if the path never idles.
 *
 * Bug fix: the original reused one variable for both the read result
 * and the final status, so a successful last read (0) masked the
 * timeout and the function returned 0 instead of STATUS_TIMEDOUT when
 * the loop was exhausted.  The read error now lives in its own local.
 * (All current callers ignore the return value, so this is
 * backward-compatible.)
 */
static int sd_wait_data_idle(struct rtsx_chip *chip)
{
	int retval = STATUS_TIMEDOUT;
	int i;
	u8 val = 0;

	for (i = 0; i < 100; i++) {
		int err = rtsx_read_register(chip, SD_DATA_STATE, &val);

		if (err)
			return err;
		if (val & SD_DATA_IDLE) {
			retval = STATUS_SUCCESS;
			break;
		}
		udelay(100);
	}
	dev_dbg(rtsx_dev(chip), "SD_DATA_STATE: 0x%02x\n", val);

	return retval;
}
/*
 * One SDR RX tuning step: set the sample phase, then fire CMD19
 * (SEND_TUNING_PATTERN) in auto-tuning mode and see whether the
 * 64-byte pattern is received cleanly on the 4-bit bus.
 */
static int sd_sdr_tuning_rx_cmd(struct rtsx_chip *chip, u8 sample_point)
{
	u8 cmd[5] = { 0x40 | SEND_TUNING_PATTERN, 0, 0, 0, 0 };
	int retval;

	retval = sd_change_phase(chip, sample_point, TUNE_RX);
	if (retval != STATUS_SUCCESS)
		return STATUS_FAIL;

	retval = sd_read_data(chip, SD_TM_AUTO_TUNING, cmd, 5, 0x40, 1,
			      SD_BUS_WIDTH_4, NULL, 0, 100);
	if (retval != STATUS_SUCCESS) {
		/* Let the data lines settle before clearing the error. */
		(void)sd_wait_data_idle(chip);
		rtsx_clear_sd_error(chip);
		return STATUS_FAIL;
	}

	return STATUS_SUCCESS;
}
/*
 * One DDR RX tuning step: set the sample phase, then read the 64-byte
 * SD status block (ACMD13) on the 4-bit bus as the probe transfer.
 */
static int sd_ddr_tuning_rx_cmd(struct rtsx_chip *chip, u8 sample_point)
{
	struct sd_info *sd_card = &chip->sd_card;
	u8 cmd[5] = { 0x40 | SD_STATUS, 0, 0, 0, 0 };
	int retval;

	retval = sd_change_phase(chip, sample_point, TUNE_RX);
	if (retval != STATUS_SUCCESS)
		return STATUS_FAIL;

	dev_dbg(rtsx_dev(chip), "sd ddr tuning rx\n");

	/* SD_STATUS is an application command: prefix it with APP_CMD. */
	retval = sd_send_cmd_get_rsp(chip, APP_CMD, sd_card->sd_addr,
				     SD_RSP_TYPE_R1, NULL, 0);
	if (retval != STATUS_SUCCESS)
		return STATUS_FAIL;

	retval = sd_read_data(chip, SD_TM_NORMAL_READ, cmd, 5, 64, 1,
			      SD_BUS_WIDTH_4, NULL, 0, 100);
	if (retval != STATUS_SUCCESS) {
		(void)sd_wait_data_idle(chip);
		rtsx_clear_sd_error(chip);
		return STATUS_FAIL;
	}

	return STATUS_SUCCESS;
}
/*
 * One MMC DDR RX tuning step: set the sample phase, then read the
 * 512-byte EXT_CSD (CMD8) at the current bus width as the probe.
 */
static int mmc_ddr_tuning_rx_cmd(struct rtsx_chip *chip, u8 sample_point)
{
	struct sd_info *sd_card = &chip->sd_card;
	u8 cmd[5] = { 0x40 | SEND_EXT_CSD, 0, 0, 0, 0 };
	u8 bus_width;
	int retval;

	bus_width = CHK_MMC_8BIT(sd_card) ? SD_BUS_WIDTH_8 :
		    CHK_MMC_4BIT(sd_card) ? SD_BUS_WIDTH_4 :
					    SD_BUS_WIDTH_1;

	retval = sd_change_phase(chip, sample_point, TUNE_RX);
	if (retval != STATUS_SUCCESS)
		return STATUS_FAIL;

	dev_dbg(rtsx_dev(chip), "mmc ddr tuning rx\n");

	retval = sd_read_data(chip, SD_TM_NORMAL_READ, cmd, 5, 0x200, 1,
			      bus_width, NULL, 0, 100);
	if (retval != STATUS_SUCCESS) {
		(void)sd_wait_data_idle(chip);
		rtsx_clear_sd_error(chip);
		return STATUS_FAIL;
	}

	return STATUS_SUCCESS;
}
/*
 * sd_sdr_tuning_tx_cmd - one SDR TX tuning step.
 *
 * Sets the TX phase, enables the 80-clock response timeout, then
 * probes with SEND_STATUS.  Only an SD_RSP_TIMEOUT failure is treated
 * as a bad phase.
 *
 * NOTE(review): a SEND_STATUS failure with any other error code falls
 * through and the function still returns STATUS_SUCCESS — confirm
 * this is intentional (it appears to treat non-timeout errors as
 * unrelated to the phase under test).
 */
static int sd_sdr_tuning_tx_cmd(struct rtsx_chip *chip, u8 sample_point)
{
	struct sd_info *sd_card = &chip->sd_card;
	int retval;
	retval = sd_change_phase(chip, sample_point, TUNE_TX);
	if (retval != STATUS_SUCCESS)
		return STATUS_FAIL;
	retval = rtsx_write_register(chip, SD_CFG3, SD_RSP_80CLK_TIMEOUT_EN,
				     SD_RSP_80CLK_TIMEOUT_EN);
	if (retval)
		return retval;
	retval = sd_send_cmd_get_rsp(chip, SEND_STATUS, sd_card->sd_addr,
				     SD_RSP_TYPE_R1, NULL, 0);
	if (retval != STATUS_SUCCESS) {
		if (sd_check_err_code(chip, SD_RSP_TIMEOUT)) {
			rtsx_write_register(chip, SD_CFG3,
					    SD_RSP_80CLK_TIMEOUT_EN, 0);
			return STATUS_FAIL;
		}
	}
	/* Always disable the 80-clock timeout again before returning. */
	retval = rtsx_write_register(chip, SD_CFG3, SD_RSP_80CLK_TIMEOUT_EN,
				     0);
	if (retval)
		return retval;
	return STATUS_SUCCESS;
}
/*
 * sd_ddr_tuning_tx_cmd - one DDR TX tuning step.
 *
 * Sets the TX phase, waits for the card to be in TRAN state with the
 * buffer ready, then probes the write path by re-programming the CSD
 * (CMD27) with the cached raw_csd contents at the current bus width.
 * The 80-clock response timeout is enabled around the probe and always
 * disabled again afterwards.
 */
static int sd_ddr_tuning_tx_cmd(struct rtsx_chip *chip, u8 sample_point)
{
	struct sd_info *sd_card = &chip->sd_card;
	int retval;
	u8 cmd[5], bus_width;
	retval = sd_change_phase(chip, sample_point, TUNE_TX);
	if (retval != STATUS_SUCCESS)
		return STATUS_FAIL;
	if (CHK_SD(sd_card)) {
		bus_width = SD_BUS_WIDTH_4;
	} else {
		if (CHK_MMC_8BIT(sd_card))
			bus_width = SD_BUS_WIDTH_8;
		else if (CHK_MMC_4BIT(sd_card))
			bus_width = SD_BUS_WIDTH_4;
		else
			bus_width = SD_BUS_WIDTH_1;
	}
	/* 0x08 is the TRAN state encoding checked against rsp[3]. */
	retval = sd_wait_state_data_ready(chip, 0x08, 1, 1000);
	if (retval != STATUS_SUCCESS)
		return STATUS_FAIL;
	retval = rtsx_write_register(chip, SD_CFG3, SD_RSP_80CLK_TIMEOUT_EN,
				     SD_RSP_80CLK_TIMEOUT_EN);
	if (retval)
		return retval;
	cmd[0] = 0x40 | PROGRAM_CSD;
	cmd[1] = 0;
	cmd[2] = 0;
	cmd[3] = 0;
	cmd[4] = 0;
	retval = sd_write_data(chip, SD_TM_AUTO_WRITE_2, cmd, 5, 16, 1,
			       bus_width, sd_card->raw_csd, 16, 100);
	if (retval != STATUS_SUCCESS) {
		rtsx_clear_sd_error(chip);
		rtsx_write_register(chip, SD_CFG3, SD_RSP_80CLK_TIMEOUT_EN, 0);
		return STATUS_FAIL;
	}
	retval = rtsx_write_register(chip, SD_CFG3, SD_RSP_80CLK_TIMEOUT_EN,
				     0);
	if (retval)
		return retval;
	/* Best-effort status read to clear any pending card state. */
	sd_send_cmd_get_rsp(chip, SEND_STATUS, sd_card->sd_addr, SD_RSP_TYPE_R1,
			    NULL, 0);
	return STATUS_SUCCESS;
}
/*
 * Choose the best sample phase from @phase_map.
 *
 * @phase_map is a bitmap over the MAX_PHASE + 1 sample points: bit i set
 * means point i passed tuning.  @tune_dir selects TUNE_RX or TUNE_TX.
 *
 * Strategy: collect every run of consecutive good phases, merge the first
 * and last runs when the map wraps around (phase 0 and MAX_PHASE are
 * adjacent on the phase ring), and return the midpoint of the longest run.
 * An all-good map short-circuits to the chip's configured default phase.
 * For TX in SDR104/SDR50 mode, long windows get the phase biased toward
 * the end of the window instead of its middle.
 *
 * Returns the chosen phase, or 0xFF when no usable phase exists.
 */
static u8 sd_search_final_phase(struct rtsx_chip *chip, u32 phase_map,
				u8 tune_dir)
{
	struct sd_info *sd_card = &chip->sd_card;
	struct timing_phase_path path[MAX_PHASE + 1];
	int i, j, cont_path_cnt;
	bool new_block;
	int max_len, final_path_idx;
	u8 final_phase = 0xFF;
	/* Every point passed: fall back to the configured default phase. */
	if (phase_map == 0xFFFFFFFF) {
		if (tune_dir == TUNE_RX)
			final_phase = (u8)chip->sd_default_rx_phase;
		else
			final_phase = (u8)chip->sd_default_tx_phase;
		goto search_finish;
	}
	/* Scan the bitmap, recording each run of consecutive set bits. */
	cont_path_cnt = 0;
	new_block = true;
	j = 0;
	for (i = 0; i < MAX_PHASE + 1; i++) {
		if (phase_map & (1 << i)) {
			if (new_block) {
				new_block = false;
				j = cont_path_cnt++;
				path[j].start = i;
				path[j].end = i;
			} else {
				path[j].end = i;
			}
		} else {
			new_block = true;
			/* A run just ended: finalize its length/midpoint. */
			if (cont_path_cnt) {
				int idx = cont_path_cnt - 1;
				path[idx].len = path[idx].end -
					path[idx].start + 1;
				path[idx].mid = path[idx].start +
					path[idx].len / 2;
			}
		}
	}
	if (cont_path_cnt == 0) {
		dev_dbg(rtsx_dev(chip), "No continuous phase path\n");
		goto search_finish;
	} else {
		/* The last run may end at MAX_PHASE without being closed
		 * out by the loop above; finalize it here.
		 */
		int idx = cont_path_cnt - 1;
		path[idx].len = path[idx].end - path[idx].start + 1;
		path[idx].mid = path[idx].start + path[idx].len / 2;
	}
	/* Phases wrap: if runs touch both ends of the map, merge them into
	 * path[0] with a (possibly negative) start, and fix up the midpoint
	 * modulo MAX_PHASE + 1.
	 */
	if (path[0].start == 0 &&
	    path[cont_path_cnt - 1].end == MAX_PHASE) {
		path[0].start = path[cont_path_cnt - 1].start - MAX_PHASE - 1;
		path[0].len += path[cont_path_cnt - 1].len;
		path[0].mid = path[0].start + path[0].len / 2;
		if (path[0].mid < 0)
			path[0].mid += MAX_PHASE + 1;
		cont_path_cnt--;
	}
	/* Pick the longest run; its midpoint is the provisional phase. */
	max_len = 0;
	final_phase = 0;
	final_path_idx = 0;
	for (i = 0; i < cont_path_cnt; i++) {
		if (path[i].len > max_len) {
			max_len = path[i].len;
			final_phase = (u8)path[i].mid;
			final_path_idx = i;
		}
		dev_dbg(rtsx_dev(chip), "path[%d].start = %d\n",
			i, path[i].start);
		dev_dbg(rtsx_dev(chip), "path[%d].end = %d\n", i, path[i].end);
		dev_dbg(rtsx_dev(chip), "path[%d].len = %d\n", i, path[i].len);
		dev_dbg(rtsx_dev(chip), "path[%d].mid = %d\n", i, path[i].mid);
		dev_dbg(rtsx_dev(chip), "\n");
	}
	/* TX SDR104/SDR50: for long windows, shift the phase toward the end
	 * of the window (wrapping modulo MAX_PHASE + 1 if needed).
	 */
	if (tune_dir == TUNE_TX) {
		if (CHK_SD_SDR104(sd_card)) {
			if (max_len > 15) {
				int temp_mid = (max_len - 16) / 2;
				int temp_final_phase =
					path[final_path_idx].end -
					(max_len - (6 + temp_mid));
				if (temp_final_phase < 0)
					final_phase = (u8)(temp_final_phase +
							   MAX_PHASE + 1);
				else
					final_phase = (u8)temp_final_phase;
			}
		} else if (CHK_SD_SDR50(sd_card)) {
			if (max_len > 12) {
				int temp_mid = (max_len - 13) / 2;
				int temp_final_phase =
					path[final_path_idx].end -
					(max_len - (3 + temp_mid));
				if (temp_final_phase < 0)
					final_phase = (u8)(temp_final_phase +
							   MAX_PHASE + 1);
				else
					final_phase = (u8)temp_final_phase;
			}
		}
	}
search_finish:
	dev_dbg(rtsx_dev(chip), "Final chosen phase: %d\n", final_phase);
	return final_phase;
}
static int sd_tuning_rx(struct rtsx_chip *chip)
{
struct sd_info *sd_card = &chip->sd_card;
int retval;
int i, j;
u32 raw_phase_map[3], phase_map;
u8 final_phase;
int (*tuning_cmd)(struct rtsx_chip *chip, u8 sample_point);
if (CHK_SD(sd_card)) {
if (CHK_SD_DDR50(sd_card))
tuning_cmd = sd_ddr_tuning_rx_cmd;
else
tuning_cmd = sd_sdr_tuning_rx_cmd;
} else {
if (CHK_MMC_DDR52(sd_card))
tuning_cmd = mmc_ddr_tuning_rx_cmd;
else
return STATUS_FAIL;
}
for (i = 0; i < 3; i++) {
raw_phase_map[i] = 0;
for (j = MAX_PHASE; j >= 0; j--) {
if (detect_card_cd(chip, SD_CARD) != STATUS_SUCCESS) {
sd_set_err_code(chip, SD_NO_CARD);
return STATUS_FAIL;
}
retval = tuning_cmd(chip, (u8)j);
if (retval == STATUS_SUCCESS)
raw_phase_map[i] |= 1 << j;
}
}
phase_map = raw_phase_map[0] & raw_phase_map[1] & raw_phase_map[2];
for (i = 0; i < 3; i++)
dev_dbg(rtsx_dev(chip), "RX raw_phase_map[%d] = 0x%08x\n",
i, raw_phase_map[i]);
dev_dbg(rtsx_dev(chip), "RX phase_map = 0x%08x\n", phase_map);
final_phase = sd_search_final_phase(chip, phase_map, TUNE_RX);
if (final_phase == 0xFF)
return STATUS_FAIL;
retval = sd_change_phase(chip, final_phase, TUNE_RX);
if (retval != STATUS_SUCCESS)
return STATUS_FAIL;
return STATUS_SUCCESS;
}
/*
 * Coarse TX phase sweep run before DDR tuning proper.
 *
 * With the 80-clock response timeout armed, tries SEND_STATUS (CMD13) at
 * every sample point.  A point is marked good when the command succeeds
 * or fails with anything other than a response timeout.  The best phase
 * from the resulting map is then programmed.
 *
 * Returns STATUS_SUCCESS or STATUS_FAIL (register access errors may be
 * returned as-is).
 */
static int sd_ddr_pre_tuning_tx(struct rtsx_chip *chip)
{
	struct sd_info *sd_card = &chip->sd_card;
	int retval;
	int i;
	u32 phase_map;
	u8 final_phase;
	retval = rtsx_write_register(chip, SD_CFG3, SD_RSP_80CLK_TIMEOUT_EN,
				     SD_RSP_80CLK_TIMEOUT_EN);
	if (retval)
		return retval;
	phase_map = 0;
	for (i = MAX_PHASE; i >= 0; i--) {
		if (detect_card_cd(chip, SD_CARD) != STATUS_SUCCESS) {
			sd_set_err_code(chip, SD_NO_CARD);
			/* Disarm the 80-clock timeout before bailing out. */
			rtsx_write_register(chip, SD_CFG3,
					    SD_RSP_80CLK_TIMEOUT_EN, 0);
			return STATUS_FAIL;
		}
		retval = sd_change_phase(chip, (u8)i, TUNE_TX);
		if (retval != STATUS_SUCCESS)
			continue;
		retval = sd_send_cmd_get_rsp(chip, SEND_STATUS,
					     sd_card->sd_addr, SD_RSP_TYPE_R1,
					     NULL, 0);
		/* Only a response timeout disqualifies the phase. */
		if (retval == STATUS_SUCCESS ||
		    !sd_check_err_code(chip, SD_RSP_TIMEOUT))
			phase_map |= 1 << i;
	}
	retval = rtsx_write_register(chip, SD_CFG3, SD_RSP_80CLK_TIMEOUT_EN,
				     0);
	if (retval)
		return retval;
	dev_dbg(rtsx_dev(chip), "DDR TX pre tune phase_map = 0x%08x\n",
		phase_map);
	final_phase = sd_search_final_phase(chip, phase_map, TUNE_TX);
	if (final_phase == 0xFF)
		return STATUS_FAIL;
	retval = sd_change_phase(chip, final_phase, TUNE_TX);
	if (retval != STATUS_SUCCESS)
		return STATUS_FAIL;
	dev_dbg(rtsx_dev(chip), "DDR TX pre tune phase: %d\n",
		(int)final_phase);
	return STATUS_SUCCESS;
}
static int sd_tuning_tx(struct rtsx_chip *chip)
{
struct sd_info *sd_card = &chip->sd_card;
int retval;
int i, j;
u32 raw_phase_map[3], phase_map;
u8 final_phase;
int (*tuning_cmd)(struct rtsx_chip *chip, u8 sample_point);
if (CHK_SD(sd_card)) {
if (CHK_SD_DDR50(sd_card))
tuning_cmd = sd_ddr_tuning_tx_cmd;
else
tuning_cmd = sd_sdr_tuning_tx_cmd;
} else {
if (CHK_MMC_DDR52(sd_card))
tuning_cmd = sd_ddr_tuning_tx_cmd;
else
return STATUS_FAIL;
}
for (i = 0; i < 3; i++) {
raw_phase_map[i] = 0;
for (j = MAX_PHASE; j >= 0; j--) {
if (detect_card_cd(chip, SD_CARD) != STATUS_SUCCESS) {
sd_set_err_code(chip, SD_NO_CARD);
rtsx_write_register(chip, SD_CFG3,
SD_RSP_80CLK_TIMEOUT_EN, 0);
return STATUS_FAIL;
}
retval = tuning_cmd(chip, (u8)j);
if (retval == STATUS_SUCCESS)
raw_phase_map[i] |= 1 << j;
}
}
phase_map = raw_phase_map[0] & raw_phase_map[1] & raw_phase_map[2];
for (i = 0; i < 3; i++)
dev_dbg(rtsx_dev(chip), "TX raw_phase_map[%d] = 0x%08x\n",
i, raw_phase_map[i]);
dev_dbg(rtsx_dev(chip), "TX phase_map = 0x%08x\n", phase_map);
final_phase = sd_search_final_phase(chip, phase_map, TUNE_TX);
if (final_phase == 0xFF)
return STATUS_FAIL;
retval = sd_change_phase(chip, final_phase, TUNE_TX);
if (retval != STATUS_SUCCESS)
return STATUS_FAIL;
return STATUS_SUCCESS;
}
static int sd_sdr_tuning(struct rtsx_chip *chip)
{
	/* SDR modes tune the TX path first, then the RX path. */
	if (sd_tuning_tx(chip) != STATUS_SUCCESS)
		return STATUS_FAIL;

	if (sd_tuning_rx(chip) != STATUS_SUCCESS)
		return STATUS_FAIL;

	return STATUS_SUCCESS;
}
static int sd_ddr_tuning(struct rtsx_chip *chip)
{
int retval;
if (!(chip->sd_ctl & SD_DDR_TX_PHASE_SET_BY_USER)) {
retval = sd_ddr_pre_tuning_tx(chip);
if (retval != STATUS_SUCCESS)
return STATUS_FAIL;
} else {
retval = sd_change_phase(chip, (u8)chip->sd_ddr_tx_phase,
TUNE_TX);
if (retval != STATUS_SUCCESS)
return STATUS_FAIL;
}
retval = sd_tuning_rx(chip);
if (retval != STATUS_SUCCESS)
return STATUS_FAIL;
if (!(chip->sd_ctl & SD_DDR_TX_PHASE_SET_BY_USER)) {
retval = sd_tuning_tx(chip);
if (retval != STATUS_SUCCESS)
return STATUS_FAIL;
}
return STATUS_SUCCESS;
}
static int mmc_ddr_tuning(struct rtsx_chip *chip)
{
int retval;
if (!(chip->sd_ctl & MMC_DDR_TX_PHASE_SET_BY_USER)) {
retval = sd_ddr_pre_tuning_tx(chip);
if (retval != STATUS_SUCCESS)
return STATUS_FAIL;
} else {
retval = sd_change_phase(chip, (u8)chip->mmc_ddr_tx_phase,
TUNE_TX);
if (retval != STATUS_SUCCESS)
return STATUS_FAIL;
}
retval = sd_tuning_rx(chip);
if (retval != STATUS_SUCCESS)
return STATUS_FAIL;
if (!(chip->sd_ctl & MMC_DDR_TX_PHASE_SET_BY_USER)) {
retval = sd_tuning_tx(chip);
if (retval != STATUS_SUCCESS)
return STATUS_FAIL;
}
return STATUS_SUCCESS;
}
/*
 * Re-select the SD card on the shared bus and program the card clock to
 * the rate cached in sd_card->sd_clock.
 *
 * Returns STATUS_SUCCESS or STATUS_FAIL.
 *
 * Note: this function used to carry an `if (re_tuning)` block that re-ran
 * phase tuning after the clock switch, but `re_tuning` was hard-wired to 0
 * and never assigned, making that entire branch unreachable dead code; it
 * has been removed with no change in behavior.
 */
int sd_switch_clock(struct rtsx_chip *chip)
{
	struct sd_info *sd_card = &chip->sd_card;
	int retval;

	retval = select_card(chip, SD_CARD);
	if (retval != STATUS_SUCCESS)
		return STATUS_FAIL;

	retval = switch_clock(chip, sd_card->sd_clock);
	if (retval != STATUS_SUCCESS)
		return STATUS_FAIL;

	return STATUS_SUCCESS;
}
/*
 * Put the SD interface into a known state before card initialization:
 * clear the cached card info, drop to the initial identification clock,
 * reset the SD engine / clear pending errors and select the SD card on
 * the shared bus.
 *
 * Returns STATUS_SUCCESS, STATUS_FAIL, or a register access error code.
 */
static int sd_prepare_reset(struct rtsx_chip *chip)
{
	struct sd_info *sd_card = &chip->sd_card;
	int retval;
	/* Initial (identification-phase) clock setting. */
	if (chip->asic_code)
		sd_card->sd_clock = 29;
	else
		sd_card->sd_clock = CLK_30;
	/* Forget everything we knew about the previous card. */
	sd_card->sd_type = 0;
	sd_card->seq_mode = 0;
	sd_card->sd_data_buf_ready = 0;
	sd_card->capacity = 0;
#ifdef SUPPORT_SD_LOCK
	sd_card->sd_lock_status = 0;
	sd_card->sd_erase_status = 0;
#endif
	chip->capacity[chip->card2lun[SD_CARD]] = 0;
	chip->sd_io = 0;
	retval = sd_set_init_para(chip);
	if (retval != STATUS_SUCCESS)
		return retval;
	/* NOTE(review): 0x40 presumably restores REG_SD_CFG1 to its default
	 * mode/divider configuration — confirm against the datasheet.
	 */
	retval = rtsx_write_register(chip, REG_SD_CFG1, 0xFF, 0x40);
	if (retval)
		return retval;
	/* Stop any in-flight SD transfer and clear latched errors. */
	retval = rtsx_write_register(chip, CARD_STOP, SD_STOP | SD_CLR_ERR,
				     SD_STOP | SD_CLR_ERR);
	if (retval)
		return retval;
	retval = select_card(chip, SD_CARD);
	if (retval != STATUS_SUCCESS)
		return STATUS_FAIL;
	return STATUS_SUCCESS;
}
static int sd_pull_ctl_disable(struct rtsx_chip *chip)
{
int retval;
if (CHECK_PID(chip, 0x5208)) {
retval = rtsx_write_register(chip, CARD_PULL_CTL1, 0xFF,
XD_D3_PD | SD_D7_PD | SD_CLK_PD |
SD_D5_PD);
if (retval)
return retval;
retval = rtsx_write_register(chip, CARD_PULL_CTL2, 0xFF,
SD_D6_PD | SD_D0_PD | SD_D1_PD |
XD_D5_PD);
if (retval)
return retval;
retval = rtsx_write_register(chip, CARD_PULL_CTL3, 0xFF,
SD_D4_PD | XD_CE_PD | XD_CLE_PD |
XD_CD_PU);
if (retval)
return retval;
retval = rtsx_write_register(chip, CARD_PULL_CTL4, 0xFF,
XD_RDY_PD | SD_D3_PD | SD_D2_PD |
XD_ALE_PD);
if (retval)
return retval;
retval = rtsx_write_register(chip, CARD_PULL_CTL5, 0xFF,
MS_INS_PU | SD_WP_PD | SD_CD_PU |
SD_CMD_PD);
if (retval)
return retval;
retval = rtsx_write_register(chip, CARD_PULL_CTL6, 0xFF,
MS_D5_PD | MS_D4_PD);
if (retval)
return retval;
} else if (CHECK_PID(chip, 0x5288)) {
if (CHECK_BARO_PKG(chip, QFN)) {
retval = rtsx_write_register(chip, CARD_PULL_CTL1,
0xFF, 0x55);
if (retval)
return retval;
retval = rtsx_write_register(chip, CARD_PULL_CTL2,
0xFF, 0x55);
if (retval)
return retval;
retval = rtsx_write_register(chip, CARD_PULL_CTL3,
0xFF, 0x4B);
if (retval)
return retval;
retval = rtsx_write_register(chip, CARD_PULL_CTL4,
0xFF, 0x69);
if (retval)
return retval;
}
}
return STATUS_SUCCESS;
}
/*
 * Program the SD pad pulls for an inserted, powered card.  All register
 * writes are queued into one host command buffer and sent in a single
 * transfer.  Values are chip specific: the 5208 sets six pull-control
 * registers, the 5288 (QFN package only) sets four.
 *
 * Returns STATUS_SUCCESS or STATUS_FAIL.
 */
int sd_pull_ctl_enable(struct rtsx_chip *chip)
{
	int retval;
	rtsx_init_cmd(chip);
	if (CHECK_PID(chip, 0x5208)) {
		rtsx_add_cmd(chip, WRITE_REG_CMD, CARD_PULL_CTL1, 0xFF,
			     XD_D3_PD | SD_DAT7_PU | SD_CLK_NP | SD_D5_PU);
		rtsx_add_cmd(chip, WRITE_REG_CMD, CARD_PULL_CTL2, 0xFF,
			     SD_D6_PU | SD_D0_PU | SD_D1_PU | XD_D5_PD);
		rtsx_add_cmd(chip, WRITE_REG_CMD, CARD_PULL_CTL3, 0xFF,
			     SD_D4_PU | XD_CE_PD | XD_CLE_PD | XD_CD_PU);
		rtsx_add_cmd(chip, WRITE_REG_CMD, CARD_PULL_CTL4, 0xFF,
			     XD_RDY_PD | SD_D3_PU | SD_D2_PU | XD_ALE_PD);
		rtsx_add_cmd(chip, WRITE_REG_CMD, CARD_PULL_CTL5, 0xFF,
			     MS_INS_PU | SD_WP_PU | SD_CD_PU | SD_CMD_PU);
		rtsx_add_cmd(chip, WRITE_REG_CMD, CARD_PULL_CTL6, 0xFF,
			     MS_D5_PD | MS_D4_PD);
	} else if (CHECK_PID(chip, 0x5288)) {
		if (CHECK_BARO_PKG(chip, QFN)) {
			rtsx_add_cmd(chip, WRITE_REG_CMD, CARD_PULL_CTL1, 0xFF,
				     0xA8);
			rtsx_add_cmd(chip, WRITE_REG_CMD, CARD_PULL_CTL2, 0xFF,
				     0x5A);
			rtsx_add_cmd(chip, WRITE_REG_CMD, CARD_PULL_CTL3, 0xFF,
				     0x95);
			rtsx_add_cmd(chip, WRITE_REG_CMD, CARD_PULL_CTL4, 0xFF,
				     0xAA);
		}
	}
	/* Flush the queued writes in one go. */
	retval = rtsx_send_cmd(chip, SD_CARD, 100);
	if (retval < 0)
		return STATUS_FAIL;
	return STATUS_SUCCESS;
}
/*
 * Power-cycle the SD card slot: power off, settle, re-enable the card
 * clock, configure pad pulls (ASIC) or the FPGA pull control, power back
 * on (with an over-current check when OCP support is compiled in) and
 * finally enable the SD output drivers.  In ft2_fast_mode the settle
 * delays and the power-on step are skipped.
 *
 * Returns STATUS_SUCCESS, STATUS_FAIL, or a register access error code.
 */
static int sd_init_power(struct rtsx_chip *chip)
{
	int retval;
	retval = sd_power_off_card3v3(chip);
	if (retval != STATUS_SUCCESS)
		return STATUS_FAIL;
	/* Let the card rail discharge before re-powering. */
	if (!chip->ft2_fast_mode)
		wait_timeout(250);
	retval = enable_card_clock(chip, SD_CARD);
	if (retval != STATUS_SUCCESS)
		return STATUS_FAIL;
	if (chip->asic_code) {
		retval = sd_pull_ctl_enable(chip);
		if (retval != STATUS_SUCCESS)
			return STATUS_FAIL;
	} else {
		retval = rtsx_write_register(chip, FPGA_PULL_CTL,
					     FPGA_SD_PULL_CTL_BIT | 0x20, 0);
		if (retval)
			return retval;
	}
	if (!chip->ft2_fast_mode) {
		retval = card_power_on(chip, SD_CARD);
		if (retval != STATUS_SUCCESS)
			return STATUS_FAIL;
		/* Power-up ramp time before talking to the card. */
		wait_timeout(260);
#ifdef SUPPORT_OCP
		/* Abort if over-current was flagged during power-on. */
		if (chip->ocp_stat & (SD_OC_NOW | SD_OC_EVER)) {
			dev_dbg(rtsx_dev(chip), "Over current, OCPSTAT is 0x%x\n",
				chip->ocp_stat);
			return STATUS_FAIL;
		}
#endif
	}
	retval = rtsx_write_register(chip, CARD_OE, SD_OUTPUT_EN,
				     SD_OUTPUT_EN);
	if (retval)
		return retval;
	return STATUS_SUCCESS;
}
static int sd_dummy_clock(struct rtsx_chip *chip)
{
	/*
	 * Feed the card a burst of dummy clocks: pulse bit 0 of
	 * REG_SD_CFG3 high for ~5 ms, then drop it again.
	 */
	int err;

	err = rtsx_write_register(chip, REG_SD_CFG3, 0x01, 0x01);
	if (err)
		return err;

	wait_timeout(5);

	err = rtsx_write_register(chip, REG_SD_CFG3, 0x01, 0);
	return err ? err : STATUS_SUCCESS;
}
static int sd_read_lba0(struct rtsx_chip *chip)
{
	/*
	 * Sanity read of the first 512-byte block (READ_SINGLE_BLOCK,
	 * address 0) at the configured bus width; the data is discarded.
	 * Callers use this after speed switching/tuning to verify that the
	 * card really transfers data in the new mode.
	 */
	struct sd_info *sd_card = &chip->sd_card;
	u8 rd_cmd[5] = {0x40 | READ_SINGLE_BLOCK, 0, 0, 0, 0};
	u8 width;

	if (CHK_SD(sd_card))
		width = SD_BUS_WIDTH_4;
	else if (CHK_MMC_8BIT(sd_card))
		width = SD_BUS_WIDTH_8;
	else if (CHK_MMC_4BIT(sd_card))
		width = SD_BUS_WIDTH_4;
	else
		width = SD_BUS_WIDTH_1;

	if (sd_read_data(chip, SD_TM_NORMAL_READ, rd_cmd, 5, 512, 1,
			 width, NULL, 0, 100) != STATUS_SUCCESS) {
		rtsx_clear_sd_error(chip);
		return STATUS_FAIL;
	}

	return STATUS_SUCCESS;
}
/*
 * Decide whether the card must be treated as read-only.
 *
 * Reads the 64-byte SD Status block via ACMD13 and inspects the
 * SD_CARD_TYPE field (bytes 2-3): 0x0001 (ROM) and 0x0002 (OTP) cards can
 * never be written.  Also samples the mechanical write-protect switch via
 * the BIPR register.  Sets SD_CARD in chip->card_wp accordingly.
 *
 * Returns STATUS_SUCCESS or STATUS_FAIL.
 */
static int sd_check_wp_state(struct rtsx_chip *chip)
{
	struct sd_info *sd_card = &chip->sd_card;
	int retval;
	u32 val;
	u16 sd_card_type;
	u8 cmd[5], buf[64];
	/* CMD55 prefix: the next command is application specific. */
	retval = sd_send_cmd_get_rsp(chip, APP_CMD, sd_card->sd_addr,
				     SD_RSP_TYPE_R1, NULL, 0);
	if (retval != STATUS_SUCCESS)
		return STATUS_FAIL;
	/* ACMD13: fetch the 64-byte SD Status register. */
	cmd[0] = 0x40 | SD_STATUS;
	cmd[1] = 0;
	cmd[2] = 0;
	cmd[3] = 0;
	cmd[4] = 0;
	retval = sd_read_data(chip, SD_TM_NORMAL_READ, cmd, 5, 64, 1,
			      SD_BUS_WIDTH_4, buf, 64, 250);
	if (retval != STATUS_SUCCESS) {
		/* Clear the error and poll status so the card settles. */
		rtsx_clear_sd_error(chip);
		sd_send_cmd_get_rsp(chip, SEND_STATUS, sd_card->sd_addr,
				    SD_RSP_TYPE_R1, NULL, 0);
		return STATUS_FAIL;
	}
	dev_dbg(rtsx_dev(chip), "ACMD13:\n");
	dev_dbg(rtsx_dev(chip), "%*ph\n", 64, buf);
	sd_card_type = ((u16)buf[2] << 8) | buf[3];
	dev_dbg(rtsx_dev(chip), "sd_card_type = 0x%04x\n", sd_card_type);
	if (sd_card_type == 0x0001 || sd_card_type == 0x0002) {
		/* ROM card or OTP */
		chip->card_wp |= SD_CARD;
	}
	/* Check SD Machanical Write-Protect Switch */
	val = rtsx_readl(chip, RTSX_BIPR);
	if (val & SD_WRITE_PROTECT)
		chip->card_wp |= SD_CARD;
	return STATUS_SUCCESS;
}
/*
 * Full SD card initialization state machine.
 *
 * Resets the interface, optionally probes for an SDIO function, then runs
 * the SD init sequence: CMD0, CMD8 (voltage/HCS probe), the ACMD41 busy
 * loop, CID/RCA retrieval, CSD parsing and card selection.  After basic
 * init it optionally performs the CMD6 function switch and, for SD 3.0
 * speed modes, phase tuning with a read-back sanity check.  On a switch
 * or tuning failure it restarts from the switch_fail label with
 * progressively more conservative settings (no function switch, then
 * SD 2.0 only).
 *
 * Returns STATUS_SUCCESS or STATUS_FAIL (a register access error code may
 * be returned as-is in a few places).
 */
static int reset_sd(struct rtsx_chip *chip)
{
	struct sd_info *sd_card = &chip->sd_card;
	bool hi_cap_flow = false;
	int retval, i = 0, j = 0, k = 0;
	bool sd_dont_switch = false;
	bool support_1v8 = false;
	bool try_sdio = true;
	u8 rsp[16];
	u8 switch_bus_width;
	u32 voltage = 0;
	bool sd20_mode = false;
	SET_SD(sd_card);
	/* Restart point: re-run init with the fallback flags adjusted. */
switch_fail:
	i = 0;
	j = 0;
	k = 0;
	hi_cap_flow = false;
#ifdef SUPPORT_SD_LOCK
	/* A card being unlocked at power-on skips re-identification. */
	if (sd_card->sd_lock_status & SD_UNLOCK_POW_ON)
		goto SD_UNLOCK_ENTRY;
#endif
	retval = sd_prepare_reset(chip);
	if (retval != STATUS_SUCCESS)
		goto status_fail;
	retval = sd_dummy_clock(chip);
	if (retval != STATUS_SUCCESS)
		goto status_fail;
	/* Probe for an SDIO function first (CMD5); hand off to the SDIO
	 * path when the card reports a non-zero function count.
	 */
	if (CHK_SDIO_EXIST(chip) && !CHK_SDIO_IGNORED(chip) && try_sdio) {
		int rty_cnt = 0;
		for (; rty_cnt < chip->sdio_retry_cnt; rty_cnt++) {
			if (detect_card_cd(chip, SD_CARD) != STATUS_SUCCESS) {
				sd_set_err_code(chip, SD_NO_CARD);
				goto status_fail;
			}
			retval = sd_send_cmd_get_rsp(chip, IO_SEND_OP_COND, 0,
						     SD_RSP_TYPE_R4, rsp, 5);
			if (retval == STATUS_SUCCESS) {
				int func_num = (rsp[1] >> 4) & 0x07;
				if (func_num) {
					dev_dbg(rtsx_dev(chip), "SD_IO card (Function number: %d)!\n",
						func_num);
					chip->sd_io = 1;
					goto status_fail;
				}
				break;
			}
			/* No answer: power-cycle and clock, then retry. */
			sd_init_power(chip);
			sd_dummy_clock(chip);
		}
		dev_dbg(rtsx_dev(chip), "Normal card!\n");
	}
	/* Start Initialization Process of SD Card */
RTY_SD_RST:
	retval = sd_send_cmd_get_rsp(chip, GO_IDLE_STATE, 0, SD_RSP_TYPE_R0,
				     NULL, 0);
	if (retval != STATUS_SUCCESS)
		goto status_fail;
	wait_timeout(20);
	/* CMD8: a correct echo (check pattern 0xAA, voltage field 0x01)
	 * marks an SD 2.0+ card, so advertise HCS in the ACMD41 argument.
	 */
	retval = sd_send_cmd_get_rsp(chip, SEND_IF_COND, 0x000001AA,
				     SD_RSP_TYPE_R7, rsp, 5);
	if (retval == STATUS_SUCCESS) {
		if (rsp[4] == 0xAA && ((rsp[3] & 0x0f) == 0x01)) {
			hi_cap_flow = true;
			voltage = SUPPORT_VOLTAGE | 0x40000000;
		}
	}
	if (!hi_cap_flow) {
		voltage = SUPPORT_VOLTAGE;
		retval = sd_send_cmd_get_rsp(chip, GO_IDLE_STATE, 0,
					     SD_RSP_TYPE_R0, NULL, 0);
		if (retval != STATUS_SUCCESS)
			goto status_fail;
		wait_timeout(20);
	}
	/* ACMD41 loop: repeat until the card clears its busy bit (OCR bit
	 * 31, read back in rsp[1] & 0x80), with bounded retries.
	 */
	do {
		retval = sd_send_cmd_get_rsp(chip, APP_CMD, 0, SD_RSP_TYPE_R1,
					     NULL, 0);
		if (retval != STATUS_SUCCESS) {
			if (detect_card_cd(chip, SD_CARD) != STATUS_SUCCESS) {
				sd_set_err_code(chip, SD_NO_CARD);
				goto status_fail;
			}
			j++;
			if (j < 3)
				goto RTY_SD_RST;
			else
				goto status_fail;
		}
		retval = sd_send_cmd_get_rsp(chip, SD_APP_OP_COND, voltage,
					     SD_RSP_TYPE_R3, rsp, 5);
		if (retval != STATUS_SUCCESS) {
			k++;
			if (k < 3)
				goto RTY_SD_RST;
			else
				goto status_fail;
		}
		i++;
		wait_timeout(20);
	} while (!(rsp[1] & 0x80) && (i < 255));
	if (i == 255)
		goto status_fail;
	/* OCR CCS bit (rsp[1] & 0x40) distinguishes SDHC/SDXC from SDSC. */
	if (hi_cap_flow) {
		if (rsp[1] & 0x40)
			SET_SD_HCXC(sd_card);
		else
			CLR_SD_HCXC(sd_card);
		support_1v8 = false;
	} else {
		CLR_SD_HCXC(sd_card);
		support_1v8 = false;
	}
	dev_dbg(rtsx_dev(chip), "support_1v8 = %d\n", support_1v8);
	/* Dead in practice: support_1v8 is forced false in both branches
	 * above, so the 1.8 V voltage switch is never attempted here.
	 */
	if (support_1v8) {
		retval = sd_voltage_switch(chip);
		if (retval != STATUS_SUCCESS)
			goto status_fail;
	}
	retval = sd_send_cmd_get_rsp(chip, ALL_SEND_CID, 0, SD_RSP_TYPE_R2,
				     NULL, 0);
	if (retval != STATUS_SUCCESS)
		goto status_fail;
	/* CMD3: request a relative address until a non-zero RCA arrives. */
	for (i = 0; i < 3; i++) {
		retval = sd_send_cmd_get_rsp(chip, SEND_RELATIVE_ADDR, 0,
					     SD_RSP_TYPE_R6, rsp, 5);
		if (retval != STATUS_SUCCESS)
			goto status_fail;
		sd_card->sd_addr = (u32)rsp[1] << 24;
		sd_card->sd_addr += (u32)rsp[2] << 16;
		if (sd_card->sd_addr)
			break;
	}
	retval = sd_check_csd(chip, 1);
	if (retval != STATUS_SUCCESS)
		goto status_fail;
	retval = sd_select_card(chip, 1);
	if (retval != STATUS_SUCCESS)
		goto status_fail;
#ifdef SUPPORT_SD_LOCK
SD_UNLOCK_ENTRY:
	retval = sd_update_lock_status(chip);
	if (retval != STATUS_SUCCESS)
		goto status_fail;
	/* A locked card stays in 1-bit mode until the password unlock. */
	if (sd_card->sd_lock_status & SD_LOCKED) {
		sd_card->sd_lock_status |= (SD_LOCK_1BIT_MODE | SD_PWD_EXIST);
		return STATUS_SUCCESS;
	} else if (!(sd_card->sd_lock_status & SD_UNLOCK_POW_ON)) {
		sd_card->sd_lock_status &= ~SD_PWD_EXIST;
	}
#endif
	/* ACMD42 with arg 0: disconnect the card's DAT3 pull-up. */
	retval = sd_send_cmd_get_rsp(chip, APP_CMD, sd_card->sd_addr,
				     SD_RSP_TYPE_R1, NULL, 0);
	if (retval != STATUS_SUCCESS)
		goto status_fail;
	retval = sd_send_cmd_get_rsp(chip, SET_CLR_CARD_DETECT, 0,
				     SD_RSP_TYPE_R1, NULL, 0);
	if (retval != STATUS_SUCCESS)
		goto status_fail;
	if (support_1v8) {
		/* ACMD6 with arg 2: switch the card to 4-bit bus now. */
		retval = sd_send_cmd_get_rsp(chip, APP_CMD, sd_card->sd_addr,
					     SD_RSP_TYPE_R1, NULL, 0);
		if (retval != STATUS_SUCCESS)
			goto status_fail;
		retval = sd_send_cmd_get_rsp(chip, SET_BUS_WIDTH, 2,
					     SD_RSP_TYPE_R1, NULL, 0);
		if (retval != STATUS_SUCCESS)
			goto status_fail;
		switch_bus_width = SD_BUS_WIDTH_4;
	} else {
		switch_bus_width = SD_BUS_WIDTH_1;
	}
	/* CMD16: fix the block length at 512 bytes. */
	retval = sd_send_cmd_get_rsp(chip, SET_BLOCKLEN, 0x200, SD_RSP_TYPE_R1,
				     NULL, 0);
	if (retval != STATUS_SUCCESS)
		goto status_fail;
	retval = sd_set_clock_divider(chip, SD_CLK_DIVIDE_0);
	if (retval != STATUS_SUCCESS)
		goto status_fail;
	/* NOTE(review): raw_csd[4] & 0x40 presumably tests the CCC class
	 * bit for CMD6 switch support — confirm against the SD spec.
	 */
	if (!(sd_card->raw_csd[4] & 0x40))
		sd_dont_switch = true;
	if (!sd_dont_switch) {
		if (sd20_mode) {
			/* Set sd_switch_fail here, because we needn't
			 * switch to UHS mode
			 */
			sd_card->sd_switch_fail = SDR104_SUPPORT_MASK |
				DDR50_SUPPORT_MASK | SDR50_SUPPORT_MASK;
		}
		/* Check the card whether follow SD1.1 spec or higher */
		retval = sd_check_spec(chip, switch_bus_width);
		if (retval == STATUS_SUCCESS) {
			retval = sd_switch_function(chip, switch_bus_width);
			if (retval != STATUS_SUCCESS) {
				/* Switch failed: power-cycle and re-init
				 * without attempting the switch again.
				 */
				sd_init_power(chip);
				sd_dont_switch = true;
				try_sdio = false;
				goto switch_fail;
			}
		} else {
			if (support_1v8) {
				sd_init_power(chip);
				sd_dont_switch = true;
				try_sdio = false;
				goto switch_fail;
			}
		}
	}
	if (!support_1v8) {
		/* ACMD6: switch to 4-bit bus after function switching. */
		retval = sd_send_cmd_get_rsp(chip, APP_CMD, sd_card->sd_addr,
					     SD_RSP_TYPE_R1, NULL, 0);
		if (retval != STATUS_SUCCESS)
			goto status_fail;
		retval = sd_send_cmd_get_rsp(chip, SET_BUS_WIDTH, 2,
					     SD_RSP_TYPE_R1, NULL, 0);
		if (retval != STATUS_SUCCESS)
			goto status_fail;
	}
#ifdef SUPPORT_SD_LOCK
	sd_card->sd_lock_status &= ~SD_LOCK_1BIT_MODE;
#endif
	/* SD 3.0 speed modes need phase tuning plus a read-back check;
	 * any failure falls back to SD 2.0 mode via switch_fail.
	 */
	if (!sd20_mode && CHK_SD30_SPEED(sd_card)) {
		int read_lba0 = 1;
		retval = rtsx_write_register(chip, SD30_DRIVE_SEL, 0x07,
					     chip->sd30_drive_sel_1v8);
		if (retval)
			return retval;
		retval = sd_set_init_para(chip);
		if (retval != STATUS_SUCCESS)
			goto status_fail;
		if (CHK_SD_DDR50(sd_card))
			retval = sd_ddr_tuning(chip);
		else
			retval = sd_sdr_tuning(chip);
		if (retval != STATUS_SUCCESS) {
			retval = sd_init_power(chip);
			if (retval != STATUS_SUCCESS)
				goto status_fail;
			try_sdio = false;
			sd20_mode = true;
			goto switch_fail;
		}
		sd_send_cmd_get_rsp(chip, SEND_STATUS, sd_card->sd_addr,
				    SD_RSP_TYPE_R1, NULL, 0);
		if (CHK_SD_DDR50(sd_card)) {
			retval = sd_wait_state_data_ready(chip, 0x08, 1, 1000);
			if (retval != STATUS_SUCCESS)
				read_lba0 = 0;
		}
		if (read_lba0) {
			retval = sd_read_lba0(chip);
			if (retval != STATUS_SUCCESS) {
				retval = sd_init_power(chip);
				if (retval != STATUS_SUCCESS)
					goto status_fail;
				try_sdio = false;
				sd20_mode = true;
				goto switch_fail;
			}
		}
	}
	retval = sd_check_wp_state(chip);
	if (retval != STATUS_SUCCESS)
		goto status_fail;
	chip->card_bus_width[chip->card2lun[SD_CARD]] = 4;
#ifdef SUPPORT_SD_LOCK
	if (sd_card->sd_lock_status & SD_UNLOCK_POW_ON) {
		/* NOTE(review): presets the SD block count registers to
		 * 0x0200 (512) for the pending lock/unlock data transfer —
		 * confirm against the datasheet.
		 */
		retval = rtsx_write_register(chip, REG_SD_BLOCK_CNT_H, 0xFF,
					     0x02);
		if (retval)
			return retval;
		retval = rtsx_write_register(chip, REG_SD_BLOCK_CNT_L, 0xFF,
					     0x00);
		if (retval)
			return retval;
	}
#endif
	return STATUS_SUCCESS;
status_fail:
	return STATUS_FAIL;
}
/*
 * MMC bus-test procedure for @width (MMC_8BIT_BUS or MMC_4BIT_BUS).
 *
 * Sends BUSTEST_W with a known pattern, reads back the card's inverted
 * echo through the ping-pong buffer via BUSTEST_R, and on a matching echo
 * issues CMD6 SWITCH writing EXT_CSD BUS_WIDTH (index 0xB7) to commit the
 * width (DDR variant when the card is already in DDR52 mode).
 *
 * Returns SWITCH_SUCCESS, SWITCH_FAIL (pattern mismatch or the initial
 * command failed) or SWITCH_ERR (transfer-level error).
 */
static int mmc_test_switch_bus(struct rtsx_chip *chip, u8 width)
{
	struct sd_info *sd_card = &chip->sd_card;
	int retval;
	u8 buf[8] = {0}, bus_width, *ptr;
	u16 byte_cnt;
	int len;
	retval = sd_send_cmd_get_rsp(chip, BUSTEST_W, 0, SD_RSP_TYPE_R1, NULL,
				     0);
	if (retval != STATUS_SUCCESS)
		return SWITCH_FAIL;
	/* Test pattern: 0x55/0xAA over 8 lines, 0x5A over 4 lines. */
	if (width == MMC_8BIT_BUS) {
		buf[0] = 0x55;
		buf[1] = 0xAA;
		len = 8;
		byte_cnt = 8;
		bus_width = SD_BUS_WIDTH_8;
	} else {
		buf[0] = 0x5A;
		len = 4;
		byte_cnt = 4;
		bus_width = SD_BUS_WIDTH_4;
	}
	/* NOTE(review): bit 1 of REG_SD_CFG3 is toggled around the test
	 * write — presumably a bus-test mode enable; confirm against the
	 * datasheet.
	 */
	retval = rtsx_write_register(chip, REG_SD_CFG3, 0x02, 0x02);
	if (retval != STATUS_SUCCESS)
		return SWITCH_ERR;
	retval = sd_write_data(chip, SD_TM_AUTO_WRITE_3, NULL, 0, byte_cnt, 1,
			       bus_width, buf, len, 100);
	if (retval != STATUS_SUCCESS) {
		rtsx_clear_sd_error(chip);
		rtsx_write_register(chip, REG_SD_CFG3, 0x02, 0);
		return SWITCH_ERR;
	}
	retval = rtsx_write_register(chip, REG_SD_CFG3, 0x02, 0);
	if (retval != STATUS_SUCCESS)
		return SWITCH_ERR;
	dev_dbg(rtsx_dev(chip), "SD/MMC CMD %d\n", BUSTEST_R);
	/* Build the BUSTEST_R read into the ping-pong buffer by hand. */
	rtsx_init_cmd(chip);
	rtsx_add_cmd(chip, WRITE_REG_CMD, REG_SD_CMD0, 0xFF, 0x40 | BUSTEST_R);
	if (width == MMC_8BIT_BUS)
		rtsx_add_cmd(chip, WRITE_REG_CMD, REG_SD_BYTE_CNT_L,
			     0xFF, 0x08);
	else
		rtsx_add_cmd(chip, WRITE_REG_CMD, REG_SD_BYTE_CNT_L,
			     0xFF, 0x04);
	rtsx_add_cmd(chip, WRITE_REG_CMD, REG_SD_BLOCK_CNT_L, 0xFF, 1);
	rtsx_add_cmd(chip, WRITE_REG_CMD, REG_SD_BLOCK_CNT_H, 0xFF, 0);
	rtsx_add_cmd(chip, WRITE_REG_CMD, REG_SD_CFG2, 0xFF, SD_CALCULATE_CRC7 |
		     SD_NO_CHECK_CRC16 | SD_NO_WAIT_BUSY_END |
		     SD_CHECK_CRC7 | SD_RSP_LEN_6);
	rtsx_add_cmd(chip, WRITE_REG_CMD, CARD_DATA_SOURCE, 0x01,
		     PINGPONG_BUFFER);
	rtsx_add_cmd(chip, WRITE_REG_CMD, REG_SD_TRANSFER, 0xFF,
		     SD_TM_NORMAL_READ | SD_TRANSFER_START);
	rtsx_add_cmd(chip, CHECK_REG_CMD, REG_SD_TRANSFER, SD_TRANSFER_END,
		     SD_TRANSFER_END);
	/* Read the echoed pattern bytes out of the ping-pong buffer. */
	rtsx_add_cmd(chip, READ_REG_CMD, PPBUF_BASE2, 0, 0);
	if (width == MMC_8BIT_BUS)
		rtsx_add_cmd(chip, READ_REG_CMD, PPBUF_BASE2 + 1, 0, 0);
	retval = rtsx_send_cmd(chip, SD_CARD, 100);
	if (retval < 0) {
		rtsx_clear_sd_error(chip);
		return SWITCH_ERR;
	}
	/* ptr[0]/ptr[1] hold the PPBUF bytes read above. */
	ptr = rtsx_get_cmd_data(chip) + 1;
	if (width == MMC_8BIT_BUS) {
		dev_dbg(rtsx_dev(chip), "BUSTEST_R [8bits]: 0x%02x 0x%02x\n",
			ptr[0], ptr[1]);
		/* Card echoes the inverse of the written pattern. */
		if (ptr[0] == 0xAA && ptr[1] == 0x55) {
			u8 rsp[5];
			u32 arg;
			if (CHK_MMC_DDR52(sd_card))
				arg = 0x03B70600;
			else
				arg = 0x03B70200;
			retval = sd_send_cmd_get_rsp(chip, SWITCH, arg,
						     SD_RSP_TYPE_R1b, rsp, 5);
			if (retval == STATUS_SUCCESS &&
			    !(rsp[4] & MMC_SWITCH_ERR))
				return SWITCH_SUCCESS;
		}
	} else {
		dev_dbg(rtsx_dev(chip), "BUSTEST_R [4bits]: 0x%02x\n", ptr[0]);
		if (ptr[0] == 0xA5) {
			u8 rsp[5];
			u32 arg;
			if (CHK_MMC_DDR52(sd_card))
				arg = 0x03B70500;
			else
				arg = 0x03B70100;
			retval = sd_send_cmd_get_rsp(chip, SWITCH, arg,
						     SD_RSP_TYPE_R1b, rsp, 5);
			if (retval == STATUS_SUCCESS &&
			    !(rsp[4] & MMC_SWITCH_ERR))
				return SWITCH_SUCCESS;
		}
	}
	return SWITCH_FAIL;
}
/*
 * Read EXT_CSD and switch the MMC card to high speed and a wider bus.
 *
 * Fetches the sector count (for SECTOR_MODE cards) and CARD_TYPE from
 * EXT_CSD through the ping-pong buffer, sets the card type flags, issues
 * CMD6 SWITCH for HS_TIMING, re-programs the host clock and finally runs
 * the 8-bit then 4-bit bus-test procedure.
 *
 * @switch_ddr: allow selecting DDR52 when the card advertises it.
 *
 * Returns STATUS_SUCCESS or STATUS_FAIL.
 */
static int mmc_switch_timing_bus(struct rtsx_chip *chip, bool switch_ddr)
{
	struct sd_info *sd_card = &chip->sd_card;
	int retval;
	u8 *ptr, card_type, card_type_mask = 0;

	CLR_MMC_HS(sd_card);

	dev_dbg(rtsx_dev(chip), "SD/MMC CMD %d\n", SEND_EXT_CSD);
	/* CMD8 (SEND_EXT_CSD): 512-byte read into the ping-pong buffer;
	 * only the bytes of interest are read back below.
	 */
	rtsx_init_cmd(chip);
	rtsx_add_cmd(chip, WRITE_REG_CMD, REG_SD_CMD0, 0xFF,
		     0x40 | SEND_EXT_CSD);
	rtsx_add_cmd(chip, WRITE_REG_CMD, REG_SD_CMD1, 0xFF, 0);
	rtsx_add_cmd(chip, WRITE_REG_CMD, REG_SD_CMD2, 0xFF, 0);
	rtsx_add_cmd(chip, WRITE_REG_CMD, REG_SD_CMD3, 0xFF, 0);
	rtsx_add_cmd(chip, WRITE_REG_CMD, REG_SD_CMD4, 0xFF, 0);
	rtsx_add_cmd(chip, WRITE_REG_CMD, REG_SD_BYTE_CNT_L, 0xFF, 0);
	rtsx_add_cmd(chip, WRITE_REG_CMD, REG_SD_BYTE_CNT_H, 0xFF, 2);
	rtsx_add_cmd(chip, WRITE_REG_CMD, REG_SD_BLOCK_CNT_L, 0xFF, 1);
	rtsx_add_cmd(chip, WRITE_REG_CMD, REG_SD_BLOCK_CNT_H, 0xFF, 0);
	rtsx_add_cmd(chip, WRITE_REG_CMD, REG_SD_CFG2, 0xFF,
		     SD_CALCULATE_CRC7 | SD_CHECK_CRC16 | SD_NO_WAIT_BUSY_END |
		     SD_CHECK_CRC7 | SD_RSP_LEN_6);
	rtsx_add_cmd(chip, WRITE_REG_CMD, CARD_DATA_SOURCE, 0x01,
		     PINGPONG_BUFFER);
	rtsx_add_cmd(chip, WRITE_REG_CMD, REG_SD_TRANSFER, 0xFF,
		     SD_TM_NORMAL_READ | SD_TRANSFER_START);
	rtsx_add_cmd(chip, CHECK_REG_CMD, REG_SD_TRANSFER, SD_TRANSFER_END,
		     SD_TRANSFER_END);
	/* EXT_CSD[196] = CARD_TYPE, EXT_CSD[212..215] = SEC_COUNT. */
	rtsx_add_cmd(chip, READ_REG_CMD, PPBUF_BASE2 + 196, 0xFF, 0);
	rtsx_add_cmd(chip, READ_REG_CMD, PPBUF_BASE2 + 212, 0xFF, 0);
	rtsx_add_cmd(chip, READ_REG_CMD, PPBUF_BASE2 + 213, 0xFF, 0);
	rtsx_add_cmd(chip, READ_REG_CMD, PPBUF_BASE2 + 214, 0xFF, 0);
	rtsx_add_cmd(chip, READ_REG_CMD, PPBUF_BASE2 + 215, 0xFF, 0);
	retval = rtsx_send_cmd(chip, SD_CARD, 1000);
	if (retval < 0) {
		if (retval == -ETIMEDOUT) {
			rtsx_clear_sd_error(chip);
			sd_send_cmd_get_rsp(chip, SEND_STATUS, sd_card->sd_addr,
					    SD_RSP_TYPE_R1, NULL, 0);
		}
		return STATUS_FAIL;
	}

	/* ptr[0] = transfer status, ptr[1] = CARD_TYPE, ptr[2..5] =
	 * SEC_COUNT (little endian).
	 */
	ptr = rtsx_get_cmd_data(chip);
	if (ptr[0] & SD_TRANSFER_ERR) {
		sd_send_cmd_get_rsp(chip, SEND_STATUS, sd_card->sd_addr,
				    SD_RSP_TYPE_R1, NULL, 0);
		return STATUS_FAIL;
	}

	if (CHK_MMC_SECTOR_MODE(sd_card)) {
		sd_card->capacity = ((u32)ptr[5] << 24) | ((u32)ptr[4] << 16) |
			((u32)ptr[3] << 8) | ((u32)ptr[2]);
	}

	/*
	 * EXT_CSD CARD_TYPE: bit 0 = 26 MHz, bit 1 = 52 MHz, bit 2 =
	 * 52 MHz DDR.  When an SDIO function shares the bus, stay away
	 * from DDR and only consider the SDR speed bits.
	 *
	 * BUG FIX: the mask was hard-coded to 0x03, which stripped bit 2
	 * before the "card_type & 0x04" test below, so the DDR52 branch
	 * was unreachable and MMC DDR52 could never be selected.
	 */
	if (CHK_SDIO_EXIST(chip) && !CHK_SDIO_IGNORED(chip) && chip->sd_io)
		card_type_mask = 0x03;
	else
		card_type_mask = 0x07;
	card_type = ptr[1] & card_type_mask;
	if (card_type) {
		u8 rsp[5];

		if (card_type & 0x04) {
			if (switch_ddr)
				SET_MMC_DDR52(sd_card);
			else
				SET_MMC_52M(sd_card);
		} else if (card_type & 0x02) {
			SET_MMC_52M(sd_card);
		} else {
			SET_MMC_26M(sd_card);
		}

		/* CMD6: set EXT_CSD HS_TIMING (index 0xB9) to 1. */
		retval = sd_send_cmd_get_rsp(chip, SWITCH, 0x03B90100,
					     SD_RSP_TYPE_R1b, rsp, 5);
		if (retval != STATUS_SUCCESS || (rsp[4] & MMC_SWITCH_ERR))
			CLR_MMC_HS(sd_card);
	}

	sd_choose_proper_clock(chip);
	retval = switch_clock(chip, sd_card->sd_clock);
	if (retval != STATUS_SUCCESS)
		return STATUS_FAIL;

	/* Test Bus Procedure */
	retval = mmc_test_switch_bus(chip, MMC_8BIT_BUS);
	if (retval == SWITCH_SUCCESS) {
		SET_MMC_8BIT(sd_card);
		chip->card_bus_width[chip->card2lun[SD_CARD]] = 8;
#ifdef SUPPORT_SD_LOCK
		sd_card->sd_lock_status &= ~SD_LOCK_1BIT_MODE;
#endif
	} else if (retval == SWITCH_FAIL) {
		retval = mmc_test_switch_bus(chip, MMC_4BIT_BUS);
		if (retval == SWITCH_SUCCESS) {
			SET_MMC_4BIT(sd_card);
			chip->card_bus_width[chip->card2lun[SD_CARD]] = 4;
#ifdef SUPPORT_SD_LOCK
			sd_card->sd_lock_status &= ~SD_LOCK_1BIT_MODE;
#endif
		} else if (retval == SWITCH_FAIL) {
			/* Neither width passed: stay on the 1-bit bus. */
			CLR_MMC_8BIT(sd_card);
			CLR_MMC_4BIT(sd_card);
		} else {
			return STATUS_FAIL;
		}
	} else {
		return STATUS_FAIL;
	}

	return STATUS_SUCCESS;
}
/*
 * Full MMC/eMMC initialization state machine.
 *
 * CMD0 reset, CMD1 (SEND_OP_COND) busy loop with separate retry budgets
 * for busy/timeout errors (k, up to 20) and other errors (j, up to 100),
 * CID retrieval, host-assigned RCA (0x00100000), CSD parsing, card select
 * and 512-byte block length.  Cards reporting CSD SPEC_VERS 4 then get
 * the high-speed / bus-width switch and, when the card supports it,
 * DDR52 tuning with a read-back check; any failure there restarts from
 * switch_fail with the failing feature disabled.
 *
 * Returns STATUS_SUCCESS or STATUS_FAIL (a register access error code may
 * be returned as-is in a few places).
 */
static int reset_mmc(struct rtsx_chip *chip)
{
	struct sd_info *sd_card = &chip->sd_card;
	int retval, i = 0, j = 0, k = 0;
	bool switch_ddr = true;
	u8 rsp[16];
	u8 spec_ver = 0;
	u32 temp;
#ifdef SUPPORT_SD_LOCK
	/* A card being unlocked at power-on skips re-identification. */
	if (sd_card->sd_lock_status & SD_UNLOCK_POW_ON)
		goto MMC_UNLOCK_ENTRY;
#endif
	/* Restart point: re-run init with fallback flags adjusted. */
switch_fail:
	retval = sd_prepare_reset(chip);
	if (retval != STATUS_SUCCESS)
		return retval;
	SET_MMC(sd_card);
RTY_MMC_RST:
	retval = sd_send_cmd_get_rsp(chip, GO_IDLE_STATE, 0, SD_RSP_TYPE_R0,
				     NULL, 0);
	if (retval != STATUS_SUCCESS)
		return STATUS_FAIL;
	/* CMD1 loop: repeat until the card clears its busy bit (OCR bit
	 * 31, read back in rsp[1] & 0x80).  0x40000000 advertises sector
	 * addressing support.
	 */
	do {
		if (detect_card_cd(chip, SD_CARD) != STATUS_SUCCESS) {
			sd_set_err_code(chip, SD_NO_CARD);
			return STATUS_FAIL;
		}
		retval = sd_send_cmd_get_rsp(chip, SEND_OP_COND,
					     (SUPPORT_VOLTAGE | 0x40000000),
					     SD_RSP_TYPE_R3, rsp, 5);
		if (retval != STATUS_SUCCESS) {
			/* Busy/timeout and other errors retry from CMD0
			 * with separate budgets.
			 */
			if (sd_check_err_code(chip, SD_BUSY) ||
			    sd_check_err_code(chip, SD_TO_ERR)) {
				k++;
				if (k < 20) {
					sd_clr_err_code(chip);
					goto RTY_MMC_RST;
				} else {
					return STATUS_FAIL;
				}
			} else {
				j++;
				if (j < 100) {
					sd_clr_err_code(chip);
					goto RTY_MMC_RST;
				} else {
					return STATUS_FAIL;
				}
			}
		}
		wait_timeout(20);
		i++;
	} while (!(rsp[1] & 0x80) && (i < 255));
	if (i == 255)
		return STATUS_FAIL;
	/* OCR bits 30:29 = 0b10 means the card uses sector addressing. */
	if ((rsp[1] & 0x60) == 0x40)
		SET_MMC_SECTOR_MODE(sd_card);
	else
		CLR_MMC_SECTOR_MODE(sd_card);
	retval = sd_send_cmd_get_rsp(chip, ALL_SEND_CID, 0, SD_RSP_TYPE_R2,
				     NULL, 0);
	if (retval != STATUS_SUCCESS)
		return STATUS_FAIL;
	/* MMC: the host assigns the relative address (CMD3). */
	sd_card->sd_addr = 0x00100000;
	retval = sd_send_cmd_get_rsp(chip, SET_RELATIVE_ADDR, sd_card->sd_addr,
				     SD_RSP_TYPE_R6, rsp, 5);
	if (retval != STATUS_SUCCESS)
		return STATUS_FAIL;
	retval = sd_check_csd(chip, 1);
	if (retval != STATUS_SUCCESS)
		return STATUS_FAIL;
	/* CSD SPEC_VERS field; 4 marks an MMC 4.x card. */
	spec_ver = (sd_card->raw_csd[0] & 0x3C) >> 2;
	retval = sd_select_card(chip, 1);
	if (retval != STATUS_SUCCESS)
		return STATUS_FAIL;
	/* CMD16: fix the block length at 512 bytes. */
	retval = sd_send_cmd_get_rsp(chip, SET_BLOCKLEN, 0x200, SD_RSP_TYPE_R1,
				     NULL, 0);
	if (retval != STATUS_SUCCESS)
		return STATUS_FAIL;
#ifdef SUPPORT_SD_LOCK
MMC_UNLOCK_ENTRY:
	retval = sd_update_lock_status(chip);
	if (retval != STATUS_SUCCESS)
		return STATUS_FAIL;
#endif
	retval = sd_set_clock_divider(chip, SD_CLK_DIVIDE_0);
	if (retval != STATUS_SUCCESS)
		return STATUS_FAIL;
	chip->card_bus_width[chip->card2lun[SD_CARD]] = 1;
	if (!sd_card->mmc_dont_switch_bus) {
		if (spec_ver == 4) {
			/* MMC 4.x Cards */
			retval = mmc_switch_timing_bus(chip, switch_ddr);
			if (retval != STATUS_SUCCESS) {
				/* Switch failed: re-init without it. */
				retval = sd_init_power(chip);
				if (retval != STATUS_SUCCESS)
					return STATUS_FAIL;
				sd_card->mmc_dont_switch_bus = 1;
				goto switch_fail;
			}
		}
		/* Sector-mode cards must have reported a capacity. */
		if (CHK_MMC_SECTOR_MODE(sd_card) && sd_card->capacity == 0)
			return STATUS_FAIL;
		/* DDR52: tune phases and verify with a block read; on any
		 * failure fall back to non-DDR operation.
		 */
		if (switch_ddr && CHK_MMC_DDR52(sd_card)) {
			retval = sd_set_init_para(chip);
			if (retval != STATUS_SUCCESS)
				return STATUS_FAIL;
			retval = mmc_ddr_tuning(chip);
			if (retval != STATUS_SUCCESS) {
				retval = sd_init_power(chip);
				if (retval != STATUS_SUCCESS)
					return STATUS_FAIL;
				switch_ddr = false;
				goto switch_fail;
			}
			retval = sd_wait_state_data_ready(chip, 0x08, 1, 1000);
			if (retval == STATUS_SUCCESS) {
				retval = sd_read_lba0(chip);
				if (retval != STATUS_SUCCESS) {
					retval = sd_init_power(chip);
					if (retval != STATUS_SUCCESS)
						return STATUS_FAIL;
					switch_ddr = false;
					goto switch_fail;
				}
			}
		}
	}
#ifdef SUPPORT_SD_LOCK
	if (sd_card->sd_lock_status & SD_UNLOCK_POW_ON) {
		/* NOTE(review): presets the SD block count registers to
		 * 0x0200 (512) for the pending lock/unlock data transfer —
		 * confirm against the datasheet.
		 */
		retval = rtsx_write_register(chip, REG_SD_BLOCK_CNT_H, 0xFF,
					     0x02);
		if (retval)
			return retval;
		retval = rtsx_write_register(chip, REG_SD_BLOCK_CNT_L, 0xFF,
					     0x00);
		if (retval)
			return retval;
	}
#endif
	/* Sample the mechanical write-protect switch. */
	temp = rtsx_readl(chip, RTSX_BIPR);
	if (temp & SD_WRITE_PROTECT)
		chip->card_wp |= SD_CARD;
	return STATUS_SUCCESS;
}
/*
 * reset_sd_card - probe and (re)initialize the card in the SD/MMC socket.
 *
 * Wipes all cached card state, enables the card clock and powers the
 * slot, then tries SD or MMC initialization (order controlled by the
 * RESET_MMC_FIRST bit in chip->sd_ctl), falling back to the other
 * protocol when the first attempt fails.  On success the LUN capacity
 * and the clock/timing parameters are programmed from the probed card.
 *
 * Returns STATUS_SUCCESS on success, STATUS_FAIL otherwise (also when
 * the slot is handed over to an SDIO function - see below).
 */
int reset_sd_card(struct rtsx_chip *chip)
{
	struct sd_info *sd_card = &chip->sd_card;
	int retval;
	sd_init_reg_addr(chip);
	/* Forget everything cached from any previous card. */
	memset(sd_card, 0, sizeof(struct sd_info));
	chip->capacity[chip->card2lun[SD_CARD]] = 0;
	retval = enable_card_clock(chip, SD_CARD);
	if (retval != STATUS_SUCCESS)
		return STATUS_FAIL;
	/*
	 * If the storage function is to be ignored in favour of a present
	 * SDIO function, only set up the pull controls / share mode for
	 * SDIO and return STATUS_FAIL so no storage LUN comes up.
	 */
	if (chip->ignore_sd && CHK_SDIO_EXIST(chip) &&
	    !CHK_SDIO_IGNORED(chip)) {
		if (chip->asic_code) {
			retval = sd_pull_ctl_enable(chip);
			if (retval != STATUS_SUCCESS)
				return STATUS_FAIL;
		} else {
			/* FPGA variant uses a dedicated pull-control register. */
			retval = rtsx_write_register(chip, FPGA_PULL_CTL,
						     FPGA_SD_PULL_CTL_BIT |
						     0x20, 0);
			if (retval != STATUS_SUCCESS)
				return STATUS_FAIL;
		}
		retval = card_share_mode(chip, SD_CARD);
		if (retval != STATUS_SUCCESS)
			return STATUS_FAIL;
		chip->sd_io = 1;
		return STATUS_FAIL;
	}
	retval = sd_init_power(chip);
	if (retval != STATUS_SUCCESS)
		return STATUS_FAIL;
	/* Try MMC first or SD first depending on policy; fall back once. */
	if (chip->sd_ctl & RESET_MMC_FIRST) {
		retval = reset_mmc(chip);
		if (retval != STATUS_SUCCESS) {
			if (sd_check_err_code(chip, SD_NO_CARD))
				return STATUS_FAIL;
			retval = reset_sd(chip);
			if (retval != STATUS_SUCCESS)
				return STATUS_FAIL;
		}
	} else {
		retval = reset_sd(chip);
		if (retval != STATUS_SUCCESS) {
			if (sd_check_err_code(chip, SD_NO_CARD))
				return STATUS_FAIL;
			/* An SDIO card must not be re-probed as MMC. */
			if (chip->sd_io)
				return STATUS_FAIL;
			retval = reset_mmc(chip);
			if (retval != STATUS_SUCCESS)
				return STATUS_FAIL;
		}
	}
	/* Full-speed host clock and 512-byte (0x200) default byte count. */
	retval = sd_set_clock_divider(chip, SD_CLK_DIVIDE_0);
	if (retval != STATUS_SUCCESS)
		return STATUS_FAIL;
	retval = rtsx_write_register(chip, REG_SD_BYTE_CNT_L, 0xFF, 0);
	if (retval)
		return retval;
	retval = rtsx_write_register(chip, REG_SD_BYTE_CNT_H, 0xFF, 2);
	if (retval)
		return retval;
	/* Publish the probed capacity to the LUN table. */
	chip->capacity[chip->card2lun[SD_CARD]] = sd_card->capacity;
	retval = sd_set_init_para(chip);
	if (retval != STATUS_SUCCESS)
		return STATUS_FAIL;
	dev_dbg(rtsx_dev(chip), "sd_card->sd_type = 0x%x\n", sd_card->sd_type);
	return STATUS_SUCCESS;
}
/*
 * reset_mmc_only - re-initialize the card strictly via the MMC path.
 *
 * Used from the sd_rw() error path (CRC error on a 4/8-bit MMC bus):
 * cached card state is cleared, power and clock are re-applied, and
 * reset_mmc() is run without ever attempting SD initialization.
 *
 * Returns STATUS_SUCCESS on success, STATUS_FAIL otherwise.
 */
static int reset_mmc_only(struct rtsx_chip *chip)
{
	struct sd_info *sd_card = &chip->sd_card;
	int retval;
	/* Drop all state remembered from the previous initialization. */
	sd_card->sd_type = 0;
	sd_card->seq_mode = 0;
	sd_card->sd_data_buf_ready = 0;
	sd_card->capacity = 0;
	sd_card->sd_switch_fail = 0;
#ifdef SUPPORT_SD_LOCK
	sd_card->sd_lock_status = 0;
	sd_card->sd_erase_status = 0;
#endif
	chip->capacity[chip->card2lun[SD_CARD]] = sd_card->capacity = 0;
	retval = enable_card_clock(chip, SD_CARD);
	if (retval != STATUS_SUCCESS)
		return STATUS_FAIL;
	retval = sd_init_power(chip);
	if (retval != STATUS_SUCCESS)
		return STATUS_FAIL;
	retval = reset_mmc(chip);
	if (retval != STATUS_SUCCESS)
		return STATUS_FAIL;
	/* Full-speed host clock and 512-byte (0x200) default byte count. */
	retval = sd_set_clock_divider(chip, SD_CLK_DIVIDE_0);
	if (retval != STATUS_SUCCESS)
		return STATUS_FAIL;
	retval = rtsx_write_register(chip, REG_SD_BYTE_CNT_L, 0xFF, 0);
	if (retval)
		return retval;
	retval = rtsx_write_register(chip, REG_SD_BYTE_CNT_H, 0xFF, 2);
	if (retval)
		return retval;
	chip->capacity[chip->card2lun[SD_CARD]] = sd_card->capacity;
	retval = sd_set_init_para(chip);
	if (retval != STATUS_SUCCESS)
		return STATUS_FAIL;
	dev_dbg(rtsx_dev(chip), "In %s, sd_card->sd_type = 0x%x\n",
		__func__, sd_card->sd_type);
	return STATUS_SUCCESS;
}
#define WAIT_DATA_READY_RTY_CNT 255
/*
 * wait_data_buf_ready - poll the card until its data buffer is ready.
 *
 * Repeatedly issues CMD13 (SEND_STATUS); the response handler sets
 * sd_card->sd_data_buf_ready as a side effect.  Gives up after
 * WAIT_DATA_READY_RTY_CNT attempts or when the card is removed.
 *
 * Returns STATUS_SUCCESS once the buffer is ready, STATUS_FAIL on
 * card removal, command failure, or timeout.
 */
static int wait_data_buf_ready(struct rtsx_chip *chip)
{
	struct sd_info *sd_card = &chip->sd_card;
	int attempt = 0;

	while (attempt++ < WAIT_DATA_READY_RTY_CNT) {
		int err;

		/* Abort immediately if the card was pulled. */
		if (detect_card_cd(chip, SD_CARD) != STATUS_SUCCESS) {
			sd_set_err_code(chip, SD_NO_CARD);
			return STATUS_FAIL;
		}

		sd_card->sd_data_buf_ready = 0;
		err = sd_send_cmd_get_rsp(chip, SEND_STATUS,
					  sd_card->sd_addr, SD_RSP_TYPE_R1,
					  NULL, 0);
		if (err != STATUS_SUCCESS)
			return STATUS_FAIL;

		/* Buffer ready: one final CMD13 gives the result we return. */
		if (sd_card->sd_data_buf_ready)
			return sd_send_cmd_get_rsp(chip, SEND_STATUS,
						   sd_card->sd_addr,
						   SD_RSP_TYPE_R1, NULL, 0);
	}

	sd_set_err_code(chip, SD_TO_ERR);
	return STATUS_FAIL;
}
/*
 * sd_stop_seq_mode - close any open sequential multi-block transaction.
 *
 * If a sequential transfer is in flight, switch the clock, send CMD12
 * (STOP_TRANSMISSION), wait for DAT0 to go ready, and flush the ring
 * buffer.  Errors only record an SD status error code; the sequential
 * state is cleared regardless.
 */
void sd_stop_seq_mode(struct rtsx_chip *chip)
{
	struct sd_info *sd_card = &chip->sd_card;
	int err;

	/* Nothing to do unless a sequential transfer is open. */
	if (!sd_card->seq_mode)
		return;

	if (sd_switch_clock(chip) != STATUS_SUCCESS)
		return;

	err = sd_send_cmd_get_rsp(chip, STOP_TRANSMISSION, 0,
				  SD_RSP_TYPE_R1b, NULL, 0);
	if (err != STATUS_SUCCESS)
		sd_set_err_code(chip, SD_STS_ERR);

	err = sd_wait_state_data_ready(chip, 0x08, 1, 1000);
	if (err != STATUS_SUCCESS)
		sd_set_err_code(chip, SD_STS_ERR);

	sd_card->seq_mode = 0;

	rtsx_write_register(chip, RBCTL, RB_FLUSH, RB_FLUSH);
}
static inline int sd_auto_tune_clock(struct rtsx_chip *chip)
{
struct sd_info *sd_card = &chip->sd_card;
int retval;
if (chip->asic_code) {
if (sd_card->sd_clock > 30)
sd_card->sd_clock -= 20;
} else {
switch (sd_card->sd_clock) {
case CLK_200:
sd_card->sd_clock = CLK_150;
break;
case CLK_150:
sd_card->sd_clock = CLK_120;
break;
case CLK_120:
sd_card->sd_clock = CLK_100;
break;
case CLK_100:
sd_card->sd_clock = CLK_80;
break;
case CLK_80:
sd_card->sd_clock = CLK_60;
break;
case CLK_60:
sd_card->sd_clock = CLK_50;
break;
default:
break;
}
}
retval = sd_switch_clock(chip);
if (retval != STATUS_SUCCESS)
return STATUS_FAIL;
return STATUS_SUCCESS;
}
/*
 * sd_rw - transfer sector_cnt sectors between the SCSI buffer and the card.
 *
 * Exploits the controller's "sequential mode": when the new request is a
 * straight continuation of the previous one (same direction, next sector)
 * the open READ/WRITE_MULTIPLE_BLOCK transaction is reused and no new
 * command is sent to the card.  Otherwise any open transaction is closed
 * with CMD12 (STOP_TRANSMISSION) and a fresh one is started.
 *
 * Returns STATUS_SUCCESS, STATUS_TIMEDOUT, or STATUS_FAIL.  On failure,
 * chip->rw_need_retry and an sd error code are set so the caller may
 * retry; the RW_FAIL path additionally re-tunes the clock or resets the
 * card depending on the recorded error code.
 */
int sd_rw(struct scsi_cmnd *srb, struct rtsx_chip *chip, u32 start_sector,
	  u16 sector_cnt)
{
	struct sd_info *sd_card = &chip->sd_card;
	u32 data_addr;
	u8 cfg2;
	int retval;
	if (srb->sc_data_direction == DMA_FROM_DEVICE) {
		dev_dbg(rtsx_dev(chip), "%s: Read %d %s from 0x%x\n", __func__,
			sector_cnt, (sector_cnt > 1) ? "sectors" : "sector",
			start_sector);
	} else {
		dev_dbg(rtsx_dev(chip), "%s: Write %d %s to 0x%x\n", __func__,
			sector_cnt, (sector_cnt > 1) ? "sectors" : "sector",
			start_sector);
	}
	sd_card->cleanup_counter = 0;
	/* Card not (or no longer) initialized: bring it up first. */
	if (!(chip->card_ready & SD_CARD)) {
		sd_card->seq_mode = 0;
		retval = reset_sd_card(chip);
		if (retval == STATUS_SUCCESS) {
			chip->card_ready |= SD_CARD;
			chip->card_fail &= ~SD_CARD;
		} else {
			chip->card_ready &= ~SD_CARD;
			chip->card_fail |= SD_CARD;
			chip->capacity[chip->card2lun[SD_CARD]] = 0;
			chip->rw_need_retry = 1;
			return STATUS_FAIL;
		}
	}
	/*
	 * Standard-capacity cards are byte-addressed (sector << 9);
	 * high-capacity SD and sector-mode MMC take sector addresses.
	 */
	if (!CHK_SD_HCXC(sd_card) && !CHK_MMC_SECTOR_MODE(sd_card))
		data_addr = start_sector << 9;
	else
		data_addr = start_sector;
	sd_clr_err_code(chip);
	retval = sd_switch_clock(chip);
	if (retval != STATUS_SUCCESS) {
		sd_set_err_code(chip, SD_IO_ERR);
		goto RW_FAIL;
	}
	/*
	 * Sequential mode is open but this request does not continue it
	 * (direction changed or sectors not contiguous): close the old
	 * transaction with CMD12 and flush the ring buffer.
	 */
	if (sd_card->seq_mode &&
	    (sd_card->pre_dir != srb->sc_data_direction ||
	     ((sd_card->pre_sec_addr + sd_card->pre_sec_cnt) !=
	      start_sector))) {
		/*
		 * NOTE(review): extra CMD13 before/after CMD12 for short
		 * low-speed reads looks like a card-quirk workaround -
		 * intent is not documented here; keep as-is.
		 */
		if (sd_card->pre_sec_cnt < 0x80 &&
		    sd_card->pre_dir == DMA_FROM_DEVICE &&
		    !CHK_SD30_SPEED(sd_card) &&
		    !CHK_SD_HS(sd_card) &&
		    !CHK_MMC_HS(sd_card)) {
			sd_send_cmd_get_rsp(chip, SEND_STATUS, sd_card->sd_addr,
					    SD_RSP_TYPE_R1, NULL, 0);
		}
		retval = sd_send_cmd_get_rsp(chip, STOP_TRANSMISSION, 0,
					     SD_RSP_TYPE_R1b, NULL, 0);
		if (retval != STATUS_SUCCESS) {
			chip->rw_need_retry = 1;
			sd_set_err_code(chip, SD_STS_ERR);
			goto RW_FAIL;
		}
		sd_card->seq_mode = 0;
		retval = rtsx_write_register(chip, RBCTL, RB_FLUSH, RB_FLUSH);
		if (retval != STATUS_SUCCESS) {
			sd_set_err_code(chip, SD_IO_ERR);
			goto RW_FAIL;
		}
		if (sd_card->pre_sec_cnt < 0x80 &&
		    !CHK_SD30_SPEED(sd_card) &&
		    !CHK_SD_HS(sd_card) &&
		    !CHK_MMC_HS(sd_card)) {
			sd_send_cmd_get_rsp(chip, SEND_STATUS, sd_card->sd_addr,
					    SD_RSP_TYPE_R1, NULL, 0);
		}
	}
	/* Program byte count (512), block count, data source and bus width. */
	rtsx_init_cmd(chip);
	rtsx_add_cmd(chip, WRITE_REG_CMD, REG_SD_BYTE_CNT_L, 0xFF, 0x00);
	rtsx_add_cmd(chip, WRITE_REG_CMD, REG_SD_BYTE_CNT_H, 0xFF, 0x02);
	rtsx_add_cmd(chip, WRITE_REG_CMD, REG_SD_BLOCK_CNT_L, 0xFF,
		     (u8)sector_cnt);
	rtsx_add_cmd(chip, WRITE_REG_CMD, REG_SD_BLOCK_CNT_H, 0xFF,
		     (u8)(sector_cnt >> 8));
	rtsx_add_cmd(chip, WRITE_REG_CMD, CARD_DATA_SOURCE, 0x01, RING_BUFFER);
	if (CHK_MMC_8BIT(sd_card))
		rtsx_add_cmd(chip, WRITE_REG_CMD, REG_SD_CFG1,
			     0x03, SD_BUS_WIDTH_8);
	else if (CHK_MMC_4BIT(sd_card) || CHK_SD(sd_card))
		rtsx_add_cmd(chip, WRITE_REG_CMD, REG_SD_CFG1,
			     0x03, SD_BUS_WIDTH_4);
	else
		rtsx_add_cmd(chip, WRITE_REG_CMD, REG_SD_CFG1,
			     0x03, SD_BUS_WIDTH_1);
	if (sd_card->seq_mode) {
		/*
		 * Continuation of an open transaction: no command phase,
		 * just stream more data (AUTO_READ_3 / AUTO_WRITE_3).
		 */
		cfg2 = SD_NO_CALCULATE_CRC7 | SD_CHECK_CRC16 |
			SD_NO_WAIT_BUSY_END | SD_NO_CHECK_CRC7 |
			SD_RSP_LEN_0;
		rtsx_add_cmd(chip, WRITE_REG_CMD, REG_SD_CFG2, 0xFF, cfg2);
		trans_dma_enable(srb->sc_data_direction, chip, sector_cnt * 512,
				 DMA_512);
		if (srb->sc_data_direction == DMA_FROM_DEVICE) {
			rtsx_add_cmd(chip, WRITE_REG_CMD, REG_SD_TRANSFER, 0xFF,
				     SD_TM_AUTO_READ_3 | SD_TRANSFER_START);
		} else {
			rtsx_add_cmd(chip, WRITE_REG_CMD, REG_SD_TRANSFER, 0xFF,
				     SD_TM_AUTO_WRITE_3 | SD_TRANSFER_START);
		}
		rtsx_add_cmd(chip, CHECK_REG_CMD, REG_SD_TRANSFER,
			     SD_TRANSFER_END, SD_TRANSFER_END);
		rtsx_send_cmd_no_wait(chip);
	} else {
		if (srb->sc_data_direction == DMA_FROM_DEVICE) {
			/* New read: AUTO_READ_2 issues CMD18 itself. */
			dev_dbg(rtsx_dev(chip), "SD/MMC CMD %d\n",
				READ_MULTIPLE_BLOCK);
			rtsx_add_cmd(chip, WRITE_REG_CMD, REG_SD_CMD0, 0xFF,
				     0x40 | READ_MULTIPLE_BLOCK);
			rtsx_add_cmd(chip, WRITE_REG_CMD, REG_SD_CMD1, 0xFF,
				     (u8)(data_addr >> 24));
			rtsx_add_cmd(chip, WRITE_REG_CMD, REG_SD_CMD2, 0xFF,
				     (u8)(data_addr >> 16));
			rtsx_add_cmd(chip, WRITE_REG_CMD, REG_SD_CMD3, 0xFF,
				     (u8)(data_addr >> 8));
			rtsx_add_cmd(chip, WRITE_REG_CMD, REG_SD_CMD4, 0xFF,
				     (u8)data_addr);
			cfg2 = SD_CALCULATE_CRC7 | SD_CHECK_CRC16 |
				SD_NO_WAIT_BUSY_END | SD_CHECK_CRC7 |
				SD_RSP_LEN_6;
			rtsx_add_cmd(chip, WRITE_REG_CMD, REG_SD_CFG2, 0xFF,
				     cfg2);
			trans_dma_enable(srb->sc_data_direction, chip,
					 sector_cnt * 512, DMA_512);
			rtsx_add_cmd(chip, WRITE_REG_CMD, REG_SD_TRANSFER, 0xFF,
				     SD_TM_AUTO_READ_2 | SD_TRANSFER_START);
			rtsx_add_cmd(chip, CHECK_REG_CMD, REG_SD_TRANSFER,
				     SD_TRANSFER_END, SD_TRANSFER_END);
			rtsx_send_cmd_no_wait(chip);
		} else {
			/*
			 * New write: flush the queued register setup, wait
			 * for the card's buffer, send CMD25 ourselves, then
			 * stream data with AUTO_WRITE_3.
			 */
			retval = rtsx_send_cmd(chip, SD_CARD, 50);
			if (retval < 0) {
				rtsx_clear_sd_error(chip);
				chip->rw_need_retry = 1;
				sd_set_err_code(chip, SD_TO_ERR);
				goto RW_FAIL;
			}
			retval = wait_data_buf_ready(chip);
			if (retval != STATUS_SUCCESS) {
				chip->rw_need_retry = 1;
				sd_set_err_code(chip, SD_TO_ERR);
				goto RW_FAIL;
			}
			retval = sd_send_cmd_get_rsp(chip, WRITE_MULTIPLE_BLOCK,
						     data_addr, SD_RSP_TYPE_R1,
						     NULL, 0);
			if (retval != STATUS_SUCCESS) {
				chip->rw_need_retry = 1;
				goto RW_FAIL;
			}
			rtsx_init_cmd(chip);
			cfg2 = SD_NO_CALCULATE_CRC7 | SD_CHECK_CRC16 |
				SD_NO_WAIT_BUSY_END |
				SD_NO_CHECK_CRC7 | SD_RSP_LEN_0;
			rtsx_add_cmd(chip, WRITE_REG_CMD, REG_SD_CFG2, 0xFF,
				     cfg2);
			trans_dma_enable(srb->sc_data_direction, chip,
					 sector_cnt * 512, DMA_512);
			rtsx_add_cmd(chip, WRITE_REG_CMD, REG_SD_TRANSFER, 0xFF,
				     SD_TM_AUTO_WRITE_3 | SD_TRANSFER_START);
			rtsx_add_cmd(chip, CHECK_REG_CMD, REG_SD_TRANSFER,
				     SD_TRANSFER_END, SD_TRANSFER_END);
			rtsx_send_cmd_no_wait(chip);
		}
		sd_card->seq_mode = 1;
	}
	/* Move the payload between host memory and the controller. */
	retval = rtsx_transfer_data(chip, SD_CARD, scsi_sglist(srb),
				    scsi_bufflen(srb), scsi_sg_count(srb),
				    srb->sc_data_direction, chip->sd_timeout);
	if (retval < 0) {
		u8 stat = 0;
		int err;
		sd_card->seq_mode = 0;
		if (retval == -ETIMEDOUT)
			err = STATUS_TIMEDOUT;
		else
			err = STATUS_FAIL;
		/* Capture CRC status before clearing the error state. */
		rtsx_read_register(chip, REG_SD_STAT1, &stat);
		rtsx_clear_sd_error(chip);
		if (detect_card_cd(chip, SD_CARD) != STATUS_SUCCESS) {
			chip->rw_need_retry = 0;
			dev_dbg(rtsx_dev(chip), "No card exist, exit %s\n",
				__func__);
			return STATUS_FAIL;
		}
		chip->rw_need_retry = 1;
		retval = sd_send_cmd_get_rsp(chip, STOP_TRANSMISSION, 0,
					     SD_RSP_TYPE_R1b, NULL, 0);
		if (retval != STATUS_SUCCESS) {
			sd_set_err_code(chip, SD_STS_ERR);
			goto RW_FAIL;
		}
		if (stat & (SD_CRC7_ERR | SD_CRC16_ERR | SD_CRC_WRITE_ERR)) {
			dev_dbg(rtsx_dev(chip), "SD CRC error, tune clock!\n");
			sd_set_err_code(chip, SD_CRC_ERR);
			goto RW_FAIL;
		}
		if (err == STATUS_TIMEDOUT) {
			sd_set_err_code(chip, SD_TO_ERR);
			goto RW_FAIL;
		}
		return err;
	}
	/* Remember where this request ended so the next one can chain. */
	sd_card->pre_sec_addr = start_sector;
	sd_card->pre_sec_cnt = sector_cnt;
	sd_card->pre_dir = srb->sc_data_direction;
	return STATUS_SUCCESS;
RW_FAIL:
	sd_card->seq_mode = 0;
	if (detect_card_cd(chip, SD_CARD) != STATUS_SUCCESS) {
		chip->rw_need_retry = 0;
		dev_dbg(rtsx_dev(chip), "No card exist, exit %s\n", __func__);
		return STATUS_FAIL;
	}
	/*
	 * Recovery: CRC error on wide MMC -> re-init as MMC without a bus
	 * switch; other CRC errors -> step the clock down; timeout/status
	 * errors -> full card reset.
	 */
	if (sd_check_err_code(chip, SD_CRC_ERR)) {
		if (CHK_MMC_4BIT(sd_card) || CHK_MMC_8BIT(sd_card)) {
			sd_card->mmc_dont_switch_bus = 1;
			reset_mmc_only(chip);
			sd_card->mmc_dont_switch_bus = 0;
		} else {
			sd_card->need_retune = 1;
			sd_auto_tune_clock(chip);
		}
	} else if (sd_check_err_code(chip, SD_TO_ERR | SD_STS_ERR)) {
		retval = reset_sd_card(chip);
		if (retval != STATUS_SUCCESS) {
			chip->card_ready &= ~SD_CARD;
			chip->card_fail |= SD_CARD;
			chip->capacity[chip->card2lun[SD_CARD]] = 0;
		}
	}
	return STATUS_FAIL;
}
#ifdef SUPPORT_CPRM
/*
 * ext_sd_send_cmd_get_rsp - send one pass-through SD command, read response.
 *
 * CPRM variant of sd_send_cmd_get_rsp(): programs CMD0-CMD4 with the
 * 48-bit command, runs SD_TM_CMD_RSP through the ping-pong buffer, then
 * validates the response.  CRC7 failures are retried up to
 * SD_MAX_RETRY_COUNT times (except for WRITE_MULTIPLE_BLOCK, which fails
 * immediately).  For R2 the 16-byte response is read from PPBUF_BASE2;
 * other non-R0 types read the 5 response registers.
 *
 * @special_check: when true, skips the card-status error check normally
 *                 applied to SELECT_CARD/APP_CMD/SEND_STATUS.
 *
 * Returns STATUS_SUCCESS or STATUS_FAIL; on success up to @rsp_len
 * response bytes are copied to @rsp (if non-NULL).
 */
int ext_sd_send_cmd_get_rsp(struct rtsx_chip *chip, u8 cmd_idx, u32 arg,
			    u8 rsp_type, u8 *rsp, int rsp_len,
			    bool special_check)
{
	int retval;
	int timeout = 100;
	u16 reg_addr;
	u8 *ptr;
	int stat_idx = 0;
	int rty_cnt = 0;
	dev_dbg(rtsx_dev(chip), "EXT SD/MMC CMD %d\n", cmd_idx);
	/* R1b responses can keep DAT0 busy much longer. */
	if (rsp_type == SD_RSP_TYPE_R1b)
		timeout = 3000;
RTY_SEND_CMD:
	rtsx_init_cmd(chip);
	/* 0x40 sets the start/transmission bits of the command token. */
	rtsx_add_cmd(chip, WRITE_REG_CMD, REG_SD_CMD0, 0xFF, 0x40 | cmd_idx);
	rtsx_add_cmd(chip, WRITE_REG_CMD, REG_SD_CMD1, 0xFF, (u8)(arg >> 24));
	rtsx_add_cmd(chip, WRITE_REG_CMD, REG_SD_CMD2, 0xFF, (u8)(arg >> 16));
	rtsx_add_cmd(chip, WRITE_REG_CMD, REG_SD_CMD3, 0xFF, (u8)(arg >> 8));
	rtsx_add_cmd(chip, WRITE_REG_CMD, REG_SD_CMD4, 0xFF, (u8)arg);
	rtsx_add_cmd(chip, WRITE_REG_CMD, REG_SD_CFG2, 0xFF, rsp_type);
	rtsx_add_cmd(chip, WRITE_REG_CMD, CARD_DATA_SOURCE,
		     0x01, PINGPONG_BUFFER);
	rtsx_add_cmd(chip, WRITE_REG_CMD, REG_SD_TRANSFER,
		     0xFF, SD_TM_CMD_RSP | SD_TRANSFER_START);
	rtsx_add_cmd(chip, CHECK_REG_CMD, REG_SD_TRANSFER, SD_TRANSFER_END,
		     SD_TRANSFER_END);
	/* Queue response reads; stat_idx locates the SD_STAT1 byte later. */
	if (rsp_type == SD_RSP_TYPE_R2) {
		for (reg_addr = PPBUF_BASE2; reg_addr < PPBUF_BASE2 + 16;
		     reg_addr++)
			rtsx_add_cmd(chip, READ_REG_CMD, reg_addr, 0, 0);
		stat_idx = 17;
	} else if (rsp_type != SD_RSP_TYPE_R0) {
		for (reg_addr = REG_SD_CMD0; reg_addr <= REG_SD_CMD4;
		     reg_addr++)
			rtsx_add_cmd(chip, READ_REG_CMD, reg_addr, 0, 0);
		stat_idx = 6;
	}
	rtsx_add_cmd(chip, READ_REG_CMD, REG_SD_CMD5, 0, 0);
	rtsx_add_cmd(chip, READ_REG_CMD, REG_SD_STAT1, 0, 0);
	retval = rtsx_send_cmd(chip, SD_CARD, timeout);
	if (retval < 0) {
		if (retval == -ETIMEDOUT) {
			rtsx_clear_sd_error(chip);
			/*
			 * A busy-wait response may merely still be busy:
			 * probe DAT0 before declaring a timeout error.
			 */
			if (rsp_type & SD_WAIT_BUSY_END) {
				retval = sd_check_data0_status(chip);
				if (retval != STATUS_SUCCESS)
					return retval;
			} else {
				sd_set_err_code(chip, SD_TO_ERR);
			}
		}
		return STATUS_FAIL;
	}
	/* R0 has no response to parse. */
	if (rsp_type == SD_RSP_TYPE_R0)
		return STATUS_SUCCESS;
	ptr = rtsx_get_cmd_data(chip) + 1;
	/* First response byte must have the two MSBs clear. */
	if ((ptr[0] & 0xC0) != 0) {
		sd_set_err_code(chip, SD_STS_ERR);
		return STATUS_FAIL;
	}
	if (!(rsp_type & SD_NO_CHECK_CRC7)) {
		if (ptr[stat_idx] & SD_CRC7_ERR) {
			/* A corrupted write command must not be re-sent. */
			if (cmd_idx == WRITE_MULTIPLE_BLOCK) {
				sd_set_err_code(chip, SD_CRC_ERR);
				return STATUS_FAIL;
			}
			if (rty_cnt < SD_MAX_RETRY_COUNT) {
				wait_timeout(20);
				rty_cnt++;
				goto RTY_SEND_CMD;
			} else {
				sd_set_err_code(chip, SD_CRC_ERR);
				return STATUS_FAIL;
			}
		}
	}
	/* Inspect card-status bits for the commands that report them. */
	if (cmd_idx == SELECT_CARD || cmd_idx == APP_CMD ||
	    cmd_idx == SEND_STATUS || cmd_idx == STOP_TRANSMISSION) {
		if (cmd_idx != STOP_TRANSMISSION && !special_check) {
			if (ptr[1] & 0x80)
				return STATUS_FAIL;
		}
#ifdef SUPPORT_SD_LOCK
		/* With lock support, the CARD_IS_LOCKED bit is tolerated. */
		if (ptr[1] & 0x7D) {
#else
		if (ptr[1] & 0x7F) {
#endif
			return STATUS_FAIL;
		}
		if (ptr[2] & 0xF8)
			return STATUS_FAIL;
		if (cmd_idx == SELECT_CARD) {
			if (rsp_type == SD_RSP_TYPE_R2) {
				/* Current state must be "stby" (4). */
				if ((ptr[3] & 0x1E) != 0x04)
					return STATUS_FAIL;
			}
		}
	}
	if (rsp && rsp_len)
		memcpy(rsp, ptr, rsp_len);
	return STATUS_SUCCESS;
}
/*
 * ext_sd_get_rsp - fetch the response of a previously issued command.
 *
 * Reads the R2 response from the ping-pong buffer (17 bytes) or the
 * short response from the CMD0-CMD5 registers (6 bytes) and copies at
 * most @len bytes into @rsp.  R0 has no response and succeeds trivially.
 *
 * Returns STATUS_SUCCESS or STATUS_FAIL.
 */
int ext_sd_get_rsp(struct rtsx_chip *chip, int len, u8 *rsp, u8 rsp_type)
{
	int retval, rsp_len;
	u16 reg_addr;
	if (rsp_type == SD_RSP_TYPE_R0)
		return STATUS_SUCCESS;
	rtsx_init_cmd(chip);
	if (rsp_type == SD_RSP_TYPE_R2) {
		for (reg_addr = PPBUF_BASE2; reg_addr < PPBUF_BASE2 + 16;
		     reg_addr++)
			rtsx_add_cmd(chip, READ_REG_CMD, reg_addr, 0xFF, 0);
		rsp_len = 17;
	} else if (rsp_type != SD_RSP_TYPE_R0) {
		/* R0 already returned above, so rsp_len is always set here. */
		for (reg_addr = REG_SD_CMD0; reg_addr <= REG_SD_CMD4;
		     reg_addr++)
			rtsx_add_cmd(chip, READ_REG_CMD, reg_addr, 0xFF, 0);
		rsp_len = 6;
	}
	rtsx_add_cmd(chip, READ_REG_CMD, REG_SD_CMD5, 0xFF, 0);
	retval = rtsx_send_cmd(chip, SD_CARD, 100);
	if (retval != STATUS_SUCCESS)
		return STATUS_FAIL;
	if (rsp) {
		/* Never copy more than the caller's buffer can hold. */
		int min_len = (rsp_len < len) ? rsp_len : len;
		memcpy(rsp, rtsx_get_cmd_data(chip), min_len);
		dev_dbg(rtsx_dev(chip), "min_len = %d\n", min_len);
		dev_dbg(rtsx_dev(chip), "Response in cmd buf: 0x%x 0x%x 0x%x 0x%x\n",
			rsp[0], rsp[1], rsp[2], rsp[3]);
	}
	return STATUS_SUCCESS;
}
/*
 * sd_pass_thru_mode - handle the vendor SCSI command enabling CPRM
 * pass-through.
 *
 * Validates the "SD Card" signature in CDB bytes 2-8, toggles
 * sd_card->sd_pass_thru_en from the low nibble of byte 1, and returns an
 * 18-byte information block (card type, write-protect flag, part of the
 * RCA, max LUN) to the host.
 *
 * Returns TRANSPORT_GOOD or TRANSPORT_FAILED (with sense data set).
 */
int sd_pass_thru_mode(struct scsi_cmnd *srb, struct rtsx_chip *chip)
{
	struct sd_info *sd_card = &chip->sd_card;
	unsigned int lun = SCSI_LUN(srb);
	int len;
	/* Reply template; bytes 8-14 spell "SD Card". */
	u8 buf[18] = {
		0x00,
		0x00,
		0x00,
		0x0E,
		0x00,
		0x00,
		0x00,
		0x00,
		0x53,
		0x44,
		0x20,
		0x43,
		0x61,
		0x72,
		0x64,
		0x00,
		0x00,
		0x00,
	};
	sd_card->pre_cmd_err = 0;
	/* First access after a media change must report it. */
	if (!(CHK_BIT(chip->lun_mc, lun))) {
		SET_BIT(chip->lun_mc, lun);
		set_sense_type(chip, lun, SENSE_TYPE_MEDIA_CHANGE);
		return TRANSPORT_FAILED;
	}
	/* CDB must carry the ASCII signature "SD Card". */
	if (srb->cmnd[2] != 0x53 || srb->cmnd[3] != 0x44 ||
	    srb->cmnd[4] != 0x20 || srb->cmnd[5] != 0x43 ||
	    srb->cmnd[6] != 0x61 || srb->cmnd[7] != 0x72 ||
	    srb->cmnd[8] != 0x64) {
		set_sense_type(chip, lun, SENSE_TYPE_MEDIA_INVALID_CMD_FIELD);
		return TRANSPORT_FAILED;
	}
	switch (srb->cmnd[1] & 0x0F) {
	case 0:
		sd_card->sd_pass_thru_en = 0;
		break;
	case 1:
		sd_card->sd_pass_thru_en = 1;
		break;
	default:
		set_sense_type(chip, lun, SENSE_TYPE_MEDIA_INVALID_CMD_FIELD);
		return TRANSPORT_FAILED;
	}
	/* byte5: 0x01 = SD, 0x02 = MMC; bit7 = write protected. */
	buf[5] = (CHK_SD(sd_card) == 1) ? 0x01 : 0x02;
	if (chip->card_wp & SD_CARD)
		buf[5] |= 0x80;
	buf[6] = (u8)(sd_card->sd_addr >> 16);
	buf[7] = (u8)(sd_card->sd_addr >> 24);
	buf[15] = chip->max_lun;
	len = min_t(int, 18, scsi_bufflen(srb));
	rtsx_stor_set_xfer_buf(buf, len, srb);
	return TRANSPORT_GOOD;
}
/*
 * get_rsp_type - decode the response format requested by a pass-through CDB.
 *
 * CDB byte 10 selects the SD response type (0x03..0x07 map to R0, R1,
 * R1b, R2, R3); *rsp_len is the number of response bytes that type
 * carries (0, 6, or 17).
 *
 * Returns STATUS_SUCCESS, or STATUS_FAIL for NULL outputs or an
 * out-of-range selector.
 */
static inline int get_rsp_type(struct scsi_cmnd *srb, u8 *rsp_type,
			       int *rsp_len)
{
	static const struct {
		u8 type;
		int len;
	} rsp_map[] = {
		{ SD_RSP_TYPE_R0,  0 },		/* CDB[10] == 0x03 */
		{ SD_RSP_TYPE_R1,  6 },		/* CDB[10] == 0x04 */
		{ SD_RSP_TYPE_R1b, 6 },		/* CDB[10] == 0x05 */
		{ SD_RSP_TYPE_R2, 17 },		/* CDB[10] == 0x06 */
		{ SD_RSP_TYPE_R3,  6 },		/* CDB[10] == 0x07 */
	};
	u8 sel = srb->cmnd[10];

	if (!rsp_type || !rsp_len)
		return STATUS_FAIL;

	if (sel < 0x03 || sel > 0x07)
		return STATUS_FAIL;

	*rsp_type = rsp_map[sel - 0x03].type;
	*rsp_len = rsp_map[sel - 0x03].len;

	return STATUS_SUCCESS;
}
/*
 * sd_execute_no_data - execute a pass-through SD command with no data phase.
 *
 * Decodes the vendor CDB (command index in byte 2, flags in byte 1:
 * bit1 = deselect card first ("standby"), bit0 = prefix with CMD55
 * ("acmd"; argument in bytes 3-6, response type selector in byte 10),
 * sends the command, and stores the response in sd_card->rsp.  Any
 * failure marks pre_cmd_err and forces a card reset.
 *
 * Returns TRANSPORT_GOOD or TRANSPORT_FAILED (with sense data set).
 */
int sd_execute_no_data(struct scsi_cmnd *srb, struct rtsx_chip *chip)
{
	struct sd_info *sd_card = &chip->sd_card;
	unsigned int lun = SCSI_LUN(srb);
	int retval, rsp_len;
	u8 cmd_idx, rsp_type;
	bool standby = false, acmd = false;
	u32 arg;
	if (!sd_card->sd_pass_thru_en) {
		set_sense_type(chip, lun, SENSE_TYPE_MEDIA_INVALID_CMD_FIELD);
		return TRANSPORT_FAILED;
	}
	retval = sd_switch_clock(chip);
	if (retval != STATUS_SUCCESS)
		return TRANSPORT_FAILED;
	/* A failure in the previous pass-through command invalidates state. */
	if (sd_card->pre_cmd_err) {
		sd_card->pre_cmd_err = 0;
		set_sense_type(chip, lun, SENSE_TYPE_MEDIA_CHANGE);
		return TRANSPORT_FAILED;
	}
	cmd_idx = srb->cmnd[2] & 0x3F;
	if (srb->cmnd[1] & 0x02)
		standby = true;
	if (srb->cmnd[1] & 0x01)
		acmd = true;
	arg = ((u32)srb->cmnd[3] << 24) | ((u32)srb->cmnd[4] << 16) |
		((u32)srb->cmnd[5] << 8) | srb->cmnd[6];
	retval = get_rsp_type(srb, &rsp_type, &rsp_len);
	if (retval != STATUS_SUCCESS) {
		set_sense_type(chip, lun, SENSE_TYPE_MEDIA_INVALID_CMD_FIELD);
		return TRANSPORT_FAILED;
	}
	sd_card->last_rsp_type = rsp_type;
	retval = sd_switch_clock(chip);
	if (retval != STATUS_SUCCESS)
		return TRANSPORT_FAILED;
#ifdef SUPPORT_SD_LOCK
	/* Keep the bus width matching the current card mode. */
	if ((sd_card->sd_lock_status & SD_LOCK_1BIT_MODE) == 0) {
		if (CHK_MMC_8BIT(sd_card)) {
			retval = rtsx_write_register(chip, REG_SD_CFG1, 0x03,
						     SD_BUS_WIDTH_8);
			if (retval != STATUS_SUCCESS)
				return TRANSPORT_FAILED;
		} else if (CHK_SD(sd_card) || CHK_MMC_4BIT(sd_card)) {
			retval = rtsx_write_register(chip, REG_SD_CFG1, 0x03,
						     SD_BUS_WIDTH_4);
			if (retval != STATUS_SUCCESS)
				return TRANSPORT_FAILED;
		}
	}
#else
	retval = rtsx_write_register(chip, REG_SD_CFG1, 0x03, SD_BUS_WIDTH_4);
	if (retval != STATUS_SUCCESS)
		return TRANSPORT_FAILED;
#endif
	/* Optionally deselect the card so the command runs in stand-by. */
	if (standby) {
		retval = sd_select_card(chip, 0);
		if (retval != STATUS_SUCCESS)
			goto sd_execute_cmd_failed;
	}
	/* Application command: prefix with CMD55. */
	if (acmd) {
		retval = ext_sd_send_cmd_get_rsp(chip, APP_CMD,
						 sd_card->sd_addr,
						 SD_RSP_TYPE_R1, NULL, 0,
						 false);
		if (retval != STATUS_SUCCESS)
			goto sd_execute_cmd_failed;
	}
	retval = ext_sd_send_cmd_get_rsp(chip, cmd_idx, arg, rsp_type,
					 sd_card->rsp, rsp_len, false);
	if (retval != STATUS_SUCCESS)
		goto sd_execute_cmd_failed;
	/* Reselect the card if it was deselected above. */
	if (standby) {
		retval = sd_select_card(chip, 1);
		if (retval != STATUS_SUCCESS)
			goto sd_execute_cmd_failed;
	}
#ifdef SUPPORT_SD_LOCK
	retval = sd_update_lock_status(chip);
	if (retval != STATUS_SUCCESS)
		goto sd_execute_cmd_failed;
#endif
	scsi_set_resid(srb, 0);
	return TRANSPORT_GOOD;
sd_execute_cmd_failed:
	/* Card state is unknown after a failure: force a reset. */
	sd_card->pre_cmd_err = 1;
	set_sense_type(chip, lun, SENSE_TYPE_NO_SENSE);
	release_sd_card(chip);
	do_reset_sd_card(chip);
	if (!(chip->card_ready & SD_CARD))
		set_sense_type(chip, lun, SENSE_TYPE_MEDIA_NOT_PRESENT);
	return TRANSPORT_FAILED;
}
/*
 * sd_execute_read_data - execute a pass-through SD command with a read
 * data phase.
 *
 * CDB layout: command index in byte 2; flags in byte 1 (bit2 = send
 * CMD12 afterwards, bit1 = run in stand-by, bit0 = ACMD via CMD55);
 * argument in bytes 3-6; data length in bytes 7-9; response type
 * selector in byte 10.  Payloads up to 512 bytes use a normal read into
 * a bounce buffer; larger, 512-aligned payloads use AUTO_READ_2 with
 * DMA.  The card status is re-checked with up to three CMD13s at the
 * end.  Any failure marks pre_cmd_err and forces a card reset.
 *
 * Returns TRANSPORT_GOOD, TRANSPORT_FAILED (sense set), or
 * TRANSPORT_ERROR on allocation failure.
 */
int sd_execute_read_data(struct scsi_cmnd *srb, struct rtsx_chip *chip)
{
	struct sd_info *sd_card = &chip->sd_card;
	unsigned int lun = SCSI_LUN(srb);
	int retval, rsp_len, i;
	bool read_err = false, cmd13_checkbit = false;
	u8 cmd_idx, rsp_type, bus_width;
	bool standby = false, send_cmd12 = false, acmd = false;
	u32 data_len;
	if (!sd_card->sd_pass_thru_en) {
		set_sense_type(chip, lun, SENSE_TYPE_MEDIA_INVALID_CMD_FIELD);
		return TRANSPORT_FAILED;
	}
	/* A failure in the previous pass-through command invalidates state. */
	if (sd_card->pre_cmd_err) {
		sd_card->pre_cmd_err = 0;
		set_sense_type(chip, lun, SENSE_TYPE_MEDIA_CHANGE);
		return TRANSPORT_FAILED;
	}
	retval = sd_switch_clock(chip);
	if (retval != STATUS_SUCCESS)
		return TRANSPORT_FAILED;
	cmd_idx = srb->cmnd[2] & 0x3F;
	if (srb->cmnd[1] & 0x04)
		send_cmd12 = true;
	if (srb->cmnd[1] & 0x02)
		standby = true;
	if (srb->cmnd[1] & 0x01)
		acmd = true;
	data_len = ((u32)srb->cmnd[7] << 16) | ((u32)srb->cmnd[8]
						<< 8) | srb->cmnd[9];
	retval = get_rsp_type(srb, &rsp_type, &rsp_len);
	if (retval != STATUS_SUCCESS) {
		set_sense_type(chip, lun, SENSE_TYPE_MEDIA_INVALID_CMD_FIELD);
		return TRANSPORT_FAILED;
	}
	sd_card->last_rsp_type = rsp_type;
	retval = sd_switch_clock(chip);
	if (retval != STATUS_SUCCESS)
		return TRANSPORT_FAILED;
#ifdef SUPPORT_SD_LOCK
	/* Pick the bus width matching the current card mode. */
	if ((sd_card->sd_lock_status & SD_LOCK_1BIT_MODE) == 0) {
		if (CHK_MMC_8BIT(sd_card))
			bus_width = SD_BUS_WIDTH_8;
		else if (CHK_SD(sd_card) || CHK_MMC_4BIT(sd_card))
			bus_width = SD_BUS_WIDTH_4;
		else
			bus_width = SD_BUS_WIDTH_1;
	} else {
		bus_width = SD_BUS_WIDTH_4;
	}
	dev_dbg(rtsx_dev(chip), "bus_width = %d\n", bus_width);
#else
	bus_width = SD_BUS_WIDTH_4;
#endif
	/* Short transfers need a matching block length on the card. */
	if (data_len < 512) {
		retval = ext_sd_send_cmd_get_rsp(chip, SET_BLOCKLEN, data_len,
						 SD_RSP_TYPE_R1, NULL, 0,
						 false);
		if (retval != STATUS_SUCCESS)
			goto sd_execute_read_cmd_failed;
	}
	if (standby) {
		retval = sd_select_card(chip, 0);
		if (retval != STATUS_SUCCESS)
			goto sd_execute_read_cmd_failed;
	}
	if (acmd) {
		retval = ext_sd_send_cmd_get_rsp(chip, APP_CMD,
						 sd_card->sd_addr,
						 SD_RSP_TYPE_R1, NULL, 0,
						 false);
		if (retval != STATUS_SUCCESS)
			goto sd_execute_read_cmd_failed;
	}
	if (data_len <= 512) {
		/* Small payload: normal read into a kernel bounce buffer. */
		int min_len;
		u8 *buf;
		u16 byte_cnt, blk_cnt;
		u8 cmd[5];
		byte_cnt = ((u16)(srb->cmnd[8] & 0x03) << 8) | srb->cmnd[9];
		blk_cnt = 1;
		cmd[0] = 0x40 | cmd_idx;
		cmd[1] = srb->cmnd[3];
		cmd[2] = srb->cmnd[4];
		cmd[3] = srb->cmnd[5];
		cmd[4] = srb->cmnd[6];
		buf = kmalloc(data_len, GFP_KERNEL);
		if (!buf)
			return TRANSPORT_ERROR;
		retval = sd_read_data(chip, SD_TM_NORMAL_READ, cmd, 5, byte_cnt,
				      blk_cnt, bus_width, buf, data_len, 2000);
		if (retval != STATUS_SUCCESS) {
			read_err = true;
			kfree(buf);
			rtsx_clear_sd_error(chip);
			goto sd_execute_read_cmd_failed;
		}
		min_len = min(data_len, scsi_bufflen(srb));
		rtsx_stor_set_xfer_buf(buf, min_len, srb);
		kfree(buf);
	} else if (!(data_len & 0x1FF)) {
		/* Multiple of 512 bytes: DMA via AUTO_READ_2. */
		rtsx_init_cmd(chip);
		trans_dma_enable(DMA_FROM_DEVICE, chip, data_len, DMA_512);
		rtsx_add_cmd(chip, WRITE_REG_CMD, REG_SD_BYTE_CNT_H, 0xFF,
			     0x02);
		rtsx_add_cmd(chip, WRITE_REG_CMD, REG_SD_BYTE_CNT_L, 0xFF,
			     0x00);
		/* Block count = data_len / 512, split across two registers. */
		rtsx_add_cmd(chip, WRITE_REG_CMD, REG_SD_BLOCK_CNT_H,
			     0xFF, (srb->cmnd[7] & 0xFE) >> 1);
		rtsx_add_cmd(chip, WRITE_REG_CMD, REG_SD_BLOCK_CNT_L,
			     0xFF, (u8)((data_len & 0x0001FE00) >> 9));
		rtsx_add_cmd(chip, WRITE_REG_CMD, REG_SD_CMD0, 0xFF,
			     0x40 | cmd_idx);
		rtsx_add_cmd(chip, WRITE_REG_CMD, REG_SD_CMD1, 0xFF,
			     srb->cmnd[3]);
		rtsx_add_cmd(chip, WRITE_REG_CMD, REG_SD_CMD2, 0xFF,
			     srb->cmnd[4]);
		rtsx_add_cmd(chip, WRITE_REG_CMD, REG_SD_CMD3, 0xFF,
			     srb->cmnd[5]);
		rtsx_add_cmd(chip, WRITE_REG_CMD, REG_SD_CMD4, 0xFF,
			     srb->cmnd[6]);
		rtsx_add_cmd(chip, WRITE_REG_CMD, REG_SD_CFG1, 0x03, bus_width);
		rtsx_add_cmd(chip, WRITE_REG_CMD, REG_SD_CFG2, 0xFF, rsp_type);
		rtsx_add_cmd(chip, WRITE_REG_CMD, REG_SD_TRANSFER,
			     0xFF, SD_TM_AUTO_READ_2 | SD_TRANSFER_START);
		rtsx_add_cmd(chip, CHECK_REG_CMD, REG_SD_TRANSFER,
			     SD_TRANSFER_END, SD_TRANSFER_END);
		rtsx_send_cmd_no_wait(chip);
		retval = rtsx_transfer_data(chip, SD_CARD, scsi_sglist(srb),
					    scsi_bufflen(srb),
					    scsi_sg_count(srb),
					    DMA_FROM_DEVICE, 10000);
		if (retval < 0) {
			read_err = true;
			rtsx_clear_sd_error(chip);
			goto sd_execute_read_cmd_failed;
		}
	} else {
		/* Lengths >512 that are not 512-aligned are unsupported. */
		goto sd_execute_read_cmd_failed;
	}
	retval = ext_sd_get_rsp(chip, rsp_len, sd_card->rsp, rsp_type);
	if (retval != STATUS_SUCCESS)
		goto sd_execute_read_cmd_failed;
	if (standby) {
		retval = sd_select_card(chip, 1);
		if (retval != STATUS_SUCCESS)
			goto sd_execute_read_cmd_failed;
	}
	if (send_cmd12) {
		retval = ext_sd_send_cmd_get_rsp(chip, STOP_TRANSMISSION, 0,
						 SD_RSP_TYPE_R1b, NULL, 0,
						 false);
		if (retval != STATUS_SUCCESS)
			goto sd_execute_read_cmd_failed;
	}
	/* Restore the default 512-byte block length. */
	if (data_len < 512) {
		retval = ext_sd_send_cmd_get_rsp(chip, SET_BLOCKLEN, 0x200,
						 SD_RSP_TYPE_R1, NULL, 0,
						 false);
		if (retval != STATUS_SUCCESS)
			goto sd_execute_read_cmd_failed;
		retval = rtsx_write_register(chip, SD_BYTE_CNT_H, 0xFF, 0x02);
		if (retval != STATUS_SUCCESS)
			goto sd_execute_read_cmd_failed;
		retval = rtsx_write_register(chip, SD_BYTE_CNT_L, 0xFF, 0x00);
		if (retval != STATUS_SUCCESS)
			goto sd_execute_read_cmd_failed;
	}
	/* Stand-by/CMD12 variants relax the CMD13 status checks. */
	if ((srb->cmnd[1] & 0x02) || (srb->cmnd[1] & 0x04))
		cmd13_checkbit = true;
	for (i = 0; i < 3; i++) {
		retval = ext_sd_send_cmd_get_rsp(chip, SEND_STATUS,
						 sd_card->sd_addr,
						 SD_RSP_TYPE_R1, NULL, 0,
						 cmd13_checkbit);
		if (retval == STATUS_SUCCESS)
			break;
	}
	if (retval != STATUS_SUCCESS)
		goto sd_execute_read_cmd_failed;
	scsi_set_resid(srb, 0);
	return TRANSPORT_GOOD;
sd_execute_read_cmd_failed:
	/* Card state is unknown after a failure: force a reset. */
	sd_card->pre_cmd_err = 1;
	set_sense_type(chip, lun, SENSE_TYPE_NO_SENSE);
	if (read_err)
		set_sense_type(chip, lun, SENSE_TYPE_MEDIA_UNRECOVER_READ_ERR);
	release_sd_card(chip);
	do_reset_sd_card(chip);
	if (!(chip->card_ready & SD_CARD))
		set_sense_type(chip, lun, SENSE_TYPE_MEDIA_NOT_PRESENT);
	return TRANSPORT_FAILED;
}
int sd_execute_write_data(struct scsi_cmnd *srb, struct rtsx_chip *chip)
{
struct sd_info *sd_card = &chip->sd_card;
unsigned int lun = SCSI_LUN(srb);
int retval, rsp_len, i;
bool write_err = false, cmd13_checkbit = false;
u8 cmd_idx, rsp_type;
bool standby = false, send_cmd12 = false, acmd = false;
u32 data_len, arg;
#ifdef SUPPORT_SD_LOCK
int lock_cmd_fail = 0;
u8 sd_lock_state = 0;
u8 lock_cmd_type = 0;
#endif
if (!sd_card->sd_pass_thru_en) {
set_sense_type(chip, lun, SENSE_TYPE_MEDIA_INVALID_CMD_FIELD);
return TRANSPORT_FAILED;
}
if (sd_card->pre_cmd_err) {
sd_card->pre_cmd_err = 0;
set_sense_type(chip, lun, SENSE_TYPE_MEDIA_CHANGE);
return TRANSPORT_FAILED;
}
retval = sd_switch_clock(chip);
if (retval != STATUS_SUCCESS)
return TRANSPORT_FAILED;
cmd_idx = srb->cmnd[2] & 0x3F;
if (srb->cmnd[1] & 0x04)
send_cmd12 = true;
if (srb->cmnd[1] & 0x02)
standby = true;
if (srb->cmnd[1] & 0x01)
acmd = true;
data_len = ((u32)srb->cmnd[7] << 16) | ((u32)srb->cmnd[8]
<< 8) | srb->cmnd[9];
arg = ((u32)srb->cmnd[3] << 24) | ((u32)srb->cmnd[4] << 16) |
((u32)srb->cmnd[5] << 8) | srb->cmnd[6];
#ifdef SUPPORT_SD_LOCK
if (cmd_idx == LOCK_UNLOCK) {
sd_lock_state = sd_card->sd_lock_status;
sd_lock_state &= SD_LOCKED;
}
#endif
retval = get_rsp_type(srb, &rsp_type, &rsp_len);
if (retval != STATUS_SUCCESS) {
set_sense_type(chip, lun, SENSE_TYPE_MEDIA_INVALID_CMD_FIELD);
return TRANSPORT_FAILED;
}
sd_card->last_rsp_type = rsp_type;
retval = sd_switch_clock(chip);
if (retval != STATUS_SUCCESS)
return TRANSPORT_FAILED;
#ifdef SUPPORT_SD_LOCK
if ((sd_card->sd_lock_status & SD_LOCK_1BIT_MODE) == 0) {
if (CHK_MMC_8BIT(sd_card)) {
retval = rtsx_write_register(chip, REG_SD_CFG1, 0x03,
SD_BUS_WIDTH_8);
if (retval != STATUS_SUCCESS)
return TRANSPORT_FAILED;
} else if (CHK_SD(sd_card) || CHK_MMC_4BIT(sd_card)) {
retval = rtsx_write_register(chip, REG_SD_CFG1, 0x03,
SD_BUS_WIDTH_4);
if (retval != STATUS_SUCCESS)
return TRANSPORT_FAILED;
}
}
#else
retval = rtsx_write_register(chip, REG_SD_CFG1, 0x03, SD_BUS_WIDTH_4);
if (retval != STATUS_SUCCESS)
return TRANSPORT_FAILED;
#endif
if (data_len < 512) {
retval = ext_sd_send_cmd_get_rsp(chip, SET_BLOCKLEN, data_len,
SD_RSP_TYPE_R1, NULL, 0,
false);
if (retval != STATUS_SUCCESS)
goto sd_execute_write_cmd_failed;
}
if (standby) {
retval = sd_select_card(chip, 0);
if (retval != STATUS_SUCCESS)
goto sd_execute_write_cmd_failed;
}
if (acmd) {
retval = ext_sd_send_cmd_get_rsp(chip, APP_CMD,
sd_card->sd_addr,
SD_RSP_TYPE_R1, NULL, 0,
false);
if (retval != STATUS_SUCCESS)
goto sd_execute_write_cmd_failed;
}
retval = ext_sd_send_cmd_get_rsp(chip, cmd_idx, arg, rsp_type,
sd_card->rsp, rsp_len, false);
if (retval != STATUS_SUCCESS)
goto sd_execute_write_cmd_failed;
if (data_len <= 512) {
u16 i;
u8 *buf;
buf = kmalloc(data_len, GFP_KERNEL);
if (!buf)
return TRANSPORT_ERROR;
rtsx_stor_get_xfer_buf(buf, data_len, srb);
#ifdef SUPPORT_SD_LOCK
if (cmd_idx == LOCK_UNLOCK)
lock_cmd_type = buf[0] & 0x0F;
#endif
if (data_len > 256) {
rtsx_init_cmd(chip);
for (i = 0; i < 256; i++) {
rtsx_add_cmd(chip, WRITE_REG_CMD,
PPBUF_BASE2 + i, 0xFF, buf[i]);
}
retval = rtsx_send_cmd(chip, 0, 250);
if (retval != STATUS_SUCCESS) {
kfree(buf);
goto sd_execute_write_cmd_failed;
}
rtsx_init_cmd(chip);
for (i = 256; i < data_len; i++) {
rtsx_add_cmd(chip, WRITE_REG_CMD,
PPBUF_BASE2 + i, 0xFF, buf[i]);
}
retval = rtsx_send_cmd(chip, 0, 250);
if (retval != STATUS_SUCCESS) {
kfree(buf);
goto sd_execute_write_cmd_failed;
}
} else {
rtsx_init_cmd(chip);
for (i = 0; i < data_len; i++) {
rtsx_add_cmd(chip, WRITE_REG_CMD,
PPBUF_BASE2 + i, 0xFF, buf[i]);
}
retval = rtsx_send_cmd(chip, 0, 250);
if (retval != STATUS_SUCCESS) {
kfree(buf);
goto sd_execute_write_cmd_failed;
}
}
kfree(buf);
rtsx_init_cmd(chip);
rtsx_add_cmd(chip, WRITE_REG_CMD, REG_SD_BYTE_CNT_H, 0xFF,
srb->cmnd[8] & 0x03);
rtsx_add_cmd(chip, WRITE_REG_CMD, REG_SD_BYTE_CNT_L, 0xFF,
srb->cmnd[9]);
rtsx_add_cmd(chip, WRITE_REG_CMD, REG_SD_BLOCK_CNT_H, 0xFF,
0x00);
rtsx_add_cmd(chip, WRITE_REG_CMD, REG_SD_BLOCK_CNT_L, 0xFF,
0x01);
rtsx_add_cmd(chip, WRITE_REG_CMD, CARD_DATA_SOURCE, 0x01,
PINGPONG_BUFFER);
rtsx_add_cmd(chip, WRITE_REG_CMD, REG_SD_TRANSFER, 0xFF,
SD_TM_AUTO_WRITE_3 | SD_TRANSFER_START);
rtsx_add_cmd(chip, CHECK_REG_CMD, REG_SD_TRANSFER,
SD_TRANSFER_END, SD_TRANSFER_END);
retval = rtsx_send_cmd(chip, SD_CARD, 250);
} else if (!(data_len & 0x1FF)) {
rtsx_init_cmd(chip);
trans_dma_enable(DMA_TO_DEVICE, chip, data_len, DMA_512);
rtsx_add_cmd(chip, WRITE_REG_CMD, REG_SD_BYTE_CNT_H, 0xFF,
0x02);
rtsx_add_cmd(chip, WRITE_REG_CMD, REG_SD_BYTE_CNT_L, 0xFF,
0x00);
rtsx_add_cmd(chip, WRITE_REG_CMD, REG_SD_BLOCK_CNT_H,
0xFF, (srb->cmnd[7] & 0xFE) >> 1);
rtsx_add_cmd(chip, WRITE_REG_CMD, REG_SD_BLOCK_CNT_L,
0xFF, (u8)((data_len & 0x0001FE00) >> 9));
rtsx_add_cmd(chip, WRITE_REG_CMD, REG_SD_TRANSFER, 0xFF,
SD_TM_AUTO_WRITE_3 | SD_TRANSFER_START);
rtsx_add_cmd(chip, CHECK_REG_CMD, REG_SD_TRANSFER,
SD_TRANSFER_END, SD_TRANSFER_END);
rtsx_send_cmd_no_wait(chip);
retval = rtsx_transfer_data(chip, SD_CARD, scsi_sglist(srb),
scsi_bufflen(srb),
scsi_sg_count(srb),
DMA_TO_DEVICE, 10000);
} else {
goto sd_execute_write_cmd_failed;
}
if (retval < 0) {
write_err = true;
rtsx_clear_sd_error(chip);
goto sd_execute_write_cmd_failed;
}
#ifdef SUPPORT_SD_LOCK
if (cmd_idx == LOCK_UNLOCK) {
if (lock_cmd_type == SD_ERASE) {
sd_card->sd_erase_status = SD_UNDER_ERASING;
scsi_set_resid(srb, 0);
return TRANSPORT_GOOD;
}
rtsx_init_cmd(chip);
rtsx_add_cmd(chip, CHECK_REG_CMD, 0xFD30, 0x02, 0x02);
retval = rtsx_send_cmd(chip, SD_CARD, 250);
if (retval < 0) {
write_err = true;
rtsx_clear_sd_error(chip);
goto sd_execute_write_cmd_failed;
}
retval = sd_update_lock_status(chip);
if (retval != STATUS_SUCCESS) {
dev_dbg(rtsx_dev(chip), "Lock command fail!\n");
lock_cmd_fail = 1;
}
}
#endif /* SUPPORT_SD_LOCK */
if (standby) {
retval = sd_select_card(chip, 1);
if (retval != STATUS_SUCCESS)
goto sd_execute_write_cmd_failed;
}
if (send_cmd12) {
retval = ext_sd_send_cmd_get_rsp(chip, STOP_TRANSMISSION, 0,
SD_RSP_TYPE_R1b, NULL, 0,
false);
if (retval != STATUS_SUCCESS)
goto sd_execute_write_cmd_failed;
}
if (data_len < 512) {
retval = ext_sd_send_cmd_get_rsp(chip, SET_BLOCKLEN, 0x200,
SD_RSP_TYPE_R1, NULL, 0,
false);
if (retval != STATUS_SUCCESS)
goto sd_execute_write_cmd_failed;
retval = rtsx_write_register(chip, SD_BYTE_CNT_H, 0xFF, 0x02);
if (retval != STATUS_SUCCESS)
goto sd_execute_write_cmd_failed;
retval = rtsx_write_register(chip, SD_BYTE_CNT_L, 0xFF, 0x00);
if (retval != STATUS_SUCCESS)
goto sd_execute_write_cmd_failed;
}
if ((srb->cmnd[1] & 0x02) || (srb->cmnd[1] & 0x04))
cmd13_checkbit = true;
for (i = 0; i < 3; i++) {
retval = ext_sd_send_cmd_get_rsp(chip, SEND_STATUS,
sd_card->sd_addr,
SD_RSP_TYPE_R1, NULL, 0,
cmd13_checkbit);
if (retval == STATUS_SUCCESS)
break;
}
if (retval != STATUS_SUCCESS)
goto sd_execute_write_cmd_failed;
#ifdef SUPPORT_SD_LOCK
if (cmd_idx == LOCK_UNLOCK) {
if (!lock_cmd_fail) {
dev_dbg(rtsx_dev(chip), "lock_cmd_type = 0x%x\n",
lock_cmd_type);
if (lock_cmd_type & SD_CLR_PWD)
sd_card->sd_lock_status &= ~SD_PWD_EXIST;
if (lock_cmd_type & SD_SET_PWD)
sd_card->sd_lock_status |= SD_PWD_EXIST;
}
dev_dbg(rtsx_dev(chip), "sd_lock_state = 0x%x, sd_card->sd_lock_status = 0x%x\n",
sd_lock_state, sd_card->sd_lock_status);
if (sd_lock_state ^ (sd_card->sd_lock_status & SD_LOCKED)) {
sd_card->sd_lock_notify = 1;
if (sd_lock_state &&
(sd_card->sd_lock_status & SD_LOCK_1BIT_MODE)) {
sd_card->sd_lock_status |= (SD_UNLOCK_POW_ON | SD_SDR_RST);
if (CHK_SD(sd_card)) {
retval = reset_sd(chip);
if (retval != STATUS_SUCCESS) {
sd_card->sd_lock_status &=
~(SD_UNLOCK_POW_ON | SD_SDR_RST);
goto sd_execute_write_cmd_failed;
}
}
sd_card->sd_lock_status &= ~(SD_UNLOCK_POW_ON | SD_SDR_RST);
}
}
}
if (lock_cmd_fail) {
scsi_set_resid(srb, 0);
set_sense_type(chip, lun, SENSE_TYPE_NO_SENSE);
return TRANSPORT_FAILED;
}
#endif /* SUPPORT_SD_LOCK */
scsi_set_resid(srb, 0);
return TRANSPORT_GOOD;
sd_execute_write_cmd_failed:
sd_card->pre_cmd_err = 1;
set_sense_type(chip, lun, SENSE_TYPE_NO_SENSE);
if (write_err)
set_sense_type(chip, lun, SENSE_TYPE_MEDIA_WRITE_ERR);
release_sd_card(chip);
do_reset_sd_card(chip);
if (!(chip->card_ready & SD_CARD))
set_sense_type(chip, lun, SENSE_TYPE_MEDIA_NOT_PRESENT);
return TRANSPORT_FAILED;
}
/*
 * sd_get_cmd_rsp - return the cached response of the last pass-through SD
 * command to the host.
 *
 * Copies up to 17 bytes (R2 responses) or 6 bytes (all other response
 * types) from the driver's response cache into the SCSI transfer buffer.
 * Fails if pass-through mode is disabled, the previous pass-through
 * command failed, or the last response type was R0 (no response at all).
 */
int sd_get_cmd_rsp(struct scsi_cmnd *srb, struct rtsx_chip *chip)
{
	struct sd_info *sd_card = &chip->sd_card;
	unsigned int lun = SCSI_LUN(srb);
	u16 rsp_len;
	int nbytes;

	if (!sd_card->sd_pass_thru_en) {
		set_sense_type(chip, lun, SENSE_TYPE_MEDIA_INVALID_CMD_FIELD);
		return TRANSPORT_FAILED;
	}

	/* A failed previous pass-through command invalidates this request. */
	if (sd_card->pre_cmd_err) {
		sd_card->pre_cmd_err = 0;
		set_sense_type(chip, lun, SENSE_TYPE_MEDIA_CHANGE);
		return TRANSPORT_FAILED;
	}

	rsp_len = ((u16)srb->cmnd[7] << 8) | srb->cmnd[8];

	/* R0 means the previous command produced no response to report. */
	if (sd_card->last_rsp_type == SD_RSP_TYPE_R0) {
		set_sense_type(chip, lun, SENSE_TYPE_MEDIA_INVALID_CMD_FIELD);
		return TRANSPORT_FAILED;
	}

	/* R2 responses carry 17 bytes; every other type at most 6. */
	if (sd_card->last_rsp_type == SD_RSP_TYPE_R2)
		nbytes = (rsp_len < 17) ? rsp_len : 17;
	else
		nbytes = (rsp_len < 6) ? rsp_len : 6;

	rtsx_stor_set_xfer_buf(sd_card->rsp, nbytes, srb);

	dev_dbg(rtsx_dev(chip), "Response length: %d\n", rsp_len);
	dev_dbg(rtsx_dev(chip), "Response: 0x%x 0x%x 0x%x 0x%x\n",
		sd_card->rsp[0], sd_card->rsp[1],
		sd_card->rsp[2], sd_card->rsp[3]);

	scsi_set_resid(srb, 0);
	return TRANSPORT_GOOD;
}
/*
 * sd_hw_rst - handle the vendor "SD hardware reset" pass-through command.
 *
 * cmnd[2..8] must contain the ASCII signature "SD Card" (0x53 0x44 0x20
 * 0x43 0x61 0x72 0x64); the low nibble of cmnd[1] selects a full card
 * re-initialization (0) or a lighter protocol-level reset (1).
 */
int sd_hw_rst(struct scsi_cmnd *srb, struct rtsx_chip *chip)
{
	struct sd_info *sd_card = &chip->sd_card;
	unsigned int lun = SCSI_LUN(srb);
	int retval;

	/* Pass-through mode must have been enabled by the host tool. */
	if (!sd_card->sd_pass_thru_en) {
		set_sense_type(chip, lun, SENSE_TYPE_MEDIA_INVALID_CMD_FIELD);
		return TRANSPORT_FAILED;
	}

	/* A failed previous pass-through command invalidates this one. */
	if (sd_card->pre_cmd_err) {
		sd_card->pre_cmd_err = 0;
		set_sense_type(chip, lun, SENSE_TYPE_MEDIA_CHANGE);
		return TRANSPORT_FAILED;
	}

	/* Verify the "SD Card" ASCII signature in the CDB. */
	if (srb->cmnd[2] != 0x53 || srb->cmnd[3] != 0x44 ||
	    srb->cmnd[4] != 0x20 || srb->cmnd[5] != 0x43 ||
	    srb->cmnd[6] != 0x61 || srb->cmnd[7] != 0x72 ||
	    srb->cmnd[8] != 0x64) {
		set_sense_type(chip, lun, SENSE_TYPE_MEDIA_INVALID_CMD_FIELD);
		return TRANSPORT_FAILED;
	}

	switch (srb->cmnd[1] & 0x0F) {
	case 0:
		/* Full reset: power-cycle and re-enumerate the card. */
#ifdef SUPPORT_SD_LOCK
		if (srb->cmnd[9] == 0x64)
			sd_card->sd_lock_status |= SD_SDR_RST;
#endif
		retval = reset_sd_card(chip);
		if (retval != STATUS_SUCCESS) {
#ifdef SUPPORT_SD_LOCK
			sd_card->sd_lock_status &= ~SD_SDR_RST;
#endif
			set_sense_type(chip, lun, SENSE_TYPE_MEDIA_NOT_PRESENT);
			sd_card->pre_cmd_err = 1;
			return TRANSPORT_FAILED;
		}
#ifdef SUPPORT_SD_LOCK
		sd_card->sd_lock_status &= ~SD_SDR_RST;
#endif
		break;

	case 1:
		/* Protocol-only reset of the SD card state. */
		retval = reset_sd(chip);
		if (retval != STATUS_SUCCESS) {
			set_sense_type(chip, lun, SENSE_TYPE_MEDIA_NOT_PRESENT);
			sd_card->pre_cmd_err = 1;
			return TRANSPORT_FAILED;
		}
		break;

	default:
		set_sense_type(chip, lun, SENSE_TYPE_MEDIA_INVALID_CMD_FIELD);
		return TRANSPORT_FAILED;
	}

	scsi_set_resid(srb, 0);
	return TRANSPORT_GOOD;
}
#endif
/*
 * sd_cleanup_work - stop a pending sequential (multi-block) SD transfer.
 *
 * Invoked from the periodic cleanup path once the idle counter expires;
 * if the card is still in sequential mode, terminate the transfer and
 * reset the idle counter.
 */
void sd_cleanup_work(struct rtsx_chip *chip)
{
	struct sd_info *sd_card = &chip->sd_card;

	if (!sd_card->seq_mode)
		return;

	dev_dbg(rtsx_dev(chip), "SD: stop transmission\n");
	sd_stop_seq_mode(chip);
	sd_card->cleanup_counter = 0;
}
/*
 * sd_power_off_card3v3 - power down the SD socket (3.3V rail).
 *
 * Gates the card clock and output drivers, optionally removes card power
 * (skipped in FT2 fast mode), then disables the pad pull resistors.
 */
int sd_power_off_card3v3(struct rtsx_chip *chip)
{
	int retval;

	retval = disable_card_clock(chip, SD_CARD);
	if (retval != STATUS_SUCCESS)
		return STATUS_FAIL;

	retval = rtsx_write_register(chip, CARD_OE, SD_OUTPUT_EN, 0);
	if (retval)
		return retval;

	if (!chip->ft2_fast_mode) {
		retval = card_power_off(chip, SD_CARD);
		if (retval != STATUS_SUCCESS)
			return STATUS_FAIL;
		/* Let the supply rail discharge before touching the pads. */
		mdelay(50);
	}

	if (chip->asic_code) {
		retval = sd_pull_ctl_disable(chip);
		if (retval != STATUS_SUCCESS)
			return STATUS_FAIL;
	} else {
		/* FPGA variant controls the pad pulls via a single register. */
		retval = rtsx_write_register(chip, FPGA_PULL_CTL,
					     FPGA_SD_PULL_CTL_BIT | 0x20,
					     FPGA_SD_PULL_CTL_BIT);
		if (retval)
			return retval;
	}

	return STATUS_SUCCESS;
}
/*
 * release_sd_card - forget all cached SD state and power the socket down.
 *
 * Clears the ready/fail/write-protect flags and the cached CSD/SCR
 * register images, then removes power from the slot.
 */
int release_sd_card(struct rtsx_chip *chip)
{
	struct sd_info *sd_card = &chip->sd_card;
	int retval;

	chip->card_ready &= ~SD_CARD;
	chip->card_fail &= ~SD_CARD;
	chip->card_wp &= ~SD_CARD;

	chip->sd_io = 0;
	chip->sd_int = 0;

#ifdef SUPPORT_SD_LOCK
	sd_card->sd_lock_status = 0;
	sd_card->sd_erase_status = 0;
#endif

	/* Drop the cached card identification registers. */
	memset(sd_card->raw_csd, 0, 16);
	memset(sd_card->raw_scr, 0, 8);

	retval = sd_power_off_card3v3(chip);
	if (retval != STATUS_SUCCESS)
		return STATUS_FAIL;

	return STATUS_SUCCESS;
}
| linux-master | drivers/staging/rts5208/sd.c |
// SPDX-License-Identifier: GPL-2.0+
/*
* Driver for Realtek PCI-Express card reader
*
* Copyright(c) 2009-2013 Realtek Semiconductor Corp. All rights reserved.
*
* Author:
* Wei WANG ([email protected])
* Micky Ching ([email protected])
*/
#include <linux/blkdev.h>
#include <linux/kthread.h>
#include <linux/sched.h>
#include "rtsx.h"
#include "spi.h"
/* Record the most recent SPI error code in the per-chip SPI state. */
static inline void spi_set_err_code(struct rtsx_chip *chip, u8 err_code)
{
	chip->spi.err_code = err_code;
}
/*
 * spi_init - program the SPI controller for serial-flash access.
 *
 * Configures mode 0, MSB-first data-out, master role, automatic
 * chip-select with active-low polarity, and a half-clock sample delay
 * on the data-in path.
 */
static int spi_init(struct rtsx_chip *chip)
{
	int retval;

	retval = rtsx_write_register(chip, SPI_CONTROL, 0xFF,
				     CS_POLARITY_LOW | DTO_MSB_FIRST
				     | SPI_MASTER | SPI_MODE0 | SPI_AUTO);
	if (retval)
		return retval;
	retval = rtsx_write_register(chip, SPI_TCTL, EDO_TIMING_MASK,
				     SAMPLE_DELAY_HALF);
	if (retval)
		return retval;

	return STATUS_SUCCESS;
}
/*
 * spi_set_init_para - apply the host-configured SPI clock parameters and
 * route the card-reader data path to the SPI interface.
 *
 * Uses the divider and clock values previously stored by
 * spi_set_parameter(), enables the SPI clock and output drivers, then
 * performs the controller mode setup via spi_init().
 */
static int spi_set_init_para(struct rtsx_chip *chip)
{
	struct spi_info *spi = &chip->spi;
	int retval;

	/* 16-bit clock divider, programmed high byte then low byte. */
	retval = rtsx_write_register(chip, SPI_CLK_DIVIDER1, 0xFF,
				     (u8)(spi->clk_div >> 8));
	if (retval)
		return retval;
	retval = rtsx_write_register(chip, SPI_CLK_DIVIDER0, 0xFF,
				     (u8)(spi->clk_div));
	if (retval)
		return retval;

	retval = switch_clock(chip, spi->spi_clock);
	if (retval != STATUS_SUCCESS)
		return STATUS_FAIL;

	retval = select_card(chip, SPI_CARD);
	if (retval != STATUS_SUCCESS)
		return STATUS_FAIL;

	retval = rtsx_write_register(chip, CARD_CLK_EN, SPI_CLK_EN,
				     SPI_CLK_EN);
	if (retval)
		return retval;
	retval = rtsx_write_register(chip, CARD_OE, SPI_OUTPUT_EN,
				     SPI_OUTPUT_EN);
	if (retval)
		return retval;

	/* Allow the clock/outputs to settle before configuring the core. */
	wait_timeout(10);

	retval = spi_init(chip);
	if (retval != STATUS_SUCCESS)
		return STATUS_FAIL;

	return STATUS_SUCCESS;
}
/*
 * sf_polling_status - wait for the serial flash to become ready.
 *
 * Issues the RDSR instruction in the controller's hardware polling mode
 * and waits up to @msec milliseconds for the transfer-end flag.  On
 * timeout the SPI error state is cleared and SPI_BUSY_ERR is recorded.
 */
static int sf_polling_status(struct rtsx_chip *chip, int msec)
{
	int retval;

	rtsx_init_cmd(chip);

	rtsx_add_cmd(chip, WRITE_REG_CMD, SPI_COMMAND, 0xFF, SPI_RDSR);
	rtsx_add_cmd(chip, WRITE_REG_CMD, SPI_TRANSFER0, 0xFF,
		     SPI_TRANSFER0_START | SPI_POLLING_MODE0);
	rtsx_add_cmd(chip, CHECK_REG_CMD, SPI_TRANSFER0, SPI_TRANSFER0_END,
		     SPI_TRANSFER0_END);

	retval = rtsx_send_cmd(chip, 0, msec);
	if (retval < 0) {
		rtsx_clear_spi_error(chip);
		spi_set_err_code(chip, SPI_BUSY_ERR);
		return STATUS_FAIL;
	}

	return STATUS_SUCCESS;
}
/*
 * sf_enable_write - send a bare one-byte write-enable instruction
 * (typically SPI_WREN) to the serial flash.
 *
 * No-op when write support is disabled via spi->write_en.  The command
 * is issued with no address or data phase (SPI_C_MODE0).
 */
static int sf_enable_write(struct rtsx_chip *chip, u8 ins)
{
	struct spi_info *spi = &chip->spi;
	int retval;

	if (!spi->write_en)
		return STATUS_SUCCESS;

	rtsx_init_cmd(chip);

	rtsx_add_cmd(chip, WRITE_REG_CMD, SPI_COMMAND, 0xFF, ins);
	rtsx_add_cmd(chip, WRITE_REG_CMD, SPI_CA_NUMBER, 0xFF,
		     SPI_COMMAND_BIT_8 | SPI_ADDRESS_BIT_24);
	rtsx_add_cmd(chip, WRITE_REG_CMD, SPI_TRANSFER0, 0xFF,
		     SPI_TRANSFER0_START | SPI_C_MODE0);
	rtsx_add_cmd(chip, CHECK_REG_CMD, SPI_TRANSFER0, SPI_TRANSFER0_END,
		     SPI_TRANSFER0_END);

	retval = rtsx_send_cmd(chip, 0, 100);
	if (retval < 0) {
		rtsx_clear_spi_error(chip);
		spi_set_err_code(chip, SPI_HW_ERR);
		return STATUS_FAIL;
	}

	return STATUS_SUCCESS;
}
/*
 * sf_disable_write - send a bare one-byte write-disable instruction
 * (typically SPI_WRDI) to the serial flash.
 *
 * NOTE(review): body is identical to sf_enable_write(); the two are kept
 * separate presumably for call-site readability.  No-op when write
 * support is disabled via spi->write_en.
 */
static int sf_disable_write(struct rtsx_chip *chip, u8 ins)
{
	struct spi_info *spi = &chip->spi;
	int retval;

	if (!spi->write_en)
		return STATUS_SUCCESS;

	rtsx_init_cmd(chip);

	rtsx_add_cmd(chip, WRITE_REG_CMD, SPI_COMMAND, 0xFF, ins);
	rtsx_add_cmd(chip, WRITE_REG_CMD, SPI_CA_NUMBER, 0xFF,
		     SPI_COMMAND_BIT_8 | SPI_ADDRESS_BIT_24);
	rtsx_add_cmd(chip, WRITE_REG_CMD, SPI_TRANSFER0, 0xFF,
		     SPI_TRANSFER0_START | SPI_C_MODE0);
	rtsx_add_cmd(chip, CHECK_REG_CMD, SPI_TRANSFER0, SPI_TRANSFER0_END,
		     SPI_TRANSFER0_END);

	retval = rtsx_send_cmd(chip, 0, 100);
	if (retval < 0) {
		rtsx_clear_spi_error(chip);
		spi_set_err_code(chip, SPI_HW_ERR);
		return STATUS_FAIL;
	}

	return STATUS_SUCCESS;
}
/*
 * sf_program - queue the register writes for one flash program transfer.
 *
 * Appends (does not send) the command/length/address setup and the
 * transfer start/end check to the current rtsx command queue.  With
 * @addr_mode set, a 24-bit address phase is included (SPI_CADO_MODE0);
 * otherwise only command + data-out is used (SPI_CDO_MODE0).  The caller
 * is responsible for rtsx_init_cmd()/rtsx_send_cmd().
 */
static void sf_program(struct rtsx_chip *chip, u8 ins, u8 addr_mode, u32 addr,
		       u16 len)
{
	rtsx_add_cmd(chip, WRITE_REG_CMD, SPI_COMMAND, 0xFF, ins);
	rtsx_add_cmd(chip, WRITE_REG_CMD, SPI_CA_NUMBER, 0xFF,
		     SPI_COMMAND_BIT_8 | SPI_ADDRESS_BIT_24);
	rtsx_add_cmd(chip, WRITE_REG_CMD, SPI_LENGTH0, 0xFF, (u8)len);
	rtsx_add_cmd(chip, WRITE_REG_CMD, SPI_LENGTH1, 0xFF, (u8)(len >> 8));

	if (addr_mode) {
		rtsx_add_cmd(chip, WRITE_REG_CMD, SPI_ADDR0, 0xFF, (u8)addr);
		rtsx_add_cmd(chip, WRITE_REG_CMD, SPI_ADDR1, 0xFF,
			     (u8)(addr >> 8));
		rtsx_add_cmd(chip, WRITE_REG_CMD, SPI_ADDR2, 0xFF,
			     (u8)(addr >> 16));
		rtsx_add_cmd(chip, WRITE_REG_CMD, SPI_TRANSFER0, 0xFF,
			     SPI_TRANSFER0_START | SPI_CADO_MODE0);
	} else {
		rtsx_add_cmd(chip, WRITE_REG_CMD, SPI_TRANSFER0, 0xFF,
			     SPI_TRANSFER0_START | SPI_CDO_MODE0);
	}

	rtsx_add_cmd(chip, CHECK_REG_CMD, SPI_TRANSFER0, SPI_TRANSFER0_END,
		     SPI_TRANSFER0_END);
}
/*
 * sf_erase - issue a flash erase instruction, optionally with an address.
 *
 * With @addr_mode set, a 24-bit address phase is sent (sector/page
 * erase, SPI_CA_MODE0); otherwise the bare instruction is sent (chip
 * erase, SPI_C_MODE0).  Waits up to 100 ms for transfer completion.
 */
static int sf_erase(struct rtsx_chip *chip, u8 ins, u8 addr_mode, u32 addr)
{
	int retval;

	rtsx_init_cmd(chip);

	rtsx_add_cmd(chip, WRITE_REG_CMD, SPI_COMMAND, 0xFF, ins);
	rtsx_add_cmd(chip, WRITE_REG_CMD, SPI_CA_NUMBER, 0xFF,
		     SPI_COMMAND_BIT_8 | SPI_ADDRESS_BIT_24);

	if (addr_mode) {
		rtsx_add_cmd(chip, WRITE_REG_CMD, SPI_ADDR0, 0xFF, (u8)addr);
		rtsx_add_cmd(chip, WRITE_REG_CMD, SPI_ADDR1, 0xFF,
			     (u8)(addr >> 8));
		rtsx_add_cmd(chip, WRITE_REG_CMD, SPI_ADDR2, 0xFF,
			     (u8)(addr >> 16));
		rtsx_add_cmd(chip, WRITE_REG_CMD, SPI_TRANSFER0, 0xFF,
			     SPI_TRANSFER0_START | SPI_CA_MODE0);
	} else {
		rtsx_add_cmd(chip, WRITE_REG_CMD, SPI_TRANSFER0, 0xFF,
			     SPI_TRANSFER0_START | SPI_C_MODE0);
	}

	rtsx_add_cmd(chip, CHECK_REG_CMD, SPI_TRANSFER0, SPI_TRANSFER0_END,
		     SPI_TRANSFER0_END);

	retval = rtsx_send_cmd(chip, 0, 100);
	if (retval < 0) {
		rtsx_clear_spi_error(chip);
		spi_set_err_code(chip, SPI_HW_ERR);
		return STATUS_FAIL;
	}

	return STATUS_SUCCESS;
}
/*
 * spi_init_eeprom - set up the SPI interface for on-board EEPROM access.
 *
 * Uses a fixed clock divider (0x0027) and 30 MHz-class clock, enables
 * the SPI clock/output path, and switches the controller into EEPROM
 * auto mode with an active-high chip select.
 */
static int spi_init_eeprom(struct rtsx_chip *chip)
{
	int retval;
	int clk;

	/*
	 * NOTE(review): ASIC parts pass a raw frequency (30) to
	 * switch_clock() while FPGA parts pass the CLK_30 selector —
	 * presumably switch_clock() interprets the argument differently
	 * per variant; confirm against switch_clock().
	 */
	if (chip->asic_code)
		clk = 30;
	else
		clk = CLK_30;

	retval = rtsx_write_register(chip, SPI_CLK_DIVIDER1, 0xFF, 0x00);
	if (retval)
		return retval;
	retval = rtsx_write_register(chip, SPI_CLK_DIVIDER0, 0xFF, 0x27);
	if (retval)
		return retval;

	retval = switch_clock(chip, clk);
	if (retval != STATUS_SUCCESS)
		return STATUS_FAIL;

	retval = select_card(chip, SPI_CARD);
	if (retval != STATUS_SUCCESS)
		return STATUS_FAIL;

	retval = rtsx_write_register(chip, CARD_CLK_EN, SPI_CLK_EN,
				     SPI_CLK_EN);
	if (retval)
		return retval;
	retval = rtsx_write_register(chip, CARD_OE, SPI_OUTPUT_EN,
				     SPI_OUTPUT_EN);
	if (retval)
		return retval;

	/* Allow the clock/outputs to settle. */
	wait_timeout(10);

	retval = rtsx_write_register(chip, SPI_CONTROL, 0xFF,
				     CS_POLARITY_HIGH | SPI_EEPROM_AUTO);
	if (retval)
		return retval;
	retval = rtsx_write_register(chip, SPI_TCTL, EDO_TIMING_MASK,
				     SAMPLE_DELAY_HALF);
	if (retval)
		return retval;

	return STATUS_SUCCESS;
}
/*
 * spi_eeprom_program_enable - unlock the EEPROM for write/erase.
 *
 * Sends instruction 0x13 with a command/address bit count of 0x86
 * (presumably the EEPROM's write-enable opcode — confirm against the
 * EEPROM datasheet) in command+address mode.
 */
static int spi_eeprom_program_enable(struct rtsx_chip *chip)
{
	int retval;

	rtsx_init_cmd(chip);

	rtsx_add_cmd(chip, WRITE_REG_CMD, SPI_CA_NUMBER, 0xFF, 0x86);
	rtsx_add_cmd(chip, WRITE_REG_CMD, SPI_COMMAND, 0xFF, 0x13);
	rtsx_add_cmd(chip, WRITE_REG_CMD, SPI_TRANSFER0, 0xFF,
		     SPI_TRANSFER0_START | SPI_CA_MODE0);
	rtsx_add_cmd(chip, CHECK_REG_CMD, SPI_TRANSFER0, SPI_TRANSFER0_END,
		     SPI_TRANSFER0_END);

	retval = rtsx_send_cmd(chip, 0, 100);
	if (retval < 0)
		return STATUS_FAIL;

	return STATUS_SUCCESS;
}
/*
 * spi_erase_eeprom_chip - erase the whole on-board EEPROM.
 *
 * Initializes the EEPROM SPI path, issues program-enable, then sends
 * the chip-erase instruction (0x12) with the GPIO direction toggled low
 * for the duration of the access and restored afterwards.
 */
int spi_erase_eeprom_chip(struct rtsx_chip *chip)
{
	int retval;

	retval = spi_init_eeprom(chip);
	if (retval != STATUS_SUCCESS)
		return STATUS_FAIL;

	retval = spi_eeprom_program_enable(chip);
	if (retval != STATUS_SUCCESS)
		return STATUS_FAIL;

	rtsx_init_cmd(chip);

	rtsx_add_cmd(chip, WRITE_REG_CMD, CARD_GPIO_DIR, 0x01, 0);
	rtsx_add_cmd(chip, WRITE_REG_CMD, CARD_DATA_SOURCE, 0x01, RING_BUFFER);
	rtsx_add_cmd(chip, WRITE_REG_CMD, SPI_COMMAND, 0xFF, 0x12);
	rtsx_add_cmd(chip, WRITE_REG_CMD, SPI_CA_NUMBER, 0xFF, 0x84);
	rtsx_add_cmd(chip, WRITE_REG_CMD, SPI_TRANSFER0, 0xFF,
		     SPI_TRANSFER0_START | SPI_CA_MODE0);
	rtsx_add_cmd(chip, CHECK_REG_CMD, SPI_TRANSFER0, SPI_TRANSFER0_END,
		     SPI_TRANSFER0_END);

	retval = rtsx_send_cmd(chip, 0, 100);
	if (retval < 0)
		return STATUS_FAIL;

	/* Restore the GPIO direction after the EEPROM access. */
	retval = rtsx_write_register(chip, CARD_GPIO_DIR, 0x01, 0x01);
	if (retval)
		return retval;

	return STATUS_SUCCESS;
}
/*
 * spi_erase_eeprom_byte - erase a single byte of the on-board EEPROM.
 *
 * Same setup as spi_erase_eeprom_chip() but issues instruction 0x07
 * with a 16-bit address and CA-number 0x46.
 */
int spi_erase_eeprom_byte(struct rtsx_chip *chip, u16 addr)
{
	int retval;

	retval = spi_init_eeprom(chip);
	if (retval != STATUS_SUCCESS)
		return STATUS_FAIL;

	retval = spi_eeprom_program_enable(chip);
	if (retval != STATUS_SUCCESS)
		return STATUS_FAIL;

	rtsx_init_cmd(chip);

	rtsx_add_cmd(chip, WRITE_REG_CMD, CARD_GPIO_DIR, 0x01, 0);
	rtsx_add_cmd(chip, WRITE_REG_CMD, CARD_DATA_SOURCE, 0x01, RING_BUFFER);
	rtsx_add_cmd(chip, WRITE_REG_CMD, SPI_COMMAND, 0xFF, 0x07);
	rtsx_add_cmd(chip, WRITE_REG_CMD, SPI_ADDR0, 0xFF, (u8)addr);
	rtsx_add_cmd(chip, WRITE_REG_CMD, SPI_ADDR1, 0xFF, (u8)(addr >> 8));
	rtsx_add_cmd(chip, WRITE_REG_CMD, SPI_CA_NUMBER, 0xFF, 0x46);
	rtsx_add_cmd(chip, WRITE_REG_CMD, SPI_TRANSFER0, 0xFF,
		     SPI_TRANSFER0_START | SPI_CA_MODE0);
	rtsx_add_cmd(chip, CHECK_REG_CMD, SPI_TRANSFER0, SPI_TRANSFER0_END,
		     SPI_TRANSFER0_END);

	retval = rtsx_send_cmd(chip, 0, 100);
	if (retval < 0)
		return STATUS_FAIL;

	/* Restore the GPIO direction after the EEPROM access. */
	retval = rtsx_write_register(chip, CARD_GPIO_DIR, 0x01, 0x01);
	if (retval)
		return retval;

	return STATUS_SUCCESS;
}
/*
 * spi_read_eeprom - read one byte from the on-board EEPROM.
 * @addr: 16-bit EEPROM address.
 * @val:  output byte; may be NULL to discard the data.
 *
 * Issues instruction 0x06 with a one-byte data-in phase and fetches the
 * result from the SPI_DATA register after a short settle delay.
 */
int spi_read_eeprom(struct rtsx_chip *chip, u16 addr, u8 *val)
{
	int retval;
	u8 data;

	retval = spi_init_eeprom(chip);
	if (retval != STATUS_SUCCESS)
		return STATUS_FAIL;

	rtsx_init_cmd(chip);

	rtsx_add_cmd(chip, WRITE_REG_CMD, CARD_GPIO_DIR, 0x01, 0);
	rtsx_add_cmd(chip, WRITE_REG_CMD, CARD_DATA_SOURCE, 0x01, RING_BUFFER);
	rtsx_add_cmd(chip, WRITE_REG_CMD, SPI_COMMAND, 0xFF, 0x06);
	rtsx_add_cmd(chip, WRITE_REG_CMD, SPI_ADDR0, 0xFF, (u8)addr);
	rtsx_add_cmd(chip, WRITE_REG_CMD, SPI_ADDR1, 0xFF, (u8)(addr >> 8));
	rtsx_add_cmd(chip, WRITE_REG_CMD, SPI_CA_NUMBER, 0xFF, 0x46);
	rtsx_add_cmd(chip, WRITE_REG_CMD, SPI_LENGTH0, 0xFF, 1);
	rtsx_add_cmd(chip, WRITE_REG_CMD, SPI_TRANSFER0, 0xFF,
		     SPI_TRANSFER0_START | SPI_CADI_MODE0);
	rtsx_add_cmd(chip, CHECK_REG_CMD, SPI_TRANSFER0, SPI_TRANSFER0_END,
		     SPI_TRANSFER0_END);

	retval = rtsx_send_cmd(chip, 0, 100);
	if (retval < 0)
		return STATUS_FAIL;

	/* Give the hardware time to latch the received byte. */
	wait_timeout(5);
	retval = rtsx_read_register(chip, SPI_DATA, &data);
	if (retval)
		return retval;

	if (val)
		*val = data;

	/* Restore the GPIO direction after the EEPROM access. */
	retval = rtsx_write_register(chip, CARD_GPIO_DIR, 0x01, 0x01);
	if (retval)
		return retval;

	return STATUS_SUCCESS;
}
/*
 * spi_write_eeprom - write one byte to the on-board EEPROM.
 * @addr: 16-bit EEPROM address.
 * @val:  byte to store.
 *
 * After program-enable, issues instruction 0x05; note the data byte is
 * shifted out through SPI_ADDR0 and the address through SPI_ADDR1/2
 * (CA-number 0x4E), i.e. the "address" registers carry data+address.
 */
int spi_write_eeprom(struct rtsx_chip *chip, u16 addr, u8 val)
{
	int retval;

	retval = spi_init_eeprom(chip);
	if (retval != STATUS_SUCCESS)
		return STATUS_FAIL;

	retval = spi_eeprom_program_enable(chip);
	if (retval != STATUS_SUCCESS)
		return STATUS_FAIL;

	rtsx_init_cmd(chip);

	rtsx_add_cmd(chip, WRITE_REG_CMD, CARD_GPIO_DIR, 0x01, 0);
	rtsx_add_cmd(chip, WRITE_REG_CMD, CARD_DATA_SOURCE, 0x01, RING_BUFFER);
	rtsx_add_cmd(chip, WRITE_REG_CMD, SPI_COMMAND, 0xFF, 0x05);
	rtsx_add_cmd(chip, WRITE_REG_CMD, SPI_ADDR0, 0xFF, val);
	rtsx_add_cmd(chip, WRITE_REG_CMD, SPI_ADDR1, 0xFF, (u8)addr);
	rtsx_add_cmd(chip, WRITE_REG_CMD, SPI_ADDR2, 0xFF, (u8)(addr >> 8));
	rtsx_add_cmd(chip, WRITE_REG_CMD, SPI_CA_NUMBER, 0xFF, 0x4E);
	rtsx_add_cmd(chip, WRITE_REG_CMD, SPI_TRANSFER0, 0xFF,
		     SPI_TRANSFER0_START | SPI_CA_MODE0);
	rtsx_add_cmd(chip, CHECK_REG_CMD, SPI_TRANSFER0, SPI_TRANSFER0_END,
		     SPI_TRANSFER0_END);

	retval = rtsx_send_cmd(chip, 0, 100);
	if (retval < 0)
		return STATUS_FAIL;

	/* Restore the GPIO direction after the EEPROM access. */
	retval = rtsx_write_register(chip, CARD_GPIO_DIR, 0x01, 0x01);
	if (retval)
		return retval;

	return STATUS_SUCCESS;
}
/*
 * spi_get_status - report the last recorded SPI error code to the host.
 *
 * Copies the single error-code byte into the SCSI transfer buffer
 * (clamped to the buffer length) and sets the residue accordingly.
 */
int spi_get_status(struct scsi_cmnd *srb, struct rtsx_chip *chip)
{
	struct spi_info *spi = &chip->spi;
	int copy_len = min_t(int, scsi_bufflen(srb), 1);

	dev_dbg(rtsx_dev(chip), "%s: err_code = 0x%x\n", __func__,
		spi->err_code);

	rtsx_stor_set_xfer_buf(&spi->err_code, copy_len, srb);
	scsi_set_resid(srb, scsi_bufflen(srb) - 1);

	return STATUS_SUCCESS;
}
int spi_set_parameter(struct scsi_cmnd *srb, struct rtsx_chip *chip)
{
struct spi_info *spi = &chip->spi;
spi_set_err_code(chip, SPI_NO_ERR);
if (chip->asic_code)
spi->spi_clock = ((u16)(srb->cmnd[8]) << 8) | srb->cmnd[9];
else
spi->spi_clock = srb->cmnd[3];
spi->clk_div = ((u16)(srb->cmnd[4]) << 8) | srb->cmnd[5];
spi->write_en = srb->cmnd[6];
dev_dbg(rtsx_dev(chip), "spi_clock = %d, clk_div = %d, write_en = %d\n",
spi->spi_clock, spi->clk_div, spi->write_en);
return STATUS_SUCCESS;
}
int spi_read_flash_id(struct scsi_cmnd *srb, struct rtsx_chip *chip)
{
int retval;
u16 len;
u8 *buf;
spi_set_err_code(chip, SPI_NO_ERR);
len = ((u16)(srb->cmnd[7]) << 8) | srb->cmnd[8];
if (len > 512) {
spi_set_err_code(chip, SPI_INVALID_COMMAND);
return STATUS_FAIL;
}
retval = spi_set_init_para(chip);
if (retval != STATUS_SUCCESS) {
spi_set_err_code(chip, SPI_HW_ERR);
return STATUS_FAIL;
}
rtsx_init_cmd(chip);
rtsx_add_cmd(chip, WRITE_REG_CMD, CARD_DATA_SOURCE, 0x01,
PINGPONG_BUFFER);
rtsx_add_cmd(chip, WRITE_REG_CMD, SPI_COMMAND, 0xFF, srb->cmnd[3]);
rtsx_add_cmd(chip, WRITE_REG_CMD, SPI_ADDR2, 0xFF, srb->cmnd[4]);
rtsx_add_cmd(chip, WRITE_REG_CMD, SPI_ADDR1, 0xFF, srb->cmnd[5]);
rtsx_add_cmd(chip, WRITE_REG_CMD, SPI_ADDR0, 0xFF, srb->cmnd[6]);
rtsx_add_cmd(chip, WRITE_REG_CMD, SPI_CA_NUMBER, 0xFF,
SPI_COMMAND_BIT_8 | SPI_ADDRESS_BIT_24);
rtsx_add_cmd(chip, WRITE_REG_CMD, SPI_LENGTH1, 0xFF, srb->cmnd[7]);
rtsx_add_cmd(chip, WRITE_REG_CMD, SPI_LENGTH0, 0xFF, srb->cmnd[8]);
if (len == 0) {
if (srb->cmnd[9]) {
rtsx_add_cmd(chip, WRITE_REG_CMD, SPI_TRANSFER0,
0xFF, SPI_TRANSFER0_START | SPI_CA_MODE0);
} else {
rtsx_add_cmd(chip, WRITE_REG_CMD, SPI_TRANSFER0,
0xFF, SPI_TRANSFER0_START | SPI_C_MODE0);
}
} else {
if (srb->cmnd[9]) {
rtsx_add_cmd(chip, WRITE_REG_CMD, SPI_TRANSFER0, 0xFF,
SPI_TRANSFER0_START | SPI_CADI_MODE0);
} else {
rtsx_add_cmd(chip, WRITE_REG_CMD, SPI_TRANSFER0, 0xFF,
SPI_TRANSFER0_START | SPI_CDI_MODE0);
}
}
rtsx_add_cmd(chip, CHECK_REG_CMD, SPI_TRANSFER0, SPI_TRANSFER0_END,
SPI_TRANSFER0_END);
retval = rtsx_send_cmd(chip, 0, 100);
if (retval < 0) {
rtsx_clear_spi_error(chip);
spi_set_err_code(chip, SPI_HW_ERR);
return STATUS_FAIL;
}
if (len) {
buf = kmalloc(len, GFP_KERNEL);
if (!buf)
return STATUS_ERROR;
retval = rtsx_read_ppbuf(chip, buf, len);
if (retval != STATUS_SUCCESS) {
spi_set_err_code(chip, SPI_READ_ERR);
kfree(buf);
return STATUS_FAIL;
}
rtsx_stor_set_xfer_buf(buf, scsi_bufflen(srb), srb);
scsi_set_resid(srb, 0);
kfree(buf);
}
return STATUS_SUCCESS;
}
/*
 * spi_read_flash - read an arbitrary range from the SPI flash via DMA.
 *
 * CDB layout: cmnd[3] = read instruction, cmnd[4..6] = 24-bit start
 * address, cmnd[7..8] = length, cmnd[9] non-zero selects "slow read"
 * (24-bit address phase); fast read uses a 32-bit address field, i.e.
 * the extra byte presumably serves as the fast-read dummy cycle — TODO
 * confirm against the controller documentation.
 *
 * Data is transferred page by page (SF_PAGE_LEN) through a bounce
 * buffer and copied into the SCSI scatter list.
 */
int spi_read_flash(struct scsi_cmnd *srb, struct rtsx_chip *chip)
{
	int retval;
	unsigned int index = 0, offset = 0;
	u8 ins, slow_read;
	u32 addr;
	u16 len;
	u8 *buf;

	spi_set_err_code(chip, SPI_NO_ERR);

	ins = srb->cmnd[3];
	addr = ((u32)(srb->cmnd[4]) << 16) | ((u32)(srb->cmnd[5])
					<< 8) | srb->cmnd[6];
	len = ((u16)(srb->cmnd[7]) << 8) | srb->cmnd[8];
	slow_read = srb->cmnd[9];

	retval = spi_set_init_para(chip);
	if (retval != STATUS_SUCCESS) {
		spi_set_err_code(chip, SPI_HW_ERR);
		return STATUS_FAIL;
	}

	buf = kmalloc(SF_PAGE_LEN, GFP_KERNEL);
	if (!buf)
		return STATUS_ERROR;

	while (len) {
		/* Bytes remaining in the current flash page. */
		u16 pagelen = SF_PAGE_LEN - (u8)addr;

		if (pagelen > len)
			pagelen = len;

		rtsx_init_cmd(chip);

		trans_dma_enable(DMA_FROM_DEVICE, chip, 256, DMA_256);

		rtsx_add_cmd(chip, WRITE_REG_CMD, SPI_COMMAND, 0xFF, ins);

		if (slow_read) {
			rtsx_add_cmd(chip, WRITE_REG_CMD, SPI_ADDR0, 0xFF,
				     (u8)addr);
			rtsx_add_cmd(chip, WRITE_REG_CMD, SPI_ADDR1, 0xFF,
				     (u8)(addr >> 8));
			rtsx_add_cmd(chip, WRITE_REG_CMD, SPI_ADDR2, 0xFF,
				     (u8)(addr >> 16));
			rtsx_add_cmd(chip, WRITE_REG_CMD, SPI_CA_NUMBER, 0xFF,
				     SPI_COMMAND_BIT_8 | SPI_ADDRESS_BIT_24);
		} else {
			/* 32-bit address field: address shifted up one byte. */
			rtsx_add_cmd(chip, WRITE_REG_CMD, SPI_ADDR1, 0xFF,
				     (u8)addr);
			rtsx_add_cmd(chip, WRITE_REG_CMD, SPI_ADDR2, 0xFF,
				     (u8)(addr >> 8));
			rtsx_add_cmd(chip, WRITE_REG_CMD, SPI_ADDR3, 0xFF,
				     (u8)(addr >> 16));
			rtsx_add_cmd(chip, WRITE_REG_CMD, SPI_CA_NUMBER, 0xFF,
				     SPI_COMMAND_BIT_8 | SPI_ADDRESS_BIT_32);
		}

		rtsx_add_cmd(chip, WRITE_REG_CMD, SPI_LENGTH1, 0xFF,
			     (u8)(pagelen >> 8));
		rtsx_add_cmd(chip, WRITE_REG_CMD, SPI_LENGTH0, 0xFF,
			     (u8)pagelen);

		rtsx_add_cmd(chip, WRITE_REG_CMD, SPI_TRANSFER0, 0xFF,
			     SPI_TRANSFER0_START | SPI_CADI_MODE0);
		rtsx_add_cmd(chip, CHECK_REG_CMD, SPI_TRANSFER0,
			     SPI_TRANSFER0_END, SPI_TRANSFER0_END);

		rtsx_send_cmd_no_wait(chip);

		retval = rtsx_transfer_data(chip, 0, buf, pagelen, 0,
					    DMA_FROM_DEVICE, 10000);
		if (retval < 0) {
			kfree(buf);
			rtsx_clear_spi_error(chip);
			spi_set_err_code(chip, SPI_HW_ERR);
			return STATUS_FAIL;
		}

		rtsx_stor_access_xfer_buf(buf, pagelen, srb, &index, &offset,
					  TO_XFER_BUF);

		addr += pagelen;
		len -= pagelen;
	}

	scsi_set_resid(srb, 0);
	kfree(buf);

	return STATUS_SUCCESS;
}
/*
 * spi_write_flash - program data into the SPI flash.
 *
 * CDB layout: cmnd[3] = program instruction, cmnd[4..6] = 24-bit start
 * address, cmnd[7..8] = length, cmnd[9] = programming mode:
 *
 *  BYTE_PROGRAM - write-enable + program + status-poll per byte;
 *  AAI_PROGRAM  - single write-enable, then auto-address-increment
 *                 byte writes (address sent only for the first byte),
 *                 ended with write-disable;
 *  PAGE_PROGRAM - DMA page-sized chunks, write-enable + poll per page.
 *
 * NOTE(review): kmalloc failure returns STATUS_ERROR in the first two
 * modes but STATUS_NOMEM in PAGE_PROGRAM — inconsistent, though callers
 * may only test against STATUS_SUCCESS; confirm before unifying.
 */
int spi_write_flash(struct scsi_cmnd *srb, struct rtsx_chip *chip)
{
	int retval;
	u8 ins, program_mode;
	u32 addr;
	u16 len;
	u8 *buf;
	unsigned int index = 0, offset = 0;

	spi_set_err_code(chip, SPI_NO_ERR);

	ins = srb->cmnd[3];
	addr = ((u32)(srb->cmnd[4]) << 16) | ((u32)(srb->cmnd[5])
					<< 8) | srb->cmnd[6];
	len = ((u16)(srb->cmnd[7]) << 8) | srb->cmnd[8];
	program_mode = srb->cmnd[9];

	retval = spi_set_init_para(chip);
	if (retval != STATUS_SUCCESS) {
		spi_set_err_code(chip, SPI_HW_ERR);
		return STATUS_FAIL;
	}

	if (program_mode == BYTE_PROGRAM) {
		/* One write-enable/program/poll cycle per byte. */
		buf = kmalloc(4, GFP_KERNEL);
		if (!buf)
			return STATUS_ERROR;

		while (len) {
			retval = sf_enable_write(chip, SPI_WREN);
			if (retval != STATUS_SUCCESS) {
				kfree(buf);
				return STATUS_FAIL;
			}

			rtsx_stor_access_xfer_buf(buf, 1, srb, &index, &offset,
						  FROM_XFER_BUF);

			rtsx_init_cmd(chip);

			rtsx_add_cmd(chip, WRITE_REG_CMD, CARD_DATA_SOURCE,
				     0x01, PINGPONG_BUFFER);
			rtsx_add_cmd(chip, WRITE_REG_CMD, PPBUF_BASE2, 0xFF,
				     buf[0]);
			sf_program(chip, ins, 1, addr, 1);

			retval = rtsx_send_cmd(chip, 0, 100);
			if (retval < 0) {
				kfree(buf);
				rtsx_clear_spi_error(chip);
				spi_set_err_code(chip, SPI_HW_ERR);
				return STATUS_FAIL;
			}

			retval = sf_polling_status(chip, 100);
			if (retval != STATUS_SUCCESS) {
				kfree(buf);
				return STATUS_FAIL;
			}

			addr++;
			len--;
		}

		kfree(buf);
	} else if (program_mode == AAI_PROGRAM) {
		/* Auto-address-increment: address only on the first byte. */
		int first_byte = 1;

		retval = sf_enable_write(chip, SPI_WREN);
		if (retval != STATUS_SUCCESS)
			return STATUS_FAIL;

		buf = kmalloc(4, GFP_KERNEL);
		if (!buf)
			return STATUS_ERROR;

		while (len) {
			rtsx_stor_access_xfer_buf(buf, 1, srb, &index, &offset,
						  FROM_XFER_BUF);

			rtsx_init_cmd(chip);

			rtsx_add_cmd(chip, WRITE_REG_CMD, CARD_DATA_SOURCE,
				     0x01, PINGPONG_BUFFER);
			rtsx_add_cmd(chip, WRITE_REG_CMD, PPBUF_BASE2, 0xFF,
				     buf[0]);
			if (first_byte) {
				sf_program(chip, ins, 1, addr, 1);
				first_byte = 0;
			} else {
				sf_program(chip, ins, 0, 0, 1);
			}

			retval = rtsx_send_cmd(chip, 0, 100);
			if (retval < 0) {
				kfree(buf);
				rtsx_clear_spi_error(chip);
				spi_set_err_code(chip, SPI_HW_ERR);
				return STATUS_FAIL;
			}

			retval = sf_polling_status(chip, 100);
			if (retval != STATUS_SUCCESS) {
				kfree(buf);
				return STATUS_FAIL;
			}

			len--;
		}

		kfree(buf);

		/* AAI mode must be terminated with a write-disable. */
		retval = sf_disable_write(chip, SPI_WRDI);
		if (retval != STATUS_SUCCESS)
			return STATUS_FAIL;

		retval = sf_polling_status(chip, 100);
		if (retval != STATUS_SUCCESS)
			return STATUS_FAIL;
	} else if (program_mode == PAGE_PROGRAM) {
		/* DMA a page (or the tail of one) at a time. */
		buf = kmalloc(SF_PAGE_LEN, GFP_KERNEL);
		if (!buf)
			return STATUS_NOMEM;

		while (len) {
			/* Bytes remaining in the current flash page. */
			u16 pagelen = SF_PAGE_LEN - (u8)addr;

			if (pagelen > len)
				pagelen = len;

			retval = sf_enable_write(chip, SPI_WREN);
			if (retval != STATUS_SUCCESS) {
				kfree(buf);
				return STATUS_FAIL;
			}

			rtsx_init_cmd(chip);

			trans_dma_enable(DMA_TO_DEVICE, chip, 256, DMA_256);
			sf_program(chip, ins, 1, addr, pagelen);

			rtsx_send_cmd_no_wait(chip);

			rtsx_stor_access_xfer_buf(buf, pagelen, srb, &index,
						  &offset, FROM_XFER_BUF);

			retval = rtsx_transfer_data(chip, 0, buf, pagelen, 0,
						    DMA_TO_DEVICE, 100);
			if (retval < 0) {
				kfree(buf);
				rtsx_clear_spi_error(chip);
				spi_set_err_code(chip, SPI_HW_ERR);
				return STATUS_FAIL;
			}

			retval = sf_polling_status(chip, 100);
			if (retval != STATUS_SUCCESS) {
				kfree(buf);
				return STATUS_FAIL;
			}

			addr += pagelen;
			len -= pagelen;
		}

		kfree(buf);
	} else {
		spi_set_err_code(chip, SPI_INVALID_COMMAND);
		return STATUS_FAIL;
	}

	return STATUS_SUCCESS;
}
int spi_erase_flash(struct scsi_cmnd *srb, struct rtsx_chip *chip)
{
int retval;
u8 ins, erase_mode;
u32 addr;
spi_set_err_code(chip, SPI_NO_ERR);
ins = srb->cmnd[3];
addr = ((u32)(srb->cmnd[4]) << 16) | ((u32)(srb->cmnd[5])
<< 8) | srb->cmnd[6];
erase_mode = srb->cmnd[9];
retval = spi_set_init_para(chip);
if (retval != STATUS_SUCCESS) {
spi_set_err_code(chip, SPI_HW_ERR);
return STATUS_FAIL;
}
if (erase_mode == PAGE_ERASE) {
retval = sf_enable_write(chip, SPI_WREN);
if (retval != STATUS_SUCCESS)
return STATUS_FAIL;
retval = sf_erase(chip, ins, 1, addr);
if (retval != STATUS_SUCCESS)
return STATUS_FAIL;
} else if (erase_mode == CHIP_ERASE) {
retval = sf_enable_write(chip, SPI_WREN);
if (retval != STATUS_SUCCESS)
return STATUS_FAIL;
retval = sf_erase(chip, ins, 0, 0);
if (retval != STATUS_SUCCESS)
return STATUS_FAIL;
} else {
spi_set_err_code(chip, SPI_INVALID_COMMAND);
return STATUS_FAIL;
}
return STATUS_SUCCESS;
}
/*
 * spi_write_flash_status - write the SPI flash status register.
 *
 * CDB layout: cmnd[3] = WRSR-style instruction, cmnd[4] = status value,
 * cmnd[5] = enable-write-status instruction (e.g. EWSR) issued first.
 *
 * NOTE(review): the rtsx_send_cmd() result is compared against
 * STATUS_SUCCESS here while every other call site in this file tests
 * `retval < 0` — verify both conventions agree for positive returns.
 */
int spi_write_flash_status(struct scsi_cmnd *srb, struct rtsx_chip *chip)
{
	int retval;
	u8 ins, status, ewsr;

	ins = srb->cmnd[3];
	status = srb->cmnd[4];
	ewsr = srb->cmnd[5];

	retval = spi_set_init_para(chip);
	if (retval != STATUS_SUCCESS) {
		spi_set_err_code(chip, SPI_HW_ERR);
		return STATUS_FAIL;
	}

	retval = sf_enable_write(chip, ewsr);
	if (retval != STATUS_SUCCESS)
		return STATUS_FAIL;

	rtsx_init_cmd(chip);

	rtsx_add_cmd(chip, WRITE_REG_CMD, CARD_DATA_SOURCE, 0x01,
		     PINGPONG_BUFFER);

	rtsx_add_cmd(chip, WRITE_REG_CMD, SPI_COMMAND, 0xFF, ins);
	rtsx_add_cmd(chip, WRITE_REG_CMD, SPI_CA_NUMBER, 0xFF,
		     SPI_COMMAND_BIT_8 | SPI_ADDRESS_BIT_24);
	rtsx_add_cmd(chip, WRITE_REG_CMD, SPI_LENGTH1, 0xFF, 0);
	rtsx_add_cmd(chip, WRITE_REG_CMD, SPI_LENGTH0, 0xFF, 1);
	rtsx_add_cmd(chip, WRITE_REG_CMD, PPBUF_BASE2, 0xFF, status);

	rtsx_add_cmd(chip, WRITE_REG_CMD, SPI_TRANSFER0, 0xFF,
		     SPI_TRANSFER0_START | SPI_CDO_MODE0);
	rtsx_add_cmd(chip, CHECK_REG_CMD, SPI_TRANSFER0, SPI_TRANSFER0_END,
		     SPI_TRANSFER0_END);

	retval = rtsx_send_cmd(chip, 0, 100);
	if (retval != STATUS_SUCCESS) {
		rtsx_clear_spi_error(chip);
		spi_set_err_code(chip, SPI_HW_ERR);
		return STATUS_FAIL;
	}

	return STATUS_SUCCESS;
}
| linux-master | drivers/staging/rts5208/spi.c |
// SPDX-License-Identifier: GPL-2.0+
/*
* Driver for Realtek PCI-Express card reader
*
* Copyright(c) 2009-2013 Realtek Semiconductor Corp. All rights reserved.
*
* Author:
* Wei WANG ([email protected])
* Micky Ching ([email protected])
*/
#include <linux/blkdev.h>
#include <linux/kthread.h>
#include <linux/sched.h>
#include <linux/workqueue.h>
#include <linux/kernel.h>
#include "rtsx.h"
#include "sd.h"
#include "xd.h"
#include "ms.h"
/*
 * do_remaining_work - periodic housekeeping for all card types.
 *
 * For each ready card with a deferred operation pending (sequential
 * transfer in progress, or a delayed write buffered), keep the chip in
 * the RUN state and bump a per-card idle counter; once a counter passes
 * POLLING_WAIT_CNT, invoke the matching cleanup handler to flush/stop
 * the deferred operation.
 */
void do_remaining_work(struct rtsx_chip *chip)
{
	struct sd_info *sd_card = &chip->sd_card;
#ifdef XD_DELAY_WRITE
	struct xd_info *xd_card = &chip->xd_card;
#endif
	struct ms_info *ms_card = &chip->ms_card;

	if (chip->card_ready & SD_CARD) {
		if (sd_card->seq_mode) {
			rtsx_set_stat(chip, RTSX_STAT_RUN);
			sd_card->cleanup_counter++;
		} else {
			sd_card->cleanup_counter = 0;
		}
	}

#ifdef XD_DELAY_WRITE
	if (chip->card_ready & XD_CARD) {
		if (xd_card->delay_write.delay_write_flag) {
			rtsx_set_stat(chip, RTSX_STAT_RUN);
			xd_card->cleanup_counter++;
		} else {
			xd_card->cleanup_counter = 0;
		}
	}
#endif

	if (chip->card_ready & MS_CARD) {
		if (CHK_MSPRO(ms_card)) {
			if (ms_card->seq_mode) {
				rtsx_set_stat(chip, RTSX_STAT_RUN);
				ms_card->cleanup_counter++;
			} else {
				ms_card->cleanup_counter = 0;
			}
		} else {
#ifdef MS_DELAY_WRITE
			if (ms_card->delay_write.delay_write_flag) {
				rtsx_set_stat(chip, RTSX_STAT_RUN);
				ms_card->cleanup_counter++;
			} else {
				ms_card->cleanup_counter = 0;
			}
#endif
		}
	}

	if (sd_card->cleanup_counter > POLLING_WAIT_CNT)
		sd_cleanup_work(chip);

#ifdef XD_DELAY_WRITE
	/*
	 * xd_card is only declared when XD_DELAY_WRITE is defined, and the
	 * counter is only ever advanced under the same option, so guard
	 * this use too — otherwise the file fails to build with the
	 * option disabled.
	 */
	if (xd_card->cleanup_counter > POLLING_WAIT_CNT)
		xd_cleanup_work(chip);
#endif

	if (ms_card->cleanup_counter > POLLING_WAIT_CNT)
		ms_cleanup_work(chip);
}
/*
 * try_to_switch_sdio_ctrl - hand the SD bus over to the SDIO function.
 *
 * Samples two vendor registers (0xFF34/0xFF38); if both report the
 * expected bits (0xC0), flags SDIO interrupt mode, routes the bus and
 * card-detect to SDIO, and turns the LDO on.
 *
 * NOTE(review): the rtsx_read_register()/rtsx_write_register() return
 * values are deliberately ignored here — best-effort switch.
 */
void try_to_switch_sdio_ctrl(struct rtsx_chip *chip)
{
	u8 reg1 = 0, reg2 = 0;

	rtsx_read_register(chip, 0xFF34, &reg1);
	rtsx_read_register(chip, 0xFF38, &reg2);
	dev_dbg(rtsx_dev(chip), "reg 0xFF34: 0x%x, reg 0xFF38: 0x%x\n",
		reg1, reg2);
	if ((reg1 & 0xC0) && (reg2 & 0xC0)) {
		chip->sd_int = 1;
		rtsx_write_register(chip, SDIO_CTRL, 0xFF,
				    SDIO_BUS_CTRL | SDIO_CD_CTRL);
		rtsx_write_register(chip, PWR_GATE_CTRL,
				    LDO3318_PWR_MASK, LDO_ON);
	}
}
#ifdef SUPPORT_SDIO_ASPM
/*
 * dynamic_configure_sdio_aspm - enter/exit ASPM based on SDIO activity.
 *
 * Samples 12 bytes of SDIO state (0xFF08..0xFF13) plus register 0xFF25
 * each call; if they are unchanged for SDIO_IDLE_COUNT consecutive
 * calls the link is considered idle and ASPM is forced on, otherwise
 * ASPM is forced off again.  Register read/write errors are ignored
 * (best-effort power management).
 */
void dynamic_configure_sdio_aspm(struct rtsx_chip *chip)
{
	u8 buf[12], reg;
	int i;

	for (i = 0; i < 12; i++)
		rtsx_read_register(chip, 0xFF08 + i, &buf[i]);
	rtsx_read_register(chip, 0xFF25, &reg);
	if ((memcmp(buf, chip->sdio_raw_data, 12) != 0) || (reg & 0x03)) {
		/* Activity detected: restart the idle countdown. */
		chip->sdio_counter = 0;
		chip->sdio_idle = 0;
	} else {
		if (!chip->sdio_idle) {
			chip->sdio_counter++;
			if (chip->sdio_counter >= SDIO_IDLE_COUNT) {
				chip->sdio_counter = 0;
				chip->sdio_idle = 1;
			}
		}
	}
	/* Remember this sample for the next comparison. */
	memcpy(chip->sdio_raw_data, buf, 12);
	if (chip->sdio_idle) {
		if (!chip->sdio_aspm) {
			dev_dbg(rtsx_dev(chip), "SDIO enter ASPM!\n");
			rtsx_write_register(chip, ASPM_FORCE_CTL, 0xFC,
					    0x30 | (chip->aspm_level[1] << 2));
			chip->sdio_aspm = 1;
		}
	} else {
		if (chip->sdio_aspm) {
			dev_dbg(rtsx_dev(chip), "SDIO exit ASPM!\n");
			rtsx_write_register(chip, ASPM_FORCE_CTL, 0xFC, 0x30);
			chip->sdio_aspm = 0;
		}
	}
}
#endif
/*
 * do_reset_sd_card - retry-aware SD card (re)initialization.
 *
 * Attempts reset_sd_card(); on success marks the card ready and installs
 * the read/write handler.  On failure, retries up to MAX_RESET_CNT times
 * (never retrying for SDIO cards), powers the slot down and either hands
 * control to the SDIO path or gates the card clock.
 */
void do_reset_sd_card(struct rtsx_chip *chip)
{
	int retval;

	dev_dbg(rtsx_dev(chip), "%s: %d, card2lun = 0x%x\n", __func__,
		chip->sd_reset_counter, chip->card2lun[SD_CARD]);

	/* No LUN is mapped to SD: nothing to reset. */
	if (chip->card2lun[SD_CARD] >= MAX_ALLOWED_LUN_CNT) {
		clear_bit(SD_NR, &chip->need_reset);
		chip->sd_reset_counter = 0;
		chip->sd_show_cnt = 0;
		return;
	}

	chip->rw_fail_cnt[chip->card2lun[SD_CARD]] = 0;

	rtsx_set_stat(chip, RTSX_STAT_RUN);
	rtsx_write_register(chip, SDIO_CTRL, 0xFF, 0);

	retval = reset_sd_card(chip);

	/* Card was pulled while we were resetting: bail out. */
	if (chip->need_release & SD_CARD)
		return;

	if (retval == STATUS_SUCCESS) {
		clear_bit(SD_NR, &chip->need_reset);
		chip->sd_reset_counter = 0;
		chip->sd_show_cnt = 0;
		chip->card_ready |= SD_CARD;
		chip->card_fail &= ~SD_CARD;
		chip->rw_card[chip->card2lun[SD_CARD]] = sd_rw;
	} else {
		if (chip->sd_io || chip->sd_reset_counter >= MAX_RESET_CNT) {
			clear_bit(SD_NR, &chip->need_reset);
			chip->sd_reset_counter = 0;
			chip->sd_show_cnt = 0;
		} else {
			chip->sd_reset_counter++;
		}
		chip->card_ready &= ~SD_CARD;
		chip->card_fail |= SD_CARD;
		chip->capacity[chip->card2lun[SD_CARD]] = 0;
		chip->rw_card[chip->card2lun[SD_CARD]] = NULL;

		rtsx_write_register(chip, CARD_OE, SD_OUTPUT_EN, 0);
		if (!chip->ft2_fast_mode)
			card_power_off(chip, SD_CARD);
		if (chip->sd_io) {
			/* Looks like an SDIO card: try the SDIO data path. */
			chip->sd_int = 0;
			try_to_switch_sdio_ctrl(chip);
		} else {
			disable_card_clock(chip, SD_CARD);
		}
	}
}
/*
 * do_reset_xd_card - attempt to (re)initialize an inserted xD card.
 *
 * Mirrors do_reset_sd_card() without the SDIO special cases; gives up
 * after MAX_RESET_CNT consecutive failures.
 */
void do_reset_xd_card(struct rtsx_chip *chip)
{
	unsigned int lun = chip->card2lun[XD_CARD];
	int retval;

	dev_dbg(rtsx_dev(chip), "%s: %d, card2lun = 0x%x\n", __func__,
		chip->xd_reset_counter, chip->card2lun[XD_CARD]);

	/* Slot not mapped to any LUN: drop the pending reset request. */
	if (lun >= MAX_ALLOWED_LUN_CNT) {
		clear_bit(XD_NR, &chip->need_reset);
		chip->xd_reset_counter = 0;
		chip->xd_show_cnt = 0;
		return;
	}

	chip->rw_fail_cnt[lun] = 0;

	rtsx_set_stat(chip, RTSX_STAT_RUN);
	rtsx_write_register(chip, SDIO_CTRL, 0xFF, 0);

	retval = reset_xd_card(chip);
	/* A release request raised during reset takes priority; bail out. */
	if (chip->need_release & XD_CARD)
		return;

	if (retval == STATUS_SUCCESS) {
		clear_bit(XD_NR, &chip->need_reset);
		chip->xd_reset_counter = 0;
		chip->card_ready |= XD_CARD;
		chip->card_fail &= ~XD_CARD;
		chip->rw_card[lun] = xd_rw;
		return;
	}

	/* Reset failed: give up after too many attempts. */
	if (chip->xd_reset_counter >= MAX_RESET_CNT) {
		clear_bit(XD_NR, &chip->need_reset);
		chip->xd_reset_counter = 0;
		chip->xd_show_cnt = 0;
	} else {
		chip->xd_reset_counter++;
	}

	chip->card_ready &= ~XD_CARD;
	chip->card_fail |= XD_CARD;
	chip->capacity[lun] = 0;
	chip->rw_card[lun] = NULL;

	rtsx_write_register(chip, CARD_OE, XD_OUTPUT_EN, 0);
	if (!chip->ft2_fast_mode)
		card_power_off(chip, XD_CARD);
	disable_card_clock(chip, XD_CARD);
}
/*
 * do_reset_ms_card - attempt to (re)initialize an inserted Memory Stick.
 *
 * Mirrors do_reset_xd_card(); gives up after MAX_RESET_CNT consecutive
 * failures.
 */
void do_reset_ms_card(struct rtsx_chip *chip)
{
	unsigned int lun = chip->card2lun[MS_CARD];
	int retval;

	dev_dbg(rtsx_dev(chip), "%s: %d, card2lun = 0x%x\n", __func__,
		chip->ms_reset_counter, chip->card2lun[MS_CARD]);

	/* Slot not mapped to any LUN: drop the pending reset request. */
	if (lun >= MAX_ALLOWED_LUN_CNT) {
		clear_bit(MS_NR, &chip->need_reset);
		chip->ms_reset_counter = 0;
		chip->ms_show_cnt = 0;
		return;
	}

	chip->rw_fail_cnt[lun] = 0;

	rtsx_set_stat(chip, RTSX_STAT_RUN);
	rtsx_write_register(chip, SDIO_CTRL, 0xFF, 0);

	retval = reset_ms_card(chip);
	/* A release request raised during reset takes priority; bail out. */
	if (chip->need_release & MS_CARD)
		return;

	if (retval == STATUS_SUCCESS) {
		clear_bit(MS_NR, &chip->need_reset);
		chip->ms_reset_counter = 0;
		chip->card_ready |= MS_CARD;
		chip->card_fail &= ~MS_CARD;
		chip->rw_card[lun] = ms_rw;
		return;
	}

	/* Reset failed: give up after too many attempts. */
	if (chip->ms_reset_counter >= MAX_RESET_CNT) {
		clear_bit(MS_NR, &chip->need_reset);
		chip->ms_reset_counter = 0;
		chip->ms_show_cnt = 0;
	} else {
		chip->ms_reset_counter++;
	}

	chip->card_ready &= ~MS_CARD;
	chip->card_fail |= MS_CARD;
	chip->capacity[lun] = 0;
	chip->rw_card[lun] = NULL;

	rtsx_write_register(chip, CARD_OE, MS_OUTPUT_EN, 0);
	if (!chip->ft2_fast_mode)
		card_power_off(chip, MS_CARD);
	disable_card_clock(chip, MS_CARD);
}
/*
 * release_sdio - tear down SDIO pass-through state for the SD slot.
 *
 * No-op unless an SDIO function was active (chip->sd_io).
 */
static void release_sdio(struct rtsx_chip *chip)
{
	if (!chip->sd_io)
		return;

	rtsx_write_register(chip, CARD_STOP, SD_STOP | SD_CLR_ERR,
			    SD_STOP | SD_CLR_ERR);

	/* Undo the insert-with-SDIO setup; the register differs per PID. */
	if (chip->chip_insert_with_sdio) {
		chip->chip_insert_with_sdio = 0;

		if (CHECK_PID(chip, 0x5288))
			rtsx_write_register(chip, 0xFE5A, 0x08, 0x00);
		else
			rtsx_write_register(chip, 0xFE70, 0x80, 0x00);
	}

	rtsx_write_register(chip, SDIO_CTRL, SDIO_CD_CTRL, 0);
	chip->sd_io = 0;
}
/*
 * rtsx_power_off_card - flush pending work and power down each active slot.
 *
 * Only slots marked ready (or the SD slot while SDIO is active) are
 * touched; order is SD, xD, MS.
 */
void rtsx_power_off_card(struct rtsx_chip *chip)
{
	/* SD slot is also handled when it hosts an SDIO function */
	if ((chip->card_ready & SD_CARD) || chip->sd_io) {
		sd_cleanup_work(chip);
		sd_power_off_card3v3(chip);
	}
	if (chip->card_ready & XD_CARD) {
		xd_cleanup_work(chip);
		xd_power_off_card3v3(chip);
	}
	if (chip->card_ready & MS_CARD) {
		ms_cleanup_work(chip);
		ms_power_off_card3v3(chip);
	}
}
/*
 * rtsx_release_cards - release every ready card slot.
 *
 * Latches the card-detect bits into chip->int_reg (a deliberate side
 * effect other paths read) and flushes pending work only for cards that
 * are still physically present before releasing each slot.
 */
void rtsx_release_cards(struct rtsx_chip *chip)
{
	chip->int_reg = rtsx_readl(chip, RTSX_BIPR);
	if ((chip->card_ready & SD_CARD) || chip->sd_io) {
		/* Only flush if the card is still inserted */
		if (chip->int_reg & SD_EXIST)
			sd_cleanup_work(chip);
		release_sd_card(chip);
	}
	if (chip->card_ready & XD_CARD) {
		if (chip->int_reg & XD_EXIST)
			xd_cleanup_work(chip);
		release_xd_card(chip);
	}
	if (chip->card_ready & MS_CARD) {
		if (chip->int_reg & MS_EXIST)
			ms_cleanup_work(chip);
		release_ms_card(chip);
	}
}
/*
 * rtsx_reset_cards - service the pending per-card reset requests.
 *
 * Each card type is only reset after its "show" counter reaches
 * MAX_SHOW_CNT, which debounces newly inserted cards across polling
 * cycles.  Order matters: xD is handled first because on the 5288 QFN
 * package an xD card suppresses the SD/MS resets below.
 */
void rtsx_reset_cards(struct rtsx_chip *chip)
{
	if (!chip->need_reset)
		return;
	rtsx_set_stat(chip, RTSX_STAT_RUN);
	rtsx_force_power_on(chip, SSC_PDCTL | OC_PDCTL);
	rtsx_disable_aspm(chip);
	/* SD slot carrying SDIO at insert time is not reset as plain SD */
	if ((chip->need_reset & SD_CARD) && chip->chip_insert_with_sdio)
		clear_bit(SD_NR, &chip->need_reset);
	if (chip->need_reset & XD_CARD) {
		chip->card_exist |= XD_CARD;
		if (chip->xd_show_cnt >= MAX_SHOW_CNT)
			do_reset_xd_card(chip);
		else
			chip->xd_show_cnt++;
	}
	/* 5288 QFN quirk: an xD card excludes SD/MS resets */
	if (CHECK_PID(chip, 0x5288) && CHECK_BARO_PKG(chip, QFN)) {
		if (chip->card_exist & XD_CARD) {
			clear_bit(SD_NR, &chip->need_reset);
			clear_bit(MS_NR, &chip->need_reset);
		}
	}
	if (chip->need_reset & SD_CARD) {
		chip->card_exist |= SD_CARD;
		if (chip->sd_show_cnt >= MAX_SHOW_CNT) {
			/* Flush the ring buffer before touching the card */
			rtsx_write_register(chip, RBCTL, RB_FLUSH, RB_FLUSH);
			do_reset_sd_card(chip);
		} else {
			chip->sd_show_cnt++;
		}
	}
	if (chip->need_reset & MS_CARD) {
		chip->card_exist |= MS_CARD;
		if (chip->ms_show_cnt >= MAX_SHOW_CNT)
			do_reset_ms_card(chip);
		else
			chip->ms_show_cnt++;
	}
}
/*
 * rtsx_reinit_cards - release and re-reset every card flagged for reinit.
 *
 * @reset_chip: when non-zero, re-run the full chip reset first.
 *
 * A card is only reinitialized if it is still physically present
 * (its *_EXIST bit in BIPR is set).  Each cycle is release, settle
 * 100 ms, then reset.  Clears chip->need_reinit when done.
 */
void rtsx_reinit_cards(struct rtsx_chip *chip, int reset_chip)
{
	rtsx_set_stat(chip, RTSX_STAT_RUN);
	rtsx_force_power_on(chip, SSC_PDCTL | OC_PDCTL);
	if (reset_chip)
		rtsx_reset_chip(chip);
	chip->int_reg = rtsx_readl(chip, RTSX_BIPR);
	if ((chip->int_reg & SD_EXIST) && (chip->need_reinit & SD_CARD)) {
		/* SD also needs the SDIO state torn down first */
		release_sdio(chip);
		release_sd_card(chip);
		wait_timeout(100);
		chip->card_exist |= SD_CARD;
		do_reset_sd_card(chip);
	}
	if ((chip->int_reg & XD_EXIST) && (chip->need_reinit & XD_CARD)) {
		release_xd_card(chip);
		wait_timeout(100);
		chip->card_exist |= XD_CARD;
		do_reset_xd_card(chip);
	}
	if ((chip->int_reg & MS_EXIST) && (chip->need_reinit & MS_CARD)) {
		release_ms_card(chip);
		wait_timeout(100);
		chip->card_exist |= MS_CARD;
		do_reset_ms_card(chip);
	}
	chip->need_reinit = 0;
}
#ifdef DISABLE_CARD_INT
/*
 * card_cd_debounce - derive reset/release requests from card-detect pins.
 *
 * @need_reset:   out, bitmap of cards that newly appeared (may be NULL)
 * @need_release: out, bitmap of cards that disappeared (may be NULL)
 *
 * Insertion is debounced: the detect pin must stay asserted for
 * DEBOUNCE_CNT consecutive samples (30 ms apart) before a reset is
 * requested.  Removal is reported immediately from a single sample.
 * Only one card type is considered at a time (xD > SD > MS priority).
 */
void card_cd_debounce(struct rtsx_chip *chip, unsigned long *need_reset,
		      unsigned long *need_release)
{
	u8 release_map = 0, reset_map = 0;
	chip->int_reg = rtsx_readl(chip, RTSX_BIPR);
	if (chip->card_exist) {
		/* A card is known: check whether it was pulled out */
		if (chip->card_exist & XD_CARD) {
			if (!(chip->int_reg & XD_EXIST))
				release_map |= XD_CARD;
		} else if (chip->card_exist & SD_CARD) {
			if (!(chip->int_reg & SD_EXIST))
				release_map |= SD_CARD;
		} else if (chip->card_exist & MS_CARD) {
			if (!(chip->int_reg & MS_EXIST))
				release_map |= MS_CARD;
		}
	} else {
		/* No card known: check whether one was inserted */
		if (chip->int_reg & XD_EXIST)
			reset_map |= XD_CARD;
		else if (chip->int_reg & SD_EXIST)
			reset_map |= SD_CARD;
		else if (chip->int_reg & MS_EXIST)
			reset_map |= MS_CARD;
	}
	if (reset_map) {
		int xd_cnt = 0, sd_cnt = 0, ms_cnt = 0;
		int i;
		/* Resample: the pin must be stable for all DEBOUNCE_CNT reads */
		for (i = 0; i < (DEBOUNCE_CNT); i++) {
			chip->int_reg = rtsx_readl(chip, RTSX_BIPR);
			if (chip->int_reg & XD_EXIST)
				xd_cnt++;
			else
				xd_cnt = 0;
			if (chip->int_reg & SD_EXIST)
				sd_cnt++;
			else
				sd_cnt = 0;
			if (chip->int_reg & MS_EXIST)
				ms_cnt++;
			else
				ms_cnt = 0;
			wait_timeout(30);
		}
		reset_map = 0;
		if (!(chip->card_exist & XD_CARD) &&
		    (xd_cnt > (DEBOUNCE_CNT - 1)))
			reset_map |= XD_CARD;
		if (!(chip->card_exist & SD_CARD) &&
		    (sd_cnt > (DEBOUNCE_CNT - 1)))
			reset_map |= SD_CARD;
		if (!(chip->card_exist & MS_CARD) &&
		    (ms_cnt > (DEBOUNCE_CNT - 1)))
			reset_map |= MS_CARD;
	}
	if (CHECK_PID(chip, 0x5288) && CHECK_BARO_PKG(chip, QFN))
		rtsx_write_register(chip, HOST_SLEEP_STATE, 0xC0, 0x00);
	if (need_reset)
		*need_reset = reset_map;
	if (need_release)
		*need_release = release_map;
}
#endif
/*
 * rtsx_init_cards - polling-thread entry: process delink recovery plus
 * the pending release, reset and reinit requests, in that order.
 *
 * Releases first drop per-slot bookkeeping (card_exist/ejected/fail,
 * LUN media-change bit, rw_fail_cnt) before calling the per-card
 * release routine.  The LED is turned off only when no card remains.
 */
void rtsx_init_cards(struct rtsx_chip *chip)
{
	/* Delink recovery: re-run the chip reset outside suspend state */
	if (RTSX_TST_DELINK(chip) && (rtsx_get_stat(chip) != RTSX_STAT_SS)) {
		dev_dbg(rtsx_dev(chip), "Reset chip in polling thread!\n");
		rtsx_reset_chip(chip);
		RTSX_CLR_DELINK(chip);
	}
#ifdef DISABLE_CARD_INT
	/* Without card interrupts, poll and debounce the detect pins */
	card_cd_debounce(chip, &chip->need_reset, &chip->need_release);
#endif
	if (chip->need_release) {
		/* 5288 QFN quirk: xD present excludes SD/MS handling */
		if (CHECK_PID(chip, 0x5288) && CHECK_BARO_PKG(chip, QFN)) {
			if (chip->int_reg & XD_EXIST) {
				clear_bit(SD_NR, &chip->need_release);
				clear_bit(MS_NR, &chip->need_release);
			}
		}
		/* Drop release requests for cards that were never present */
		if (!(chip->card_exist & SD_CARD) && !chip->sd_io)
			clear_bit(SD_NR, &chip->need_release);
		if (!(chip->card_exist & XD_CARD))
			clear_bit(XD_NR, &chip->need_release);
		if (!(chip->card_exist & MS_CARD))
			clear_bit(MS_NR, &chip->need_release);
		dev_dbg(rtsx_dev(chip), "chip->need_release = 0x%x\n",
			(unsigned int)(chip->need_release));
#ifdef SUPPORT_OCP
		/* Clear any latched over-current status before releasing */
		if (chip->need_release) {
			if (chip->ocp_stat & (CARD_OC_NOW | CARD_OC_EVER))
				rtsx_write_register(chip, OCPCLR,
						    CARD_OC_INT_CLR |
						    CARD_OC_CLR,
						    CARD_OC_INT_CLR |
						    CARD_OC_CLR);
			chip->ocp_stat = 0;
		}
#endif
		if (chip->need_release) {
			rtsx_set_stat(chip, RTSX_STAT_RUN);
			rtsx_force_power_on(chip, SSC_PDCTL | OC_PDCTL);
		}
		if (chip->need_release & SD_CARD) {
			clear_bit(SD_NR, &chip->need_release);
			chip->card_exist &= ~SD_CARD;
			chip->card_ejected &= ~SD_CARD;
			chip->card_fail &= ~SD_CARD;
			CLR_BIT(chip->lun_mc, chip->card2lun[SD_CARD]);
			chip->rw_fail_cnt[chip->card2lun[SD_CARD]] = 0;
			/* Flush the ring buffer before the card goes away */
			rtsx_write_register(chip, RBCTL, RB_FLUSH, RB_FLUSH);
			release_sdio(chip);
			release_sd_card(chip);
		}
		if (chip->need_release & XD_CARD) {
			clear_bit(XD_NR, &chip->need_release);
			chip->card_exist &= ~XD_CARD;
			chip->card_ejected &= ~XD_CARD;
			chip->card_fail &= ~XD_CARD;
			CLR_BIT(chip->lun_mc, chip->card2lun[XD_CARD]);
			chip->rw_fail_cnt[chip->card2lun[XD_CARD]] = 0;
			release_xd_card(chip);
			if (CHECK_PID(chip, 0x5288) &&
			    CHECK_BARO_PKG(chip, QFN))
				rtsx_write_register(chip, HOST_SLEEP_STATE,
						    0xC0, 0xC0);
		}
		if (chip->need_release & MS_CARD) {
			clear_bit(MS_NR, &chip->need_release);
			chip->card_exist &= ~MS_CARD;
			chip->card_ejected &= ~MS_CARD;
			chip->card_fail &= ~MS_CARD;
			CLR_BIT(chip->lun_mc, chip->card2lun[MS_CARD]);
			chip->rw_fail_cnt[chip->card2lun[MS_CARD]] = 0;
			release_ms_card(chip);
		}
		dev_dbg(rtsx_dev(chip), "chip->card_exist = 0x%x\n",
			chip->card_exist);
		if (!chip->card_exist)
			turn_off_led(chip, LED_GPIO);
	}
	if (chip->need_reset) {
		dev_dbg(rtsx_dev(chip), "chip->need_reset = 0x%x\n",
			(unsigned int)(chip->need_reset));
		rtsx_reset_cards(chip);
	}
	if (chip->need_reinit) {
		dev_dbg(rtsx_dev(chip), "chip->need_reinit = 0x%x\n",
			(unsigned int)(chip->need_reinit));
		rtsx_reinit_cards(chip, 0);
	}
}
/*
 * switch_ssc_clock - program the SSC PLL for a new card clock (MHz).
 *
 * Derives the divider N (register value is N-2) and a post-divider so
 * that N stays within [60, 120]; an MCU wait count is scaled from the
 * target frequency.  Returns STATUS_SUCCESS, STATUS_FAIL on bad input,
 * or STATUS_ERROR if the command batch fails.
 *
 * NOTE(review): sd_vpclk_phase_reset is always 0 here, so the VPCLK
 * phase-reset branch is dead code; the STATUS_ERROR return is also
 * inconsistent with the STATUS_FAIL used elsewhere — both kept as-is.
 */
int switch_ssc_clock(struct rtsx_chip *chip, int clk)
{
	int retval;
	u8 n = (u8)(clk - 2), min_n, max_n;
	u8 mcu_cnt, div, max_div, ssc_depth, ssc_depth_mask;
	int sd_vpclk_phase_reset = 0;
	if (chip->cur_clk == clk)
		return STATUS_SUCCESS;
	min_n = 60;
	max_n = 120;
	max_div = CLK_DIV_4;
	dev_dbg(rtsx_dev(chip), "Switch SSC clock to %dMHz (cur_clk = %d)\n",
		clk, chip->cur_clk);
	if (clk <= 2 || n > max_n)
		return STATUS_FAIL;
	/* MCU wait count: larger for slower clocks, capped at 7 */
	mcu_cnt = (u8)(125 / clk + 3);
	if (mcu_cnt > 7)
		mcu_cnt = 7;
	/* Double N (and halve the output) until N reaches the valid range */
	div = CLK_DIV_1;
	while ((n < min_n) && (div < max_div)) {
		n = (n + 2) * 2 - 2;
		div++;
	}
	dev_dbg(rtsx_dev(chip), "n = %d, div = %d\n", n, div);
	if (chip->ssc_en) {
		ssc_depth = 0x01;
		n -= 2;
	} else {
		ssc_depth = 0;
	}
	ssc_depth_mask = 0x03;
	dev_dbg(rtsx_dev(chip), "ssc_depth = %d\n", ssc_depth);
	/* Batch: drop to low-freq mode, reprogram SSC, toggle SSC reset */
	rtsx_init_cmd(chip);
	rtsx_add_cmd(chip, WRITE_REG_CMD, CLK_CTL, CLK_LOW_FREQ, CLK_LOW_FREQ);
	rtsx_add_cmd(chip, WRITE_REG_CMD, CLK_DIV, 0xFF, (div << 4) | mcu_cnt);
	rtsx_add_cmd(chip, WRITE_REG_CMD, SSC_CTL1, SSC_RSTB, 0);
	rtsx_add_cmd(chip, WRITE_REG_CMD, SSC_CTL2, ssc_depth_mask, ssc_depth);
	rtsx_add_cmd(chip, WRITE_REG_CMD, SSC_DIV_N_0, 0xFF, n);
	rtsx_add_cmd(chip, WRITE_REG_CMD, SSC_CTL1, SSC_RSTB, SSC_RSTB);
	if (sd_vpclk_phase_reset) {
		rtsx_add_cmd(chip, WRITE_REG_CMD, SD_VPCLK0_CTL,
			     PHASE_NOT_RESET, 0);
		rtsx_add_cmd(chip, WRITE_REG_CMD, SD_VPCLK0_CTL,
			     PHASE_NOT_RESET, PHASE_NOT_RESET);
	}
	retval = rtsx_send_cmd(chip, 0, WAIT_TIME);
	if (retval < 0)
		return STATUS_ERROR;
	/* Let the PLL settle before leaving low-frequency mode */
	udelay(10);
	retval = rtsx_write_register(chip, CLK_CTL, CLK_LOW_FREQ, 0);
	if (retval)
		return retval;
	chip->cur_clk = clk;
	return STATUS_SUCCESS;
}
/*
 * switch_normal_clock - select one of the fixed (non-SSC) card clocks.
 *
 * Maps the requested CLK_* value to an internal PLL selector, a clock
 * divider and an MCU wait count, then programs the clock registers in
 * low-frequency mode.  Unsupported values return STATUS_FAIL.
 *
 * NOTE(review): sd_vpclk_phase_reset is always 0 here, so both VPCLK
 * phase-reset branches are dead code — kept as-is.
 */
int switch_normal_clock(struct rtsx_chip *chip, int clk)
{
	int retval;
	u8 sel, div, mcu_cnt;
	int sd_vpclk_phase_reset = 0;
	if (chip->cur_clk == clk)
		return STATUS_SUCCESS;
	/* Each target clock = PLL selector frequency / divider */
	switch (clk) {
	case CLK_20:
		dev_dbg(rtsx_dev(chip), "Switch clock to 20MHz\n");
		sel = SSC_80;
		div = CLK_DIV_4;
		mcu_cnt = 7;
		break;
	case CLK_30:
		dev_dbg(rtsx_dev(chip), "Switch clock to 30MHz\n");
		sel = SSC_120;
		div = CLK_DIV_4;
		mcu_cnt = 7;
		break;
	case CLK_40:
		dev_dbg(rtsx_dev(chip), "Switch clock to 40MHz\n");
		sel = SSC_80;
		div = CLK_DIV_2;
		mcu_cnt = 7;
		break;
	case CLK_50:
		dev_dbg(rtsx_dev(chip), "Switch clock to 50MHz\n");
		sel = SSC_100;
		div = CLK_DIV_2;
		mcu_cnt = 6;
		break;
	case CLK_60:
		dev_dbg(rtsx_dev(chip), "Switch clock to 60MHz\n");
		sel = SSC_120;
		div = CLK_DIV_2;
		mcu_cnt = 6;
		break;
	case CLK_80:
		dev_dbg(rtsx_dev(chip), "Switch clock to 80MHz\n");
		sel = SSC_80;
		div = CLK_DIV_1;
		mcu_cnt = 5;
		break;
	case CLK_100:
		dev_dbg(rtsx_dev(chip), "Switch clock to 100MHz\n");
		sel = SSC_100;
		div = CLK_DIV_1;
		mcu_cnt = 5;
		break;
	case CLK_120:
		dev_dbg(rtsx_dev(chip), "Switch clock to 120MHz\n");
		sel = SSC_120;
		div = CLK_DIV_1;
		mcu_cnt = 5;
		break;
	case CLK_150:
		dev_dbg(rtsx_dev(chip), "Switch clock to 150MHz\n");
		sel = SSC_150;
		div = CLK_DIV_1;
		mcu_cnt = 4;
		break;
	case CLK_200:
		dev_dbg(rtsx_dev(chip), "Switch clock to 200MHz\n");
		sel = SSC_200;
		div = CLK_DIV_1;
		mcu_cnt = 4;
		break;
	default:
		dev_dbg(rtsx_dev(chip), "Try to switch to an illegal clock (%d)\n",
			clk);
		return STATUS_FAIL;
	}
	/* Switch to low-frequency mode while reprogramming */
	retval = rtsx_write_register(chip, CLK_CTL, 0xFF, CLK_LOW_FREQ);
	if (retval)
		return retval;
	if (sd_vpclk_phase_reset) {
		retval = rtsx_write_register(chip, SD_VPCLK0_CTL,
					     PHASE_NOT_RESET, 0);
		if (retval)
			return retval;
		retval = rtsx_write_register(chip, SD_VPCLK1_CTL,
					     PHASE_NOT_RESET, 0);
		if (retval)
			return retval;
	}
	retval = rtsx_write_register(chip, CLK_DIV, 0xFF,
				     (div << 4) | mcu_cnt);
	if (retval)
		return retval;
	retval = rtsx_write_register(chip, CLK_SEL, 0xFF, sel);
	if (retval)
		return retval;
	if (sd_vpclk_phase_reset) {
		udelay(200);
		retval = rtsx_write_register(chip, SD_VPCLK0_CTL,
					     PHASE_NOT_RESET, PHASE_NOT_RESET);
		if (retval)
			return retval;
		retval = rtsx_write_register(chip, SD_VPCLK1_CTL,
					     PHASE_NOT_RESET, PHASE_NOT_RESET);
		if (retval)
			return retval;
		udelay(200);
	}
	/* Leave low-frequency mode; new clock takes effect */
	retval = rtsx_write_register(chip, CLK_CTL, 0xFF, 0);
	if (retval)
		return retval;
	chip->cur_clk = clk;
	return STATUS_SUCCESS;
}
/*
 * trans_dma_enable - queue the register writes that arm a DMA transfer.
 *
 * Adds commands to the current batch (caller must have called
 * rtsx_init_cmd() and will send the batch): ack any stale DMA-done
 * interrupt, program the 32-bit transfer count, set direction/packet
 * size, and route data through the ring buffer.  Any direction other
 * than DMA_FROM_DEVICE is treated as host-to-card.
 */
void trans_dma_enable(enum dma_data_direction dir, struct rtsx_chip *chip,
		      u32 byte_cnt, u8 pack_size)
{
	if (pack_size > DMA_1024)
		pack_size = DMA_512;

	rtsx_add_cmd(chip, WRITE_REG_CMD, IRQSTAT0, DMA_DONE_INT, DMA_DONE_INT);

	/* Transfer count, most significant byte first */
	rtsx_add_cmd(chip, WRITE_REG_CMD, DMATC3, 0xFF, (u8)(byte_cnt >> 24));
	rtsx_add_cmd(chip, WRITE_REG_CMD, DMATC2, 0xFF, (u8)(byte_cnt >> 16));
	rtsx_add_cmd(chip, WRITE_REG_CMD, DMATC1, 0xFF, (u8)(byte_cnt >> 8));
	rtsx_add_cmd(chip, WRITE_REG_CMD, DMATC0, 0xFF, (u8)byte_cnt);

	rtsx_add_cmd(chip, WRITE_REG_CMD, DMACTL,
		     0x03 | DMA_PACK_SIZE_MASK,
		     (dir == DMA_FROM_DEVICE ? DMA_DIR_FROM_CARD :
					       DMA_DIR_TO_CARD) |
		     DMA_EN | pack_size);

	rtsx_add_cmd(chip, WRITE_REG_CMD, CARD_DATA_SOURCE, 0x01, RING_BUFFER);
}
/*
 * enable_card_clock - turn on the clock for each card type in @card.
 *
 * @card is a bitmask; any combination of XD_CARD/SD_CARD/MS_CARD.
 * Returns STATUS_SUCCESS or the register-write error code.
 */
int enable_card_clock(struct rtsx_chip *chip, u8 card)
{
	u8 clk_en = 0;
	int err;

	clk_en |= (card & XD_CARD) ? XD_CLK_EN : 0;
	clk_en |= (card & SD_CARD) ? SD_CLK_EN : 0;
	clk_en |= (card & MS_CARD) ? MS_CLK_EN : 0;

	err = rtsx_write_register(chip, CARD_CLK_EN, clk_en, clk_en);
	return err ? err : STATUS_SUCCESS;
}
/*
 * disable_card_clock - turn off the clock for each card type in @card.
 *
 * @card is a bitmask; any combination of XD_CARD/SD_CARD/MS_CARD.
 * Returns STATUS_SUCCESS or the register-write error code.
 */
int disable_card_clock(struct rtsx_chip *chip, u8 card)
{
	u8 clk_en = 0;
	int err;

	clk_en |= (card & XD_CARD) ? XD_CLK_EN : 0;
	clk_en |= (card & SD_CARD) ? SD_CLK_EN : 0;
	clk_en |= (card & MS_CARD) ? MS_CLK_EN : 0;

	err = rtsx_write_register(chip, CARD_CLK_EN, clk_en, 0);
	return err ? err : STATUS_SUCCESS;
}
/*
 * card_power_on - power up a card socket in two stages.
 *
 * Writes the partial-power level first, waits
 * chip->pmos_pwr_on_interval microseconds, then applies full power.
 * In SD_MS_2LUN mode the MS socket has its own power control bits.
 */
int card_power_on(struct rtsx_chip *chip, u8 card)
{
	u8 mask, partial_on, full_on;
	int err;

	if (CHECK_LUN_MODE(chip, SD_MS_2LUN) && card == MS_CARD) {
		mask = MS_POWER_MASK;
		partial_on = MS_PARTIAL_POWER_ON;
		full_on = MS_POWER_ON;
	} else {
		mask = SD_POWER_MASK;
		partial_on = SD_PARTIAL_POWER_ON;
		full_on = SD_POWER_ON;
	}

	/* Stage 1: partial power */
	rtsx_init_cmd(chip);
	rtsx_add_cmd(chip, WRITE_REG_CMD, CARD_PWR_CTL, mask, partial_on);
	err = rtsx_send_cmd(chip, 0, 100);
	if (err != STATUS_SUCCESS)
		return STATUS_FAIL;

	udelay(chip->pmos_pwr_on_interval);

	/* Stage 2: full power */
	rtsx_init_cmd(chip);
	rtsx_add_cmd(chip, WRITE_REG_CMD, CARD_PWR_CTL, mask, full_on);
	err = rtsx_send_cmd(chip, 0, 100);
	if (err != STATUS_SUCCESS)
		return STATUS_FAIL;

	return STATUS_SUCCESS;
}
/*
 * card_power_off - cut power to a card socket.
 *
 * In SD_MS_2LUN mode the MS socket has its own power control bits;
 * every other case uses the SD power bits.  Returns STATUS_SUCCESS or
 * the register-write error code.
 */
int card_power_off(struct rtsx_chip *chip, u8 card)
{
	u8 mask, val;
	int err;

	if (CHECK_LUN_MODE(chip, SD_MS_2LUN) && card == MS_CARD) {
		mask = MS_POWER_MASK;
		val = MS_POWER_OFF;
	} else {
		mask = SD_POWER_MASK;
		val = SD_POWER_OFF;
	}

	err = rtsx_write_register(chip, CARD_PWR_CTL, mask, val);
	return err ? err : STATUS_SUCCESS;
}
/*
 * card_rw - dispatch a read/write request to the current card handler.
 *
 * Retries the per-LUN rw_card callback up to 3 times, but only while
 * the handler sets chip->rw_need_retry, the chip is still reachable,
 * and the card is still inserted.  Returns the last handler status,
 * or STATUS_FAIL on fatal conditions (no handler, chip gone, card
 * removed).
 */
int card_rw(struct scsi_cmnd *srb, struct rtsx_chip *chip,
	    u32 sec_addr, u16 sec_cnt)
{
	int retval;
	unsigned int lun = SCSI_LUN(srb);
	int i;
	if (!chip->rw_card[lun])
		return STATUS_FAIL;
	for (i = 0; i < 3; i++) {
		chip->rw_need_retry = 0;
		retval = chip->rw_card[lun](srb, chip, sec_addr, sec_cnt);
		if (retval != STATUS_SUCCESS) {
			/* Chip unreachable: tear everything down */
			if (rtsx_check_chip_exist(chip) != STATUS_SUCCESS) {
				rtsx_release_chip(chip);
				return STATUS_FAIL;
			}
			/* Card pulled mid-transfer: no point retrying */
			if (detect_card_cd(chip, chip->cur_card) !=
			    STATUS_SUCCESS) {
				return STATUS_FAIL;
			}
			if (!chip->rw_need_retry) {
				dev_dbg(rtsx_dev(chip), "RW fail, but no need to retry\n");
				break;
			}
		} else {
			chip->rw_need_retry = 0;
			break;
		}
		dev_dbg(rtsx_dev(chip), "Retry RW, (i = %d)\n", i);
	}
	return retval;
}
/*
 * card_share_mode - route the shared card bus to the given card type.
 *
 * The mask and per-card values differ between the 5208 and 5288
 * controllers; other PIDs and unknown card types fail.
 */
int card_share_mode(struct rtsx_chip *chip, int card)
{
	u8 mask, value;
	int err;

	if (CHECK_PID(chip, 0x5208)) {
		mask = CARD_SHARE_MASK;
		switch (card) {
		case SD_CARD:
			value = CARD_SHARE_48_SD;
			break;
		case MS_CARD:
			value = CARD_SHARE_48_MS;
			break;
		case XD_CARD:
			value = CARD_SHARE_48_XD;
			break;
		default:
			return STATUS_FAIL;
		}
	} else if (CHECK_PID(chip, 0x5288)) {
		mask = 0x03;
		switch (card) {
		case SD_CARD:
			value = CARD_SHARE_BAROSSA_SD;
			break;
		case MS_CARD:
			value = CARD_SHARE_BAROSSA_MS;
			break;
		case XD_CARD:
			value = CARD_SHARE_BAROSSA_XD;
			break;
		default:
			return STATUS_FAIL;
		}
	} else {
		return STATUS_FAIL;
	}

	err = rtsx_write_register(chip, CARD_SHARE_MODE, mask, value);
	return err ? err : STATUS_SUCCESS;
}
/*
 * select_card - select the module for @card and share the bus to it.
 *
 * A no-op when the card is already selected.  On success caches the
 * selection in chip->cur_card.
 */
int select_card(struct rtsx_chip *chip, int card)
{
	u8 mod;
	int err;

	if (chip->cur_card == card)
		return STATUS_SUCCESS;

	switch (card) {
	case SD_CARD:
		mod = SD_MOD_SEL;
		break;
	case MS_CARD:
		mod = MS_MOD_SEL;
		break;
	case XD_CARD:
		mod = XD_MOD_SEL;
		break;
	case SPI_CARD:
		mod = SPI_MOD_SEL;
		break;
	default:
		return STATUS_FAIL;
	}

	err = rtsx_write_register(chip, CARD_SELECT, 0x07, mod);
	if (err)
		return err;
	chip->cur_card = card;

	if (card_share_mode(chip, card) != STATUS_SUCCESS)
		return STATUS_FAIL;

	return STATUS_SUCCESS;
}
/*
 * toggle_gpio - invert one GPIO bit in the CARD_GPIO register.
 *
 * Fix: temp_reg was previously uninitialized; if rtsx_read_register()
 * failed without writing through its output pointer, the XOR below
 * would read an indeterminate value (undefined behavior).  Initialize
 * it so a failed read falls back to toggling from 0.
 */
void toggle_gpio(struct rtsx_chip *chip, u8 gpio)
{
	u8 temp_reg = 0;

	rtsx_read_register(chip, CARD_GPIO, &temp_reg);
	temp_reg ^= (0x01 << gpio);
	rtsx_write_register(chip, CARD_GPIO, 0xFF, temp_reg);
}
/*
 * turn_on_led - drive the LED GPIO to its "on" level.
 *
 * NOTE(review): the 5288 writes 1 and all other PIDs write 0 for "on",
 * which suggests inverted LED polarity on the 5288 — confirm against
 * the board schematic.
 */
void turn_on_led(struct rtsx_chip *chip, u8 gpio)
{
	u8 bit = (u8)(1 << gpio);

	if (CHECK_PID(chip, 0x5288))
		rtsx_write_register(chip, CARD_GPIO, bit, bit);
	else
		rtsx_write_register(chip, CARD_GPIO, bit, 0);
}
/*
 * turn_off_led - drive the LED GPIO to its "off" level.
 *
 * Mirror image of turn_on_led(): the 5288 writes 0, other PIDs write 1.
 */
void turn_off_led(struct rtsx_chip *chip, u8 gpio)
{
	u8 bit = (u8)(1 << gpio);

	if (CHECK_PID(chip, 0x5288))
		rtsx_write_register(chip, CARD_GPIO, bit, 0);
	else
		rtsx_write_register(chip, CARD_GPIO, bit, bit);
}
/*
 * detect_card_cd - check the card-detect pin for the given card type.
 *
 * Returns STATUS_SUCCESS when the card's *_EXIST bit is set in BIPR,
 * STATUS_FAIL when it is absent or the card type is unknown.
 */
int detect_card_cd(struct rtsx_chip *chip, int card)
{
	u32 card_cd;

	switch (card) {
	case SD_CARD:
		card_cd = SD_EXIST;
		break;
	case MS_CARD:
		card_cd = MS_EXIST;
		break;
	case XD_CARD:
		card_cd = XD_EXIST;
		break;
	default:
		dev_dbg(rtsx_dev(chip), "Wrong card type: 0x%x\n", card);
		return STATUS_FAIL;
	}

	if (!(rtsx_readl(chip, RTSX_BIPR) & card_cd))
		return STATUS_FAIL;

	return STATUS_SUCCESS;
}
/* check_card_exist - 1 if the card mapped to @lun is present, else 0. */
int check_card_exist(struct rtsx_chip *chip, unsigned int lun)
{
	return (chip->card_exist & chip->lun2card[lun]) ? 1 : 0;
}
/* check_card_ready - 1 if the card mapped to @lun is ready, else 0. */
int check_card_ready(struct rtsx_chip *chip, unsigned int lun)
{
	return (chip->card_ready & chip->lun2card[lun]) ? 1 : 0;
}
/* check_card_wp - 1 if the card mapped to @lun is write-protected, else 0. */
int check_card_wp(struct rtsx_chip *chip, unsigned int lun)
{
	return (chip->card_wp & chip->lun2card[lun]) ? 1 : 0;
}
/*
 * get_lun_card - report which ready card type is mapped to @lun.
 *
 * Returns XD_CARD, SD_CARD or MS_CARD when exactly that card is ready
 * on the LUN, 0 otherwise.
 */
u8 get_lun_card(struct rtsx_chip *chip, unsigned int lun)
{
	switch (chip->card_ready & chip->lun2card[lun]) {
	case XD_CARD:
		return (u8)XD_CARD;
	case SD_CARD:
		return (u8)SD_CARD;
	case MS_CARD:
		return (u8)MS_CARD;
	default:
		return 0;
	}
}
/*
 * eject_card - software-eject the ready card mapped to @lun.
 *
 * Flushes pending work, releases the card, marks it ejected and clears
 * its ready bit and the LUN capacity.  No-op when no card is ready.
 */
void eject_card(struct rtsx_chip *chip, unsigned int lun)
{
	do_remaining_work(chip);

	switch (chip->card_ready & chip->lun2card[lun]) {
	case SD_CARD:
		release_sd_card(chip);
		chip->card_ejected |= SD_CARD;
		chip->card_ready &= ~SD_CARD;
		chip->capacity[lun] = 0;
		break;
	case XD_CARD:
		release_xd_card(chip);
		chip->card_ejected |= XD_CARD;
		chip->card_ready &= ~XD_CARD;
		chip->capacity[lun] = 0;
		break;
	case MS_CARD:
		release_ms_card(chip);
		chip->card_ejected |= MS_CARD;
		chip->card_ready &= ~MS_CARD;
		chip->capacity[lun] = 0;
		break;
	default:
		break;
	}
}
| linux-master | drivers/staging/rts5208/rtsx_card.c |
// SPDX-License-Identifier: GPL-2.0+
/*
* Driver for Realtek PCI-Express card reader
*
* Copyright(c) 2009-2013 Realtek Semiconductor Corp. All rights reserved.
*
* Author:
* Wei WANG ([email protected])
* Micky Ching ([email protected])
*/
#include <linux/blkdev.h>
#include <linux/kthread.h>
#include <linux/sched.h>
#include <linux/vmalloc.h>
#include "rtsx.h"
#include "rtsx_transport.h"
#include "rtsx_scsi.h"
#include "rtsx_card.h"
#include "xd.h"
static int xd_build_l2p_tbl(struct rtsx_chip *chip, int zone_no);
static int xd_init_page(struct rtsx_chip *chip, u32 phy_blk, u16 logoff,
u8 start_page, u8 end_page);
/* xd_set_err_code - record the last xD error code on the card state. */
static inline void xd_set_err_code(struct rtsx_chip *chip, u8 err_code)
{
	chip->xd_card.err_code = err_code;
}
/*
 * xd_set_init_para - choose and apply the initial xD clock.
 *
 * ASIC silicon uses a raw divider value of 47; FPGA builds use CLK_50.
 */
static int xd_set_init_para(struct rtsx_chip *chip)
{
	struct xd_info *xd_card = &chip->xd_card;

	xd_card->xd_clock = chip->asic_code ? 47 : CLK_50;

	if (switch_clock(chip, xd_card->xd_clock) != STATUS_SUCCESS)
		return STATUS_FAIL;

	return STATUS_SUCCESS;
}
/*
 * xd_switch_clock - select the xD module and apply its cached clock.
 */
static int xd_switch_clock(struct rtsx_chip *chip)
{
	struct xd_info *xd_card = &chip->xd_card;

	if (select_card(chip, XD_CARD) != STATUS_SUCCESS)
		return STATUS_FAIL;

	if (switch_clock(chip, xd_card->xd_clock) != STATUS_SUCCESS)
		return STATUS_FAIL;

	return STATUS_SUCCESS;
}
/*
 * xd_read_id - issue an xD ID command and copy back up to 4 ID bytes.
 *
 * @id_cmd:  command byte written to XD_DAT (e.g. READ_ID)
 * @id_buf:  optional destination; ignored if NULL or @buf_len is 0
 * @buf_len: capacity of @id_buf; clamped to the 4 bytes the chip latches
 */
static int xd_read_id(struct rtsx_chip *chip, u8 id_cmd, u8 *id_buf, u8 buf_len)
{
	int retval;
	int idx;
	u8 *data;

	rtsx_init_cmd(chip);

	rtsx_add_cmd(chip, WRITE_REG_CMD, XD_DAT, 0xFF, id_cmd);
	rtsx_add_cmd(chip, WRITE_REG_CMD, XD_TRANSFER, 0xFF,
		     XD_TRANSFER_START | XD_READ_ID);
	rtsx_add_cmd(chip, CHECK_REG_CMD, XD_TRANSFER, XD_TRANSFER_END,
		     XD_TRANSFER_END);

	/* The 4 ID bytes land in XD_ADDRESS1..XD_ADDRESS4 */
	for (idx = 0; idx < 4; idx++)
		rtsx_add_cmd(chip, READ_REG_CMD, (u16)(XD_ADDRESS1 + idx),
			     0, 0);

	retval = rtsx_send_cmd(chip, XD_CARD, 20);
	if (retval < 0)
		return STATUS_FAIL;

	data = rtsx_get_cmd_data(chip) + 1;
	if (id_buf && buf_len)
		memcpy(id_buf, data, buf_len > 4 ? 4 : buf_len);

	return STATUS_SUCCESS;
}
/*
 * xd_assign_phy_addr - queue the address registers for an xD operation.
 *
 * @addr: physical address (up to 24 bits used)
 * @mode: XD_RW_ADDR for page read/write (address bytes start at
 *        XD_ADDRESS1), XD_ERASE_ADDR for block erase (start at
 *        XD_ADDRESS0, one fewer address cycle).
 *
 * Only adds commands to the current batch; the caller sends it.
 * Unknown modes are silently ignored.
 */
static void xd_assign_phy_addr(struct rtsx_chip *chip, u32 addr, u8 mode)
{
	struct xd_info *xd_card = &chip->xd_card;
	switch (mode) {
	case XD_RW_ADDR:
		rtsx_add_cmd(chip, WRITE_REG_CMD, XD_ADDRESS0, 0xFF, 0);
		rtsx_add_cmd(chip, WRITE_REG_CMD, XD_ADDRESS1, 0xFF, (u8)addr);
		rtsx_add_cmd(chip, WRITE_REG_CMD, XD_ADDRESS2,
			     0xFF, (u8)(addr >> 8));
		rtsx_add_cmd(chip, WRITE_REG_CMD, XD_ADDRESS3,
			     0xFF, (u8)(addr >> 16));
		rtsx_add_cmd(chip, WRITE_REG_CMD, XD_CFG, 0xFF,
			     xd_card->addr_cycle |
			     XD_CALC_ECC |
			     XD_BA_NO_TRANSFORM);
		break;
	case XD_ERASE_ADDR:
		rtsx_add_cmd(chip, WRITE_REG_CMD, XD_ADDRESS0, 0xFF, (u8)addr);
		rtsx_add_cmd(chip, WRITE_REG_CMD, XD_ADDRESS1,
			     0xFF, (u8)(addr >> 8));
		rtsx_add_cmd(chip, WRITE_REG_CMD, XD_ADDRESS2,
			     0xFF, (u8)(addr >> 16));
		rtsx_add_cmd(chip, WRITE_REG_CMD, XD_CFG, 0xFF,
			     (xd_card->addr_cycle - 1) | XD_CALC_ECC |
			     XD_BA_NO_TRANSFORM);
		break;
	default:
		break;
	}
}
/*
 * xd_read_redundant - read the 11 redundant-area bytes of an xD page.
 *
 * @page_addr: physical page address
 * @buf:       optional destination; ignored if NULL or @buf_len is 0
 * @buf_len:   capacity of @buf; clamped to the 11 bytes latched by the
 *             chip (XD_PAGE_STATUS..XD_PARITY)
 */
static int xd_read_redundant(struct rtsx_chip *chip, u32 page_addr,
			     u8 *buf, int buf_len)
{
	int retval, i;
	rtsx_init_cmd(chip);
	xd_assign_phy_addr(chip, page_addr, XD_RW_ADDR);
	rtsx_add_cmd(chip, WRITE_REG_CMD, XD_TRANSFER,
		     0xFF, XD_TRANSFER_START | XD_READ_REDUNDANT);
	rtsx_add_cmd(chip, CHECK_REG_CMD, XD_TRANSFER,
		     XD_TRANSFER_END, XD_TRANSFER_END);
	/* 6 status/address bytes + 4 reserved bytes + 1 parity byte */
	for (i = 0; i < 6; i++)
		rtsx_add_cmd(chip, READ_REG_CMD, (u16)(XD_PAGE_STATUS + i),
			     0, 0);
	for (i = 0; i < 4; i++)
		rtsx_add_cmd(chip, READ_REG_CMD, (u16)(XD_RESERVED0 + i),
			     0, 0);
	rtsx_add_cmd(chip, READ_REG_CMD, XD_PARITY, 0, 0);
	retval = rtsx_send_cmd(chip, XD_CARD, 500);
	if (retval < 0)
		return STATUS_FAIL;
	if (buf && buf_len) {
		u8 *ptr = rtsx_get_cmd_data(chip) + 1;
		if (buf_len > 11)
			buf_len = 11;
		memcpy(buf, ptr, buf_len);
	}
	return STATUS_SUCCESS;
}
/*
 * xd_read_data_from_ppb - copy @buf_len bytes out of the ping-pong
 * buffer, starting at @offset, via individual register reads.
 *
 * Returns STATUS_FAIL (and clears the xD error state) if the command
 * batch fails, or if @buf is NULL / @buf_len negative.
 */
static int xd_read_data_from_ppb(struct rtsx_chip *chip, int offset,
				 u8 *buf, int buf_len)
{
	int ret;
	int idx;

	if (!buf || buf_len < 0)
		return STATUS_FAIL;

	rtsx_init_cmd(chip);

	/* One READ_REG command per buffer byte */
	for (idx = 0; idx < buf_len; idx++)
		rtsx_add_cmd(chip, READ_REG_CMD, PPBUF_BASE2 + offset + idx,
			     0, 0);

	ret = rtsx_send_cmd(chip, 0, 250);
	if (ret < 0) {
		rtsx_clear_xd_error(chip);
		return STATUS_FAIL;
	}

	memcpy(buf, rtsx_get_cmd_data(chip), buf_len);

	return STATUS_SUCCESS;
}
/*
 * xd_read_cis - read the CIS (Card Information Structure) page with
 * single-bit ECC correction.
 *
 * The CIS is stored twice in one page; the primary copy sits at
 * ping-pong buffer offset 0, the backup at offset 256.  A copy is
 * usable if its ECC reports no error or a correctable (single-bit)
 * error; in the latter case the flipped bit reported via
 * XD_ECC_BIT/XD_ECC_BYTE is corrected in @buf.  Requires
 * @buf_len >= 10; returns STATUS_FAIL when both copies are bad.
 */
static int xd_read_cis(struct rtsx_chip *chip, u32 page_addr, u8 *buf,
		       int buf_len)
{
	int retval;
	u8 reg;
	if (!buf || buf_len < 10)
		return STATUS_FAIL;
	rtsx_init_cmd(chip);
	xd_assign_phy_addr(chip, page_addr, XD_RW_ADDR);
	/* Read one page into the ping-pong buffer with auto status check */
	rtsx_add_cmd(chip, WRITE_REG_CMD, CARD_DATA_SOURCE,
		     0x01, PINGPONG_BUFFER);
	rtsx_add_cmd(chip, WRITE_REG_CMD, XD_PAGE_CNT, 0xFF, 1);
	rtsx_add_cmd(chip, WRITE_REG_CMD, XD_CHK_DATA_STATUS,
		     XD_AUTO_CHK_DATA_STATUS, XD_AUTO_CHK_DATA_STATUS);
	rtsx_add_cmd(chip, WRITE_REG_CMD, XD_TRANSFER, 0xFF,
		     XD_TRANSFER_START | XD_READ_PAGES);
	rtsx_add_cmd(chip, CHECK_REG_CMD, XD_TRANSFER, XD_TRANSFER_END,
		     XD_TRANSFER_END);
	retval = rtsx_send_cmd(chip, XD_CARD, 250);
	if (retval == -ETIMEDOUT) {
		rtsx_clear_xd_error(chip);
		return STATUS_FAIL;
	}
	retval = rtsx_read_register(chip, XD_PAGE_STATUS, &reg);
	if (retval)
		return retval;
	if (reg != XD_GPG) {
		rtsx_clear_xd_error(chip);
		return STATUS_FAIL;
	}
	retval = rtsx_read_register(chip, XD_CTL, &reg);
	if (retval)
		return retval;
	/* Primary copy: no ECC error, or a correctable one */
	if (!(reg & XD_ECC1_ERROR) || !(reg & XD_ECC1_UNCORRECTABLE)) {
		retval = xd_read_data_from_ppb(chip, 0, buf, buf_len);
		if (retval != STATUS_SUCCESS)
			return STATUS_FAIL;
		if (reg & XD_ECC1_ERROR) {
			/* Single-bit error: flip the reported bit back */
			u8 ecc_bit, ecc_byte;
			retval = rtsx_read_register(chip, XD_ECC_BIT1,
						    &ecc_bit);
			if (retval)
				return retval;
			retval = rtsx_read_register(chip, XD_ECC_BYTE1,
						    &ecc_byte);
			if (retval)
				return retval;
			dev_dbg(rtsx_dev(chip), "ECC_BIT1 = 0x%x, ECC_BYTE1 = 0x%x\n",
				ecc_bit, ecc_byte);
			if (ecc_byte < buf_len) {
				dev_dbg(rtsx_dev(chip), "Before correct: 0x%x\n",
					buf[ecc_byte]);
				buf[ecc_byte] ^= (1 << ecc_bit);
				dev_dbg(rtsx_dev(chip), "After correct: 0x%x\n",
					buf[ecc_byte]);
			}
		}
	/* Backup copy at offset 256, same correction scheme */
	} else if (!(reg & XD_ECC2_ERROR) || !(reg & XD_ECC2_UNCORRECTABLE)) {
		rtsx_clear_xd_error(chip);
		retval = xd_read_data_from_ppb(chip, 256, buf, buf_len);
		if (retval != STATUS_SUCCESS)
			return STATUS_FAIL;
		if (reg & XD_ECC2_ERROR) {
			u8 ecc_bit, ecc_byte;
			retval = rtsx_read_register(chip, XD_ECC_BIT2,
						    &ecc_bit);
			if (retval)
				return retval;
			retval = rtsx_read_register(chip, XD_ECC_BYTE2,
						    &ecc_byte);
			if (retval)
				return retval;
			dev_dbg(rtsx_dev(chip), "ECC_BIT2 = 0x%x, ECC_BYTE2 = 0x%x\n",
				ecc_bit, ecc_byte);
			if (ecc_byte < buf_len) {
				dev_dbg(rtsx_dev(chip), "Before correct: 0x%x\n",
					buf[ecc_byte]);
				buf[ecc_byte] ^= (1 << ecc_bit);
				dev_dbg(rtsx_dev(chip), "After correct: 0x%x\n",
					buf[ecc_byte]);
			}
		}
	} else {
		rtsx_clear_xd_error(chip);
		return STATUS_FAIL;
	}
	return STATUS_SUCCESS;
}
/*
 * xd_fill_pull_ctl_disable - queue pull-up/down settings that park the
 * xD interface pins (pulled down, card-detect kept pulled up).
 *
 * Only adds commands to the current batch.  The 5288 QFN values are
 * raw register bytes from the vendor; other 5288 packages get nothing.
 */
static void xd_fill_pull_ctl_disable(struct rtsx_chip *chip)
{
	if (CHECK_PID(chip, 0x5208)) {
		rtsx_add_cmd(chip, WRITE_REG_CMD, CARD_PULL_CTL1, 0xFF,
			     XD_D3_PD | XD_D2_PD | XD_D1_PD | XD_D0_PD);
		rtsx_add_cmd(chip, WRITE_REG_CMD, CARD_PULL_CTL2, 0xFF,
			     XD_D7_PD | XD_D6_PD | XD_D5_PD | XD_D4_PD);
		rtsx_add_cmd(chip, WRITE_REG_CMD, CARD_PULL_CTL3, 0xFF,
			     XD_WP_PD | XD_CE_PD | XD_CLE_PD | XD_CD_PU);
		rtsx_add_cmd(chip, WRITE_REG_CMD, CARD_PULL_CTL4, 0xFF,
			     XD_RDY_PD | XD_WE_PD | XD_RE_PD | XD_ALE_PD);
		rtsx_add_cmd(chip, WRITE_REG_CMD, CARD_PULL_CTL5, 0xFF,
			     MS_INS_PU | SD_WP_PD | SD_CD_PU | SD_CMD_PD);
		rtsx_add_cmd(chip, WRITE_REG_CMD, CARD_PULL_CTL6, 0xFF,
			     MS_D5_PD | MS_D4_PD);
	} else if (CHECK_PID(chip, 0x5288)) {
		if (CHECK_BARO_PKG(chip, QFN)) {
			rtsx_add_cmd(chip, WRITE_REG_CMD, CARD_PULL_CTL1,
				     0xFF, 0x55);
			rtsx_add_cmd(chip, WRITE_REG_CMD, CARD_PULL_CTL2,
				     0xFF, 0x55);
			rtsx_add_cmd(chip, WRITE_REG_CMD, CARD_PULL_CTL3,
				     0xFF, 0x4B);
			rtsx_add_cmd(chip, WRITE_REG_CMD, CARD_PULL_CTL4,
				     0xFF, 0x69);
		}
	}
}
/*
 * xd_fill_pull_ctl_stage1_barossa - queue the first-stage pull-control
 * values used during xD reset on the Barossa (5288) QFN package.
 *
 * Only adds commands to the current batch; other packages are a no-op.
 */
static void xd_fill_pull_ctl_stage1_barossa(struct rtsx_chip *chip)
{
	if (!CHECK_BARO_PKG(chip, QFN))
		return;

	rtsx_add_cmd(chip, WRITE_REG_CMD, CARD_PULL_CTL1, 0xFF, 0x55);
	rtsx_add_cmd(chip, WRITE_REG_CMD, CARD_PULL_CTL2, 0xFF, 0x55);
	rtsx_add_cmd(chip, WRITE_REG_CMD, CARD_PULL_CTL3, 0xFF, 0x4B);
	rtsx_add_cmd(chip, WRITE_REG_CMD, CARD_PULL_CTL4, 0xFF, 0x55);
}
/*
 * xd_fill_pull_ctl_enable - queue pull-up/down settings that activate
 * the xD interface (CE/RDY/WE/RE pulled up, data lines pulled down).
 *
 * Only adds commands to the current batch.  The 5288 QFN values are
 * raw register bytes from the vendor; other 5288 packages get nothing.
 */
static void xd_fill_pull_ctl_enable(struct rtsx_chip *chip)
{
	if (CHECK_PID(chip, 0x5208)) {
		rtsx_add_cmd(chip, WRITE_REG_CMD, CARD_PULL_CTL1, 0xFF,
			     XD_D3_PD | XD_D2_PD | XD_D1_PD | XD_D0_PD);
		rtsx_add_cmd(chip, WRITE_REG_CMD, CARD_PULL_CTL2, 0xFF,
			     XD_D7_PD | XD_D6_PD | XD_D5_PD | XD_D4_PD);
		rtsx_add_cmd(chip, WRITE_REG_CMD, CARD_PULL_CTL3, 0xFF,
			     XD_WP_PD | XD_CE_PU | XD_CLE_PD | XD_CD_PU);
		rtsx_add_cmd(chip, WRITE_REG_CMD, CARD_PULL_CTL4, 0xFF,
			     XD_RDY_PU | XD_WE_PU | XD_RE_PU | XD_ALE_PD);
		rtsx_add_cmd(chip, WRITE_REG_CMD, CARD_PULL_CTL5, 0xFF,
			     MS_INS_PU | SD_WP_PD | SD_CD_PU | SD_CMD_PD);
		rtsx_add_cmd(chip, WRITE_REG_CMD, CARD_PULL_CTL6, 0xFF,
			     MS_D5_PD | MS_D4_PD);
	} else if (CHECK_PID(chip, 0x5288)) {
		if (CHECK_BARO_PKG(chip, QFN)) {
			rtsx_add_cmd(chip, WRITE_REG_CMD, CARD_PULL_CTL1,
				     0xFF, 0x55);
			rtsx_add_cmd(chip, WRITE_REG_CMD, CARD_PULL_CTL2,
				     0xFF, 0x55);
			rtsx_add_cmd(chip, WRITE_REG_CMD, CARD_PULL_CTL3,
				     0xFF, 0x53);
			rtsx_add_cmd(chip, WRITE_REG_CMD, CARD_PULL_CTL4,
				     0xFF, 0xA9);
		}
	}
}
/*
 * xd_pull_ctl_disable - immediately write the "parked" xD pull-control
 * values (synchronous counterpart of xd_fill_pull_ctl_disable()).
 *
 * Returns STATUS_SUCCESS, or the first register-write error.  PIDs
 * other than 5208/5288-QFN write nothing and still succeed.
 */
static int xd_pull_ctl_disable(struct rtsx_chip *chip)
{
	int retval;
	if (CHECK_PID(chip, 0x5208)) {
		retval = rtsx_write_register(chip, CARD_PULL_CTL1, 0xFF,
					     XD_D3_PD |
					     XD_D2_PD |
					     XD_D1_PD |
					     XD_D0_PD);
		if (retval)
			return retval;
		retval = rtsx_write_register(chip, CARD_PULL_CTL2, 0xFF,
					     XD_D7_PD |
					     XD_D6_PD |
					     XD_D5_PD |
					     XD_D4_PD);
		if (retval)
			return retval;
		retval = rtsx_write_register(chip, CARD_PULL_CTL3, 0xFF,
					     XD_WP_PD |
					     XD_CE_PD |
					     XD_CLE_PD |
					     XD_CD_PU);
		if (retval)
			return retval;
		retval = rtsx_write_register(chip, CARD_PULL_CTL4, 0xFF,
					     XD_RDY_PD |
					     XD_WE_PD |
					     XD_RE_PD |
					     XD_ALE_PD);
		if (retval)
			return retval;
		retval = rtsx_write_register(chip, CARD_PULL_CTL5, 0xFF,
					     MS_INS_PU |
					     SD_WP_PD |
					     SD_CD_PU |
					     SD_CMD_PD);
		if (retval)
			return retval;
		retval = rtsx_write_register(chip, CARD_PULL_CTL6, 0xFF,
					     MS_D5_PD | MS_D4_PD);
		if (retval)
			return retval;
	} else if (CHECK_PID(chip, 0x5288)) {
		if (CHECK_BARO_PKG(chip, QFN)) {
			retval = rtsx_write_register(chip, CARD_PULL_CTL1,
						     0xFF, 0x55);
			if (retval)
				return retval;
			retval = rtsx_write_register(chip, CARD_PULL_CTL2,
						     0xFF, 0x55);
			if (retval)
				return retval;
			retval = rtsx_write_register(chip, CARD_PULL_CTL3,
						     0xFF, 0x4B);
			if (retval)
				return retval;
			retval = rtsx_write_register(chip, CARD_PULL_CTL4,
						     0xFF, 0x69);
			if (retval)
				return retval;
		}
	}
	return STATUS_SUCCESS;
}
/*
 * reset_xd - initialize the xD card and detect its geometry
 *
 * Sequence: select the card, program pull-control/FPGA registers,
 * power-cycle it (unless ft2_fast_mode), then probe READ_ID under four
 * progressively relaxed timing settings until the card answers with a
 * stable, supported device code. Geometry (block_shift, page_off,
 * addr_cycle, zone_cnt, capacity) is derived from the device code, and
 * the CIS block is located by scanning the first 24 physical blocks.
 *
 * Returns STATUS_SUCCESS or STATUS_FAIL.
 */
static int reset_xd(struct rtsx_chip *chip)
{
	struct xd_info *xd_card = &chip->xd_card;
	int retval, i, j;
	u8 *ptr, id_buf[4], redunt[11];

	retval = select_card(chip, XD_CARD);
	if (retval != STATUS_SUCCESS)
		return STATUS_FAIL;

	rtsx_init_cmd(chip);

	rtsx_add_cmd(chip, WRITE_REG_CMD, XD_CHK_DATA_STATUS, 0xFF,
		     XD_PGSTS_NOT_FF);
	if (chip->asic_code) {
		if (!CHECK_PID(chip, 0x5288))
			xd_fill_pull_ctl_disable(chip);
		else
			xd_fill_pull_ctl_stage1_barossa(chip);
	} else {
		rtsx_add_cmd(chip, WRITE_REG_CMD, FPGA_PULL_CTL, 0xFF,
			     (FPGA_XD_PULL_CTL_EN1 & FPGA_XD_PULL_CTL_EN3) |
			     0x20);
	}

	if (!chip->ft2_fast_mode)
		rtsx_add_cmd(chip, WRITE_REG_CMD, XD_INIT,
			     XD_NO_AUTO_PWR_OFF, 0);

	rtsx_add_cmd(chip, WRITE_REG_CMD, CARD_OE, XD_OUTPUT_EN, 0);

	retval = rtsx_send_cmd(chip, XD_CARD, 100);
	if (retval < 0)
		return STATUS_FAIL;

	if (!chip->ft2_fast_mode) {
		/* Full power cycle: off, settle, pull-ups on, power on. */
		retval = card_power_off(chip, XD_CARD);
		if (retval != STATUS_SUCCESS)
			return STATUS_FAIL;

		wait_timeout(250);

		rtsx_init_cmd(chip);

		if (chip->asic_code) {
			xd_fill_pull_ctl_enable(chip);
		} else {
			rtsx_add_cmd(chip, WRITE_REG_CMD, FPGA_PULL_CTL, 0xFF,
				     (FPGA_XD_PULL_CTL_EN1 &
				      FPGA_XD_PULL_CTL_EN2) |
				     0x20);
		}

		retval = rtsx_send_cmd(chip, XD_CARD, 100);
		if (retval < 0)
			return STATUS_FAIL;

		retval = card_power_on(chip, XD_CARD);
		if (retval != STATUS_SUCCESS)
			return STATUS_FAIL;

#ifdef SUPPORT_OCP
		/* Give the over-current protection logic time to latch. */
		wait_timeout(50);
		if (chip->ocp_stat & (SD_OC_NOW | SD_OC_EVER)) {
			dev_dbg(rtsx_dev(chip), "Over current, OCPSTAT is 0x%x\n",
				chip->ocp_stat);
			return STATUS_FAIL;
		}
#endif
	}

	rtsx_init_cmd(chip);

	if (chip->ft2_fast_mode) {
		/* Fast mode skipped the power cycle; enable pulls here. */
		if (chip->asic_code) {
			xd_fill_pull_ctl_enable(chip);
		} else {
			rtsx_add_cmd(chip, WRITE_REG_CMD, FPGA_PULL_CTL, 0xFF,
				     (FPGA_XD_PULL_CTL_EN1 &
				      FPGA_XD_PULL_CTL_EN2) |
				     0x20);
		}
	}

	rtsx_add_cmd(chip, WRITE_REG_CMD, CARD_OE, XD_OUTPUT_EN, XD_OUTPUT_EN);
	rtsx_add_cmd(chip, WRITE_REG_CMD, XD_CTL, XD_CE_DISEN, XD_CE_DISEN);

	retval = rtsx_send_cmd(chip, XD_CARD, 100);
	if (retval < 0)
		return STATUS_FAIL;

	if (!chip->ft2_fast_mode)
		wait_timeout(200);

	retval = xd_set_init_para(chip);
	if (retval != STATUS_SUCCESS)
		return STATUS_FAIL;

	/* Read ID to check if the timing setting is right */
	for (i = 0; i < 4; i++) {
		rtsx_init_cmd(chip);

		/* Each attempt i uses progressively longer R/W timing. */
		rtsx_add_cmd(chip, WRITE_REG_CMD, XD_DTCTL, 0xFF,
			     XD_TIME_SETUP_STEP * 3 +
			     XD_TIME_RW_STEP * (2 + i) + XD_TIME_RWN_STEP * i);
		rtsx_add_cmd(chip, WRITE_REG_CMD, XD_CATCTL, 0xFF,
			     XD_TIME_SETUP_STEP * 3 +
			     XD_TIME_RW_STEP * (4 + i) +
			     XD_TIME_RWN_STEP * (3 + i));

		rtsx_add_cmd(chip, WRITE_REG_CMD, XD_TRANSFER, 0xFF,
			     XD_TRANSFER_START | XD_RESET);
		rtsx_add_cmd(chip, CHECK_REG_CMD, XD_TRANSFER,
			     XD_TRANSFER_END, XD_TRANSFER_END);

		rtsx_add_cmd(chip, READ_REG_CMD, XD_DAT, 0, 0);
		rtsx_add_cmd(chip, READ_REG_CMD, XD_CTL, 0, 0);

		retval = rtsx_send_cmd(chip, XD_CARD, 100);
		if (retval < 0)
			return STATUS_FAIL;

		/* First byte of cmd data is skipped; ptr[0]=XD_DAT, ptr[1]=XD_CTL. */
		ptr = rtsx_get_cmd_data(chip) + 1;

		dev_dbg(rtsx_dev(chip), "XD_DAT: 0x%x, XD_CTL: 0x%x\n",
			ptr[0], ptr[1]);

		/* Card must report ready before the ID read is trusted. */
		if (((ptr[0] & READY_FLAG) != READY_STATE) ||
		    !(ptr[1] & XD_RDY))
			continue;

		retval = xd_read_id(chip, READ_ID, id_buf, 4);
		if (retval != STATUS_SUCCESS)
			return STATUS_FAIL;

		dev_dbg(rtsx_dev(chip), "READ_ID: 0x%x 0x%x 0x%x 0x%x\n",
			id_buf[0], id_buf[1], id_buf[2], id_buf[3]);

		xd_card->device_code = id_buf[1];

		/* Check if the xD card is supported */
		/*
		 * capacity appears to be in 512-byte sectors (8000 sectors
		 * ~= 4 MB, matching the device-code names) --
		 * NOTE(review): confirm against the code that consumes
		 * chip->capacity[].
		 */
		switch (xd_card->device_code) {
		case XD_4M_X8_512_1:
		case XD_4M_X8_512_2:
			xd_card->block_shift = 4;
			xd_card->page_off = 0x0F;
			xd_card->addr_cycle = 3;
			xd_card->zone_cnt = 1;
			xd_card->capacity = 8000;
			XD_SET_4MB(xd_card);
			break;
		case XD_8M_X8_512:
			xd_card->block_shift = 4;
			xd_card->page_off = 0x0F;
			xd_card->addr_cycle = 3;
			xd_card->zone_cnt = 1;
			xd_card->capacity = 16000;
			break;
		case XD_16M_X8_512:
			XD_PAGE_512(xd_card);
			xd_card->addr_cycle = 3;
			xd_card->zone_cnt = 1;
			xd_card->capacity = 32000;
			break;
		case XD_32M_X8_512:
			XD_PAGE_512(xd_card);
			xd_card->addr_cycle = 3;
			xd_card->zone_cnt = 2;
			xd_card->capacity = 64000;
			break;
		case XD_64M_X8_512:
			XD_PAGE_512(xd_card);
			xd_card->addr_cycle = 4;
			xd_card->zone_cnt = 4;
			xd_card->capacity = 128000;
			break;
		case XD_128M_X8_512:
			XD_PAGE_512(xd_card);
			xd_card->addr_cycle = 4;
			xd_card->zone_cnt = 8;
			xd_card->capacity = 256000;
			break;
		case XD_256M_X8_512:
			XD_PAGE_512(xd_card);
			xd_card->addr_cycle = 4;
			xd_card->zone_cnt = 16;
			xd_card->capacity = 512000;
			break;
		case XD_512M_X8:
			XD_PAGE_512(xd_card);
			xd_card->addr_cycle = 4;
			xd_card->zone_cnt = 32;
			xd_card->capacity = 1024000;
			break;
		case XD_1G_X8_512:
			XD_PAGE_512(xd_card);
			xd_card->addr_cycle = 4;
			xd_card->zone_cnt = 64;
			xd_card->capacity = 2048000;
			break;
		case XD_2G_X8_512:
			XD_PAGE_512(xd_card);
			xd_card->addr_cycle = 4;
			xd_card->zone_cnt = 128;
			xd_card->capacity = 4096000;
			break;
		default:
			/* Unknown device code: try the next timing. */
			continue;
		}

		/* Confirm timing setting */
		/* Require ten consecutive matching ID reads to accept it. */
		for (j = 0; j < 10; j++) {
			retval = xd_read_id(chip, READ_ID, id_buf, 4);
			if (retval != STATUS_SUCCESS)
				return STATUS_FAIL;

			if (id_buf[1] != xd_card->device_code)
				break;
		}
		if (j == 10)
			break;
	}

	if (i == 4) {
		/* No timing produced a stable, supported ID. */
		xd_card->block_shift = 0;
		xd_card->page_off = 0;
		xd_card->addr_cycle = 0;
		xd_card->capacity = 0;
		return STATUS_FAIL;
	}

	retval = xd_read_id(chip, READ_XD_ID, id_buf, 4);
	if (retval != STATUS_SUCCESS)
		return STATUS_FAIL;
	dev_dbg(rtsx_dev(chip), "READ_XD_ID: 0x%x 0x%x 0x%x 0x%x\n",
		id_buf[0], id_buf[1], id_buf[2], id_buf[3]);
	if (id_buf[2] != XD_ID_CODE)
		return STATUS_FAIL;

	/* Search CIS block */
	for (i = 0; i < 24; i++) {
		u32 page_addr;

		if (detect_card_cd(chip, XD_CARD) != STATUS_SUCCESS)
			return STATUS_FAIL;

		page_addr = (u32)i << xd_card->block_shift;

		/* Up to three tries to read the block's redundant area. */
		for (j = 0; j < 3; j++) {
			retval = xd_read_redundant(chip, page_addr, redunt, 11);
			if (retval == STATUS_SUCCESS)
				break;
		}
		if (j == 3)
			continue;

		if (redunt[BLOCK_STATUS] != XD_GBLK)
			continue;

		j = 0;
		if (redunt[PAGE_STATUS] != XD_GPG) {
			/* First page bad: look for a good page among 1..8. */
			for (j = 1; j <= 8; j++) {
				retval = xd_read_redundant(chip, page_addr + j,
							   redunt, 11);
				if (retval == STATUS_SUCCESS) {
					if (redunt[PAGE_STATUS] == XD_GPG)
						break;
				}
			}

			if (j == 9)
				break;
		}

		/* Check CIS data */
		if (redunt[BLOCK_STATUS] == XD_GBLK &&
		    (redunt[PARITY] & XD_BA1_ALL0)) {
			u8 buf[10];

			page_addr += j;

			retval = xd_read_cis(chip, page_addr, buf, 10);
			if (retval != STATUS_SUCCESS)
				return STATUS_FAIL;

			/* Match the fixed 10-byte CIS signature. */
			if (buf[0] == 0x01 && buf[1] == 0x03 &&
			    buf[2] == 0xD9 &&
			    buf[3] == 0x01 && buf[4] == 0xFF &&
			    buf[5] == 0x18 && buf[6] == 0x02 &&
			    buf[7] == 0xDF && buf[8] == 0x01 &&
			    buf[9] == 0x20) {
				xd_card->cis_block = (u16)i;
			}
		}

		/* Only the first readable good block is examined. */
		break;
	}

	dev_dbg(rtsx_dev(chip), "CIS block: 0x%x\n", xd_card->cis_block);
	if (xd_card->cis_block == 0xFFFF)
		return STATUS_FAIL;

	chip->capacity[chip->card2lun[XD_CARD]] = xd_card->capacity;

	return STATUS_SUCCESS;
}
/*
 * xd_check_data_blank - test whether a redundant area describes a blank page
 * @redunt: 11-byte redundant-area buffer
 *
 * A page counts as blank when the six bytes starting at PAGE_STATUS are
 * all 0xFF, both ECC "all ones" flags are set in the parity byte, and
 * the four reserved bytes are all 0xFF.
 *
 * Returns 1 if blank, 0 otherwise.
 */
static int xd_check_data_blank(u8 *redunt)
{
	const u8 ecc_all1 = XD_ECC1_ALL1 | XD_ECC2_ALL1;
	int idx;

	for (idx = 0; idx < 6; idx++) {
		if (redunt[PAGE_STATUS + idx] != 0xFF)
			return 0;
	}

	if ((redunt[PARITY] & ecc_all1) != ecc_all1)
		return 0;

	for (idx = 0; idx < 4; idx++) {
		if (redunt[RESERVED0 + idx] != 0xFF)
			return 0;
	}

	return 1;
}
/*
 * xd_load_log_block_addr - extract the logical block address from the
 * redundant area
 * @redunt: 11-byte redundant-area buffer
 *
 * Address copy 1 is used whenever it is usable (both copies equal, or
 * copy 1 valid on its own); otherwise copy 2 is used if valid.
 *
 * Returns the logical address, or 0xFFFF if neither copy is valid.
 */
static u16 xd_load_log_block_addr(u8 *redunt)
{
	u16 addr = 0xFFFF;

	if (redunt[PARITY] & (XD_BA1_BA2_EQL | XD_BA1_VALID))
		addr = ((u16)redunt[BLOCK_ADDR1_H] << 8) |
			redunt[BLOCK_ADDR1_L];
	else if (redunt[PARITY] & XD_BA2_VALID)
		addr = ((u16)redunt[BLOCK_ADDR2_H] << 8) |
			redunt[BLOCK_ADDR2_L];

	return addr;
}
static int xd_init_l2p_tbl(struct rtsx_chip *chip)
{
struct xd_info *xd_card = &chip->xd_card;
int size, i;
dev_dbg(rtsx_dev(chip), "%s: zone_cnt = %d\n", __func__,
xd_card->zone_cnt);
if (xd_card->zone_cnt < 1)
return STATUS_FAIL;
size = xd_card->zone_cnt * sizeof(struct zone_entry);
dev_dbg(rtsx_dev(chip), "Buffer size for l2p table is %d\n", size);
xd_card->zone = vmalloc(size);
if (!xd_card->zone)
return STATUS_ERROR;
for (i = 0; i < xd_card->zone_cnt; i++) {
xd_card->zone[i].build_flag = 0;
xd_card->zone[i].l2p_table = NULL;
xd_card->zone[i].free_table = NULL;
xd_card->zone[i].get_index = 0;
xd_card->zone[i].set_index = 0;
xd_card->zone[i].unused_blk_cnt = 0;
}
return STATUS_SUCCESS;
}
/*
 * free_zone - drop the cached L2P/free tables of one zone
 * @zone: zone to reset (NULL is tolerated)
 *
 * Releases both lookup tables and clears the bookkeeping fields so the
 * zone will be rebuilt on its next use.
 */
static inline void free_zone(struct zone_entry *zone)
{
	if (!zone)
		return;

	/* Release the tables; vfree(NULL) is a no-op. */
	vfree(zone->l2p_table);
	zone->l2p_table = NULL;
	vfree(zone->free_table);
	zone->free_table = NULL;

	/* Mark the zone as not built and its free-list ring as empty. */
	zone->build_flag = 0;
	zone->set_index = 0;
	zone->get_index = 0;
	zone->unused_blk_cnt = 0;
}
/*
 * xd_set_unused_block - push a freed physical block onto its zone's free list
 * @chip:    adapter state
 * @phy_blk: absolute physical block number (zone in bits 10+, offset in 0-9)
 *
 * Builds the zone's tables on demand if the free table is missing. The
 * free table is used as a ring buffer whose producer index (set_index)
 * wraps at XD_FREE_TABLE_CNT. On an invalid zone or corrupted index the
 * operation is dropped (and the zone freed for rebuild).
 */
static void xd_set_unused_block(struct rtsx_chip *chip, u32 phy_blk)
{
	struct xd_info *xd_card = &chip->xd_card;
	struct zone_entry *zone;
	int zone_no;

	/* Upper bits select the zone; each zone holds 1024 blocks. */
	zone_no = (int)phy_blk >> 10;
	if (zone_no >= xd_card->zone_cnt) {
		dev_dbg(rtsx_dev(chip), "Set unused block to invalid zone (zone_no = %d, zone_cnt = %d)\n",
			zone_no, xd_card->zone_cnt);
		return;
	}
	zone = &xd_card->zone[zone_no];

	/* Lazily build the zone tables on first use. */
	if (!zone->free_table) {
		if (xd_build_l2p_tbl(chip, zone_no) != STATUS_SUCCESS)
			return;
	}

	if (zone->set_index >= XD_FREE_TABLE_CNT ||
	    zone->set_index < 0) {
		/* Corrupted ring state: throw the whole zone away. */
		free_zone(zone);
		dev_dbg(rtsx_dev(chip), "Set unused block fail, invalid set_index\n");
		return;
	}

	dev_dbg(rtsx_dev(chip), "Set unused block to index %d\n",
		zone->set_index);

	/* Store only the in-zone offset (low 10 bits); wrap the index. */
	zone->free_table[zone->set_index++] = (u16)(phy_blk & 0x3ff);
	if (zone->set_index >= XD_FREE_TABLE_CNT)
		zone->set_index = 0;
	zone->unused_blk_cnt++;
}
/*
 * xd_get_unused_block - pop a free physical block from a zone's free list
 * @chip:    adapter state
 * @zone_no: zone to allocate from
 *
 * The free table is a ring buffer consumed at get_index. An empty ring
 * (count zero or producer == consumer) or a corrupted index drops the
 * zone so it is rebuilt later.
 *
 * Returns the absolute physical block number, or BLK_NOT_FOUND.
 */
static u32 xd_get_unused_block(struct rtsx_chip *chip, int zone_no)
{
	struct xd_info *xd_card = &chip->xd_card;
	struct zone_entry *zone;
	u32 phy_blk;

	if (zone_no >= xd_card->zone_cnt) {
		dev_dbg(rtsx_dev(chip), "Get unused block from invalid zone (zone_no = %d, zone_cnt = %d)\n",
			zone_no, xd_card->zone_cnt);
		return BLK_NOT_FOUND;
	}
	zone = &xd_card->zone[zone_no];

	if (zone->unused_blk_cnt == 0 ||
	    zone->set_index == zone->get_index) {
		free_zone(zone);
		dev_dbg(rtsx_dev(chip), "Get unused block fail, no unused block available\n");
		return BLK_NOT_FOUND;
	}
	if (zone->get_index >= XD_FREE_TABLE_CNT || zone->get_index < 0) {
		free_zone(zone);
		dev_dbg(rtsx_dev(chip), "Get unused block fail, invalid get_index\n");
		return BLK_NOT_FOUND;
	}

	dev_dbg(rtsx_dev(chip), "Get unused block from index %d\n",
		zone->get_index);

	/* Consume the slot, poison it, and wrap the ring index. */
	phy_blk = zone->free_table[zone->get_index];
	zone->free_table[zone->get_index++] = 0xFFFF;
	if (zone->get_index >= XD_FREE_TABLE_CNT)
		zone->get_index = 0;
	zone->unused_blk_cnt--;

	/* Re-attach the zone number to form the absolute block address. */
	phy_blk += ((u32)(zone_no) << 10);
	return phy_blk;
}
/*
 * xd_set_l2p_tbl - record a logical-to-physical mapping within a zone
 * @chip:    adapter state
 * @zone_no: zone the mapping belongs to
 * @log_off: logical block offset inside the zone
 * @phy_off: physical block offset inside the zone (low 10 bits)
 */
static void xd_set_l2p_tbl(struct rtsx_chip *chip,
			   int zone_no, u16 log_off, u16 phy_off)
{
	struct xd_info *xd_card = &chip->xd_card;

	xd_card->zone[zone_no].l2p_table[log_off] = phy_off;
}
/*
 * xd_get_l2p_tbl - resolve a logical block offset to a physical block
 * @chip:    adapter state
 * @zone_no: zone index
 * @log_off: logical offset within the zone
 *
 * If the logical offset is unmapped (0xFFFF), a fresh unused block is
 * allocated, its redundant area initialized across the whole block, and
 * the new mapping recorded. Any pending delayed write is flushed first.
 *
 * Returns the absolute physical block number, or BLK_NOT_FOUND.
 */
static u32 xd_get_l2p_tbl(struct rtsx_chip *chip, int zone_no, u16 log_off)
{
	struct xd_info *xd_card = &chip->xd_card;
	struct zone_entry *zone;
	int retval;

	zone = &xd_card->zone[zone_no];
	if (zone->l2p_table[log_off] == 0xFFFF) {
		u32 phy_blk = 0;
		int i;

#ifdef XD_DELAY_WRITE
		/* Flush deferred writes so the free list is consistent. */
		retval = xd_delay_write(chip);
		if (retval != STATUS_SUCCESS) {
			dev_dbg(rtsx_dev(chip), "In %s, delay write fail!\n",
				__func__);
			return BLK_NOT_FOUND;
		}
#endif
		if (zone->unused_blk_cnt <= 0) {
			dev_dbg(rtsx_dev(chip), "No unused block!\n");
			return BLK_NOT_FOUND;
		}

		/* Try free blocks until one initializes successfully. */
		for (i = 0; i < zone->unused_blk_cnt; i++) {
			phy_blk = xd_get_unused_block(chip, zone_no);
			if (phy_blk == BLK_NOT_FOUND) {
				dev_dbg(rtsx_dev(chip), "No unused block available!\n");
				return BLK_NOT_FOUND;
			}

			retval = xd_init_page(chip, phy_blk, log_off,
					      0, xd_card->page_off + 1);
			if (retval == STATUS_SUCCESS)
				break;
		}
		if (i >= zone->unused_blk_cnt) {
			dev_dbg(rtsx_dev(chip), "No good unused block available!\n");
			return BLK_NOT_FOUND;
		}

		xd_set_l2p_tbl(chip, zone_no, log_off, (u16)(phy_blk & 0x3FF));
		return phy_blk;
	}

	return (u32)zone->l2p_table[log_off] + ((u32)(zone_no) << 10);
}
/*
 * reset_xd_card - driver entry point to (re)initialize an xD card
 * @chip: adapter state
 *
 * Clears all per-card state, enables the card clock, resets the card
 * hardware and allocates the zone table array.
 *
 * Returns STATUS_SUCCESS on success, STATUS_FAIL on any failure.
 */
int reset_xd_card(struct rtsx_chip *chip)
{
	struct xd_info *xd_card = &chip->xd_card;
	int retval;

	/*
	 * memset() already zeroes block_shift, page_off, addr_cycle,
	 * capacity, zone_cnt and delay_write.delay_write_flag, so the
	 * previous explicit zero assignments were redundant; only
	 * cis_block needs its non-zero "not found" sentinel.
	 */
	memset(xd_card, 0, sizeof(struct xd_info));
	xd_card->cis_block = 0xFFFF;

	retval = enable_card_clock(chip, XD_CARD);
	if (retval != STATUS_SUCCESS)
		return STATUS_FAIL;

	retval = reset_xd(chip);
	if (retval != STATUS_SUCCESS)
		return STATUS_FAIL;

	retval = xd_init_l2p_tbl(chip);
	if (retval != STATUS_SUCCESS)
		return STATUS_FAIL;

	return STATUS_SUCCESS;
}
/*
 * xd_mark_bad_block - stamp XD_LATER_BBLK into a block's redundant area
 * @chip:    adapter state
 * @phy_blk: physical block to mark
 *
 * Writes the redundant area of every page in the block with a "later
 * bad block" status and all-0xFF addresses so the block is skipped when
 * the L2P table is next rebuilt.
 *
 * Returns STATUS_SUCCESS or STATUS_FAIL.
 */
static int xd_mark_bad_block(struct rtsx_chip *chip, u32 phy_blk)
{
	struct xd_info *xd_card = &chip->xd_card;
	int retval;
	u32 page_addr;
	u8 reg = 0;

	dev_dbg(rtsx_dev(chip), "mark block 0x%x as bad block\n", phy_blk);

	if (phy_blk == BLK_NOT_FOUND)
		return STATUS_FAIL;

	rtsx_init_cmd(chip);

	rtsx_add_cmd(chip, WRITE_REG_CMD, XD_PAGE_STATUS, 0xFF, XD_GPG);
	rtsx_add_cmd(chip, WRITE_REG_CMD, XD_BLOCK_STATUS, 0xFF, XD_LATER_BBLK);
	rtsx_add_cmd(chip, WRITE_REG_CMD, XD_BLOCK_ADDR1_H, 0xFF, 0xFF);
	rtsx_add_cmd(chip, WRITE_REG_CMD, XD_BLOCK_ADDR1_L, 0xFF, 0xFF);
	rtsx_add_cmd(chip, WRITE_REG_CMD, XD_BLOCK_ADDR2_H, 0xFF, 0xFF);
	rtsx_add_cmd(chip, WRITE_REG_CMD, XD_BLOCK_ADDR2_L, 0xFF, 0xFF);
	rtsx_add_cmd(chip, WRITE_REG_CMD, XD_RESERVED0, 0xFF, 0xFF);
	rtsx_add_cmd(chip, WRITE_REG_CMD, XD_RESERVED1, 0xFF, 0xFF);
	rtsx_add_cmd(chip, WRITE_REG_CMD, XD_RESERVED2, 0xFF, 0xFF);
	rtsx_add_cmd(chip, WRITE_REG_CMD, XD_RESERVED3, 0xFF, 0xFF);

	page_addr = phy_blk << xd_card->block_shift;
	xd_assign_phy_addr(chip, page_addr, XD_RW_ADDR);

	/* Cover every page of the block in one redundant-area write. */
	rtsx_add_cmd(chip, WRITE_REG_CMD, XD_PAGE_CNT, 0xFF,
		     xd_card->page_off + 1);

	rtsx_add_cmd(chip, WRITE_REG_CMD, XD_TRANSFER, 0xFF,
		     XD_TRANSFER_START | XD_WRITE_REDUNDANT);
	rtsx_add_cmd(chip, CHECK_REG_CMD, XD_TRANSFER,
		     XD_TRANSFER_END, XD_TRANSFER_END);

	retval = rtsx_send_cmd(chip, XD_CARD, 500);
	if (retval < 0) {
		rtsx_clear_xd_error(chip);
		rtsx_read_register(chip, XD_DAT, &reg);
		/* Distinguish a program failure from a plain timeout. */
		if (reg & PROGRAM_ERROR)
			xd_set_err_code(chip, XD_PRG_ERROR);
		else
			xd_set_err_code(chip, XD_TO_ERROR);
		return STATUS_FAIL;
	}

	return STATUS_SUCCESS;
}
/*
 * xd_init_page - write fresh redundant areas carrying a logical offset
 * @chip:       adapter state
 * @phy_blk:    physical block to initialize
 * @logoff:     logical block offset to record in BLOCK_ADDR1
 * @start_page: first page (inclusive)
 * @end_page:   last page (exclusive)
 *
 * Writes the redundant area of pages [start_page, end_page) with good
 * page/block status and the given logical address. A program error
 * marks the block bad.
 *
 * Returns STATUS_SUCCESS or STATUS_FAIL.
 */
static int xd_init_page(struct rtsx_chip *chip, u32 phy_blk,
			u16 logoff, u8 start_page, u8 end_page)
{
	struct xd_info *xd_card = &chip->xd_card;
	int retval;
	u32 page_addr;
	u8 reg = 0;

	dev_dbg(rtsx_dev(chip), "Init block 0x%x\n", phy_blk);

	if (start_page > end_page)
		return STATUS_FAIL;
	if (phy_blk == BLK_NOT_FOUND)
		return STATUS_FAIL;

	rtsx_init_cmd(chip);

	rtsx_add_cmd(chip, WRITE_REG_CMD, XD_PAGE_STATUS, 0xFF, 0xFF);
	rtsx_add_cmd(chip, WRITE_REG_CMD, XD_BLOCK_STATUS, 0xFF, 0xFF);
	rtsx_add_cmd(chip, WRITE_REG_CMD, XD_BLOCK_ADDR1_H,
		     0xFF, (u8)(logoff >> 8));
	rtsx_add_cmd(chip, WRITE_REG_CMD, XD_BLOCK_ADDR1_L, 0xFF, (u8)logoff);

	page_addr = (phy_blk << xd_card->block_shift) + start_page;
	xd_assign_phy_addr(chip, page_addr, XD_RW_ADDR);

	/* Let the controller transform the block address on the fly. */
	rtsx_add_cmd(chip, WRITE_REG_CMD, XD_CFG,
		     XD_BA_TRANSFORM, XD_BA_TRANSFORM);

	rtsx_add_cmd(chip, WRITE_REG_CMD, XD_PAGE_CNT,
		     0xFF, (end_page - start_page));

	rtsx_add_cmd(chip, WRITE_REG_CMD, XD_TRANSFER,
		     0xFF, XD_TRANSFER_START | XD_WRITE_REDUNDANT);
	rtsx_add_cmd(chip, CHECK_REG_CMD, XD_TRANSFER,
		     XD_TRANSFER_END, XD_TRANSFER_END);

	retval = rtsx_send_cmd(chip, XD_CARD, 500);
	if (retval < 0) {
		rtsx_clear_xd_error(chip);
		rtsx_read_register(chip, XD_DAT, &reg);
		if (reg & PROGRAM_ERROR) {
			/* The block cannot be programmed: retire it. */
			xd_mark_bad_block(chip, phy_blk);
			xd_set_err_code(chip, XD_PRG_ERROR);
		} else {
			xd_set_err_code(chip, XD_TO_ERROR);
		}
		return STATUS_FAIL;
	}

	return STATUS_SUCCESS;
}
/*
 * xd_copy_page - copy pages [start_page, end_page) between blocks
 * @chip:       adapter state
 * @old_blk:    source physical block
 * @new_blk:    destination physical block
 * @start_page: first page to copy (inclusive)
 * @end_page:   last page (exclusive)
 *
 * Copies one page at a time through the chip's ping-pong buffer. An
 * uncorrectable ECC error on the source flags the old block bad
 * (XD_SET_BAD_OLDBLK) but the copy continues; a program error on the
 * destination marks the new block bad and aborts.
 *
 * Returns STATUS_SUCCESS, STATUS_FAIL, or a negative register-write
 * error from the initial data-source selection.
 */
static int xd_copy_page(struct rtsx_chip *chip, u32 old_blk, u32 new_blk,
			u8 start_page, u8 end_page)
{
	struct xd_info *xd_card = &chip->xd_card;
	u32 old_page, new_page;
	u8 i, reg = 0;
	int retval;

	dev_dbg(rtsx_dev(chip), "Copy page from block 0x%x to block 0x%x\n",
		old_blk, new_blk);

	if (start_page > end_page)
		return STATUS_FAIL;

	if (old_blk == BLK_NOT_FOUND || new_blk == BLK_NOT_FOUND)
		return STATUS_FAIL;

	old_page = (old_blk << xd_card->block_shift) + start_page;
	new_page = (new_blk << xd_card->block_shift) + start_page;

	XD_CLR_BAD_NEWBLK(xd_card);

	retval = rtsx_write_register(chip, CARD_DATA_SOURCE, 0x01,
				     PINGPONG_BUFFER);
	if (retval)
		return retval;

	for (i = start_page; i < end_page; i++) {
		if (detect_card_cd(chip, XD_CARD) != STATUS_SUCCESS) {
			rtsx_clear_xd_error(chip);
			xd_set_err_code(chip, XD_NO_CARD);
			return STATUS_FAIL;
		}

		/* Read one page from the old block into the chip buffer. */
		rtsx_init_cmd(chip);

		xd_assign_phy_addr(chip, old_page, XD_RW_ADDR);

		rtsx_add_cmd(chip, WRITE_REG_CMD, XD_PAGE_CNT, 0xFF, 1);
		rtsx_add_cmd(chip, WRITE_REG_CMD, XD_CHK_DATA_STATUS,
			     XD_AUTO_CHK_DATA_STATUS, 0);
		rtsx_add_cmd(chip, WRITE_REG_CMD, XD_TRANSFER, 0xFF,
			     XD_TRANSFER_START | XD_READ_PAGES);
		rtsx_add_cmd(chip, CHECK_REG_CMD, XD_TRANSFER,
			     XD_TRANSFER_END, XD_TRANSFER_END);

		retval = rtsx_send_cmd(chip, XD_CARD, 500);
		if (retval < 0) {
			rtsx_clear_xd_error(chip);
			reg = 0;
			rtsx_read_register(chip, XD_CTL, &reg);
			if (reg & (XD_ECC1_ERROR | XD_ECC2_ERROR)) {
				mdelay(100);

				if (detect_card_cd(chip,
						   XD_CARD) != STATUS_SUCCESS) {
					xd_set_err_code(chip, XD_NO_CARD);
					return STATUS_FAIL;
				}

				if (((reg & XD_ECC1_ERROR) &&
				     (reg & XD_ECC1_UNCORRECTABLE)) ||
				    ((reg & XD_ECC2_ERROR) &&
				     (reg & XD_ECC2_UNCORRECTABLE))) {
					/*
					 * Uncorrectable source data: remember
					 * the old block is bad, but keep
					 * copying the remaining pages.
					 */
					rtsx_write_register(chip,
							    XD_PAGE_STATUS,
							    0xFF,
							    XD_BPG);
					rtsx_write_register(chip,
							    XD_BLOCK_STATUS,
							    0xFF,
							    XD_GBLK);
					XD_SET_BAD_OLDBLK(xd_card);
					dev_dbg(rtsx_dev(chip), "old block 0x%x ecc error\n",
						old_blk);
				}
			} else {
				xd_set_err_code(chip, XD_TO_ERROR);
				return STATUS_FAIL;
			}
		}

		if (XD_CHK_BAD_OLDBLK(xd_card))
			rtsx_clear_xd_error(chip);

		/* Write the buffered page out to the new block. */
		rtsx_init_cmd(chip);

		xd_assign_phy_addr(chip, new_page, XD_RW_ADDR);
		rtsx_add_cmd(chip, WRITE_REG_CMD, XD_PAGE_CNT, 0xFF, 1);
		rtsx_add_cmd(chip, WRITE_REG_CMD, XD_TRANSFER, 0xFF,
			     XD_TRANSFER_START | XD_WRITE_PAGES);
		rtsx_add_cmd(chip, CHECK_REG_CMD, XD_TRANSFER,
			     XD_TRANSFER_END, XD_TRANSFER_END);

		retval = rtsx_send_cmd(chip, XD_CARD, 300);
		if (retval < 0) {
			rtsx_clear_xd_error(chip);
			reg = 0;
			rtsx_read_register(chip, XD_DAT, &reg);
			if (reg & PROGRAM_ERROR) {
				xd_mark_bad_block(chip, new_blk);
				xd_set_err_code(chip, XD_PRG_ERROR);
				XD_SET_BAD_NEWBLK(xd_card);
			} else {
				xd_set_err_code(chip, XD_TO_ERROR);
			}
			return STATUS_FAIL;
		}

		old_page++;
		new_page++;
	}

	return STATUS_SUCCESS;
}
/*
 * xd_reset_cmd - issue an XD_RESET and check the card reports ready
 * @chip: adapter state
 *
 * Sends the reset transfer and reads back XD_DAT/XD_CTL; success
 * requires the ready flag in the data byte and XD_RDY in the control
 * byte.
 *
 * Returns STATUS_SUCCESS or STATUS_FAIL.
 */
static int xd_reset_cmd(struct rtsx_chip *chip)
{
	int retval;
	u8 *ptr;

	rtsx_init_cmd(chip);

	rtsx_add_cmd(chip, WRITE_REG_CMD, XD_TRANSFER,
		     0xFF, XD_TRANSFER_START | XD_RESET);
	rtsx_add_cmd(chip, CHECK_REG_CMD, XD_TRANSFER,
		     XD_TRANSFER_END, XD_TRANSFER_END);
	rtsx_add_cmd(chip, READ_REG_CMD, XD_DAT, 0, 0);
	rtsx_add_cmd(chip, READ_REG_CMD, XD_CTL, 0, 0);

	retval = rtsx_send_cmd(chip, XD_CARD, 100);
	if (retval < 0)
		return STATUS_FAIL;

	/* Skip the first byte; ptr[0] = XD_DAT, ptr[1] = XD_CTL. */
	ptr = rtsx_get_cmd_data(chip) + 1;
	if (((ptr[0] & READY_FLAG) == READY_STATE) && (ptr[1] & XD_RDY))
		return STATUS_SUCCESS;

	return STATUS_FAIL;
}
/*
 * xd_erase_block - erase a physical block, retrying up to three times
 * @chip:    adapter state
 * @phy_blk: physical block to erase
 *
 * A program error at any point marks the block bad and fails
 * immediately; a transfer failure resets the card and retries. If all
 * three attempts fail, the block is marked bad.
 *
 * Returns STATUS_SUCCESS or STATUS_FAIL.
 */
static int xd_erase_block(struct rtsx_chip *chip, u32 phy_blk)
{
	struct xd_info *xd_card = &chip->xd_card;
	u32 page_addr;
	u8 reg = 0, *ptr;
	int i, retval;

	if (phy_blk == BLK_NOT_FOUND)
		return STATUS_FAIL;

	page_addr = phy_blk << xd_card->block_shift;

	for (i = 0; i < 3; i++) {
		rtsx_init_cmd(chip);

		xd_assign_phy_addr(chip, page_addr, XD_ERASE_ADDR);

		rtsx_add_cmd(chip, WRITE_REG_CMD, XD_TRANSFER, 0xFF,
			     XD_TRANSFER_START | XD_ERASE);
		rtsx_add_cmd(chip, CHECK_REG_CMD, XD_TRANSFER,
			     XD_TRANSFER_END, XD_TRANSFER_END);
		rtsx_add_cmd(chip, READ_REG_CMD, XD_DAT, 0, 0);

		retval = rtsx_send_cmd(chip, XD_CARD, 250);
		if (retval < 0) {
			rtsx_clear_xd_error(chip);
			rtsx_read_register(chip, XD_DAT, &reg);
			if (reg & PROGRAM_ERROR) {
				xd_mark_bad_block(chip, phy_blk);
				xd_set_err_code(chip, XD_PRG_ERROR);
				return STATUS_FAIL;
			}
			xd_set_err_code(chip, XD_ERASE_FAIL);
			/* Transfer failed: reset the card and retry. */
			retval = xd_reset_cmd(chip);
			if (retval != STATUS_SUCCESS)
				return STATUS_FAIL;
			continue;
		}

		/* Status byte returned by the erase transfer. */
		ptr = rtsx_get_cmd_data(chip) + 1;
		if (*ptr & PROGRAM_ERROR) {
			xd_mark_bad_block(chip, phy_blk);
			xd_set_err_code(chip, XD_PRG_ERROR);
			return STATUS_FAIL;
		}

		return STATUS_SUCCESS;
	}

	/* All three attempts failed. */
	xd_mark_bad_block(chip, phy_blk);
	xd_set_err_code(chip, XD_ERASE_FAIL);
	return STATUS_FAIL;
}
/*
 * xd_build_l2p_tbl - scan a zone and build its logical-to-physical table
 * @chip:    adapter state
 * @zone_no: zone to scan (1024 physical blocks per zone)
 *
 * Reads the redundant area of every physical block in the zone, files
 * blank or erasable blocks into the free-list ring, and records logical
 * addresses into l2p_table. A duplicate logical address is resolved by
 * comparing the last page's logical address of the previously recorded
 * block, and the stale copy is erased. If fewer spare blocks remain
 * than unmapped entries, the card is flagged write-protected.
 *
 * Returns STATUS_SUCCESS or STATUS_FAIL (allocation failure).
 */
static int xd_build_l2p_tbl(struct rtsx_chip *chip, int zone_no)
{
	struct xd_info *xd_card = &chip->xd_card;
	struct zone_entry *zone;
	int retval;
	u32 start, end, i;
	u16 max_logoff, cur_fst_page_logoff;
	u16 cur_lst_page_logoff, ent_lst_page_logoff;
	u8 redunt[11];

	dev_dbg(rtsx_dev(chip), "%s: %d\n", __func__, zone_no);

	if (!xd_card->zone) {
		retval = xd_init_l2p_tbl(chip);
		if (retval != STATUS_SUCCESS)
			return retval;
	}

	if (xd_card->zone[zone_no].build_flag) {
		dev_dbg(rtsx_dev(chip), "l2p table of zone %d has been built\n",
			zone_no);
		return STATUS_SUCCESS;
	}

	zone = &xd_card->zone[zone_no];

	/* 2000 bytes = 1000 u16 entries, the max logical blocks per zone. */
	if (!zone->l2p_table) {
		zone->l2p_table = vmalloc(2000);
		if (!zone->l2p_table)
			goto build_fail;
	}
	memset((u8 *)(zone->l2p_table), 0xff, 2000);

	/* Ring buffer of u16 free-block offsets. */
	if (!zone->free_table) {
		zone->free_table = vmalloc(XD_FREE_TABLE_CNT * 2);
		if (!zone->free_table)
			goto build_fail;
	}
	memset((u8 *)(zone->free_table), 0xff, XD_FREE_TABLE_CNT * 2);

	if (zone_no == 0) {
		/* Zone 0 starts after the CIS block; 4 MB cards are smaller. */
		if (xd_card->cis_block == 0xFFFF)
			start = 0;
		else
			start = xd_card->cis_block + 1;
		if (XD_CHK_4MB(xd_card)) {
			end = 0x200;
			max_logoff = 499;
		} else {
			end = 0x400;
			max_logoff = 999;
		}
	} else {
		start = (u32)(zone_no) << 10;
		end = (u32)(zone_no + 1) << 10;
		max_logoff = 999;
	}

	dev_dbg(rtsx_dev(chip), "start block 0x%x, end block 0x%x\n",
		start, end);

	zone->set_index = 0;
	zone->get_index = 0;
	zone->unused_blk_cnt = 0;

	for (i = start; i < end; i++) {
		u32 page_addr = i << xd_card->block_shift;
		u32 phy_block;

		retval = xd_read_redundant(chip, page_addr, redunt, 11);
		if (retval != STATUS_SUCCESS)
			continue;

		if (redunt[BLOCK_STATUS] != 0xFF) {
			dev_dbg(rtsx_dev(chip), "bad block\n");
			continue;
		}

		if (xd_check_data_blank(redunt)) {
			dev_dbg(rtsx_dev(chip), "blank block\n");
			xd_set_unused_block(chip, i);
			continue;
		}

		cur_fst_page_logoff = xd_load_log_block_addr(redunt);
		if (cur_fst_page_logoff == 0xFFFF ||
		    cur_fst_page_logoff > max_logoff) {
			/* No usable logical address: recycle the block. */
			retval = xd_erase_block(chip, i);
			if (retval == STATUS_SUCCESS)
				xd_set_unused_block(chip, i);
			continue;
		}

		if (zone_no == 0 && cur_fst_page_logoff == 0 &&
		    redunt[PAGE_STATUS] != XD_GPG)
			XD_SET_MBR_FAIL(xd_card);

		if (zone->l2p_table[cur_fst_page_logoff] == 0xFFFF) {
			zone->l2p_table[cur_fst_page_logoff] = (u16)(i & 0x3FF);
			continue;
		}

		/*
		 * Duplicate logical address: compare the last page of both
		 * candidates to decide which copy is current.
		 */
		phy_block = zone->l2p_table[cur_fst_page_logoff] +
			((u32)((zone_no) << 10));

		page_addr = ((i + 1) << xd_card->block_shift) - 1;

		retval = xd_read_redundant(chip, page_addr, redunt, 11);
		if (retval != STATUS_SUCCESS)
			continue;

		cur_lst_page_logoff = xd_load_log_block_addr(redunt);
		if (cur_lst_page_logoff == cur_fst_page_logoff) {
			int m;

			page_addr = ((phy_block + 1) <<
				     xd_card->block_shift) - 1;

			for (m = 0; m < 3; m++) {
				retval = xd_read_redundant(chip, page_addr,
							   redunt, 11);
				if (retval == STATUS_SUCCESS)
					break;
			}

			if (m == 3) {
				/* Old copy unreadable: keep the new block. */
				zone->l2p_table[cur_fst_page_logoff] =
					(u16)(i & 0x3FF);
				retval = xd_erase_block(chip, phy_block);
				if (retval == STATUS_SUCCESS)
					xd_set_unused_block(chip, phy_block);
				continue;
			}

			ent_lst_page_logoff = xd_load_log_block_addr(redunt);
			if (ent_lst_page_logoff != cur_fst_page_logoff) {
				/* Old copy incomplete: replace it. */
				zone->l2p_table[cur_fst_page_logoff] =
					(u16)(i & 0x3FF);
				retval = xd_erase_block(chip, phy_block);
				if (retval == STATUS_SUCCESS)
					xd_set_unused_block(chip, phy_block);
				continue;
			} else {
				/* Old copy complete: discard the new one. */
				retval = xd_erase_block(chip, i);
				if (retval == STATUS_SUCCESS)
					xd_set_unused_block(chip, i);
			}
		} else {
			retval = xd_erase_block(chip, i);
			if (retval == STATUS_SUCCESS)
				xd_set_unused_block(chip, i);
		}
	}

	if (XD_CHK_4MB(xd_card))
		end = 500;
	else
		end = 1000;

	/* Count logical entries that never got a physical mapping. */
	i = 0;
	for (start = 0; start < end; start++) {
		if (zone->l2p_table[start] == 0xFFFF)
			i++;
	}

	dev_dbg(rtsx_dev(chip), "Block count %d, invalid L2P entry %d\n",
		end, i);
	dev_dbg(rtsx_dev(chip), "Total unused block: %d\n",
		zone->unused_blk_cnt);

	/* Not enough spare blocks to back the unmapped entries. */
	if ((zone->unused_blk_cnt - i) < 1)
		chip->card_wp |= XD_CARD;

	zone->build_flag = 1;

	return STATUS_SUCCESS;

build_fail:
	vfree(zone->l2p_table);
	zone->l2p_table = NULL;
	vfree(zone->free_table);
	zone->free_table = NULL;

	return STATUS_FAIL;
}
/*
 * xd_send_cmd - latch a raw command byte into the xD card
 * @chip: adapter state
 * @cmd:  command byte to send (e.g. READ1_1)
 *
 * Returns STATUS_SUCCESS or STATUS_FAIL.
 */
static int xd_send_cmd(struct rtsx_chip *chip, u8 cmd)
{
	int retval;

	rtsx_init_cmd(chip);

	rtsx_add_cmd(chip, WRITE_REG_CMD, XD_DAT, 0xFF, cmd);
	rtsx_add_cmd(chip, WRITE_REG_CMD, XD_TRANSFER, 0xFF,
		     XD_TRANSFER_START | XD_SET_CMD);
	rtsx_add_cmd(chip, CHECK_REG_CMD, XD_TRANSFER,
		     XD_TRANSFER_END, XD_TRANSFER_END);

	retval = rtsx_send_cmd(chip, XD_CARD, 200);
	if (retval < 0)
		return STATUS_FAIL;

	return STATUS_SUCCESS;
}
/*
 * xd_read_multiple_pages - DMA-read pages [start_page, end_page) of a block
 * @chip:       adapter state
 * @phy_blk:    physical block to read
 * @log_blk:    logical block (used for zone/offset bookkeeping on error)
 * @start_page: first page (inclusive)
 * @end_page:   last page (exclusive)
 * @buf:        scatter/gather destination buffer
 * @index:      s/g segment index, updated by the partial transfer
 * @offset:     s/g segment offset, updated by the partial transfer
 *
 * On an uncorrectable ECC failure the block's contents are rescued into
 * a fresh block, the L2P entry is repointed, and the old block is
 * erased and marked bad.
 *
 * Returns STATUS_SUCCESS, STATUS_FAIL, or a negative register-read
 * error from the failure path.
 */
static int xd_read_multiple_pages(struct rtsx_chip *chip, u32 phy_blk,
				  u32 log_blk, u8 start_page, u8 end_page,
				  u8 *buf, unsigned int *index,
				  unsigned int *offset)
{
	struct xd_info *xd_card = &chip->xd_card;
	u32 page_addr, new_blk;
	u16 log_off;
	u8 reg_val, page_cnt;
	int zone_no, retval, i;

	if (start_page > end_page)
		goto status_fail;

	page_cnt = end_page - start_page;
	/* 1000 logical blocks per zone. */
	zone_no = (int)(log_blk / 1000);
	log_off = (u16)(log_blk % 1000);

	if ((phy_blk & 0x3FF) == 0x3FF) {
		/*
		 * NOTE(review): with an in-zone offset of 0x3FF this probes
		 * redundant areas of blocks 0..255 until one read succeeds
		 * (aborting if the card is removed) -- presumably to get
		 * the card responsive before the real read; confirm intent.
		 */
		for (i = 0; i < 256; i++) {
			page_addr = ((u32)i) << xd_card->block_shift;

			retval = xd_read_redundant(chip, page_addr, NULL, 0);
			if (retval == STATUS_SUCCESS)
				break;

			if (detect_card_cd(chip, XD_CARD) != STATUS_SUCCESS) {
				xd_set_err_code(chip, XD_NO_CARD);
				goto status_fail;
			}
		}
	}

	page_addr = (phy_blk << xd_card->block_shift) + start_page;

	rtsx_init_cmd(chip);

	xd_assign_phy_addr(chip, page_addr, XD_RW_ADDR);
	rtsx_add_cmd(chip, WRITE_REG_CMD, XD_CFG, XD_PPB_TO_SIE, XD_PPB_TO_SIE);
	rtsx_add_cmd(chip, WRITE_REG_CMD, CARD_DATA_SOURCE, 0x01, RING_BUFFER);
	rtsx_add_cmd(chip, WRITE_REG_CMD, XD_PAGE_CNT, 0xFF, page_cnt);
	rtsx_add_cmd(chip, WRITE_REG_CMD, XD_CHK_DATA_STATUS,
		     XD_AUTO_CHK_DATA_STATUS, XD_AUTO_CHK_DATA_STATUS);

	trans_dma_enable(chip->srb->sc_data_direction, chip,
			 page_cnt * 512, DMA_512);

	rtsx_add_cmd(chip, WRITE_REG_CMD, XD_TRANSFER, 0xFF,
		     XD_TRANSFER_START | XD_READ_PAGES);
	rtsx_add_cmd(chip, CHECK_REG_CMD, XD_TRANSFER,
		     XD_TRANSFER_END | XD_PPB_EMPTY,
		     XD_TRANSFER_END | XD_PPB_EMPTY);

	rtsx_send_cmd_no_wait(chip);

	retval = rtsx_transfer_data_partial(chip, XD_CARD, buf, page_cnt * 512,
					    scsi_sg_count(chip->srb),
					    index, offset, DMA_FROM_DEVICE,
					    chip->xd_timeout);
	if (retval < 0) {
		rtsx_clear_xd_error(chip);

		if (retval == -ETIMEDOUT) {
			xd_set_err_code(chip, XD_TO_ERROR);
			goto status_fail;
		} else {
			goto fail;
		}
	}

	return STATUS_SUCCESS;

fail:
	retval = rtsx_read_register(chip, XD_PAGE_STATUS, &reg_val);
	if (retval)
		return retval;

	if (reg_val != XD_GPG)
		xd_set_err_code(chip, XD_PRG_ERROR);

	retval = rtsx_read_register(chip, XD_CTL, &reg_val);
	if (retval)
		return retval;

	/* Uncorrectable ECC: rescue the block's data to a new block. */
	if (((reg_val & (XD_ECC1_ERROR | XD_ECC1_UNCORRECTABLE)) ==
		(XD_ECC1_ERROR | XD_ECC1_UNCORRECTABLE)) ||
	    ((reg_val & (XD_ECC2_ERROR | XD_ECC2_UNCORRECTABLE)) ==
		(XD_ECC2_ERROR | XD_ECC2_UNCORRECTABLE))) {
		wait_timeout(100);

		if (detect_card_cd(chip, XD_CARD) != STATUS_SUCCESS) {
			xd_set_err_code(chip, XD_NO_CARD);
			goto status_fail;
		}

		xd_set_err_code(chip, XD_ECC_ERROR);

		new_blk = xd_get_unused_block(chip, zone_no);
		if (new_blk == NO_NEW_BLK) {
			XD_CLR_BAD_OLDBLK(xd_card);
			goto status_fail;
		}

		retval = xd_copy_page(chip, phy_blk, new_blk, 0,
				      xd_card->page_off + 1);
		if (retval != STATUS_SUCCESS) {
			/* Copy failed: recycle the new block if it's good. */
			if (!XD_CHK_BAD_NEWBLK(xd_card)) {
				retval = xd_erase_block(chip, new_blk);
				if (retval == STATUS_SUCCESS)
					xd_set_unused_block(chip, new_blk);
			} else {
				XD_CLR_BAD_NEWBLK(xd_card);
			}
			XD_CLR_BAD_OLDBLK(xd_card);
			goto status_fail;
		}
		xd_set_l2p_tbl(chip, zone_no, log_off, (u16)(new_blk & 0x3FF));
		xd_erase_block(chip, phy_blk);
		xd_mark_bad_block(chip, phy_blk);
		XD_CLR_BAD_OLDBLK(xd_card);
	}

status_fail:
	return STATUS_FAIL;
}
/*
 * xd_finish_write - complete a partially written block
 * @chip:     adapter state
 * @old_blk:  previous physical block (BLK_NOT_FOUND if none)
 * @new_blk:  physical block the new data was written to
 * @log_blk:  logical block number
 * @page_off: first page of the tail that still needs filling
 *
 * Fills pages [page_off, page_off+1..] of the new block -- by
 * initializing redundant areas when there is no old block, or by
 * copying the tail from the old block -- then retires the old block and
 * records the new L2P mapping.
 *
 * Returns STATUS_SUCCESS or STATUS_FAIL.
 */
static int xd_finish_write(struct rtsx_chip *chip,
			   u32 old_blk, u32 new_blk, u32 log_blk, u8 page_off)
{
	struct xd_info *xd_card = &chip->xd_card;
	int retval, zone_no;
	u16 log_off;

	dev_dbg(rtsx_dev(chip), "old_blk = 0x%x, ", old_blk);
	dev_dbg(rtsx_dev(chip), "new_blk = 0x%x, ", new_blk);
	dev_dbg(rtsx_dev(chip), "log_blk = 0x%x\n", log_blk);

	if (page_off > xd_card->page_off)
		return STATUS_FAIL;

	/* 1000 logical blocks per zone. */
	zone_no = (int)(log_blk / 1000);
	log_off = (u16)(log_blk % 1000);

	if (old_blk == BLK_NOT_FOUND) {
		/* No previous data: just initialize the tail pages. */
		retval = xd_init_page(chip, new_blk, log_off,
				      page_off, xd_card->page_off + 1);
		if (retval != STATUS_SUCCESS) {
			retval = xd_erase_block(chip, new_blk);
			if (retval == STATUS_SUCCESS)
				xd_set_unused_block(chip, new_blk);
			return STATUS_FAIL;
		}
	} else {
		/* Copy the untouched tail pages from the old block. */
		retval = xd_copy_page(chip, old_blk, new_blk,
				      page_off, xd_card->page_off + 1);
		if (retval != STATUS_SUCCESS) {
			if (!XD_CHK_BAD_NEWBLK(xd_card)) {
				retval = xd_erase_block(chip, new_blk);
				if (retval == STATUS_SUCCESS)
					xd_set_unused_block(chip, new_blk);
			}
			XD_CLR_BAD_NEWBLK(xd_card);
			return STATUS_FAIL;
		}

		/* Retire the old block: free it, or mark it bad. */
		retval = xd_erase_block(chip, old_blk);
		if (retval == STATUS_SUCCESS) {
			if (XD_CHK_BAD_OLDBLK(xd_card)) {
				xd_mark_bad_block(chip, old_blk);
				XD_CLR_BAD_OLDBLK(xd_card);
			} else {
				xd_set_unused_block(chip, old_blk);
			}
		} else {
			xd_set_err_code(chip, XD_NO_ERROR);
			XD_CLR_BAD_OLDBLK(xd_card);
		}
	}

	xd_set_l2p_tbl(chip, zone_no, log_off, (u16)(new_blk & 0x3FF));

	return STATUS_SUCCESS;
}
/*
 * xd_prepare_write - copy the leading pages before a partial-block write
 * @chip:     adapter state
 * @old_blk:  physical block holding the current data
 * @new_blk:  destination physical block
 * @log_blk:  logical block number (debug output only)
 * @page_off: first page the caller will overwrite
 *
 * Brings pages [0, page_off) across so the new block is complete once
 * the caller writes from page_off onward. A no-op when page_off is 0.
 *
 * Returns STATUS_SUCCESS or STATUS_FAIL.
 */
static int xd_prepare_write(struct rtsx_chip *chip,
			    u32 old_blk, u32 new_blk, u32 log_blk, u8 page_off)
{
	dev_dbg(rtsx_dev(chip), "%s, old_blk = 0x%x, new_blk = 0x%x, log_blk = 0x%x, page_off = %d\n",
		__func__, old_blk, new_blk, log_blk, (int)page_off);

	if (page_off == 0)
		return STATUS_SUCCESS;

	if (xd_copy_page(chip, old_blk, new_blk, 0, page_off) !=
	    STATUS_SUCCESS)
		return STATUS_FAIL;

	return STATUS_SUCCESS;
}
/*
 * xd_write_multiple_pages - DMA-write pages [start_page, end_page) into a
 * block
 * @chip:       adapter state
 * @old_blk:    physical block being replaced (BLK_NOT_FOUND if none)
 * @new_blk:    destination physical block
 * @log_blk:    logical block number
 * @start_page: first page (inclusive)
 * @end_page:   last page (exclusive)
 * @buf:        scatter/gather source buffer
 * @index:      s/g segment index, updated by the partial transfer
 * @offset:     s/g segment offset, updated by the partial transfer
 *
 * If the write reaches the end of the block, the old block is retired
 * and the L2P entry repointed; otherwise the caller is expected to
 * finish the block later (delayed write). A program error marks the new
 * block bad.
 *
 * Returns STATUS_SUCCESS, STATUS_FAIL, or a negative register-read
 * error from the failure path.
 */
static int xd_write_multiple_pages(struct rtsx_chip *chip, u32 old_blk,
				   u32 new_blk, u32 log_blk, u8 start_page,
				   u8 end_page, u8 *buf, unsigned int *index,
				   unsigned int *offset)
{
	struct xd_info *xd_card = &chip->xd_card;
	u32 page_addr;
	int zone_no, retval;
	u16 log_off;
	u8 page_cnt, reg_val;

	dev_dbg(rtsx_dev(chip), "%s, old_blk = 0x%x, new_blk = 0x%x, log_blk = 0x%x\n",
		__func__, old_blk, new_blk, log_blk);

	if (start_page > end_page)
		goto status_fail;

	page_cnt = end_page - start_page;
	/* 1000 logical blocks per zone. */
	zone_no = (int)(log_blk / 1000);
	log_off = (u16)(log_blk % 1000);

	page_addr = (new_blk << xd_card->block_shift) + start_page;

	retval = xd_send_cmd(chip, READ1_1);
	if (retval != STATUS_SUCCESS)
		goto status_fail;

	rtsx_init_cmd(chip);

	/* Logical address written into the redundant area of each page. */
	rtsx_add_cmd(chip, WRITE_REG_CMD, XD_BLOCK_ADDR1_H,
		     0xFF, (u8)(log_off >> 8));
	rtsx_add_cmd(chip, WRITE_REG_CMD, XD_BLOCK_ADDR1_L, 0xFF, (u8)log_off);
	rtsx_add_cmd(chip, WRITE_REG_CMD, XD_BLOCK_STATUS, 0xFF, XD_GBLK);
	rtsx_add_cmd(chip, WRITE_REG_CMD, XD_PAGE_STATUS, 0xFF, XD_GPG);

	xd_assign_phy_addr(chip, page_addr, XD_RW_ADDR);

	rtsx_add_cmd(chip, WRITE_REG_CMD, XD_CFG, XD_BA_TRANSFORM,
		     XD_BA_TRANSFORM);
	rtsx_add_cmd(chip, WRITE_REG_CMD, XD_PAGE_CNT, 0xFF, page_cnt);
	rtsx_add_cmd(chip, WRITE_REG_CMD, CARD_DATA_SOURCE, 0x01, RING_BUFFER);

	trans_dma_enable(chip->srb->sc_data_direction, chip,
			 page_cnt * 512, DMA_512);

	rtsx_add_cmd(chip, WRITE_REG_CMD, XD_TRANSFER,
		     0xFF, XD_TRANSFER_START | XD_WRITE_PAGES);
	rtsx_add_cmd(chip, CHECK_REG_CMD, XD_TRANSFER,
		     XD_TRANSFER_END, XD_TRANSFER_END);

	rtsx_send_cmd_no_wait(chip);

	retval = rtsx_transfer_data_partial(chip, XD_CARD, buf, page_cnt * 512,
					    scsi_sg_count(chip->srb),
					    index, offset, DMA_TO_DEVICE, chip->xd_timeout);
	if (retval < 0) {
		rtsx_clear_xd_error(chip);

		if (retval == -ETIMEDOUT) {
			xd_set_err_code(chip, XD_TO_ERROR);
			goto status_fail;
		} else {
			goto fail;
		}
	}

	if (end_page == (xd_card->page_off + 1)) {
		/* Block complete: retire the old block and remap. */
		xd_card->delay_write.delay_write_flag = 0;

		if (old_blk != BLK_NOT_FOUND) {
			retval = xd_erase_block(chip, old_blk);
			if (retval == STATUS_SUCCESS) {
				if (XD_CHK_BAD_OLDBLK(xd_card)) {
					xd_mark_bad_block(chip, old_blk);
					XD_CLR_BAD_OLDBLK(xd_card);
				} else {
					xd_set_unused_block(chip, old_blk);
				}
			} else {
				xd_set_err_code(chip, XD_NO_ERROR);
				XD_CLR_BAD_OLDBLK(xd_card);
			}
		}
		xd_set_l2p_tbl(chip, zone_no, log_off, (u16)(new_blk & 0x3FF));
	}

	return STATUS_SUCCESS;

fail:
	retval = rtsx_read_register(chip, XD_DAT, &reg_val);
	if (retval)
		return retval;

	if (reg_val & PROGRAM_ERROR) {
		xd_set_err_code(chip, XD_PRG_ERROR);
		xd_mark_bad_block(chip, new_blk);
	}

status_fail:
	return STATUS_FAIL;
}
#ifdef XD_DELAY_WRITE
/*
 * xd_delay_write - flush a pending deferred write, if any
 * @chip: adapter state
 *
 * When a previous partial-block write left its tail pending
 * (delay_write_flag set), switch the clock back to the xD card and
 * complete the block via xd_finish_write(). A no-op when nothing is
 * pending.
 *
 * Returns STATUS_SUCCESS if nothing was pending or the flush succeeded,
 * STATUS_FAIL otherwise.
 */
int xd_delay_write(struct rtsx_chip *chip)
{
	struct xd_delay_write_tag *dw = &chip->xd_card.delay_write;

	if (!dw->delay_write_flag)
		return STATUS_SUCCESS;

	if (xd_switch_clock(chip) != STATUS_SUCCESS)
		return STATUS_FAIL;

	/* Clear the flag up front so a failed flush is not re-attempted. */
	dw->delay_write_flag = 0;

	if (xd_finish_write(chip, dw->old_phyblock, dw->new_phyblock,
			    dw->logblock, dw->pageoff) != STATUS_SUCCESS)
		return STATUS_FAIL;

	return STATUS_SUCCESS;
}
#endif
/*
 * xd_rw - read or write a run of sectors on the xD card.
 *
 * Translates the logical start sector into a (zone, logical block, page)
 * triple, builds the zone's logical-to-physical table on demand, then
 * transfers whole or partial blocks one at a time.  For writes, a block
 * left partially written at the end is either flushed immediately via
 * xd_finish_write() or, when XD_DELAY_WRITE is configured, parked in
 * xd_card->delay_write so that a following sequential write can continue
 * it without another block copy.
 *
 * Returns STATUS_SUCCESS or STATUS_FAIL; on failure a SCSI sense code has
 * already been set for the LUN.
 */
int xd_rw(struct scsi_cmnd *srb, struct rtsx_chip *chip,
	  u32 start_sector, u16 sector_cnt)
{
	struct xd_info *xd_card = &chip->xd_card;
	unsigned int lun = SCSI_LUN(srb);
#ifdef XD_DELAY_WRITE
	struct xd_delay_write_tag *delay_write = &xd_card->delay_write;
#endif
	int retval, zone_no;
	unsigned int index = 0, offset = 0;
	u32 log_blk, old_blk = 0, new_blk = 0;
	u16 log_off, total_sec_cnt = sector_cnt;
	u8 start_page, end_page = 0, page_cnt;
	u8 *ptr;

	xd_set_err_code(chip, XD_NO_ERROR);

	xd_card->cleanup_counter = 0;

	dev_dbg(rtsx_dev(chip), "%s: scsi_sg_count = %d\n", __func__,
		scsi_sg_count(srb));

	ptr = (u8 *)scsi_sglist(srb);

	retval = xd_switch_clock(chip);
	if (retval != STATUS_SUCCESS)
		return STATUS_FAIL;

	if (detect_card_cd(chip, XD_CARD) != STATUS_SUCCESS) {
		chip->card_fail |= XD_CARD;
		set_sense_type(chip, lun, SENSE_TYPE_MEDIA_NOT_PRESENT);
		return STATUS_FAIL;
	}

	/* Decompose the start sector into block number, page within the
	 * block, and zone/offset (1000 logical blocks per zone).
	 */
	log_blk = start_sector >> xd_card->block_shift;
	start_page = (u8)start_sector & xd_card->page_off;
	zone_no = (int)(log_blk / 1000);
	log_off = (u16)(log_blk % 1000);

	if (xd_card->zone[zone_no].build_flag == 0) {
		retval = xd_build_l2p_tbl(chip, zone_no);
		if (retval != STATUS_SUCCESS) {
			chip->card_fail |= XD_CARD;
			set_sense_type(chip, lun, SENSE_TYPE_MEDIA_NOT_PRESENT);
			return STATUS_FAIL;
		}
	}

	if (srb->sc_data_direction == DMA_TO_DEVICE) {
#ifdef XD_DELAY_WRITE
		/* Case 1: this write continues the parked block but skips
		 * ahead; copy the gap pages, then reuse the parked blocks.
		 */
		if (delay_write->delay_write_flag &&
		    delay_write->logblock == log_blk &&
		    start_page > delay_write->pageoff) {
			delay_write->delay_write_flag = 0;
			if (delay_write->old_phyblock != BLK_NOT_FOUND) {
				retval = xd_copy_page(chip,
						      delay_write->old_phyblock,
						      delay_write->new_phyblock,
						      delay_write->pageoff,
						      start_page);
				if (retval != STATUS_SUCCESS) {
					set_sense_type(chip, lun,
						       SENSE_TYPE_MEDIA_WRITE_ERR);
					return STATUS_FAIL;
				}
			}
			old_blk = delay_write->old_phyblock;
			new_blk = delay_write->new_phyblock;
			/* Case 2: exact sequential continuation. */
		} else if (delay_write->delay_write_flag &&
			   (delay_write->logblock == log_blk) &&
			   (start_page == delay_write->pageoff)) {
			delay_write->delay_write_flag = 0;
			old_blk = delay_write->old_phyblock;
			new_blk = delay_write->new_phyblock;
		} else {
			/* Case 3: unrelated write; flush any parked block
			 * first.  NOTE: this "else" body is closed by the
			 * "}" under the later #ifdef XD_DELAY_WRITE, so the
			 * code below runs unconditionally when the macro is
			 * not defined.
			 */
			retval = xd_delay_write(chip);
			if (retval != STATUS_SUCCESS) {
				set_sense_type(chip, lun,
					       SENSE_TYPE_MEDIA_WRITE_ERR);
				return STATUS_FAIL;
			}
#endif
			/* Look up the current physical block, grab a fresh
			 * one, and copy the pages preceding start_page.
			 */
			old_blk = xd_get_l2p_tbl(chip, zone_no, log_off);
			new_blk = xd_get_unused_block(chip, zone_no);
			if (old_blk == BLK_NOT_FOUND ||
			    new_blk == BLK_NOT_FOUND) {
				set_sense_type(chip, lun,
					       SENSE_TYPE_MEDIA_WRITE_ERR);
				return STATUS_FAIL;
			}

			retval = xd_prepare_write(chip, old_blk, new_blk,
						  log_blk, start_page);
			if (retval != STATUS_SUCCESS) {
				if (detect_card_cd(chip, XD_CARD) !=
					STATUS_SUCCESS) {
					set_sense_type(chip, lun,
						       SENSE_TYPE_MEDIA_NOT_PRESENT);
					return STATUS_FAIL;
				}

				set_sense_type(chip, lun,
					       SENSE_TYPE_MEDIA_WRITE_ERR);
				return STATUS_FAIL;
			}
#ifdef XD_DELAY_WRITE
		}
#endif
	} else {
		/* Read path: flush any parked write before reading. */
#ifdef XD_DELAY_WRITE
		retval = xd_delay_write(chip);
		if (retval != STATUS_SUCCESS) {
			if (detect_card_cd(chip, XD_CARD) != STATUS_SUCCESS) {
				set_sense_type(chip, lun,
					       SENSE_TYPE_MEDIA_NOT_PRESENT);
				return STATUS_FAIL;
			}

			set_sense_type(chip, lun,
				       SENSE_TYPE_MEDIA_UNRECOVER_READ_ERR);
			return STATUS_FAIL;
		}
#endif

		old_blk = xd_get_l2p_tbl(chip, zone_no, log_off);
		if (old_blk == BLK_NOT_FOUND) {
			set_sense_type(chip, lun,
				       SENSE_TYPE_MEDIA_UNRECOVER_READ_ERR);
			return STATUS_FAIL;
		}
	}

	dev_dbg(rtsx_dev(chip), "old_blk = 0x%x\n", old_blk);

	/* Main transfer loop: one (possibly partial) block per iteration. */
	while (total_sec_cnt) {
		if (detect_card_cd(chip, XD_CARD) != STATUS_SUCCESS) {
			chip->card_fail |= XD_CARD;
			set_sense_type(chip, lun, SENSE_TYPE_MEDIA_NOT_PRESENT);
			return STATUS_FAIL;
		}

		/* Clamp this chunk to the end of the current block. */
		if ((start_page + total_sec_cnt) > (xd_card->page_off + 1))
			end_page = xd_card->page_off + 1;
		else
			end_page = start_page + (u8)total_sec_cnt;

		page_cnt = end_page - start_page;

		if (srb->sc_data_direction == DMA_FROM_DEVICE) {
			retval = xd_read_multiple_pages(chip, old_blk, log_blk,
							start_page, end_page,
							ptr, &index, &offset);
			if (retval != STATUS_SUCCESS) {
				set_sense_type(chip, lun,
					       SENSE_TYPE_MEDIA_UNRECOVER_READ_ERR);
				return STATUS_FAIL;
			}
		} else {
			retval = xd_write_multiple_pages(chip, old_blk,
							 new_blk, log_blk,
							 start_page, end_page,
							 ptr, &index, &offset);
			if (retval != STATUS_SUCCESS) {
				set_sense_type(chip, lun,
					       SENSE_TYPE_MEDIA_WRITE_ERR);
				return STATUS_FAIL;
			}
		}

		total_sec_cnt -= page_cnt;
		/* Flat (non s-g) buffer: advance manually, 512 B/sector. */
		if (scsi_sg_count(srb) == 0)
			ptr += page_cnt * 512;

		if (total_sec_cnt == 0)
			break;

		/* Advance to the next logical block; build its zone table
		 * and look up (and, for writes, allocate) physical blocks.
		 */
		log_blk++;
		zone_no = (int)(log_blk / 1000);
		log_off = (u16)(log_blk % 1000);

		if (xd_card->zone[zone_no].build_flag == 0) {
			retval = xd_build_l2p_tbl(chip, zone_no);
			if (retval != STATUS_SUCCESS) {
				chip->card_fail |= XD_CARD;
				set_sense_type(chip, lun,
					       SENSE_TYPE_MEDIA_NOT_PRESENT);
				return STATUS_FAIL;
			}
		}

		old_blk = xd_get_l2p_tbl(chip, zone_no, log_off);
		if (old_blk == BLK_NOT_FOUND) {
			if (srb->sc_data_direction == DMA_FROM_DEVICE)
				set_sense_type(chip, lun,
					       SENSE_TYPE_MEDIA_UNRECOVER_READ_ERR);
			else
				set_sense_type(chip, lun,
					       SENSE_TYPE_MEDIA_WRITE_ERR);

			return STATUS_FAIL;
		}

		if (srb->sc_data_direction == DMA_TO_DEVICE) {
			new_blk = xd_get_unused_block(chip, zone_no);
			if (new_blk == BLK_NOT_FOUND) {
				set_sense_type(chip, lun,
					       SENSE_TYPE_MEDIA_WRITE_ERR);
				return STATUS_FAIL;
			}
		}

		start_page = 0;
	}

	/* A write that ends mid-block must either be parked for a later
	 * sequential continuation (XD_DELAY_WRITE) or completed right now.
	 */
	if (srb->sc_data_direction == DMA_TO_DEVICE &&
	    (end_page != (xd_card->page_off + 1))) {
#ifdef XD_DELAY_WRITE
		delay_write->delay_write_flag = 1;
		delay_write->old_phyblock = old_blk;
		delay_write->new_phyblock = new_blk;
		delay_write->logblock = log_blk;
		delay_write->pageoff = end_page;
#else
		if (detect_card_cd(chip, XD_CARD) != STATUS_SUCCESS) {
			chip->card_fail |= XD_CARD;
			set_sense_type(chip, lun, SENSE_TYPE_MEDIA_NOT_PRESENT);
			return STATUS_FAIL;
		}

		retval = xd_finish_write(chip, old_blk, new_blk,
					 log_blk, end_page);
		if (retval != STATUS_SUCCESS) {
			if (detect_card_cd(chip, XD_CARD) != STATUS_SUCCESS) {
				set_sense_type(chip, lun,
					       SENSE_TYPE_MEDIA_NOT_PRESENT);
				return STATUS_FAIL;
			}
			set_sense_type(chip, lun, SENSE_TYPE_MEDIA_WRITE_ERR);
			return STATUS_FAIL;
		}
#endif
	}

	scsi_set_resid(srb, 0);

	return STATUS_SUCCESS;
}
/*
 * Release every logical-to-physical mapping table of the xD card.
 *
 * Frees each zone's l2p_table and free_table, then the zone array
 * itself, NULLing every pointer so a repeated call is harmless.
 */
void xd_free_l2p_tbl(struct rtsx_chip *chip)
{
	struct xd_info *xd_card = &chip->xd_card;
	int zone;

	if (!xd_card->zone)
		return;

	for (zone = 0; zone < xd_card->zone_cnt; zone++) {
		vfree(xd_card->zone[zone].l2p_table);
		xd_card->zone[zone].l2p_table = NULL;
		vfree(xd_card->zone[zone].free_table);
		xd_card->zone[zone].free_table = NULL;
	}

	vfree(xd_card->zone);
	xd_card->zone = NULL;
}
/*
 * Background cleanup hook: if a delayed write is still parked, flush it
 * now and reset the cleanup counter.  Compiled out entirely when
 * XD_DELAY_WRITE is not configured.
 */
void xd_cleanup_work(struct rtsx_chip *chip)
{
#ifdef XD_DELAY_WRITE
	struct xd_info *xd_card = &chip->xd_card;

	if (!xd_card->delay_write.delay_write_flag)
		return;

	dev_dbg(rtsx_dev(chip), "xD: delay write\n");
	xd_delay_write(chip);
	xd_card->cleanup_counter = 0;
#endif
}
/*
 * Power down the xD card slot: stop its clock, disable the output
 * drivers, cut slot power (unless in ft2 fast mode), and release the
 * pad pull-ups/downs (ASIC path) or the FPGA pull control register.
 *
 * Returns STATUS_SUCCESS/STATUS_FAIL, or a negative register-access
 * error code from rtsx_write_register() (mixed convention kept from
 * the original callers).
 */
int xd_power_off_card3v3(struct rtsx_chip *chip)
{
	int ret;

	ret = disable_card_clock(chip, XD_CARD);
	if (ret != STATUS_SUCCESS)
		return STATUS_FAIL;

	ret = rtsx_write_register(chip, CARD_OE, XD_OUTPUT_EN, 0);
	if (ret)
		return ret;

	if (!chip->ft2_fast_mode) {
		if (card_power_off(chip, XD_CARD) != STATUS_SUCCESS)
			return STATUS_FAIL;
		/* allow the rail to discharge */
		wait_timeout(50);
	}

	if (chip->asic_code) {
		if (xd_pull_ctl_disable(chip) != STATUS_SUCCESS)
			return STATUS_FAIL;
	} else {
		ret = rtsx_write_register(chip, FPGA_PULL_CTL, 0xFF, 0xDF);
		if (ret)
			return ret;
	}

	return STATUS_SUCCESS;
}
/*
 * Tear down xD card state on removal: clear the ready/fail/write-protect
 * bits for the card, drop any parked delayed write, free the mapping
 * tables, and power the slot off.
 */
int release_xd_card(struct rtsx_chip *chip)
{
	struct xd_info *xd_card = &chip->xd_card;

	chip->card_ready &= ~XD_CARD;
	chip->card_fail &= ~XD_CARD;
	chip->card_wp &= ~XD_CARD;

	xd_card->delay_write.delay_write_flag = 0;

	xd_free_l2p_tbl(chip);

	if (xd_power_off_card3v3(chip) != STATUS_SUCCESS)
		return STATUS_FAIL;

	return STATUS_SUCCESS;
}
| linux-master | drivers/staging/rts5208/xd.c |
// SPDX-License-Identifier: GPL-2.0+
/*
* Driver for Realtek PCI-Express card reader
*
* Copyright(c) 2009-2013 Realtek Semiconductor Corp. All rights reserved.
*
* Author:
* Wei WANG ([email protected])
* Micky Ching ([email protected])
*/
#include <linux/blkdev.h>
#include <linux/kthread.h>
#include <linux/sched.h>
#include "rtsx.h"
/***********************************************************************
* Scatter-gather transfer buffer access routines
***********************************************************************/
/*
* Copy a buffer of length buflen to/from the srb's transfer buffer.
* (Note: for scatter-gather transfers (srb->use_sg > 0), srb->request_buffer
* points to a list of s-g entries and we ignore srb->request_bufflen.
* For non-scatter-gather transfers, srb->request_buffer points to the
* transfer buffer itself and srb->request_bufflen is the buffer's length.)
* Update the *index and *offset variables so that the next copy will
* pick up from where this one left off.
*/
/*
 * rtsx_stor_access_xfer_buf - copy up to @buflen bytes between @buffer
 * and the srb's transfer buffer, which is either a flat buffer
 * (scsi_sg_count == 0) or a scatter-gather list.
 *
 * @dir selects the direction: TO_XFER_BUF copies @buffer into the srb,
 * otherwise the srb is copied into @buffer.  *@index (current s-g entry)
 * and *@offset (byte offset within the flat buffer or current s-g entry)
 * are advanced so a subsequent call resumes where this one stopped.
 *
 * Returns the number of bytes actually transferred (may be short when
 * the srb buffer is exhausted).
 */
unsigned int rtsx_stor_access_xfer_buf(unsigned char *buffer,
				       unsigned int buflen,
				       struct scsi_cmnd *srb,
				       unsigned int *index,
				       unsigned int *offset,
				       enum xfer_buf_dir dir)
{
	unsigned int cnt;

	/* If not using scatter-gather, just transfer the data directly. */
	if (scsi_sg_count(srb) == 0) {
		unsigned char *sgbuffer;

		if (*offset >= scsi_bufflen(srb))
			return 0;
		cnt = min(buflen, scsi_bufflen(srb) - *offset);

		sgbuffer = (unsigned char *)scsi_sglist(srb) + *offset;

		if (dir == TO_XFER_BUF)
			memcpy(sgbuffer, buffer, cnt);
		else
			memcpy(buffer, sgbuffer, cnt);
		*offset += cnt;

	/*
	 * Using scatter-gather. We have to go through the list one entry
	 * at a time. Each s-g entry contains some number of pages which
	 * have to be copied one at a time.
	 */
	} else {
		struct scatterlist *sg =
				(struct scatterlist *)scsi_sglist(srb)
				+ *index;

		/*
		 * This loop handles a single s-g list entry, which may
		 * include multiple pages. Find the initial page structure
		 * and the starting offset within the page, and update
		 * the *offset and *index values for the next loop.
		 */
		cnt = 0;
		while (cnt < buflen && *index < scsi_sg_count(srb)) {
			/* Resolve the page and intra-page offset for the
			 * current position within this s-g entry.
			 */
			struct page *page = sg_page(sg) +
					((sg->offset + *offset) >> PAGE_SHIFT);
			unsigned int poff = (sg->offset + *offset) &
					    (PAGE_SIZE - 1);
			unsigned int sglen = sg->length - *offset;

			if (sglen > buflen - cnt) {
				/* Transfer ends within this s-g entry */
				sglen = buflen - cnt;
				*offset += sglen;
			} else {
				/* Transfer continues to next s-g entry */
				*offset = 0;
				++*index;
				++sg;
			}

			/* Copy page by page within the entry. */
			while (sglen > 0) {
				unsigned int plen = min(sglen, (unsigned int)
						PAGE_SIZE - poff);

				if (dir == TO_XFER_BUF)
					memcpy_to_page(page, poff, buffer + cnt, plen);
				else
					memcpy_from_page(buffer + cnt, page, poff, plen);

				/* Start at the beginning of the next page */
				poff = 0;
				++page;
				cnt += plen;
				sglen -= plen;
			}
		}
	}

	/* Return the amount actually transferred */
	return cnt;
}
/*
* Store the contents of buffer into srb's transfer buffer and set the
* SCSI residue.
*/
/*
 * Copy @buflen bytes from @buffer into the srb's transfer buffer and
 * record any shortfall as the SCSI residue.
 */
void rtsx_stor_set_xfer_buf(unsigned char *buffer,
			    unsigned int buflen, struct scsi_cmnd *srb)
{
	unsigned int sg_idx = 0;
	unsigned int sg_off = 0;

	rtsx_stor_access_xfer_buf(buffer, buflen, srb, &sg_idx, &sg_off,
				  TO_XFER_BUF);
	if (buflen < scsi_bufflen(srb))
		scsi_set_resid(srb, scsi_bufflen(srb) - buflen);
}
/*
 * Copy @buflen bytes out of the srb's transfer buffer into @buffer and
 * record any shortfall as the SCSI residue.
 */
void rtsx_stor_get_xfer_buf(unsigned char *buffer,
			    unsigned int buflen, struct scsi_cmnd *srb)
{
	unsigned int sg_idx = 0;
	unsigned int sg_off = 0;

	rtsx_stor_access_xfer_buf(buffer, buflen, srb, &sg_idx, &sg_off,
				  FROM_XFER_BUF);
	if (buflen < scsi_bufflen(srb))
		scsi_set_resid(srb, scsi_bufflen(srb) - buflen);
}
/***********************************************************************
* Transport routines
***********************************************************************/
/*
* Invoke the transport and basic error-handling/recovery methods
*
* This is used to send the message to the device and receive the response.
*/
/*
 * rtsx_invoke_transport - run a SCSI command through the chip handler
 * and translate the transport outcome into srb->result.
 *
 * On abort, srb->result is DID_ABORT; on transport error, DID_ERROR; on
 * transport failure, the cached sense data for the LUN is copied back so
 * the upper layer's auto-REQUEST_SENSE sees it; otherwise SAM_STAT_GOOD.
 *
 * Fix: the original jumped to a "handle_errors:" label whose only
 * statement was "return;" — a pointless goto indirection.  Replaced the
 * two "goto handle_errors" sites with direct returns and dropped the
 * dead label; control flow is otherwise unchanged.
 */
void rtsx_invoke_transport(struct scsi_cmnd *srb, struct rtsx_chip *chip)
{
	int result;

	result = rtsx_scsi_handler(srb, chip);

	/*
	 * if the command gets aborted by the higher layers, we need to
	 * short-circuit all other processing.
	 */
	if (rtsx_chk_stat(chip, RTSX_STAT_ABORT)) {
		dev_dbg(rtsx_dev(chip), "-- command was aborted\n");
		srb->result = DID_ABORT << 16;
		return;
	}

	/* if there is a transport error, reset and don't auto-sense */
	if (result == TRANSPORT_ERROR) {
		dev_dbg(rtsx_dev(chip), "-- transport indicates error, resetting\n");
		srb->result = DID_ERROR << 16;
		return;
	}

	srb->result = SAM_STAT_GOOD;

	/*
	 * If we have a failure, we're going to do a REQUEST_SENSE
	 * automatically.  Note that we differentiate between a command
	 * "failure" and an "error" in the transport mechanism.
	 */
	if (result == TRANSPORT_FAILED) {
		/* set the result so the higher layers expect this data */
		srb->result = SAM_STAT_CHECK_CONDITION;
		memcpy(srb->sense_buffer,
		       (unsigned char *)&chip->sense_buffer[SCSI_LUN(srb)],
		       sizeof(struct sense_data_t));
	}
}
/*
 * Append one command word to the host command buffer.
 *
 * Layout of the 32-bit entry: bits 31:30 command type, 29:16 register
 * address (14 bits), 15:8 mask, 7:0 data.  Silently drops the entry if
 * the command buffer is already full.
 */
void rtsx_add_cmd(struct rtsx_chip *chip,
		  u8 cmd_type, u16 reg_addr, u8 mask, u8 data)
{
	__le32 *cmd_buf = (__le32 *)(chip->host_cmds_ptr);
	u32 word;

	word = ((u32)(cmd_type & 0x03) << 30) |
	       ((u32)(reg_addr & 0x3FFF) << 16) |
	       ((u32)mask << 8) |
	       (u32)data;

	spin_lock_irq(&chip->rtsx->reg_lock);
	if (chip->ci < (HOST_CMDS_BUF_LEN / 4))
		cmd_buf[(chip->ci)++] = cpu_to_le32(word);
	spin_unlock_irq(&chip->rtsx->reg_lock);
}
/*
 * Kick off the queued host command buffer without waiting for the
 * completion interrupt: program the buffer address, then write the
 * control register with start bit (31), hardware auto-response bit
 * (30), and the byte length of the queued commands.
 */
void rtsx_send_cmd_no_wait(struct rtsx_chip *chip)
{
	u32 ctl;

	rtsx_writel(chip, RTSX_HCBAR, chip->host_cmds_addr);

	ctl = BIT(31) | 0x40000000;	/* start + Hardware Auto Response */
	ctl |= (u32)(chip->ci * 4) & 0x00FFFFFF;
	rtsx_writel(chip, RTSX_HCBCTLR, ctl);
}
/*
 * rtsx_send_cmd - start the queued host command buffer and wait for the
 * chip's transfer-done interrupt.
 *
 * @card selects which card's removal should abort the wait (the ISR
 * consults rtsx->check_card_cd).  @timeout is in milliseconds.
 *
 * Returns 0 on success, -ETIMEDOUT if no completion arrived in time,
 * -EIO if the chip reported a failed transfer.  On error the pending
 * command is cancelled via rtsx_stop_cmd().
 */
int rtsx_send_cmd(struct rtsx_chip *chip, u8 card, int timeout)
{
	struct rtsx_dev *rtsx = chip->rtsx;
	struct completion trans_done;
	u32 val = BIT(31);
	long timeleft;
	int err = 0;

	if (card == SD_CARD)
		rtsx->check_card_cd = SD_EXIST;
	else if (card == MS_CARD)
		rtsx->check_card_cd = MS_EXIST;
	else if (card == XD_CARD)
		rtsx->check_card_cd = XD_EXIST;
	else
		rtsx->check_card_cd = 0;

	spin_lock_irq(&rtsx->reg_lock);

	/* set up data structures for the wakeup system */
	rtsx->done = &trans_done;
	rtsx->trans_result = TRANS_NOT_READY;
	init_completion(&trans_done);
	rtsx->trans_state = STATE_TRANS_CMD;

	rtsx_writel(chip, RTSX_HCBAR, chip->host_cmds_addr);

	/* length in bytes: chip->ci queued entries of 4 bytes each */
	val |= (u32)(chip->ci * 4) & 0x00FFFFFF;
	/* Hardware Auto Response */
	val |= 0x40000000;
	rtsx_writel(chip, RTSX_HCBCTLR, val);

	spin_unlock_irq(&rtsx->reg_lock);

	/* Wait for TRANS_OK_INT */
	timeleft = wait_for_completion_interruptible_timeout(&trans_done,
							     msecs_to_jiffies(timeout));
	if (timeleft <= 0) {
		dev_dbg(rtsx_dev(chip), "chip->int_reg = 0x%x\n",
			chip->int_reg);
		err = -ETIMEDOUT;
		goto finish_send_cmd;
	}

	/* trans_result is written by the ISR under reg_lock */
	spin_lock_irq(&rtsx->reg_lock);
	if (rtsx->trans_result == TRANS_RESULT_FAIL)
		err = -EIO;
	else if (rtsx->trans_result == TRANS_RESULT_OK)
		err = 0;
	spin_unlock_irq(&rtsx->reg_lock);

finish_send_cmd:
	rtsx->done = NULL;
	rtsx->trans_state = STATE_TRANS_NONE;

	if (err < 0)
		rtsx_stop_cmd(chip, card);

	return err;
}
/*
 * Append one DMA region to the host scatter-gather descriptor table,
 * splitting it into chunks of at most 0x80000 bytes.  Only the final
 * chunk keeps the caller's RTSX_SG_END flag; entries beyond the table
 * capacity are silently dropped.
 *
 * 64-bit entry layout: bits 63:32 address, 31:12 length, low bits flags.
 */
static inline void rtsx_add_sg_tbl(struct rtsx_chip *chip,
				   u32 addr, u32 len, u8 option)
{
	__le64 *tbl = (__le64 *)(chip->host_sg_tbl_ptr);

	do {
		u32 seg_len;
		u8 seg_opt;
		u64 entry;

		if (len > 0x80000) {
			seg_len = 0x80000;
			seg_opt = option & (~RTSX_SG_END);
		} else {
			seg_len = len;
			seg_opt = option;
		}

		entry = ((u64)addr << 32) | ((u64)seg_len << 12) | seg_opt;

		if (chip->sgi < (HOST_SG_TBL_BUF_LEN / 8))
			tbl[(chip->sgi)++] = cpu_to_le64(entry);

		len -= seg_len;
		addr += seg_len;
	} while (len);
}
/*
 * rtsx_transfer_sglist_adma_partial - ADMA-transfer @size bytes of a
 * scatter-gather list, starting at entry *@index / byte *@offset.
 *
 * Maps the list for DMA, builds the host s-g descriptor table from the
 * requested window, triggers the transfer and waits (twice if needed)
 * for the completion posted by the ISR.  *@index and *@offset are
 * advanced so a following call resumes after the bytes consumed here.
 *
 * Returns 0 on success, -ETIMEDOUT, -EIO on chip-reported failure, or
 * -ENXIO for an unsupported DMA direction.  On error the transfer is
 * cancelled with rtsx_stop_cmd().
 */
static int rtsx_transfer_sglist_adma_partial(struct rtsx_chip *chip, u8 card,
					     struct scatterlist *sg, int num_sg,
					     unsigned int *index,
					     unsigned int *offset, int size,
					     enum dma_data_direction dma_dir,
					     int timeout)
{
	struct rtsx_dev *rtsx = chip->rtsx;
	struct completion trans_done;
	u8 dir;
	int sg_cnt, i, resid;
	int err = 0;
	long timeleft;
	struct scatterlist *sg_ptr;
	u32 val = TRIG_DMA;

	if (!sg || num_sg <= 0 || !offset || !index)
		return -EIO;

	if (dma_dir == DMA_TO_DEVICE)
		dir = HOST_TO_DEVICE;
	else if (dma_dir == DMA_FROM_DEVICE)
		dir = DEVICE_TO_HOST;
	else
		return -ENXIO;

	if (card == SD_CARD)
		rtsx->check_card_cd = SD_EXIST;
	else if (card == MS_CARD)
		rtsx->check_card_cd = MS_EXIST;
	else if (card == XD_CARD)
		rtsx->check_card_cd = XD_EXIST;
	else
		rtsx->check_card_cd = 0;

	spin_lock_irq(&rtsx->reg_lock);

	/* set up data structures for the wakeup system */
	rtsx->done = &trans_done;

	rtsx->trans_state = STATE_TRANS_SG;
	rtsx->trans_result = TRANS_NOT_READY;

	spin_unlock_irq(&rtsx->reg_lock);

	/* NOTE(review): a zero return from dma_map_sg (mapping failure)
	 * is not checked here; the loop below would then build an empty
	 * table before triggering DMA — confirm intended.
	 */
	sg_cnt = dma_map_sg(&rtsx->pci->dev, sg, num_sg, dma_dir);

	resid = size;
	sg_ptr = sg;
	chip->sgi = 0;
	/*
	 * Usually the next entry will be @sg@ + 1, but if this sg element
	 * is part of a chained scatterlist, it could jump to the start of
	 * a new scatterlist array. So here we use sg_next to move to
	 * the proper sg.
	 */
	for (i = 0; i < *index; i++)
		sg_ptr = sg_next(sg_ptr);

	/* Build descriptors until @resid bytes are covered. */
	for (i = *index; i < sg_cnt; i++) {
		dma_addr_t addr;
		unsigned int len;
		u8 option;

		addr = sg_dma_address(sg_ptr);
		len = sg_dma_len(sg_ptr);

		dev_dbg(rtsx_dev(chip), "DMA addr: 0x%x, Len: 0x%x\n",
			(unsigned int)addr, len);
		dev_dbg(rtsx_dev(chip), "*index = %d, *offset = %d\n",
			*index, *offset);

		addr += *offset;

		if ((len - *offset) > resid) {
			/* window ends inside this entry; keep *index */
			*offset += resid;
			len = resid;
			resid = 0;
		} else {
			/* entry fully consumed; move to the next one */
			resid -= (len - *offset);
			len -= *offset;
			*offset = 0;
			*index = *index + 1;
		}
		option = RTSX_SG_VALID | RTSX_SG_TRANS_DATA;
		if ((i == sg_cnt - 1) || !resid)
			option |= RTSX_SG_END;

		rtsx_add_sg_tbl(chip, (u32)addr, (u32)len, option);

		if (!resid)
			break;

		sg_ptr = sg_next(sg_ptr);
	}

	dev_dbg(rtsx_dev(chip), "SG table count = %d\n", chip->sgi);

	val |= (u32)(dir & 0x01) << 29;
	val |= ADMA_MODE;

	spin_lock_irq(&rtsx->reg_lock);

	init_completion(&trans_done);

	rtsx_writel(chip, RTSX_HDBAR, chip->host_sg_tbl_addr);
	rtsx_writel(chip, RTSX_HDBCTLR, val);

	spin_unlock_irq(&rtsx->reg_lock);

	timeleft = wait_for_completion_interruptible_timeout(&trans_done,
							     msecs_to_jiffies(timeout));
	if (timeleft <= 0) {
		dev_dbg(rtsx_dev(chip), "Timeout (%s %d)\n",
			__func__, __LINE__);
		dev_dbg(rtsx_dev(chip), "chip->int_reg = 0x%x\n",
			chip->int_reg);
		err = -ETIMEDOUT;
		goto out;
	}

	spin_lock_irq(&rtsx->reg_lock);
	if (rtsx->trans_result == TRANS_RESULT_FAIL) {
		err = -EIO;
		spin_unlock_irq(&rtsx->reg_lock);
		goto out;
	}
	spin_unlock_irq(&rtsx->reg_lock);

	/* Wait for TRANS_OK_INT */
	spin_lock_irq(&rtsx->reg_lock);
	if (rtsx->trans_result == TRANS_NOT_READY) {
		/* DMA started but not finished yet; wait a second time */
		init_completion(&trans_done);
		spin_unlock_irq(&rtsx->reg_lock);
		timeleft = wait_for_completion_interruptible_timeout(&trans_done,
								     msecs_to_jiffies(timeout));
		if (timeleft <= 0) {
			dev_dbg(rtsx_dev(chip), "Timeout (%s %d)\n",
				__func__, __LINE__);
			dev_dbg(rtsx_dev(chip), "chip->int_reg = 0x%x\n",
				chip->int_reg);
			err = -ETIMEDOUT;
			goto out;
		}
	} else {
		spin_unlock_irq(&rtsx->reg_lock);
	}

	spin_lock_irq(&rtsx->reg_lock);
	if (rtsx->trans_result == TRANS_RESULT_FAIL)
		err = -EIO;
	else if (rtsx->trans_result == TRANS_RESULT_OK)
		err = 0;
	spin_unlock_irq(&rtsx->reg_lock);

out:
	rtsx->done = NULL;
	rtsx->trans_state = STATE_TRANS_NONE;
	dma_unmap_sg(&rtsx->pci->dev, sg, num_sg, dma_dir);

	if (err < 0)
		rtsx_stop_cmd(chip, card);

	return err;
}
/*
 * rtsx_transfer_sglist_adma - ADMA-transfer a whole scatter-gather list.
 *
 * Maps the list for DMA and processes it in batches sized to the host
 * descriptor table (HOST_SG_TBL_BUF_LEN / 8 entries per batch), waiting
 * for the ISR-posted completion after each batch and once more for the
 * final TRANS_OK interrupt.
 *
 * Returns 0 on success, -ETIMEDOUT, -EIO on chip-reported failure, or
 * -ENXIO for an unsupported DMA direction.  On error the transfer is
 * cancelled with rtsx_stop_cmd().
 */
static int rtsx_transfer_sglist_adma(struct rtsx_chip *chip, u8 card,
				     struct scatterlist *sg, int num_sg,
				     enum dma_data_direction dma_dir,
				     int timeout)
{
	struct rtsx_dev *rtsx = chip->rtsx;
	struct completion trans_done;
	u8 dir;
	int buf_cnt, i;
	int err = 0;
	long timeleft;
	struct scatterlist *sg_ptr;

	if (!sg || num_sg <= 0)
		return -EIO;

	if (dma_dir == DMA_TO_DEVICE)
		dir = HOST_TO_DEVICE;
	else if (dma_dir == DMA_FROM_DEVICE)
		dir = DEVICE_TO_HOST;
	else
		return -ENXIO;

	if (card == SD_CARD)
		rtsx->check_card_cd = SD_EXIST;
	else if (card == MS_CARD)
		rtsx->check_card_cd = MS_EXIST;
	else if (card == XD_CARD)
		rtsx->check_card_cd = XD_EXIST;
	else
		rtsx->check_card_cd = 0;

	spin_lock_irq(&rtsx->reg_lock);

	/* set up data structures for the wakeup system */
	rtsx->done = &trans_done;

	rtsx->trans_state = STATE_TRANS_SG;
	rtsx->trans_result = TRANS_NOT_READY;

	spin_unlock_irq(&rtsx->reg_lock);

	buf_cnt = dma_map_sg(&rtsx->pci->dev, sg, num_sg, dma_dir);

	sg_ptr = sg;

	/* One iteration per full-table batch, plus one for the remainder. */
	for (i = 0; i <= buf_cnt / (HOST_SG_TBL_BUF_LEN / 8); i++) {
		u32 val = TRIG_DMA;
		int sg_cnt, j;

		if (i == buf_cnt / (HOST_SG_TBL_BUF_LEN / 8))
			sg_cnt = buf_cnt % (HOST_SG_TBL_BUF_LEN / 8);
		else
			sg_cnt = HOST_SG_TBL_BUF_LEN / 8;

		chip->sgi = 0;
		for (j = 0; j < sg_cnt; j++) {
			dma_addr_t addr = sg_dma_address(sg_ptr);
			unsigned int len = sg_dma_len(sg_ptr);
			u8 option;

			dev_dbg(rtsx_dev(chip), "DMA addr: 0x%x, Len: 0x%x\n",
				(unsigned int)addr, len);

			option = RTSX_SG_VALID | RTSX_SG_TRANS_DATA;
			if (j == (sg_cnt - 1))
				option |= RTSX_SG_END;

			rtsx_add_sg_tbl(chip, (u32)addr, (u32)len, option);

			sg_ptr = sg_next(sg_ptr);
		}

		dev_dbg(rtsx_dev(chip), "SG table count = %d\n", chip->sgi);

		val |= (u32)(dir & 0x01) << 29;
		val |= ADMA_MODE;

		spin_lock_irq(&rtsx->reg_lock);

		init_completion(&trans_done);

		rtsx_writel(chip, RTSX_HDBAR, chip->host_sg_tbl_addr);
		rtsx_writel(chip, RTSX_HDBCTLR, val);

		spin_unlock_irq(&rtsx->reg_lock);

		timeleft = wait_for_completion_interruptible_timeout(&trans_done,
								     msecs_to_jiffies(timeout));
		if (timeleft <= 0) {
			dev_dbg(rtsx_dev(chip), "Timeout (%s %d)\n",
				__func__, __LINE__);
			dev_dbg(rtsx_dev(chip), "chip->int_reg = 0x%x\n",
				chip->int_reg);
			err = -ETIMEDOUT;
			goto out;
		}

		spin_lock_irq(&rtsx->reg_lock);
		if (rtsx->trans_result == TRANS_RESULT_FAIL) {
			err = -EIO;
			spin_unlock_irq(&rtsx->reg_lock);
			goto out;
		}
		spin_unlock_irq(&rtsx->reg_lock);

		/* NOTE(review): direct pointer bump (not sg_next); on a
		 * chained scatterlist this assumes batches never straddle
		 * chain boundaries — confirm with callers.
		 */
		sg_ptr += sg_cnt;
	}

	/* Wait for TRANS_OK_INT */
	spin_lock_irq(&rtsx->reg_lock);
	if (rtsx->trans_result == TRANS_NOT_READY) {
		init_completion(&trans_done);
		spin_unlock_irq(&rtsx->reg_lock);
		timeleft = wait_for_completion_interruptible_timeout(&trans_done,
								     msecs_to_jiffies(timeout));
		if (timeleft <= 0) {
			dev_dbg(rtsx_dev(chip), "Timeout (%s %d)\n",
				__func__, __LINE__);
			dev_dbg(rtsx_dev(chip), "chip->int_reg = 0x%x\n",
				chip->int_reg);
			err = -ETIMEDOUT;
			goto out;
		}
	} else {
		spin_unlock_irq(&rtsx->reg_lock);
	}

	spin_lock_irq(&rtsx->reg_lock);
	if (rtsx->trans_result == TRANS_RESULT_FAIL)
		err = -EIO;
	else if (rtsx->trans_result == TRANS_RESULT_OK)
		err = 0;
	spin_unlock_irq(&rtsx->reg_lock);

out:
	rtsx->done = NULL;
	rtsx->trans_state = STATE_TRANS_NONE;
	dma_unmap_sg(&rtsx->pci->dev, sg, num_sg, dma_dir);

	if (err < 0)
		rtsx_stop_cmd(chip, card);

	return err;
}
/*
 * rtsx_transfer_buf - DMA-transfer a single flat buffer (non-ADMA path).
 *
 * Maps @buf with dma_map_single, programs the data buffer address and
 * control registers (start bit 31, direction bit 29, 24-bit length),
 * then waits for the ISR-posted completion.
 *
 * Returns 0 on success, -ETIMEDOUT, -EIO on chip-reported failure,
 * -ENOMEM on a mapping failure, or -ENXIO for an unsupported DMA
 * direction.  On error the transfer is cancelled with rtsx_stop_cmd().
 */
static int rtsx_transfer_buf(struct rtsx_chip *chip, u8 card, void *buf,
			     size_t len, enum dma_data_direction dma_dir,
			     int timeout)
{
	struct rtsx_dev *rtsx = chip->rtsx;
	struct completion trans_done;
	dma_addr_t addr;
	u8 dir;
	int err = 0;
	u32 val = BIT(31);
	long timeleft;

	if (!buf || len <= 0)
		return -EIO;

	if (dma_dir == DMA_TO_DEVICE)
		dir = HOST_TO_DEVICE;
	else if (dma_dir == DMA_FROM_DEVICE)
		dir = DEVICE_TO_HOST;
	else
		return -ENXIO;

	addr = dma_map_single(&rtsx->pci->dev, buf, len, dma_dir);
	if (dma_mapping_error(&rtsx->pci->dev, addr))
		return -ENOMEM;

	if (card == SD_CARD)
		rtsx->check_card_cd = SD_EXIST;
	else if (card == MS_CARD)
		rtsx->check_card_cd = MS_EXIST;
	else if (card == XD_CARD)
		rtsx->check_card_cd = XD_EXIST;
	else
		rtsx->check_card_cd = 0;

	val |= (u32)(dir & 0x01) << 29;
	val |= (u32)(len & 0x00FFFFFF);

	spin_lock_irq(&rtsx->reg_lock);

	/* set up data structures for the wakeup system */
	rtsx->done = &trans_done;

	init_completion(&trans_done);

	rtsx->trans_state = STATE_TRANS_BUF;
	rtsx->trans_result = TRANS_NOT_READY;

	rtsx_writel(chip, RTSX_HDBAR, addr);
	rtsx_writel(chip, RTSX_HDBCTLR, val);

	spin_unlock_irq(&rtsx->reg_lock);

	/* Wait for TRANS_OK_INT */
	timeleft = wait_for_completion_interruptible_timeout(&trans_done,
							     msecs_to_jiffies(timeout));
	if (timeleft <= 0) {
		dev_dbg(rtsx_dev(chip), "Timeout (%s %d)\n",
			__func__, __LINE__);
		dev_dbg(rtsx_dev(chip), "chip->int_reg = 0x%x\n",
			chip->int_reg);
		err = -ETIMEDOUT;
		goto out;
	}

	/* trans_result is written by the ISR under reg_lock */
	spin_lock_irq(&rtsx->reg_lock);
	if (rtsx->trans_result == TRANS_RESULT_FAIL)
		err = -EIO;
	else if (rtsx->trans_result == TRANS_RESULT_OK)
		err = 0;
	spin_unlock_irq(&rtsx->reg_lock);

out:
	rtsx->done = NULL;
	rtsx->trans_state = STATE_TRANS_NONE;
	dma_unmap_single(&rtsx->pci->dev, addr, len, dma_dir);

	if (err < 0)
		rtsx_stop_cmd(chip, card);

	return err;
}
/*
 * Transfer part of a request: dispatch to the partial ADMA s-g path
 * when @use_sg is non-zero (then @buf is a scatterlist and *@index /
 * *@offset track the resume position), otherwise to the flat-buffer
 * path.  On failure with a pending delink, re-initialize all cards.
 */
int rtsx_transfer_data_partial(struct rtsx_chip *chip, u8 card,
			       void *buf, size_t len, int use_sg,
			       unsigned int *index, unsigned int *offset,
			       enum dma_data_direction dma_dir, int timeout)
{
	int err;

	/* don't transfer data during abort processing */
	if (rtsx_chk_stat(chip, RTSX_STAT_ABORT))
		return -EIO;

	if (use_sg)
		err = rtsx_transfer_sglist_adma_partial(chip, card,
							(struct scatterlist *)buf,
							use_sg, index, offset,
							(int)len, dma_dir,
							timeout);
	else
		err = rtsx_transfer_buf(chip, card, buf, len, dma_dir,
					timeout);

	if (err < 0 && RTSX_TST_DELINK(chip)) {
		RTSX_CLR_DELINK(chip);
		chip->need_reinit = SD_CARD | MS_CARD | XD_CARD;
		rtsx_reinit_cards(chip, 1);
	}

	return err;
}
/*
 * Transfer a complete request: dispatch to the ADMA scatter-gather path
 * when @use_sg is non-zero (then @buf is a scatterlist), otherwise to
 * the flat-buffer path.  On failure with a pending delink, re-initialize
 * all cards.
 */
int rtsx_transfer_data(struct rtsx_chip *chip, u8 card, void *buf, size_t len,
		       int use_sg, enum dma_data_direction dma_dir, int timeout)
{
	int err;

	dev_dbg(rtsx_dev(chip), "use_sg = %d\n", use_sg);

	/* don't transfer data during abort processing */
	if (rtsx_chk_stat(chip, RTSX_STAT_ABORT))
		return -EIO;

	if (use_sg)
		err = rtsx_transfer_sglist_adma(chip, card,
						(struct scatterlist *)buf,
						use_sg, dma_dir, timeout);
	else
		err = rtsx_transfer_buf(chip, card, buf, len, dma_dir,
					timeout);

	if (err < 0 && RTSX_TST_DELINK(chip)) {
		RTSX_CLR_DELINK(chip);
		chip->need_reinit = SD_CARD | MS_CARD | XD_CARD;
		rtsx_reinit_cards(chip, 1);
	}

	return err;
}
| linux-master | drivers/staging/rts5208/rtsx_transport.c |
// SPDX-License-Identifier: GPL-2.0+
/*
* Driver for Realtek PCI-Express card reader
*
* Copyright(c) 2009-2013 Realtek Semiconductor Corp. All rights reserved.
*
* Author:
* Wei WANG ([email protected])
* Micky Ching ([email protected])
*/
#include <linux/blkdev.h>
#include <linux/kthread.h>
#include <linux/sched.h>
#include <linux/workqueue.h>
#include <linux/vmalloc.h>
#include "rtsx.h"
#include "sd.h"
#include "xd.h"
#include "ms.h"
static void rtsx_calibration(struct rtsx_chip *chip)
{
rtsx_write_phy_register(chip, 0x1B, 0x135E);
wait_timeout(10);
rtsx_write_phy_register(chip, 0x00, 0x0280);
rtsx_write_phy_register(chip, 0x01, 0x7112);
rtsx_write_phy_register(chip, 0x01, 0x7110);
rtsx_write_phy_register(chip, 0x01, 0x7112);
rtsx_write_phy_register(chip, 0x01, 0x7113);
rtsx_write_phy_register(chip, 0x00, 0x0288);
}
/*
 * Enable card insert/removal interrupts for every card type mapped to
 * any LUN, preserving bits already set in RTSX_BIER.  The SD interrupt
 * is masked off again when hardware SD bypass is active.
 */
void rtsx_enable_card_int(struct rtsx_chip *chip)
{
	u32 bier = rtsx_readl(chip, RTSX_BIER);
	int lun;

	for (lun = 0; lun <= chip->max_lun; lun++) {
		unsigned int map = chip->lun2card[lun];

		if (map & XD_CARD)
			bier |= XD_INT_EN;
		if (map & SD_CARD)
			bier |= SD_INT_EN;
		if (map & MS_CARD)
			bier |= MS_INT_EN;
	}
	if (chip->hw_bypass_sd)
		bier &= ~((u32)SD_INT_EN);

	rtsx_writel(chip, RTSX_BIER, bier);
}
/*
 * Program the full bus interrupt enable mask: transfer OK/FAIL always;
 * per-card insert/removal interrupts unless DISABLE_CARD_INT is set;
 * plus delink (IC rev >= C), over-current (SUPPORT_OCP) and, when not
 * in ADMA mode, data-done interrupts.
 */
void rtsx_enable_bus_int(struct rtsx_chip *chip)
{
	u32 reg = 0;
#ifndef DISABLE_CARD_INT
	int i;
#endif

	reg = TRANS_OK_INT_EN | TRANS_FAIL_INT_EN;

#ifndef DISABLE_CARD_INT
	for (i = 0; i <= chip->max_lun; i++) {
		dev_dbg(rtsx_dev(chip), "lun2card[%d] = 0x%02x\n",
			i, chip->lun2card[i]);

		if (chip->lun2card[i] & XD_CARD)
			reg |= XD_INT_EN;
		if (chip->lun2card[i] & SD_CARD)
			reg |= SD_INT_EN;
		if (chip->lun2card[i] & MS_CARD)
			reg |= MS_INT_EN;
	}
	/* hardware SD bypass: the SD slot is not ours to watch */
	if (chip->hw_bypass_sd)
		reg &= ~((u32)SD_INT_EN);
#endif

	if (chip->ic_version >= IC_VER_C)
		reg |= DELINK_INT_EN;
#ifdef SUPPORT_OCP
	reg |= OC_INT_EN;
#endif
	if (!chip->adma_mode)
		reg |= DATA_DONE_INT_EN;

	/* Enable Bus Interrupt */
	rtsx_writel(chip, RTSX_BIER, reg);

	dev_dbg(rtsx_dev(chip), "RTSX_BIER: 0x%08x\n", reg);
}
/* Mask every interrupt source by clearing the bus interrupt enable register. */
void rtsx_disable_bus_int(struct rtsx_chip *chip)
{
	rtsx_writel(chip, RTSX_BIER, 0);
}
/*
 * Legacy SDIO pre-handling: when the SD slot should be ignored and an
 * SDIO device is present, route the shared bus to SDIO (pull-ups,
 * share mode, internal clock, SDIO bus/CD control) and flag sd_int /
 * sd_io; otherwise schedule a normal SD card reset.
 *
 * Returns STATUS_SUCCESS or a negative register-access error.
 */
static int rtsx_pre_handle_sdio_old(struct rtsx_chip *chip)
{
	int retval;

	if (chip->ignore_sd && CHK_SDIO_EXIST(chip)) {
		if (chip->asic_code) {
			retval = rtsx_write_register(chip, CARD_PULL_CTL5,
						     0xFF,
						     MS_INS_PU | SD_WP_PU |
						     SD_CD_PU | SD_CMD_PU);
			if (retval)
				return retval;
		} else {
			retval = rtsx_write_register(chip, FPGA_PULL_CTL,
						     0xFF,
						     FPGA_SD_PULL_CTL_EN);
			if (retval)
				return retval;
		}
		retval = rtsx_write_register(chip, CARD_SHARE_MODE, 0xFF,
					     CARD_SHARE_48_SD);
		if (retval)
			return retval;

		/* Enable SDIO internal clock */
		retval = rtsx_write_register(chip, 0xFF2C, 0x01, 0x01);
		if (retval)
			return retval;

		retval = rtsx_write_register(chip, SDIO_CTRL, 0xFF,
					     SDIO_BUS_CTRL | SDIO_CD_CTRL);
		if (retval)
			return retval;

		chip->sd_int = 1;
		chip->sd_io = 1;
	} else {
		chip->need_reset |= SD_CARD;
	}

	return STATUS_SUCCESS;
}
#ifdef HW_AUTO_SWITCH_SD_BUS
/*
 * New-style SDIO pre-handling using the hardware auto-switch feature.
 *
 * Decides whether software SD bypass applies: on first driver load it
 * reads the chip-specific bypass bit (0xFE5A bit 3 on 5288, 0xFE70 bit
 * 7 on 5208); afterwards it follows chip->sdio_in_charge.  With bypass
 * active, a card-detect toggle (TLPTISTAT) means the SDIO device left,
 * so auto-switch is disabled and a normal SD reset scheduled; with no
 * toggle the bus is handed to SDIO (pulls, share mode, auto-switch
 * re-enabled).  Without bypass, the toggle bit is cleared and a normal
 * SD reset scheduled.
 *
 * Returns STATUS_SUCCESS/STATUS_FAIL or a negative register-access
 * error.
 */
static int rtsx_pre_handle_sdio_new(struct rtsx_chip *chip)
{
	u8 tmp;
	bool sw_bypass_sd = false;
	int retval;

	if (chip->driver_first_load) {
		if (CHECK_PID(chip, 0x5288)) {
			retval = rtsx_read_register(chip, 0xFE5A, &tmp);
			if (retval)
				return retval;
			if (tmp & 0x08)
				sw_bypass_sd = true;
		} else if (CHECK_PID(chip, 0x5208)) {
			retval = rtsx_read_register(chip, 0xFE70, &tmp);
			if (retval)
				return retval;
			if (tmp & 0x80)
				sw_bypass_sd = true;
		}
	} else {
		if (chip->sdio_in_charge)
			sw_bypass_sd = true;
	}
	dev_dbg(rtsx_dev(chip), "chip->sdio_in_charge = %d\n",
		chip->sdio_in_charge);
	dev_dbg(rtsx_dev(chip), "chip->driver_first_load = %d\n",
		chip->driver_first_load);
	dev_dbg(rtsx_dev(chip), "sw_bypass_sd = %d\n",
		sw_bypass_sd);

	if (sw_bypass_sd) {
		u8 cd_toggle_mask = 0;

		retval = rtsx_read_register(chip, TLPTISTAT, &tmp);
		if (retval)
			return retval;
		cd_toggle_mask = 0x08;

		if (tmp & cd_toggle_mask) {
			/* Disable sdio_bus_auto_switch */
			if (CHECK_PID(chip, 0x5288)) {
				retval = rtsx_write_register(chip, 0xFE5A,
							     0x08, 0x00);
				if (retval)
					return retval;
			} else if (CHECK_PID(chip, 0x5208)) {
				retval = rtsx_write_register(chip, 0xFE70,
							     0x80, 0x00);
				if (retval)
					return retval;
			}

			/* write the toggle bit back to clear it */
			retval = rtsx_write_register(chip, TLPTISTAT, 0xFF,
						     tmp);
			if (retval)
				return retval;

			chip->need_reset |= SD_CARD;
		} else {
			dev_dbg(rtsx_dev(chip), "Chip inserted with SDIO!\n");

			if (chip->asic_code) {
				retval = sd_pull_ctl_enable(chip);
				if (retval != STATUS_SUCCESS)
					return STATUS_FAIL;
			} else {
				retval = rtsx_write_register
						(chip, FPGA_PULL_CTL,
						 FPGA_SD_PULL_CTL_BIT | 0x20,
						 0);
				if (retval)
					return retval;
			}
			retval = card_share_mode(chip, SD_CARD);
			if (retval != STATUS_SUCCESS)
				return STATUS_FAIL;

			/* Enable sdio_bus_auto_switch */
			if (CHECK_PID(chip, 0x5288)) {
				retval = rtsx_write_register(chip, 0xFE5A,
							     0x08, 0x08);
				if (retval)
					return retval;
			} else if (CHECK_PID(chip, 0x5208)) {
				retval = rtsx_write_register(chip, 0xFE70,
							     0x80, 0x80);
				if (retval)
					return retval;
			}

			chip->chip_insert_with_sdio = 1;
			chip->sd_io = 1;
		}
	} else {
		/* clear the card-detect toggle status bit */
		retval = rtsx_write_register(chip, TLPTISTAT, 0x08, 0x08);
		if (retval)
			return retval;

		chip->need_reset |= SD_CARD;
	}

	return STATUS_SUCCESS;
}
#endif
/*
 * Configure PCIe ASPM (L0s/L1) after a chip reset.
 *
 * In dynamic-ASPM mode only the SDIO function's config space is touched
 * (and only on 5288 with SDIO present).  Otherwise the link control
 * register and, when SDIO exists, the second function's config space
 * are programmed with aspm_l0s_l1_en, and the cached per-function
 * aspm_level[] plus the aspm_enabled flag are updated.
 *
 * Returns STATUS_SUCCESS/STATUS_FAIL or a negative register error.
 */
static int rtsx_reset_aspm(struct rtsx_chip *chip)
{
	int ret;

	if (chip->dynamic_aspm) {
		if (!CHK_SDIO_EXIST(chip) || !CHECK_PID(chip, 0x5288))
			return STATUS_SUCCESS;

		ret = rtsx_write_cfg_dw(chip, 2, 0xC0, 0xFF,
					chip->aspm_l0s_l1_en);
		if (ret != STATUS_SUCCESS)
			return STATUS_FAIL;

		return STATUS_SUCCESS;
	}

	if (CHECK_PID(chip, 0x5208)) {
		ret = rtsx_write_register(chip, ASPM_FORCE_CTL, 0xFF, 0x3F);
		if (ret)
			return ret;
	}
	ret = rtsx_write_config_byte(chip, LCTLR, chip->aspm_l0s_l1_en);
	if (ret != STATUS_SUCCESS)
		return STATUS_FAIL;

	chip->aspm_level[0] = chip->aspm_l0s_l1_en;
	if (CHK_SDIO_EXIST(chip)) {
		chip->aspm_level[1] = chip->aspm_l0s_l1_en;
		/* function index differs between 5288 (2) and 5208 (1) */
		ret = rtsx_write_cfg_dw(chip, CHECK_PID(chip, 0x5288) ? 2 : 1,
					0xC0, 0xFF, chip->aspm_l0s_l1_en);
		if (ret != STATUS_SUCCESS)
			return STATUS_FAIL;
	}

	chip->aspm_enabled = 1;

	return STATUS_SUCCESS;
}
/*
 * Enable PCIe interrupts and apply ASIC-5208-specific PHY tweaks.
 *
 * Non-ASIC or non-5208 chips just get the bus interrupts enabled.  On
 * 5208 ASIC silicon, PHY-debug mode instead disables bus interrupts
 * after clearing CDRESUMECTL; IC rev >= D additionally patches PHY
 * registers 0x00 and 0x1C, and first load on rev < C runs the PHY
 * calibration sequence.
 *
 * Fix: the third argument of both rtsx_read_phy_register() calls had
 * been corrupted to the literal "®" (a mangled "&reg" HTML entity),
 * which does not compile; restored the address-of expression "&reg".
 *
 * Returns STATUS_SUCCESS/STATUS_FAIL or a negative register error.
 */
static int rtsx_enable_pcie_intr(struct rtsx_chip *chip)
{
	int ret;

	if (!chip->asic_code || !CHECK_PID(chip, 0x5208)) {
		rtsx_enable_bus_int(chip);
		return STATUS_SUCCESS;
	}

	if (chip->phy_debug_mode) {
		ret = rtsx_write_register(chip, CDRESUMECTL, 0x77, 0);
		if (ret)
			return ret;
		rtsx_disable_bus_int(chip);
	} else {
		rtsx_enable_bus_int(chip);
	}

	if (chip->ic_version >= IC_VER_D) {
		u16 reg;

		ret = rtsx_read_phy_register(chip, 0x00, &reg);
		if (ret != STATUS_SUCCESS)
			return STATUS_FAIL;

		reg &= 0xFE7F;
		reg |= 0x80;
		ret = rtsx_write_phy_register(chip, 0x00, reg);
		if (ret != STATUS_SUCCESS)
			return STATUS_FAIL;

		ret = rtsx_read_phy_register(chip, 0x1C, &reg);
		if (ret != STATUS_SUCCESS)
			return STATUS_FAIL;

		reg &= 0xFFF7;
		ret = rtsx_write_phy_register(chip, 0x1C, reg);
		if (ret != STATUS_SUCCESS)
			return STATUS_FAIL;
	}

	if (chip->driver_first_load && chip->ic_version < IC_VER_C)
		rtsx_calibration(chip);

	return STATUS_SUCCESS;
}
/*
 * rtsx_reset_chip - bring the card reader back to a known operational state
 * @chip: Realtek card reader chip
 *
 * Reprograms clocks, power, GPIO, link-state and interrupt registers after
 * probe or resume, detects which card sockets are populated, and finally
 * resets any detected cards.  Returns STATUS_SUCCESS, STATUS_FAIL, or the
 * error code of the first failing register access.
 */
int rtsx_reset_chip(struct rtsx_chip *chip)
{
	int retval;

	/* Tell the chip where the host command buffer lives. */
	rtsx_writel(chip, RTSX_HCBAR, chip->host_cmds_addr);
	rtsx_disable_aspm(chip);
	retval = rtsx_write_register(chip, HOST_SLEEP_STATE, 0x03, 0x00);
	if (retval)
		return retval;
	/* Disable card clock */
	retval = rtsx_write_register(chip, CARD_CLK_EN, 0x1E, 0);
	if (retval)
		return retval;
#ifdef SUPPORT_OCP
	/* SSC power on, OCD power on */
	if (CHECK_LUN_MODE(chip, SD_MS_2LUN)) {
		retval = rtsx_write_register(chip, FPDCTL, OC_POWER_DOWN, 0);
		if (retval)
			return retval;
	} else {
		/* Not in SD+MS mode: keep the MS over-current block down. */
		retval = rtsx_write_register(chip, FPDCTL, OC_POWER_DOWN,
					     MS_OC_POWER_DOWN);
		if (retval)
			return retval;
	}
	/* Over-current protection: debounce time and trip threshold. */
	retval = rtsx_write_register(chip, OCPPARA1, OCP_TIME_MASK,
				     OCP_TIME_800);
	if (retval)
		return retval;
	retval = rtsx_write_register(chip, OCPPARA2, OCP_THD_MASK,
				     OCP_THD_244_946);
	if (retval)
		return retval;
	retval = rtsx_write_register(chip, OCPCTL, 0xFF,
				     CARD_OC_INT_EN | CARD_DETECT_EN);
	if (retval)
		return retval;
#else
	/* OC power down */
	retval = rtsx_write_register(chip, FPDCTL, OC_POWER_DOWN,
				     OC_POWER_DOWN);
	if (retval)
		return retval;
#endif
	if (!CHECK_PID(chip, 0x5288)) {
		retval = rtsx_write_register(chip, CARD_GPIO_DIR, 0xFF, 0x03);
		if (retval)
			return retval;
	}
	/* Turn off LED */
	retval = rtsx_write_register(chip, CARD_GPIO, 0xFF, 0x03);
	if (retval)
		return retval;
	/* Reset delink mode */
	retval = rtsx_write_register(chip, CHANGE_LINK_STATE, 0x0A, 0);
	if (retval)
		return retval;
	/* Card driving select */
	retval = rtsx_write_register(chip, CARD_DRIVE_SEL, 0xFF,
				     chip->card_drive_sel);
	if (retval)
		return retval;
#ifdef LED_AUTO_BLINK
	retval = rtsx_write_register(chip, CARD_AUTO_BLINK, 0xFF,
				     LED_BLINK_SPEED | BLINK_EN | LED_GPIO0);
	if (retval)
		return retval;
#endif
	if (chip->asic_code) {
		/* Enable SSC Clock */
		retval = rtsx_write_register(chip, SSC_CTL1, 0xFF,
					     SSC_8X_EN | SSC_SEL_4M);
		if (retval)
			return retval;
		retval = rtsx_write_register(chip, SSC_CTL2, 0xFF, 0x12);
		if (retval)
			return retval;
	}
	/*
	 * Disable cd_pwr_save (u_force_rst_core_en=0, u_cd_rst_core_en=0)
	 * 0xFE5B
	 * bit[1] u_cd_rst_core_en rst_value = 0
	 * bit[2] u_force_rst_core_en rst_value = 0
	 * bit[5] u_mac_phy_rst_n_dbg rst_value = 1
	 * bit[4] u_non_sticky_rst_n_dbg rst_value = 0
	 */
	retval = rtsx_write_register(chip, CHANGE_LINK_STATE, 0x16, 0x10);
	if (retval)
		return retval;
	/* Enable ASPM */
	if (chip->aspm_l0s_l1_en) {
		retval = rtsx_reset_aspm(chip);
		if (retval != STATUS_SUCCESS)
			return STATUS_FAIL;
	} else {
		if (chip->asic_code && CHECK_PID(chip, 0x5208)) {
			/* PHY tuning value used whenever ASPM stays off. */
			retval = rtsx_write_phy_register(chip, 0x07, 0x0129);
			if (retval != STATUS_SUCCESS)
				return STATUS_FAIL;
		}
		retval = rtsx_write_config_byte(chip, LCTLR,
						chip->aspm_l0s_l1_en);
		if (retval != STATUS_SUCCESS)
			return STATUS_FAIL;
	}
	retval = rtsx_write_config_byte(chip, 0x81, 1);
	if (retval != STATUS_SUCCESS)
		return STATUS_FAIL;
	if (CHK_SDIO_EXIST(chip)) {
		/* The SDIO function number is 2 on RTS5288, 1 otherwise. */
		retval = rtsx_write_cfg_dw(chip,
					   CHECK_PID(chip, 0x5288) ? 2 : 1,
					   0xC0, 0xFF00, 0x0100);
		if (retval != STATUS_SUCCESS)
			return STATUS_FAIL;
	}
	if (CHECK_PID(chip, 0x5288) && !CHK_SDIO_EXIST(chip)) {
		retval = rtsx_write_cfg_dw(chip, 2, 0xC0, 0xFFFF, 0x0103);
		if (retval != STATUS_SUCCESS)
			return STATUS_FAIL;
		retval = rtsx_write_cfg_dw(chip, 2, 0x84, 0xFF, 0x03);
		if (retval != STATUS_SUCCESS)
			return STATUS_FAIL;
	}
	retval = rtsx_write_register(chip, IRQSTAT0, LINK_RDY_INT,
				     LINK_RDY_INT);
	if (retval)
		return retval;
	retval = rtsx_write_register(chip, PERST_GLITCH_WIDTH, 0xFF, 0x80);
	if (retval)
		return retval;
	retval = rtsx_enable_pcie_intr(chip);
	if (retval != STATUS_SUCCESS)
		return STATUS_FAIL;
	chip->need_reset = 0;
	/* Snapshot the pending-interrupt register to see which cards exist. */
	chip->int_reg = rtsx_readl(chip, RTSX_BIPR);
	if (chip->hw_bypass_sd)
		goto nextcard;
	dev_dbg(rtsx_dev(chip), "In %s, chip->int_reg = 0x%x\n", __func__,
		chip->int_reg);
	if (chip->int_reg & SD_EXIST) {
#ifdef HW_AUTO_SWITCH_SD_BUS
		if (CHECK_PID(chip, 0x5208) && chip->ic_version < IC_VER_C)
			retval = rtsx_pre_handle_sdio_old(chip);
		else
			retval = rtsx_pre_handle_sdio_new(chip);
		dev_dbg(rtsx_dev(chip), "chip->need_reset = 0x%x (%s)\n",
			(unsigned int)(chip->need_reset), __func__);
#else /* HW_AUTO_SWITCH_SD_BUS */
		retval = rtsx_pre_handle_sdio_old(chip);
#endif /* HW_AUTO_SWITCH_SD_BUS */
		if (retval != STATUS_SUCCESS)
			return STATUS_FAIL;
	} else {
		chip->sd_io = 0;
		retval = rtsx_write_register(chip, SDIO_CTRL,
					     SDIO_BUS_CTRL | SDIO_CD_CTRL, 0);
		if (retval)
			return retval;
	}
nextcard:
	if (chip->int_reg & XD_EXIST)
		chip->need_reset |= XD_CARD;
	if (chip->int_reg & MS_EXIST)
		chip->need_reset |= MS_CARD;
	if (chip->int_reg & CARD_EXIST) {
		/* Release the SSC reset when any socket is populated. */
		retval = rtsx_write_register(chip, SSC_CTL1, SSC_RSTB,
					     SSC_RSTB);
		if (retval)
			return retval;
	}
	dev_dbg(rtsx_dev(chip), "In %s, chip->need_reset = 0x%x\n", __func__,
		(unsigned int)(chip->need_reset));
	retval = rtsx_write_register(chip, RCCTL, 0x01, 0x00);
	if (retval)
		return retval;
	if (CHECK_PID(chip, 0x5208) || CHECK_PID(chip, 0x5288)) {
		/* Turn off main power when entering S3/S4 state */
		retval = rtsx_write_register(chip, MAIN_PWR_OFF_CTL, 0x03,
					     0x03);
		if (retval)
			return retval;
	}
	if (chip->remote_wakeup_en && !chip->auto_delink_en) {
		retval = rtsx_write_register(chip, WAKE_SEL_CTL, 0x07, 0x07);
		if (retval)
			return retval;
		if (chip->aux_pwr_exist) {
			retval = rtsx_write_register(chip, PME_FORCE_CTL,
						     0xFF, 0x33);
			if (retval)
				return retval;
		}
	} else {
		retval = rtsx_write_register(chip, WAKE_SEL_CTL, 0x07, 0x04);
		if (retval)
			return retval;
		retval = rtsx_write_register(chip, PME_FORCE_CTL, 0xFF, 0x30);
		if (retval)
			return retval;
	}
	if (CHECK_PID(chip, 0x5208) && chip->ic_version >= IC_VER_D) {
		retval = rtsx_write_register(chip, PETXCFG, 0x1C, 0x14);
		if (retval)
			return retval;
	}
	if (chip->asic_code && CHECK_PID(chip, 0x5208)) {
		retval = rtsx_clr_phy_reg_bit(chip, 0x1C, 2);
		if (retval != STATUS_SUCCESS)
			return STATUS_FAIL;
	}
	if (chip->ft2_fast_mode) {
		/* Fast bring-up: partial power, short delay, full power. */
		retval = rtsx_write_register(chip, CARD_PWR_CTL, 0xFF,
					     MS_PARTIAL_POWER_ON |
					     SD_PARTIAL_POWER_ON);
		if (retval)
			return retval;
		udelay(chip->pmos_pwr_on_interval);
		retval = rtsx_write_register(chip, CARD_PWR_CTL, 0xFF,
					     MS_POWER_ON | SD_POWER_ON);
		if (retval)
			return retval;
		wait_timeout(200);
	}
	/* Reset card */
	rtsx_reset_detected_cards(chip, 0);
	chip->driver_first_load = 0;
	return STATUS_SUCCESS;
}
/*
 * Validate a packed SD speed-priority word: each of the four bytes must be
 * an index in the range 0x01..0x04.  Returns true (1) when all four bytes
 * are in range, false (0) otherwise.
 */
static inline int valid_sd_speed_prior(u32 sd_speed_prior)
{
	int i;

	for (i = 0; i < 4; i++) {
		u8 entry = (u8)(sd_speed_prior >> (i * 8));

		if (entry < 0x01 || entry > 0x04)
			return false;
	}
	return true;
}
/*
 * Validate a packed SD current-priority word: each of the four bytes must
 * be no greater than 0x03.  Returns true (1) when every byte is in range,
 * false (0) otherwise.
 */
static inline int valid_sd_current_prior(u32 sd_current_prior)
{
	int i;

	for (i = 0; i < 4; i++) {
		u8 entry = (u8)(sd_current_prior >> (i * 8));

		if (entry > 0x03)
			return false;
	}
	return true;
}
/*
 * rts5208_init - read out RTS5208-specific configuration at probe time
 * @chip: Realtek card reader chip
 *
 * Determines ASIC vs FPGA silicon, IC version, PHY debug mode, auxiliary
 * power presence, the hardware SD-bypass strap and SDIO presence, caching
 * the results in @chip.
 */
static int rts5208_init(struct rtsx_chip *chip)
{
	int retval;
	u16 reg = 0;
	u8 val = 0;

	/* If CLK_SEL reads back 0 after writing 1s, this is real silicon. */
	retval = rtsx_write_register(chip, CLK_SEL, 0x03, 0x03);
	if (retval)
		return retval;
	retval = rtsx_read_register(chip, CLK_SEL, &val);
	if (retval)
		return retval;
	chip->asic_code = val == 0 ? 1 : 0;
	if (chip->asic_code) {
		/* ASIC: version and debug-mode strap live in PHY reg 0x1C. */
		retval = rtsx_read_phy_register(chip, 0x1C, &reg);
		if (retval != STATUS_SUCCESS)
			return STATUS_FAIL;
		dev_dbg(rtsx_dev(chip), "Value of phy register 0x1C is 0x%x\n",
			reg);
		chip->ic_version = (reg >> 4) & 0x07;
		chip->phy_debug_mode = reg & PHY_DEBUG_MODE ? 1 : 0;
	} else {
		/* FPGA: version is exposed through MMIO register 0xFE80. */
		retval = rtsx_read_register(chip, 0xFE80, &val);
		if (retval)
			return retval;
		chip->ic_version = val;
		chip->phy_debug_mode = 0;
	}
	retval = rtsx_read_register(chip, PDINFO, &val);
	if (retval)
		return retval;
	dev_dbg(rtsx_dev(chip), "PDINFO: 0x%x\n", val);
	chip->aux_pwr_exist = val & AUX_PWR_DETECTED ? 1 : 0;
	retval = rtsx_read_register(chip, 0xFE50, &val);
	if (retval)
		return retval;
	chip->hw_bypass_sd = val & 0x01 ? 1 : 0;
	/*
	 * NOTE(review): return value deliberately(?) ignored; val keeps its
	 * previous contents if this config read fails — confirm intent.
	 */
	rtsx_read_config_byte(chip, 0x0E, &val);
	if (val & 0x80)
		SET_SDIO_EXIST(chip);
	else
		CLR_SDIO_EXIST(chip);
	if (chip->use_hw_setting) {
		/* Adopt the auto-delink strap latched by the hardware. */
		retval = rtsx_read_register(chip, CHANGE_LINK_STATE, &val);
		if (retval)
			return retval;
		chip->auto_delink_en = val & 0x80 ? 1 : 0;
	}
	return STATUS_SUCCESS;
}
/*
 * rts5288_init - read out RTS5288-specific configuration at probe time
 * @chip: Realtek card reader chip
 *
 * Detects ASIC vs FPGA silicon, package type (QFN/LQFP), auxiliary power,
 * the hardware SD-bypass strap, and SDIO presence (from the max-function
 * field in config space); optionally adopts the hardware auto-delink and
 * LUN-mode straps.
 */
static int rts5288_init(struct rtsx_chip *chip)
{
	int retval;
	u8 val = 0, max_func;
	u32 lval = 0;

	/* If CLK_SEL reads back 0 after writing 1s, this is real silicon. */
	retval = rtsx_write_register(chip, CLK_SEL, 0x03, 0x03);
	if (retval)
		return retval;
	retval = rtsx_read_register(chip, CLK_SEL, &val);
	if (retval)
		return retval;
	chip->asic_code = val == 0 ? 1 : 0;
	chip->ic_version = 0;
	chip->phy_debug_mode = 0;
	retval = rtsx_read_register(chip, PDINFO, &val);
	if (retval)
		return retval;
	dev_dbg(rtsx_dev(chip), "PDINFO: 0x%x\n", val);
	chip->aux_pwr_exist = val & AUX_PWR_DETECTED ? 1 : 0;
	retval = rtsx_read_register(chip, CARD_SHARE_MODE, &val);
	if (retval)
		return retval;
	dev_dbg(rtsx_dev(chip), "CARD_SHARE_MODE: 0x%x\n", val);
	chip->baro_pkg = val & 0x04 ? QFN : LQFP;
	retval = rtsx_read_register(chip, 0xFE5A, &val);
	if (retval)
		return retval;
	chip->hw_bypass_sd = val & 0x10 ? 1 : 0;
	retval = rtsx_read_cfg_dw(chip, 0, 0x718, &lval);
	if (retval != STATUS_SUCCESS)
		return STATUS_FAIL;
	/* Bits 31:29 of config dword 0x718 hold the max function number. */
	max_func = (u8)((lval >> 29) & 0x07);
	dev_dbg(rtsx_dev(chip), "Max function number: %d\n", max_func);
	if (max_func == 0x02)
		SET_SDIO_EXIST(chip);
	else
		CLR_SDIO_EXIST(chip);
	if (chip->use_hw_setting) {
		retval = rtsx_read_register(chip, CHANGE_LINK_STATE, &val);
		if (retval)
			return retval;
		chip->auto_delink_en = val & 0x80 ? 1 : 0;
		/* LQFP parts are forced into single-LUN SD/MS mode. */
		if (CHECK_BARO_PKG(chip, LQFP))
			chip->lun_mode = SD_MS_1LUN;
		else
			chip->lun_mode = DEFAULT_SINGLE;
	}
	return STATUS_SUCCESS;
}
/*
 * rtsx_init_chip - one-time software and hardware initialization at probe
 * @chip: Realtek card reader chip
 *
 * Clears all per-card bookkeeping, validates the configurable SD speed /
 * current priority words and DDR TX phases (falling back to defaults),
 * powers the SSC block, runs the chip-specific probe helper (5208/5288),
 * derives the LUN-to-card mapping from the LUN mode, and finally performs
 * a full rtsx_reset_chip().
 */
int rtsx_init_chip(struct rtsx_chip *chip)
{
	struct sd_info *sd_card = &chip->sd_card;
	struct xd_info *xd_card = &chip->xd_card;
	struct ms_info *ms_card = &chip->ms_card;
	int retval;
	unsigned int i;

	dev_dbg(rtsx_dev(chip), "Vendor ID: 0x%04x, Product ID: 0x%04x\n",
		chip->vendor_id, chip->product_id);
	chip->ic_version = 0;
	memset(xd_card, 0, sizeof(struct xd_info));
	memset(sd_card, 0, sizeof(struct sd_info));
	memset(ms_card, 0, sizeof(struct ms_info));
	chip->xd_reset_counter = 0;
	chip->sd_reset_counter = 0;
	chip->ms_reset_counter = 0;
	chip->xd_show_cnt = MAX_SHOW_CNT;
	chip->sd_show_cnt = MAX_SHOW_CNT;
	chip->ms_show_cnt = MAX_SHOW_CNT;
	chip->sd_io = 0;
	chip->auto_delink_cnt = 0;
	chip->auto_delink_allowed = 1;
	rtsx_set_stat(chip, RTSX_STAT_INIT);
	chip->aspm_enabled = 0;
	chip->chip_insert_with_sdio = 0;
	chip->sdio_aspm = 0;
	chip->sdio_idle = 0;
	chip->sdio_counter = 0;
	chip->cur_card = 0;
	chip->phy_debug_mode = 0;
	chip->sdio_func_exist = 0;
	memset(chip->sdio_raw_data, 0, 12);
	for (i = 0; i < MAX_ALLOWED_LUN_CNT; i++) {
		set_sense_type(chip, i, SENSE_TYPE_NO_SENSE);
		chip->rw_fail_cnt[i] = 0;
	}
	/* Fall back to defaults when the packed priority words are bad. */
	if (!valid_sd_speed_prior(chip->sd_speed_prior))
		chip->sd_speed_prior = 0x01040203;
	dev_dbg(rtsx_dev(chip), "sd_speed_prior = 0x%08x\n",
		chip->sd_speed_prior);
	if (!valid_sd_current_prior(chip->sd_current_prior))
		chip->sd_current_prior = 0x00010203;
	dev_dbg(rtsx_dev(chip), "sd_current_prior = 0x%08x\n",
		chip->sd_current_prior);
	/* Valid DDR TX phase range is 0..31. */
	if (chip->sd_ddr_tx_phase > 31 || chip->sd_ddr_tx_phase < 0)
		chip->sd_ddr_tx_phase = 0;
	if (chip->mmc_ddr_tx_phase > 31 || chip->mmc_ddr_tx_phase < 0)
		chip->mmc_ddr_tx_phase = 0;
	/* Power the SSC block up before touching clock dividers. */
	retval = rtsx_write_register(chip, FPDCTL, SSC_POWER_DOWN, 0);
	if (retval)
		return retval;
	wait_timeout(200);
	retval = rtsx_write_register(chip, CLK_DIV, 0x07, 0x07);
	if (retval)
		return retval;
	dev_dbg(rtsx_dev(chip), "chip->use_hw_setting = %d\n",
		chip->use_hw_setting);
	if (CHECK_PID(chip, 0x5208)) {
		retval = rts5208_init(chip);
		if (retval != STATUS_SUCCESS)
			return STATUS_FAIL;
	} else if (CHECK_PID(chip, 0x5288)) {
		retval = rts5288_init(chip);
		if (retval != STATUS_SUCCESS)
			return STATUS_FAIL;
	}
	if (chip->ss_en == 2)
		chip->ss_en = 0;
	dev_dbg(rtsx_dev(chip), "chip->asic_code = %d\n", chip->asic_code);
	dev_dbg(rtsx_dev(chip), "chip->ic_version = 0x%x\n", chip->ic_version);
	dev_dbg(rtsx_dev(chip), "chip->phy_debug_mode = %d\n",
		chip->phy_debug_mode);
	dev_dbg(rtsx_dev(chip), "chip->aux_pwr_exist = %d\n",
		chip->aux_pwr_exist);
	dev_dbg(rtsx_dev(chip), "chip->sdio_func_exist = %d\n",
		chip->sdio_func_exist);
	dev_dbg(rtsx_dev(chip), "chip->hw_bypass_sd = %d\n",
		chip->hw_bypass_sd);
	dev_dbg(rtsx_dev(chip), "chip->aspm_l0s_l1_en = %d\n",
		chip->aspm_l0s_l1_en);
	dev_dbg(rtsx_dev(chip), "chip->lun_mode = %d\n", chip->lun_mode);
	dev_dbg(rtsx_dev(chip), "chip->auto_delink_en = %d\n",
		chip->auto_delink_en);
	dev_dbg(rtsx_dev(chip), "chip->ss_en = %d\n", chip->ss_en);
	dev_dbg(rtsx_dev(chip), "chip->baro_pkg = %d\n", chip->baro_pkg);
	/* Build the card<->LUN maps implied by the configured LUN mode. */
	if (CHECK_LUN_MODE(chip, SD_MS_2LUN)) {
		chip->card2lun[SD_CARD] = 0;
		chip->card2lun[MS_CARD] = 1;
		chip->card2lun[XD_CARD] = 0xFF;
		chip->lun2card[0] = SD_CARD;
		chip->lun2card[1] = MS_CARD;
		chip->max_lun = 1;
		SET_SDIO_IGNORED(chip);
	} else if (CHECK_LUN_MODE(chip, SD_MS_1LUN)) {
		chip->card2lun[SD_CARD] = 0;
		chip->card2lun[MS_CARD] = 0;
		chip->card2lun[XD_CARD] = 0xFF;
		chip->lun2card[0] = SD_CARD | MS_CARD;
		chip->max_lun = 0;
	} else {
		chip->card2lun[XD_CARD] = 0;
		chip->card2lun[SD_CARD] = 0;
		chip->card2lun[MS_CARD] = 0;
		chip->lun2card[0] = XD_CARD | SD_CARD | MS_CARD;
		chip->max_lun = 0;
	}
	retval = rtsx_reset_chip(chip);
	if (retval != STATUS_SUCCESS)
		return STATUS_FAIL;
	return STATUS_SUCCESS;
}
/*
 * rtsx_release_chip - free per-card resources when the chip goes away
 * @chip: Realtek card reader chip
 *
 * Releases the xD and MS logical-to-physical mapping tables and marks all
 * card slots as empty and not ready.
 */
void rtsx_release_chip(struct rtsx_chip *chip)
{
	xd_free_l2p_tbl(chip);
	ms_free_l2p_tbl(chip);
	chip->card_exist = 0;
	chip->card_ready = 0;
}
#if !defined(LED_AUTO_BLINK) && defined(REGULAR_BLINK)
/*
 * Advance the LED blink counter once per polling tick and toggle the LED
 * GPIO every LED_TOGGLE_INTERVAL ticks, but only while a card is present
 * and blinking is enabled.
 */
static inline void rtsx_blink_led(struct rtsx_chip *chip)
{
	if (!chip->card_exist || !chip->blink_led)
		return;
	if (chip->led_toggle_counter >= LED_TOGGLE_INTERVAL) {
		chip->led_toggle_counter = 0;
		toggle_gpio(chip, LED_GPIO);
	} else {
		chip->led_toggle_counter++;
	}
}
#endif
/*
 * rtsx_monitor_aspm_config - track externally-changed ASPM settings
 * @chip: Realtek card reader chip
 *
 * Samples the PCIe Link Control register (and the SDIO function's copy in
 * config space when one exists).  If either value differs from the cached
 * copy, the cache is refreshed, chip->aspm_l0s_l1_en may be re-enabled,
 * and the new levels are mirrored into ASPM_FORCE_CTL.
 */
static void rtsx_monitor_aspm_config(struct rtsx_chip *chip)
{
	bool reg_changed, maybe_support_aspm;
	u32 tmp = 0;
	u8 reg0 = 0, reg1 = 0;

	maybe_support_aspm = false;
	reg_changed = false;
	rtsx_read_config_byte(chip, LCTLR, &reg0);
	if (chip->aspm_level[0] != reg0) {
		reg_changed = true;
		chip->aspm_level[0] = reg0;
	}
	if (CHK_SDIO_EXIST(chip) && !CHK_SDIO_IGNORED(chip)) {
		/* The SDIO function keeps its own copy at config 0xC0. */
		rtsx_read_cfg_dw(chip, 1, 0xC0, &tmp);
		reg1 = (u8)tmp;
		if (chip->aspm_level[1] != reg1) {
			reg_changed = true;
			chip->aspm_level[1] = reg1;
		}
		/* ASPM is only considered when both functions enable it. */
		if ((reg0 & 0x03) && (reg1 & 0x03))
			maybe_support_aspm = true;
	} else {
		if (reg0 & 0x03)
			maybe_support_aspm = true;
	}
	if (reg_changed) {
		if (maybe_support_aspm)
			chip->aspm_l0s_l1_en = 0x03;
		dev_dbg(rtsx_dev(chip),
			"aspm_level[0] = 0x%02x, aspm_level[1] = 0x%02x\n",
			chip->aspm_level[0], chip->aspm_level[1]);
		if (chip->aspm_l0s_l1_en) {
			chip->aspm_enabled = 1;
		} else {
			chip->aspm_enabled = 0;
			chip->sdio_aspm = 0;
		}
		rtsx_write_register(chip, ASPM_FORCE_CTL, 0xFF,
				    0x30 | chip->aspm_level[0] |
				    (chip->aspm_level[1] << 2));
	}
}
/*
 * Handle a pending over-current event: latch OCPSTAT, power off whichever
 * card slot is occupied, and clear the pending flag.  Compiled out when
 * over-current protection (SUPPORT_OCP) is disabled.
 */
static void rtsx_manage_ocp(struct rtsx_chip *chip)
{
#ifdef SUPPORT_OCP
	if (!chip->ocp_int)
		return;
	rtsx_read_register(chip, OCPSTAT, &chip->ocp_stat);
	if (chip->card_exist & SD_CARD)
		sd_power_off_card3v3(chip);
	else if (chip->card_exist & MS_CARD)
		ms_power_off_card3v3(chip);
	else if (chip->card_exist & XD_CARD)
		xd_power_off_card3v3(chip);
	chip->ocp_int = 0;
#endif
}
/*
 * While an SD erase is pending, sample register 0xFD30; if bit 1 is set
 * the erase is treated as finished: notify the lock state machine and
 * flag the SD card for re-initialization.  If the card vanished mid-erase,
 * just clear the erase state.  No-op unless SUPPORT_SD_LOCK is set.
 */
static void rtsx_manage_sd_lock(struct rtsx_chip *chip)
{
#ifdef SUPPORT_SD_LOCK
	struct sd_info *sd_card = &chip->sd_card;
	u8 val;

	if (!sd_card->sd_erase_status)
		return;
	if (chip->card_exist & SD_CARD) {
		rtsx_read_register(chip, 0xFD30, &val);
		if (val & 0x02) {
			sd_card->sd_erase_status = SD_NOT_ERASE;
			sd_card->sd_lock_notify = 1;
			chip->need_reinit |= SD_CARD;
		}
	} else {
		/* Card was pulled mid-erase; drop the stale state. */
		sd_card->sd_erase_status = SD_NOT_ERASE;
	}
#endif
}
/*
 * Decide whether selective suspend may be entered right now.  SS is never
 * used when disabled or on RTS5288; an SDIO function whose config dword at
 * 0x04 has any of its three low bits set also blocks it.
 */
static bool rtsx_is_ss_allowed(struct rtsx_chip *chip)
{
	u32 cfg;

	if (!chip->ss_en || CHECK_PID(chip, 0x5288))
		return false;
	if (CHK_SDIO_EXIST(chip) && !CHK_SDIO_IGNORED(chip)) {
		rtsx_read_cfg_dw(chip, 1, 0x04, &cfg);
		if (cfg & 0x07)
			return false;
	}
	return true;
}
/*
 * Count consecutive idle polling ticks and enter selective suspend once
 * ss_idle_period worth of ticks has elapsed.  Any non-idle activity (or an
 * active SDIO path) restarts the countdown.
 */
static void rtsx_manage_ss(struct rtsx_chip *chip)
{
	if (!rtsx_is_ss_allowed(chip) || chip->sd_io)
		return;
	if (rtsx_get_stat(chip) != RTSX_STAT_IDLE) {
		chip->ss_counter = 0;
		return;
	}
	if (chip->ss_counter >= (chip->ss_idle_period / POLLING_INTERVAL))
		rtsx_exclusive_enter_ss(chip);
	else
		chip->ss_counter++;
}
/*
 * Per-poll ASPM upkeep for RTS5208: refresh the cached link-control state
 * and, when dynamic SDIO ASPM is compiled in, push the idle SDIO function
 * into ASPM once the SD path is quiescent.
 */
static void rtsx_manage_aspm(struct rtsx_chip *chip)
{
	u8 data;

	if (!CHECK_PID(chip, 0x5208))
		return;
	rtsx_monitor_aspm_config(chip);
#ifdef SUPPORT_SDIO_ASPM
	if (!CHK_SDIO_EXIST(chip) || CHK_SDIO_IGNORED(chip) ||
	    !chip->aspm_l0s_l1_en || !chip->dynamic_aspm)
		return;
	if (chip->sd_io) {
		/* SDIO is active: let the dynamic policy manage it. */
		dynamic_configure_sdio_aspm(chip);
		return;
	}
	if (chip->sdio_aspm)
		return;
	dev_dbg(rtsx_dev(chip), "SDIO enter ASPM!\n");
	data = 0x30 | (chip->aspm_level[1] << 2);
	rtsx_write_register(chip, ASPM_FORCE_CTL, 0xFC, data);
	chip->sdio_aspm = 1;
#endif
}
/*
 * Count polling ticks; after IDLE_MAX_COUNT consecutive ticks move the
 * chip into the IDLE state: LED off, SSC kept powered, and optionally an
 * automatic power-down when no card is ready and SDIO is inactive.
 */
static void rtsx_manage_idle(struct rtsx_chip *chip)
{
	if (chip->idle_counter < IDLE_MAX_COUNT) {
		chip->idle_counter++;
		return;
	}
	if (rtsx_get_stat(chip) == RTSX_STAT_IDLE)
		return;
	dev_dbg(rtsx_dev(chip), "Idle state!\n");
	rtsx_set_stat(chip, RTSX_STAT_IDLE);
#if !defined(LED_AUTO_BLINK) && defined(REGULAR_BLINK)
	chip->led_toggle_counter = 0;
#endif
	/* Keep the SSC block alive while idling, then park the LED. */
	rtsx_force_power_on(chip, SSC_PDCTL);
	turn_off_led(chip, LED_GPIO);
	if (chip->auto_power_down && !chip->card_ready && !chip->sd_io)
		rtsx_force_power_down(chip, SSC_PDCTL | OC_PDCTL);
}
/*
 * Two-LUN (SD+MS) over-current handling: on a latched OC event for a slot
 * whose card is present, disable its output enables, cut its power, and
 * mark the card as failed.  Compiled out without SUPPORT_OCP.
 */
static void rtsx_manage_2lun_mode(struct rtsx_chip *chip)
{
#ifdef SUPPORT_OCP
	u8 sd_oc, ms_oc;

	sd_oc = chip->ocp_stat & (SD_OC_NOW | SD_OC_EVER);
	ms_oc = chip->ocp_stat & (MS_OC_NOW | MS_OC_EVER);
	if (sd_oc || ms_oc)
		dev_dbg(rtsx_dev(chip), "Over current, OCPSTAT is 0x%x\n",
			chip->ocp_stat);
	if (sd_oc && (chip->card_exist & SD_CARD)) {
		rtsx_write_register(chip, CARD_OE, SD_OUTPUT_EN, 0);
		card_power_off(chip, SD_CARD);
		chip->card_fail |= SD_CARD;
	}
	if (ms_oc && (chip->card_exist & MS_CARD)) {
		rtsx_write_register(chip, CARD_OE, MS_OUTPUT_EN, 0);
		card_power_off(chip, MS_CARD);
		chip->card_fail |= MS_CARD;
	}
#endif
}
/*
 * Single-LUN over-current handling: the SD OC status bits cover the shared
 * socket, so disable the output of whichever card type is present, mark it
 * failed, and cut the shared power rail (addressed as SD_CARD).  Compiled
 * out without SUPPORT_OCP.
 */
static void rtsx_manage_1lun_mode(struct rtsx_chip *chip)
{
#ifdef SUPPORT_OCP
	if (!(chip->ocp_stat & (SD_OC_NOW | SD_OC_EVER)))
		return;
	dev_dbg(rtsx_dev(chip), "Over current, OCPSTAT is 0x%x\n",
		chip->ocp_stat);
	if (chip->card_exist & SD_CARD) {
		rtsx_write_register(chip, CARD_OE, SD_OUTPUT_EN, 0);
		chip->card_fail |= SD_CARD;
	} else if (chip->card_exist & MS_CARD) {
		rtsx_write_register(chip, CARD_OE, MS_OUTPUT_EN, 0);
		chip->card_fail |= MS_CARD;
	} else if (chip->card_exist & XD_CARD) {
		rtsx_write_register(chip, CARD_OE, XD_OUTPUT_EN, 0);
		chip->card_fail |= XD_CARD;
	}
	card_power_off(chip, SD_CARD);
#endif
}
static void rtsx_delink_stage1(struct rtsx_chip *chip, int enter_L1,
int stage3_cnt)
{
u8 val;
rtsx_set_stat(chip, RTSX_STAT_DELINK);
if (chip->asic_code && CHECK_PID(chip, 0x5208))
rtsx_set_phy_reg_bit(chip, 0x1C, 2);
if (chip->card_exist)
dev_dbg(rtsx_dev(chip), "False card inserted, do force delink\n");
else
dev_dbg(rtsx_dev(chip), "No card inserted, do delink\n");
if (enter_L1)
rtsx_write_register(chip, HOST_SLEEP_STATE, 0x03, 1);
if (chip->card_exist)
val = 0x02;
else
val = 0x0A;
rtsx_write_register(chip, CHANGE_LINK_STATE, val, val);
if (enter_L1)
rtsx_enter_L1(chip);
if (chip->card_exist)
chip->auto_delink_cnt = stage3_cnt + 1;
}
/*
 * rtsx_delink_stage - drive the three-stage auto-delink state machine
 * @chip: Realtek card reader chip
 *
 * Called once per polling tick.  chip->auto_delink_cnt advances through
 * stage 1 (normal delink, optionally via L1) and stage 2 (force delink)
 * at configurable tick offsets; beyond stage 3 the machine is parked.
 * Any condition that makes delink inappropriate resets the counter.
 */
static void rtsx_delink_stage(struct rtsx_chip *chip)
{
	int delink_stage1_cnt, delink_stage2_cnt, delink_stage3_cnt;
	int enter_L1;

	if (!chip->auto_delink_en || !chip->auto_delink_allowed ||
	    chip->card_ready || chip->card_ejected || chip->sd_io) {
		chip->auto_delink_cnt = 0;
		return;
	}
	/* Only drop to L1 when a wake-up path (ASPM or SS) is configured. */
	enter_L1 = chip->auto_delink_in_L1 &&
		(chip->aspm_l0s_l1_en || chip->ss_en);
	delink_stage1_cnt = chip->delink_stage1_step;
	delink_stage2_cnt = delink_stage1_cnt + chip->delink_stage2_step;
	delink_stage3_cnt = delink_stage2_cnt + chip->delink_stage3_step;
	if (chip->auto_delink_cnt > delink_stage3_cnt)
		return;
	if (chip->auto_delink_cnt == delink_stage1_cnt)
		rtsx_delink_stage1(chip, enter_L1, delink_stage3_cnt);
	if (chip->auto_delink_cnt == delink_stage2_cnt) {
		dev_dbg(rtsx_dev(chip), "Try to do force delink\n");
		if (enter_L1)
			rtsx_exit_L1(chip);
		if (chip->asic_code && CHECK_PID(chip, 0x5208))
			rtsx_set_phy_reg_bit(chip, 0x1C, 2);
		rtsx_write_register(chip, CHANGE_LINK_STATE, 0x0A, 0x0A);
	}
	chip->auto_delink_cnt++;
}
/*
 * rtsx_polling_func - periodic housekeeping entry point
 * @chip: Realtek card reader chip
 *
 * Skips all work while suspended; while delinking only the delink state
 * machine runs.  Otherwise handles over-current, SD lock, card init,
 * selective suspend, ASPM, and idle accounting, then per-state work (LED
 * blink / deferred work when running, SDIO switch + ASPM when idle),
 * followed by the LUN-mode-specific over-current management.
 */
void rtsx_polling_func(struct rtsx_chip *chip)
{
	if (rtsx_chk_stat(chip, RTSX_STAT_SUSPEND))
		return;
	if (rtsx_chk_stat(chip, RTSX_STAT_DELINK))
		goto delink_stage;
	if (chip->polling_config) {
		u8 val;

		/*
		 * Dummy config-space read; NOTE(review): presumably keeps
		 * the link/config path exercised — confirm intent.
		 */
		rtsx_read_config_byte(chip, 0, &val);
	}
	if (rtsx_chk_stat(chip, RTSX_STAT_SS))
		return;
	rtsx_manage_ocp(chip);
	rtsx_manage_sd_lock(chip);
	rtsx_init_cards(chip);
	rtsx_manage_ss(chip);
	rtsx_manage_aspm(chip);
	rtsx_manage_idle(chip);
	switch (rtsx_get_stat(chip)) {
	case RTSX_STAT_RUN:
#if !defined(LED_AUTO_BLINK) && defined(REGULAR_BLINK)
		rtsx_blink_led(chip);
#endif
		do_remaining_work(chip);
		break;
	case RTSX_STAT_IDLE:
		if (chip->sd_io && !chip->sd_int)
			try_to_switch_sdio_ctrl(chip);
		rtsx_enable_aspm(chip);
		break;
	default:
		break;
	}
	if (CHECK_LUN_MODE(chip, SD_MS_2LUN))
		rtsx_manage_2lun_mode(chip);
	else
		rtsx_manage_1lun_mode(chip);
delink_stage:
	rtsx_delink_stage(chip);
}
/**
* rtsx_stop_cmd - stop command transfer and DMA transfer
* @chip: Realtek's card reader chip
* @card: flash card type
*
* Stop command transfer and DMA transfer.
* This function is called in error handler.
*/
void rtsx_stop_cmd(struct rtsx_chip *chip, int card)
{
	int i;

	/* Dump the nine host BAR dwords for post-mortem debugging. */
	for (i = 0; i <= 8; i++) {
		int addr = RTSX_HCBAR + i * 4;
		u32 reg = rtsx_readl(chip, addr);

		dev_dbg(rtsx_dev(chip), "BAR (0x%02x): 0x%08x\n", addr, reg);
	}
	/* Halt both the command engine and the DMA engine. */
	rtsx_writel(chip, RTSX_HCBCTLR, STOP_CMD);
	rtsx_writel(chip, RTSX_HDBCTLR, STOP_DMA);
	/* Dump the sixteen registers at 0xFE20..0xFE2F as well. */
	for (i = 0; i < 16; i++) {
		u16 addr = 0xFE20 + (u16)i;
		u8 val;

		rtsx_read_register(chip, addr, &val);
		dev_dbg(rtsx_dev(chip), "0x%04X: 0x%02x\n", addr, val);
	}
	/* Reset the DMA controller and the ring buffer. */
	rtsx_write_register(chip, DMACTL, 0x80, 0x80);
	rtsx_write_register(chip, RBCTL, 0x80, 0x80);
}
#define MAX_RW_REG_CNT 1024
/*
 * Masked write of one chip register through the host access interface.
 * Packs opcode 3, the 14-bit address, mask and data into RTSX_HAIMR, then
 * busy-polls bit 31.  Returns STATUS_SUCCESS when the chip echoes the data
 * back, STATUS_FAIL on a mismatch, STATUS_TIMEDOUT when the busy bit never
 * clears.
 */
int rtsx_write_register(struct rtsx_chip *chip, u16 addr, u8 mask, u8 data)
{
	u32 cmd;
	int i;

	cmd = (3u << 30) | ((u32)(addr & 0x3FFF) << 16) |
	      ((u32)mask << 8) | data;
	rtsx_writel(chip, RTSX_HAIMR, cmd);
	for (i = 0; i < MAX_RW_REG_CNT; i++) {
		cmd = rtsx_readl(chip, RTSX_HAIMR);
		if (cmd & BIT(31))
			continue;
		/* Completed: the low byte echoes the value written. */
		return (u8)cmd == data ? STATUS_SUCCESS : STATUS_FAIL;
	}
	return STATUS_TIMEDOUT;
}
/*
 * Read one chip register through the host access interface.  Issues opcode
 * 2 with the 14-bit address via RTSX_HAIMR and busy-polls bit 31; on
 * completion the low byte of the readback holds the register value.
 * @data may be NULL; it is pre-cleared so it is defined on every path.
 */
int rtsx_read_register(struct rtsx_chip *chip, u16 addr, u8 *data)
{
	u32 cmd = (2u << 30) | ((u32)(addr & 0x3FFF) << 16);
	int i;

	if (data)
		*data = 0;
	rtsx_writel(chip, RTSX_HAIMR, cmd);
	for (i = 0; i < MAX_RW_REG_CNT; i++) {
		cmd = rtsx_readl(chip, RTSX_HAIMR);
		if (!(cmd & BIT(31)))
			break;
	}
	if (i >= MAX_RW_REG_CNT)
		return STATUS_TIMEDOUT;
	if (data)
		*data = (u8)(cmd & 0xFF);
	return STATUS_SUCCESS;
}
/*
 * rtsx_write_cfg_dw - masked dword write to a function's PCI config space
 * @chip: Realtek card reader chip
 * @func_no: PCI function number (0-3)
 * @addr: config-space address of the dword
 * @mask: per-bit write-enable mask
 * @val: value to write under @mask
 *
 * Stages the masked bytes in CFGDATA0..3 while building a byte-enable
 * pattern, then programs the address, triggers the transfer (bit 7 of
 * CFGRWCTL) and polls until the busy bit clears.  NOTE(review): a poll
 * timeout is not reported — the function returns STATUS_SUCCESS anyway;
 * confirm this is intended.
 */
int rtsx_write_cfg_dw(struct rtsx_chip *chip, u8 func_no, u16 addr, u32 mask,
		      u32 val)
{
	int retval;
	u8 mode = 0, tmp;
	int i;

	/* Stage only bytes whose mask is non-zero; collect byte enables. */
	for (i = 0; i < 4; i++) {
		if (mask & 0xFF) {
			retval = rtsx_write_register(chip, CFGDATA0 + i,
						     0xFF,
						     (u8)(val & mask & 0xFF));
			if (retval)
				return retval;
			mode |= (1 << i);
		}
		mask >>= 8;
		val >>= 8;
	}
	if (mode) {
		retval = rtsx_write_register(chip, CFGADDR0, 0xFF, (u8)addr);
		if (retval)
			return retval;
		retval = rtsx_write_register(chip, CFGADDR1, 0xFF,
					     (u8)(addr >> 8));
		if (retval)
			return retval;
		/* Bit 7 starts the write; it stays set while busy. */
		retval = rtsx_write_register(chip, CFGRWCTL, 0xFF,
					     0x80 | mode |
					     ((func_no & 0x03) << 4));
		if (retval)
			return retval;
		for (i = 0; i < MAX_RW_REG_CNT; i++) {
			retval = rtsx_read_register(chip, CFGRWCTL, &tmp);
			if (retval)
				return retval;
			if ((tmp & 0x80) == 0)
				break;
		}
	}
	return STATUS_SUCCESS;
}
/*
 * rtsx_read_cfg_dw - read one dword from a function's PCI config space
 * @chip: Realtek card reader chip
 * @func_no: PCI function number (0-3)
 * @addr: config-space address of the dword
 * @val: out parameter for the value read (may be NULL)
 *
 * Programs the address, triggers the read (bit 7 of CFGRWCTL), polls until
 * the busy bit clears, then assembles the dword from CFGDATA0..3.
 * NOTE(review): a poll timeout is not reported — the data registers are
 * read regardless; confirm this is intended.
 */
int rtsx_read_cfg_dw(struct rtsx_chip *chip, u8 func_no, u16 addr, u32 *val)
{
	int retval;
	int i;
	u8 tmp;
	u32 data = 0;

	retval = rtsx_write_register(chip, CFGADDR0, 0xFF, (u8)addr);
	if (retval)
		return retval;
	retval = rtsx_write_register(chip, CFGADDR1, 0xFF, (u8)(addr >> 8));
	if (retval)
		return retval;
	retval = rtsx_write_register(chip, CFGRWCTL, 0xFF,
				     0x80 | ((func_no & 0x03) << 4));
	if (retval)
		return retval;
	for (i = 0; i < MAX_RW_REG_CNT; i++) {
		retval = rtsx_read_register(chip, CFGRWCTL, &tmp);
		if (retval)
			return retval;
		if ((tmp & 0x80) == 0)
			break;
	}
	/* Little-endian assembly of the four data bytes. */
	for (i = 0; i < 4; i++) {
		retval = rtsx_read_register(chip, CFGDATA0 + i, &tmp);
		if (retval)
			return retval;
		data |= (u32)tmp << (i * 8);
	}
	if (val)
		*val = data;
	return STATUS_SUCCESS;
}
/*
 * rtsx_write_cfg_seq - write a byte sequence into PCI config space
 * @chip: Realtek card reader chip
 * @func: PCI function number
 * @addr: starting (possibly unaligned) config-space address
 * @buf: bytes to write; must not be NULL
 * @len: number of bytes in @buf
 *
 * Scatters @buf into dword-aligned data/mask pairs and flushes each dword
 * with rtsx_write_cfg_dw(), so bytes outside [addr, addr + len) are left
 * untouched.  Returns STATUS_SUCCESS, STATUS_NOMEM on allocation failure
 * or NULL @buf, or STATUS_FAIL when a config write fails.
 */
int rtsx_write_cfg_seq(struct rtsx_chip *chip, u8 func, u16 addr, u8 *buf,
		       int len)
{
	u32 *data, *mask;
	u16 offset = addr % 4;
	u16 aligned_addr = addr - offset;
	int dw_len, i, j;
	int retval = STATUS_SUCCESS;
	size_t size;

	if (!buf)
		return STATUS_NOMEM;
	/* Number of dwords spanned by [addr, addr + len). */
	if ((len + offset) % 4)
		dw_len = (len + offset) / 4 + 1;
	else
		dw_len = (len + offset) / 4;
	dev_dbg(rtsx_dev(chip), "dw_len = %d\n", dw_len);
	size = array_size(dw_len, 4);
	data = vzalloc(size);
	if (!data)
		return STATUS_NOMEM;
	mask = vzalloc(size);
	if (!mask) {
		vfree(data);
		return STATUS_NOMEM;
	}
	/*
	 * Scatter the byte buffer into dword-sized data/mask pairs.  Cast
	 * to u32 before shifting: "0xFF << 24" and "buf[i] << 24" would
	 * left-shift into the sign bit of a promoted int, which is
	 * undefined behaviour (C11 6.5.7).
	 */
	j = 0;
	for (i = 0; i < len; i++) {
		mask[j] |= (u32)0xFF << (offset * 8);
		data[j] |= (u32)buf[i] << (offset * 8);
		if (++offset == 4) {
			j++;
			offset = 0;
		}
	}
	print_hex_dump_bytes(KBUILD_MODNAME ": ", DUMP_PREFIX_NONE, mask, size);
	print_hex_dump_bytes(KBUILD_MODNAME ": ", DUMP_PREFIX_NONE, data, size);
	/* Flush each dword with its write-enable mask; stop on failure. */
	for (i = 0; i < dw_len; i++) {
		if (rtsx_write_cfg_dw(chip, func, aligned_addr + i * 4,
				      mask[i], data[i]) != STATUS_SUCCESS) {
			retval = STATUS_FAIL;
			break;
		}
	}
	/* Single exit point: both buffers are freed on every path. */
	vfree(data);
	vfree(mask);
	return retval;
}
/*
 * rtsx_read_cfg_seq - read a byte sequence out of PCI config space
 * @chip: Realtek card reader chip
 * @func: PCI function number
 * @addr: starting (possibly unaligned) config-space address
 * @buf: destination buffer (may be NULL to just exercise the reads)
 * @len: number of bytes to read
 *
 * Reads every dword covering [addr, addr + len) via rtsx_read_cfg_dw(),
 * then copies the requested bytes into @buf.  Returns STATUS_SUCCESS,
 * STATUS_NOMEM on allocation failure, or STATUS_FAIL on a read error.
 */
int rtsx_read_cfg_seq(struct rtsx_chip *chip, u8 func, u16 addr, u8 *buf,
		      int len)
{
	u32 *data;
	u16 offset = addr % 4;
	u16 aligned_addr = addr - offset;
	int dw_len, i, j;

	/* Round the span [addr, addr + len) up to whole dwords. */
	dw_len = (len + offset) / 4;
	if ((len + offset) % 4)
		dw_len++;
	dev_dbg(rtsx_dev(chip), "dw_len = %d\n", dw_len);
	data = vmalloc(array_size(dw_len, 4));
	if (!data)
		return STATUS_NOMEM;
	/* Pull every covering dword out of config space. */
	for (i = 0; i < dw_len; i++) {
		if (rtsx_read_cfg_dw(chip, func, aligned_addr + i * 4,
				     data + i) != STATUS_SUCCESS) {
			vfree(data);
			return STATUS_FAIL;
		}
	}
	/* Gather the requested bytes back out of the dword array. */
	if (buf) {
		j = 0;
		for (i = 0; i < len; i++) {
			buf[i] = (u8)(data[j] >> (offset * 8));
			if (++offset == 4) {
				j++;
				offset = 0;
			}
		}
	}
	vfree(data);
	return STATUS_SUCCESS;
}
/*
 * Write a 16-bit value to a PHY register: load the payload into
 * PHYDATA0/1, select the address, kick the write (0x81 in PHYRWCTL), and
 * poll bit 7 until the transfer finishes.  Returns STATUS_SUCCESS, the
 * first failing register-access code, or STATUS_FAIL on poll exhaustion.
 */
int rtsx_write_phy_register(struct rtsx_chip *chip, u8 addr, u16 val)
{
	int retval;
	int i;
	u8 ctl;

	retval = rtsx_write_register(chip, PHYDATA0, 0xFF, (u8)val);
	if (retval)
		return retval;
	retval = rtsx_write_register(chip, PHYDATA1, 0xFF, (u8)(val >> 8));
	if (retval)
		return retval;
	retval = rtsx_write_register(chip, PHYADDR, 0xFF, addr);
	if (retval)
		return retval;
	/* 0x81 = start a PHY write; bit 7 stays set while busy. */
	retval = rtsx_write_register(chip, PHYRWCTL, 0xFF, 0x81);
	if (retval)
		return retval;
	for (i = 0; i < 100000; i++) {
		retval = rtsx_read_register(chip, PHYRWCTL, &ctl);
		if (retval)
			return retval;
		if (!(ctl & 0x80))
			return STATUS_SUCCESS;
	}
	return STATUS_FAIL;
}
/*
 * Read a 16-bit value from a PHY register: select the address, kick the
 * read (0x80 in PHYRWCTL), poll bit 7 until done, then assemble the value
 * from PHYDATA0/1.  @val may be NULL.  Returns STATUS_SUCCESS, the first
 * failing register-access code, or STATUS_FAIL on poll exhaustion.
 */
int rtsx_read_phy_register(struct rtsx_chip *chip, u8 addr, u16 *val)
{
	int retval;
	int i;
	u8 byte;
	u16 data;

	retval = rtsx_write_register(chip, PHYADDR, 0xFF, addr);
	if (retval)
		return retval;
	/* 0x80 = start a PHY read; bit 7 stays set while busy. */
	retval = rtsx_write_register(chip, PHYRWCTL, 0xFF, 0x80);
	if (retval)
		return retval;
	for (i = 0; i < 100000; i++) {
		retval = rtsx_read_register(chip, PHYRWCTL, &byte);
		if (retval)
			return retval;
		if (!(byte & 0x80))
			break;
	}
	if (i >= 100000)
		return STATUS_FAIL;
	/* Assemble the result from the two data-byte registers. */
	retval = rtsx_read_register(chip, PHYDATA0, &byte);
	if (retval)
		return retval;
	data = byte;
	retval = rtsx_read_register(chip, PHYDATA1, &byte);
	if (retval)
		return retval;
	data |= (u16)byte << 8;
	if (val)
		*val = data;
	return STATUS_SUCCESS;
}
/*
 * rtsx_read_efuse - read one byte from the on-chip efuse
 * @chip: Realtek card reader chip
 * @addr: efuse address
 * @val: out parameter for the byte read (may be NULL)
 *
 * Triggers a read (0x80 | addr) via EFUSE_CTRL, polls the busy bit (bit 7)
 * up to 100 times with 1 us spacing, then fetches the result from
 * EFUSE_DATA.  Returns STATUS_SUCCESS, a register-access error code, or
 * STATUS_TIMEDOUT when the busy bit never clears.
 */
int rtsx_read_efuse(struct rtsx_chip *chip, u8 addr, u8 *val)
{
	int retval;
	int i;
	u8 data = 0;

	retval = rtsx_write_register(chip, EFUSE_CTRL, 0xFF, 0x80 | addr);
	if (retval)
		return retval;
	for (i = 0; i < 100; i++) {
		retval = rtsx_read_register(chip, EFUSE_CTRL, &data);
		if (retval)
			return retval;
		if (!(data & 0x80))
			break;
		udelay(1);
	}
	if (data & 0x80)
		return STATUS_TIMEDOUT;
	retval = rtsx_read_register(chip, EFUSE_DATA, &data);
	if (retval)
		return retval;
	if (val)
		*val = data;
	return STATUS_SUCCESS;
}
/*
 * rtsx_write_efuse - program one efuse byte
 * @chip: Realtek card reader chip
 * @addr: efuse address to program
 * @val: value to program
 *
 * Each zero bit of @val is programmed individually (bits already 1 in
 * @val are skipped — efuse bits presumably only transition 1->0): write
 * the accumulated pattern to EFUSE_DATA, trigger the program command
 * (0xA0 | addr), and poll the busy bit with 3 ms spacing, up to 100 polls
 * per bit, with a 5 ms settle between bits.
 */
int rtsx_write_efuse(struct rtsx_chip *chip, u8 addr, u8 val)
{
	int retval;
	int i, j;
	u8 data = 0, tmp = 0xFF;

	for (i = 0; i < 8; i++) {
		if (val & (u8)(1 << i))
			continue;
		/* Clear only this bit in the accumulated pattern. */
		tmp &= (~(u8)(1 << i));
		dev_dbg(rtsx_dev(chip), "Write 0x%x to 0x%x\n", tmp, addr);
		retval = rtsx_write_register(chip, EFUSE_DATA, 0xFF, tmp);
		if (retval)
			return retval;
		retval = rtsx_write_register(chip, EFUSE_CTRL, 0xFF,
					     0xA0 | addr);
		if (retval)
			return retval;
		for (j = 0; j < 100; j++) {
			retval = rtsx_read_register(chip, EFUSE_CTRL, &data);
			if (retval)
				return retval;
			if (!(data & 0x80))
				break;
			wait_timeout(3);
		}
		if (data & 0x80)
			return STATUS_TIMEDOUT;
		/* Settle time between consecutive bit programs. */
		wait_timeout(5);
	}
	return STATUS_SUCCESS;
}
/*
 * Clear one bit of a PHY register using a read-modify-write cycle.  The
 * write-back is skipped when the bit is already clear.
 */
int rtsx_clr_phy_reg_bit(struct rtsx_chip *chip, u8 reg, u8 bit)
{
	u16 value;

	if (rtsx_read_phy_register(chip, reg, &value) != STATUS_SUCCESS)
		return STATUS_FAIL;
	if (!(value & (1 << bit)))
		return STATUS_SUCCESS;
	value &= ~(1 << bit);
	if (rtsx_write_phy_register(chip, reg, value) != STATUS_SUCCESS)
		return STATUS_FAIL;
	return STATUS_SUCCESS;
}
/*
 * Set one bit of a PHY register using a read-modify-write cycle.  The
 * write-back is skipped when the bit is already set.
 */
int rtsx_set_phy_reg_bit(struct rtsx_chip *chip, u8 reg, u8 bit)
{
	u16 value;

	if (rtsx_read_phy_register(chip, reg, &value) != STATUS_SUCCESS)
		return STATUS_FAIL;
	if (value & (1 << bit))
		return STATUS_SUCCESS;
	value |= (1 << bit);
	if (rtsx_write_phy_register(chip, reg, value) != STATUS_SUCCESS)
		return STATUS_FAIL;
	return STATUS_SUCCESS;
}
/*
 * Program the PCI power-state field (config 0x44, with 0x45 cleared) to
 * @dstate, mirroring it into the SDIO function's config dword at 0x84
 * when an SDIO function exists.
 */
static void rtsx_handle_pm_dstate(struct rtsx_chip *chip, u8 dstate)
{
	u32 cfg;

	dev_dbg(rtsx_dev(chip), "%04x set pm_dstate to %d\n",
		chip->product_id, dstate);
	if (CHK_SDIO_EXIST(chip)) {
		/* The SDIO function number differs between 5288 and 5208. */
		u8 func_no = CHECK_PID(chip, 0x5288) ? 2 : 1;

		rtsx_read_cfg_dw(chip, func_no, 0x84, &cfg);
		dev_dbg(rtsx_dev(chip), "pm_dstate of function %d: 0x%x\n",
			(int)func_no, cfg);
		rtsx_write_cfg_dw(chip, func_no, 0x84, 0xFF, dstate);
	}
	rtsx_write_config_byte(chip, 0x44, dstate);
	rtsx_write_config_byte(chip, 0x45, 0);
}
/*
 * Enter L1 by setting the PCI power-state field (config byte 0x44) to 2
 * via rtsx_handle_pm_dstate(), which also covers the SDIO function.
 */
void rtsx_enter_L1(struct rtsx_chip *chip)
{
	rtsx_handle_pm_dstate(chip, 2);
}
/* Leave L1: clear the PCI power-state field (config bytes 0x44/0x45). */
void rtsx_exit_L1(struct rtsx_chip *chip)
{
	rtsx_write_config_byte(chip, 0x44, 0);
	rtsx_write_config_byte(chip, 0x45, 0);
}
/*
 * rtsx_enter_ss - put the chip into selective suspend
 * @chip: Realtek card reader chip
 *
 * Optionally powers the card off, parks the SDIO function, arms the wake
 * source (host sleep state when auto-delink is enabled, otherwise the
 * card interrupt), then enters L1 and records the SS state.
 */
void rtsx_enter_ss(struct rtsx_chip *chip)
{
	dev_dbg(rtsx_dev(chip), "Enter Selective Suspend State!\n");
	rtsx_write_register(chip, IRQSTAT0, LINK_RDY_INT, LINK_RDY_INT);
	if (chip->power_down_in_ss) {
		rtsx_power_off_card(chip);
		rtsx_force_power_down(chip, SSC_PDCTL | OC_PDCTL);
	}
	if (CHK_SDIO_EXIST(chip))
		/* SDIO function 2 on RTS5288, function 1 otherwise. */
		rtsx_write_cfg_dw(chip, CHECK_PID(chip, 0x5288) ? 2 : 1,
				  0xC0, 0xFF00, 0x0100);
	if (chip->auto_delink_en) {
		rtsx_write_register(chip, HOST_SLEEP_STATE, 0x01, 0x01);
	} else {
		if (!chip->phy_debug_mode) {
			u32 tmp;

			/* Let a card interrupt wake us out of SS. */
			tmp = rtsx_readl(chip, RTSX_BIER);
			tmp |= CARD_INT;
			rtsx_writel(chip, RTSX_BIER, tmp);
		}
		rtsx_write_register(chip, CHANGE_LINK_STATE, 0x02, 0);
	}
	rtsx_enter_L1(chip);
	RTSX_CLR_DELINK(chip);
	rtsx_set_stat(chip, RTSX_STAT_SS);
}
/*
 * rtsx_exit_ss - leave selective suspend and restore card state
 * @chip: Realtek card reader chip
 *
 * Exits L1, restores power if it was dropped on entry, then re-initializes
 * all cards — with a reset first when a delink happened while suspended.
 */
void rtsx_exit_ss(struct rtsx_chip *chip)
{
	dev_dbg(rtsx_dev(chip), "Exit Selective Suspend State!\n");
	rtsx_exit_L1(chip);
	if (chip->power_down_in_ss) {
		rtsx_force_power_on(chip, SSC_PDCTL | OC_PDCTL);
		udelay(1000);
	}
	if (RTSX_TST_DELINK(chip)) {
		chip->need_reinit = SD_CARD | MS_CARD | XD_CARD;
		rtsx_reinit_cards(chip, 1);
		RTSX_CLR_DELINK(chip);
	} else if (chip->power_down_in_ss) {
		chip->need_reinit = SD_CARD | MS_CARD | XD_CARD;
		rtsx_reinit_cards(chip, 0);
	}
}
/*
 * rtsx_pre_handle_interrupt - classify a raw interrupt into card events
 * @chip: Realtek card reader chip
 *
 * Reads BIER/BIPR, rejects spurious interrupts (no enabled bit pending,
 * or an all-ones readback), and translates the card insert/remove and
 * over-current bits into the need_reset / need_release / need_reinit
 * bitmaps.  Exits selective suspend first when it was active.
 *
 * Returns STATUS_SUCCESS when the interrupt was ours, STATUS_FAIL if not.
 */
int rtsx_pre_handle_interrupt(struct rtsx_chip *chip)
{
	u32 status, int_enable;
	bool exit_ss = false;
#ifdef SUPPORT_OCP
	u32 ocp_int = 0;

	ocp_int = OC_INT;
#endif
	if (chip->ss_en) {
		chip->ss_counter = 0;
		if (rtsx_get_stat(chip) == RTSX_STAT_SS) {
			exit_ss = true;
			rtsx_exit_L1(chip);
			rtsx_set_stat(chip, RTSX_STAT_RUN);
		}
	}
	int_enable = rtsx_readl(chip, RTSX_BIER);
	chip->int_reg = rtsx_readl(chip, RTSX_BIPR);
	/* An all-ones readback means the device dropped off the bus. */
	if (((chip->int_reg & int_enable) == 0) ||
	    chip->int_reg == 0xFFFFFFFF)
		return STATUS_FAIL;
	status = chip->int_reg &= (int_enable | 0x7FFFFF);
	if (status & CARD_INT) {
		/* Card activity restarts the auto-delink countdown. */
		chip->auto_delink_cnt = 0;
		if (status & SD_INT) {
			if (status & SD_EXIST) {
				set_bit(SD_NR, &chip->need_reset);
			} else {
				set_bit(SD_NR, &chip->need_release);
				chip->sd_reset_counter = 0;
				chip->sd_show_cnt = 0;
				clear_bit(SD_NR, &chip->need_reset);
			}
		} else {
			/*
			 * If multi-luns, it's possible that
			 * when plugging/unplugging one card
			 * there is another card which still
			 * exists in the slot. In this case,
			 * all existed cards should be reset.
			 */
			if (exit_ss && (status & SD_EXIST))
				set_bit(SD_NR, &chip->need_reinit);
		}
		/* xD is only handled on non-5288 chips or QFN-packaged 5288. */
		if (!CHECK_PID(chip, 0x5288) || CHECK_BARO_PKG(chip, QFN)) {
			if (status & XD_INT) {
				if (status & XD_EXIST) {
					set_bit(XD_NR, &chip->need_reset);
				} else {
					set_bit(XD_NR, &chip->need_release);
					chip->xd_reset_counter = 0;
					chip->xd_show_cnt = 0;
					clear_bit(XD_NR, &chip->need_reset);
				}
			} else {
				if (exit_ss && (status & XD_EXIST))
					set_bit(XD_NR, &chip->need_reinit);
			}
		}
		if (status & MS_INT) {
			if (status & MS_EXIST) {
				set_bit(MS_NR, &chip->need_reset);
			} else {
				set_bit(MS_NR, &chip->need_release);
				chip->ms_reset_counter = 0;
				chip->ms_show_cnt = 0;
				clear_bit(MS_NR, &chip->need_reset);
			}
		} else {
			if (exit_ss && (status & MS_EXIST))
				set_bit(MS_NR, &chip->need_reinit);
		}
	}
#ifdef SUPPORT_OCP
	chip->ocp_int = ocp_int & status;
#endif
	/* While SDIO drives the bus, the data-done interrupt is not ours. */
	if (chip->sd_io && (chip->int_reg & DATA_DONE_INT))
		chip->int_reg &= ~(u32)DATA_DONE_INT;
	return STATUS_SUCCESS;
}
/*
 * rtsx_do_before_power_down - quiesce the chip before suspend/shutdown
 * @chip: Realtek card reader chip
 * @pm_stat: target PM state (PM_S1 or PM_S3)
 *
 * Releases cards, masks bus interrupts, turns off the LED, arms the SDIO
 * bus auto-switch hardware when SDIO was in charge, programs the requested
 * host sleep state, optionally delinks, and powers the analog blocks down.
 */
void rtsx_do_before_power_down(struct rtsx_chip *chip, int pm_stat)
{
	int retval;

	dev_dbg(rtsx_dev(chip), "%s, pm_stat = %d\n", __func__, pm_stat);
	rtsx_set_stat(chip, RTSX_STAT_SUSPEND);
	retval = rtsx_force_power_on(chip, SSC_PDCTL);
	if (retval != STATUS_SUCCESS)
		return;
	rtsx_release_cards(chip);
	rtsx_disable_bus_int(chip);
	turn_off_led(chip, LED_GPIO);
#ifdef HW_AUTO_SWITCH_SD_BUS
	if (chip->sd_io) {
		chip->sdio_in_charge = 1;
		if (CHECK_PID(chip, 0x5208)) {
			rtsx_write_register(chip, TLPTISTAT, 0x08, 0x08);
			/* Enable sdio_bus_auto_switch */
			rtsx_write_register(chip, 0xFE70, 0x80, 0x80);
		} else if (CHECK_PID(chip, 0x5288)) {
			rtsx_write_register(chip, TLPTISTAT, 0x08, 0x08);
			/* Enable sdio_bus_auto_switch */
			rtsx_write_register(chip, 0xFE5A, 0x08, 0x08);
		}
	}
#endif
	if (CHECK_PID(chip, 0x5208) && chip->ic_version >= IC_VER_D) {
		/* u_force_clkreq_0 */
		rtsx_write_register(chip, PETXCFG, 0x08, 0x08);
	}
	if (pm_stat == PM_S1) {
		dev_dbg(rtsx_dev(chip), "Host enter S1\n");
		rtsx_write_register(chip, HOST_SLEEP_STATE, 0x03,
				    HOST_ENTER_S1);
	} else if (pm_stat == PM_S3) {
		if (chip->s3_pwr_off_delay > 0)
			wait_timeout(chip->s3_pwr_off_delay);
		dev_dbg(rtsx_dev(chip), "Host enter S3\n");
		rtsx_write_register(chip, HOST_SLEEP_STATE, 0x03,
				    HOST_ENTER_S3);
	}
	if (chip->do_delink_before_power_down && chip->auto_delink_en)
		rtsx_write_register(chip, CHANGE_LINK_STATE, 0x02, 2);
	rtsx_force_power_down(chip, SSC_PDCTL | OC_PDCTL);
	/* Forget cached clock/card state; it is stale after power-down. */
	chip->cur_clk = 0;
	chip->cur_card = 0;
	chip->card_exist = 0;
}
/*
 * rtsx_enable_aspm - turn PCIe ASPM back on when dynamic ASPM is in use.
 * @chip: rtsx chip instance
 *
 * No-op unless ASPM is configured, dynamic switching is enabled, and ASPM
 * is currently off.
 */
void rtsx_enable_aspm(struct rtsx_chip *chip)
{
	if (!chip->aspm_l0s_l1_en || !chip->dynamic_aspm || chip->aspm_enabled)
		return;

	dev_dbg(rtsx_dev(chip), "Try to enable ASPM\n");
	chip->aspm_enabled = 1;

	if (chip->asic_code && CHECK_PID(chip, 0x5208))
		rtsx_write_phy_register(chip, 0x07, 0);

	if (CHECK_PID(chip, 0x5208))
		rtsx_write_register(chip, ASPM_FORCE_CTL, 0xF3,
				    0x30 | chip->aspm_level[0]);
	else
		rtsx_write_config_byte(chip, LCTLR, chip->aspm_l0s_l1_en);

	if (CHK_SDIO_EXIST(chip)) {
		u16 val = chip->aspm_l0s_l1_en | 0x0100;

		rtsx_write_cfg_dw(chip, CHECK_PID(chip, 0x5288) ? 2 : 1,
				  0xC0, 0xFFF, val);
	}
}
/*
 * rtsx_disable_aspm - turn PCIe ASPM off when dynamic ASPM is in use.
 * @chip: rtsx chip instance
 *
 * On the 5208 the ASPM configuration is always re-checked first; the
 * actual disable only happens when ASPM is configured, dynamic switching
 * is enabled, and ASPM is currently on.
 */
void rtsx_disable_aspm(struct rtsx_chip *chip)
{
	if (CHECK_PID(chip, 0x5208))
		rtsx_monitor_aspm_config(chip);

	if (!chip->aspm_l0s_l1_en || !chip->dynamic_aspm || !chip->aspm_enabled)
		return;

	dev_dbg(rtsx_dev(chip), "Try to disable ASPM\n");
	chip->aspm_enabled = 0;

	if (chip->asic_code && CHECK_PID(chip, 0x5208))
		rtsx_write_phy_register(chip, 0x07, 0x0129);

	if (CHECK_PID(chip, 0x5208))
		rtsx_write_register(chip, ASPM_FORCE_CTL, 0xF3, 0x30);
	else
		rtsx_write_config_byte(chip, LCTLR, 0x00);

	wait_timeout(1);
}
/*
 * rtsx_read_ppbuf - read @buf_len bytes from the chip's ping-pong buffer.
 * @chip: rtsx chip instance
 * @buf: caller-supplied destination buffer (at least @buf_len bytes)
 * @buf_len: number of bytes to read
 *
 * Reads the ping-pong buffer registers starting at PPBUF_BASE2, batching
 * at most 256 register reads per command.
 *
 * Returns STATUS_SUCCESS, STATUS_ERROR for a NULL @buf, or STATUS_FAIL if
 * a command batch fails.
 */
int rtsx_read_ppbuf(struct rtsx_chip *chip, u8 *buf, int buf_len)
{
	int retval;
	int i, j;
	u16 reg_addr;
	u8 *ptr;

	if (!buf)
		return STATUS_ERROR;

	ptr = buf;
	reg_addr = PPBUF_BASE2;

	/* Full 256-byte chunks. */
	for (i = 0; i < buf_len / 256; i++) {
		rtsx_init_cmd(chip);

		for (j = 0; j < 256; j++)
			rtsx_add_cmd(chip, READ_REG_CMD, reg_addr++, 0, 0);

		retval = rtsx_send_cmd(chip, 0, 250);
		if (retval < 0)
			return STATUS_FAIL;

		memcpy(ptr, rtsx_get_cmd_data(chip), 256);
		ptr += 256;
	}

	/* Trailing partial chunk, if any. */
	if (buf_len % 256) {
		rtsx_init_cmd(chip);

		for (j = 0; j < buf_len % 256; j++)
			rtsx_add_cmd(chip, READ_REG_CMD, reg_addr++, 0, 0);

		retval = rtsx_send_cmd(chip, 0, 250);
		if (retval < 0)
			return STATUS_FAIL;

		/*
		 * Copy the remainder inside the branch: previously this
		 * memcpy ran unconditionally, touching the command data
		 * buffer even when there was nothing left to fetch.
		 */
		memcpy(ptr, rtsx_get_cmd_data(chip), buf_len % 256);
	}

	return STATUS_SUCCESS;
}
/*
 * rtsx_write_ppbuf - write @buf_len bytes into the chip's ping-pong buffer.
 * @chip: rtsx chip instance
 * @buf: caller-supplied source buffer (at least @buf_len bytes)
 * @buf_len: number of bytes to write
 *
 * Writes the ping-pong buffer registers starting at PPBUF_BASE2, batching
 * at most 256 register writes per command.
 *
 * Returns STATUS_SUCCESS, STATUS_ERROR for a NULL @buf, or STATUS_FAIL if
 * a command batch fails.
 */
int rtsx_write_ppbuf(struct rtsx_chip *chip, u8 *buf, int buf_len)
{
	int remaining, chunk, j;
	int retval;
	u16 reg_addr;
	u8 *src;

	if (!buf)
		return STATUS_ERROR;

	src = buf;
	reg_addr = PPBUF_BASE2;

	for (remaining = buf_len; remaining > 0; remaining -= chunk) {
		chunk = remaining >= 256 ? 256 : remaining;

		rtsx_init_cmd(chip);

		for (j = 0; j < chunk; j++) {
			rtsx_add_cmd(chip, WRITE_REG_CMD, reg_addr++, 0xFF,
				     *src);
			src++;
		}

		retval = rtsx_send_cmd(chip, 0, 250);
		if (retval < 0)
			return STATUS_FAIL;
	}

	return STATUS_SUCCESS;
}
/*
 * rtsx_check_chip_exist - probe whether the chip still answers MMIO reads.
 * @chip: rtsx chip instance
 *
 * An all-ones readback of register 0 means the device has dropped off the
 * bus (e.g. surprise removal).
 */
int rtsx_check_chip_exist(struct rtsx_chip *chip)
{
	return rtsx_readl(chip, 0) == 0xFFFFFFFF ? STATUS_FAIL : STATUS_SUCCESS;
}
/*
 * rtsx_force_power_on - clear the selected power-down bits in FPDCTL.
 * @chip: rtsx chip instance
 * @ctl: bitmask of SSC_PDCTL / OC_PDCTL blocks to power up
 *
 * Returns STATUS_SUCCESS (also when @ctl selects nothing) or STATUS_FAIL
 * on a register write error.
 */
int rtsx_force_power_on(struct rtsx_chip *chip, u8 ctl)
{
	u8 mask = 0;

	if (ctl & SSC_PDCTL)
		mask |= SSC_POWER_DOWN;

#ifdef SUPPORT_OCP
	if (ctl & OC_PDCTL) {
		mask |= SD_OC_POWER_DOWN;
		if (CHECK_LUN_MODE(chip, SD_MS_2LUN))
			mask |= MS_OC_POWER_DOWN;
	}
#endif

	if (!mask)
		return STATUS_SUCCESS;

	if (rtsx_write_register(chip, FPDCTL, mask, 0) != STATUS_SUCCESS)
		return STATUS_FAIL;

	/* The 5288 needs time for the powered-up blocks to settle. */
	if (CHECK_PID(chip, 0x5288))
		wait_timeout(200);

	return STATUS_SUCCESS;
}
/*
 * rtsx_force_power_down - set the selected power-down bits in FPDCTL.
 * @chip: rtsx chip instance
 * @ctl: bitmask of SSC_PDCTL / OC_PDCTL blocks to power down
 *
 * Returns STATUS_SUCCESS (also when @ctl selects nothing) or STATUS_FAIL
 * on a register write error.
 */
int rtsx_force_power_down(struct rtsx_chip *chip, u8 ctl)
{
	int retval;
	u8 mask = 0;

	if (ctl & SSC_PDCTL)
		mask |= SSC_POWER_DOWN;

#ifdef SUPPORT_OCP
	if (ctl & OC_PDCTL) {
		mask |= SD_OC_POWER_DOWN;
		if (CHECK_LUN_MODE(chip, SD_MS_2LUN))
			mask |= MS_OC_POWER_DOWN;
	}
#endif

	if (mask) {
		/*
		 * Setting each masked bit powers that block down; the value
		 * written equals the mask, so the redundant intermediate
		 * "val" variable of the original was dropped.
		 */
		retval = rtsx_write_register(chip, FPDCTL, mask, mask);
		if (retval != STATUS_SUCCESS)
			return STATUS_FAIL;
	}

	return STATUS_SUCCESS;
}
| linux-master | drivers/staging/rts5208/rtsx_chip.c |
// SPDX-License-Identifier: GPL-2.0+
/*
* Driver for Realtek PCI-Express card reader
*
* Copyright(c) 2009-2013 Realtek Semiconductor Corp. All rights reserved.
*
* Author:
* Wei WANG ([email protected])
* Micky Ching ([email protected])
*/
#include "general.h"
/*
 * bit1cnt_long - count the number of set bits in a 32-bit word.
 * @data: word to examine
 *
 * Uses Kernighan's trick: each iteration clears the lowest set bit, so
 * the loop runs once per set bit instead of once per bit position.
 */
int bit1cnt_long(u32 data)
{
	int cnt = 0;

	while (data) {
		data &= data - 1;
		cnt++;
	}
	return cnt;
}
| linux-master | drivers/staging/rts5208/general.c |
// SPDX-License-Identifier: GPL-2.0+
/*
* Driver for Realtek PCI-Express card reader
*
* Copyright(c) 2009-2013 Realtek Semiconductor Corp. All rights reserved.
*
* Author:
* Wei WANG ([email protected])
* Micky Ching ([email protected])
*/
#include <linux/blkdev.h>
#include <linux/kthread.h>
#include <linux/sched.h>
#include <linux/vmalloc.h>
#include "rtsx.h"
#include "sd.h"
#include "ms.h"
#include "spi.h"
/*
 * scsi_show_command - log the name of the SCSI command being processed.
 * @chip: rtsx chip instance (chip->srb holds the current command)
 *
 * Known opcodes are resolved through a lookup table; unknown opcodes are
 * reported as "(unknown command)" and their CDB bytes (up to 16) are
 * dumped.  TEST_UNIT_READY is deliberately not logged to avoid flooding
 * the log with polling traffic.
 */
void scsi_show_command(struct rtsx_chip *chip)
{
	static const struct {
		unsigned char opcode;
		const char *name;
	} cmd_names[] = {
		{ TEST_UNIT_READY, "TEST_UNIT_READY" },
		{ REZERO_UNIT, "REZERO_UNIT" },
		{ REQUEST_SENSE, "REQUEST_SENSE" },
		{ FORMAT_UNIT, "FORMAT_UNIT" },
		{ READ_BLOCK_LIMITS, "READ_BLOCK_LIMITS" },
		{ REASSIGN_BLOCKS, "REASSIGN_BLOCKS" },
		{ READ_6, "READ_6" },
		{ WRITE_6, "WRITE_6" },
		{ SEEK_6, "SEEK_6" },
		{ READ_REVERSE, "READ_REVERSE" },
		{ WRITE_FILEMARKS, "WRITE_FILEMARKS" },
		{ SPACE, "SPACE" },
		{ INQUIRY, "INQUIRY" },
		{ RECOVER_BUFFERED_DATA, "RECOVER_BUFFERED_DATA" },
		{ MODE_SELECT, "MODE_SELECT" },
		{ RESERVE, "RESERVE" },
		{ RELEASE, "RELEASE" },
		{ COPY, "COPY" },
		{ ERASE, "ERASE" },
		{ MODE_SENSE, "MODE_SENSE" },
		{ START_STOP, "START_STOP" },
		{ RECEIVE_DIAGNOSTIC, "RECEIVE_DIAGNOSTIC" },
		{ SEND_DIAGNOSTIC, "SEND_DIAGNOSTIC" },
		{ ALLOW_MEDIUM_REMOVAL, "ALLOW_MEDIUM_REMOVAL" },
		{ SET_WINDOW, "SET_WINDOW" },
		{ READ_CAPACITY, "READ_CAPACITY" },
		{ READ_10, "READ_10" },
		{ WRITE_10, "WRITE_10" },
		{ SEEK_10, "SEEK_10" },
		{ WRITE_VERIFY, "WRITE_VERIFY" },
		{ VERIFY, "VERIFY" },
		{ SEARCH_HIGH, "SEARCH_HIGH" },
		{ SEARCH_EQUAL, "SEARCH_EQUAL" },
		{ SEARCH_LOW, "SEARCH_LOW" },
		{ SET_LIMITS, "SET_LIMITS" },
		{ READ_POSITION, "READ_POSITION" },
		{ SYNCHRONIZE_CACHE, "SYNCHRONIZE_CACHE" },
		{ LOCK_UNLOCK_CACHE, "LOCK_UNLOCK_CACHE" },
		{ READ_DEFECT_DATA, "READ_DEFECT_DATA" },
		{ MEDIUM_SCAN, "MEDIUM_SCAN" },
		{ COMPARE, "COMPARE" },
		{ COPY_VERIFY, "COPY_VERIFY" },
		{ WRITE_BUFFER, "WRITE_BUFFER" },
		{ READ_BUFFER, "READ_BUFFER" },
		{ UPDATE_BLOCK, "UPDATE_BLOCK" },
		{ READ_LONG, "READ_LONG" },
		{ WRITE_LONG, "WRITE_LONG" },
		{ CHANGE_DEFINITION, "CHANGE_DEFINITION" },
		{ WRITE_SAME, "WRITE_SAME" },
		{ GPCMD_READ_SUBCHANNEL, "READ SUBCHANNEL" },
		{ READ_TOC, "READ_TOC" },
		{ GPCMD_READ_HEADER, "READ HEADER" },
		{ GPCMD_PLAY_AUDIO_10, "PLAY AUDIO (10)" },
		{ GPCMD_PLAY_AUDIO_MSF, "PLAY AUDIO MSF" },
		{ GPCMD_GET_EVENT_STATUS_NOTIFICATION,
			"GET EVENT/STATUS NOTIFICATION" },
		{ GPCMD_PAUSE_RESUME, "PAUSE/RESUME" },
		{ LOG_SELECT, "LOG_SELECT" },
		{ LOG_SENSE, "LOG_SENSE" },
		{ GPCMD_STOP_PLAY_SCAN, "STOP PLAY/SCAN" },
		{ GPCMD_READ_DISC_INFO, "READ DISC INFORMATION" },
		{ GPCMD_READ_TRACK_RZONE_INFO, "READ TRACK INFORMATION" },
		{ GPCMD_RESERVE_RZONE_TRACK, "RESERVE TRACK" },
		{ GPCMD_SEND_OPC, "SEND OPC" },
		{ MODE_SELECT_10, "MODE_SELECT_10" },
		{ GPCMD_REPAIR_RZONE_TRACK, "REPAIR TRACK" },
		{ 0x59, "READ MASTER CUE" },
		{ MODE_SENSE_10, "MODE_SENSE_10" },
		{ GPCMD_CLOSE_TRACK, "CLOSE TRACK/SESSION" },
		{ 0x5C, "READ BUFFER CAPACITY" },
		{ 0x5D, "SEND CUE SHEET" },
		{ GPCMD_BLANK, "BLANK" },
		{ REPORT_LUNS, "REPORT LUNS" },
		{ MOVE_MEDIUM, "MOVE_MEDIUM or PLAY AUDIO (12)" },
		{ READ_12, "READ_12" },
		{ WRITE_12, "WRITE_12" },
		{ WRITE_VERIFY_12, "WRITE_VERIFY_12" },
		{ SEARCH_HIGH_12, "SEARCH_HIGH_12" },
		{ SEARCH_EQUAL_12, "SEARCH_EQUAL_12" },
		{ SEARCH_LOW_12, "SEARCH_LOW_12" },
		{ SEND_VOLUME_TAG, "SEND_VOLUME_TAG" },
		{ READ_ELEMENT_STATUS, "READ_ELEMENT_STATUS" },
		{ GPCMD_READ_CD_MSF, "READ CD MSF" },
		{ GPCMD_SCAN, "SCAN" },
		{ GPCMD_SET_SPEED, "SET CD SPEED" },
		{ GPCMD_MECHANISM_STATUS, "MECHANISM STATUS" },
		{ GPCMD_READ_CD, "READ CD" },
		{ 0xE1, "WRITE CONTINUE" },
		{ WRITE_LONG_2, "WRITE_LONG_2" },
		{ VENDOR_CMND, "Realtek's vendor command" },
	};
	struct scsi_cmnd *srb = chip->srb;
	const char *what = "(unknown command)";
	bool unknown_cmd = true;
	size_t k;
	int len;

	for (k = 0; k < ARRAY_SIZE(cmd_names); k++) {
		if (cmd_names[k].opcode == srb->cmnd[0]) {
			what = cmd_names[k].name;
			unknown_cmd = false;
			break;
		}
	}

	if (srb->cmnd[0] != TEST_UNIT_READY)
		dev_dbg(rtsx_dev(chip), "Command %s (%d bytes)\n",
			what, srb->cmd_len);

	if (unknown_cmd) {
		len = min_t(unsigned short, srb->cmd_len, 16);
		dev_dbg(rtsx_dev(chip), "%*ph\n", len, srb->cmnd);
	}
}
/*
 * set_sense_type - translate a driver sense-type enum into SPC sense data.
 * @chip: rtsx chip instance
 * @lun: logical unit whose sense buffer is updated
 * @sense_type: one of the SENSE_TYPE_* values
 *
 * Fills chip->sense_buffer[lun] with the sense key / ASC / ASCQ triple
 * that corresponds to @sense_type (the hex values are standard SPC
 * additional sense codes).  Unknown types clear the sense data.
 */
void set_sense_type(struct rtsx_chip *chip, unsigned int lun, int sense_type)
{
	switch (sense_type) {
	/* UNIT ATTENTION / medium may have changed (ASC 0x28) */
	case SENSE_TYPE_MEDIA_CHANGE:
		set_sense_data(chip, lun, CUR_ERR, 0x06, 0, 0x28, 0, 0, 0);
		break;
	/* NOT READY / medium not present (ASC 0x3A) */
	case SENSE_TYPE_MEDIA_NOT_PRESENT:
		set_sense_data(chip, lun, CUR_ERR, 0x02, 0, 0x3A, 0, 0, 0);
		break;
	/* ILLEGAL REQUEST / LBA out of range (ASC 0x21) */
	case SENSE_TYPE_MEDIA_LBA_OVER_RANGE:
		set_sense_data(chip, lun, CUR_ERR, 0x05, 0, 0x21, 0, 0, 0);
		break;
	/* ILLEGAL REQUEST / LUN not supported (ASC 0x25) */
	case SENSE_TYPE_MEDIA_LUN_NOT_SUPPORT:
		set_sense_data(chip, lun, CUR_ERR, 0x05, 0, 0x25, 0, 0, 0);
		break;
	/* DATA PROTECT / write protected (ASC 0x27) */
	case SENSE_TYPE_MEDIA_WRITE_PROTECT:
		set_sense_data(chip, lun, CUR_ERR, 0x07, 0, 0x27, 0, 0, 0);
		break;
	/* MEDIUM ERROR / unrecovered read error (ASC 0x11) */
	case SENSE_TYPE_MEDIA_UNRECOVER_READ_ERR:
		set_sense_data(chip, lun, CUR_ERR, 0x03, 0, 0x11, 0, 0, 0);
		break;
	/* MEDIUM ERROR / write error (ASC 0x0C) */
	case SENSE_TYPE_MEDIA_WRITE_ERR:
		set_sense_data(chip, lun, CUR_ERR, 0x03, 0, 0x0C, 0x02, 0, 0);
		break;
	/* ILLEGAL REQUEST / invalid field in CDB, with sense-key info */
	case SENSE_TYPE_MEDIA_INVALID_CMD_FIELD:
		set_sense_data(chip, lun, CUR_ERR, ILGAL_REQ, 0,
			       ASC_INVLD_CDB, ASCQ_INVLD_CDB, CDB_ILLEGAL, 1);
		break;
	/* NOT READY / format in progress (ASC 0x04, ASCQ 0x04) */
	case SENSE_TYPE_FORMAT_IN_PROGRESS:
		set_sense_data(chip, lun, CUR_ERR, 0x02, 0, 0x04, 0x04, 0, 0);
		break;
	/* MEDIUM ERROR / format command failed (ASC 0x31, ASCQ 0x01) */
	case SENSE_TYPE_FORMAT_CMD_FAILED:
		set_sense_data(chip, lun, CUR_ERR, 0x03, 0, 0x31, 0x01, 0, 0);
		break;
#ifdef SUPPORT_MAGIC_GATE
	case SENSE_TYPE_MG_KEY_FAIL_NOT_ESTAB:
		set_sense_data(chip, lun, CUR_ERR, 0x05, 0, 0x6F, 0x02, 0, 0);
		break;
	case SENSE_TYPE_MG_KEY_FAIL_NOT_AUTHEN:
		set_sense_data(chip, lun, CUR_ERR, 0x05, 0, 0x6F, 0x00, 0, 0);
		break;
	case SENSE_TYPE_MG_INCOMPATIBLE_MEDIUM:
		set_sense_data(chip, lun, CUR_ERR, 0x02, 0, 0x30, 0x00, 0, 0);
		break;
	case SENSE_TYPE_MG_WRITE_ERR:
		set_sense_data(chip, lun, CUR_ERR, 0x03, 0, 0x0C, 0x00, 0, 0);
		break;
#endif
#ifdef SUPPORT_SD_LOCK
	/* Access forbidden while the SD card is password-locked */
	case SENSE_TYPE_MEDIA_READ_FORBIDDEN:
		set_sense_data(chip, lun, CUR_ERR, 0x07, 0, 0x11, 0x13, 0, 0);
		break;
#endif
	case SENSE_TYPE_NO_SENSE:
	default:
		set_sense_data(chip, lun, CUR_ERR, 0, 0, 0, 0, 0, 0);
		break;
	}
}
/*
 * set_sense_data - fill a LUN's sense buffer with explicit field values.
 * @chip: rtsx chip instance
 * @lun: logical unit whose sense buffer is written
 * @err_code: sense error code (e.g. CUR_ERR)
 * @sense_key: SPC sense key
 * @info: 32-bit INFORMATION field, stored big-endian
 * @asc: additional sense code
 * @ascq: additional sense code qualifier
 * @sns_key_info0: sense-key-specific byte 0 (0 leaves the SKSV bytes alone)
 * @sns_key_info1: sense-key-specific bytes 1/2 packed as two nibbles
 */
void set_sense_data(struct rtsx_chip *chip, unsigned int lun, u8 err_code,
		    u8 sense_key, u32 info, u8 asc, u8 ascq, u8 sns_key_info0,
		    u16 sns_key_info1)
{
	struct sense_data_t *sense = &chip->sense_buffer[lun];
	int i;

	sense->err_code = err_code;
	sense->sense_key = sense_key;
	/* INFORMATION field is big-endian, MSB first. */
	for (i = 0; i < 4; i++)
		sense->info[i] = (u8)(info >> (24 - 8 * i));
	sense->ad_sense_len = sizeof(struct sense_data_t) - 8;
	sense->asc = asc;
	sense->ascq = ascq;
	if (sns_key_info0 != 0) {
		sense->sns_key_info[0] = SKSV | sns_key_info0;
		sense->sns_key_info[1] = (sns_key_info1 >> 4) & 0x0f;
		sense->sns_key_info[2] = sns_key_info1 & 0x0f;
	}
}
/*
 * test_unit_ready - handle the SCSI TEST UNIT READY command.
 *
 * Fails with MEDIA_NOT_PRESENT when no card is ready on the LUN, and
 * reports a one-shot MEDIA_CHANGE unit attention the first time a newly
 * inserted card is seen (tracked via the chip->lun_mc bitmap).  With
 * SD-lock support, a locked SD card is reported as read-forbidden.
 */
static int test_unit_ready(struct scsi_cmnd *srb, struct rtsx_chip *chip)
{
	unsigned int lun = SCSI_LUN(srb);
	if (!check_card_ready(chip, lun)) {
		set_sense_type(chip, lun, SENSE_TYPE_MEDIA_NOT_PRESENT);
		return TRANSPORT_FAILED;
	}
	/* First TEST UNIT READY after insertion: raise media-change once. */
	if (!(CHK_BIT(chip->lun_mc, lun))) {
		SET_BIT(chip->lun_mc, lun);
		set_sense_type(chip, lun, SENSE_TYPE_MEDIA_CHANGE);
		return TRANSPORT_FAILED;
	}
#ifdef SUPPORT_SD_LOCK
	if (get_lun_card(chip, SCSI_LUN(srb)) == SD_CARD) {
		struct sd_info *sd_card = &chip->sd_card;
		/* Lock state changed since last poll: report media change. */
		if (sd_card->sd_lock_notify) {
			sd_card->sd_lock_notify = 0;
			set_sense_type(chip, lun, SENSE_TYPE_MEDIA_CHANGE);
			return TRANSPORT_FAILED;
		} else if (sd_card->sd_lock_status & SD_LOCKED) {
			set_sense_type(chip, lun,
				       SENSE_TYPE_MEDIA_READ_FORBIDDEN);
			return TRANSPORT_FAILED;
		}
	}
#endif
	return TRANSPORT_GOOD;
}
/*
 * Extra INQUIRY payload (response bytes 36..55) advertised when the
 * MemoryStick Pro formatter is enabled: an identification string plus
 * per-byte capability flags (see the byte comments below).
 */
static unsigned char formatter_inquiry_str[20] = {
	'M', 'E', 'M', 'O', 'R', 'Y', 'S', 'T', 'I', 'C', 'K',
#ifdef SUPPORT_MAGIC_GATE
	'-', 'M', 'G', /* Byte[47:49] */
#else
	0x20, 0x20, 0x20, /* Byte[47:49] */
#endif
#ifdef SUPPORT_MAGIC_GATE
	0x0B, /* Byte[50]: MG, MS, MSPro, MSXC */
#else
	0x09, /* Byte[50]: MS, MSPro, MSXC */
#endif
	0x00, /* Byte[51]: Category Specific Commands */
	0x00, /* Byte[52]: Access Control and feature */
	0x20, 0x20, 0x20, /* Byte[53:55] */
};
/*
 * inquiry - handle the SCSI INQUIRY command.
 *
 * Builds a standard INQUIRY response: an 8-byte header, a vendor/product
 * string chosen by LUN mode, and - when the MemoryStick Pro formatter is
 * enabled for a (potential) MS LUN - the formatter capability bytes at
 * offset 36.  At most 36 (or 56 with formatter info) bytes carry data;
 * the transfer length is the caller's full buffer.
 */
static int inquiry(struct scsi_cmnd *srb, struct rtsx_chip *chip)
{
	unsigned int lun = SCSI_LUN(srb);
	char *inquiry_default = (char *)"Generic-xD/SD/M.S.      1.00 ";
	char *inquiry_sdms = (char *)"Generic-SD/MemoryStick  1.00 ";
	char *inquiry_sd = (char *)"Generic-SD/MMC          1.00 ";
	char *inquiry_ms = (char *)"Generic-MemoryStick     1.00 ";
	char *inquiry_string;
	unsigned char sendbytes;
	unsigned char *buf;
	u8 card = get_lun_card(chip, lun);
	bool pro_formatter_flag = false;
	unsigned char inquiry_buf[] = {
		QULIFIRE | DRCT_ACCESS_DEV,
		RMB_DISC | 0x0D,
		0x00,
		0x01,
		0x1f,
		0x02,
		0,
		REL_ADR | WBUS_32 | WBUS_16 | SYNC | LINKED | CMD_QUE | SFT_RE,
	};

	if (CHECK_LUN_MODE(chip, SD_MS_2LUN)) {
		if (chip->lun2card[lun] == SD_CARD)
			inquiry_string = inquiry_sd;
		else
			inquiry_string = inquiry_ms;
	} else if (CHECK_LUN_MODE(chip, SD_MS_1LUN)) {
		inquiry_string = inquiry_sdms;
	} else {
		inquiry_string = inquiry_default;
	}

	/*
	 * vzalloc, not vmalloc: the whole buffer is handed to
	 * rtsx_stor_set_xfer_buf() below, so the bytes past "sendbytes"
	 * must be zero rather than uninitialized kernel memory.
	 */
	buf = vzalloc(scsi_bufflen(srb));
	if (!buf)
		return TRANSPORT_ERROR;

#ifdef SUPPORT_MAGIC_GATE
	if (chip->mspro_formatter_enable &&
	    (chip->lun2card[lun] & MS_CARD))
#else
	if (chip->mspro_formatter_enable)
#endif
		if (!card || card == MS_CARD)
			pro_formatter_flag = true;

	if (pro_formatter_flag) {
		if (scsi_bufflen(srb) < 56)
			sendbytes = (unsigned char)(scsi_bufflen(srb));
		else
			sendbytes = 56;
	} else {
		if (scsi_bufflen(srb) < 36)
			sendbytes = (unsigned char)(scsi_bufflen(srb));
		else
			sendbytes = 36;
	}

	if (sendbytes > 8) {
		memcpy(buf, inquiry_buf, 8);
		/* strncpy is intentional: it zero-pads the vendor string. */
		strncpy(buf + 8, inquiry_string, sendbytes - 8);
		if (pro_formatter_flag) {
			/* Additional Length */
			buf[4] = 0x33;
		}
	} else {
		memcpy(buf, inquiry_buf, sendbytes);
	}

	if (pro_formatter_flag) {
		if (sendbytes > 36)
			memcpy(buf + 36, formatter_inquiry_str, sendbytes - 36);
	}

	scsi_set_resid(srb, 0);
	rtsx_stor_set_xfer_buf(buf, scsi_bufflen(srb), srb);
	vfree(buf);

	return TRANSPORT_GOOD;
}
/*
 * start_stop_unit - handle the SCSI START STOP UNIT command.
 *
 * IMMED bit set means nothing to do.  Otherwise the power-condition /
 * start-stop byte selects: stop (no-op), unload (eject the card), or
 * make-ready/load (fail with MEDIA_NOT_PRESENT when no card is ready).
 * Any other value is a transport error.
 */
static int start_stop_unit(struct scsi_cmnd *srb, struct rtsx_chip *chip)
{
	unsigned int lun = SCSI_LUN(srb);

	scsi_set_resid(srb, scsi_bufflen(srb));

	/* IMMED: report success without touching the media. */
	if (srb->cmnd[1] == 1)
		return TRANSPORT_GOOD;

	switch (srb->cmnd[0x4]) {
	case STOP_MEDIUM:
		/* Media disabled */
		return TRANSPORT_GOOD;

	case UNLOAD_MEDIUM:
		/* Media shall be unload */
		if (check_card_ready(chip, lun))
			eject_card(chip, lun);
		return TRANSPORT_GOOD;

	case MAKE_MEDIUM_READY:
	case LOAD_MEDIUM:
		if (check_card_ready(chip, lun))
			return TRANSPORT_GOOD;
		set_sense_type(chip, lun, SENSE_TYPE_MEDIA_NOT_PRESENT);
		return TRANSPORT_FAILED;

	default:
		/*
		 * The original fell through the switch to this same result
		 * via an unreachable "break" after a return; make the
		 * unknown-value path explicit.
		 */
		return TRANSPORT_ERROR;
	}
}
/*
 * allow_medium_removal - handle PREVENT ALLOW MEDIUM REMOVAL.
 *
 * Removable card readers cannot lock the media, so a PREVENT request is
 * rejected as an invalid CDB field; ALLOW succeeds.
 */
static int allow_medium_removal(struct scsi_cmnd *srb, struct rtsx_chip *chip)
{
	scsi_set_resid(srb, 0);

	if (!(srb->cmnd[4] & 0x1))
		return TRANSPORT_GOOD;

	set_sense_type(chip, SCSI_LUN(srb),
		       SENSE_TYPE_MEDIA_INVALID_CMD_FIELD);
	return TRANSPORT_FAILED;
}
static int request_sense(struct scsi_cmnd *srb, struct rtsx_chip *chip)
{
struct sense_data_t *sense;
unsigned int lun = SCSI_LUN(srb);
struct ms_info *ms_card = &chip->ms_card;
unsigned char *tmp, *buf;
sense = &chip->sense_buffer[lun];
if ((get_lun_card(chip, lun) == MS_CARD) &&
ms_card->pro_under_formatting) {
if (ms_card->format_status == FORMAT_SUCCESS) {
set_sense_type(chip, lun, SENSE_TYPE_NO_SENSE);
ms_card->pro_under_formatting = 0;
ms_card->progress = 0;
} else if (ms_card->format_status == FORMAT_IN_PROGRESS) {
/* Logical Unit Not Ready Format in Progress */
set_sense_data(chip, lun, CUR_ERR, 0x02, 0, 0x04, 0x04,
0, (u16)(ms_card->progress));
} else {
/* Format Command Failed */
set_sense_type(chip, lun, SENSE_TYPE_FORMAT_CMD_FAILED);
ms_card->pro_under_formatting = 0;
ms_card->progress = 0;
}
rtsx_set_stat(chip, RTSX_STAT_RUN);
}
buf = vmalloc(scsi_bufflen(srb));
if (!buf)
return TRANSPORT_ERROR;
tmp = (unsigned char *)sense;
memcpy(buf, tmp, scsi_bufflen(srb));
rtsx_stor_set_xfer_buf(buf, scsi_bufflen(srb), srb);
vfree(buf);
scsi_set_resid(srb, 0);
/* Reset Sense Data */
set_sense_type(chip, lun, SENSE_TYPE_NO_SENSE);
return TRANSPORT_GOOD;
}
/*
 * ms_mode_sense - build a MODE SENSE (6 or 10) response for a MemoryStick
 * LUN, including vendor page 0x20 and up to 96 bytes of raw system info.
 * @chip: rtsx chip instance
 * @cmd: MODE_SENSE or MODE_SENSE_10 (selects header layout)
 * @lun: logical unit being queried
 * @buf: output buffer
 * @buf_len: size of @buf; output is capped at 0x68 (6-byte) / 0x6C (10-byte)
 */
static void ms_mode_sense(struct rtsx_chip *chip, u8 cmd,
			  int lun, u8 *buf, int buf_len)
{
	struct ms_info *ms_card = &chip->ms_card;
	int sys_info_offset;
	int data_size = buf_len;
	bool support_format = false;
	int i = 0;
	if (cmd == MODE_SENSE) {
		/* 6-byte CDB: 4-byte header + 4-byte page header => 8 */
		sys_info_offset = 8;
		if (data_size > 0x68)
			data_size = 0x68;
		buf[i++] = 0x67; /* Mode Data Length */
	} else {
		/* 10-byte CDB: 8-byte header + 4-byte page header => 12 */
		sys_info_offset = 12;
		if (data_size > 0x6C)
			data_size = 0x6C;
		buf[i++] = 0x00; /* Mode Data Length (MSB) */
		buf[i++] = 0x6A; /* Mode Data Length (LSB) */
	}
	/* Medium Type Code */
	if (check_card_ready(chip, lun)) {
		if (CHK_MSXC(ms_card)) {
			support_format = true;
			buf[i++] = 0x40;
		} else if (CHK_MSPRO(ms_card)) {
			support_format = true;
			buf[i++] = 0x20;
		} else {
			buf[i++] = 0x10;
		}
		/* WP */
		if (check_card_wp(chip, lun))
			buf[i++] = 0x80;
		else
			buf[i++] = 0x00;
	} else {
		buf[i++] = 0x00;	/* MediaType */
		buf[i++] = 0x00;	/* WP */
	}
	buf[i++] = 0x00;		/* Reserved */
	if (cmd == MODE_SENSE_10) {
		buf[i++] = 0x00;  /* Reserved */
		buf[i++] = 0x00;  /* Block descriptor length(MSB) */
		buf[i++] = 0x00;  /* Block descriptor length(LSB) */
		/* The Following Data is the content of "Page 0x20" */
		if (data_size >= 9)
			buf[i++] = 0x20;		/* Page Code */
		if (data_size >= 10)
			buf[i++] = 0x62;		/* Page Length */
		if (data_size >= 11)
			buf[i++] = 0x00;		/* No Access Control */
		if (data_size >= 12) {
			if (support_format)
				buf[i++] = 0xC0;	/* SF, SGM */
			else
				buf[i++] = 0x00;
		}
	} else {
		/* The Following Data is the content of "Page 0x20" */
		if (data_size >= 5)
			buf[i++] = 0x20;		/* Page Code */
		if (data_size >= 6)
			buf[i++] = 0x62;		/* Page Length */
		if (data_size >= 7)
			buf[i++] = 0x00;		/* No Access Control */
		if (data_size >= 8) {
			if (support_format)
				buf[i++] = 0xC0;	/* SF, SGM */
			else
				buf[i++] = 0x00;
		}
	}
	if (data_size > sys_info_offset) {
		/* 96 Bytes Attribute Data */
		int len = data_size - sys_info_offset;
		len = (len < 96) ? len : 96;
		memcpy(buf + sys_info_offset, ms_card->raw_sys_info, len);
	}
}
/*
 * mode_sense - handle MODE SENSE (6) and MODE SENSE (10).
 *
 * Supports page codes 0x3F (all), 0x1C, 0x00, and - when the MS Pro
 * formatter applies to the LUN - vendor page 0x20 via ms_mode_sense().
 * Other page codes fail with an invalid-CDB-field sense.  Without
 * SUPPORT_MAGIC_GATE the command also fails when no card is ready.
 */
static int mode_sense(struct scsi_cmnd *srb, struct rtsx_chip *chip)
{
	unsigned int lun = SCSI_LUN(srb);
	unsigned int data_size;
	int status;
	bool pro_formatter_flag;
	unsigned char page_code, *buf;
	u8 card = get_lun_card(chip, lun);
#ifndef SUPPORT_MAGIC_GATE
	if (!check_card_ready(chip, lun)) {
		set_sense_type(chip, lun, SENSE_TYPE_MEDIA_NOT_PRESENT);
		scsi_set_resid(srb, scsi_bufflen(srb));
		return TRANSPORT_FAILED;
	}
#endif
	pro_formatter_flag = false;
	/* Default: bare 8-byte MODE SENSE (10) header. */
	data_size = 8;
#ifdef SUPPORT_MAGIC_GATE
	if ((chip->lun2card[lun] & MS_CARD)) {
		if (!card || card == MS_CARD) {
			/* Header + page 0x20 + 96 bytes of MS system info. */
			data_size = 108;
			if (chip->mspro_formatter_enable)
				pro_formatter_flag = true;
		}
	}
#else
	if (card == MS_CARD) {
		if (chip->mspro_formatter_enable) {
			pro_formatter_flag = true;
			data_size = 108;
		}
	}
#endif
	buf = kmalloc(data_size, GFP_KERNEL);
	if (!buf)
		return TRANSPORT_ERROR;
	page_code = srb->cmnd[2] & 0x3f;
	if (page_code == 0x3F || page_code == 0x1C ||
	    page_code == 0x00 ||
	    (pro_formatter_flag && page_code == 0x20)) {
		if (srb->cmnd[0] == MODE_SENSE) {
			if (page_code == 0x3F || page_code == 0x20) {
				ms_mode_sense(chip, srb->cmnd[0],
					      lun, buf, data_size);
			} else {
				/* Minimal 4-byte MODE SENSE (6) header. */
				data_size = 4;
				buf[0] = 0x03;
				buf[1] = 0x00;
				if (check_card_wp(chip, lun))
					buf[2] = 0x80;
				else
					buf[2] = 0x00;
				buf[3] = 0x00;
			}
		} else {
			if (page_code == 0x3F || page_code == 0x20) {
				ms_mode_sense(chip, srb->cmnd[0],
					      lun, buf, data_size);
			} else {
				/* Minimal 8-byte MODE SENSE (10) header. */
				data_size = 8;
				buf[0] = 0x00;
				buf[1] = 0x06;
				buf[2] = 0x00;
				if (check_card_wp(chip, lun))
					buf[3] = 0x80;
				else
					buf[3] = 0x00;
				buf[4] = 0x00;
				buf[5] = 0x00;
				buf[6] = 0x00;
				buf[7] = 0x00;
			}
		}
		status = TRANSPORT_GOOD;
	} else {
		set_sense_type(chip, lun, SENSE_TYPE_MEDIA_INVALID_CMD_FIELD);
		scsi_set_resid(srb, scsi_bufflen(srb));
		status = TRANSPORT_FAILED;
	}
	if (status == TRANSPORT_GOOD) {
		unsigned int len = min_t(unsigned int, scsi_bufflen(srb),
					 data_size);
		rtsx_stor_set_xfer_buf(buf, len, srb);
		scsi_set_resid(srb, scsi_bufflen(srb) - len);
	}
	kfree(buf);
	return status;
}
/*
 * read_write - handle READ(6)/(10), WRITE(6)/(10) and the vendor
 * PP_READ10/PP_WRITE10 pass-through variants.
 *
 * Wakes the chip from suspend, validates card presence, media-change
 * state, SD lock/erase state, decodes the LBA and sector count from the
 * CDB, range-checks them against the card size, and dispatches to
 * card_rw().  Three consecutive failures latch an error sense until a
 * successful transfer resets the counter.
 */
static int read_write(struct scsi_cmnd *srb, struct rtsx_chip *chip)
{
#ifdef SUPPORT_SD_LOCK
	struct sd_info *sd_card = &chip->sd_card;
#endif
	unsigned int lun = SCSI_LUN(srb);
	int retval;
	u32 start_sec;
	u16 sec_cnt;
	rtsx_disable_aspm(chip);
	/* Leave power-saving state before touching the card. */
	if (chip->ss_en && (rtsx_get_stat(chip) == RTSX_STAT_SS)) {
		rtsx_exit_ss(chip);
		wait_timeout(100);
	}
	rtsx_set_stat(chip, RTSX_STAT_RUN);
	if (!check_card_ready(chip, lun) || (get_card_size(chip, lun) == 0)) {
		set_sense_type(chip, lun, SENSE_TYPE_MEDIA_NOT_PRESENT);
		return TRANSPORT_FAILED;
	}
	/* Pending media-change unit attention must be delivered first. */
	if (!(CHK_BIT(chip->lun_mc, lun))) {
		SET_BIT(chip->lun_mc, lun);
		set_sense_type(chip, lun, SENSE_TYPE_MEDIA_CHANGE);
		return TRANSPORT_FAILED;
	}
#ifdef SUPPORT_SD_LOCK
	if (sd_card->sd_erase_status) {
		/* Accessing to any card is forbidden
		 * until the erase procedure of SD is completed
		 */
		dev_dbg(rtsx_dev(chip), "SD card being erased!\n");
		set_sense_type(chip, lun, SENSE_TYPE_MEDIA_READ_FORBIDDEN);
		return TRANSPORT_FAILED;
	}
	if (get_lun_card(chip, lun) == SD_CARD) {
		if (sd_card->sd_lock_status & SD_LOCKED) {
			dev_dbg(rtsx_dev(chip), "SD card locked!\n");
			set_sense_type(chip, lun,
				       SENSE_TYPE_MEDIA_READ_FORBIDDEN);
			return TRANSPORT_FAILED;
		}
	}
#endif
	/* Decode LBA and sector count from whichever CDB form was sent. */
	if (srb->cmnd[0] == READ_10 || srb->cmnd[0] == WRITE_10) {
		start_sec = ((u32)srb->cmnd[2] << 24) |
			((u32)srb->cmnd[3] << 16) |
			((u32)srb->cmnd[4] << 8) | ((u32)srb->cmnd[5]);
		sec_cnt = ((u16)(srb->cmnd[7]) << 8) | srb->cmnd[8];
	} else if ((srb->cmnd[0] == READ_6) || (srb->cmnd[0] == WRITE_6)) {
		start_sec = ((u32)(srb->cmnd[1] & 0x1F) << 16) |
			((u32)srb->cmnd[2] << 8) | ((u32)srb->cmnd[3]);
		sec_cnt = srb->cmnd[4];
		/* READ(6)/WRITE(6): a zero count means 256 sectors. */
		if (sec_cnt == 0)
			sec_cnt = 256;
	} else if ((srb->cmnd[0] == VENDOR_CMND) &&
		   (srb->cmnd[1] == SCSI_APP_CMD) &&
		   ((srb->cmnd[2] == PP_READ10) || (srb->cmnd[2] == PP_WRITE10))) {
		start_sec = ((u32)srb->cmnd[4] << 24) |
			((u32)srb->cmnd[5] << 16) |
			((u32)srb->cmnd[6] << 8) | ((u32)srb->cmnd[7]);
		sec_cnt = ((u16)(srb->cmnd[9]) << 8) | srb->cmnd[10];
	} else {
		set_sense_type(chip, lun, SENSE_TYPE_MEDIA_INVALID_CMD_FIELD);
		return TRANSPORT_FAILED;
	}
	/* In some test, we will receive a start_sec like 0xFFFFFFFF.
	 * In this situation, start_sec + sec_cnt will overflow, so we
	 * need to judge start_sec at first
	 */
	if (start_sec > get_card_size(chip, lun) ||
	    ((start_sec + sec_cnt) > get_card_size(chip, lun))) {
		set_sense_type(chip, lun, SENSE_TYPE_MEDIA_LBA_OVER_RANGE);
		return TRANSPORT_FAILED;
	}
	if (sec_cnt == 0) {
		scsi_set_resid(srb, 0);
		return TRANSPORT_GOOD;
	}
	if (chip->rw_fail_cnt[lun] == 3) {
		dev_dbg(rtsx_dev(chip), "read/write fail three times in succession\n");
		if (srb->sc_data_direction == DMA_FROM_DEVICE)
			set_sense_type(chip, lun,
				       SENSE_TYPE_MEDIA_UNRECOVER_READ_ERR);
		else
			set_sense_type(chip, lun, SENSE_TYPE_MEDIA_WRITE_ERR);
		return TRANSPORT_FAILED;
	}
	if (srb->sc_data_direction == DMA_TO_DEVICE) {
		if (check_card_wp(chip, lun)) {
			dev_dbg(rtsx_dev(chip), "Write protected card!\n");
			set_sense_type(chip, lun,
				       SENSE_TYPE_MEDIA_WRITE_PROTECT);
			return TRANSPORT_FAILED;
		}
	}
	retval = card_rw(srb, chip, start_sec, sec_cnt);
	if (retval != STATUS_SUCCESS) {
		if (chip->need_release & chip->lun2card[lun]) {
			chip->rw_fail_cnt[lun] = 0;
			set_sense_type(chip, lun, SENSE_TYPE_MEDIA_NOT_PRESENT);
		} else {
			chip->rw_fail_cnt[lun]++;
			if (srb->sc_data_direction == DMA_FROM_DEVICE)
				set_sense_type
					(chip, lun,
					SENSE_TYPE_MEDIA_UNRECOVER_READ_ERR);
			else
				set_sense_type(chip, lun,
					       SENSE_TYPE_MEDIA_WRITE_ERR);
		}
		retval = TRANSPORT_FAILED;
		goto exit;
	} else {
		chip->rw_fail_cnt[lun] = 0;
		retval = TRANSPORT_GOOD;
	}
	scsi_set_resid(srb, 0);
exit:
	return retval;
}
/*
 * read_format_capacity - handle the SCSI READ FORMAT CAPACITIES command.
 *
 * Emits a capacity-list header followed by one descriptor (current
 * capacity, or all-ones "no media"), plus a second formattable-media
 * descriptor when the MS Pro formatter applies to this LUN.  Block size
 * is always reported as 0x200 (512 bytes).
 */
static int read_format_capacity(struct scsi_cmnd *srb, struct rtsx_chip *chip)
{
	unsigned char *buf;
	unsigned int lun = SCSI_LUN(srb);
	unsigned int buf_len;
	u8 card = get_lun_card(chip, lun);
	u32 card_size;
	int desc_cnt;
	int i = 0;
	if (!check_card_ready(chip, lun)) {
		/* With the formatter enabled we still answer without media. */
		if (!chip->mspro_formatter_enable) {
			set_sense_type(chip, lun, SENSE_TYPE_MEDIA_NOT_PRESENT);
			return TRANSPORT_FAILED;
		}
	}
	buf_len = (scsi_bufflen(srb) > 12) ? 0x14 : 12;
	buf = kmalloc(buf_len, GFP_KERNEL);
	if (!buf)
		return TRANSPORT_ERROR;
	buf[i++] = 0;
	buf[i++] = 0;
	buf[i++] = 0;
	/* Capacity List Length */
	if (buf_len > 12 && chip->mspro_formatter_enable &&
	    (chip->lun2card[lun] & MS_CARD) &&
	    (!card || card == MS_CARD)) {
		buf[i++] = 0x10;
		desc_cnt = 2;
	} else {
		buf[i++] = 0x08;
		desc_cnt = 1;
	}
	while (desc_cnt) {
		if (check_card_ready(chip, lun)) {
			card_size = get_card_size(chip, lun);
			buf[i++] = (unsigned char)(card_size >> 24);
			buf[i++] = (unsigned char)(card_size >> 16);
			buf[i++] = (unsigned char)(card_size >> 8);
			buf[i++] = (unsigned char)card_size;
			/* Descriptor type: 2 = formatted, 0 = current. */
			if (desc_cnt == 2)
				buf[i++] = 2;
			else
				buf[i++] = 0;
		} else {
			buf[i++] = 0xFF;
			buf[i++] = 0xFF;
			buf[i++] = 0xFF;
			buf[i++] = 0xFF;
			/* Descriptor type: 3 = no media present. */
			if (desc_cnt == 2)
				buf[i++] = 3;
			else
				buf[i++] = 0;
		}
		/* Block length: 0x000200 = 512 bytes. */
		buf[i++] = 0x00;
		buf[i++] = 0x02;
		buf[i++] = 0x00;
		desc_cnt--;
	}
	buf_len = min_t(unsigned int, scsi_bufflen(srb), buf_len);
	rtsx_stor_set_xfer_buf(buf, buf_len, srb);
	kfree(buf);
	scsi_set_resid(srb, scsi_bufflen(srb) - buf_len);
	return TRANSPORT_GOOD;
}
static int read_capacity(struct scsi_cmnd *srb, struct rtsx_chip *chip)
{
unsigned char *buf;
unsigned int lun = SCSI_LUN(srb);
u32 card_size;
if (!check_card_ready(chip, lun)) {
set_sense_type(chip, lun, SENSE_TYPE_MEDIA_NOT_PRESENT);
return TRANSPORT_FAILED;
}
if (!(CHK_BIT(chip->lun_mc, lun))) {
SET_BIT(chip->lun_mc, lun);
set_sense_type(chip, lun, SENSE_TYPE_MEDIA_CHANGE);
return TRANSPORT_FAILED;
}
buf = kmalloc(8, GFP_KERNEL);
if (!buf)
return TRANSPORT_ERROR;
card_size = get_card_size(chip, lun);
buf[0] = (unsigned char)((card_size - 1) >> 24);
buf[1] = (unsigned char)((card_size - 1) >> 16);
buf[2] = (unsigned char)((card_size - 1) >> 8);
buf[3] = (unsigned char)(card_size - 1);
buf[4] = 0x00;
buf[5] = 0x00;
buf[6] = 0x02;
buf[7] = 0x00;
rtsx_stor_set_xfer_buf(buf, scsi_bufflen(srb), srb);
kfree(buf);
scsi_set_resid(srb, 0);
return TRANSPORT_GOOD;
}
/*
 * read_eeprom - vendor command: read bytes from the SPI EEPROM.
 *
 * The transfer length comes from CDB bytes 4/5 (big-endian).  Any
 * power-on or SPI read failure is reported as an unrecovered read error.
 */
static int read_eeprom(struct scsi_cmnd *srb, struct rtsx_chip *chip)
{
	unsigned short len, i;
	int retval;
	u8 *buf;

	rtsx_disable_aspm(chip);

	if (chip->ss_en && rtsx_get_stat(chip) == RTSX_STAT_SS) {
		rtsx_exit_ss(chip);
		wait_timeout(100);
	}
	rtsx_set_stat(chip, RTSX_STAT_RUN);

	len = ((u16)srb->cmnd[4] << 8) | srb->cmnd[5];

	buf = vmalloc(len);
	if (!buf)
		return TRANSPORT_ERROR;

	retval = rtsx_force_power_on(chip, SSC_PDCTL);
	if (retval != STATUS_SUCCESS)
		goto read_err;

	for (i = 0; i < len; i++) {
		retval = spi_read_eeprom(chip, i, buf + i);
		if (retval != STATUS_SUCCESS)
			goto read_err;
	}

	len = (unsigned short)min_t(unsigned int, scsi_bufflen(srb), len);
	rtsx_stor_set_xfer_buf(buf, len, srb);
	scsi_set_resid(srb, scsi_bufflen(srb) - len);

	vfree(buf);
	return TRANSPORT_GOOD;

read_err:
	vfree(buf);
	set_sense_type(chip, SCSI_LUN(srb),
		       SENSE_TYPE_MEDIA_UNRECOVER_READ_ERR);
	return TRANSPORT_FAILED;
}
/*
 * write_eeprom - vendor command: write bytes to the SPI EEPROM.
 *
 * The length comes from CDB bytes 4/5 (big-endian).  A length of 511 is
 * a magic value meaning "erase the whole EEPROM chip" instead of
 * writing; otherwise up to scsi_bufflen() bytes are written one by one.
 */
static int write_eeprom(struct scsi_cmnd *srb, struct rtsx_chip *chip)
{
	unsigned short len, i;
	int retval;
	u8 *buf;
	rtsx_disable_aspm(chip);
	/* Leave power-saving state before touching the hardware. */
	if (chip->ss_en && (rtsx_get_stat(chip) == RTSX_STAT_SS)) {
		rtsx_exit_ss(chip);
		wait_timeout(100);
	}
	rtsx_set_stat(chip, RTSX_STAT_RUN);
	len = ((u16)srb->cmnd[4] << 8) | srb->cmnd[5];
	retval = rtsx_force_power_on(chip, SSC_PDCTL);
	if (retval != STATUS_SUCCESS) {
		set_sense_type(chip, SCSI_LUN(srb), SENSE_TYPE_MEDIA_WRITE_ERR);
		return TRANSPORT_FAILED;
	}
	/* len == 511 is the protocol's "erase whole chip" request. */
	if (len == 511) {
		retval = spi_erase_eeprom_chip(chip);
		if (retval != STATUS_SUCCESS) {
			set_sense_type(chip, SCSI_LUN(srb),
				       SENSE_TYPE_MEDIA_WRITE_ERR);
			return TRANSPORT_FAILED;
		}
	} else {
		len = (unsigned short)min_t(unsigned int, scsi_bufflen(srb),
					    len);
		buf = vmalloc(len);
		if (!buf)
			return TRANSPORT_ERROR;
		rtsx_stor_get_xfer_buf(buf, len, srb);
		scsi_set_resid(srb, scsi_bufflen(srb) - len);
		for (i = 0; i < len; i++) {
			retval = spi_write_eeprom(chip, i, buf[i]);
			if (retval != STATUS_SUCCESS) {
				vfree(buf);
				set_sense_type(chip, SCSI_LUN(srb),
					       SENSE_TYPE_MEDIA_WRITE_ERR);
				return TRANSPORT_FAILED;
			}
		}
		vfree(buf);
	}
	return TRANSPORT_GOOD;
}
/*
 * read_mem - vendor command: read chip registers in the 0xFC00+ window.
 *
 * Address and length come from CDB bytes 2/3 and 4/5 (big-endian).
 * Addresses below 0xFC00 are rejected as invalid CDB fields.
 */
static int read_mem(struct scsi_cmnd *srb, struct rtsx_chip *chip)
{
	unsigned short addr, len, i;
	int retval;
	u8 *buf;
	rtsx_disable_aspm(chip);
	/* Leave power-saving state before touching the hardware. */
	if (chip->ss_en && (rtsx_get_stat(chip) == RTSX_STAT_SS)) {
		rtsx_exit_ss(chip);
		wait_timeout(100);
	}
	rtsx_set_stat(chip, RTSX_STAT_RUN);
	addr = ((u16)srb->cmnd[2] << 8) | srb->cmnd[3];
	len = ((u16)srb->cmnd[4] << 8) | srb->cmnd[5];
	/* Only the 0xFC00..0xFFFF register window may be read this way. */
	if (addr < 0xFC00) {
		set_sense_type(chip, SCSI_LUN(srb),
			       SENSE_TYPE_MEDIA_INVALID_CMD_FIELD);
		return TRANSPORT_FAILED;
	}
	buf = vmalloc(len);
	if (!buf)
		return TRANSPORT_ERROR;
	retval = rtsx_force_power_on(chip, SSC_PDCTL);
	if (retval != STATUS_SUCCESS) {
		vfree(buf);
		/*
		 * NOTE(review): this read path reports WRITE_ERR on
		 * power-on failure while read_eeprom() reports
		 * UNRECOVER_READ_ERR - looks inconsistent; confirm before
		 * changing the user-visible sense code.
		 */
		set_sense_type(chip, SCSI_LUN(srb), SENSE_TYPE_MEDIA_WRITE_ERR);
		return TRANSPORT_FAILED;
	}
	for (i = 0; i < len; i++) {
		retval = rtsx_read_register(chip, addr + i, buf + i);
		if (retval != STATUS_SUCCESS) {
			vfree(buf);
			set_sense_type(chip, SCSI_LUN(srb),
				       SENSE_TYPE_MEDIA_UNRECOVER_READ_ERR);
			return TRANSPORT_FAILED;
		}
	}
	len = (unsigned short)min_t(unsigned int, scsi_bufflen(srb), len);
	rtsx_stor_set_xfer_buf(buf, len, srb);
	scsi_set_resid(srb, scsi_bufflen(srb) - len);
	vfree(buf);
	return TRANSPORT_GOOD;
}
/*
 * write_mem - vendor command: write chip registers in the 0xFC00+ window.
 *
 * Address and length come from CDB bytes 2/3 and 4/5 (big-endian).
 * Addresses below 0xFC00 are rejected; any power-on or register write
 * failure is reported as a media write error.
 */
static int write_mem(struct scsi_cmnd *srb, struct rtsx_chip *chip)
{
	unsigned short addr, len, i;
	int retval;
	u8 *buf;

	rtsx_disable_aspm(chip);

	if (chip->ss_en && rtsx_get_stat(chip) == RTSX_STAT_SS) {
		rtsx_exit_ss(chip);
		wait_timeout(100);
	}
	rtsx_set_stat(chip, RTSX_STAT_RUN);

	addr = ((u16)srb->cmnd[2] << 8) | srb->cmnd[3];
	len = ((u16)srb->cmnd[4] << 8) | srb->cmnd[5];

	/* Only the 0xFC00..0xFFFF register window may be written this way. */
	if (addr < 0xFC00) {
		set_sense_type(chip, SCSI_LUN(srb),
			       SENSE_TYPE_MEDIA_INVALID_CMD_FIELD);
		return TRANSPORT_FAILED;
	}

	len = (unsigned short)min_t(unsigned int, scsi_bufflen(srb), len);
	buf = vmalloc(len);
	if (!buf)
		return TRANSPORT_ERROR;

	rtsx_stor_get_xfer_buf(buf, len, srb);
	scsi_set_resid(srb, scsi_bufflen(srb) - len);

	retval = rtsx_force_power_on(chip, SSC_PDCTL);
	if (retval != STATUS_SUCCESS)
		goto write_err;

	for (i = 0; i < len; i++) {
		retval = rtsx_write_register(chip, addr + i, 0xFF, buf[i]);
		if (retval != STATUS_SUCCESS)
			goto write_err;
	}

	vfree(buf);
	return TRANSPORT_GOOD;

write_err:
	vfree(buf);
	set_sense_type(chip, SCSI_LUN(srb), SENSE_TYPE_MEDIA_WRITE_ERR);
	return TRANSPORT_FAILED;
}
static int get_sd_csd(struct scsi_cmnd *srb, struct rtsx_chip *chip)
{
struct sd_info *sd_card = &chip->sd_card;
unsigned int lun = SCSI_LUN(srb);
if (!check_card_ready(chip, lun)) {
set_sense_type(chip, lun, SENSE_TYPE_MEDIA_NOT_PRESENT);
return TRANSPORT_FAILED;
}
if (get_lun_card(chip, lun) != SD_CARD) {
set_sense_type(chip, lun, SENSE_TYPE_MEDIA_UNRECOVER_READ_ERR);
return TRANSPORT_FAILED;
}
scsi_set_resid(srb, 0);
rtsx_stor_set_xfer_buf(sd_card->raw_csd, scsi_bufflen(srb), srb);
return TRANSPORT_GOOD;
}
/*
 * TOGGLE_GPIO vendor command: wake the chip if it is suspended, then
 * toggle one of GPIO 0..3.  An out-of-range index falls back to GPIO 1.
 */
static int toggle_gpio_cmd(struct scsi_cmnd *srb, struct rtsx_chip *chip)
{
	u8 gpio_idx = srb->cmnd[2];

	rtsx_disable_aspm(chip);
	if (chip->ss_en && (rtsx_get_stat(chip) == RTSX_STAT_SS)) {
		rtsx_exit_ss(chip);
		wait_timeout(100);
	}
	rtsx_set_stat(chip, RTSX_STAT_RUN);

	if (gpio_idx > 3)
		gpio_idx = 1;
	toggle_gpio(chip, gpio_idx);

	return TRANSPORT_GOOD;
}
/*
 * READ_HOST_REG vendor command: read one 32-bit host register (address
 * in cmnd[4]) and return it big-endian in the data buffer.
 */
static int read_host_reg(struct scsi_cmnd *srb, struct rtsx_chip *chip)
{
	u8 reg_addr, data[4];
	u32 value;
	unsigned int xfer_len;
	int i;

	rtsx_disable_aspm(chip);
	if (chip->ss_en && (rtsx_get_stat(chip) == RTSX_STAT_SS)) {
		rtsx_exit_ss(chip);
		wait_timeout(100);
	}
	rtsx_set_stat(chip, RTSX_STAT_RUN);

	reg_addr = srb->cmnd[4];
	value = rtsx_readl(chip, reg_addr);
	dev_dbg(rtsx_dev(chip), "Host register (0x%x): 0x%x\n", reg_addr,
		value);

	/* Serialize the register value big-endian, MSB first */
	for (i = 0; i < 4; i++)
		data[i] = (u8)(value >> (24 - 8 * i));

	xfer_len = min_t(unsigned int, scsi_bufflen(srb), 4);
	rtsx_stor_set_xfer_buf(data, xfer_len, srb);
	scsi_set_resid(srb, scsi_bufflen(srb) - xfer_len);

	return TRANSPORT_GOOD;
}
/*
 * WRITE_HOST_REG vendor command: take up to four payload bytes
 * (big-endian) and write them as a 32-bit value to the host register
 * addressed by cmnd[4].
 */
static int write_host_reg(struct scsi_cmnd *srb, struct rtsx_chip *chip)
{
	u8 reg_addr, data[4];
	u32 value;
	unsigned int xfer_len;
	int i;

	rtsx_disable_aspm(chip);
	if (chip->ss_en && (rtsx_get_stat(chip) == RTSX_STAT_SS)) {
		rtsx_exit_ss(chip);
		wait_timeout(100);
	}
	rtsx_set_stat(chip, RTSX_STAT_RUN);

	reg_addr = srb->cmnd[4];

	xfer_len = min_t(unsigned int, scsi_bufflen(srb), 4);
	rtsx_stor_get_xfer_buf(data, xfer_len, srb);
	scsi_set_resid(srb, scsi_bufflen(srb) - xfer_len);

	/* Reassemble the big-endian payload into a 32-bit value */
	value = 0;
	for (i = 0; i < 4; i++)
		value = (value << 8) | data[i];

	rtsx_writel(chip, reg_addr, value);

	return TRANSPORT_GOOD;
}
/*
 * SET_VAR vendor command.
 *
 * cmnd[3] == 1: set the card clock for the slot named in cmnd[4]
 *               (XD/SD/MS) to the value in cmnd[5].
 * cmnd[3] == 2: enable (cmnd[4] != 0) or disable LED blinking; when
 *               disabling, the LED GPIO is also actively turned off.
 */
static int set_variable(struct scsi_cmnd *srb, struct rtsx_chip *chip)
{
	unsigned int lun = SCSI_LUN(srb);
	if (srb->cmnd[3] == 1) {
		/* Variable Clock */
		struct xd_info *xd_card = &chip->xd_card;
		struct sd_info *sd_card = &chip->sd_card;
		struct ms_info *ms_card = &chip->ms_card;
		switch (srb->cmnd[4]) {
		case XD_CARD:
			xd_card->xd_clock = srb->cmnd[5];
			break;
		case SD_CARD:
			sd_card->sd_clock = srb->cmnd[5];
			break;
		case MS_CARD:
			ms_card->ms_clock = srb->cmnd[5];
			break;
		default:
			set_sense_type(chip, lun,
				       SENSE_TYPE_MEDIA_INVALID_CMD_FIELD);
			return TRANSPORT_FAILED;
		}
	} else if (srb->cmnd[3] == 2) {
		if (srb->cmnd[4]) {
			chip->blink_led = 1;
		} else {
			int retval;
			chip->blink_led = 0;
			/* Turning the LED off touches hardware, so make
			 * sure the chip is awake and powered first.
			 */
			rtsx_disable_aspm(chip);
			if (chip->ss_en &&
			    (rtsx_get_stat(chip) == RTSX_STAT_SS)) {
				rtsx_exit_ss(chip);
				wait_timeout(100);
			}
			rtsx_set_stat(chip, RTSX_STAT_RUN);
			retval = rtsx_force_power_on(chip, SSC_PDCTL);
			if (retval != STATUS_SUCCESS) {
				set_sense_type(chip, SCSI_LUN(srb),
					       SENSE_TYPE_MEDIA_WRITE_ERR);
				return TRANSPORT_FAILED;
			}
			turn_off_led(chip, LED_GPIO);
		}
	} else {
		set_sense_type(chip, lun, SENSE_TYPE_MEDIA_INVALID_CMD_FIELD);
		return TRANSPORT_FAILED;
	}
	return TRANSPORT_GOOD;
}
static int get_variable(struct scsi_cmnd *srb, struct rtsx_chip *chip)
{
unsigned int lun = SCSI_LUN(srb);
if (srb->cmnd[3] == 1) {
struct xd_info *xd_card = &chip->xd_card;
struct sd_info *sd_card = &chip->sd_card;
struct ms_info *ms_card = &chip->ms_card;
u8 tmp;
switch (srb->cmnd[4]) {
case XD_CARD:
tmp = (u8)(xd_card->xd_clock);
break;
case SD_CARD:
tmp = (u8)(sd_card->sd_clock);
break;
case MS_CARD:
tmp = (u8)(ms_card->ms_clock);
break;
default:
set_sense_type(chip, lun,
SENSE_TYPE_MEDIA_INVALID_CMD_FIELD);
return TRANSPORT_FAILED;
}
rtsx_stor_set_xfer_buf(&tmp, 1, srb);
} else if (srb->cmnd[3] == 2) {
u8 tmp = chip->blink_led;
rtsx_stor_set_xfer_buf(&tmp, 1, srb);
} else {
set_sense_type(chip, lun, SENSE_TYPE_MEDIA_INVALID_CMD_FIELD);
return TRANSPORT_FAILED;
}
return TRANSPORT_GOOD;
}
/*
 * DMA_READ/DMA_WRITE vendor command: move up to cmnd[4..5] (big-endian)
 * bytes between the host and the chip's ring buffer via DMA, in the
 * direction given by the SCSI command itself.
 */
static int dma_access_ring_buffer(struct scsi_cmnd *srb, struct rtsx_chip *chip)
{
	int retval;
	unsigned int lun = SCSI_LUN(srb);
	u16 len;
	rtsx_disable_aspm(chip);
	if (chip->ss_en && (rtsx_get_stat(chip) == RTSX_STAT_SS)) {
		rtsx_exit_ss(chip);
		wait_timeout(100);
	}
	rtsx_set_stat(chip, RTSX_STAT_RUN);
	len = ((u16)(srb->cmnd[4]) << 8) | srb->cmnd[5];
	/* Never transfer more than the initiator's buffer holds */
	len = min_t(u16, len, scsi_bufflen(srb));
	if (srb->sc_data_direction == DMA_FROM_DEVICE)
		dev_dbg(rtsx_dev(chip), "Read from device\n");
	else
		dev_dbg(rtsx_dev(chip), "Write to device\n");
	/* Scatter-gather DMA against ring-buffer address 0, 1s timeout */
	retval = rtsx_transfer_data(chip, 0, scsi_sglist(srb), len,
				    scsi_sg_count(srb), srb->sc_data_direction,
				    1000);
	if (retval < 0) {
		if (srb->sc_data_direction == DMA_FROM_DEVICE)
			set_sense_type(chip, lun,
				       SENSE_TYPE_MEDIA_UNRECOVER_READ_ERR);
		else
			set_sense_type(chip, lun,
				       SENSE_TYPE_MEDIA_WRITE_ERR);
		return TRANSPORT_FAILED;
	}
	scsi_set_resid(srb, 0);
	return TRANSPORT_GOOD;
}
/*
 * GET_DEV_STATUS vendor command: build a 32-byte status block
 * describing the chip, over-current state and the card behind this
 * LUN, and copy as much of it as fits into the SCSI data buffer.
 */
static int get_dev_status(struct scsi_cmnd *srb, struct rtsx_chip *chip)
{
	struct sd_info *sd_card = &chip->sd_card;
	struct ms_info *ms_card = &chip->ms_card;
	int buf_len;
	unsigned int lun = SCSI_LUN(srb);
	u8 card = get_lun_card(chip, lun);
	u8 status[32];
#ifdef SUPPORT_OCP
	u8 oc_now_mask = 0, oc_ever_mask = 0;
#endif
	memset(status, 0, 32);
	/* Bytes 0-2: product id, IC version, auto-delink flag */
	status[0] = (u8)(chip->product_id);
	status[1] = chip->ic_version;
	if (chip->auto_delink_en)
		status[2] = 0x10;
	else
		status[2] = 0x00;
	/* Bytes 3-6: fixed version/date stamp expected by the host tool */
	status[3] = 20;
	status[4] = 10;
	status[5] = 05;
	status[6] = 21;
	/* Byte 7: write-protect flag */
	if (chip->card_wp)
		status[7] = 0x20;
	else
		status[7] = 0x00;
#ifdef SUPPORT_OCP
	/* Byte 8: over-current state, using the mask pair that matches
	 * the card type mapped to this LUN.
	 */
	status[8] = 0;
	if (CHECK_LUN_MODE(chip, SD_MS_2LUN) &&
	    chip->lun2card[lun] == MS_CARD) {
		oc_now_mask = MS_OC_NOW;
		oc_ever_mask = MS_OC_EVER;
	} else {
		oc_now_mask = SD_OC_NOW;
		oc_ever_mask = SD_OC_EVER;
	}
	if (chip->ocp_stat & oc_now_mask)
		status[8] |= 0x02;
	if (chip->ocp_stat & oc_ever_mask)
		status[8] |= 0x01;
#endif
	/* Bytes 0x0E/0x0F: capacity class and bus-speed grade of the card */
	if (card == SD_CARD) {
		if (CHK_SD(sd_card)) {
			if (CHK_SD_HCXC(sd_card)) {
				if (sd_card->capacity > 0x4000000)
					status[0x0E] = 0x02;
				else
					status[0x0E] = 0x01;
			} else {
				status[0x0E] = 0x00;
			}
			if (CHK_SD_SDR104(sd_card))
				status[0x0F] = 0x03;
			else if (CHK_SD_DDR50(sd_card))
				status[0x0F] = 0x04;
			else if (CHK_SD_SDR50(sd_card))
				status[0x0F] = 0x02;
			else if (CHK_SD_HS(sd_card))
				status[0x0F] = 0x01;
			else
				status[0x0F] = 0x00;
		} else {
			/* MMC variant of the SD slot */
			if (CHK_MMC_SECTOR_MODE(sd_card))
				status[0x0E] = 0x01;
			else
				status[0x0E] = 0x00;
			if (CHK_MMC_DDR52(sd_card))
				status[0x0F] = 0x03;
			else if (CHK_MMC_52M(sd_card))
				status[0x0F] = 0x02;
			else if (CHK_MMC_26M(sd_card))
				status[0x0F] = 0x01;
			else
				status[0x0F] = 0x00;
		}
	} else if (card == MS_CARD) {
		if (CHK_MSPRO(ms_card)) {
			if (CHK_MSXC(ms_card))
				status[0x0E] = 0x01;
			else
				status[0x0E] = 0x00;
			if (CHK_HG8BIT(ms_card))
				status[0x0F] = 0x01;
			else
				status[0x0F] = 0x00;
		}
	}
#ifdef SUPPORT_SD_LOCK
	/* Byte 0x17: SD lock/erase/password status bits */
	if (card == SD_CARD) {
		status[0x17] = 0x80;
		if (sd_card->sd_erase_status)
			status[0x17] |= 0x01;
		if (sd_card->sd_lock_status & SD_LOCKED) {
			status[0x17] |= 0x02;
			status[0x07] |= 0x40;
		}
		if (sd_card->sd_lock_status & SD_PWD_EXIST)
			status[0x17] |= 0x04;
	} else {
		status[0x17] = 0x00;
	}
	dev_dbg(rtsx_dev(chip), "status[0x17] = 0x%x\n", status[0x17]);
#endif
	/* Fixed capability bytes */
	status[0x18] = 0x8A;
	status[0x1A] = 0x28;
#ifdef SUPPORT_SD_LOCK
	status[0x1F] = 0x01;
#endif
	buf_len = min_t(unsigned int, scsi_bufflen(srb), sizeof(status));
	rtsx_stor_set_xfer_buf(status, buf_len, srb);
	scsi_set_resid(srb, scsi_bufflen(srb) - buf_len);
	return TRANSPORT_GOOD;
}
/*
 * SET_CHIP_MODE vendor command (RTS5208 only): enter or leave PHY debug
 * mode.  Entering disables card-detect resume and bus interrupts and
 * sets bit 0 of PHY register 0x1C; leaving reverses all three.
 *
 * Fix: the "&reg" arguments to rtsx_read_phy_register() had been
 * corrupted into the mojibake character "®" (an HTML "&reg;" entity
 * round-trip), which does not compile.  Restored the address-of
 * expression.
 */
static int set_chip_mode(struct scsi_cmnd *srb, struct rtsx_chip *chip)
{
	int phy_debug_mode;
	int retval;
	u16 reg;

	/* Only the RTS5208 supports this command */
	if (!CHECK_PID(chip, 0x5208)) {
		set_sense_type(chip, SCSI_LUN(srb),
			       SENSE_TYPE_MEDIA_INVALID_CMD_FIELD);
		return TRANSPORT_FAILED;
	}

	phy_debug_mode = (int)(srb->cmnd[3]);
	if (phy_debug_mode) {
		chip->phy_debug_mode = 1;
		retval = rtsx_write_register(chip, CDRESUMECTL, 0x77, 0);
		if (retval != STATUS_SUCCESS)
			return TRANSPORT_FAILED;
		rtsx_disable_bus_int(chip);
		/* Read-modify-write PHY reg 0x1C: set the debug bit */
		retval = rtsx_read_phy_register(chip, 0x1C, &reg);
		if (retval != STATUS_SUCCESS)
			return TRANSPORT_FAILED;
		reg |= 0x0001;
		retval = rtsx_write_phy_register(chip, 0x1C, reg);
		if (retval != STATUS_SUCCESS)
			return TRANSPORT_FAILED;
	} else {
		chip->phy_debug_mode = 0;
		retval = rtsx_write_register(chip, CDRESUMECTL, 0x77, 0x77);
		if (retval != STATUS_SUCCESS)
			return TRANSPORT_FAILED;
		rtsx_enable_bus_int(chip);
		/* Read-modify-write PHY reg 0x1C: clear the debug bit */
		retval = rtsx_read_phy_register(chip, 0x1C, &reg);
		if (retval != STATUS_SUCCESS)
			return TRANSPORT_FAILED;
		reg &= 0xFFFE;
		retval = rtsx_write_phy_register(chip, 0x1C, reg);
		if (retval != STATUS_SUCCESS)
			return TRANSPORT_FAILED;
	}

	return TRANSPORT_GOOD;
}
/*
 * Batch register-command interface (cmnd[3] selects the operation):
 *   INIT_BATCHCMD  - reset the chip's internal command queue
 *   ADD_BATCHCMD   - append one command (type, addr, mask, value)
 *   SEND_BATCHCMD  - execute the queued commands (1s timeout)
 *   GET_BATCHRSP   - return one byte of the command response buffer
 */
static int rw_mem_cmd_buf(struct scsi_cmnd *srb, struct rtsx_chip *chip)
{
	int retval = STATUS_SUCCESS;
	unsigned int lun = SCSI_LUN(srb);
	u8 cmd_type, mask, value, idx;
	u16 addr;
	rtsx_disable_aspm(chip);
	if (chip->ss_en && (rtsx_get_stat(chip) == RTSX_STAT_SS)) {
		rtsx_exit_ss(chip);
		wait_timeout(100);
	}
	rtsx_set_stat(chip, RTSX_STAT_RUN);
	switch (srb->cmnd[3]) {
	case INIT_BATCHCMD:
		rtsx_init_cmd(chip);
		break;
	case ADD_BATCHCMD:
		cmd_type = srb->cmnd[4];
		/* Only command types 0..2 are defined */
		if (cmd_type > 2) {
			set_sense_type(chip, lun,
				       SENSE_TYPE_MEDIA_INVALID_CMD_FIELD);
			return TRANSPORT_FAILED;
		}
		addr = (srb->cmnd[5] << 8) | srb->cmnd[6];
		mask = srb->cmnd[7];
		value = srb->cmnd[8];
		rtsx_add_cmd(chip, cmd_type, addr, mask, value);
		break;
	case SEND_BATCHCMD:
		retval = rtsx_send_cmd(chip, 0, 1000);
		break;
	case GET_BATCHRSP:
		idx = srb->cmnd[4];
		value = *(rtsx_get_cmd_data(chip) + idx);
		if (scsi_bufflen(srb) < 1) {
			set_sense_type(chip, lun,
				       SENSE_TYPE_MEDIA_INVALID_CMD_FIELD);
			return TRANSPORT_FAILED;
		}
		rtsx_stor_set_xfer_buf(&value, 1, srb);
		scsi_set_resid(srb, 0);
		break;
	default:
		set_sense_type(chip, lun, SENSE_TYPE_MEDIA_INVALID_CMD_FIELD);
		return TRANSPORT_FAILED;
	}
	/* retval only changes for SEND_BATCHCMD; others fall through OK */
	if (retval != STATUS_SUCCESS) {
		set_sense_type(chip, lun, SENSE_TYPE_MEDIA_WRITE_ERR);
		return TRANSPORT_FAILED;
	}
	return TRANSPORT_GOOD;
}
static int suit_cmd(struct scsi_cmnd *srb, struct rtsx_chip *chip)
{
switch (srb->cmnd[3]) {
case INIT_BATCHCMD:
case ADD_BATCHCMD:
case SEND_BATCHCMD:
case GET_BATCHRSP:
return rw_mem_cmd_buf(srb, chip);
default:
return TRANSPORT_ERROR;
}
}
/*
 * READ_PHY vendor command: read a range of 16-bit PHY registers.
 *
 * cmnd[4..5]: big-endian start PHY register address
 * cmnd[6..7]: big-endian byte count (rounded down to an even number,
 *             since each PHY register is two bytes)
 *
 * Values are returned big-endian.  A zero length is a silent no-op.
 */
static int read_phy_register(struct scsi_cmnd *srb, struct rtsx_chip *chip)
{
	unsigned short addr, len, i;
	int retval;
	u8 *buf;
	u16 val;
	rtsx_disable_aspm(chip);
	if (chip->ss_en && (rtsx_get_stat(chip) == RTSX_STAT_SS)) {
		rtsx_exit_ss(chip);
		wait_timeout(100);
	}
	rtsx_set_stat(chip, RTSX_STAT_RUN);
	addr = ((u16)srb->cmnd[4] << 8) | srb->cmnd[5];
	len = ((u16)srb->cmnd[6] << 8) | srb->cmnd[7];
	/* PHY registers are 16 bits wide; drop any odd trailing byte */
	if (len % 2)
		len -= len % 2;
	if (len) {
		buf = vmalloc(len);
		if (!buf)
			return TRANSPORT_ERROR;
		retval = rtsx_force_power_on(chip, SSC_PDCTL);
		if (retval != STATUS_SUCCESS) {
			vfree(buf);
			set_sense_type(chip, SCSI_LUN(srb),
				       SENSE_TYPE_MEDIA_UNRECOVER_READ_ERR);
			return TRANSPORT_FAILED;
		}
		/* One 16-bit read per register, stored big-endian */
		for (i = 0; i < len / 2; i++) {
			retval = rtsx_read_phy_register(chip, addr + i, &val);
			if (retval != STATUS_SUCCESS) {
				vfree(buf);
				set_sense_type
					(chip, SCSI_LUN(srb),
					 SENSE_TYPE_MEDIA_UNRECOVER_READ_ERR);
				return TRANSPORT_FAILED;
			}
			buf[2 * i] = (u8)(val >> 8);
			buf[2 * i + 1] = (u8)val;
		}
		len = (unsigned short)min_t(unsigned int, scsi_bufflen(srb),
					    len);
		rtsx_stor_set_xfer_buf(buf, len, srb);
		scsi_set_resid(srb, scsi_bufflen(srb) - len);
		vfree(buf);
	}
	return TRANSPORT_GOOD;
}
/*
 * WRITE_PHY vendor command: write a range of 16-bit PHY registers.
 *
 * cmnd[4..5]: big-endian start PHY register address
 * cmnd[6..7]: big-endian byte count (rounded down to an even number)
 *
 * Payload bytes are consumed big-endian, two per register.  A zero
 * length is a silent no-op.
 */
static int write_phy_register(struct scsi_cmnd *srb, struct rtsx_chip *chip)
{
	unsigned short addr, len, i;
	int retval;
	u8 *buf;
	u16 val;
	rtsx_disable_aspm(chip);
	if (chip->ss_en && (rtsx_get_stat(chip) == RTSX_STAT_SS)) {
		rtsx_exit_ss(chip);
		wait_timeout(100);
	}
	rtsx_set_stat(chip, RTSX_STAT_RUN);
	addr = ((u16)srb->cmnd[4] << 8) | srb->cmnd[5];
	len = ((u16)srb->cmnd[6] << 8) | srb->cmnd[7];
	/* PHY registers are 16 bits wide; drop any odd trailing byte */
	if (len % 2)
		len -= len % 2;
	if (len) {
		len = (unsigned short)min_t(unsigned int, scsi_bufflen(srb),
					    len);
		buf = vmalloc(len);
		if (!buf)
			return TRANSPORT_ERROR;
		rtsx_stor_get_xfer_buf(buf, len, srb);
		scsi_set_resid(srb, scsi_bufflen(srb) - len);
		retval = rtsx_force_power_on(chip, SSC_PDCTL);
		if (retval != STATUS_SUCCESS) {
			vfree(buf);
			set_sense_type(chip, SCSI_LUN(srb),
				       SENSE_TYPE_MEDIA_WRITE_ERR);
			return TRANSPORT_FAILED;
		}
		/* One 16-bit write per register pair of payload bytes */
		for (i = 0; i < len / 2; i++) {
			val = ((u16)buf[2 * i] << 8) | buf[2 * i + 1];
			retval = rtsx_write_phy_register(chip, addr + i, val);
			if (retval != STATUS_SUCCESS) {
				vfree(buf);
				set_sense_type(chip, SCSI_LUN(srb),
					       SENSE_TYPE_MEDIA_WRITE_ERR);
				return TRANSPORT_FAILED;
			}
		}
		vfree(buf);
	}
	return TRANSPORT_GOOD;
}
/*
 * ERASE_EEPROM2 vendor command.
 *
 * cmnd[3] selects the erase mode: 0 = whole chip, 1 = single byte at
 * the big-endian address in cmnd[4..5].  Any other mode is rejected.
 */
static int erase_eeprom2(struct scsi_cmnd *srb, struct rtsx_chip *chip)
{
	unsigned short ee_addr;
	int retval;
	u8 erase_mode;

	rtsx_disable_aspm(chip);
	if (chip->ss_en && (rtsx_get_stat(chip) == RTSX_STAT_SS)) {
		rtsx_exit_ss(chip);
		wait_timeout(100);
	}
	rtsx_set_stat(chip, RTSX_STAT_RUN);

	retval = rtsx_force_power_on(chip, SSC_PDCTL);
	if (retval != STATUS_SUCCESS) {
		set_sense_type(chip, SCSI_LUN(srb), SENSE_TYPE_MEDIA_WRITE_ERR);
		return TRANSPORT_FAILED;
	}

	erase_mode = srb->cmnd[3];
	ee_addr = ((u16)srb->cmnd[4] << 8) | srb->cmnd[5];

	switch (erase_mode) {
	case 0:
		retval = spi_erase_eeprom_chip(chip);
		break;
	case 1:
		retval = spi_erase_eeprom_byte(chip, ee_addr);
		break;
	default:
		set_sense_type(chip, SCSI_LUN(srb),
			       SENSE_TYPE_MEDIA_INVALID_CMD_FIELD);
		return TRANSPORT_FAILED;
	}

	if (retval != STATUS_SUCCESS) {
		set_sense_type(chip, SCSI_LUN(srb), SENSE_TYPE_MEDIA_WRITE_ERR);
		return TRANSPORT_FAILED;
	}

	return TRANSPORT_GOOD;
}
/*
 * READ_EEPROM2 vendor command: read cmnd[6..7] (big-endian) bytes of
 * SPI EEPROM starting at the big-endian address in cmnd[4..5] and
 * return them in the SCSI data buffer.
 */
static int read_eeprom2(struct scsi_cmnd *srb, struct rtsx_chip *chip)
{
	unsigned short addr, len, i;
	int retval;
	u8 *buf;
	rtsx_disable_aspm(chip);
	if (chip->ss_en && (rtsx_get_stat(chip) == RTSX_STAT_SS)) {
		rtsx_exit_ss(chip);
		wait_timeout(100);
	}
	rtsx_set_stat(chip, RTSX_STAT_RUN);
	addr = ((u16)srb->cmnd[4] << 8) | srb->cmnd[5];
	len = ((u16)srb->cmnd[6] << 8) | srb->cmnd[7];
	buf = vmalloc(len);
	if (!buf)
		return TRANSPORT_ERROR;
	retval = rtsx_force_power_on(chip, SSC_PDCTL);
	if (retval != STATUS_SUCCESS) {
		vfree(buf);
		set_sense_type(chip, SCSI_LUN(srb),
			       SENSE_TYPE_MEDIA_UNRECOVER_READ_ERR);
		return TRANSPORT_FAILED;
	}
	/* One SPI read per byte; abort on the first failure */
	for (i = 0; i < len; i++) {
		retval = spi_read_eeprom(chip, addr + i, buf + i);
		if (retval != STATUS_SUCCESS) {
			vfree(buf);
			set_sense_type(chip, SCSI_LUN(srb),
				       SENSE_TYPE_MEDIA_UNRECOVER_READ_ERR);
			return TRANSPORT_FAILED;
		}
	}
	/* Return no more than the initiator's buffer can take */
	len = (unsigned short)min_t(unsigned int, scsi_bufflen(srb), len);
	rtsx_stor_set_xfer_buf(buf, len, srb);
	scsi_set_resid(srb, scsi_bufflen(srb) - len);
	vfree(buf);
	return TRANSPORT_GOOD;
}
/*
 * WRITE_EEPROM2 vendor command: write the SCSI payload byte-by-byte to
 * the SPI EEPROM starting at the big-endian address in cmnd[4..5];
 * cmnd[6..7] gives the requested length, clamped to the buffer size.
 */
static int write_eeprom2(struct scsi_cmnd *srb, struct rtsx_chip *chip)
{
	unsigned short addr, len, i;
	int retval;
	u8 *buf;
	rtsx_disable_aspm(chip);
	if (chip->ss_en && (rtsx_get_stat(chip) == RTSX_STAT_SS)) {
		rtsx_exit_ss(chip);
		wait_timeout(100);
	}
	rtsx_set_stat(chip, RTSX_STAT_RUN);
	addr = ((u16)srb->cmnd[4] << 8) | srb->cmnd[5];
	len = ((u16)srb->cmnd[6] << 8) | srb->cmnd[7];
	len = (unsigned short)min_t(unsigned int, scsi_bufflen(srb), len);
	buf = vmalloc(len);
	if (!buf)
		return TRANSPORT_ERROR;
	rtsx_stor_get_xfer_buf(buf, len, srb);
	scsi_set_resid(srb, scsi_bufflen(srb) - len);
	retval = rtsx_force_power_on(chip, SSC_PDCTL);
	if (retval != STATUS_SUCCESS) {
		vfree(buf);
		set_sense_type(chip, SCSI_LUN(srb), SENSE_TYPE_MEDIA_WRITE_ERR);
		return TRANSPORT_FAILED;
	}
	/* One SPI write per byte; abort on the first failure */
	for (i = 0; i < len; i++) {
		retval = spi_write_eeprom(chip, addr + i, buf[i]);
		if (retval != STATUS_SUCCESS) {
			vfree(buf);
			set_sense_type(chip, SCSI_LUN(srb),
				       SENSE_TYPE_MEDIA_WRITE_ERR);
			return TRANSPORT_FAILED;
		}
	}
	vfree(buf);
	return TRANSPORT_GOOD;
}
/*
 * READ_EFUSE vendor command: read cmnd[5] bytes of eFuse starting at
 * the address in cmnd[4] and return them in the SCSI data buffer.
 */
static int read_efuse(struct scsi_cmnd *srb, struct rtsx_chip *chip)
{
	int retval;
	u8 addr, len, i;
	u8 *buf;
	rtsx_disable_aspm(chip);
	if (chip->ss_en && (rtsx_get_stat(chip) == RTSX_STAT_SS)) {
		rtsx_exit_ss(chip);
		wait_timeout(100);
	}
	rtsx_set_stat(chip, RTSX_STAT_RUN);
	addr = srb->cmnd[4];
	len = srb->cmnd[5];
	buf = vmalloc(len);
	if (!buf)
		return TRANSPORT_ERROR;
	retval = rtsx_force_power_on(chip, SSC_PDCTL);
	if (retval != STATUS_SUCCESS) {
		vfree(buf);
		set_sense_type(chip, SCSI_LUN(srb),
			       SENSE_TYPE_MEDIA_UNRECOVER_READ_ERR);
		return TRANSPORT_FAILED;
	}
	/* One eFuse read per byte; abort on the first failure */
	for (i = 0; i < len; i++) {
		retval = rtsx_read_efuse(chip, addr + i, buf + i);
		if (retval != STATUS_SUCCESS) {
			vfree(buf);
			set_sense_type(chip, SCSI_LUN(srb),
				       SENSE_TYPE_MEDIA_UNRECOVER_READ_ERR);
			return TRANSPORT_FAILED;
		}
	}
	/* Return no more than the initiator's buffer can take */
	len = (u8)min_t(unsigned int, scsi_bufflen(srb), len);
	rtsx_stor_set_xfer_buf(buf, len, srb);
	scsi_set_resid(srb, scsi_bufflen(srb) - len);
	vfree(buf);
	return TRANSPORT_GOOD;
}
/*
 * WRITE_EFUSE vendor command: burn the SCSI payload into the eFuse
 * array starting at cmnd[4], length cmnd[5] (clamped to buffer size).
 *
 * On ASIC silicon the eFuse supply is derived from the LDO: the PHY
 * voltage is temporarily raised (PHY reg 0x08) around an LDO off/on
 * cycle before programming, and restored the same way afterwards.  The
 * restore path runs even when programming fails, so the chip is always
 * left in its original power state.
 */
static int write_efuse(struct scsi_cmnd *srb, struct rtsx_chip *chip)
{
	int retval, result = TRANSPORT_GOOD;
	u16 val;
	u8 addr, len, i;
	u8 *buf;
	rtsx_disable_aspm(chip);
	if (chip->ss_en && (rtsx_get_stat(chip) == RTSX_STAT_SS)) {
		rtsx_exit_ss(chip);
		wait_timeout(100);
	}
	rtsx_set_stat(chip, RTSX_STAT_RUN);
	addr = srb->cmnd[4];
	len = srb->cmnd[5];
	len = (u8)min_t(unsigned int, scsi_bufflen(srb), len);
	buf = vmalloc(len);
	if (!buf)
		return TRANSPORT_ERROR;
	rtsx_stor_get_xfer_buf(buf, len, srb);
	scsi_set_resid(srb, scsi_bufflen(srb) - len);
	retval = rtsx_force_power_on(chip, SSC_PDCTL);
	if (retval != STATUS_SUCCESS) {
		vfree(buf);
		return TRANSPORT_ERROR;
	}
	if (chip->asic_code) {
		/* Save the current PHY voltage setting for restore below */
		retval = rtsx_read_phy_register(chip, 0x08, &val);
		if (retval != STATUS_SUCCESS) {
			vfree(buf);
			return TRANSPORT_ERROR;
		}
		retval = rtsx_write_register(chip, PWR_GATE_CTRL,
					     LDO3318_PWR_MASK, LDO_OFF);
		if (retval != STATUS_SUCCESS) {
			vfree(buf);
			return TRANSPORT_ERROR;
		}
		wait_timeout(600);
		/* Raise PHY voltage for eFuse programming */
		retval = rtsx_write_phy_register(chip, 0x08,
						 0x4C00 | chip->phy_voltage);
		if (retval != STATUS_SUCCESS) {
			vfree(buf);
			return TRANSPORT_ERROR;
		}
		retval = rtsx_write_register(chip, PWR_GATE_CTRL,
					     LDO3318_PWR_MASK, LDO_ON);
		if (retval != STATUS_SUCCESS) {
			vfree(buf);
			return TRANSPORT_ERROR;
		}
		wait_timeout(600);
	}
	retval = card_power_on(chip, SPI_CARD);
	if (retval != STATUS_SUCCESS) {
		vfree(buf);
		return TRANSPORT_ERROR;
	}
	wait_timeout(50);
	/* Program one byte at a time; on failure fall through to the
	 * power-restore path with a FAILED result.
	 */
	for (i = 0; i < len; i++) {
		retval = rtsx_write_efuse(chip, addr + i, buf[i]);
		if (retval != STATUS_SUCCESS) {
			set_sense_type(chip, SCSI_LUN(srb),
				       SENSE_TYPE_MEDIA_WRITE_ERR);
			result = TRANSPORT_FAILED;
			goto exit;
		}
	}
exit:
	vfree(buf);
	retval = card_power_off(chip, SPI_CARD);
	if (retval != STATUS_SUCCESS)
		return TRANSPORT_ERROR;
	if (chip->asic_code) {
		/* Restore the saved PHY voltage via an LDO off/on cycle */
		retval = rtsx_write_register(chip, PWR_GATE_CTRL,
					     LDO3318_PWR_MASK, LDO_OFF);
		if (retval != STATUS_SUCCESS)
			return TRANSPORT_ERROR;
		wait_timeout(600);
		retval = rtsx_write_phy_register(chip, 0x08, val);
		if (retval != STATUS_SUCCESS)
			return TRANSPORT_ERROR;
		retval = rtsx_write_register(chip, PWR_GATE_CTRL,
					     LDO3318_PWR_MASK, LDO_ON);
		if (retval != STATUS_SUCCESS)
			return TRANSPORT_ERROR;
	}
	return result;
}
/*
 * READ_CFG vendor command: read cmnd[6..7] (big-endian) bytes of PCI
 * configuration space of function cmnd[3], starting at the big-endian
 * offset in cmnd[4..5].  Function 1 is only valid when an SDIO device
 * is present and not being ignored.
 */
static int read_cfg_byte(struct scsi_cmnd *srb, struct rtsx_chip *chip)
{
	int retval;
	bool func_max;
	u8 func;
	u16 addr, len;
	u8 *buf;
	rtsx_disable_aspm(chip);
	if (chip->ss_en && (rtsx_get_stat(chip) == RTSX_STAT_SS)) {
		rtsx_exit_ss(chip);
		wait_timeout(100);
	}
	rtsx_set_stat(chip, RTSX_STAT_RUN);
	func = srb->cmnd[3];
	addr = ((u16)(srb->cmnd[4]) << 8) | srb->cmnd[5];
	len = ((u16)(srb->cmnd[6]) << 8) | srb->cmnd[7];
	dev_dbg(rtsx_dev(chip), "%s: func = %d, addr = 0x%x, len = %d\n",
		__func__, func, addr, len);
	/* func_max promotes to 1 (SDIO present) or 0 in the comparison */
	if (CHK_SDIO_EXIST(chip) && !CHK_SDIO_IGNORED(chip))
		func_max = true;
	else
		func_max = false;
	if (func > func_max) {
		set_sense_type(chip, SCSI_LUN(srb),
			       SENSE_TYPE_MEDIA_INVALID_CMD_FIELD);
		return TRANSPORT_FAILED;
	}
	buf = vmalloc(len);
	if (!buf)
		return TRANSPORT_ERROR;
	retval = rtsx_read_cfg_seq(chip, func, addr, buf, len);
	if (retval != STATUS_SUCCESS) {
		set_sense_type(chip, SCSI_LUN(srb),
			       SENSE_TYPE_MEDIA_UNRECOVER_READ_ERR);
		vfree(buf);
		return TRANSPORT_FAILED;
	}
	/* Return no more than the initiator's buffer can take */
	len = (u16)min_t(unsigned int, scsi_bufflen(srb), len);
	rtsx_stor_set_xfer_buf(buf, len, srb);
	scsi_set_resid(srb, scsi_bufflen(srb) - len);
	vfree(buf);
	return TRANSPORT_GOOD;
}
/*
 * WRITE_CFG vendor command: write the SCSI payload into PCI
 * configuration space of function cmnd[3] at the big-endian offset in
 * cmnd[4..5]; cmnd[6..7] gives the requested length, clamped to the
 * buffer size.  Function 1 requires a present, non-ignored SDIO device.
 */
static int write_cfg_byte(struct scsi_cmnd *srb, struct rtsx_chip *chip)
{
	int retval;
	bool func_max;
	u8 func;
	u16 addr, len;
	u8 *buf;
	rtsx_disable_aspm(chip);
	if (chip->ss_en && (rtsx_get_stat(chip) == RTSX_STAT_SS)) {
		rtsx_exit_ss(chip);
		wait_timeout(100);
	}
	rtsx_set_stat(chip, RTSX_STAT_RUN);
	func = srb->cmnd[3];
	addr = ((u16)(srb->cmnd[4]) << 8) | srb->cmnd[5];
	len = ((u16)(srb->cmnd[6]) << 8) | srb->cmnd[7];
	dev_dbg(rtsx_dev(chip), "%s: func = %d, addr = 0x%x\n",
		__func__, func, addr);
	/* func_max promotes to 1 (SDIO present) or 0 in the comparison */
	if (CHK_SDIO_EXIST(chip) && !CHK_SDIO_IGNORED(chip))
		func_max = true;
	else
		func_max = false;
	if (func > func_max) {
		set_sense_type(chip, SCSI_LUN(srb),
			       SENSE_TYPE_MEDIA_INVALID_CMD_FIELD);
		return TRANSPORT_FAILED;
	}
	len = (unsigned short)min_t(unsigned int, scsi_bufflen(srb), len);
	buf = vmalloc(len);
	if (!buf)
		return TRANSPORT_ERROR;
	rtsx_stor_get_xfer_buf(buf, len, srb);
	scsi_set_resid(srb, scsi_bufflen(srb) - len);
	retval = rtsx_write_cfg_seq(chip, func, addr, buf, len);
	if (retval != STATUS_SUCCESS) {
		set_sense_type(chip, SCSI_LUN(srb), SENSE_TYPE_MEDIA_WRITE_ERR);
		vfree(buf);
		return TRANSPORT_FAILED;
	}
	vfree(buf);
	return TRANSPORT_GOOD;
}
/*
 * Dispatcher for SCSI_APP_CMD: cmnd[2] names the sub-command, which is
 * forwarded directly to its handler.  Unknown codes fail with an
 * invalid-field sense.
 */
static int app_cmd(struct scsi_cmnd *srb, struct rtsx_chip *chip)
{
	switch (srb->cmnd[2]) {
	case PP_READ10:
	case PP_WRITE10:
		return read_write(srb, chip);
	case READ_HOST_REG:
		return read_host_reg(srb, chip);
	case WRITE_HOST_REG:
		return write_host_reg(srb, chip);
	case GET_VAR:
		return get_variable(srb, chip);
	case SET_VAR:
		return set_variable(srb, chip);
	case DMA_READ:
	case DMA_WRITE:
		return dma_access_ring_buffer(srb, chip);
	case READ_PHY:
		return read_phy_register(srb, chip);
	case WRITE_PHY:
		return write_phy_register(srb, chip);
	case ERASE_EEPROM2:
		return erase_eeprom2(srb, chip);
	case READ_EEPROM2:
		return read_eeprom2(srb, chip);
	case WRITE_EEPROM2:
		return write_eeprom2(srb, chip);
	case READ_EFUSE:
		return read_efuse(srb, chip);
	case WRITE_EFUSE:
		return write_efuse(srb, chip);
	case READ_CFG:
		return read_cfg_byte(srb, chip);
	case WRITE_CFG:
		return write_cfg_byte(srb, chip);
	case SET_CHIP_MODE:
		return set_chip_mode(srb, chip);
	case SUIT_CMD:
		return suit_cmd(srb, chip);
	case GET_DEV_STATUS:
		return get_dev_status(srb, chip);
	default:
		set_sense_type(chip, SCSI_LUN(srb),
			       SENSE_TYPE_MEDIA_INVALID_CMD_FIELD);
		return TRANSPORT_FAILED;
	}
}
/*
 * READ_STATUS vendor command: build a 16-byte status block (chip IDs,
 * LUN-to-card mapping, card readiness and type/speed flags) and copy as
 * much of it as fits into the SCSI data buffer.
 */
static int read_status(struct scsi_cmnd *srb, struct rtsx_chip *chip)
{
	u8 rtsx_status[16];
	int buf_len;
	unsigned int lun = SCSI_LUN(srb);
	/* Bytes 0-4: vendor/product IDs (big-endian) and the LUN */
	rtsx_status[0] = (u8)(chip->vendor_id >> 8);
	rtsx_status[1] = (u8)(chip->vendor_id);
	rtsx_status[2] = (u8)(chip->product_id >> 8);
	rtsx_status[3] = (u8)(chip->product_id);
	rtsx_status[4] = (u8)lun;
	/* Byte 5: card type behind this LUN (2=SD, 3=MS, 4=XD, 7=none) */
	if (CHECK_LUN_MODE(chip, SD_MS_2LUN)) {
		if (chip->lun2card[lun] == SD_CARD)
			rtsx_status[5] = 2;
		else
			rtsx_status[5] = 3;
	} else {
		if (chip->card_exist) {
			if (chip->card_exist & XD_CARD)
				rtsx_status[5] = 4;
			else if (chip->card_exist & SD_CARD)
				rtsx_status[5] = 2;
			else if (chip->card_exist & MS_CARD)
				rtsx_status[5] = 3;
			else
				rtsx_status[5] = 7;
		} else {
			rtsx_status[5] = 7;
		}
	}
	/* Byte 6: total number of LUNs exposed */
	if (CHECK_LUN_MODE(chip, SD_MS_2LUN))
		rtsx_status[6] = 2;
	else
		rtsx_status[6] = 1;
	/* Bytes 7-8: product id (low byte) and IC version */
	rtsx_status[7] = (u8)(chip->product_id);
	rtsx_status[8] = chip->ic_version;
	/* Byte 9: card present */
	if (check_card_exist(chip, lun))
		rtsx_status[9] = 1;
	else
		rtsx_status[9] = 0;
	/* Byte 10: 0 in two-LUN mode, 1 otherwise */
	if (CHECK_LUN_MODE(chip, SD_MS_2LUN))
		rtsx_status[10] = 0;
	else
		rtsx_status[10] = 1;
	/* Byte 11: card-type mask supported by this LUN */
	if (CHECK_LUN_MODE(chip, SD_MS_2LUN)) {
		if (chip->lun2card[lun] == SD_CARD)
			rtsx_status[11] = SD_CARD;
		else
			rtsx_status[11] = MS_CARD;
	} else {
		rtsx_status[11] = XD_CARD | SD_CARD | MS_CARD;
	}
	/* Byte 12: card ready */
	if (check_card_ready(chip, lun))
		rtsx_status[12] = 1;
	else
		rtsx_status[12] = 0;
	/* Byte 13: detailed card type/speed flags */
	if (get_lun_card(chip, lun) == XD_CARD) {
		rtsx_status[13] = 0x40;
	} else if (get_lun_card(chip, lun) == SD_CARD) {
		struct sd_info *sd_card = &chip->sd_card;
		rtsx_status[13] = 0x20;
		if (CHK_SD(sd_card)) {
			if (CHK_SD_HCXC(sd_card))
				rtsx_status[13] |= 0x04;
			if (CHK_SD_HS(sd_card))
				rtsx_status[13] |= 0x02;
		} else {
			/* MMC in the SD slot */
			rtsx_status[13] |= 0x08;
			if (CHK_MMC_52M(sd_card))
				rtsx_status[13] |= 0x02;
			if (CHK_MMC_SECTOR_MODE(sd_card))
				rtsx_status[13] |= 0x04;
		}
	} else if (get_lun_card(chip, lun) == MS_CARD) {
		struct ms_info *ms_card = &chip->ms_card;
		if (CHK_MSPRO(ms_card)) {
			rtsx_status[13] = 0x38;
			if (CHK_HG8BIT(ms_card))
				rtsx_status[13] |= 0x04;
#ifdef SUPPORT_MSXC
			if (CHK_MSXC(ms_card))
				rtsx_status[13] |= 0x01;
#endif
		} else {
			rtsx_status[13] = 0x30;
		}
	} else {
		/* No card: report the slot's capabilities instead */
		if (CHECK_LUN_MODE(chip, DEFAULT_SINGLE)) {
#ifdef SUPPORT_SDIO
			if (chip->sd_io && chip->sd_int)
				rtsx_status[13] = 0x60;
			else
				rtsx_status[13] = 0x70;
#else
			rtsx_status[13] = 0x70;
#endif
		} else {
			if (chip->lun2card[lun] == SD_CARD)
				rtsx_status[13] = 0x20;
			else
				rtsx_status[13] = 0x30;
		}
	}
	/* Bytes 14-15: function mask and SDIO presence flag */
	rtsx_status[14] = 0x78;
	if (CHK_SDIO_EXIST(chip) && !CHK_SDIO_IGNORED(chip))
		rtsx_status[15] = 0x83;
	else
		rtsx_status[15] = 0x82;
	buf_len = min_t(unsigned int, scsi_bufflen(srb), sizeof(rtsx_status));
	rtsx_stor_set_xfer_buf(rtsx_status, buf_len, srb);
	scsi_set_resid(srb, scsi_bufflen(srb) - buf_len);
	return TRANSPORT_GOOD;
}
static int get_card_bus_width(struct scsi_cmnd *srb, struct rtsx_chip *chip)
{
unsigned int lun = SCSI_LUN(srb);
u8 card, bus_width;
if (!check_card_ready(chip, lun)) {
set_sense_type(chip, lun, SENSE_TYPE_MEDIA_NOT_PRESENT);
return TRANSPORT_FAILED;
}
card = get_lun_card(chip, lun);
if (card == SD_CARD || card == MS_CARD) {
bus_width = chip->card_bus_width[lun];
} else {
set_sense_type(chip, lun, SENSE_TYPE_MEDIA_UNRECOVER_READ_ERR);
return TRANSPORT_FAILED;
}
scsi_set_resid(srb, 0);
rtsx_stor_set_xfer_buf(&bus_width, scsi_bufflen(srb), srb);
return TRANSPORT_GOOD;
}
/*
 * SPI_VENDOR_COMMAND dispatcher (not available on RTS5208/RTS5288):
 * temporarily reconfigures the card GPIO direction register for SPI
 * access, forwards the sub-command in cmnd[2] to the matching spi_*
 * helper, then restores the saved GPIO direction on every exit path.
 */
static int spi_vendor_cmd(struct scsi_cmnd *srb, struct rtsx_chip *chip)
{
	int result;
	unsigned int lun = SCSI_LUN(srb);
	u8 gpio_dir;
	if (CHECK_PID(chip, 0x5208) || CHECK_PID(chip, 0x5288)) {
		set_sense_type(chip, lun, SENSE_TYPE_MEDIA_INVALID_CMD_FIELD);
		return TRANSPORT_FAILED;
	}
	rtsx_disable_aspm(chip);
	if (chip->ss_en && (rtsx_get_stat(chip) == RTSX_STAT_SS)) {
		rtsx_exit_ss(chip);
		wait_timeout(100);
	}
	rtsx_set_stat(chip, RTSX_STAT_RUN);
	rtsx_force_power_on(chip, SSC_PDCTL);
	/* Save the GPIO direction, then drive bit 0 as input for SPI */
	rtsx_read_register(chip, CARD_GPIO_DIR, &gpio_dir);
	rtsx_write_register(chip, CARD_GPIO_DIR, 0x07, gpio_dir & 0x06);
	switch (srb->cmnd[2]) {
	case SCSI_SPI_GETSTATUS:
		result = spi_get_status(srb, chip);
		break;
	case SCSI_SPI_SETPARAMETER:
		result = spi_set_parameter(srb, chip);
		break;
	case SCSI_SPI_READFALSHID:
		result = spi_read_flash_id(srb, chip);
		break;
	case SCSI_SPI_READFLASH:
		result = spi_read_flash(srb, chip);
		break;
	case SCSI_SPI_WRITEFLASH:
		result = spi_write_flash(srb, chip);
		break;
	case SCSI_SPI_WRITEFLASHSTATUS:
		result = spi_write_flash_status(srb, chip);
		break;
	case SCSI_SPI_ERASEFLASH:
		result = spi_erase_flash(srb, chip);
		break;
	default:
		/* Restore GPIO direction before the error return too */
		rtsx_write_register(chip, CARD_GPIO_DIR, 0x07, gpio_dir);
		set_sense_type(chip, lun, SENSE_TYPE_MEDIA_INVALID_CMD_FIELD);
		return TRANSPORT_FAILED;
	}
	rtsx_write_register(chip, CARD_GPIO_DIR, 0x07, gpio_dir);
	if (result != STATUS_SUCCESS)
		return TRANSPORT_FAILED;
	return TRANSPORT_GOOD;
}
/*
 * Top-level vendor-command dispatcher: cmnd[1] names the sub-command,
 * which is forwarded to its handler.  Unknown codes fail with an
 * invalid-field sense.
 */
static int vendor_cmnd(struct scsi_cmnd *srb, struct rtsx_chip *chip)
{
	switch (srb->cmnd[1]) {
	case READ_STATUS:
		return read_status(srb, chip);
	case READ_MEM:
		return read_mem(srb, chip);
	case WRITE_MEM:
		return write_mem(srb, chip);
	case READ_EEPROM:
		return read_eeprom(srb, chip);
	case WRITE_EEPROM:
		return write_eeprom(srb, chip);
	case TOGGLE_GPIO:
		return toggle_gpio_cmd(srb, chip);
	case GET_SD_CSD:
		return get_sd_csd(srb, chip);
	case GET_BUS_WIDTH:
		return get_card_bus_width(srb, chip);
	case SCSI_APP_CMD:
		return app_cmd(srb, chip);
	case SPI_VENDOR_COMMAND:
		return spi_vendor_cmd(srb, chip);
	default:
		set_sense_type(chip, SCSI_LUN(srb),
			       SENSE_TYPE_MEDIA_INVALID_CMD_FIELD);
		return TRANSPORT_FAILED;
	}
}
#if !defined(LED_AUTO_BLINK) && !defined(REGULAR_BLINK)
void led_shine(struct scsi_cmnd *srb, struct rtsx_chip *chip)
{
unsigned int lun = SCSI_LUN(srb);
u16 sec_cnt;
if (srb->cmnd[0] == READ_10 || srb->cmnd[0] == WRITE_10) {
sec_cnt = ((u16)(srb->cmnd[7]) << 8) | srb->cmnd[8];
} else if ((srb->cmnd[0] == READ_6) || (srb->cmnd[0] == WRITE_6)) {
sec_cnt = srb->cmnd[4];
if (sec_cnt == 0)
sec_cnt = 256;
} else {
return;
}
if (chip->rw_cap[lun] >= GPIO_TOGGLE_THRESHOLD) {
toggle_gpio(chip, LED_GPIO);
chip->rw_cap[lun] = 0;
} else {
chip->rw_cap[lun] += sec_cnt;
}
}
#endif
/*
 * MS_FORMAT vendor command: format a Memory Stick Pro card.
 *
 * cmnd[3..7] must carry the ASCII magic "MGfmt"; bit 0 of cmnd[8]
 * clear selects quick format, set selects full format.  The card must
 * be a write-enabled, ready MS Pro card.
 */
static int ms_format_cmnd(struct scsi_cmnd *srb, struct rtsx_chip *chip)
{
	struct ms_info *ms_card = &chip->ms_card;
	unsigned int lun = SCSI_LUN(srb);
	bool quick_format;
	int retval;
	if (get_lun_card(chip, lun) != MS_CARD) {
		set_sense_type(chip, lun, SENSE_TYPE_MEDIA_LUN_NOT_SUPPORT);
		return TRANSPORT_FAILED;
	}
	/* Magic bytes "MGfmt" guard against accidental formats */
	if (srb->cmnd[3] != 0x4D || srb->cmnd[4] != 0x47 ||
	    srb->cmnd[5] != 0x66 || srb->cmnd[6] != 0x6D ||
	    srb->cmnd[7] != 0x74) {
		set_sense_type(chip, lun, SENSE_TYPE_MEDIA_INVALID_CMD_FIELD);
		return TRANSPORT_FAILED;
	}
	rtsx_disable_aspm(chip);
	if (chip->ss_en && (rtsx_get_stat(chip) == RTSX_STAT_SS)) {
		rtsx_exit_ss(chip);
		wait_timeout(100);
		/* After a suspend exit, re-verify the card before format */
		if (!check_card_ready(chip, lun) ||
		    (get_card_size(chip, lun) == 0)) {
			set_sense_type(chip, lun, SENSE_TYPE_MEDIA_NOT_PRESENT);
			return TRANSPORT_FAILED;
		}
	}
	rtsx_set_stat(chip, RTSX_STAT_RUN);
	if (srb->cmnd[8] & 0x01)
		quick_format = false;
	else
		quick_format = true;
	if (!(chip->card_ready & MS_CARD)) {
		set_sense_type(chip, lun, SENSE_TYPE_MEDIA_NOT_PRESENT);
		return TRANSPORT_FAILED;
	}
	if (chip->card_wp & MS_CARD) {
		set_sense_type(chip, lun, SENSE_TYPE_MEDIA_WRITE_PROTECT);
		return TRANSPORT_FAILED;
	}
	/* Only MS Pro cards can be formatted through this path */
	if (!CHK_MSPRO(ms_card)) {
		set_sense_type(chip, lun, SENSE_TYPE_MEDIA_LUN_NOT_SUPPORT);
		return TRANSPORT_FAILED;
	}
	retval = mspro_format(srb, chip, MS_SHORT_DATA_LEN, quick_format);
	if (retval != STATUS_SUCCESS) {
		set_sense_type(chip, lun, SENSE_TYPE_FORMAT_CMD_FAILED);
		return TRANSPORT_FAILED;
	}
	scsi_set_resid(srb, 0);
	return TRANSPORT_GOOD;
}
#ifdef SUPPORT_PCGL_1P18
static int get_ms_information(struct scsi_cmnd *srb, struct rtsx_chip *chip)
{
struct ms_info *ms_card = &chip->ms_card;
unsigned int lun = SCSI_LUN(srb);
u8 dev_info_id, data_len;
u8 *buf;
unsigned int buf_len;
int i;
if (!check_card_ready(chip, lun)) {
set_sense_type(chip, lun, SENSE_TYPE_MEDIA_NOT_PRESENT);
return TRANSPORT_FAILED;
}
if (get_lun_card(chip, lun) != MS_CARD) {
set_sense_type(chip, lun, SENSE_TYPE_MEDIA_LUN_NOT_SUPPORT);
return TRANSPORT_FAILED;
}
if (srb->cmnd[2] != 0xB0 || srb->cmnd[4] != 0x4D ||
srb->cmnd[5] != 0x53 || srb->cmnd[6] != 0x49 ||
srb->cmnd[7] != 0x44) {
set_sense_type(chip, lun, SENSE_TYPE_MEDIA_INVALID_CMD_FIELD);
return TRANSPORT_FAILED;
}
dev_info_id = srb->cmnd[3];
if ((CHK_MSXC(ms_card) && dev_info_id == 0x10) ||
(!CHK_MSXC(ms_card) && dev_info_id == 0x13) ||
!CHK_MSPRO(ms_card)) {
set_sense_type(chip, lun, SENSE_TYPE_MEDIA_INVALID_CMD_FIELD);
return TRANSPORT_FAILED;
}
if (dev_info_id == 0x15) {
buf_len = 0x3C;
data_len = 0x3A;
} else {
buf_len = 0x6C;
data_len = 0x6A;
}
buf = kmalloc(buf_len, GFP_KERNEL);
if (!buf)
return TRANSPORT_ERROR;
i = 0;
/* GET Memory Stick Media Information Response Header */
buf[i++] = 0x00; /* Data length MSB */
buf[i++] = data_len; /* Data length LSB */
/* Device Information Type Code */
if (CHK_MSXC(ms_card))
buf[i++] = 0x03;
else
buf[i++] = 0x02;
/* SGM bit */
buf[i++] = 0x01;
/* Reserved */
buf[i++] = 0x00;
buf[i++] = 0x00;
buf[i++] = 0x00;
/* Number of Device Information */
buf[i++] = 0x01;
/* Device Information Body */
/* Device Information ID Number */
buf[i++] = dev_info_id;
/* Device Information Length */
if (dev_info_id == 0x15)
data_len = 0x31;
else
data_len = 0x61;
buf[i++] = 0x00; /* Data length MSB */
buf[i++] = data_len; /* Data length LSB */
/* Valid Bit */
buf[i++] = 0x80;
if (dev_info_id == 0x10 || dev_info_id == 0x13) {
/* System Information */
memcpy(buf + i, ms_card->raw_sys_info, 96);
} else {
/* Model Name */
memcpy(buf + i, ms_card->raw_model_name, 48);
}
rtsx_stor_set_xfer_buf(buf, buf_len, srb);
scsi_set_resid(srb, scsi_bufflen(srb) - buf_len);
kfree(buf);
return STATUS_SUCCESS;
}
#endif
/*
 * Dispatch a Memory Stick vendor-specific command (MS_SP_CMND) to the
 * handler selected by the sub-opcode carried in byte 2 of the CDB.
 * Unknown sub-opcodes are reported as a transport-level error.
 */
static int ms_sp_cmnd(struct scsi_cmnd *srb, struct rtsx_chip *chip)
{
	switch (srb->cmnd[2]) {
	case MS_FORMAT:
		return ms_format_cmnd(srb, chip);
#ifdef SUPPORT_PCGL_1P18
	case GET_MS_INFORMATION:
		return get_ms_information(srb, chip);
#endif
	default:
		return TRANSPORT_ERROR;
	}
}
#ifdef SUPPORT_CPRM
/*
 * Dispatch a CPRM (SD pass-through) extension command to the matching SD
 * handler. Wakes the chip from power saving and flushes pending SD work
 * before touching the card; fails with a sense code if no SD card is
 * present on this LUN.
 */
static int sd_extension_cmnd(struct scsi_cmnd *srb, struct rtsx_chip *chip)
{
	unsigned int lun = SCSI_LUN(srb);
	int result;
	/* Leave ASPM and any suspend state before issuing card traffic. */
	rtsx_disable_aspm(chip);
	if (chip->ss_en && (rtsx_get_stat(chip) == RTSX_STAT_SS)) {
		rtsx_exit_ss(chip);
		wait_timeout(100);
	}
	rtsx_set_stat(chip, RTSX_STAT_RUN);
	/* Flush any deferred SD work so the card state is settled. */
	sd_cleanup_work(chip);
	if (!check_card_ready(chip, lun)) {
		set_sense_type(chip, lun, SENSE_TYPE_MEDIA_NOT_PRESENT);
		return TRANSPORT_FAILED;
	}
	if (get_lun_card(chip, lun) != SD_CARD) {
		set_sense_type(chip, lun, SENSE_TYPE_MEDIA_LUN_NOT_SUPPORT);
		return TRANSPORT_FAILED;
	}
	/* Route on the vendor opcode itself (cmnd[0]). */
	switch (srb->cmnd[0]) {
	case SD_PASS_THRU_MODE:
		result = sd_pass_thru_mode(srb, chip);
		break;
	case SD_EXECUTE_NO_DATA:
		result = sd_execute_no_data(srb, chip);
		break;
	case SD_EXECUTE_READ:
		result = sd_execute_read_data(srb, chip);
		break;
	case SD_EXECUTE_WRITE:
		result = sd_execute_write_data(srb, chip);
		break;
	case SD_GET_RSP:
		result = sd_get_cmd_rsp(srb, chip);
		break;
	case SD_HW_RST:
		result = sd_hw_rst(srb, chip);
		break;
	default:
		set_sense_type(chip, lun, SENSE_TYPE_MEDIA_INVALID_CMD_FIELD);
		return TRANSPORT_FAILED;
	}
	return result;
}
#endif
#ifdef SUPPORT_MAGIC_GATE
/*
 * Handle a MagicGate REPORT KEY command (read direction). Validates the
 * CDB against the expected key class / key format / allocation length for
 * each supported key format, then invokes the matching mg_* helper.
 * Returns TRANSPORT_GOOD on success, TRANSPORT_FAILED with a sense code
 * set otherwise.
 */
static int mg_report_key(struct scsi_cmnd *srb, struct rtsx_chip *chip)
{
	struct ms_info *ms_card = &chip->ms_card;
	unsigned int lun = SCSI_LUN(srb);
	int retval;
	u8 key_format;
	/* Wake the chip before any card access. */
	rtsx_disable_aspm(chip);
	if (chip->ss_en && (rtsx_get_stat(chip) == RTSX_STAT_SS)) {
		rtsx_exit_ss(chip);
		wait_timeout(100);
	}
	rtsx_set_stat(chip, RTSX_STAT_RUN);
	ms_cleanup_work(chip);
	if (!check_card_ready(chip, lun)) {
		set_sense_type(chip, lun, SENSE_TYPE_MEDIA_NOT_PRESENT);
		return TRANSPORT_FAILED;
	}
	if (get_lun_card(chip, lun) != MS_CARD) {
		set_sense_type(chip, lun, SENSE_TYPE_MEDIA_LUN_NOT_SUPPORT);
		return TRANSPORT_FAILED;
	}
	/* Byte 7 carries the key class; only MG for MS Pro is supported. */
	if (srb->cmnd[7] != KC_MG_R_PRO) {
		set_sense_type(chip, lun, SENSE_TYPE_MEDIA_INVALID_CMD_FIELD);
		return TRANSPORT_FAILED;
	}
	/* MagicGate only works with Memory Stick Pro media. */
	if (!CHK_MSPRO(ms_card)) {
		set_sense_type(chip, lun, SENSE_TYPE_MG_INCOMPATIBLE_MEDIUM);
		return TRANSPORT_FAILED;
	}
	/* Low 6 bits of byte 10 select the key format. */
	key_format = srb->cmnd[10] & 0x3F;
	dev_dbg(rtsx_dev(chip), "key_format = 0x%x\n", key_format);
	switch (key_format) {
	case KF_GET_LOC_EKB:
		/* Local EKB transfer is fixed at 0x41C bytes. */
		if ((scsi_bufflen(srb) == 0x41C) &&
		    srb->cmnd[8] == 0x04 &&
		    srb->cmnd[9] == 0x1C) {
			retval = mg_get_local_EKB(srb, chip);
			if (retval != STATUS_SUCCESS)
				return TRANSPORT_FAILED;
		} else {
			set_sense_type(chip, lun,
				       SENSE_TYPE_MEDIA_INVALID_CMD_FIELD);
			return TRANSPORT_FAILED;
		}
		break;
	case KF_RSP_CHG:
		/* Challenge response is fixed at 0x24 bytes. */
		if ((scsi_bufflen(srb) == 0x24) &&
		    srb->cmnd[8] == 0x00 &&
		    srb->cmnd[9] == 0x24) {
			retval = mg_get_rsp_chg(srb, chip);
			if (retval != STATUS_SUCCESS)
				return TRANSPORT_FAILED;
		} else {
			set_sense_type(chip, lun,
				       SENSE_TYPE_MEDIA_INVALID_CMD_FIELD);
			return TRANSPORT_FAILED;
		}
		break;
	case KF_GET_ICV:
		/* Byte 5 selects the ICV entry; must be below 32. */
		ms_card->mg_entry_num = srb->cmnd[5];
		if ((scsi_bufflen(srb) == 0x404) &&
		    srb->cmnd[8] == 0x04 &&
		    srb->cmnd[9] == 0x04 &&
		    srb->cmnd[2] == 0x00 &&
		    srb->cmnd[3] == 0x00 &&
		    srb->cmnd[4] == 0x00 &&
		    srb->cmnd[5] < 32) {
			retval = mg_get_ICV(srb, chip);
			if (retval != STATUS_SUCCESS)
				return TRANSPORT_FAILED;
		} else {
			set_sense_type(chip, lun,
				       SENSE_TYPE_MEDIA_INVALID_CMD_FIELD);
			return TRANSPORT_FAILED;
		}
		break;
	default:
		set_sense_type(chip, lun, SENSE_TYPE_MEDIA_INVALID_CMD_FIELD);
		return TRANSPORT_FAILED;
	}
	scsi_set_resid(srb, 0);
	return TRANSPORT_GOOD;
}
/*
 * Handle a MagicGate SEND KEY command (write direction). Mirrors
 * mg_report_key() but additionally rejects write-protected media, and
 * routes to the mg_* helpers that push data toward the card. Returns
 * TRANSPORT_GOOD on success, TRANSPORT_FAILED with a sense code set
 * otherwise.
 */
static int mg_send_key(struct scsi_cmnd *srb, struct rtsx_chip *chip)
{
	struct ms_info *ms_card = &chip->ms_card;
	unsigned int lun = SCSI_LUN(srb);
	int retval;
	u8 key_format;
	/* Wake the chip before any card access. */
	rtsx_disable_aspm(chip);
	if (chip->ss_en && (rtsx_get_stat(chip) == RTSX_STAT_SS)) {
		rtsx_exit_ss(chip);
		wait_timeout(100);
	}
	rtsx_set_stat(chip, RTSX_STAT_RUN);
	ms_cleanup_work(chip);
	if (!check_card_ready(chip, lun)) {
		set_sense_type(chip, lun, SENSE_TYPE_MEDIA_NOT_PRESENT);
		return TRANSPORT_FAILED;
	}
	/* SEND KEY writes to the card; refuse write-protected media. */
	if (check_card_wp(chip, lun)) {
		set_sense_type(chip, lun, SENSE_TYPE_MEDIA_WRITE_PROTECT);
		return TRANSPORT_FAILED;
	}
	if (get_lun_card(chip, lun) != MS_CARD) {
		set_sense_type(chip, lun, SENSE_TYPE_MEDIA_LUN_NOT_SUPPORT);
		return TRANSPORT_FAILED;
	}
	/* Byte 7 carries the key class; only MG for MS Pro is supported. */
	if (srb->cmnd[7] != KC_MG_R_PRO) {
		set_sense_type(chip, lun, SENSE_TYPE_MEDIA_INVALID_CMD_FIELD);
		return TRANSPORT_FAILED;
	}
	if (!CHK_MSPRO(ms_card)) {
		set_sense_type(chip, lun, SENSE_TYPE_MG_INCOMPATIBLE_MEDIUM);
		return TRANSPORT_FAILED;
	}
	/* Low 6 bits of byte 10 select the key format. */
	key_format = srb->cmnd[10] & 0x3F;
	dev_dbg(rtsx_dev(chip), "key_format = 0x%x\n", key_format);
	switch (key_format) {
	case KF_SET_LEAF_ID:
		/* Leaf ID payload is fixed at 0x0C bytes. */
		if ((scsi_bufflen(srb) == 0x0C) &&
		    srb->cmnd[8] == 0x00 &&
		    srb->cmnd[9] == 0x0C) {
			retval = mg_set_leaf_id(srb, chip);
			if (retval != STATUS_SUCCESS)
				return TRANSPORT_FAILED;
		} else {
			set_sense_type(chip, lun,
				       SENSE_TYPE_MEDIA_INVALID_CMD_FIELD);
			return TRANSPORT_FAILED;
		}
		break;
	case KF_CHG_HOST:
		/* Host challenge payload is fixed at 0x0C bytes. */
		if ((scsi_bufflen(srb) == 0x0C) &&
		    srb->cmnd[8] == 0x00 &&
		    srb->cmnd[9] == 0x0C) {
			retval = mg_chg(srb, chip);
			if (retval != STATUS_SUCCESS)
				return TRANSPORT_FAILED;
		} else {
			set_sense_type(chip, lun,
				       SENSE_TYPE_MEDIA_INVALID_CMD_FIELD);
			return TRANSPORT_FAILED;
		}
		break;
	case KF_RSP_HOST:
		/* Host response payload is fixed at 0x0C bytes. */
		if ((scsi_bufflen(srb) == 0x0C) &&
		    srb->cmnd[8] == 0x00 &&
		    srb->cmnd[9] == 0x0C) {
			retval = mg_rsp(srb, chip);
			if (retval != STATUS_SUCCESS)
				return TRANSPORT_FAILED;
		} else {
			set_sense_type(chip, lun,
				       SENSE_TYPE_MEDIA_INVALID_CMD_FIELD);
			return TRANSPORT_FAILED;
		}
		break;
	case KF_SET_ICV:
		/* Byte 5 selects the ICV entry; must be below 32. */
		ms_card->mg_entry_num = srb->cmnd[5];
		if ((scsi_bufflen(srb) == 0x404) &&
		    srb->cmnd[8] == 0x04 &&
		    srb->cmnd[9] == 0x04 &&
		    srb->cmnd[2] == 0x00 &&
		    srb->cmnd[3] == 0x00 &&
		    srb->cmnd[4] == 0x00 &&
		    srb->cmnd[5] < 32) {
			retval = mg_set_ICV(srb, chip);
			if (retval != STATUS_SUCCESS)
				return TRANSPORT_FAILED;
		} else {
			set_sense_type(chip, lun,
				       SENSE_TYPE_MEDIA_INVALID_CMD_FIELD);
			return TRANSPORT_FAILED;
		}
		break;
	default:
		set_sense_type(chip, lun, SENSE_TYPE_MEDIA_INVALID_CMD_FIELD);
		return TRANSPORT_FAILED;
	}
	scsi_set_resid(srb, 0);
	return TRANSPORT_GOOD;
}
#endif
/*
 * Top-level SCSI command dispatcher for the rts5208 driver. Blocks most
 * commands while an SD erase or MS format is in progress, then routes the
 * CDB opcode to the matching handler. Returns a TRANSPORT_* status.
 */
int rtsx_scsi_handler(struct scsi_cmnd *srb, struct rtsx_chip *chip)
{
#ifdef SUPPORT_SD_LOCK
	struct sd_info *sd_card = &chip->sd_card;
#endif
	struct ms_info *ms_card = &chip->ms_card;
	unsigned int lun = SCSI_LUN(srb);
	int result;
#ifdef SUPPORT_SD_LOCK
	if (sd_card->sd_erase_status) {
		/* Block all SCSI command except for
		 * REQUEST_SENSE and rs_ppstatus
		 */
		if (!(srb->cmnd[0] == VENDOR_CMND &&
		      srb->cmnd[1] == SCSI_APP_CMD &&
		      srb->cmnd[2] == GET_DEV_STATUS) &&
		    srb->cmnd[0] != REQUEST_SENSE) {
			/* Logical Unit Not Ready Format in Progress */
			set_sense_data(chip, lun, CUR_ERR,
				       0x02, 0, 0x04, 0x04, 0, 0);
			return TRANSPORT_FAILED;
		}
	}
#endif
	/* While an MS format runs, only REQUEST_SENSE/INQUIRY go through;
	 * the sense data reports the current format progress.
	 */
	if ((get_lun_card(chip, lun) == MS_CARD) &&
	    ms_card->format_status == FORMAT_IN_PROGRESS) {
		if (srb->cmnd[0] != REQUEST_SENSE &&
		    srb->cmnd[0] != INQUIRY) {
			/* Logical Unit Not Ready Format in Progress */
			set_sense_data(chip, lun, CUR_ERR, 0x02, 0, 0x04, 0x04,
				       0, (u16)(ms_card->progress));
			return TRANSPORT_FAILED;
		}
	}
	switch (srb->cmnd[0]) {
	case READ_10:
	case WRITE_10:
	case READ_6:
	case WRITE_6:
		result = read_write(srb, chip);
#if !defined(LED_AUTO_BLINK) && !defined(REGULAR_BLINK)
		led_shine(srb, chip);
#endif
		break;
	case TEST_UNIT_READY:
		result = test_unit_ready(srb, chip);
		break;
	case INQUIRY:
		result = inquiry(srb, chip);
		break;
	case READ_CAPACITY:
		result = read_capacity(srb, chip);
		break;
	case START_STOP:
		result = start_stop_unit(srb, chip);
		break;
	case ALLOW_MEDIUM_REMOVAL:
		result = allow_medium_removal(srb, chip);
		break;
	case REQUEST_SENSE:
		result = request_sense(srb, chip);
		break;
	case MODE_SENSE:
	case MODE_SENSE_10:
		result = mode_sense(srb, chip);
		break;
	case 0x23: /* READ FORMAT CAPACITIES */
		result = read_format_capacity(srb, chip);
		break;
	case VENDOR_CMND:
		result = vendor_cmnd(srb, chip);
		break;
	case MS_SP_CMND:
		result = ms_sp_cmnd(srb, chip);
		break;
#ifdef SUPPORT_CPRM
	case SD_PASS_THRU_MODE:
	case SD_EXECUTE_NO_DATA:
	case SD_EXECUTE_READ:
	case SD_EXECUTE_WRITE:
	case SD_GET_RSP:
	case SD_HW_RST:
		result = sd_extension_cmnd(srb, chip);
		break;
#endif
#ifdef SUPPORT_MAGIC_GATE
	case CMD_MSPRO_MG_RKEY:
		result = mg_report_key(srb, chip);
		break;
	case CMD_MSPRO_MG_SKEY:
		result = mg_send_key(srb, chip);
		break;
#endif
	case FORMAT_UNIT:
	case MODE_SELECT:
	case VERIFY:
		/* Accepted as no-ops. */
		result = TRANSPORT_GOOD;
		break;
	default:
		set_sense_type(chip, lun, SENSE_TYPE_MEDIA_INVALID_CMD_FIELD);
		result = TRANSPORT_FAILED;
	}
	return result;
}
| linux-master | drivers/staging/rts5208/rtsx_scsi.c |
// SPDX-License-Identifier: GPL-2.0+
/*
* VMEbus User access driver
*
* Author: Martyn Welch <[email protected]>
* Copyright 2008 GE Intelligent Platforms Embedded Systems, Inc.
*
* Based on work by:
* Tom Armistead and Ajit Prem
* Copyright 2004 Motorola Inc.
*/
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
#include <linux/refcount.h>
#include <linux/cdev.h>
#include <linux/delay.h>
#include <linux/device.h>
#include <linux/dma-mapping.h>
#include <linux/errno.h>
#include <linux/init.h>
#include <linux/ioctl.h>
#include <linux/kernel.h>
#include <linux/mm.h>
#include <linux/module.h>
#include <linux/pagemap.h>
#include <linux/pci.h>
#include <linux/mutex.h>
#include <linux/slab.h>
#include <linux/spinlock.h>
#include <linux/syscalls.h>
#include <linux/types.h>
#include <linux/io.h>
#include <linux/uaccess.h>
#include "vme.h"
#include "vme_user.h"
static const char driver_name[] = "vme_user";
static int bus[VME_USER_BUS_MAX];
static unsigned int bus_num;
/* Currently Documentation/admin-guide/devices.rst defines the
* following for VME:
*
* 221 char VME bus
* 0 = /dev/bus/vme/m0 First master image
* 1 = /dev/bus/vme/m1 Second master image
* 2 = /dev/bus/vme/m2 Third master image
* 3 = /dev/bus/vme/m3 Fourth master image
* 4 = /dev/bus/vme/s0 First slave image
* 5 = /dev/bus/vme/s1 Second slave image
* 6 = /dev/bus/vme/s2 Third slave image
* 7 = /dev/bus/vme/s3 Fourth slave image
* 8 = /dev/bus/vme/ctl Control
*
* It is expected that all VME bus drivers will use the
* same interface. For interface documentation see
* http://www.vmelinux.org/.
*
* However the VME driver at http://www.vmelinux.org/ is rather old and doesn't
* even support the tsi148 chipset (which has 8 master and 8 slave windows).
* We'll run with this for now as far as possible, however it probably makes
* sense to get rid of the old mappings and just do everything dynamically.
*
* So for now, we'll restrict the driver to providing 4 masters and 4 slaves as
* defined above and try to support at least some of the interface from
* http://www.vmelinux.org/ as an alternative the driver can be written
* providing a saner interface later.
*
* The vmelinux.org driver never supported slave images, the devices reserved
* for slaves were repurposed to support all 8 master images on the UniverseII!
* We shall support 4 masters and 4 slaves with this driver.
*/
#define VME_MAJOR 221 /* VME Major Device Number */
#define VME_DEVS 9 /* Number of dev entries */
#define MASTER_MINOR 0
#define MASTER_MAX 3
#define SLAVE_MINOR 4
#define SLAVE_MAX 7
#define CONTROL_MINOR 8
#define PCI_BUF_SIZE 0x20000 /* Size of one slave image buffer */
/*
* Structure to handle image related parameters.
*/
struct image_desc {
void *kern_buf; /* Buffer address in kernel space */
dma_addr_t pci_buf; /* Buffer address in PCI address space */
unsigned long long size_buf; /* Buffer size */
struct mutex mutex; /* Mutex for locking image */
struct device *device; /* Sysfs device */
struct vme_resource *resource; /* VME resource */
int mmap_count; /* Number of current mmap's */
};
static struct image_desc image[VME_DEVS];
static struct cdev *vme_user_cdev; /* Character device */
static struct class *vme_user_sysfs_class; /* Sysfs class */
static struct vme_dev *vme_user_bridge; /* Pointer to user device */
/* Maps each minor number to its window class (master/slave/control). */
static const int type[VME_DEVS] = {	MASTER_MINOR,	MASTER_MINOR,
					MASTER_MINOR,	MASTER_MINOR,
					SLAVE_MINOR,	SLAVE_MINOR,
					SLAVE_MINOR,	SLAVE_MINOR,
					CONTROL_MINOR
				};
/* Per-VMA private data: identifies the image and counts VMA clones. */
struct vme_user_vma_priv {
	unsigned int minor;	/* Image this mapping belongs to */
	refcount_t refcnt;	/* Number of VMAs sharing this data */
};
/*
 * Read @count bytes from a master window at @ppos into the user buffer,
 * bouncing through the image's kernel buffer. Returns bytes copied or a
 * negative error.
 */
static ssize_t resource_to_user(int minor, char __user *buf, size_t count,
				loff_t *ppos)
{
	void *kbuf = image[minor].kern_buf;
	ssize_t nread;

	/* Clamp the transfer to the size of the bounce buffer. */
	if (count > image[minor].size_buf)
		count = image[minor].size_buf;

	nread = vme_master_read(image[minor].resource, kbuf, count, *ppos);
	if (nread < 0)
		return nread;

	return copy_to_user(buf, kbuf, (unsigned long)nread) ? -EFAULT : nread;
}
/*
 * Write @count bytes from the user buffer to a master window at @ppos,
 * staging through the image's kernel buffer. Returns bytes written or a
 * negative error.
 */
static ssize_t resource_from_user(unsigned int minor, const char __user *buf,
				  size_t count, loff_t *ppos)
{
	void *kbuf = image[minor].kern_buf;

	/* Clamp the transfer to the size of the bounce buffer. */
	if (count > image[minor].size_buf)
		count = image[minor].size_buf;

	if (copy_from_user(kbuf, buf, (unsigned long)count))
		return -EFAULT;

	return vme_master_write(image[minor].resource, kbuf, count, *ppos);
}
/*
 * Read from a slave window: the window is backed by a kernel buffer, so
 * copy directly out of it at offset @ppos. Caller has already bounded
 * @count against the image size.
 */
static ssize_t buffer_to_user(unsigned int minor, char __user *buf,
			      size_t count, loff_t *ppos)
{
	void *src = image[minor].kern_buf + *ppos;

	if (copy_to_user(buf, src, (unsigned long)count))
		return -EFAULT;

	return count;
}
/*
 * Write to a slave window: copy user data directly into the backing
 * kernel buffer at offset @ppos. Caller has already bounded @count
 * against the image size.
 */
static ssize_t buffer_from_user(unsigned int minor, const char __user *buf,
				size_t count, loff_t *ppos)
{
	void *dst = image[minor].kern_buf + *ppos;

	if (copy_from_user(dst, buf, (unsigned long)count))
		return -EFAULT;

	return count;
}
/*
 * read() entry point. Bounds the request against the image size and
 * dispatches to the master- or slave-window helper. The control minor
 * has no data to read and always returns 0.
 */
static ssize_t vme_user_read(struct file *file, char __user *buf, size_t count,
			     loff_t *ppos)
{
	unsigned int minor = iminor(file_inode(file));
	ssize_t retval;
	size_t image_size;

	if (minor == CONTROL_MINOR)
		return 0;

	mutex_lock(&image[minor].mutex);

	/* XXX Do we *really* want this helper - we can use vme_*_get ? */
	image_size = vme_get_size(image[minor].resource);

	/*
	 * Ensure we are starting at a valid location. Note: the previous
	 * test against (image_size - 1) underflowed to SIZE_MAX for a
	 * zero-sized image and let the read proceed.
	 */
	if ((*ppos < 0) || (*ppos >= image_size)) {
		mutex_unlock(&image[minor].mutex);
		return 0;
	}

	/* Ensure not reading past end of the image */
	if (*ppos + count > image_size)
		count = image_size - *ppos;

	switch (type[minor]) {
	case MASTER_MINOR:
		retval = resource_to_user(minor, buf, count, ppos);
		break;
	case SLAVE_MINOR:
		retval = buffer_to_user(minor, buf, count, ppos);
		break;
	default:
		retval = -EINVAL;
	}

	mutex_unlock(&image[minor].mutex);

	if (retval > 0)
		*ppos += retval;

	return retval;
}
/*
 * write() entry point. Bounds the request against the image size and
 * dispatches to the master- or slave-window helper. The control minor
 * accepts no data and always returns 0.
 */
static ssize_t vme_user_write(struct file *file, const char __user *buf,
			      size_t count, loff_t *ppos)
{
	unsigned int minor = iminor(file_inode(file));
	ssize_t retval;
	size_t image_size;

	if (minor == CONTROL_MINOR)
		return 0;

	mutex_lock(&image[minor].mutex);

	image_size = vme_get_size(image[minor].resource);

	/*
	 * Ensure we are starting at a valid location. Note: the previous
	 * test against (image_size - 1) underflowed to SIZE_MAX for a
	 * zero-sized image and let the write proceed.
	 */
	if ((*ppos < 0) || (*ppos >= image_size)) {
		mutex_unlock(&image[minor].mutex);
		return 0;
	}

	/* Ensure not writing past end of the image */
	if (*ppos + count > image_size)
		count = image_size - *ppos;

	switch (type[minor]) {
	case MASTER_MINOR:
		retval = resource_from_user(minor, buf, count, ppos);
		break;
	case SLAVE_MINOR:
		retval = buffer_from_user(minor, buf, count, ppos);
		break;
	default:
		retval = -EINVAL;
	}

	mutex_unlock(&image[minor].mutex);

	if (retval > 0)
		*ppos += retval;

	return retval;
}
/*
 * llseek() entry point. Only master and slave images are seekable; the
 * seek is bounded by the current image size, read under the image mutex.
 */
static loff_t vme_user_llseek(struct file *file, loff_t off, int whence)
{
	unsigned int minor = iminor(file_inode(file));
	size_t image_size;
	loff_t res;

	if (type[minor] != MASTER_MINOR && type[minor] != SLAVE_MINOR)
		return -EINVAL;

	mutex_lock(&image[minor].mutex);
	image_size = vme_get_size(image[minor].resource);
	res = fixed_size_llseek(file, off, whence, image_size);
	mutex_unlock(&image[minor].mutex);

	return res;
}
/*
* The ioctls provided by the old VME access method (the one at vmelinux.org)
* are most certainly wrong as the effectively push the registers layout
* through to user space. Given that the VME core can handle multiple bridges,
* with different register layouts this is most certainly not the way to go.
*
* We aren't using the structures defined in the Motorola driver either - these
* are also quite low level, however we should use the definitions that have
* already been defined.
*/
/*
 * ioctl worker, called with the image mutex held. Valid commands depend
 * on the minor's window class:
 *   control: VME_IRQ_GEN      - generate a VME bus interrupt
 *   master:  VME_GET_MASTER / VME_SET_MASTER - query/configure window
 *   slave:   VME_GET_SLAVE  / VME_SET_SLAVE  - query/configure window
 * Anything else returns -EINVAL. (Unreachable `break` statements after
 * the returns have been removed.)
 */
static int vme_user_ioctl(struct inode *inode, struct file *file,
			  unsigned int cmd, unsigned long arg)
{
	struct vme_master master;
	struct vme_slave slave;
	struct vme_irq_id irq_req;
	unsigned long copied;
	unsigned int minor = iminor(inode);
	int retval;
	dma_addr_t pci_addr;
	void __user *argp = (void __user *)arg;

	switch (type[minor]) {
	case CONTROL_MINOR:
		switch (cmd) {
		case VME_IRQ_GEN:
			copied = copy_from_user(&irq_req, argp,
						sizeof(irq_req));
			if (copied) {
				pr_warn("Partial copy from userspace\n");
				return -EFAULT;
			}

			return vme_irq_generate(vme_user_bridge,
						irq_req.level,
						irq_req.statid);
		}
		break;
	case MASTER_MINOR:
		switch (cmd) {
		case VME_GET_MASTER:
			memset(&master, 0, sizeof(master));
			/* XXX We do not want to push aspace, cycle and width
			 * to userspace as they are
			 */
			retval = vme_master_get(image[minor].resource,
						&master.enable,
						&master.vme_addr,
						&master.size, &master.aspace,
						&master.cycle, &master.dwidth);

			copied = copy_to_user(argp, &master,
					      sizeof(master));
			if (copied) {
				pr_warn("Partial copy to userspace\n");
				return -EFAULT;
			}

			return retval;

		case VME_SET_MASTER:
			/* Refuse to reconfigure a window that is mmap'ed. */
			if (image[minor].mmap_count != 0) {
				pr_warn("Can't adjust mapped window\n");
				return -EPERM;
			}

			copied = copy_from_user(&master, argp, sizeof(master));
			if (copied) {
				pr_warn("Partial copy from userspace\n");
				return -EFAULT;
			}

			/* XXX We do not want to push aspace, cycle and width
			 * to userspace as they are
			 */
			return vme_master_set(image[minor].resource,
				master.enable, master.vme_addr, master.size,
				master.aspace, master.cycle, master.dwidth);
		}
		break;
	case SLAVE_MINOR:
		switch (cmd) {
		case VME_GET_SLAVE:
			memset(&slave, 0, sizeof(slave));
			/* XXX We do not want to push aspace, cycle and width
			 * to userspace as they are
			 */
			retval = vme_slave_get(image[minor].resource,
					       &slave.enable, &slave.vme_addr,
					       &slave.size, &pci_addr,
					       &slave.aspace, &slave.cycle);

			copied = copy_to_user(argp, &slave,
					      sizeof(slave));
			if (copied) {
				pr_warn("Partial copy to userspace\n");
				return -EFAULT;
			}

			return retval;

		case VME_SET_SLAVE:
			copied = copy_from_user(&slave, argp, sizeof(slave));
			if (copied) {
				pr_warn("Partial copy from userspace\n");
				return -EFAULT;
			}

			/* XXX We do not want to push aspace, cycle and width
			 * to userspace as they are
			 */
			return vme_slave_set(image[minor].resource,
				slave.enable, slave.vme_addr, slave.size,
				image[minor].pci_buf, slave.aspace,
				slave.cycle);
		}
		break;
	}

	return -EINVAL;
}
/*
 * unlocked_ioctl entry point: serialises against read/write/mmap on the
 * same image, then defers to the locked worker.
 */
static long
vme_user_unlocked_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
{
	struct inode *inode = file_inode(file);
	unsigned int minor = iminor(inode);
	long ret;

	mutex_lock(&image[minor].mutex);
	ret = vme_user_ioctl(inode, file, cmd, arg);
	mutex_unlock(&image[minor].mutex);

	return ret;
}
/* VMA open: another VMA (fork/split) now shares this mapping's data. */
static void vme_user_vm_open(struct vm_area_struct *vma)
{
	struct vme_user_vma_priv *priv = vma->vm_private_data;

	refcount_inc(&priv->refcnt);
}
/*
 * VMA close: drop our share of the mapping's private data; the last
 * closer decrements the image's mmap count and frees the bookkeeping.
 */
static void vme_user_vm_close(struct vm_area_struct *vma)
{
	struct vme_user_vma_priv *priv = vma->vm_private_data;
	unsigned int minor = priv->minor;

	if (!refcount_dec_and_test(&priv->refcnt))
		return;

	mutex_lock(&image[minor].mutex);
	image[minor].mmap_count--;
	mutex_unlock(&image[minor].mutex);

	kfree(priv);
}
/* VMA callbacks used to track the per-mapping refcount across clones. */
static const struct vm_operations_struct vme_user_vm_ops = {
	.open = vme_user_vm_open,
	.close = vme_user_vm_close,
};
/*
 * mmap() a master window.
 *
 * Allocate the per-VMA bookkeeping *before* asking the bridge to set up
 * the mapping: previously a kmalloc failure after vme_master_mmap() had
 * succeeded returned -ENOMEM while the window was already mapped with no
 * vm_ops installed to undo the mmap_count accounting.
 */
static int vme_user_master_mmap(unsigned int minor, struct vm_area_struct *vma)
{
	struct vme_user_vma_priv *vma_priv;
	int err;

	vma_priv = kmalloc(sizeof(*vma_priv), GFP_KERNEL);
	if (!vma_priv)
		return -ENOMEM;

	vma_priv->minor = minor;
	refcount_set(&vma_priv->refcnt, 1);

	mutex_lock(&image[minor].mutex);

	err = vme_master_mmap(image[minor].resource, vma);
	if (err) {
		mutex_unlock(&image[minor].mutex);
		kfree(vma_priv);
		return err;
	}

	vma->vm_ops = &vme_user_vm_ops;
	vma->vm_private_data = vma_priv;

	image[minor].mmap_count++;

	mutex_unlock(&image[minor].mutex);

	return 0;
}
/* mmap() entry point: only master windows support memory mapping. */
static int vme_user_mmap(struct file *file, struct vm_area_struct *vma)
{
	unsigned int minor = iminor(file_inode(file));

	if (type[minor] != MASTER_MINOR)
		return -ENODEV;

	return vme_user_master_mmap(minor, vma);
}
/* File operations shared by all master/slave/control device nodes. */
static const struct file_operations vme_user_fops = {
	.read = vme_user_read,
	.write = vme_user_write,
	.llseek = vme_user_llseek,
	.unlocked_ioctl = vme_user_unlocked_ioctl,
	.compat_ioctl = compat_ptr_ioctl,
	.mmap = vme_user_mmap,
};
/*
 * Match callback: accept the device whose bus number appears in the
 * module's "bus" parameter array and whose slot equals its device number.
 */
static int vme_user_match(struct vme_dev *vdev)
{
	int cur_bus = vme_bus_num(vdev);
	int cur_slot = vme_slot_num(vdev);
	int i;

	for (i = 0; i < bus_num; i++) {
		if (bus[i] == cur_bus && vdev->num == cur_slot)
			return 1;
	}

	return 0;
}
/*
* In this simple access driver, the old behaviour is being preserved as much
* as practical. We will therefore reserve the buffers and request the images
* here so that we don't have to do it later.
*/
/*
 * Probe: claim the single supported bridge, register the char device
 * region, allocate all slave buffers and master windows up front, and
 * create the sysfs/udev device nodes. On any failure the goto ladder
 * unwinds everything acquired so far (note the counter re-seeding before
 * the err_master/err_slave labels).
 */
static int vme_user_probe(struct vme_dev *vdev)
{
	int i, err;
	char *name;
	/* Save pointer to the bridge device */
	if (vme_user_bridge) {
		dev_err(&vdev->dev, "Driver can only be loaded for 1 device\n");
		err = -EINVAL;
		goto err_dev;
	}
	vme_user_bridge = vdev;
	/* Initialise descriptors */
	for (i = 0; i < VME_DEVS; i++) {
		image[i].kern_buf = NULL;
		image[i].pci_buf = 0;
		mutex_init(&image[i].mutex);
		image[i].device = NULL;
		image[i].resource = NULL;
	}
	/* Assign major and minor numbers for the driver */
	err = register_chrdev_region(MKDEV(VME_MAJOR, 0), VME_DEVS,
				     driver_name);
	if (err) {
		dev_warn(&vdev->dev, "Error getting Major Number %d for driver.\n",
			 VME_MAJOR);
		goto err_region;
	}
	/* Register the driver as a char device */
	vme_user_cdev = cdev_alloc();
	if (!vme_user_cdev) {
		err = -ENOMEM;
		goto err_char;
	}
	vme_user_cdev->ops = &vme_user_fops;
	vme_user_cdev->owner = THIS_MODULE;
	err = cdev_add(vme_user_cdev, MKDEV(VME_MAJOR, 0), VME_DEVS);
	if (err)
		goto err_class;
	/* Request slave resources and allocate buffers (128kB wide) */
	for (i = SLAVE_MINOR; i < (SLAVE_MAX + 1); i++) {
		/* XXX Need to properly request attributes */
		/* For ca91cx42 bridge there are only two slave windows
		 * supporting A16 addressing, so we request A24 supported
		 * by all windows.
		 */
		image[i].resource = vme_slave_request(vme_user_bridge,
						      VME_A24, VME_SCT);
		if (!image[i].resource) {
			dev_warn(&vdev->dev,
				 "Unable to allocate slave resource\n");
			err = -ENOMEM;
			goto err_slave;
		}
		image[i].size_buf = PCI_BUF_SIZE;
		image[i].kern_buf = vme_alloc_consistent(image[i].resource,
							 image[i].size_buf,
							 &image[i].pci_buf);
		if (!image[i].kern_buf) {
			dev_warn(&vdev->dev,
				 "Unable to allocate memory for buffer\n");
			image[i].pci_buf = 0;
			vme_slave_free(image[i].resource);
			err = -ENOMEM;
			goto err_slave;
		}
	}
	/*
	 * Request master resources allocate page sized buffers for small
	 * reads and writes
	 */
	for (i = MASTER_MINOR; i < (MASTER_MAX + 1); i++) {
		/* XXX Need to properly request attributes */
		image[i].resource = vme_master_request(vme_user_bridge,
						       VME_A32, VME_SCT,
						       VME_D32);
		if (!image[i].resource) {
			dev_warn(&vdev->dev,
				 "Unable to allocate master resource\n");
			err = -ENOMEM;
			goto err_master;
		}
		image[i].size_buf = PCI_BUF_SIZE;
		image[i].kern_buf = kmalloc(image[i].size_buf, GFP_KERNEL);
		if (!image[i].kern_buf) {
			err = -ENOMEM;
			vme_master_free(image[i].resource);
			goto err_master;
		}
	}
	/* Create sysfs entries - on udev systems this creates the dev files */
	vme_user_sysfs_class = class_create(driver_name);
	if (IS_ERR(vme_user_sysfs_class)) {
		dev_err(&vdev->dev, "Error creating vme_user class.\n");
		err = PTR_ERR(vme_user_sysfs_class);
		goto err_master;
	}
	/* Add sysfs Entries */
	for (i = 0; i < VME_DEVS; i++) {
		int num;
		switch (type[i]) {
		case MASTER_MINOR:
			name = "bus/vme/m%d";
			break;
		case CONTROL_MINOR:
			name = "bus/vme/ctl";
			break;
		case SLAVE_MINOR:
			name = "bus/vme/s%d";
			break;
		default:
			err = -EINVAL;
			goto err_sysfs;
		}
		/* Slave minors restart their numbering at zero. */
		num = (type[i] == SLAVE_MINOR) ? i - (MASTER_MAX + 1) : i;
		image[i].device = device_create(vme_user_sysfs_class, NULL,
						MKDEV(VME_MAJOR, i), NULL,
						name, num);
		if (IS_ERR(image[i].device)) {
			dev_info(&vdev->dev, "Error creating sysfs device\n");
			err = PTR_ERR(image[i].device);
			goto err_sysfs;
		}
	}
	return 0;
err_sysfs:
	/* i indexes the first device that failed; destroy the earlier ones. */
	while (i > 0) {
		i--;
		device_destroy(vme_user_sysfs_class, MKDEV(VME_MAJOR, i));
	}
	class_destroy(vme_user_sysfs_class);
	/* Ensure counter set correctly to unalloc all master windows */
	i = MASTER_MAX + 1;
err_master:
	while (i > MASTER_MINOR) {
		i--;
		kfree(image[i].kern_buf);
		vme_master_free(image[i].resource);
	}
	/*
	 * Ensure counter set correctly to unalloc all slave windows and buffers
	 */
	i = SLAVE_MAX + 1;
err_slave:
	while (i > SLAVE_MINOR) {
		i--;
		vme_free_consistent(image[i].resource, image[i].size_buf,
				    image[i].kern_buf, image[i].pci_buf);
		vme_slave_free(image[i].resource);
	}
err_class:
	cdev_del(vme_user_cdev);
err_char:
	unregister_chrdev_region(MKDEV(VME_MAJOR, 0), VME_DEVS);
err_region:
err_dev:
	return err;
}
/*
 * Remove: mirror of probe in reverse - destroy device nodes and class,
 * free master buffers/windows, disable and free slave windows, then drop
 * the char device and major/minor region.
 */
static void vme_user_remove(struct vme_dev *dev)
{
	int i;
	/* Remove sysfs Entries */
	for (i = 0; i < VME_DEVS; i++) {
		mutex_destroy(&image[i].mutex);
		device_destroy(vme_user_sysfs_class, MKDEV(VME_MAJOR, i));
	}
	class_destroy(vme_user_sysfs_class);
	for (i = MASTER_MINOR; i < (MASTER_MAX + 1); i++) {
		kfree(image[i].kern_buf);
		vme_master_free(image[i].resource);
	}
	for (i = SLAVE_MINOR; i < (SLAVE_MAX + 1); i++) {
		/* Disable the window before releasing its backing buffer. */
		vme_slave_set(image[i].resource, 0, 0, 0, 0, VME_A32, 0);
		vme_free_consistent(image[i].resource, image[i].size_buf,
				    image[i].kern_buf, image[i].pci_buf);
		vme_slave_free(image[i].resource);
	}
	/* Unregister device driver */
	cdev_del(vme_user_cdev);
	/* Unregister the major and minor device numbers */
	unregister_chrdev_region(MKDEV(VME_MAJOR, 0), VME_DEVS);
}
/* VME core driver registration record. */
static struct vme_driver vme_user_driver = {
	.name = driver_name,
	.match = vme_user_match,
	.probe = vme_user_probe,
	.remove = vme_user_remove,
};
/*
 * Module init: validate the "bus" module parameter and register with the
 * VME core. Registration covers every slot; vme_user_match() narrows the
 * set down to the single device we actually drive.
 */
static int __init vme_user_init(void)
{
	pr_info("VME User Space Access Driver\n");

	if (bus_num == 0) {
		pr_err("No cards, skipping registration\n");
		return -ENODEV;
	}

	/* Let's start by supporting one bus, we can support more than one
	 * in future revisions if that ever becomes necessary.
	 */
	if (bus_num > VME_USER_BUS_MAX) {
		pr_err("Driver only able to handle %d buses\n",
		       VME_USER_BUS_MAX);
		bus_num = VME_USER_BUS_MAX;
	}

	/*
	 * Here we just register the maximum number of devices we can and
	 * leave vme_user_match() to allow only 1 to go through to probe().
	 * This way, if we later want to allow multiple user access devices,
	 * we just change the code in vme_user_match().
	 */
	return vme_register_driver(&vme_user_driver, VME_MAX_SLOTS);
}
/* Module exit: unhook from the VME core; remove() performs the teardown. */
static void __exit vme_user_exit(void)
{
	vme_unregister_driver(&vme_user_driver);
}
MODULE_PARM_DESC(bus, "Enumeration of VMEbus to which the driver is connected");
module_param_array(bus, int, &bus_num, 0000);
MODULE_DESCRIPTION("VME User Space Access Driver");
MODULE_AUTHOR("Martyn Welch <[email protected]>");
MODULE_LICENSE("GPL");
module_init(vme_user_init);
module_exit(vme_user_exit);
| linux-master | drivers/staging/vme_user/vme_user.c |
// SPDX-License-Identifier: GPL-2.0-or-later
/*
* Fake VME bridge support.
*
* This drive provides a fake VME bridge chip, this enables debugging of the
* VME framework in the absence of a VME system.
*
* This driver has to do a number of things in software that would be driven
* by hardware if it was available, it will also result in extra overhead at
* times when compared with driving actual hardware.
*
* Author: Martyn Welch <[email protected]>
* Copyright (c) 2014 Martyn Welch
*
* Based on vme_tsi148.c:
*
* Author: Martyn Welch <[email protected]>
* Copyright 2008 GE Intelligent Platforms Embedded Systems, Inc.
*
* Based on work by Tom Armistead and Ajit Prem
* Copyright 2004 Motorola Inc.
*/
#include <linux/device.h>
#include <linux/errno.h>
#include <linux/interrupt.h>
#include <linux/module.h>
#include <linux/moduleparam.h>
#include <linux/slab.h>
#include <linux/spinlock.h>
#include <linux/types.h>
#include "vme.h"
#include "vme_bridge.h"
/*
* Define the number of each that the fake driver supports.
*/
#define FAKE_MAX_MASTER 8 /* Max Master Windows */
#define FAKE_MAX_SLAVE 8 /* Max Slave Windows */
/* Structures to hold information normally held in device registers */
/* Software model of one slave window's "register" state. */
struct fake_slave_window {
	int enabled;			/* Window enabled flag */
	unsigned long long vme_base;	/* VME base address */
	unsigned long long size;	/* Window size */
	void *buf_base;			/* Kernel backing buffer */
	u32 aspace;			/* VME address space */
	u32 cycle;			/* VME cycle type */
};
/* Software model of one master window's "register" state. */
struct fake_master_window {
	int enabled;			/* Window enabled flag */
	unsigned long long vme_base;	/* VME base address */
	unsigned long long size;	/* Window size */
	u32 aspace;			/* VME address space */
	u32 cycle;			/* VME cycle type */
	u32 dwidth;			/* VME data width */
};
/* Structure used to hold driver specific information */
struct fake_driver {
	struct vme_bridge *parent;	/* Back-pointer to the VME bridge */
	struct fake_slave_window slaves[FAKE_MAX_SLAVE];
	struct fake_master_window masters[FAKE_MAX_MASTER];
	/* Emulated location-monitor state and callbacks. */
	u32 lm_enabled;
	unsigned long long lm_base;
	u32 lm_aspace;
	u32 lm_cycle;
	void (*lm_callback[4])(void *);
	void *lm_data[4];
	/* Emulated VME interrupt delivery. */
	struct tasklet_struct int_tasklet;
	int int_level;
	int int_statid;
	void *crcsr_kernel;		/* CR/CSR image buffer */
	dma_addr_t crcsr_bus;		/* CR/CSR bus address */
	/* Only one VME interrupt can be generated at a time, provide locking */
	struct mutex vme_int;
};
/* Module parameter */
static int geoid;
static const char driver_name[] = "vme_fake";
/* Bridge handle kept for module exit teardown. */
static struct vme_bridge *exit_pointer;
/* Root device all fake bridge devices hang off. */
static struct device *vme_root;
/*
* Calling VME bus interrupt callback if provided.
*/
/*
 * Tasklet body: deliver the previously latched level/statid to the
 * common VME IRQ handler, emulating hardware interrupt delivery in
 * softirq context.
 */
static void fake_VIRQ_tasklet(unsigned long data)
{
	struct vme_bridge *fake_bridge = (struct vme_bridge *)data;
	struct fake_driver *bridge = fake_bridge->driver_priv;

	vme_irq_handler(fake_bridge, bridge->int_level, bridge->int_statid);
}
/*
* Configure VME interrupt
*/
/* Intentional no-op: the fake bridge has no interrupt hardware to program. */
static void fake_irq_set(struct vme_bridge *fake_bridge, int level,
			 int state, int sync)
{
	/* Nothing to do */
}
/*
 * In the fake bridge, "bus addresses" are just kernel pointers smuggled
 * through dma_addr_t; these two helpers convert between the views.
 */
static void *fake_pci_to_ptr(dma_addr_t addr)
{
	return (void *)(uintptr_t)addr;
}
static dma_addr_t fake_ptr_to_pci(void *addr)
{
	return (dma_addr_t)(uintptr_t)addr;
}
/*
* Generate a VME bus interrupt at the requested level & vector. Wait for
* interrupt to be acked.
*/
/*
 * Emulate raising a VME bus interrupt at @level with vector @statid:
 * latch the parameters and schedule the tasklet that feeds them into
 * the common VME IRQ handler. Serialised so only one emulated interrupt
 * is in flight at a time.
 */
static int fake_irq_generate(struct vme_bridge *fake_bridge, int level,
			     int statid)
{
	struct fake_driver *bridge = fake_bridge->driver_priv;

	mutex_lock(&bridge->vme_int);

	bridge->int_level = level;
	bridge->int_statid = statid;

	/* Run the handler from softirq context, like a real interrupt. */
	tasklet_schedule(&bridge->int_tasklet);

	mutex_unlock(&bridge->vme_int);

	return 0;
}
/*
* Initialize a slave window with the requested attributes.
*/
/*
 * Configure a fake slave window. Validates that the base and bound are
 * aligned to the granularity required by the address space, then records
 * the settings in the software "registers" under the image mutex.
 */
static int fake_slave_set(struct vme_slave_resource *image, int enabled,
			  unsigned long long vme_base, unsigned long long size,
			  dma_addr_t buf_base, u32 aspace, u32 cycle)
{
	unsigned int i, granularity = 0;
	unsigned long long vme_bound;
	struct vme_bridge *fake_bridge;
	struct fake_driver *bridge;
	fake_bridge = image->parent;
	bridge = fake_bridge->driver_priv;
	i = image->number;
	/* Alignment granularity depends on the address space. */
	switch (aspace) {
	case VME_A16:
		granularity = 0x10;
		break;
	case VME_A24:
		granularity = 0x1000;
		break;
	case VME_A32:
		granularity = 0x10000;
		break;
	case VME_A64:
		granularity = 0x10000;
		break;
	case VME_CRCSR:
	case VME_USER1:
	case VME_USER2:
	case VME_USER3:
	case VME_USER4:
	default:
		pr_err("Invalid address space\n");
		return -EINVAL;
	}
	/*
	 * Bound address is a valid address for the window, adjust
	 * accordingly
	 */
	vme_bound = vme_base + size - granularity;
	if (vme_base & (granularity - 1)) {
		pr_err("Invalid VME base alignment\n");
		return -EINVAL;
	}
	if (vme_bound & (granularity - 1)) {
		pr_err("Invalid VME bound alignment\n");
		return -EINVAL;
	}
	/* Commit the new window settings atomically w.r.t. other users. */
	mutex_lock(&image->mtx);
	bridge->slaves[i].enabled = enabled;
	bridge->slaves[i].vme_base = vme_base;
	bridge->slaves[i].size = size;
	bridge->slaves[i].buf_base = fake_pci_to_ptr(buf_base);
	bridge->slaves[i].aspace = aspace;
	bridge->slaves[i].cycle = cycle;
	mutex_unlock(&image->mtx);
	return 0;
}
/*
* Get slave window configuration.
*/
/*
 * Report the current configuration of a fake slave window, read from the
 * software "registers" under the image mutex.
 */
static int fake_slave_get(struct vme_slave_resource *image, int *enabled,
			  unsigned long long *vme_base, unsigned long long *size,
			  dma_addr_t *buf_base, u32 *aspace, u32 *cycle)
{
	struct fake_driver *bridge = image->parent->driver_priv;
	struct fake_slave_window *window = &bridge->slaves[image->number];

	mutex_lock(&image->mtx);

	*enabled = window->enabled;
	*vme_base = window->vme_base;
	*size = window->size;
	*buf_base = fake_ptr_to_pci(window->buf_base);
	*aspace = window->aspace;
	*cycle = window->cycle;

	mutex_unlock(&image->mtx);

	return 0;
}
/*
 * Set the attributes of an outbound window.
 *
 * Validates alignment, size, data width and address space before
 * committing the configuration under the image lock.
 *
 * Cleanup: the original routed every failure through three no-op goto
 * labels (err_window/err_dwidth/err_aspace) that all fell through to a
 * single "return retval" - replaced with direct returns.
 */
static int fake_master_set(struct vme_master_resource *image, int enabled,
			   unsigned long long vme_base,
			   unsigned long long size, u32 aspace, u32 cycle,
			   u32 dwidth)
{
	struct vme_bridge *fake_bridge = image->parent;
	struct fake_driver *bridge = fake_bridge->driver_priv;
	unsigned int i;

	/* Verify input data: windows must be 64kB aligned */
	if (vme_base & 0xFFFF) {
		pr_err("Invalid VME Window alignment\n");
		return -EINVAL;
	}

	if (size & 0xFFFF) {
		pr_err("Invalid size alignment\n");
		return -EINVAL;
	}

	if ((size == 0) && (enabled != 0)) {
		pr_err("Size must be non-zero for enabled windows\n");
		return -EINVAL;
	}

	/* Check data width */
	switch (dwidth) {
	case VME_D8:
	case VME_D16:
	case VME_D32:
		break;
	default:
		pr_err("Invalid data width\n");
		return -EINVAL;
	}

	/* Check address space */
	switch (aspace) {
	case VME_A16:
	case VME_A24:
	case VME_A32:
	case VME_A64:
	case VME_CRCSR:
	case VME_USER1:
	case VME_USER2:
	case VME_USER3:
	case VME_USER4:
		break;
	default:
		pr_err("Invalid address space\n");
		return -EINVAL;
	}

	spin_lock(&image->lock);

	i = image->number;

	bridge->masters[i].enabled = enabled;
	bridge->masters[i].vme_base = vme_base;
	bridge->masters[i].size = size;
	bridge->masters[i].aspace = aspace;
	bridge->masters[i].cycle = cycle;
	bridge->masters[i].dwidth = dwidth;

	spin_unlock(&image->lock);

	return 0;
}
/*
 * Retrieve the attributes of an outbound window.
 *
 * Caller must hold image->lock; fake_master_get() is the locked wrapper.
 * (The previous header comment said "Set" - copy-paste error.)
 */
static int __fake_master_get(struct vme_master_resource *image, int *enabled,
			     unsigned long long *vme_base,
			     unsigned long long *size, u32 *aspace,
			     u32 *cycle, u32 *dwidth)
{
	struct fake_driver *bridge = image->parent->driver_priv;
	unsigned int num = image->number;

	*enabled = bridge->masters[num].enabled;
	*vme_base = bridge->masters[num].vme_base;
	*size = bridge->masters[num].size;
	*aspace = bridge->masters[num].aspace;
	*cycle = bridge->masters[num].cycle;
	*dwidth = bridge->masters[num].dwidth;

	return 0;
}
/* Locked wrapper around __fake_master_get(). */
static int fake_master_get(struct vme_master_resource *image, int *enabled,
			   unsigned long long *vme_base,
			   unsigned long long *size, u32 *aspace, u32 *cycle,
			   u32 *dwidth)
{
	int ret;

	spin_lock(&image->lock);
	ret = __fake_master_get(image, enabled, vme_base, size, aspace,
				cycle, dwidth);
	spin_unlock(&image->lock);

	return ret;
}
/*
 * Check whether an emulated bus access at @addr hits the configured
 * location monitor and, if so, invoke any attached per-monitor callbacks.
 *
 * Called from every fake_vmeread*()/fake_vmewrite*() access.
 */
static void fake_lm_check(struct fake_driver *bridge, unsigned long long addr,
			  u32 aspace, u32 cycle)
{
	struct vme_bridge *fake_bridge;
	unsigned long long lm_base;
	u32 lm_aspace, lm_cycle;
	int i;
	struct vme_lm_resource *lm;
	struct list_head *pos = NULL, *n;

	/* Get vme_bridge */
	fake_bridge = bridge->parent;

	/* Loop through each location monitor resource */
	list_for_each_safe(pos, n, &fake_bridge->lm_resources) {
		lm = list_entry(pos, struct vme_lm_resource, list);

		/* If disabled, we're done */
		if (bridge->lm_enabled == 0)
			return;

		/* NOTE(review): base/aspace/cycle are single bridge-wide
		 * fields, so every lm resource shares the same monitor
		 * configuration; only lm->monitors varies per entry.
		 */
		lm_base = bridge->lm_base;
		lm_aspace = bridge->lm_aspace;
		lm_cycle = bridge->lm_cycle;

		/* First make sure that the cycle and address space match */
		if ((lm_aspace == aspace) && (lm_cycle == cycle)) {
			for (i = 0; i < lm->monitors; i++) {
				/* Each location monitor covers 8 bytes */
				if (((lm_base + (8 * i)) <= addr) &&
				    ((lm_base + (8 * i) + 8) > addr)) {
					if (bridge->lm_callback[i])
						bridge->lm_callback[i](
							bridge->lm_data[i]);
				}
			}
		}
	}
}
/*
 * Emulate an 8-bit VME read: fetch the byte from the backing buffer of
 * the first slave window matching @aspace/@cycle that contains @addr.
 * Returns 0xff (bus-error fill pattern) when no window matches.
 */
static noinline_for_stack u8 fake_vmeread8(struct fake_driver *bridge,
					   unsigned long long addr,
					   u32 aspace, u32 cycle)
{
	u8 val = 0xff;
	int slot;

	for (slot = 0; slot < FAKE_MAX_SLAVE; slot++) {
		unsigned long long base = bridge->slaves[slot].vme_base;
		unsigned long long limit = base + bridge->slaves[slot].size;

		if (bridge->slaves[slot].aspace != aspace)
			continue;
		if (bridge->slaves[slot].cycle != cycle)
			continue;

		if (addr >= base && addr < limit) {
			u8 *ptr = (u8 *)(bridge->slaves[slot].buf_base +
					 (addr - base));

			val = *ptr;
			break;
		}
	}

	/* Every access may trigger a location monitor callback */
	fake_lm_check(bridge, addr, aspace, cycle);

	return val;
}
/*
 * Emulate a 16-bit VME read; both bytes must lie inside the matching
 * slave window.  Returns 0xffff when no window matches.
 */
static noinline_for_stack u16 fake_vmeread16(struct fake_driver *bridge,
					     unsigned long long addr,
					     u32 aspace, u32 cycle)
{
	u16 val = 0xffff;
	int slot;

	for (slot = 0; slot < FAKE_MAX_SLAVE; slot++) {
		unsigned long long base, limit;

		if (bridge->slaves[slot].aspace != aspace)
			continue;
		if (bridge->slaves[slot].cycle != cycle)
			continue;

		base = bridge->slaves[slot].vme_base;
		limit = base + bridge->slaves[slot].size;

		if (addr >= base && (addr + 1) < limit) {
			u16 *ptr = (u16 *)(bridge->slaves[slot].buf_base +
					   (addr - base));

			val = *ptr;
			break;
		}
	}

	fake_lm_check(bridge, addr, aspace, cycle);

	return val;
}
/*
 * Emulate a 32-bit VME read; all four bytes must lie inside the matching
 * slave window.  Returns 0xffffffff when no window matches.
 */
static noinline_for_stack u32 fake_vmeread32(struct fake_driver *bridge,
					     unsigned long long addr,
					     u32 aspace, u32 cycle)
{
	u32 val = 0xffffffff;
	int slot;

	for (slot = 0; slot < FAKE_MAX_SLAVE; slot++) {
		unsigned long long base, limit;

		if (bridge->slaves[slot].aspace != aspace)
			continue;
		if (bridge->slaves[slot].cycle != cycle)
			continue;

		base = bridge->slaves[slot].vme_base;
		limit = base + bridge->slaves[slot].size;

		if (addr >= base && (addr + 3) < limit) {
			u32 *ptr = (u32 *)(bridge->slaves[slot].buf_base +
					   (addr - base));

			val = *ptr;
			break;
		}
	}

	fake_lm_check(bridge, addr, aspace, cycle);

	return val;
}
/*
 * Read @count bytes through a master window into @buf, honouring the
 * window's configured data width so the emulated bus sees the same
 * cycle sizes a real bridge would generate.
 */
static ssize_t fake_master_read(struct vme_master_resource *image, void *buf,
				size_t count, loff_t offset)
{
	struct vme_bridge *fake_bridge = image->parent;
	struct fake_driver *priv = fake_bridge->driver_priv;
	int num = image->number;
	unsigned long long addr;
	u32 aspace, cycle, dwidth;
	unsigned int done = 0;
	unsigned int bulk;
	int retval;

	addr = (unsigned long long)priv->masters[num].vme_base + offset;
	aspace = priv->masters[num].aspace;
	cycle = priv->masters[num].cycle;
	dwidth = priv->masters[num].dwidth;

	spin_lock(&image->lock);

	/* The following code handles VME address alignment. We cannot use
	 * memcpy_xxx here because it may cut data transfers in to 8-bit
	 * cycles when D16 or D32 cycles are required on the VME bus.
	 * On the other hand, the bridge itself assures that the maximum data
	 * cycle configured for the transfer is used and splits it
	 * automatically for non-aligned addresses, so we don't want the
	 * overhead of needlessly forcing small transfers for the entire cycle.
	 */

	/* Lead-in byte to reach 16-bit alignment */
	if (addr & 0x1) {
		*(u8 *)buf = fake_vmeread8(priv, addr, aspace, cycle);
		done += 1;
		if (done == count)
			goto out;
	}

	/* Lead-in halfword to reach 32-bit alignment for D16/D32 */
	if (dwidth == VME_D16 || dwidth == VME_D32) {
		if ((addr + done) & 0x2) {
			if ((count - done) < 2) {
				*(u8 *)(buf + done) = fake_vmeread8(priv,
						addr + done, aspace, cycle);
				done += 1;
				goto out;
			} else {
				*(u16 *)(buf + done) = fake_vmeread16(priv,
						addr + done, aspace, cycle);
				done += 2;
			}
		}
	}

	/* Aligned bulk of the transfer at the configured data width */
	if (dwidth == VME_D32) {
		bulk = (count - done) & ~0x3;
		while (done < bulk) {
			*(u32 *)(buf + done) = fake_vmeread32(priv,
					addr + done, aspace, cycle);
			done += 4;
		}
	} else if (dwidth == VME_D16) {
		bulk = (count - done) & ~0x3;
		while (done < bulk) {
			*(u16 *)(buf + done) = fake_vmeread16(priv,
					addr + done, aspace, cycle);
			done += 2;
		}
	} else if (dwidth == VME_D8) {
		bulk = count - done;
		while (done < bulk) {
			*(u8 *)(buf + done) = fake_vmeread8(priv, addr + done,
					aspace, cycle);
			done += 1;
		}
	}

	/* Trailing halfword */
	if (dwidth == VME_D16 || dwidth == VME_D32) {
		if ((count - done) & 0x2) {
			*(u16 *)(buf + done) = fake_vmeread16(priv,
					addr + done, aspace, cycle);
			done += 2;
		}
	}

	/* Trailing byte */
	if ((count - done) & 0x1) {
		*(u8 *)(buf + done) = fake_vmeread8(priv, addr + done, aspace,
				cycle);
		done += 1;
	}

out:
	retval = count;

	spin_unlock(&image->lock);

	return retval;
}
/*
 * Emulate an 8-bit VME write into the backing buffer of the first slave
 * window matching @aspace/@cycle that contains @addr.  A miss is silently
 * dropped (as on a real bus with no responder).
 */
static noinline_for_stack void fake_vmewrite8(struct fake_driver *bridge,
					      u8 *buf, unsigned long long addr,
					      u32 aspace, u32 cycle)
{
	int slot;

	for (slot = 0; slot < FAKE_MAX_SLAVE; slot++) {
		unsigned long long base, limit;

		if (aspace != bridge->slaves[slot].aspace)
			continue;
		if (cycle != bridge->slaves[slot].cycle)
			continue;

		base = bridge->slaves[slot].vme_base;
		limit = base + bridge->slaves[slot].size;

		if (addr >= base && addr < limit) {
			u8 *ptr = (u8 *)((void *)bridge->slaves[slot].buf_base +
					 (addr - base));

			*ptr = *buf;
			break;
		}
	}

	fake_lm_check(bridge, addr, aspace, cycle);
}
/*
 * Emulate a 16-bit VME write; both bytes must lie inside the matching
 * slave window.
 */
static noinline_for_stack void fake_vmewrite16(struct fake_driver *bridge,
					       u16 *buf,
					       unsigned long long addr,
					       u32 aspace, u32 cycle)
{
	int slot;

	for (slot = 0; slot < FAKE_MAX_SLAVE; slot++) {
		unsigned long long base, limit;

		if (aspace != bridge->slaves[slot].aspace)
			continue;
		if (cycle != bridge->slaves[slot].cycle)
			continue;

		base = bridge->slaves[slot].vme_base;
		limit = base + bridge->slaves[slot].size;

		if (addr >= base && (addr + 1) < limit) {
			u16 *ptr = (u16 *)((void *)bridge->slaves[slot].buf_base +
					   (addr - base));

			*ptr = *buf;
			break;
		}
	}

	fake_lm_check(bridge, addr, aspace, cycle);
}
/*
 * Emulate a 32-bit VME write; all four bytes must lie inside the
 * matching slave window.
 */
static noinline_for_stack void fake_vmewrite32(struct fake_driver *bridge,
					       u32 *buf,
					       unsigned long long addr,
					       u32 aspace, u32 cycle)
{
	int slot;

	for (slot = 0; slot < FAKE_MAX_SLAVE; slot++) {
		unsigned long long base, limit;

		if (aspace != bridge->slaves[slot].aspace)
			continue;
		if (cycle != bridge->slaves[slot].cycle)
			continue;

		base = bridge->slaves[slot].vme_base;
		limit = base + bridge->slaves[slot].size;

		if (addr >= base && (addr + 3) < limit) {
			u32 *ptr = (u32 *)((void *)bridge->slaves[slot].buf_base +
					   (addr - base));

			*ptr = *buf;
			break;
		}
	}

	fake_lm_check(bridge, addr, aspace, cycle);
}
/*
 * Write @count bytes from @buf through a master window, mirroring the
 * alignment strategy of fake_master_read() so the correct cycle widths
 * are used on the emulated bus.
 */
static ssize_t fake_master_write(struct vme_master_resource *image, void *buf,
				 size_t count, loff_t offset)
{
	struct vme_bridge *fake_bridge = image->parent;
	struct fake_driver *bridge = fake_bridge->driver_priv;
	int num = image->number;
	unsigned long long addr;
	u32 aspace, cycle, dwidth;
	unsigned int done = 0;
	unsigned int bulk;
	int retval = 0;

	addr = bridge->masters[num].vme_base + offset;
	aspace = bridge->masters[num].aspace;
	cycle = bridge->masters[num].cycle;
	dwidth = bridge->masters[num].dwidth;

	spin_lock(&image->lock);

	/* Here we apply for the same strategy we do in master_read
	 * function in order to assure the correct cycles.
	 */

	/* Lead-in byte to reach 16-bit alignment */
	if (addr & 0x1) {
		fake_vmewrite8(bridge, (u8 *)buf, addr, aspace, cycle);
		done += 1;
		if (done == count)
			goto out;
	}

	/* Lead-in halfword to reach 32-bit alignment for D16/D32 */
	if (dwidth == VME_D16 || dwidth == VME_D32) {
		if ((addr + done) & 0x2) {
			if ((count - done) < 2) {
				fake_vmewrite8(bridge, (u8 *)(buf + done),
					       addr + done, aspace, cycle);
				done += 1;
				goto out;
			} else {
				fake_vmewrite16(bridge, (u16 *)(buf + done),
						addr + done, aspace, cycle);
				done += 2;
			}
		}
	}

	/* Aligned bulk of the transfer at the configured data width */
	if (dwidth == VME_D32) {
		bulk = (count - done) & ~0x3;
		while (done < bulk) {
			fake_vmewrite32(bridge, (u32 *)(buf + done),
					addr + done, aspace, cycle);
			done += 4;
		}
	} else if (dwidth == VME_D16) {
		bulk = (count - done) & ~0x3;
		while (done < bulk) {
			fake_vmewrite16(bridge, (u16 *)(buf + done),
					addr + done, aspace, cycle);
			done += 2;
		}
	} else if (dwidth == VME_D8) {
		bulk = count - done;
		while (done < bulk) {
			fake_vmewrite8(bridge, (u8 *)(buf + done), addr + done,
				       aspace, cycle);
			done += 1;
		}
	}

	/* Trailing halfword */
	if (dwidth == VME_D16 || dwidth == VME_D32) {
		if ((count - done) & 0x2) {
			fake_vmewrite16(bridge, (u16 *)(buf + done),
					addr + done, aspace, cycle);
			done += 2;
		}
	}

	/* Trailing byte */
	if ((count - done) & 0x1) {
		fake_vmewrite8(bridge, (u8 *)(buf + done), addr + done, aspace,
			       cycle);
		done += 1;
	}

out:
	retval = count;

	spin_unlock(&image->lock);

	return retval;
}
/*
 * Perform an RMW cycle on the VME bus.
 *
 * Requires a previously configured master window, returns final value.
 *
 * Bug fixes: the compare previously used logical '&&' ("(tmp && mask) ==
 * (compare && mask)"), which collapses both sides to 0/1 and compares the
 * wrong thing entirely - a bitwise '&' is required.  The update step also
 * OR-ed in "(mask | swap)", setting swap bits OUTSIDE the mask; an RMW
 * must only modify masked bits, so "(mask & swap)" is correct.
 */
static unsigned int fake_master_rmw(struct vme_master_resource *image,
		unsigned int mask, unsigned int compare, unsigned int swap,
		loff_t offset)
{
	u32 tmp, base;
	u32 aspace, cycle;
	int i;
	struct fake_driver *bridge;

	bridge = image->parent->driver_priv;

	/* Find the PCI address that maps to the desired VME address */
	i = image->number;

	base = bridge->masters[i].vme_base;
	aspace = bridge->masters[i].aspace;
	cycle = bridge->masters[i].cycle;

	/* Lock image */
	spin_lock(&image->lock);

	/* Read existing value */
	tmp = fake_vmeread32(bridge, base + offset, aspace, cycle);

	/* Perform check: masked bits must equal the masked compare value */
	if ((tmp & mask) == (compare & mask)) {
		/* Set masked bits that are set in swap... */
		tmp = tmp | (mask & swap);
		/* ...and clear masked bits that are clear in swap */
		tmp = tmp & (~mask | swap);

		/* Write back */
		fake_vmewrite32(bridge, &tmp, base + offset, aspace, cycle);
	}

	/* Unlock image */
	spin_unlock(&image->lock);

	return tmp;
}
/*
 * All 4 location monitors reside at the same base - this is therefore a
 * system wide configuration.
 *
 * This does not enable the LM monitor - that should be done when the first
 * callback is attached and disabled when the last callback is removed.
 */
static int fake_lm_set(struct vme_lm_resource *lm, unsigned long long lm_base,
		       u32 aspace, u32 cycle)
{
	struct vme_bridge *fake_bridge = lm->parent;
	struct fake_driver *bridge = fake_bridge->driver_priv;
	int i;

	mutex_lock(&lm->mtx);

	/* If we already have a callback attached, we can't move it! */
	for (i = 0; i < lm->monitors; i++) {
		if (!bridge->lm_callback[i])
			continue;
		mutex_unlock(&lm->mtx);
		pr_err("Location monitor callback attached, can't reset\n");
		return -EBUSY;
	}

	/* Only the standard address spaces can be monitored */
	switch (aspace) {
	case VME_A16:
	case VME_A24:
	case VME_A32:
	case VME_A64:
		break;
	default:
		mutex_unlock(&lm->mtx);
		pr_err("Invalid address space\n");
		return -EINVAL;
	}

	bridge->lm_base = lm_base;
	bridge->lm_aspace = aspace;
	bridge->lm_cycle = cycle;

	mutex_unlock(&lm->mtx);

	return 0;
}
/* Get configuration of the callback monitor and return whether it is enabled
 * or disabled.
 *
 * Fix: lm_enabled was previously read AFTER mutex_unlock(), so the
 * returned state could be inconsistent with the configuration copied out
 * under the lock; it is now sampled while the mutex is still held.
 */
static int fake_lm_get(struct vme_lm_resource *lm,
		       unsigned long long *lm_base, u32 *aspace, u32 *cycle)
{
	struct fake_driver *bridge = lm->parent->driver_priv;
	int enabled;

	mutex_lock(&lm->mtx);

	*lm_base = bridge->lm_base;
	*aspace = bridge->lm_aspace;
	*cycle = bridge->lm_cycle;
	enabled = bridge->lm_enabled;

	mutex_unlock(&lm->mtx);

	return enabled;
}
/*
 * Attach a callback to a specific location monitor.
 *
 * Callback will be passed the monitor triggered.
 *
 * NOTE(review): @monitor is not range-checked against lm->monitors here -
 * presumably callers guarantee it; TODO confirm at the vme.c layer.
 */
static int fake_lm_attach(struct vme_lm_resource *lm, int monitor,
			  void (*callback)(void *), void *data)
{
	struct vme_bridge *fake_bridge = lm->parent;
	struct fake_driver *bridge = fake_bridge->driver_priv;
	int ret = 0;

	mutex_lock(&lm->mtx);

	/* Ensure that the location monitor is configured - need PGM or DATA */
	if (bridge->lm_cycle == 0) {
		pr_err("Location monitor not properly configured\n");
		ret = -EINVAL;
		goto out;
	}

	/* Check that a callback isn't already attached */
	if (bridge->lm_callback[monitor]) {
		pr_err("Existing callback attached\n");
		ret = -EBUSY;
		goto out;
	}

	/* Attach callback */
	bridge->lm_callback[monitor] = callback;
	bridge->lm_data[monitor] = data;

	/* Ensure that global Location Monitor Enable set */
	bridge->lm_enabled = 1;

out:
	mutex_unlock(&lm->mtx);

	return ret;
}
/*
 * Detach a callback function from a specific location monitor.
 */
static int fake_lm_detach(struct vme_lm_resource *lm, int monitor)
{
	struct fake_driver *bridge = lm->parent->driver_priv;
	int i;
	int any_attached = 0;

	mutex_lock(&lm->mtx);

	/* Detach callback */
	bridge->lm_callback[monitor] = NULL;
	bridge->lm_data[monitor] = NULL;

	/* If all location monitors disabled, disable global Location Monitor */
	for (i = 0; i < lm->monitors; i++) {
		if (bridge->lm_callback[i])
			any_attached = 1;
	}

	if (!any_attached)
		bridge->lm_enabled = 0;

	mutex_unlock(&lm->mtx);

	return 0;
}
/*
 * Determine Geographical Addressing
 */
static int fake_slot_get(struct vme_bridge *fake_bridge)
{
	/* No hardware to probe: the slot is simply the 'geoid' module
	 * parameter.
	 */
	return geoid;
}
/*
 * Allocate a "consistent" buffer for the fake bridge.  Plain kmalloc()
 * suffices since no real DMA occurs; the bus address is derived directly
 * from the kernel pointer.  Returns NULL on allocation failure.
 */
static void *fake_alloc_consistent(struct device *parent, size_t size,
				   dma_addr_t *dma)
{
	void *buf = kmalloc(size, GFP_KERNEL);

	if (buf)
		*dma = fake_ptr_to_pci(buf);

	return buf;
}
/*
 * Free a buffer obtained from fake_alloc_consistent().  @size and @dma
 * are unused because the allocation was a plain kmalloc().
 *
 * Cleanup: removed the commented-out dma_free_coherent() dead code.
 */
static void fake_free_consistent(struct device *parent, size_t size,
				 void *vaddr, dma_addr_t dma)
{
	kfree(vaddr);
}
/*
 * Configure CR/CSR space
 *
 * Access to the CR/CSR can be configured at power-up. The location of the
 * CR/CSR registers in the CR/CSR address space is determined by the boards
 * Geographic address.
 *
 * Each board has a 512kB window, with the highest 4kB being used for the
 * boards registers, this means there is a fix length 508kB window which must
 * be mapped onto PCI memory.
 *
 * Fix: crcsr_bus was previously derived from crcsr_kernel BEFORE the NULL
 * check, handing a NULL pointer to fake_ptr_to_pci() on allocation
 * failure; the check now comes first.
 */
static int fake_crcsr_init(struct vme_bridge *fake_bridge)
{
	struct fake_driver *bridge = fake_bridge->driver_priv;
	u32 vstat;

	/* Allocate mem for CR/CSR image */
	bridge->crcsr_kernel = kzalloc(VME_CRCSR_BUF_SIZE, GFP_KERNEL);
	if (!bridge->crcsr_kernel)
		return -ENOMEM;

	bridge->crcsr_bus = fake_ptr_to_pci(bridge->crcsr_kernel);

	vstat = fake_slot_get(fake_bridge);

	pr_info("CR/CSR Offset: %d\n", vstat);

	return 0;
}
/* Release the CR/CSR image buffer allocated by fake_crcsr_init(). */
static void fake_crcsr_exit(struct vme_bridge *fake_bridge)
{
	struct fake_driver *bridge = fake_bridge->driver_priv;

	kfree(bridge->crcsr_kernel);
}
/*
 * Module init: register a fake parent device, allocate the bridge and
 * driver structures, populate master/slave/LM resources, set up the
 * CR/CSR image and register the bridge with the VME core.
 *
 * Fix: the root device registered at the top was leaked on every error
 * path; err_struct now calls root_device_unregister().
 */
static int __init fake_init(void)
{
	int retval, i;
	struct list_head *pos = NULL, *n;
	struct vme_bridge *fake_bridge;
	struct fake_driver *fake_device;
	struct vme_master_resource *master_image;
	struct vme_slave_resource *slave_image;
	struct vme_lm_resource *lm;

	/* We need a fake parent device */
	vme_root = root_device_register("vme");
	if (IS_ERR(vme_root))
		return PTR_ERR(vme_root);

	/* If we want to support more than one bridge at some point, we need to
	 * dynamically allocate this so we get one per device.
	 */
	fake_bridge = kzalloc(sizeof(*fake_bridge), GFP_KERNEL);
	if (!fake_bridge) {
		retval = -ENOMEM;
		goto err_struct;
	}

	fake_device = kzalloc(sizeof(*fake_device), GFP_KERNEL);
	if (!fake_device) {
		retval = -ENOMEM;
		goto err_driver;
	}

	fake_bridge->driver_priv = fake_device;

	fake_bridge->parent = vme_root;

	fake_device->parent = fake_bridge;

	/* Initialize wait queues & mutual exclusion flags */
	mutex_init(&fake_device->vme_int);
	mutex_init(&fake_bridge->irq_mtx);
	tasklet_init(&fake_device->int_tasklet, fake_VIRQ_tasklet,
		     (unsigned long)fake_bridge);

	strcpy(fake_bridge->name, driver_name);

	/* Add master windows to list */
	INIT_LIST_HEAD(&fake_bridge->master_resources);
	for (i = 0; i < FAKE_MAX_MASTER; i++) {
		master_image = kmalloc(sizeof(*master_image), GFP_KERNEL);
		if (!master_image) {
			retval = -ENOMEM;
			goto err_master;
		}
		master_image->parent = fake_bridge;
		spin_lock_init(&master_image->lock);
		master_image->locked = 0;
		master_image->number = i;
		master_image->address_attr = VME_A16 | VME_A24 | VME_A32 |
			VME_A64;
		master_image->cycle_attr = VME_SCT | VME_BLT | VME_MBLT |
			VME_2eVME | VME_2eSST | VME_2eSSTB | VME_2eSST160 |
			VME_2eSST267 | VME_2eSST320 | VME_SUPER | VME_USER |
			VME_PROG | VME_DATA;
		master_image->width_attr = VME_D16 | VME_D32;
		memset(&master_image->bus_resource, 0,
		       sizeof(struct resource));
		master_image->kern_base = NULL;
		list_add_tail(&master_image->list,
			      &fake_bridge->master_resources);
	}

	/* Add slave windows to list */
	INIT_LIST_HEAD(&fake_bridge->slave_resources);
	for (i = 0; i < FAKE_MAX_SLAVE; i++) {
		slave_image = kmalloc(sizeof(*slave_image), GFP_KERNEL);
		if (!slave_image) {
			retval = -ENOMEM;
			goto err_slave;
		}
		slave_image->parent = fake_bridge;
		mutex_init(&slave_image->mtx);
		slave_image->locked = 0;
		slave_image->number = i;
		slave_image->address_attr = VME_A16 | VME_A24 | VME_A32 |
			VME_A64 | VME_CRCSR | VME_USER1 | VME_USER2 |
			VME_USER3 | VME_USER4;
		slave_image->cycle_attr = VME_SCT | VME_BLT | VME_MBLT |
			VME_2eVME | VME_2eSST | VME_2eSSTB | VME_2eSST160 |
			VME_2eSST267 | VME_2eSST320 | VME_SUPER | VME_USER |
			VME_PROG | VME_DATA;
		list_add_tail(&slave_image->list,
			      &fake_bridge->slave_resources);
	}

	/* Add location monitor to list */
	INIT_LIST_HEAD(&fake_bridge->lm_resources);
	lm = kmalloc(sizeof(*lm), GFP_KERNEL);
	if (!lm) {
		retval = -ENOMEM;
		goto err_lm;
	}
	lm->parent = fake_bridge;
	mutex_init(&lm->mtx);
	lm->locked = 0;
	lm->number = 1;
	lm->monitors = 4;
	list_add_tail(&lm->list, &fake_bridge->lm_resources);

	fake_bridge->slave_get = fake_slave_get;
	fake_bridge->slave_set = fake_slave_set;
	fake_bridge->master_get = fake_master_get;
	fake_bridge->master_set = fake_master_set;
	fake_bridge->master_read = fake_master_read;
	fake_bridge->master_write = fake_master_write;
	fake_bridge->master_rmw = fake_master_rmw;
	fake_bridge->irq_set = fake_irq_set;
	fake_bridge->irq_generate = fake_irq_generate;
	fake_bridge->lm_set = fake_lm_set;
	fake_bridge->lm_get = fake_lm_get;
	fake_bridge->lm_attach = fake_lm_attach;
	fake_bridge->lm_detach = fake_lm_detach;
	fake_bridge->slot_get = fake_slot_get;
	fake_bridge->alloc_consistent = fake_alloc_consistent;
	fake_bridge->free_consistent = fake_free_consistent;

	pr_info("Board is%s the VME system controller\n",
		(geoid == 1) ? "" : " not");

	pr_info("VME geographical address is set to %d\n", geoid);

	retval = fake_crcsr_init(fake_bridge);
	if (retval) {
		pr_err("CR/CSR configuration failed.\n");
		goto err_crcsr;
	}

	retval = vme_register_bridge(fake_bridge);
	if (retval != 0) {
		pr_err("Chip Registration failed.\n");
		goto err_reg;
	}

	exit_pointer = fake_bridge;

	return 0;

err_reg:
	fake_crcsr_exit(fake_bridge);
err_crcsr:
err_lm:
	/* resources are stored in link list */
	list_for_each_safe(pos, n, &fake_bridge->lm_resources) {
		lm = list_entry(pos, struct vme_lm_resource, list);
		list_del(pos);
		kfree(lm);
	}
err_slave:
	/* resources are stored in link list */
	list_for_each_safe(pos, n, &fake_bridge->slave_resources) {
		slave_image = list_entry(pos, struct vme_slave_resource, list);
		list_del(pos);
		kfree(slave_image);
	}
err_master:
	/* resources are stored in link list */
	list_for_each_safe(pos, n, &fake_bridge->master_resources) {
		master_image = list_entry(pos, struct vme_master_resource,
					  list);
		list_del(pos);
		kfree(master_image);
	}

	kfree(fake_device);
err_driver:
	kfree(fake_bridge);
err_struct:
	/* Undo root_device_register(); previously leaked on failure */
	root_device_unregister(vme_root);
	return retval;
}
/*
 * Module teardown: disable all emulated windows, unregister the bridge
 * and free the resources allocated by fake_init().
 */
static void __exit fake_exit(void)
{
	struct list_head *pos = NULL;
	struct list_head *tmplist;
	struct vme_master_resource *master_image;
	struct vme_slave_resource *slave_image;
	int i;
	struct vme_bridge *fake_bridge;
	struct fake_driver *bridge;

	fake_bridge = exit_pointer;

	bridge = fake_bridge->driver_priv;

	pr_debug("Driver is being unloaded.\n");

	/*
	 * Shutdown all inbound and outbound windows.
	 */
	for (i = 0; i < FAKE_MAX_MASTER; i++)
		bridge->masters[i].enabled = 0;

	for (i = 0; i < FAKE_MAX_SLAVE; i++)
		bridge->slaves[i].enabled = 0;

	/*
	 * Shutdown Location monitor.
	 */
	bridge->lm_enabled = 0;

	vme_unregister_bridge(fake_bridge);

	fake_crcsr_exit(fake_bridge);

	/* resources are stored in link list */
	list_for_each_safe(pos, tmplist, &fake_bridge->slave_resources) {
		slave_image = list_entry(pos, struct vme_slave_resource, list);
		list_del(pos);
		kfree(slave_image);
	}

	/* resources are stored in link list */
	list_for_each_safe(pos, tmplist, &fake_bridge->master_resources) {
		master_image = list_entry(pos, struct vme_master_resource,
					  list);
		list_del(pos);
		kfree(master_image);
	}

	/* NOTE(review): the lm_resources list is not freed here even though
	 * fake_init() adds one vme_lm_resource to it - looks like a leak;
	 * TODO confirm nothing else releases it.
	 */

	kfree(fake_bridge->driver_priv);

	kfree(fake_bridge);

	root_device_unregister(vme_root);
}
MODULE_PARM_DESC(geoid, "Set geographical addressing");
module_param(geoid, int, 0);
MODULE_DESCRIPTION("Fake VME bridge driver");
MODULE_LICENSE("GPL");
module_init(fake_init);
module_exit(fake_exit);
/* linux-master: drivers/staging/vme_user/vme_fake.c */
// SPDX-License-Identifier: GPL-2.0-or-later
/*
* VME Bridge Framework
*
* Author: Martyn Welch <[email protected]>
* Copyright 2008 GE Intelligent Platforms Embedded Systems, Inc.
*
* Based on work by Tom Armistead and Ajit Prem
* Copyright 2004 Motorola Inc.
*/
#include <linux/init.h>
#include <linux/export.h>
#include <linux/mm.h>
#include <linux/types.h>
#include <linux/kernel.h>
#include <linux/errno.h>
#include <linux/pci.h>
#include <linux/poll.h>
#include <linux/highmem.h>
#include <linux/interrupt.h>
#include <linux/pagemap.h>
#include <linux/device.h>
#include <linux/dma-mapping.h>
#include <linux/syscalls.h>
#include <linux/mutex.h>
#include <linux/spinlock.h>
#include <linux/slab.h>
#include "vme.h"
#include "vme_bridge.h"
/* Bitmask and list of registered buses both protected by common mutex */
static unsigned int vme_bus_numbers;
static LIST_HEAD(vme_bus_list);
static DEFINE_MUTEX(vme_buses_lock);
static int __init vme_init(void);
/* Convert a generic struct device to the vme_dev that embeds it. */
static struct vme_dev *dev_to_vme_dev(struct device *dev)
{
	return container_of(dev, struct vme_dev, dev);
}
/*
 * Find the bridge that the resource is associated with.
 */
static struct vme_bridge *find_bridge(struct vme_resource *resource)
{
	/* Get list to search: the resource's 'entry' is embedded in a
	 * type-specific container, so the type determines which container
	 * holds the parent bridge pointer.
	 */
	switch (resource->type) {
	case VME_MASTER:
		return list_entry(resource->entry, struct vme_master_resource,
				  list)->parent;
	case VME_SLAVE:
		return list_entry(resource->entry, struct vme_slave_resource,
				  list)->parent;
	case VME_DMA:
		return list_entry(resource->entry, struct vme_dma_resource,
				  list)->parent;
	case VME_LM:
		return list_entry(resource->entry, struct vme_lm_resource,
				  list)->parent;
	default:
		printk(KERN_ERR "Unknown resource type\n");
		return NULL;
	}
}
/**
 * vme_alloc_consistent - Allocate contiguous memory.
 * @resource: Pointer to VME resource.
 * @size: Size of allocation required.
 * @dma: Pointer to variable to store physical address of allocation.
 *
 * Allocate a contiguous block of memory for use by the driver. This is used to
 * create the buffers for the slave windows.
 *
 * Return: Virtual address of allocation on success, NULL on failure.
 */
void *vme_alloc_consistent(struct vme_resource *resource, size_t size,
			   dma_addr_t *dma)
{
	struct vme_bridge *bridge;

	if (!resource) {
		printk(KERN_ERR "No resource\n");
		return NULL;
	}

	bridge = find_bridge(resource);
	if (!bridge) {
		printk(KERN_ERR "Can't find bridge\n");
		return NULL;
	}

	/* The bridge's parent device is handed to the allocator below */
	if (!bridge->parent) {
		printk(KERN_ERR "Dev entry NULL for bridge %s\n", bridge->name);
		return NULL;
	}

	/* Allocation is delegated to the bridge driver's implementation */
	if (!bridge->alloc_consistent) {
		printk(KERN_ERR "alloc_consistent not supported by bridge %s\n",
		       bridge->name);
		return NULL;
	}

	return bridge->alloc_consistent(bridge->parent, size, dma);
}
EXPORT_SYMBOL(vme_alloc_consistent);
/**
 * vme_free_consistent - Free previously allocated memory.
 * @resource: Pointer to VME resource.
 * @size: Size of allocation to free.
 * @vaddr: Virtual address of allocation.
 * @dma: Physical address of allocation.
 *
 * Free previously allocated block of contiguous memory.
 */
void vme_free_consistent(struct vme_resource *resource, size_t size,
			 void *vaddr, dma_addr_t dma)
{
	struct vme_bridge *bridge;

	if (!resource) {
		printk(KERN_ERR "No resource\n");
		return;
	}

	bridge = find_bridge(resource);
	if (!bridge) {
		printk(KERN_ERR "Can't find bridge\n");
		return;
	}

	/* The bridge's parent device is handed to the free routine below */
	if (!bridge->parent) {
		printk(KERN_ERR "Dev entry NULL for bridge %s\n", bridge->name);
		return;
	}

	/* Freeing is delegated to the bridge driver's implementation */
	if (!bridge->free_consistent) {
		printk(KERN_ERR "free_consistent not supported by bridge %s\n",
		       bridge->name);
		return;
	}

	bridge->free_consistent(bridge->parent, size, vaddr, dma);
}
EXPORT_SYMBOL(vme_free_consistent);
/**
 * vme_get_size - Helper function returning size of a VME window
 * @resource: Pointer to VME slave or master resource.
 *
 * Determine the size of the VME window provided. This is a helper
 * function, wrappering the call to vme_master_get or vme_slave_get
 * depending on the type of window resource handed to it.
 *
 * Return: Size of the window on success, zero on failure.
 */
size_t vme_get_size(struct vme_resource *resource)
{
	int enabled, err;
	unsigned long long base, size;
	dma_addr_t buf_base;
	u32 aspace, cycle, dwidth;

	switch (resource->type) {
	case VME_MASTER:
		err = vme_master_get(resource, &enabled, &base, &size,
				     &aspace, &cycle, &dwidth);
		return err ? 0 : size;
	case VME_SLAVE:
		err = vme_slave_get(resource, &enabled, &base, &size,
				    &buf_base, &aspace, &cycle);
		return err ? 0 : size;
	case VME_DMA:
		/* DMA resources have no window size */
		return 0;
	default:
		printk(KERN_ERR "Unknown resource type\n");
		return 0;
	}
}
EXPORT_SYMBOL(vme_get_size);
/*
 * Validate that [vme_base, vme_base + size) fits within the requested
 * VME address space.  Returns 0 when valid, -EINVAL for overflow or an
 * unknown address space, -EFAULT when the window exceeds the space.
 */
int vme_check_window(u32 aspace, unsigned long long vme_base,
		     unsigned long long size)
{
	/* Reject wrap-around of the 64-bit end address */
	if (vme_base + size < size)
		return -EINVAL;

	switch (aspace) {
	case VME_A16:
		return (vme_base + size > VME_A16_MAX) ? -EFAULT : 0;
	case VME_A24:
		return (vme_base + size > VME_A24_MAX) ? -EFAULT : 0;
	case VME_A32:
		return (vme_base + size > VME_A32_MAX) ? -EFAULT : 0;
	case VME_A64:
		/* The VME_A64_MAX limit is actually U64_MAX + 1 */
		return 0;
	case VME_CRCSR:
		return (vme_base + size > VME_CRCSR_MAX) ? -EFAULT : 0;
	case VME_USER1:
	case VME_USER2:
	case VME_USER3:
	case VME_USER4:
		/* User Defined */
		return 0;
	default:
		printk(KERN_ERR "Invalid address space\n");
		return -EINVAL;
	}
}
EXPORT_SYMBOL(vme_check_window);
/*
 * Translate a VME address modifier code to the matching address space,
 * or 0 when the modifier is not recognised.
 */
static u32 vme_get_aspace(int am)
{
	if (am == 0x29 || am == 0x2D)
		return VME_A16;
	if (am >= 0x38 && am <= 0x3F)
		return VME_A24;
	if (am >= 0x8 && am <= 0xF)
		return VME_A32;
	if (am == 0x0 || am == 0x1 || am == 0x3)
		return VME_A64;

	return 0;
}
/**
 * vme_slave_request - Request a VME slave window resource.
 * @vdev: Pointer to VME device struct vme_dev assigned to driver instance.
 * @address: Required VME address space.
 * @cycle: Required VME data transfer cycle type.
 *
 * Request use of a VME window resource capable of being set for the requested
 * address space and data transfer cycle.
 *
 * Return: Pointer to VME resource on success, NULL on failure.
 */
struct vme_resource *vme_slave_request(struct vme_dev *vdev, u32 address,
				       u32 cycle)
{
	struct vme_bridge *bridge = vdev->bridge;
	struct vme_slave_resource *image;
	struct vme_slave_resource *found = NULL;
	struct vme_resource *resource;
	struct list_head *pos = NULL;

	if (!bridge) {
		printk(KERN_ERR "Can't find VME bus\n");
		return NULL;
	}

	/* Loop through slave resources */
	list_for_each(pos, &bridge->slave_resources) {
		image = list_entry(pos, struct vme_slave_resource, list);

		if (!image) {
			printk(KERN_ERR "Registered NULL Slave resource\n");
			continue;
		}

		/* Find an unlocked and compatible image */
		mutex_lock(&image->mtx);
		if (((image->address_attr & address) == address) &&
		    ((image->cycle_attr & cycle) == cycle) &&
		    (image->locked == 0)) {
			image->locked = 1;
			mutex_unlock(&image->mtx);
			found = image;
			break;
		}
		mutex_unlock(&image->mtx);
	}

	/* No free image */
	if (!found)
		return NULL;

	resource = kmalloc(sizeof(*resource), GFP_KERNEL);
	if (!resource) {
		/* Unlock image */
		mutex_lock(&found->mtx);
		found->locked = 0;
		mutex_unlock(&found->mtx);
		return NULL;
	}

	resource->type = VME_SLAVE;
	resource->entry = &found->list;

	return resource;
}
EXPORT_SYMBOL(vme_slave_request);
/**
 * vme_slave_set - Set VME slave window configuration.
 * @resource: Pointer to VME slave resource.
 * @enabled: State to which the window should be configured.
 * @vme_base: Base address for the window.
 * @size: Size of the VME window.
 * @buf_base: Base address of buffer used to provide VME slave window storage.
 * @aspace: VME address space for the VME window.
 * @cycle: VME data transfer cycle type for the VME window.
 *
 * Set configuration for provided VME slave window.
 *
 * Return: Zero on success, -EINVAL if operation is not supported on this
 *         device, if an invalid resource has been provided or invalid
 *         attributes are provided. Hardware specific errors may also be
 *         returned.
 */
int vme_slave_set(struct vme_resource *resource, int enabled,
		  unsigned long long vme_base, unsigned long long size,
		  dma_addr_t buf_base, u32 aspace, u32 cycle)
{
	struct vme_bridge *bridge = find_bridge(resource);
	struct vme_slave_resource *image;
	int retval;

	if (resource->type != VME_SLAVE) {
		printk(KERN_ERR "Not a slave resource\n");
		return -EINVAL;
	}

	image = list_entry(resource->entry, struct vme_slave_resource, list);

	if (!bridge->slave_set) {
		printk(KERN_ERR "Function not supported\n");
		return -ENOSYS;
	}

	/* The window must be capable of the requested aspace and cycle */
	if (!(((image->address_attr & aspace) == aspace) &&
	      ((image->cycle_attr & cycle) == cycle))) {
		printk(KERN_ERR "Invalid attributes\n");
		return -EINVAL;
	}

	/* Reject windows extending beyond the chosen address space */
	retval = vme_check_window(aspace, vme_base, size);
	if (retval)
		return retval;

	/* The bridge driver performs the hardware-specific configuration */
	return bridge->slave_set(image, enabled, vme_base, size, buf_base,
				 aspace, cycle);
}
EXPORT_SYMBOL(vme_slave_set);
/**
* vme_slave_get - Retrieve VME slave window configuration.
* @resource: Pointer to VME slave resource.
* @enabled: Pointer to variable for storing state.
* @vme_base: Pointer to variable for storing window base address.
* @size: Pointer to variable for storing window size.
* @buf_base: Pointer to variable for storing slave buffer base address.
* @aspace: Pointer to variable for storing VME address space.
* @cycle: Pointer to variable for storing VME data transfer cycle type.
*
* Return configuration for provided VME slave window.
*
* Return: Zero on success, -EINVAL if operation is not supported on this
* device or if an invalid resource has been provided.
*/
int vme_slave_get(struct vme_resource *resource, int *enabled,
	unsigned long long *vme_base, unsigned long long *size,
	dma_addr_t *buf_base, u32 *aspace, u32 *cycle)
{
	struct vme_bridge *bridge = find_bridge(resource);
	struct vme_slave_resource *slave;

	/* Only slave window resources carry slave configuration. */
	if (resource->type != VME_SLAVE) {
		printk(KERN_ERR "Not a slave resource\n");
		return -EINVAL;
	}
	/* Bail out early when the bridge driver lacks this operation. */
	if (!bridge->slave_get) {
		printk(KERN_ERR "vme_slave_get not supported\n");
		return -EINVAL;
	}
	slave = list_entry(resource->entry, struct vme_slave_resource, list);
	return bridge->slave_get(slave, enabled, vme_base, size, buf_base,
		aspace, cycle);
}
EXPORT_SYMBOL(vme_slave_get);
/**
* vme_slave_free - Free VME slave window
* @resource: Pointer to VME slave resource.
*
* Free the provided slave resource so that it may be reallocated.
*/
void vme_slave_free(struct vme_resource *resource)
{
	struct vme_slave_resource *image;

	if (resource->type != VME_SLAVE) {
		printk(KERN_ERR "Not a slave resource\n");
		return;
	}
	image = list_entry(resource->entry, struct vme_slave_resource,
		list);
	if (!image) {
		printk(KERN_ERR "Can't find slave resource\n");
		return;
	}
	/* Clear the claim taken in vme_slave_request() so the window
	 * can be handed out again; warn on a double free.
	 */
	mutex_lock(&image->mtx);
	if (image->locked == 0)
		printk(KERN_ERR "Image is already free\n");
	image->locked = 0;
	mutex_unlock(&image->mtx);
	/* The handle itself was allocated in vme_slave_request(). */
	kfree(resource);
}
EXPORT_SYMBOL(vme_slave_free);
/**
* vme_master_request - Request a VME master window resource.
* @vdev: Pointer to VME device struct vme_dev assigned to driver instance.
* @address: Required VME address space.
* @cycle: Required VME data transfer cycle type.
* @dwidth: Required VME data transfer width.
*
* Request use of a VME window resource capable of being set for the requested
* address space, data transfer cycle and width.
*
* Return: Pointer to VME resource on success, NULL on failure.
*/
struct vme_resource *vme_master_request(struct vme_dev *vdev, u32 address,
	u32 cycle, u32 dwidth)
{
	struct vme_bridge *bridge;
	struct list_head *master_pos = NULL;
	struct vme_master_resource *allocated_image = NULL;
	struct vme_master_resource *master_image = NULL;
	struct vme_resource *resource = NULL;
	bridge = vdev->bridge;
	if (!bridge) {
		printk(KERN_ERR "Can't find VME bus\n");
		goto err_bus;
	}
	/* Loop through master resources */
	list_for_each(master_pos, &bridge->master_resources) {
		master_image = list_entry(master_pos,
			struct vme_master_resource, list);
		if (!master_image) {
			printk(KERN_WARNING "Registered NULL master resource\n");
			continue;
		}
		/* Find an unlocked and compatible image */
		/*
		 * The per-image spinlock makes the capability test and the
		 * ->locked claim atomic with respect to other requesters.
		 */
		spin_lock(&master_image->lock);
		if (((master_image->address_attr & address) == address) &&
			((master_image->cycle_attr & cycle) == cycle) &&
			((master_image->width_attr & dwidth) == dwidth) &&
			(master_image->locked == 0)) {
			master_image->locked = 1;
			spin_unlock(&master_image->lock);
			allocated_image = master_image;
			break;
		}
		spin_unlock(&master_image->lock);
	}
	/* Check to see if we found a resource */
	if (!allocated_image) {
		printk(KERN_ERR "Can't find a suitable resource\n");
		goto err_image;
	}
	resource = kmalloc(sizeof(*resource), GFP_KERNEL);
	if (!resource)
		goto err_alloc;
	resource->type = VME_MASTER;
	resource->entry = &allocated_image->list;
	return resource;
err_alloc:
	/* Unlock image */
	/* Handle allocation failed after claiming the image: release it. */
	spin_lock(&master_image->lock);
	master_image->locked = 0;
	spin_unlock(&master_image->lock);
err_image:
err_bus:
	return NULL;
}
EXPORT_SYMBOL(vme_master_request);
/**
* vme_master_set - Set VME master window configuration.
* @resource: Pointer to VME master resource.
* @enabled: State to which the window should be configured.
* @vme_base: Base address for the window.
* @size: Size of the VME window.
* @aspace: VME address space for the VME window.
* @cycle: VME data transfer cycle type for the VME window.
* @dwidth: VME data transfer width for the VME window.
*
* Set configuration for provided VME master window.
*
* Return: Zero on success, -EINVAL if operation is not supported on this
* device, if an invalid resource has been provided or invalid
* attributes are provided. Hardware specific errors may also be
* returned.
*/
int vme_master_set(struct vme_resource *resource, int enabled,
	unsigned long long vme_base, unsigned long long size,
	u32 aspace, u32 cycle, u32 dwidth)
{
	struct vme_bridge *bridge = find_bridge(resource);
	struct vme_master_resource *image;
	int err;

	if (resource->type != VME_MASTER) {
		printk(KERN_ERR "Not a master resource\n");
		return -EINVAL;
	}
	image = list_entry(resource->entry, struct vme_master_resource, list);
	if (!bridge->master_set) {
		printk(KERN_WARNING "vme_master_set not supported\n");
		return -EINVAL;
	}
	/* Every requested attribute must be within the image's abilities. */
	if (((image->address_attr & aspace) != aspace) ||
		((image->cycle_attr & cycle) != cycle) ||
		((image->width_attr & dwidth) != dwidth)) {
		printk(KERN_WARNING "Invalid attributes\n");
		return -EINVAL;
	}
	/* Base/size must fit inside the chosen address space. */
	err = vme_check_window(aspace, vme_base, size);
	if (err)
		return err;
	return bridge->master_set(image, enabled, vme_base, size, aspace,
		cycle, dwidth);
}
EXPORT_SYMBOL(vme_master_set);
/**
* vme_master_get - Retrieve VME master window configuration.
* @resource: Pointer to VME master resource.
* @enabled: Pointer to variable for storing state.
* @vme_base: Pointer to variable for storing window base address.
* @size: Pointer to variable for storing window size.
* @aspace: Pointer to variable for storing VME address space.
* @cycle: Pointer to variable for storing VME data transfer cycle type.
* @dwidth: Pointer to variable for storing VME data transfer width.
*
* Return configuration for provided VME master window.
*
* Return: Zero on success, -EINVAL if operation is not supported on this
* device or if an invalid resource has been provided.
*/
int vme_master_get(struct vme_resource *resource, int *enabled,
	unsigned long long *vme_base, unsigned long long *size,
	u32 *aspace, u32 *cycle, u32 *dwidth)
{
	struct vme_bridge *bridge = find_bridge(resource);
	struct vme_master_resource *image;

	/* Master window configuration only exists on master resources. */
	if (resource->type != VME_MASTER) {
		printk(KERN_ERR "Not a master resource\n");
		return -EINVAL;
	}
	/* The query is optional for bridge drivers. */
	if (!bridge->master_get) {
		printk(KERN_WARNING "%s not supported\n", __func__);
		return -EINVAL;
	}
	image = list_entry(resource->entry, struct vme_master_resource, list);
	return bridge->master_get(image, enabled, vme_base, size, aspace,
		cycle, dwidth);
}
EXPORT_SYMBOL(vme_master_get);
/**
* vme_master_read - Read data from VME space into a buffer.
* @resource: Pointer to VME master resource.
* @buf: Pointer to buffer where data should be transferred.
* @count: Number of bytes to transfer.
* @offset: Offset into VME master window at which to start transfer.
*
* Perform read of count bytes of data from location on VME bus which maps into
* the VME master window at offset to buf.
*
* Return: Number of bytes read, -EINVAL if resource is not a VME master
* resource or read operation is not supported. -EFAULT returned if
* invalid offset is provided. Hardware specific errors may also be
* returned.
*/
ssize_t vme_master_read(struct vme_resource *resource, void *buf, size_t count,
	loff_t offset)
{
	struct vme_bridge *bridge = find_bridge(resource);
	struct vme_master_resource *image;
	size_t length;

	/*
	 * Validate the resource type before consulting master-specific
	 * operations, matching the check order used by vme_master_set()
	 * and vme_master_get().
	 */
	if (resource->type != VME_MASTER) {
		printk(KERN_ERR "Not a master resource\n");
		return -EINVAL;
	}
	if (!bridge->master_read) {
		printk(KERN_WARNING "Reading from resource not supported\n");
		return -EINVAL;
	}
	image = list_entry(resource->entry, struct vme_master_resource, list);
	length = vme_get_size(resource);
	if (offset > length) {
		printk(KERN_WARNING "Invalid Offset\n");
		return -EFAULT;
	}
	/* Clamp the transfer so it cannot run past the window end. */
	if ((offset + count) > length)
		count = length - offset;
	return bridge->master_read(image, buf, count, offset);
}
EXPORT_SYMBOL(vme_master_read);
/**
* vme_master_write - Write data out to VME space from a buffer.
* @resource: Pointer to VME master resource.
* @buf: Pointer to buffer holding data to transfer.
* @count: Number of bytes to transfer.
* @offset: Offset into VME master window at which to start transfer.
*
* Perform write of count bytes of data from buf to location on VME bus which
* maps into the VME master window at offset.
*
* Return: Number of bytes written, -EINVAL if resource is not a VME master
* resource or write operation is not supported. -EFAULT returned if
* invalid offset is provided. Hardware specific errors may also be
* returned.
*/
ssize_t vme_master_write(struct vme_resource *resource, void *buf,
	size_t count, loff_t offset)
{
	struct vme_bridge *bridge = find_bridge(resource);
	struct vme_master_resource *image;
	size_t length;

	/*
	 * Validate the resource type before consulting master-specific
	 * operations, matching the check order used by vme_master_set()
	 * and vme_master_get().
	 */
	if (resource->type != VME_MASTER) {
		printk(KERN_ERR "Not a master resource\n");
		return -EINVAL;
	}
	if (!bridge->master_write) {
		printk(KERN_WARNING "Writing to resource not supported\n");
		return -EINVAL;
	}
	image = list_entry(resource->entry, struct vme_master_resource, list);
	length = vme_get_size(resource);
	if (offset > length) {
		printk(KERN_WARNING "Invalid Offset\n");
		return -EFAULT;
	}
	/* Clamp the transfer so it cannot run past the window end. */
	if ((offset + count) > length)
		count = length - offset;
	return bridge->master_write(image, buf, count, offset);
}
EXPORT_SYMBOL(vme_master_write);
/**
* vme_master_rmw - Perform read-modify-write cycle.
* @resource: Pointer to VME master resource.
* @mask: Bits to be compared and swapped in operation.
* @compare: Bits to be compared with data read from offset.
* @swap: Bits to be swapped in data read from offset.
* @offset: Offset into VME master window at which to perform operation.
*
* Perform read-modify-write cycle on provided location:
* - Location on VME bus is read.
* - Bits selected by mask are compared with compare.
* - Where a selected bit matches that in compare and are selected in swap,
* the bit is swapped.
* - Result written back to location on VME bus.
*
* Return: Bytes written on success, -EINVAL if resource is not a VME master
* resource or RMW operation is not supported. Hardware specific
* errors may also be returned.
*/
unsigned int vme_master_rmw(struct vme_resource *resource, unsigned int mask,
	unsigned int compare, unsigned int swap, loff_t offset)
{
	struct vme_bridge *bridge = find_bridge(resource);
	struct vme_master_resource *image;

	/*
	 * Validate the resource type before consulting master-specific
	 * operations, matching the other vme_master_*() entry points.
	 */
	if (resource->type != VME_MASTER) {
		printk(KERN_ERR "Not a master resource\n");
		return -EINVAL;
	}
	/* Message fixed: it previously said "Writing", a copy/paste from
	 * vme_master_write() that misdescribed an RMW failure.
	 */
	if (!bridge->master_rmw) {
		printk(KERN_WARNING "RMW operation not supported\n");
		return -EINVAL;
	}
	/*
	 * NOTE(review): the return type is unsigned, so -EINVAL comes back
	 * as a large positive value - preserved for compatibility; confirm
	 * how callers test for failure.
	 */
	image = list_entry(resource->entry, struct vme_master_resource, list);
	return bridge->master_rmw(image, mask, compare, swap, offset);
}
EXPORT_SYMBOL(vme_master_rmw);
/**
* vme_master_mmap - Mmap region of VME master window.
* @resource: Pointer to VME master resource.
* @vma: Pointer to definition of user mapping.
*
* Memory map a region of the VME master window into user space.
*
* Return: Zero on success, -EINVAL if resource is not a VME master
* resource or -EFAULT if map exceeds window size. Other generic mmap
* errors may also be returned.
*/
int vme_master_mmap(struct vme_resource *resource, struct vm_area_struct *vma)
{
	struct vme_master_resource *image;
	phys_addr_t start;
	unsigned long len;

	if (resource->type != VME_MASTER) {
		pr_err("Not a master resource\n");
		return -EINVAL;
	}
	image = list_entry(resource->entry, struct vme_master_resource, list);
	start = image->bus_resource.start + (vma->vm_pgoff << PAGE_SHIFT);
	len = vma->vm_end - vma->vm_start;
	/* The requested mapping must lie entirely inside the window. */
	if (start + len > image->bus_resource.end + 1) {
		pr_err("Map size cannot exceed the window size\n");
		return -EFAULT;
	}
	/* VME windows are device memory - map them uncached. */
	vma->vm_page_prot = pgprot_noncached(vma->vm_page_prot);
	return vm_iomap_memory(vma, start, len);
}
EXPORT_SYMBOL(vme_master_mmap);
/**
* vme_master_free - Free VME master window
* @resource: Pointer to VME master resource.
*
* Free the provided master resource so that it may be reallocated.
*/
void vme_master_free(struct vme_resource *resource)
{
	struct vme_master_resource *image;

	if (resource->type != VME_MASTER) {
		printk(KERN_ERR "Not a master resource\n");
		return;
	}
	image = list_entry(resource->entry, struct vme_master_resource,
		list);
	if (!image) {
		printk(KERN_ERR "Can't find master resource\n");
		return;
	}
	/* Clear the claim taken in vme_master_request() so the window
	 * can be handed out again; warn on a double free.
	 */
	spin_lock(&image->lock);
	if (image->locked == 0)
		printk(KERN_ERR "Image is already free\n");
	image->locked = 0;
	spin_unlock(&image->lock);
	/* The handle itself was allocated in vme_master_request(). */
	kfree(resource);
}
EXPORT_SYMBOL(vme_master_free);
/**
* vme_dma_request - Request a DMA controller.
* @vdev: Pointer to VME device struct vme_dev assigned to driver instance.
* @route: Required src/destination combination.
*
 * Request a VME DMA controller with capability to perform transfers between
* requested source/destination combination.
*
* Return: Pointer to VME DMA resource on success, NULL on failure.
*/
struct vme_resource *vme_dma_request(struct vme_dev *vdev, u32 route)
{
	struct vme_bridge *bridge;
	struct list_head *dma_pos = NULL;
	struct vme_dma_resource *allocated_ctrlr = NULL;
	struct vme_dma_resource *dma_ctrlr = NULL;
	struct vme_resource *resource = NULL;
	/* XXX Not checking resource attributes */
	/* NOTE(review): this prints on every request and at KERN_ERR even
	 * though it is only a reminder that attribute checks are missing.
	 */
	printk(KERN_ERR "No VME resource Attribute tests done\n");
	bridge = vdev->bridge;
	if (!bridge) {
		printk(KERN_ERR "Can't find VME bus\n");
		goto err_bus;
	}
	/* Loop through DMA resources */
	list_for_each(dma_pos, &bridge->dma_resources) {
		dma_ctrlr = list_entry(dma_pos,
			struct vme_dma_resource, list);
		if (!dma_ctrlr) {
			printk(KERN_ERR "Registered NULL DMA resource\n");
			continue;
		}
		/* Find an unlocked and compatible controller */
		/*
		 * The per-controller mutex makes the route test and the
		 * ->locked claim atomic with respect to other requesters.
		 */
		mutex_lock(&dma_ctrlr->mtx);
		if (((dma_ctrlr->route_attr & route) == route) &&
			(dma_ctrlr->locked == 0)) {
			dma_ctrlr->locked = 1;
			mutex_unlock(&dma_ctrlr->mtx);
			allocated_ctrlr = dma_ctrlr;
			break;
		}
		mutex_unlock(&dma_ctrlr->mtx);
	}
	/* Check to see if we found a resource */
	if (!allocated_ctrlr)
		goto err_ctrlr;
	resource = kmalloc(sizeof(*resource), GFP_KERNEL);
	if (!resource)
		goto err_alloc;
	resource->type = VME_DMA;
	resource->entry = &allocated_ctrlr->list;
	return resource;
err_alloc:
	/* Unlock image */
	/* Handle allocation failed after claiming the controller: release it. */
	mutex_lock(&dma_ctrlr->mtx);
	dma_ctrlr->locked = 0;
	mutex_unlock(&dma_ctrlr->mtx);
err_ctrlr:
err_bus:
	return NULL;
}
EXPORT_SYMBOL(vme_dma_request);
/**
* vme_new_dma_list - Create new VME DMA list.
* @resource: Pointer to VME DMA resource.
*
* Create a new VME DMA list. It is the responsibility of the user to free
* the list once it is no longer required with vme_dma_list_free().
*
* Return: Pointer to new VME DMA list, NULL on allocation failure or invalid
* VME DMA resource.
*/
struct vme_dma_list *vme_new_dma_list(struct vme_resource *resource)
{
	struct vme_dma_list *dlist;

	/* DMA lists can only hang off DMA controller resources. */
	if (resource->type != VME_DMA) {
		printk(KERN_ERR "Not a DMA resource\n");
		return NULL;
	}
	dlist = kmalloc(sizeof(*dlist), GFP_KERNEL);
	if (!dlist)
		return NULL;
	/* New list starts empty and unlocked, tied to its controller. */
	INIT_LIST_HEAD(&dlist->entries);
	dlist->parent = list_entry(resource->entry,
		struct vme_dma_resource,
		list);
	mutex_init(&dlist->mtx);
	return dlist;
}
EXPORT_SYMBOL(vme_new_dma_list);
/**
* vme_dma_pattern_attribute - Create "Pattern" type VME DMA list attribute.
* @pattern: Value to use used as pattern
* @type: Type of pattern to be written.
*
* Create VME DMA list attribute for pattern generation. It is the
* responsibility of the user to free used attributes using
* vme_dma_free_attribute().
*
* Return: Pointer to VME DMA attribute, NULL on failure.
*/
struct vme_dma_attr *vme_dma_pattern_attribute(u32 pattern, u32 type)
{
	struct vme_dma_pattern *pattern_attr;
	struct vme_dma_attr *attr;

	attr = kmalloc(sizeof(*attr), GFP_KERNEL);
	if (!attr)
		return NULL;
	pattern_attr = kmalloc(sizeof(*pattern_attr), GFP_KERNEL);
	if (!pattern_attr) {
		kfree(attr);
		return NULL;
	}
	/* Fill the type-specific payload, then wrap it. */
	pattern_attr->pattern = pattern;
	pattern_attr->type = type;
	attr->type = VME_DMA_PATTERN;
	attr->private = pattern_attr;
	return attr;
}
EXPORT_SYMBOL(vme_dma_pattern_attribute);
/**
* vme_dma_pci_attribute - Create "PCI" type VME DMA list attribute.
* @address: PCI base address for DMA transfer.
*
* Create VME DMA list attribute pointing to a location on PCI for DMA
* transfers. It is the responsibility of the user to free used attributes
* using vme_dma_free_attribute().
*
* Return: Pointer to VME DMA attribute, NULL on failure.
*/
struct vme_dma_attr *vme_dma_pci_attribute(dma_addr_t address)
{
	struct vme_dma_pci *pci_attr;
	struct vme_dma_attr *attr;

	/* TODO: the PCI address is accepted without any sanity checks. */
	attr = kmalloc(sizeof(*attr), GFP_KERNEL);
	if (!attr)
		return NULL;
	pci_attr = kmalloc(sizeof(*pci_attr), GFP_KERNEL);
	if (!pci_attr) {
		kfree(attr);
		return NULL;
	}
	/* Fill the type-specific payload, then wrap it. */
	pci_attr->address = address;
	attr->type = VME_DMA_PCI;
	attr->private = pci_attr;
	return attr;
}
EXPORT_SYMBOL(vme_dma_pci_attribute);
/**
* vme_dma_vme_attribute - Create "VME" type VME DMA list attribute.
* @address: VME base address for DMA transfer.
* @aspace: VME address space to use for DMA transfer.
* @cycle: VME bus cycle to use for DMA transfer.
* @dwidth: VME data width to use for DMA transfer.
*
* Create VME DMA list attribute pointing to a location on the VME bus for DMA
* transfers. It is the responsibility of the user to free used attributes
* using vme_dma_free_attribute().
*
* Return: Pointer to VME DMA attribute, NULL on failure.
*/
struct vme_dma_attr *vme_dma_vme_attribute(unsigned long long address,
	u32 aspace, u32 cycle, u32 dwidth)
{
	struct vme_dma_vme *vme_attr;
	struct vme_dma_attr *attr;

	attr = kmalloc(sizeof(*attr), GFP_KERNEL);
	if (!attr)
		return NULL;
	vme_attr = kmalloc(sizeof(*vme_attr), GFP_KERNEL);
	if (!vme_attr) {
		kfree(attr);
		return NULL;
	}
	/* Fill the type-specific payload, then wrap it. */
	vme_attr->address = address;
	vme_attr->aspace = aspace;
	vme_attr->cycle = cycle;
	vme_attr->dwidth = dwidth;
	attr->type = VME_DMA_VME;
	attr->private = vme_attr;
	return attr;
}
EXPORT_SYMBOL(vme_dma_vme_attribute);
/**
* vme_dma_free_attribute - Free DMA list attribute.
* @attributes: Pointer to DMA list attribute.
*
* Free VME DMA list attribute. VME DMA list attributes can be safely freed
* once vme_dma_list_add() has returned.
*/
void vme_dma_free_attribute(struct vme_dma_attr *attributes)
{
	void *payload = attributes->private;

	/* Free the wrapper and its type-specific payload. */
	kfree(attributes);
	kfree(payload);
}
EXPORT_SYMBOL(vme_dma_free_attribute);
/**
 * vme_dma_list_add - Add entry to a VME DMA list.
* @list: Pointer to VME list.
* @src: Pointer to DMA list attribute to use as source.
* @dest: Pointer to DMA list attribute to use as destination.
* @count: Number of bytes to transfer.
*
* Add an entry to the provided VME DMA list. Entry requires pointers to source
* and destination DMA attributes and a count.
*
* Please note, the attributes supported as source and destinations for
* transfers are hardware dependent.
*
* Return: Zero on success, -EINVAL if operation is not supported on this
* device or if the link list has already been submitted for execution.
* Hardware specific errors also possible.
*/
int vme_dma_list_add(struct vme_dma_list *list, struct vme_dma_attr *src,
	struct vme_dma_attr *dest, size_t count)
{
	struct vme_bridge *bridge = list->parent->parent;
	int rc;

	if (!bridge->dma_list_add) {
		printk(KERN_WARNING "Link List DMA generation not supported\n");
		return -EINVAL;
	}
	/* A list already queued for execution must not be modified. */
	if (!mutex_trylock(&list->mtx)) {
		printk(KERN_ERR "Link List already submitted\n");
		return -EINVAL;
	}
	rc = bridge->dma_list_add(list, src, dest, count);
	mutex_unlock(&list->mtx);
	return rc;
}
EXPORT_SYMBOL(vme_dma_list_add);
/**
* vme_dma_list_exec - Queue a VME DMA list for execution.
* @list: Pointer to VME list.
*
* Queue the provided VME DMA list for execution. The call will return once the
* list has been executed.
*
* Return: Zero on success, -EINVAL if operation is not supported on this
* device. Hardware specific errors also possible.
*/
int vme_dma_list_exec(struct vme_dma_list *list)
{
	struct vme_bridge *bridge = list->parent->parent;
	int rc;

	if (!bridge->dma_list_exec) {
		printk(KERN_ERR "Link List DMA execution not supported\n");
		return -EINVAL;
	}
	/* Block (rather than fail) while the list is being modified. */
	mutex_lock(&list->mtx);
	rc = bridge->dma_list_exec(list);
	mutex_unlock(&list->mtx);
	return rc;
}
EXPORT_SYMBOL(vme_dma_list_exec);
/**
* vme_dma_list_free - Free a VME DMA list.
* @list: Pointer to VME list.
*
* Free the provided DMA list and all its entries.
*
* Return: Zero on success, -EINVAL on invalid VME resource, -EBUSY if resource
* is still in use. Hardware specific errors also possible.
*/
int vme_dma_list_free(struct vme_dma_list *list)
{
	struct vme_bridge *bridge = list->parent->parent;
	int rc;

	if (!bridge->dma_list_empty) {
		printk(KERN_WARNING "Emptying of Link Lists not supported\n");
		return -EINVAL;
	}
	if (!mutex_trylock(&list->mtx)) {
		printk(KERN_ERR "Link List in use\n");
		return -EBUSY;
	}
	/*
	 * DMA list entries are bridge-driver specific, so the low level
	 * driver must tear them down before the list itself can go.
	 */
	rc = bridge->dma_list_empty(list);
	mutex_unlock(&list->mtx);
	if (rc) {
		printk(KERN_ERR "Unable to empty link-list entries\n");
		return rc;
	}
	kfree(list);
	return 0;
}
EXPORT_SYMBOL(vme_dma_list_free);
/**
* vme_dma_free - Free a VME DMA resource.
* @resource: Pointer to VME DMA resource.
*
* Free the provided DMA resource so that it may be reallocated.
*
* Return: Zero on success, -EINVAL on invalid VME resource, -EBUSY if resource
* is still active.
*/
int vme_dma_free(struct vme_resource *resource)
{
	struct vme_dma_resource *ctrlr;

	if (resource->type != VME_DMA) {
		printk(KERN_ERR "Not a DMA resource\n");
		return -EINVAL;
	}
	ctrlr = list_entry(resource->entry, struct vme_dma_resource, list);
	if (!mutex_trylock(&ctrlr->mtx)) {
		printk(KERN_ERR "Resource busy, can't free\n");
		return -EBUSY;
	}
	/* Refuse to free while transfers are pending or running. */
	if (!list_empty(&ctrlr->pending) || !list_empty(&ctrlr->running)) {
		printk(KERN_WARNING "Resource still processing transfers\n");
		mutex_unlock(&ctrlr->mtx);
		return -EBUSY;
	}
	/* Release the claim taken in vme_dma_request(). */
	ctrlr->locked = 0;
	mutex_unlock(&ctrlr->mtx);
	kfree(resource);
	return 0;
}
EXPORT_SYMBOL(vme_dma_free);
void vme_bus_error_handler(struct vme_bridge *bridge,
unsigned long long address, int am)
{
struct list_head *handler_pos = NULL;
struct vme_error_handler *handler;
int handler_triggered = 0;
u32 aspace = vme_get_aspace(am);
list_for_each(handler_pos, &bridge->vme_error_handlers) {
handler = list_entry(handler_pos, struct vme_error_handler,
list);
if ((aspace == handler->aspace) &&
(address >= handler->start) &&
(address < handler->end)) {
if (!handler->num_errors)
handler->first_error = address;
if (handler->num_errors != UINT_MAX)
handler->num_errors++;
handler_triggered = 1;
}
}
if (!handler_triggered)
dev_err(bridge->parent,
"Unhandled VME access error at address 0x%llx\n",
address);
}
EXPORT_SYMBOL(vme_bus_error_handler);
struct vme_error_handler *vme_register_error_handler(struct vme_bridge *bridge, u32 aspace,
unsigned long long address, size_t len)
{
struct vme_error_handler *handler;
handler = kmalloc(sizeof(*handler), GFP_ATOMIC);
if (!handler)
return NULL;
handler->aspace = aspace;
handler->start = address;
handler->end = address + len;
handler->num_errors = 0;
handler->first_error = 0;
list_add_tail(&handler->list, &bridge->vme_error_handlers);
return handler;
}
EXPORT_SYMBOL(vme_register_error_handler);
/* Remove a bus error handler previously installed with
 * vme_register_error_handler() and release its memory.
 */
void vme_unregister_error_handler(struct vme_error_handler *handler)
{
	/* Unlink from the bridge's handler list before freeing. */
	list_del(&handler->list);
	kfree(handler);
}
EXPORT_SYMBOL(vme_unregister_error_handler);
/* Route a VME interrupt (level 1-7, vector statid) to the callback
 * registered via vme_irq_request(); warn if none is attached.
 */
void vme_irq_handler(struct vme_bridge *bridge, int level, int statid)
{
	void (*handler)(int, int, void *);
	void *data;

	/* IRQ levels are 1-7; the table is zero based. */
	handler = bridge->irq[level - 1].callback[statid].func;
	data = bridge->irq[level - 1].callback[statid].priv_data;
	if (!handler) {
		printk(KERN_WARNING "Spurious VME interrupt, level:%x, vector:%x\n",
			level, statid);
		return;
	}
	handler(level, statid, data);
}
EXPORT_SYMBOL(vme_irq_handler);
/**
* vme_irq_request - Request a specific VME interrupt.
* @vdev: Pointer to VME device struct vme_dev assigned to driver instance.
* @level: Interrupt priority being requested.
* @statid: Interrupt vector being requested.
* @callback: Pointer to callback function called when VME interrupt/vector
* received.
* @priv_data: Generic pointer that will be passed to the callback function.
*
* Request callback to be attached as a handler for VME interrupts with provided
* level and statid.
*
* Return: Zero on success, -EINVAL on invalid vme device, level or if the
* function is not supported, -EBUSY if the level/statid combination is
* already in use. Hardware specific errors also possible.
*/
int vme_irq_request(struct vme_dev *vdev, int level, int statid,
	void (*callback)(int, int, void *),
	void *priv_data)
{
	struct vme_bridge *bridge;
	bridge = vdev->bridge;
	if (!bridge) {
		printk(KERN_ERR "Can't find VME bus\n");
		return -EINVAL;
	}
	/* VME defines interrupt levels 1 through 7. */
	if ((level < 1) || (level > 7)) {
		printk(KERN_ERR "Invalid interrupt level\n");
		return -EINVAL;
	}
	if (!bridge->irq_set) {
		printk(KERN_ERR "Configuring interrupts not supported\n");
		return -EINVAL;
	}
	/* NOTE(review): statid indexes the callback table unchecked -
	 * presumably 0-255 (VME status/ID byte); confirm callers validate.
	 */
	mutex_lock(&bridge->irq_mtx);
	if (bridge->irq[level - 1].callback[statid].func) {
		mutex_unlock(&bridge->irq_mtx);
		printk(KERN_WARNING "VME Interrupt already taken\n");
		return -EBUSY;
	}
	/* Record the handler, then unmask the level in hardware. */
	bridge->irq[level - 1].count++;
	bridge->irq[level - 1].callback[statid].priv_data = priv_data;
	bridge->irq[level - 1].callback[statid].func = callback;
	/* Enable IRQ level */
	bridge->irq_set(bridge, level, 1, 1);
	mutex_unlock(&bridge->irq_mtx);
	return 0;
}
EXPORT_SYMBOL(vme_irq_request);
/**
* vme_irq_free - Free a VME interrupt.
* @vdev: Pointer to VME device struct vme_dev assigned to driver instance.
* @level: Interrupt priority of interrupt being freed.
* @statid: Interrupt vector of interrupt being freed.
*
* Remove previously attached callback from VME interrupt priority/vector.
*/
void vme_irq_free(struct vme_dev *vdev, int level, int statid)
{
	struct vme_bridge *bridge = vdev->bridge;

	if (!bridge) {
		printk(KERN_ERR "Can't find VME bus\n");
		return;
	}
	/* VME defines interrupt levels 1 through 7. */
	if (level < 1 || level > 7) {
		printk(KERN_ERR "Invalid interrupt level\n");
		return;
	}
	if (!bridge->irq_set) {
		printk(KERN_ERR "Configuring interrupts not supported\n");
		return;
	}
	mutex_lock(&bridge->irq_mtx);
	bridge->irq[level - 1].count--;
	/* Last handler gone at this level - mask it in hardware. */
	if (!bridge->irq[level - 1].count)
		bridge->irq_set(bridge, level, 0, 1);
	bridge->irq[level - 1].callback[statid].func = NULL;
	bridge->irq[level - 1].callback[statid].priv_data = NULL;
	mutex_unlock(&bridge->irq_mtx);
}
EXPORT_SYMBOL(vme_irq_free);
/**
* vme_irq_generate - Generate VME interrupt.
* @vdev: Pointer to VME device struct vme_dev assigned to driver instance.
* @level: Interrupt priority at which to assert the interrupt.
* @statid: Interrupt vector to associate with the interrupt.
*
* Generate a VME interrupt of the provided level and with the provided
* statid.
*
* Return: Zero on success, -EINVAL on invalid vme device, level or if the
* function is not supported. Hardware specific errors also possible.
*/
int vme_irq_generate(struct vme_dev *vdev, int level, int statid)
{
	struct vme_bridge *bridge = vdev->bridge;

	if (!bridge) {
		printk(KERN_ERR "Can't find VME bus\n");
		return -EINVAL;
	}
	/* VME defines interrupt levels 1 through 7. */
	if (level < 1 || level > 7) {
		printk(KERN_WARNING "Invalid interrupt level\n");
		return -EINVAL;
	}
	if (!bridge->irq_generate) {
		printk(KERN_WARNING "Interrupt generation not supported\n");
		return -EINVAL;
	}
	return bridge->irq_generate(bridge, level, statid);
}
EXPORT_SYMBOL(vme_irq_generate);
/**
* vme_lm_request - Request a VME location monitor
* @vdev: Pointer to VME device struct vme_dev assigned to driver instance.
*
* Allocate a location monitor resource to the driver. A location monitor
* allows the driver to monitor accesses to a contiguous number of
* addresses on the VME bus.
*
* Return: Pointer to a VME resource on success or NULL on failure.
*/
struct vme_resource *vme_lm_request(struct vme_dev *vdev)
{
	struct vme_bridge *bridge;
	struct list_head *lm_pos = NULL;
	struct vme_lm_resource *allocated_lm = NULL;
	struct vme_lm_resource *lm = NULL;
	struct vme_resource *resource = NULL;
	bridge = vdev->bridge;
	if (!bridge) {
		printk(KERN_ERR "Can't find VME bus\n");
		goto err_bus;
	}
	/* Loop through LM resources */
	list_for_each(lm_pos, &bridge->lm_resources) {
		lm = list_entry(lm_pos,
			struct vme_lm_resource, list);
		if (!lm) {
			printk(KERN_ERR "Registered NULL Location Monitor resource\n");
			continue;
		}
		/* Find an unlocked controller */
		/*
		 * The per-monitor mutex makes the test-and-claim of
		 * ->locked atomic with respect to other requesters.
		 */
		mutex_lock(&lm->mtx);
		if (lm->locked == 0) {
			lm->locked = 1;
			mutex_unlock(&lm->mtx);
			allocated_lm = lm;
			break;
		}
		mutex_unlock(&lm->mtx);
	}
	/* Check to see if we found a resource */
	if (!allocated_lm)
		goto err_lm;
	resource = kmalloc(sizeof(*resource), GFP_KERNEL);
	if (!resource)
		goto err_alloc;
	resource->type = VME_LM;
	resource->entry = &allocated_lm->list;
	return resource;
err_alloc:
	/* Unlock image */
	/* Handle allocation failed after claiming the monitor: release it. */
	mutex_lock(&lm->mtx);
	lm->locked = 0;
	mutex_unlock(&lm->mtx);
err_lm:
err_bus:
	return NULL;
}
EXPORT_SYMBOL(vme_lm_request);
/**
* vme_lm_count - Determine number of VME Addresses monitored
* @resource: Pointer to VME location monitor resource.
*
* The number of contiguous addresses monitored is hardware dependent.
* Return the number of contiguous addresses monitored by the
* location monitor.
*
* Return: Count of addresses monitored or -EINVAL when provided with an
* invalid location monitor resource.
*/
int vme_lm_count(struct vme_resource *resource)
{
	struct vme_lm_resource *monitor;

	/* Only location monitor resources have a monitor count. */
	if (resource->type != VME_LM) {
		printk(KERN_ERR "Not a Location Monitor resource\n");
		return -EINVAL;
	}
	monitor = list_entry(resource->entry, struct vme_lm_resource, list);
	return monitor->monitors;
}
EXPORT_SYMBOL(vme_lm_count);
/**
* vme_lm_set - Configure location monitor
* @resource: Pointer to VME location monitor resource.
* @lm_base: Base address to monitor.
* @aspace: VME address space to monitor.
* @cycle: VME bus cycle type to monitor.
*
* Set the base address, address space and cycle type of accesses to be
* monitored by the location monitor.
*
* Return: Zero on success, -EINVAL when provided with an invalid location
* monitor resource or function is not supported. Hardware specific
* errors may also be returned.
*/
int vme_lm_set(struct vme_resource *resource, unsigned long long lm_base,
	u32 aspace, u32 cycle)
{
	struct vme_bridge *bridge = find_bridge(resource);
	struct vme_lm_resource *monitor;

	if (resource->type != VME_LM) {
		printk(KERN_ERR "Not a Location Monitor resource\n");
		return -EINVAL;
	}
	/* Configuration is delegated entirely to the bridge driver. */
	if (!bridge->lm_set) {
		printk(KERN_ERR "vme_lm_set not supported\n");
		return -EINVAL;
	}
	monitor = list_entry(resource->entry, struct vme_lm_resource, list);
	return bridge->lm_set(monitor, lm_base, aspace, cycle);
}
EXPORT_SYMBOL(vme_lm_set);
/**
 * vme_lm_get - Retrieve location monitor settings
 * @resource: Pointer to VME location monitor resource.
 * @lm_base: Pointer used to output the base address monitored.
 * @aspace: Pointer used to output the address space monitored.
 * @cycle: Pointer used to output the VME bus cycle type monitored.
 *
 * Retrieve the base address, address space and cycle type of accesses to
 * be monitored by the location monitor.
 *
 * Return: Zero on success, -EINVAL when provided with an invalid location
 *         monitor resource or function is not supported. Hardware specific
 *         errors may also be returned.
 */
int vme_lm_get(struct vme_resource *resource, unsigned long long *lm_base,
               u32 *aspace, u32 *cycle)
{
        struct vme_bridge *bridge = find_bridge(resource);
        struct vme_lm_resource *lm;

        if (resource->type != VME_LM) {
                pr_err("Not a Location Monitor resource\n");
                return -EINVAL;
        }

        lm = list_entry(resource->entry, struct vme_lm_resource, list);

        /* The operation is bridge-hardware specific; bail if unimplemented */
        if (!bridge->lm_get) {
                pr_err("vme_lm_get not supported\n");
                return -EINVAL;
        }

        return bridge->lm_get(lm, lm_base, aspace, cycle);
}
EXPORT_SYMBOL(vme_lm_get);
/**
 * vme_lm_attach - Provide callback for location monitor address
 * @resource: Pointer to VME location monitor resource.
 * @monitor: Offset to which callback should be attached.
 * @callback: Pointer to callback function called when triggered.
 * @data: Generic pointer that will be passed to the callback function.
 *
 * Attach a callback to the specified offset into the location monitors
 * monitored addresses. A generic pointer is provided to allow data to be
 * passed to the callback when called.
 *
 * Return: Zero on success, -EINVAL when provided with an invalid location
 *         monitor resource or function is not supported. Hardware specific
 *         errors may also be returned.
 */
int vme_lm_attach(struct vme_resource *resource, int monitor,
                  void (*callback)(void *), void *data)
{
        struct vme_bridge *bridge = find_bridge(resource);
        struct vme_lm_resource *lm;

        if (resource->type != VME_LM) {
                pr_err("Not a Location Monitor resource\n");
                return -EINVAL;
        }

        lm = list_entry(resource->entry, struct vme_lm_resource, list);

        /* The operation is bridge-hardware specific; bail if unimplemented */
        if (!bridge->lm_attach) {
                pr_err("vme_lm_attach not supported\n");
                return -EINVAL;
        }

        return bridge->lm_attach(lm, monitor, callback, data);
}
EXPORT_SYMBOL(vme_lm_attach);
/**
 * vme_lm_detach - Remove callback for location monitor address
 * @resource: Pointer to VME location monitor resource.
 * @monitor: Offset to which callback should be removed.
 *
 * Remove the callback associated with the specified offset into the
 * location monitors monitored addresses.
 *
 * Return: Zero on success, -EINVAL when provided with an invalid location
 *         monitor resource or function is not supported. Hardware specific
 *         errors may also be returned.
 */
int vme_lm_detach(struct vme_resource *resource, int monitor)
{
        struct vme_bridge *bridge = find_bridge(resource);
        struct vme_lm_resource *lm;

        if (resource->type != VME_LM) {
                pr_err("Not a Location Monitor resource\n");
                return -EINVAL;
        }

        lm = list_entry(resource->entry, struct vme_lm_resource, list);

        /* The operation is bridge-hardware specific; bail if unimplemented */
        if (!bridge->lm_detach) {
                pr_err("vme_lm_detach not supported\n");
                return -EINVAL;
        }

        return bridge->lm_detach(lm, monitor);
}
EXPORT_SYMBOL(vme_lm_detach);
/**
 * vme_lm_free - Free allocated VME location monitor
 * @resource: Pointer to VME location monitor resource.
 *
 * Free allocation of a VME location monitor.
 *
 * WARNING: This function currently expects that any callbacks that have
 *          been attached to the location monitor have been removed.
 *
 * If @resource is not a location monitor resource, an error is logged and
 * nothing is freed. (This function returns no value.)
 */
void vme_lm_free(struct vme_resource *resource)
{
        struct vme_lm_resource *lm;

        if (resource->type != VME_LM) {
                pr_err("Not a Location Monitor resource\n");
                return;
        }

        lm = list_entry(resource->entry, struct vme_lm_resource, list);

        mutex_lock(&lm->mtx);

        /* XXX
         * Check to see that there aren't any callbacks still attached, if
         * there are we should probably be detaching them!
         */

        /* Release the monitor back to the pool; the lm structure itself
         * lives on the bridge, only the handle is freed here.
         */
        lm->locked = 0;

        mutex_unlock(&lm->mtx);

        kfree(resource);
}
EXPORT_SYMBOL(vme_lm_free);
/**
 * vme_slot_num - Retrieve slot ID
 * @vdev: Pointer to VME device struct vme_dev assigned to driver instance.
 *
 * Retrieve the slot ID associated with the provided VME device.
 *
 * Return: The slot ID on success, -EINVAL if VME bridge cannot be determined
 *         or the function is not supported. Hardware specific errors may also
 *         be returned.
 */
int vme_slot_num(struct vme_dev *vdev)
{
        struct vme_bridge *bridge;

        bridge = vdev->bridge;
        if (!bridge) {
                pr_err("Can't find VME bus\n");
                return -EINVAL;
        }

        /* Slot retrieval is bridge-hardware specific */
        if (!bridge->slot_get) {
                pr_warn("vme_slot_num not supported\n");
                return -EINVAL;
        }

        return bridge->slot_get(bridge);
}
EXPORT_SYMBOL(vme_slot_num);
/**
 * vme_bus_num - Retrieve bus number
 * @vdev: Pointer to VME device struct vme_dev assigned to driver instance.
 *
 * Retrieve the bus enumeration associated with the provided VME device.
 *
 * Return: The bus number on success, -EINVAL if VME bridge cannot be
 *         determined.
 */
int vme_bus_num(struct vme_dev *vdev)
{
        struct vme_bridge *bridge = vdev->bridge;

        if (!bridge) {
                pr_err("Can't find VME bus\n");
                return -EINVAL;
        }

        return bridge->num;
}
EXPORT_SYMBOL(vme_bus_num);
/* - Bridge Registration --------------------------------------------------- */
/* Device-model release callback: frees the struct vme_dev embedding @dev
 * once its last reference is dropped.
 */
static void vme_dev_release(struct device *dev)
{
kfree(dev_to_vme_dev(dev));
}
/* Common bridge initialization: set up the per-bridge resource lists and
 * the IRQ mutex. Must be called by bridge drivers before registering the
 * bridge. Returns the bridge pointer passed in, for call chaining.
 */
struct vme_bridge *vme_init_bridge(struct vme_bridge *bridge)
{
INIT_LIST_HEAD(&bridge->vme_error_handlers);
INIT_LIST_HEAD(&bridge->master_resources);
INIT_LIST_HEAD(&bridge->slave_resources);
INIT_LIST_HEAD(&bridge->dma_resources);
INIT_LIST_HEAD(&bridge->lm_resources);
mutex_init(&bridge->irq_mtx);
return bridge;
}
EXPORT_SYMBOL(vme_init_bridge);
/* Register a bridge with the VME core: allocate the lowest free bus number
 * from the vme_bus_numbers bitmap and add the bridge to the global bus list.
 * Returns 0 on success, -1 if all bus numbers are in use (existing contract;
 * callers only test for non-zero).
 */
int vme_register_bridge(struct vme_bridge *bridge)
{
int i;
int ret = -1;
mutex_lock(&vme_buses_lock);
/* One bit per bus number; find the first clear bit */
for (i = 0; i < sizeof(vme_bus_numbers) * 8; i++) {
if ((vme_bus_numbers & (1 << i)) == 0) {
vme_bus_numbers |= (1 << i);
bridge->num = i;
INIT_LIST_HEAD(&bridge->devices);
list_add_tail(&bridge->bus_list, &vme_bus_list);
ret = 0;
break;
}
}
mutex_unlock(&vme_buses_lock);
return ret;
}
EXPORT_SYMBOL(vme_register_bridge);
/* Unregister a bridge: release its bus number, unregister every device
 * still attached to it and remove it from the global bus list. All done
 * under vme_buses_lock so driver registration cannot race with removal.
 */
void vme_unregister_bridge(struct vme_bridge *bridge)
{
struct vme_dev *vdev;
struct vme_dev *tmp;
mutex_lock(&vme_buses_lock);
vme_bus_numbers &= ~(1 << bridge->num);
list_for_each_entry_safe(vdev, tmp, &bridge->devices, bridge_list) {
list_del(&vdev->drv_list);
list_del(&vdev->bridge_list);
device_unregister(&vdev->dev);
}
list_del(&bridge->bus_list);
mutex_unlock(&vme_buses_lock);
}
EXPORT_SYMBOL(vme_unregister_bridge);
/* - Driver Registration --------------------------------------------------- */
/* Create and register up to @ndevs vme_dev instances for @drv on @bridge.
 * A device only stays registered if the driver matched it (vme_bus_match
 * clears dev.platform_data on mismatch, checked after device_register).
 * On failure every device already added to drv->devices is unwound.
 * Returns 0 on success or a negative errno.
 */
static int __vme_register_driver_bus(struct vme_driver *drv,
struct vme_bridge *bridge,
unsigned int ndevs)
{
int err;
unsigned int i;
struct vme_dev *vdev;
struct vme_dev *tmp;
for (i = 0; i < ndevs; i++) {
vdev = kzalloc(sizeof(*vdev), GFP_KERNEL);
if (!vdev) {
err = -ENOMEM;
goto err_devalloc;
}
vdev->num = i;
vdev->bridge = bridge;
/* platform_data carries the owning driver; vme_bus_match relies on it */
vdev->dev.platform_data = drv;
vdev->dev.release = vme_dev_release;
vdev->dev.parent = bridge->parent;
vdev->dev.bus = &vme_bus_type;
dev_set_name(&vdev->dev, "%s.%u-%u", drv->name, bridge->num,
vdev->num);
err = device_register(&vdev->dev);
if (err)
goto err_reg;
if (vdev->dev.platform_data) {
/* Driver matched: keep the device on both bookkeeping lists */
list_add_tail(&vdev->drv_list, &drv->devices);
list_add_tail(&vdev->bridge_list, &bridge->devices);
} else
/* No match: vme_bus_match cleared platform_data; drop device */
device_unregister(&vdev->dev);
}
return 0;
err_reg:
/* device_register failed: release takes care of freeing vdev */
put_device(&vdev->dev);
err_devalloc:
/* Unwind all devices this driver has accumulated so far */
list_for_each_entry_safe(vdev, tmp, &drv->devices, drv_list) {
list_del(&vdev->drv_list);
list_del(&vdev->bridge_list);
device_unregister(&vdev->dev);
}
return err;
}
/* Enumerate @drv on every registered bridge, stopping at the first error.
 * Holding vme_buses_lock keeps the bridge list stable for the duration.
 * Returns 0 on success or the first negative errno encountered.
 */
static int __vme_register_driver(struct vme_driver *drv, unsigned int ndevs)
{
struct vme_bridge *bridge;
int err = 0;
mutex_lock(&vme_buses_lock);
list_for_each_entry(bridge, &vme_bus_list, bus_list) {
/*
 * This cannot cause trouble as we already have vme_buses_lock
 * and if the bridge is removed, it will have to go through
 * vme_unregister_bridge() to do it (which calls remove() on
 * the bridge which in turn tries to acquire vme_buses_lock and
 * will have to wait).
 */
err = __vme_register_driver_bus(drv, bridge, ndevs);
if (err)
break;
}
mutex_unlock(&vme_buses_lock);
return err;
}
/**
 * vme_register_driver - Register a VME driver
 * @drv: Pointer to VME driver structure to register.
 * @ndevs: Maximum number of devices to allow to be enumerated.
 *
 * Register a VME device driver with the VME subsystem.
 *
 * Return: Zero on success, error value on registration failure.
 */
int vme_register_driver(struct vme_driver *drv, unsigned int ndevs)
{
int err;
drv->driver.name = drv->name;
drv->driver.bus = &vme_bus_type;
INIT_LIST_HEAD(&drv->devices);
err = driver_register(&drv->driver);
if (err)
return err;
/* Enumerate on all existing bridges; roll back on failure */
err = __vme_register_driver(drv, ndevs);
if (err)
driver_unregister(&drv->driver);
return err;
}
EXPORT_SYMBOL(vme_register_driver);
/**
 * vme_unregister_driver - Unregister a VME driver
 * @drv: Pointer to VME driver structure to unregister.
 *
 * Unregister a VME device driver from the VME subsystem. All devices
 * enumerated for the driver are unregistered first, under vme_buses_lock
 * so this cannot race with bridge add/remove.
 */
void vme_unregister_driver(struct vme_driver *drv)
{
struct vme_dev *dev, *dev_tmp;
mutex_lock(&vme_buses_lock);
list_for_each_entry_safe(dev, dev_tmp, &drv->devices, drv_list) {
list_del(&dev->drv_list);
list_del(&dev->bridge_list);
device_unregister(&dev->dev);
}
mutex_unlock(&vme_buses_lock);
driver_unregister(&drv->driver);
}
EXPORT_SYMBOL(vme_unregister_driver);
/* - Bus Registration ------------------------------------------------------ */
/* Bus match callback. A vme_dev only matches the driver stored in its
 * dev->platform_data (set by __vme_register_driver_bus). On a mismatch the
 * platform_data pointer is deliberately cleared: __vme_register_driver_bus
 * checks it after device_register() to decide whether to keep the device.
 */
static int vme_bus_match(struct device *dev, struct device_driver *drv)
{
struct vme_driver *vme_drv;
vme_drv = container_of(drv, struct vme_driver, driver);
if (dev->platform_data == vme_drv) {
struct vme_dev *vdev = dev_to_vme_dev(dev);
if (vme_drv->match && vme_drv->match(vdev))
return 1;
/* Signal "no match" back to the enumeration loop */
dev->platform_data = NULL;
}
return 0;
}
/* Bus probe callback: forward to the matched driver's probe hook. */
static int vme_bus_probe(struct device *dev)
{
        struct vme_dev *vdev = dev_to_vme_dev(dev);
        struct vme_driver *driver = dev->platform_data;

        if (!driver->probe)
                return -ENODEV;

        return driver->probe(vdev);
}
/* Bus remove callback: forward to the matched driver's remove hook. */
static void vme_bus_remove(struct device *dev)
{
        struct vme_dev *vdev = dev_to_vme_dev(dev);
        struct vme_driver *driver = dev->platform_data;

        if (!driver->remove)
                return;

        driver->remove(vdev);
}
/* The VME bus type: ties match/probe/remove callbacks into the driver core */
struct bus_type vme_bus_type = {
.name = "vme",
.match = vme_bus_match,
.probe = vme_bus_probe,
.remove = vme_bus_remove,
};
EXPORT_SYMBOL(vme_bus_type);
/* Subsystem init: register the VME bus type with the driver core early
 * (subsys_initcall) so bridge and device drivers can rely on it.
 */
static int __init vme_init(void)
{
return bus_register(&vme_bus_type);
}
subsys_initcall(vme_init);
/* --- end of drivers/staging/vme_user/vme.c; drivers/staging/vme_user/vme_tsi148.c follows --- */
// SPDX-License-Identifier: GPL-2.0-or-later
/*
* Support for the Tundra TSI148 VME-PCI Bridge Chip
*
* Author: Martyn Welch <[email protected]>
* Copyright 2008 GE Intelligent Platforms Embedded Systems, Inc.
*
* Based on work by Tom Armistead and Ajit Prem
* Copyright 2004 Motorola Inc.
*/
#include <linux/module.h>
#include <linux/moduleparam.h>
#include <linux/mm.h>
#include <linux/types.h>
#include <linux/errno.h>
#include <linux/proc_fs.h>
#include <linux/pci.h>
#include <linux/poll.h>
#include <linux/dma-mapping.h>
#include <linux/interrupt.h>
#include <linux/spinlock.h>
#include <linux/sched.h>
#include <linux/slab.h>
#include <linux/time.h>
#include <linux/io.h>
#include <linux/uaccess.h>
#include <linux/byteorder/generic.h>
#include "vme.h"
#include "vme_bridge.h"
#include "vme_tsi148.h"
/* Forward declarations for the PCI driver hooks defined later in the file */
static int tsi148_probe(struct pci_dev *, const struct pci_device_id *);
static void tsi148_remove(struct pci_dev *);
/* Module parameter */
/* err_chk: route VME bus errors through vme_bus_error_handler() instead of
 * just logging them; geoid: geographic address override (0 = use backplane).
 */
static bool err_chk;
static int geoid;
static const char driver_name[] = "vme_tsi148";
/* PCI IDs this driver binds to: the Tundra TSI148 bridge only */
static const struct pci_device_id tsi148_ids[] = {
{ PCI_DEVICE(PCI_VENDOR_ID_TUNDRA, PCI_DEVICE_ID_TUNDRA_TSI148) },
{ },
};
MODULE_DEVICE_TABLE(pci, tsi148_ids);
static struct pci_driver tsi148_driver = {
.name = driver_name,
.id_table = tsi148_ids,
.probe = tsi148_probe,
.remove = tsi148_remove,
};
/* Combine two 32-bit register halves into one 64-bit value. */
static void reg_join(unsigned int high, unsigned int low,
        unsigned long long *variable)
{
        unsigned long long joined;

        joined = (unsigned long long)high << 32;
        joined |= low;
        *variable = joined;
}
/* Split a 64-bit value into its upper and lower 32-bit register halves. */
static void reg_split(unsigned long long variable, unsigned int *high,
        unsigned int *low)
{
        *high = (unsigned int)(variable >> 32);
        *low = (unsigned int)(variable & 0xFFFFFFFFULL);
}
/*
 * Wakes up DMA queue.
 *
 * Checks @channel_mask for each DMA channel's status bit and wakes the
 * corresponding wait queue. Returns the set of interrupt-clear bits to be
 * written back by the top-level handler.
 */
static u32 tsi148_DMA_irqhandler(struct tsi148_driver *bridge,
int channel_mask)
{
u32 serviced = 0;
if (channel_mask & TSI148_LCSR_INTS_DMA0S) {
wake_up(&bridge->dma_queue[0]);
serviced |= TSI148_LCSR_INTC_DMA0C;
}
if (channel_mask & TSI148_LCSR_INTS_DMA1S) {
wake_up(&bridge->dma_queue[1]);
serviced |= TSI148_LCSR_INTC_DMA1C;
}
return serviced;
}
/*
 * Wake up location monitor queue
 *
 * Invokes the attached callback for every location monitor whose status
 * bit is set in @stat. Returns the clear bits for the serviced monitors.
 */
static u32 tsi148_LM_irqhandler(struct tsi148_driver *bridge, u32 stat)
{
int i;
u32 serviced = 0;
for (i = 0; i < 4; i++) {
if (stat & TSI148_LCSR_INTS_LMS[i]) {
/* We only enable interrupts if the callback is set */
bridge->lm_callback[i](bridge->lm_data[i]);
serviced |= TSI148_LCSR_INTC_LMC[i];
}
}
return serviced;
}
/*
 * Wake up mail box queue.
 *
 * Reads and logs the value of every mailbox whose status bit is set in
 * @stat. Returns the clear bits for the serviced mailboxes.
 *
 * XXX This functionality is not exposed up though API.
 */
static u32 tsi148_MB_irqhandler(struct vme_bridge *tsi148_bridge, u32 stat)
{
int i;
u32 val;
u32 serviced = 0;
struct tsi148_driver *bridge;
bridge = tsi148_bridge->driver_priv;
for (i = 0; i < 4; i++) {
if (stat & TSI148_LCSR_INTS_MBS[i]) {
val = ioread32be(bridge->base + TSI148_GCSR_MBOX[i]);
dev_err(tsi148_bridge->parent, "VME Mailbox %d received: 0x%x\n",
i, val);
serviced |= TSI148_LCSR_INTC_MBC[i];
}
}
return serviced;
}
/*
 * Display error & status message when PERR (PCI) exception interrupt occurs.
 *
 * Logs the exception address/attribute registers, clears the exception by
 * writing EDPCL, and returns the PERR interrupt-clear bit.
 */
static u32 tsi148_PERR_irqhandler(struct vme_bridge *tsi148_bridge)
{
struct tsi148_driver *bridge;
bridge = tsi148_bridge->driver_priv;
dev_err(tsi148_bridge->parent, "PCI Exception at address: 0x%08x:%08x, attributes: %08x\n",
ioread32be(bridge->base + TSI148_LCSR_EDPAU),
ioread32be(bridge->base + TSI148_LCSR_EDPAL),
ioread32be(bridge->base + TSI148_LCSR_EDPAT));
dev_err(tsi148_bridge->parent, "PCI-X attribute reg: %08x, PCI-X split completion reg: %08x\n",
ioread32be(bridge->base + TSI148_LCSR_EDPXA),
ioread32be(bridge->base + TSI148_LCSR_EDPXS));
/* Clear the exception so further PCI errors can be captured */
iowrite32be(TSI148_LCSR_EDPAT_EDPCL, bridge->base + TSI148_LCSR_EDPAT);
return TSI148_LCSR_INTC_PERRC;
}
/*
 * Save address and status when VME error interrupt occurs.
 *
 * Reads the VME exception address/attribute registers, reports overflow
 * (lost error data), then either dispatches to the bus error framework
 * (when the err_chk module parameter is set) or just logs the error.
 * Clears the exception status and returns the VERR interrupt-clear bit.
 */
static u32 tsi148_VERR_irqhandler(struct vme_bridge *tsi148_bridge)
{
unsigned int error_addr_high, error_addr_low;
unsigned long long error_addr;
u32 error_attrib;
int error_am;
struct tsi148_driver *bridge;
bridge = tsi148_bridge->driver_priv;
error_addr_high = ioread32be(bridge->base + TSI148_LCSR_VEAU);
error_addr_low = ioread32be(bridge->base + TSI148_LCSR_VEAL);
error_attrib = ioread32be(bridge->base + TSI148_LCSR_VEAT);
/* Address modifier code is held in bits 15:8 of the attribute reg */
error_am = (error_attrib & TSI148_LCSR_VEAT_AM_M) >> 8;
reg_join(error_addr_high, error_addr_low, &error_addr);
/* Check for exception register overflow (we have lost error data) */
if (error_attrib & TSI148_LCSR_VEAT_VEOF)
dev_err(tsi148_bridge->parent, "VME Bus Exception Overflow Occurred\n");
if (err_chk)
vme_bus_error_handler(tsi148_bridge, error_addr, error_am);
else
dev_err(tsi148_bridge->parent,
"VME Bus Error at address: 0x%llx, attributes: %08x\n",
error_addr, error_attrib);
/* Clear Status */
iowrite32be(TSI148_LCSR_VEAT_VESCL, bridge->base + TSI148_LCSR_VEAT);
return TSI148_LCSR_INTC_VERRC;
}
/*
 * Wake up IACK queue.
 *
 * Wakes tsi148_irq_generate() waiters and returns the IACK clear bit.
 */
static u32 tsi148_IACK_irqhandler(struct tsi148_driver *bridge)
{
wake_up(&bridge->iack_queue);
return TSI148_LCSR_INTC_IACKC;
}
/*
 * Calling VME bus interrupt callback if provided.
 *
 * Walks IRQ levels 7 down to 1 (highest priority first); for each pending
 * level an 8-bit IACK cycle fetches the status/ID vector which is passed
 * to the common vme_irq_handler(). Returns the serviced level bits.
 */
static u32 tsi148_VIRQ_irqhandler(struct vme_bridge *tsi148_bridge,
u32 stat)
{
int vec, i, serviced = 0;
struct tsi148_driver *bridge;
bridge = tsi148_bridge->driver_priv;
for (i = 7; i > 0; i--) {
if (stat & (1 << i)) {
/*
 * Note: Even though the registers are defined as
 * 32-bits in the spec, we only want to issue 8-bit
 * IACK cycles on the bus, read from offset 3.
 */
vec = ioread8(bridge->base + TSI148_LCSR_VIACK[i] + 3);
vme_irq_handler(tsi148_bridge, i, vec);
serviced |= (1 << i);
}
}
return serviced;
}
/*
 * Top level interrupt handler. Clears appropriate interrupt status bits and
 * then calls appropriate sub handler(s).
 *
 * Only interrupt sources that are both enabled (INTEO) and pending (INTS)
 * are considered; each sub-handler returns the INTC bits to clear, which
 * are written back in one go at the end.
 */
static irqreturn_t tsi148_irqhandler(int irq, void *ptr)
{
u32 stat, enable, serviced = 0;
struct vme_bridge *tsi148_bridge;
struct tsi148_driver *bridge;
tsi148_bridge = ptr;
bridge = tsi148_bridge->driver_priv;
/* Determine which interrupts are unmasked and set */
enable = ioread32be(bridge->base + TSI148_LCSR_INTEO);
stat = ioread32be(bridge->base + TSI148_LCSR_INTS);
/* Only look at unmasked interrupts */
stat &= enable;
if (unlikely(!stat))
return IRQ_NONE;
/* Call subhandlers as appropriate */
/* DMA irqs */
if (stat & (TSI148_LCSR_INTS_DMA1S | TSI148_LCSR_INTS_DMA0S))
serviced |= tsi148_DMA_irqhandler(bridge, stat);
/* Location monitor irqs */
if (stat & (TSI148_LCSR_INTS_LM3S | TSI148_LCSR_INTS_LM2S |
TSI148_LCSR_INTS_LM1S | TSI148_LCSR_INTS_LM0S))
serviced |= tsi148_LM_irqhandler(bridge, stat);
/* Mail box irqs */
if (stat & (TSI148_LCSR_INTS_MB3S | TSI148_LCSR_INTS_MB2S |
TSI148_LCSR_INTS_MB1S | TSI148_LCSR_INTS_MB0S))
serviced |= tsi148_MB_irqhandler(tsi148_bridge, stat);
/* PCI bus error */
if (stat & TSI148_LCSR_INTS_PERRS)
serviced |= tsi148_PERR_irqhandler(tsi148_bridge);
/* VME bus error */
if (stat & TSI148_LCSR_INTS_VERRS)
serviced |= tsi148_VERR_irqhandler(tsi148_bridge);
/* IACK irq */
if (stat & TSI148_LCSR_INTS_IACKS)
serviced |= tsi148_IACK_irqhandler(bridge);
/* VME bus irqs */
if (stat & (TSI148_LCSR_INTS_IRQ7S | TSI148_LCSR_INTS_IRQ6S |
TSI148_LCSR_INTS_IRQ5S | TSI148_LCSR_INTS_IRQ4S |
TSI148_LCSR_INTS_IRQ3S | TSI148_LCSR_INTS_IRQ2S |
TSI148_LCSR_INTS_IRQ1S))
serviced |= tsi148_VIRQ_irqhandler(tsi148_bridge, stat);
/* Clear serviced interrupts */
iowrite32be(serviced, bridge->base + TSI148_LCSR_INTC);
return IRQ_HANDLED;
}
/* Request the shared PCI IRQ and enable/unmask the baseline interrupt
 * sources (DMA, mailboxes, PCI/VME error, IACK). Location monitor and
 * VME level interrupts stay masked until callers configure them.
 * Returns 0 on success or the request_irq() error.
 */
static int tsi148_irq_init(struct vme_bridge *tsi148_bridge)
{
int result;
unsigned int tmp;
struct pci_dev *pdev;
struct tsi148_driver *bridge;
pdev = to_pci_dev(tsi148_bridge->parent);
bridge = tsi148_bridge->driver_priv;
result = request_irq(pdev->irq,
tsi148_irqhandler,
IRQF_SHARED,
driver_name, tsi148_bridge);
if (result) {
dev_err(tsi148_bridge->parent, "Can't get assigned pci irq vector %02X\n",
pdev->irq);
return result;
}
/* Enable and unmask interrupts */
tmp = TSI148_LCSR_INTEO_DMA1EO | TSI148_LCSR_INTEO_DMA0EO |
TSI148_LCSR_INTEO_MB3EO | TSI148_LCSR_INTEO_MB2EO |
TSI148_LCSR_INTEO_MB1EO | TSI148_LCSR_INTEO_MB0EO |
TSI148_LCSR_INTEO_PERREO | TSI148_LCSR_INTEO_VERREO |
TSI148_LCSR_INTEO_IACKEO;
/* This leaves the following interrupts masked.
 * TSI148_LCSR_INTEO_VIEEO
 * TSI148_LCSR_INTEO_SYSFLEO
 * TSI148_LCSR_INTEO_ACFLEO
 */
/* Don't enable Location Monitor interrupts here - they will be
 * enabled when the location monitors are properly configured and
 * a callback has been attached.
 * TSI148_LCSR_INTEO_LM0EO
 * TSI148_LCSR_INTEO_LM1EO
 * TSI148_LCSR_INTEO_LM2EO
 * TSI148_LCSR_INTEO_LM3EO
 */
/* Don't enable VME interrupts until we add a handler, else the board
 * will respond to it and we don't want that unless it knows how to
 * properly deal with it.
 * TSI148_LCSR_INTEO_IRQ7EO
 * TSI148_LCSR_INTEO_IRQ6EO
 * TSI148_LCSR_INTEO_IRQ5EO
 * TSI148_LCSR_INTEO_IRQ4EO
 * TSI148_LCSR_INTEO_IRQ3EO
 * TSI148_LCSR_INTEO_IRQ2EO
 * TSI148_LCSR_INTEO_IRQ1EO
 */
iowrite32be(tmp, bridge->base + TSI148_LCSR_INTEO);
iowrite32be(tmp, bridge->base + TSI148_LCSR_INTEN);
return 0;
}
/* Mask and clear all TSI148 interrupt sources, then release the PCI IRQ.
 * Counterpart of tsi148_irq_init(); called on device teardown.
 */
static void tsi148_irq_exit(struct vme_bridge *tsi148_bridge,
struct pci_dev *pdev)
{
struct tsi148_driver *bridge = tsi148_bridge->driver_priv;
/* Turn off interrupts */
iowrite32be(0x0, bridge->base + TSI148_LCSR_INTEO);
iowrite32be(0x0, bridge->base + TSI148_LCSR_INTEN);
/* Clear all interrupts */
iowrite32be(0xFFFFFFFF, bridge->base + TSI148_LCSR_INTC);
/* Detach interrupt handler */
free_irq(pdev->irq, tsi148_bridge);
}
/*
 * Check to see if an IACK has been received.
 *
 * The VICR IRQS bit stays set while our generated interrupt is still
 * pending on the bus, i.e. while the IACK has not yet arrived.
 * Returns 1 once the IACK has been received, 0 otherwise.
 */
static int tsi148_iack_received(struct tsi148_driver *bridge)
{
        u32 vicr = ioread32be(bridge->base + TSI148_LCSR_VICR);

        return (vicr & TSI148_LCSR_VICR_IRQS) ? 0 : 1;
}
/*
 * Configure VME interrupt
 *
 * Enables or disables (per @state) the VME interrupt at @level. The
 * mask (INTEN) and out-enable (INTEO) registers are written in opposite
 * orders for the two cases, and on disable @sync requests waiting for
 * any in-flight handler via synchronize_irq().
 */
static void tsi148_irq_set(struct vme_bridge *tsi148_bridge, int level,
int state, int sync)
{
struct pci_dev *pdev;
u32 tmp;
struct tsi148_driver *bridge;
bridge = tsi148_bridge->driver_priv;
/* We need to do the ordering differently for enabling and disabling */
if (state == 0) {
/* Disable: mask first (INTEN), then drop the out-enable (INTEO) */
tmp = ioread32be(bridge->base + TSI148_LCSR_INTEN);
tmp &= ~TSI148_LCSR_INTEN_IRQEN[level - 1];
iowrite32be(tmp, bridge->base + TSI148_LCSR_INTEN);
tmp = ioread32be(bridge->base + TSI148_LCSR_INTEO);
tmp &= ~TSI148_LCSR_INTEO_IRQEO[level - 1];
iowrite32be(tmp, bridge->base + TSI148_LCSR_INTEO);
if (sync != 0) {
pdev = to_pci_dev(tsi148_bridge->parent);
synchronize_irq(pdev->irq);
}
} else {
/* Enable: out-enable first (INTEO), then unmask (INTEN) */
tmp = ioread32be(bridge->base + TSI148_LCSR_INTEO);
tmp |= TSI148_LCSR_INTEO_IRQEO[level - 1];
iowrite32be(tmp, bridge->base + TSI148_LCSR_INTEO);
tmp = ioread32be(bridge->base + TSI148_LCSR_INTEN);
tmp |= TSI148_LCSR_INTEN_IRQEN[level - 1];
iowrite32be(tmp, bridge->base + TSI148_LCSR_INTEN);
}
}
/*
 * Generate a VME bus interrupt at the requested level & vector. Wait for
 * interrupt to be acked.
 *
 * Serialized by bridge->vme_int since VICR holds a single status/ID.
 * Sleeps (interruptibly) until tsi148_iack_received() reports the IACK.
 * Always returns 0.
 */
static int tsi148_irq_generate(struct vme_bridge *tsi148_bridge, int level,
int statid)
{
u32 tmp;
struct tsi148_driver *bridge;
bridge = tsi148_bridge->driver_priv;
mutex_lock(&bridge->vme_int);
/* Read VICR register */
tmp = ioread32be(bridge->base + TSI148_LCSR_VICR);
/* Set Status/ID */
tmp = (tmp & ~TSI148_LCSR_VICR_STID_M) |
(statid & TSI148_LCSR_VICR_STID_M);
iowrite32be(tmp, bridge->base + TSI148_LCSR_VICR);
/* Assert VMEbus IRQ */
tmp = tmp | TSI148_LCSR_VICR_IRQL[level];
iowrite32be(tmp, bridge->base + TSI148_LCSR_VICR);
/* XXX Consider implementing a timeout? */
wait_event_interruptible(bridge->iack_queue,
tsi148_iack_received(bridge));
mutex_unlock(&bridge->vme_int);
return 0;
}
/*
 * Initialize a slave window with the requested attributes.
 *
 * Programs inbound translation window image->number: start/end VME
 * addresses, PCI offset, 2eSST speed, cycle types and address space.
 * All three addresses must be aligned to the granularity of the chosen
 * address space. The window is disabled while being reprogrammed and only
 * re-enabled if @enabled is set. Returns 0 on success or -EINVAL on an
 * invalid address space or misaligned address.
 */
static int tsi148_slave_set(struct vme_slave_resource *image, int enabled,
unsigned long long vme_base, unsigned long long size,
dma_addr_t pci_base, u32 aspace, u32 cycle)
{
unsigned int i, addr = 0, granularity = 0;
unsigned int temp_ctl = 0;
unsigned int vme_base_low, vme_base_high;
unsigned int vme_bound_low, vme_bound_high;
unsigned int pci_offset_low, pci_offset_high;
unsigned long long vme_bound, pci_offset;
struct vme_bridge *tsi148_bridge;
struct tsi148_driver *bridge;
tsi148_bridge = image->parent;
bridge = tsi148_bridge->driver_priv;
i = image->number;
/* Each address space has a hardware-defined window granularity */
switch (aspace) {
case VME_A16:
granularity = 0x10;
addr |= TSI148_LCSR_ITAT_AS_A16;
break;
case VME_A24:
granularity = 0x1000;
addr |= TSI148_LCSR_ITAT_AS_A24;
break;
case VME_A32:
granularity = 0x10000;
addr |= TSI148_LCSR_ITAT_AS_A32;
break;
case VME_A64:
granularity = 0x10000;
addr |= TSI148_LCSR_ITAT_AS_A64;
break;
default:
dev_err(tsi148_bridge->parent, "Invalid address space\n");
return -EINVAL;
}
/* Convert 64-bit variables to 2x 32-bit variables */
reg_split(vme_base, &vme_base_high, &vme_base_low);
/*
 * Bound address is a valid address for the window, adjust
 * accordingly
 */
vme_bound = vme_base + size - granularity;
reg_split(vme_bound, &vme_bound_high, &vme_bound_low);
pci_offset = (unsigned long long)pci_base - vme_base;
reg_split(pci_offset, &pci_offset_high, &pci_offset_low);
if (vme_base_low & (granularity - 1)) {
dev_err(tsi148_bridge->parent, "Invalid VME base alignment\n");
return -EINVAL;
}
if (vme_bound_low & (granularity - 1)) {
dev_err(tsi148_bridge->parent, "Invalid VME bound alignment\n");
return -EINVAL;
}
if (pci_offset_low & (granularity - 1)) {
dev_err(tsi148_bridge->parent, "Invalid PCI Offset alignment\n");
return -EINVAL;
}
/* Disable while we are mucking around */
temp_ctl = ioread32be(bridge->base + TSI148_LCSR_IT[i] +
TSI148_LCSR_OFFSET_ITAT);
temp_ctl &= ~TSI148_LCSR_ITAT_EN;
iowrite32be(temp_ctl, bridge->base + TSI148_LCSR_IT[i] +
TSI148_LCSR_OFFSET_ITAT);
/* Setup mapping */
iowrite32be(vme_base_high, bridge->base + TSI148_LCSR_IT[i] +
TSI148_LCSR_OFFSET_ITSAU);
iowrite32be(vme_base_low, bridge->base + TSI148_LCSR_IT[i] +
TSI148_LCSR_OFFSET_ITSAL);
iowrite32be(vme_bound_high, bridge->base + TSI148_LCSR_IT[i] +
TSI148_LCSR_OFFSET_ITEAU);
iowrite32be(vme_bound_low, bridge->base + TSI148_LCSR_IT[i] +
TSI148_LCSR_OFFSET_ITEAL);
iowrite32be(pci_offset_high, bridge->base + TSI148_LCSR_IT[i] +
TSI148_LCSR_OFFSET_ITOFU);
iowrite32be(pci_offset_low, bridge->base + TSI148_LCSR_IT[i] +
TSI148_LCSR_OFFSET_ITOFL);
/* Setup 2eSST speeds */
temp_ctl &= ~TSI148_LCSR_ITAT_2eSSTM_M;
switch (cycle & (VME_2eSST160 | VME_2eSST267 | VME_2eSST320)) {
case VME_2eSST160:
temp_ctl |= TSI148_LCSR_ITAT_2eSSTM_160;
break;
case VME_2eSST267:
temp_ctl |= TSI148_LCSR_ITAT_2eSSTM_267;
break;
case VME_2eSST320:
temp_ctl |= TSI148_LCSR_ITAT_2eSSTM_320;
break;
}
/* Setup cycle types */
temp_ctl &= ~(0x1F << 7);
if (cycle & VME_BLT)
temp_ctl |= TSI148_LCSR_ITAT_BLT;
if (cycle & VME_MBLT)
temp_ctl |= TSI148_LCSR_ITAT_MBLT;
if (cycle & VME_2eVME)
temp_ctl |= TSI148_LCSR_ITAT_2eVME;
if (cycle & VME_2eSST)
temp_ctl |= TSI148_LCSR_ITAT_2eSST;
if (cycle & VME_2eSSTB)
temp_ctl |= TSI148_LCSR_ITAT_2eSSTB;
/* Setup address space */
temp_ctl &= ~TSI148_LCSR_ITAT_AS_M;
temp_ctl |= addr;
/* Setup access mode bits (supervisor/user, program/data) */
temp_ctl &= ~0xF;
if (cycle & VME_SUPER)
temp_ctl |= TSI148_LCSR_ITAT_SUPR;
if (cycle & VME_USER)
temp_ctl |= TSI148_LCSR_ITAT_NPRIV;
if (cycle & VME_PROG)
temp_ctl |= TSI148_LCSR_ITAT_PGM;
if (cycle & VME_DATA)
temp_ctl |= TSI148_LCSR_ITAT_DATA;
/* Write ctl reg without enable */
iowrite32be(temp_ctl, bridge->base + TSI148_LCSR_IT[i] +
TSI148_LCSR_OFFSET_ITAT);
if (enabled)
temp_ctl |= TSI148_LCSR_ITAT_EN;
iowrite32be(temp_ctl, bridge->base + TSI148_LCSR_IT[i] +
TSI148_LCSR_OFFSET_ITAT);
return 0;
}
/*
 * Get slave window configuration.
 *
 * Reads back inbound translation window image->number and decodes the
 * registers into the caller's enabled/base/size/pci_base/aspace/cycle
 * out-parameters. Always returns 0.
 */
static int tsi148_slave_get(struct vme_slave_resource *image, int *enabled,
unsigned long long *vme_base, unsigned long long *size,
dma_addr_t *pci_base, u32 *aspace, u32 *cycle)
{
unsigned int i, granularity = 0, ctl = 0;
unsigned int vme_base_low, vme_base_high;
unsigned int vme_bound_low, vme_bound_high;
unsigned int pci_offset_low, pci_offset_high;
unsigned long long vme_bound, pci_offset;
struct tsi148_driver *bridge;
bridge = image->parent->driver_priv;
i = image->number;
/* Read registers */
ctl = ioread32be(bridge->base + TSI148_LCSR_IT[i] +
TSI148_LCSR_OFFSET_ITAT);
vme_base_high = ioread32be(bridge->base + TSI148_LCSR_IT[i] +
TSI148_LCSR_OFFSET_ITSAU);
vme_base_low = ioread32be(bridge->base + TSI148_LCSR_IT[i] +
TSI148_LCSR_OFFSET_ITSAL);
vme_bound_high = ioread32be(bridge->base + TSI148_LCSR_IT[i] +
TSI148_LCSR_OFFSET_ITEAU);
vme_bound_low = ioread32be(bridge->base + TSI148_LCSR_IT[i] +
TSI148_LCSR_OFFSET_ITEAL);
pci_offset_high = ioread32be(bridge->base + TSI148_LCSR_IT[i] +
TSI148_LCSR_OFFSET_ITOFU);
pci_offset_low = ioread32be(bridge->base + TSI148_LCSR_IT[i] +
TSI148_LCSR_OFFSET_ITOFL);
/* Convert 64-bit variables to 2x 32-bit variables */
reg_join(vme_base_high, vme_base_low, vme_base);
reg_join(vme_bound_high, vme_bound_low, &vme_bound);
reg_join(pci_offset_high, pci_offset_low, &pci_offset);
/* PCI base is VME base plus the programmed translation offset */
*pci_base = (dma_addr_t)(*vme_base + pci_offset);
*enabled = 0;
*aspace = 0;
*cycle = 0;
if (ctl & TSI148_LCSR_ITAT_EN)
*enabled = 1;
/* Decode address space; granularity mirrors tsi148_slave_set() */
if ((ctl & TSI148_LCSR_ITAT_AS_M) == TSI148_LCSR_ITAT_AS_A16) {
granularity = 0x10;
*aspace |= VME_A16;
}
if ((ctl & TSI148_LCSR_ITAT_AS_M) == TSI148_LCSR_ITAT_AS_A24) {
granularity = 0x1000;
*aspace |= VME_A24;
}
if ((ctl & TSI148_LCSR_ITAT_AS_M) == TSI148_LCSR_ITAT_AS_A32) {
granularity = 0x10000;
*aspace |= VME_A32;
}
if ((ctl & TSI148_LCSR_ITAT_AS_M) == TSI148_LCSR_ITAT_AS_A64) {
granularity = 0x10000;
*aspace |= VME_A64;
}
/* Need granularity before we set the size */
*size = (unsigned long long)((vme_bound - *vme_base) + granularity);
if ((ctl & TSI148_LCSR_ITAT_2eSSTM_M) == TSI148_LCSR_ITAT_2eSSTM_160)
*cycle |= VME_2eSST160;
if ((ctl & TSI148_LCSR_ITAT_2eSSTM_M) == TSI148_LCSR_ITAT_2eSSTM_267)
*cycle |= VME_2eSST267;
if ((ctl & TSI148_LCSR_ITAT_2eSSTM_M) == TSI148_LCSR_ITAT_2eSSTM_320)
*cycle |= VME_2eSST320;
if (ctl & TSI148_LCSR_ITAT_BLT)
*cycle |= VME_BLT;
if (ctl & TSI148_LCSR_ITAT_MBLT)
*cycle |= VME_MBLT;
if (ctl & TSI148_LCSR_ITAT_2eVME)
*cycle |= VME_2eVME;
if (ctl & TSI148_LCSR_ITAT_2eSST)
*cycle |= VME_2eSST;
if (ctl & TSI148_LCSR_ITAT_2eSSTB)
*cycle |= VME_2eSSTB;
if (ctl & TSI148_LCSR_ITAT_SUPR)
*cycle |= VME_SUPER;
if (ctl & TSI148_LCSR_ITAT_NPRIV)
*cycle |= VME_USER;
if (ctl & TSI148_LCSR_ITAT_PGM)
*cycle |= VME_PROG;
if (ctl & TSI148_LCSR_ITAT_DATA)
*cycle |= VME_DATA;
return 0;
}
/*
 * Allocate and map PCI Resource
 *
 * (Re)allocates the PCI bus window backing a master image. If the current
 * allocation already matches @size, nothing is done; otherwise any old
 * mapping/resource is torn down first. @size == 0 frees without
 * reallocating. Allocates the bus_resource.name buffer on first use
 * (GFP_ATOMIC: may run under image->lock spinlock). Returns 0 on success
 * or a negative errno; on failure the resource is left cleared.
 */
static int tsi148_alloc_resource(struct vme_master_resource *image,
unsigned long long size)
{
unsigned long long existing_size;
int retval = 0;
struct pci_dev *pdev;
struct vme_bridge *tsi148_bridge;
tsi148_bridge = image->parent;
pdev = to_pci_dev(tsi148_bridge->parent);
existing_size = (unsigned long long)(image->bus_resource.end -
image->bus_resource.start);
/* If the existing size is OK, return */
if ((size != 0) && (existing_size == (size - 1)))
return 0;
if (existing_size != 0) {
/* Tear down the stale mapping before re-allocating */
iounmap(image->kern_base);
image->kern_base = NULL;
kfree(image->bus_resource.name);
release_resource(&image->bus_resource);
memset(&image->bus_resource, 0, sizeof(image->bus_resource));
}
/* Exit here if size is zero */
if (size == 0)
return 0;
if (!image->bus_resource.name) {
image->bus_resource.name = kmalloc(VMENAMSIZ + 3, GFP_ATOMIC);
if (!image->bus_resource.name) {
retval = -ENOMEM;
goto err_name;
}
}
sprintf((char *)image->bus_resource.name, "%s.%d", tsi148_bridge->name,
image->number);
image->bus_resource.start = 0;
image->bus_resource.end = (unsigned long)size;
image->bus_resource.flags = IORESOURCE_MEM;
/* 0x10000 alignment matches the TSI148 outbound window granularity */
retval = pci_bus_alloc_resource(pdev->bus,
&image->bus_resource, size, 0x10000, PCIBIOS_MIN_MEM,
0, NULL, NULL);
if (retval) {
dev_err(tsi148_bridge->parent, "Failed to allocate mem resource for window %d size 0x%lx start 0x%lx\n",
image->number, (unsigned long)size,
(unsigned long)image->bus_resource.start);
goto err_resource;
}
image->kern_base = ioremap(
image->bus_resource.start, size);
if (!image->kern_base) {
dev_err(tsi148_bridge->parent, "Failed to remap resource\n");
retval = -ENOMEM;
goto err_remap;
}
return 0;
err_remap:
release_resource(&image->bus_resource);
err_resource:
kfree(image->bus_resource.name);
memset(&image->bus_resource, 0, sizeof(image->bus_resource));
err_name:
return retval;
}
/*
 * Free and unmap PCI Resource
 *
 * Reverses tsi148_alloc_resource(): unmap the kernel mapping, release the
 * PCI bus region, free the name buffer and clear the resource so a later
 * alloc sees it as empty.
 */
static void tsi148_free_resource(struct vme_master_resource *image)
{
iounmap(image->kern_base);
image->kern_base = NULL;
release_resource(&image->bus_resource);
kfree(image->bus_resource.name);
memset(&image->bus_resource, 0, sizeof(image->bus_resource));
}
/*
* Set the attributes of an outbound window.
*/
/*
 * tsi148_master_set - configure an outbound (master) window.
 * @image:   master window resource to configure
 * @enabled: non-zero to enable the window after programming it
 * @vme_base: VME base address (must be 64kB aligned)
 * @size:    window size; 0 frees any existing resource and leaves the
 *           window disabled
 * @aspace:  VME address space (VME_A16..VME_A64, CR/CSR, USER1-4)
 * @cycle:   cycle type flags (SCT/BLT/MBLT/2eVME/2eSST*, SUPER/PROG)
 * @dwidth:  data width (VME_D16 or VME_D32)
 *
 * Allocates PCI bus space for the window, then programs the OTSA/OTEA/OTOF
 * and OTAT registers. Returns 0 on success or a negative errno.
 *
 * Fix: the pcibios_resource_to_bus() call contained a mis-encoded
 * "&region" argument ("®ion"), which does not compile.
 */
static int tsi148_master_set(struct vme_master_resource *image, int enabled,
	unsigned long long vme_base, unsigned long long size, u32 aspace,
	u32 cycle, u32 dwidth)
{
	int retval = 0;
	unsigned int i;
	unsigned int temp_ctl = 0;
	unsigned int pci_base_low, pci_base_high;
	unsigned int pci_bound_low, pci_bound_high;
	unsigned int vme_offset_low, vme_offset_high;
	unsigned long long pci_bound, vme_offset, pci_base;
	struct vme_bridge *tsi148_bridge;
	struct tsi148_driver *bridge;
	struct pci_bus_region region;
	struct pci_dev *pdev;

	tsi148_bridge = image->parent;
	bridge = tsi148_bridge->driver_priv;
	pdev = to_pci_dev(tsi148_bridge->parent);

	/* Verify input data: windows have 64kB granularity */
	if (vme_base & 0xFFFF) {
		dev_err(tsi148_bridge->parent, "Invalid VME Window alignment\n");
		retval = -EINVAL;
		goto err_window;
	}

	if ((size == 0) && (enabled != 0)) {
		dev_err(tsi148_bridge->parent, "Size must be non-zero for enabled windows\n");
		retval = -EINVAL;
		goto err_window;
	}

	spin_lock(&image->lock);

	/* Let's allocate the resource here rather than further up the stack as
	 * it avoids pushing loads of bus dependent stuff up the stack. If size
	 * is zero, any existing resource will be freed.
	 */
	retval = tsi148_alloc_resource(image, size);
	if (retval) {
		spin_unlock(&image->lock);
		dev_err(tsi148_bridge->parent, "Unable to allocate memory for resource\n");
		goto err_res;
	}

	if (size == 0) {
		pci_base = 0;
		pci_bound = 0;
		vme_offset = 0;
	} else {
		pcibios_resource_to_bus(pdev->bus, &region,
					&image->bus_resource);
		pci_base = region.start;

		/*
		 * Bound address is a valid address for the window, adjust
		 * according to window granularity.
		 */
		pci_bound = pci_base + (size - 0x10000);
		vme_offset = vme_base - pci_base;
	}

	/* Convert 64-bit variables to 2x 32-bit variables */
	reg_split(pci_base, &pci_base_high, &pci_base_low);
	reg_split(pci_bound, &pci_bound_high, &pci_bound_low);
	reg_split(vme_offset, &vme_offset_high, &vme_offset_low);

	/* All three low words must share the 64kB alignment */
	if (pci_base_low & 0xFFFF) {
		spin_unlock(&image->lock);
		dev_err(tsi148_bridge->parent, "Invalid PCI base alignment\n");
		retval = -EINVAL;
		goto err_gran;
	}
	if (pci_bound_low & 0xFFFF) {
		spin_unlock(&image->lock);
		dev_err(tsi148_bridge->parent, "Invalid PCI bound alignment\n");
		retval = -EINVAL;
		goto err_gran;
	}
	if (vme_offset_low & 0xFFFF) {
		spin_unlock(&image->lock);
		dev_err(tsi148_bridge->parent, "Invalid VME Offset alignment\n");
		retval = -EINVAL;
		goto err_gran;
	}

	i = image->number;

	/* Disable while we are mucking around */
	temp_ctl = ioread32be(bridge->base + TSI148_LCSR_OT[i] +
		TSI148_LCSR_OFFSET_OTAT);
	temp_ctl &= ~TSI148_LCSR_OTAT_EN;
	iowrite32be(temp_ctl, bridge->base + TSI148_LCSR_OT[i] +
		TSI148_LCSR_OFFSET_OTAT);

	/* Setup 2eSST speeds */
	temp_ctl &= ~TSI148_LCSR_OTAT_2eSSTM_M;
	switch (cycle & (VME_2eSST160 | VME_2eSST267 | VME_2eSST320)) {
	case VME_2eSST160:
		temp_ctl |= TSI148_LCSR_OTAT_2eSSTM_160;
		break;
	case VME_2eSST267:
		temp_ctl |= TSI148_LCSR_OTAT_2eSSTM_267;
		break;
	case VME_2eSST320:
		temp_ctl |= TSI148_LCSR_OTAT_2eSSTM_320;
		break;
	}

	/* Setup cycle types - later flags override earlier ones */
	if (cycle & VME_BLT) {
		temp_ctl &= ~TSI148_LCSR_OTAT_TM_M;
		temp_ctl |= TSI148_LCSR_OTAT_TM_BLT;
	}
	if (cycle & VME_MBLT) {
		temp_ctl &= ~TSI148_LCSR_OTAT_TM_M;
		temp_ctl |= TSI148_LCSR_OTAT_TM_MBLT;
	}
	if (cycle & VME_2eVME) {
		temp_ctl &= ~TSI148_LCSR_OTAT_TM_M;
		temp_ctl |= TSI148_LCSR_OTAT_TM_2eVME;
	}
	if (cycle & VME_2eSST) {
		temp_ctl &= ~TSI148_LCSR_OTAT_TM_M;
		temp_ctl |= TSI148_LCSR_OTAT_TM_2eSST;
	}
	if (cycle & VME_2eSSTB) {
		dev_warn(tsi148_bridge->parent, "Currently not setting Broadcast Select Registers\n");
		temp_ctl &= ~TSI148_LCSR_OTAT_TM_M;
		temp_ctl |= TSI148_LCSR_OTAT_TM_2eSSTB;
	}

	/* Setup data width */
	temp_ctl &= ~TSI148_LCSR_OTAT_DBW_M;
	switch (dwidth) {
	case VME_D16:
		temp_ctl |= TSI148_LCSR_OTAT_DBW_16;
		break;
	case VME_D32:
		temp_ctl |= TSI148_LCSR_OTAT_DBW_32;
		break;
	default:
		spin_unlock(&image->lock);
		dev_err(tsi148_bridge->parent, "Invalid data width\n");
		retval = -EINVAL;
		goto err_dwidth;
	}

	/* Setup address space */
	temp_ctl &= ~TSI148_LCSR_OTAT_AMODE_M;
	switch (aspace) {
	case VME_A16:
		temp_ctl |= TSI148_LCSR_OTAT_AMODE_A16;
		break;
	case VME_A24:
		temp_ctl |= TSI148_LCSR_OTAT_AMODE_A24;
		break;
	case VME_A32:
		temp_ctl |= TSI148_LCSR_OTAT_AMODE_A32;
		break;
	case VME_A64:
		temp_ctl |= TSI148_LCSR_OTAT_AMODE_A64;
		break;
	case VME_CRCSR:
		temp_ctl |= TSI148_LCSR_OTAT_AMODE_CRCSR;
		break;
	case VME_USER1:
		temp_ctl |= TSI148_LCSR_OTAT_AMODE_USER1;
		break;
	case VME_USER2:
		temp_ctl |= TSI148_LCSR_OTAT_AMODE_USER2;
		break;
	case VME_USER3:
		temp_ctl |= TSI148_LCSR_OTAT_AMODE_USER3;
		break;
	case VME_USER4:
		temp_ctl |= TSI148_LCSR_OTAT_AMODE_USER4;
		break;
	default:
		spin_unlock(&image->lock);
		dev_err(tsi148_bridge->parent, "Invalid address space\n");
		retval = -EINVAL;
		goto err_aspace;
	}

	/* Clear bits 4-5 (the SUP/PGM selection) before re-applying */
	temp_ctl &= ~(3 << 4);
	if (cycle & VME_SUPER)
		temp_ctl |= TSI148_LCSR_OTAT_SUP;
	if (cycle & VME_PROG)
		temp_ctl |= TSI148_LCSR_OTAT_PGM;

	/* Setup mapping */
	iowrite32be(pci_base_high, bridge->base + TSI148_LCSR_OT[i] +
		TSI148_LCSR_OFFSET_OTSAU);
	iowrite32be(pci_base_low, bridge->base + TSI148_LCSR_OT[i] +
		TSI148_LCSR_OFFSET_OTSAL);
	iowrite32be(pci_bound_high, bridge->base + TSI148_LCSR_OT[i] +
		TSI148_LCSR_OFFSET_OTEAU);
	iowrite32be(pci_bound_low, bridge->base + TSI148_LCSR_OT[i] +
		TSI148_LCSR_OFFSET_OTEAL);
	iowrite32be(vme_offset_high, bridge->base + TSI148_LCSR_OT[i] +
		TSI148_LCSR_OFFSET_OTOFU);
	iowrite32be(vme_offset_low, bridge->base + TSI148_LCSR_OT[i] +
		TSI148_LCSR_OFFSET_OTOFL);

	/* Write ctl reg without enable */
	iowrite32be(temp_ctl, bridge->base + TSI148_LCSR_OT[i] +
		TSI148_LCSR_OFFSET_OTAT);

	if (enabled)
		temp_ctl |= TSI148_LCSR_OTAT_EN;

	iowrite32be(temp_ctl, bridge->base + TSI148_LCSR_OT[i] +
		TSI148_LCSR_OFFSET_OTAT);

	spin_unlock(&image->lock);
	return 0;

err_aspace:
err_dwidth:
err_gran:
	tsi148_free_resource(image);
err_res:
err_window:
	return retval;
}
/*
* Set the attributes of an outbound window.
*
* XXX Not parsing prefetch information.
*/
/*
 * __tsi148_master_get - read back an outbound window's configuration.
 *
 * Lock-free variant: the caller must hold image->lock (see
 * tsi148_master_get for the locked wrapper).
 *
 * Reads the OTAT/OTSA/OTEA/OTOF registers for window image->number and
 * decodes them into the generic VME API flags. All output parameters are
 * always written. Always returns 0.
 */
static int __tsi148_master_get(struct vme_master_resource *image, int *enabled,
	unsigned long long *vme_base, unsigned long long *size, u32 *aspace,
	u32 *cycle, u32 *dwidth)
{
	unsigned int i, ctl;
	unsigned int pci_base_low, pci_base_high;
	unsigned int pci_bound_low, pci_bound_high;
	unsigned int vme_offset_low, vme_offset_high;
	unsigned long long pci_base, pci_bound, vme_offset;
	struct tsi148_driver *bridge;

	bridge = image->parent->driver_priv;

	i = image->number;

	/* Read the attribute and address registers for this window */
	ctl = ioread32be(bridge->base + TSI148_LCSR_OT[i] +
		TSI148_LCSR_OFFSET_OTAT);

	pci_base_high = ioread32be(bridge->base + TSI148_LCSR_OT[i] +
		TSI148_LCSR_OFFSET_OTSAU);
	pci_base_low = ioread32be(bridge->base + TSI148_LCSR_OT[i] +
		TSI148_LCSR_OFFSET_OTSAL);
	pci_bound_high = ioread32be(bridge->base + TSI148_LCSR_OT[i] +
		TSI148_LCSR_OFFSET_OTEAU);
	pci_bound_low = ioread32be(bridge->base + TSI148_LCSR_OT[i] +
		TSI148_LCSR_OFFSET_OTEAL);
	vme_offset_high = ioread32be(bridge->base + TSI148_LCSR_OT[i] +
		TSI148_LCSR_OFFSET_OTOFU);
	vme_offset_low = ioread32be(bridge->base + TSI148_LCSR_OT[i] +
		TSI148_LCSR_OFFSET_OTOFL);

	/* Convert 64-bit variables to 2x 32-bit variables */
	reg_join(pci_base_high, pci_base_low, &pci_base);
	reg_join(pci_bound_high, pci_bound_low, &pci_bound);
	reg_join(vme_offset_high, vme_offset_low, &vme_offset);

	/* Reverse the mapping done in tsi148_master_set (64kB granularity) */
	*vme_base = pci_base + vme_offset;
	*size = (unsigned long long)(pci_bound - pci_base) + 0x10000;

	*enabled = 0;
	*aspace = 0;
	*cycle = 0;
	*dwidth = 0;

	if (ctl & TSI148_LCSR_OTAT_EN)
		*enabled = 1;

	/* Setup address space */
	if ((ctl & TSI148_LCSR_OTAT_AMODE_M) == TSI148_LCSR_OTAT_AMODE_A16)
		*aspace |= VME_A16;
	if ((ctl & TSI148_LCSR_OTAT_AMODE_M) == TSI148_LCSR_OTAT_AMODE_A24)
		*aspace |= VME_A24;
	if ((ctl & TSI148_LCSR_OTAT_AMODE_M) == TSI148_LCSR_OTAT_AMODE_A32)
		*aspace |= VME_A32;
	if ((ctl & TSI148_LCSR_OTAT_AMODE_M) == TSI148_LCSR_OTAT_AMODE_A64)
		*aspace |= VME_A64;
	if ((ctl & TSI148_LCSR_OTAT_AMODE_M) == TSI148_LCSR_OTAT_AMODE_CRCSR)
		*aspace |= VME_CRCSR;
	if ((ctl & TSI148_LCSR_OTAT_AMODE_M) == TSI148_LCSR_OTAT_AMODE_USER1)
		*aspace |= VME_USER1;
	if ((ctl & TSI148_LCSR_OTAT_AMODE_M) == TSI148_LCSR_OTAT_AMODE_USER2)
		*aspace |= VME_USER2;
	if ((ctl & TSI148_LCSR_OTAT_AMODE_M) == TSI148_LCSR_OTAT_AMODE_USER3)
		*aspace |= VME_USER3;
	if ((ctl & TSI148_LCSR_OTAT_AMODE_M) == TSI148_LCSR_OTAT_AMODE_USER4)
		*aspace |= VME_USER4;

	/* Setup 2eSST speeds */
	if ((ctl & TSI148_LCSR_OTAT_2eSSTM_M) == TSI148_LCSR_OTAT_2eSSTM_160)
		*cycle |= VME_2eSST160;
	if ((ctl & TSI148_LCSR_OTAT_2eSSTM_M) == TSI148_LCSR_OTAT_2eSSTM_267)
		*cycle |= VME_2eSST267;
	if ((ctl & TSI148_LCSR_OTAT_2eSSTM_M) == TSI148_LCSR_OTAT_2eSSTM_320)
		*cycle |= VME_2eSST320;

	/* Setup cycle types */
	if ((ctl & TSI148_LCSR_OTAT_TM_M) == TSI148_LCSR_OTAT_TM_SCT)
		*cycle |= VME_SCT;
	if ((ctl & TSI148_LCSR_OTAT_TM_M) == TSI148_LCSR_OTAT_TM_BLT)
		*cycle |= VME_BLT;
	if ((ctl & TSI148_LCSR_OTAT_TM_M) == TSI148_LCSR_OTAT_TM_MBLT)
		*cycle |= VME_MBLT;
	if ((ctl & TSI148_LCSR_OTAT_TM_M) == TSI148_LCSR_OTAT_TM_2eVME)
		*cycle |= VME_2eVME;
	if ((ctl & TSI148_LCSR_OTAT_TM_M) == TSI148_LCSR_OTAT_TM_2eSST)
		*cycle |= VME_2eSST;
	if ((ctl & TSI148_LCSR_OTAT_TM_M) == TSI148_LCSR_OTAT_TM_2eSSTB)
		*cycle |= VME_2eSSTB;

	/* SUP/PGM bits select supervisor/user and program/data access */
	if (ctl & TSI148_LCSR_OTAT_SUP)
		*cycle |= VME_SUPER;
	else
		*cycle |= VME_USER;
	if (ctl & TSI148_LCSR_OTAT_PGM)
		*cycle |= VME_PROG;
	else
		*cycle |= VME_DATA;

	/* Setup data width */
	if ((ctl & TSI148_LCSR_OTAT_DBW_M) == TSI148_LCSR_OTAT_DBW_16)
		*dwidth = VME_D16;
	if ((ctl & TSI148_LCSR_OTAT_DBW_M) == TSI148_LCSR_OTAT_DBW_32)
		*dwidth = VME_D32;

	return 0;
}
/*
 * tsi148_master_get - locked wrapper around __tsi148_master_get.
 *
 * Takes image->lock for the duration of the register read-back so the
 * window cannot be reconfigured mid-read.
 */
static int tsi148_master_get(struct vme_master_resource *image, int *enabled,
	unsigned long long *vme_base, unsigned long long *size, u32 *aspace,
	u32 *cycle, u32 *dwidth)
{
	int retval;

	spin_lock(&image->lock);
	retval = __tsi148_master_get(image, enabled, vme_base, size, aspace,
		cycle, dwidth);
	spin_unlock(&image->lock);

	return retval;
}
/*
 * tsi148_master_read - read from a master window into a kernel buffer.
 * @image:  configured master window
 * @buf:    destination buffer
 * @count:  number of bytes to read
 * @offset: byte offset into the window
 *
 * Returns the number of bytes read, or - when err_chk is set and a VME
 * bus error was latched - the offset of the first failing address
 * relative to the requested start (-ENOMEM if the error handler cannot
 * be registered).
 *
 * Fix: log message read "error detected an at address"; corrected to
 * "error detected at address".
 */
static ssize_t tsi148_master_read(struct vme_master_resource *image, void *buf,
	size_t count, loff_t offset)
{
	int retval, enabled;
	unsigned long long vme_base, size;
	u32 aspace, cycle, dwidth;
	struct vme_error_handler *handler = NULL;
	struct vme_bridge *tsi148_bridge;
	void __iomem *addr = image->kern_base + offset;
	unsigned int done = 0;
	unsigned int count32;

	tsi148_bridge = image->parent;

	spin_lock(&image->lock);

	/* Optionally register an error handler covering the read range */
	if (err_chk) {
		__tsi148_master_get(image, &enabled, &vme_base, &size, &aspace,
			&cycle, &dwidth);
		handler = vme_register_error_handler(tsi148_bridge, aspace,
						     vme_base + offset, count);
		if (!handler) {
			spin_unlock(&image->lock);
			return -ENOMEM;
		}
	}

	/* The following code handles VME address alignment. We cannot use
	 * memcpy_xxx here because it may cut data transfers in to 8-bit
	 * cycles when D16 or D32 cycles are required on the VME bus.
	 * On the other hand, the bridge itself assures that the maximum data
	 * cycle configured for the transfer is used and splits it
	 * automatically for non-aligned addresses, so we don't want the
	 * overhead of needlessly forcing small transfers for the entire cycle.
	 */
	if ((uintptr_t)addr & 0x1) {
		*(u8 *)buf = ioread8(addr);
		done += 1;
		if (done == count)
			goto out;
	}
	if ((uintptr_t)(addr + done) & 0x2) {
		if ((count - done) < 2) {
			*(u8 *)(buf + done) = ioread8(addr + done);
			done += 1;
			goto out;
		} else {
			*(u16 *)(buf + done) = ioread16(addr + done);
			done += 2;
		}
	}

	/* Bulk of the transfer as aligned 32-bit reads */
	count32 = (count - done) & ~0x3;
	while (done < count32) {
		*(u32 *)(buf + done) = ioread32(addr + done);
		done += 4;
	}

	/* Trailing 16-bit and 8-bit remainders */
	if ((count - done) & 0x2) {
		*(u16 *)(buf + done) = ioread16(addr + done);
		done += 2;
	}
	if ((count - done) & 0x1) {
		*(u8 *)(buf + done) = ioread8(addr + done);
		done += 1;
	}

out:
	retval = count;

	if (err_chk) {
		if (handler->num_errors) {
			dev_err(image->parent->parent,
				"First VME read error detected at address 0x%llx\n",
				handler->first_error);
			retval = handler->first_error - (vme_base + offset);
		}
		vme_unregister_error_handler(handler);
	}

	spin_unlock(&image->lock);

	return retval;
}
/*
 * tsi148_master_write - write a kernel buffer through a master window.
 * @image:  configured master window
 * @buf:    source buffer
 * @count:  number of bytes to write
 * @offset: byte offset into the window
 *
 * Returns the number of bytes written, or - when err_chk is set and a
 * VME bus error was latched - the offset of the first failing address
 * relative to the requested start (-ENOMEM if the error handler cannot
 * be registered).
 *
 * Fix: log message read "error detected an at address"; corrected to
 * "error detected at address".
 */
static ssize_t tsi148_master_write(struct vme_master_resource *image, void *buf,
	size_t count, loff_t offset)
{
	int retval = 0, enabled;
	unsigned long long vme_base, size;
	u32 aspace, cycle, dwidth;
	void __iomem *addr = image->kern_base + offset;
	unsigned int done = 0;
	unsigned int count32;
	struct vme_error_handler *handler = NULL;
	struct vme_bridge *tsi148_bridge;
	struct tsi148_driver *bridge;

	tsi148_bridge = image->parent;
	bridge = tsi148_bridge->driver_priv;

	spin_lock(&image->lock);

	/* Optionally register an error handler covering the write range */
	if (err_chk) {
		__tsi148_master_get(image, &enabled, &vme_base, &size, &aspace,
			&cycle, &dwidth);
		handler = vme_register_error_handler(tsi148_bridge, aspace,
						     vme_base + offset, count);
		if (!handler) {
			spin_unlock(&image->lock);
			return -ENOMEM;
		}
	}

	/* Here we apply for the same strategy we do in master_read
	 * function in order to assure the correct cycles.
	 */
	if ((uintptr_t)addr & 0x1) {
		iowrite8(*(u8 *)buf, addr);
		done += 1;
		if (done == count)
			goto out;
	}
	if ((uintptr_t)(addr + done) & 0x2) {
		if ((count - done) < 2) {
			iowrite8(*(u8 *)(buf + done), addr + done);
			done += 1;
			goto out;
		} else {
			iowrite16(*(u16 *)(buf + done), addr + done);
			done += 2;
		}
	}

	/* Bulk of the transfer as aligned 32-bit writes */
	count32 = (count - done) & ~0x3;
	while (done < count32) {
		iowrite32(*(u32 *)(buf + done), addr + done);
		done += 4;
	}

	/* Trailing 16-bit and 8-bit remainders */
	if ((count - done) & 0x2) {
		iowrite16(*(u16 *)(buf + done), addr + done);
		done += 2;
	}
	if ((count - done) & 0x1) {
		iowrite8(*(u8 *)(buf + done), addr + done);
		done += 1;
	}

out:
	retval = count;

	/*
	 * Writes are posted. We need to do a read on the VME bus to flush out
	 * all of the writes before we check for errors. We can't guarantee
	 * that reading the data we have just written is safe. It is believed
	 * that there isn't any read, write re-ordering, so we can read any
	 * location in VME space, so lets read the Device ID from the tsi148's
	 * own registers as mapped into CR/CSR space.
	 *
	 * We check for saved errors in the written address range/space.
	 */
	if (err_chk) {
		ioread16(bridge->flush_image->kern_base + 0x7F000);

		if (handler->num_errors) {
			dev_warn(tsi148_bridge->parent,
				 "First VME write error detected at address 0x%llx\n",
				 handler->first_error);
			retval = handler->first_error - (vme_base + offset);
		}
		vme_unregister_error_handler(handler);
	}

	spin_unlock(&image->lock);

	return retval;
}
/*
* Perform an RMW cycle on the VME bus.
*
* Requires a previously configured master window, returns final value.
*/
static unsigned int tsi148_master_rmw(struct vme_master_resource *image,
	unsigned int mask, unsigned int compare, unsigned int swap,
	loff_t offset)
{
	unsigned long long pci_addr;
	unsigned int pci_addr_high, pci_addr_low;
	u32 tmp, result;
	int i;
	struct tsi148_driver *bridge;

	bridge = image->parent->driver_priv;

	/* Find the PCI address that maps to the desired VME address */
	i = image->number;

	/* Locking as we can only do one of these at a time */
	mutex_lock(&bridge->vme_rmw);

	/* Lock image */
	spin_lock(&image->lock);

	/* Read the window's PCI start address and add the caller's offset */
	pci_addr_high = ioread32be(bridge->base + TSI148_LCSR_OT[i] +
		TSI148_LCSR_OFFSET_OTSAU);
	pci_addr_low = ioread32be(bridge->base + TSI148_LCSR_OT[i] +
		TSI148_LCSR_OFFSET_OTSAL);

	reg_join(pci_addr_high, pci_addr_low, &pci_addr);
	reg_split(pci_addr + offset, &pci_addr_high, &pci_addr_low);

	/* Configure the RMW enable/compare/swap registers and target address */
	iowrite32be(mask, bridge->base + TSI148_LCSR_RMWEN);
	iowrite32be(compare, bridge->base + TSI148_LCSR_RMWC);
	iowrite32be(swap, bridge->base + TSI148_LCSR_RMWS);
	iowrite32be(pci_addr_high, bridge->base + TSI148_LCSR_RMWAU);
	iowrite32be(pci_addr_low, bridge->base + TSI148_LCSR_RMWAL);

	/* Enable RMW */
	tmp = ioread32be(bridge->base + TSI148_LCSR_VMCTRL);
	tmp |= TSI148_LCSR_VMCTRL_RMWEN;
	iowrite32be(tmp, bridge->base + TSI148_LCSR_VMCTRL);

	/* Kick process off with a read to the required address. */
	result = ioread32be(image->kern_base + offset);

	/* Disable RMW */
	tmp = ioread32be(bridge->base + TSI148_LCSR_VMCTRL);
	tmp &= ~TSI148_LCSR_VMCTRL_RMWEN;
	iowrite32be(tmp, bridge->base + TSI148_LCSR_VMCTRL);

	spin_unlock(&image->lock);

	mutex_unlock(&bridge->vme_rmw);

	return result;
}
/*
 * Encode VME source attributes (2eSST speed, cycle type, data width,
 * address space, SUP/PGM) into the big-endian DSAT descriptor word.
 * Returns 0 on success, -EINVAL on an unsupported dwidth or aspace;
 * *attr is only updated on success.
 */
static int tsi148_dma_set_vme_src_attributes(struct device *dev, __be32 *attr,
	u32 aspace, u32 cycle, u32 dwidth)
{
	u32 dsat = be32_to_cpu(*attr);
	u32 sst = cycle & (VME_2eSST160 | VME_2eSST267 | VME_2eSST320);

	/* 2eSST transfer speed (at most one flag is honoured) */
	if (sst == VME_2eSST160)
		dsat |= TSI148_LCSR_DSAT_2eSSTM_160;
	else if (sst == VME_2eSST267)
		dsat |= TSI148_LCSR_DSAT_2eSSTM_267;
	else if (sst == VME_2eSST320)
		dsat |= TSI148_LCSR_DSAT_2eSSTM_320;

	/* Cycle type bits */
	if (cycle & VME_SCT)
		dsat |= TSI148_LCSR_DSAT_TM_SCT;
	if (cycle & VME_BLT)
		dsat |= TSI148_LCSR_DSAT_TM_BLT;
	if (cycle & VME_MBLT)
		dsat |= TSI148_LCSR_DSAT_TM_MBLT;
	if (cycle & VME_2eVME)
		dsat |= TSI148_LCSR_DSAT_TM_2eVME;
	if (cycle & VME_2eSST)
		dsat |= TSI148_LCSR_DSAT_TM_2eSST;
	if (cycle & VME_2eSSTB) {
		dev_err(dev, "Currently not setting Broadcast Select Registers\n");
		dsat |= TSI148_LCSR_DSAT_TM_2eSSTB;
	}

	/* Data width */
	switch (dwidth) {
	case VME_D16:
		dsat |= TSI148_LCSR_DSAT_DBW_16;
		break;
	case VME_D32:
		dsat |= TSI148_LCSR_DSAT_DBW_32;
		break;
	default:
		dev_err(dev, "Invalid data width\n");
		return -EINVAL;
	}

	/* Address space */
	switch (aspace) {
	case VME_A16:
		dsat |= TSI148_LCSR_DSAT_AMODE_A16;
		break;
	case VME_A24:
		dsat |= TSI148_LCSR_DSAT_AMODE_A24;
		break;
	case VME_A32:
		dsat |= TSI148_LCSR_DSAT_AMODE_A32;
		break;
	case VME_A64:
		dsat |= TSI148_LCSR_DSAT_AMODE_A64;
		break;
	case VME_CRCSR:
		dsat |= TSI148_LCSR_DSAT_AMODE_CRCSR;
		break;
	case VME_USER1:
		dsat |= TSI148_LCSR_DSAT_AMODE_USER1;
		break;
	case VME_USER2:
		dsat |= TSI148_LCSR_DSAT_AMODE_USER2;
		break;
	case VME_USER3:
		dsat |= TSI148_LCSR_DSAT_AMODE_USER3;
		break;
	case VME_USER4:
		dsat |= TSI148_LCSR_DSAT_AMODE_USER4;
		break;
	default:
		dev_err(dev, "Invalid address space\n");
		return -EINVAL;
	}

	/* Access privilege / program-data selection */
	if (cycle & VME_SUPER)
		dsat |= TSI148_LCSR_DSAT_SUP;
	if (cycle & VME_PROG)
		dsat |= TSI148_LCSR_DSAT_PGM;

	*attr = cpu_to_be32(dsat);

	return 0;
}
/*
 * Encode VME destination attributes (2eSST speed, cycle type, data
 * width, address space, SUP/PGM) into the big-endian DDAT descriptor
 * word. Returns 0 on success, -EINVAL on an unsupported dwidth or
 * aspace; *attr is only updated on success.
 */
static int tsi148_dma_set_vme_dest_attributes(struct device *dev, __be32 *attr,
	u32 aspace, u32 cycle, u32 dwidth)
{
	u32 ddat = be32_to_cpu(*attr);
	u32 sst = cycle & (VME_2eSST160 | VME_2eSST267 | VME_2eSST320);

	/* 2eSST transfer speed (at most one flag is honoured) */
	if (sst == VME_2eSST160)
		ddat |= TSI148_LCSR_DDAT_2eSSTM_160;
	else if (sst == VME_2eSST267)
		ddat |= TSI148_LCSR_DDAT_2eSSTM_267;
	else if (sst == VME_2eSST320)
		ddat |= TSI148_LCSR_DDAT_2eSSTM_320;

	/* Cycle type bits */
	if (cycle & VME_SCT)
		ddat |= TSI148_LCSR_DDAT_TM_SCT;
	if (cycle & VME_BLT)
		ddat |= TSI148_LCSR_DDAT_TM_BLT;
	if (cycle & VME_MBLT)
		ddat |= TSI148_LCSR_DDAT_TM_MBLT;
	if (cycle & VME_2eVME)
		ddat |= TSI148_LCSR_DDAT_TM_2eVME;
	if (cycle & VME_2eSST)
		ddat |= TSI148_LCSR_DDAT_TM_2eSST;
	if (cycle & VME_2eSSTB) {
		dev_err(dev, "Currently not setting Broadcast Select Registers\n");
		ddat |= TSI148_LCSR_DDAT_TM_2eSSTB;
	}

	/* Data width */
	switch (dwidth) {
	case VME_D16:
		ddat |= TSI148_LCSR_DDAT_DBW_16;
		break;
	case VME_D32:
		ddat |= TSI148_LCSR_DDAT_DBW_32;
		break;
	default:
		dev_err(dev, "Invalid data width\n");
		return -EINVAL;
	}

	/* Address space */
	switch (aspace) {
	case VME_A16:
		ddat |= TSI148_LCSR_DDAT_AMODE_A16;
		break;
	case VME_A24:
		ddat |= TSI148_LCSR_DDAT_AMODE_A24;
		break;
	case VME_A32:
		ddat |= TSI148_LCSR_DDAT_AMODE_A32;
		break;
	case VME_A64:
		ddat |= TSI148_LCSR_DDAT_AMODE_A64;
		break;
	case VME_CRCSR:
		ddat |= TSI148_LCSR_DDAT_AMODE_CRCSR;
		break;
	case VME_USER1:
		ddat |= TSI148_LCSR_DDAT_AMODE_USER1;
		break;
	case VME_USER2:
		ddat |= TSI148_LCSR_DDAT_AMODE_USER2;
		break;
	case VME_USER3:
		ddat |= TSI148_LCSR_DDAT_AMODE_USER3;
		break;
	case VME_USER4:
		ddat |= TSI148_LCSR_DDAT_AMODE_USER4;
		break;
	default:
		dev_err(dev, "Invalid address space\n");
		return -EINVAL;
	}

	/* Access privilege / program-data selection */
	if (cycle & VME_SUPER)
		ddat |= TSI148_LCSR_DDAT_SUP;
	if (cycle & VME_PROG)
		ddat |= TSI148_LCSR_DDAT_PGM;

	*attr = cpu_to_be32(ddat);

	return 0;
}
/*
* Add a link list descriptor to the list
*
* Note: DMA engine expects the DMA descriptor to be big endian.
*/
/*
 * tsi148_dma_list_add - append one transfer descriptor to a DMA list.
 * @list:  DMA list being built
 * @src:   source attributes (pattern, PCI or VME)
 * @dest:  destination attributes (PCI or VME)
 * @count: transfer byte count
 *
 * Builds a hardware link-list descriptor, DMA-maps it, appends it to the
 * list and patches the previous descriptor's "next" pointer to chain to
 * it. Returns 0 on success or a negative errno; on failure the entry is
 * fully unwound via the goto cleanup chain.
 */
static int tsi148_dma_list_add(struct vme_dma_list *list,
	struct vme_dma_attr *src, struct vme_dma_attr *dest, size_t count)
{
	struct tsi148_dma_entry *entry, *prev;
	u32 address_high, address_low, val;
	struct vme_dma_pattern *pattern_attr;
	struct vme_dma_pci *pci_attr;
	struct vme_dma_vme *vme_attr;
	int retval = 0;
	struct vme_bridge *tsi148_bridge;

	tsi148_bridge = list->parent->parent;

	/* Descriptor must be aligned on 64-bit boundaries */
	entry = kmalloc(sizeof(*entry), GFP_KERNEL);
	if (!entry) {
		retval = -ENOMEM;
		goto err_mem;
	}

	/* Test descriptor alignment */
	if ((unsigned long)&entry->descriptor & 0x7) {
		dev_err(tsi148_bridge->parent, "Descriptor not aligned to 8 byte boundary as required: %p\n",
			&entry->descriptor);
		retval = -EINVAL;
		goto err_align;
	}

	/* Given we are going to fill out the structure, we probably don't
	 * need to zero it, but better safe than sorry for now.
	 */
	memset(&entry->descriptor, 0, sizeof(entry->descriptor));

	/* Fill out source part (descriptor fields are big endian) */
	switch (src->type) {
	case VME_DMA_PATTERN:
		pattern_attr = src->private;
		entry->descriptor.dsal = cpu_to_be32(pattern_attr->pattern);
		val = TSI148_LCSR_DSAT_TYP_PAT;
		/* Default behaviour is 32 bit pattern */
		if (pattern_attr->type & VME_DMA_PATTERN_BYTE)
			val |= TSI148_LCSR_DSAT_PSZ;
		/* It seems that the default behaviour is to increment */
		if ((pattern_attr->type & VME_DMA_PATTERN_INCREMENT) == 0)
			val |= TSI148_LCSR_DSAT_NIN;
		entry->descriptor.dsat = cpu_to_be32(val);
		break;
	case VME_DMA_PCI:
		pci_attr = src->private;
		reg_split((unsigned long long)pci_attr->address, &address_high,
			&address_low);
		entry->descriptor.dsau = cpu_to_be32(address_high);
		entry->descriptor.dsal = cpu_to_be32(address_low);
		entry->descriptor.dsat = cpu_to_be32(TSI148_LCSR_DSAT_TYP_PCI);
		break;
	case VME_DMA_VME:
		vme_attr = src->private;
		reg_split((unsigned long long)vme_attr->address, &address_high,
			&address_low);
		entry->descriptor.dsau = cpu_to_be32(address_high);
		entry->descriptor.dsal = cpu_to_be32(address_low);
		entry->descriptor.dsat = cpu_to_be32(TSI148_LCSR_DSAT_TYP_VME);
		retval = tsi148_dma_set_vme_src_attributes(
			tsi148_bridge->parent, &entry->descriptor.dsat,
			vme_attr->aspace, vme_attr->cycle, vme_attr->dwidth);
		if (retval < 0)
			goto err_source;
		break;
	default:
		dev_err(tsi148_bridge->parent, "Invalid source type\n");
		retval = -EINVAL;
		goto err_source;
	}

	/* Assume last link - this will be over-written by adding another */
	entry->descriptor.dnlau = cpu_to_be32(0);
	entry->descriptor.dnlal = cpu_to_be32(TSI148_LCSR_DNLAL_LLA);

	/* Fill out destination part */
	switch (dest->type) {
	case VME_DMA_PCI:
		pci_attr = dest->private;
		reg_split((unsigned long long)pci_attr->address, &address_high,
			&address_low);
		entry->descriptor.ddau = cpu_to_be32(address_high);
		entry->descriptor.ddal = cpu_to_be32(address_low);
		entry->descriptor.ddat = cpu_to_be32(TSI148_LCSR_DDAT_TYP_PCI);
		break;
	case VME_DMA_VME:
		vme_attr = dest->private;
		reg_split((unsigned long long)vme_attr->address, &address_high,
			&address_low);
		entry->descriptor.ddau = cpu_to_be32(address_high);
		entry->descriptor.ddal = cpu_to_be32(address_low);
		entry->descriptor.ddat = cpu_to_be32(TSI148_LCSR_DDAT_TYP_VME);
		retval = tsi148_dma_set_vme_dest_attributes(
			tsi148_bridge->parent, &entry->descriptor.ddat,
			vme_attr->aspace, vme_attr->cycle, vme_attr->dwidth);
		if (retval < 0)
			goto err_dest;
		break;
	default:
		dev_err(tsi148_bridge->parent, "Invalid destination type\n");
		retval = -EINVAL;
		goto err_dest;
	}

	/* Fill out count */
	entry->descriptor.dcnt = cpu_to_be32((u32)count);

	/* Add to list */
	list_add_tail(&entry->list, &list->entries);

	/* Map the descriptor itself for device access */
	entry->dma_handle = dma_map_single(tsi148_bridge->parent,
					   &entry->descriptor,
					   sizeof(entry->descriptor),
					   DMA_TO_DEVICE);
	if (dma_mapping_error(tsi148_bridge->parent, entry->dma_handle)) {
		dev_err(tsi148_bridge->parent, "DMA mapping error\n");
		retval = -EINVAL;
		goto err_dma;
	}

	/* Fill out previous descriptors "Next Address" */
	if (entry->list.prev != &list->entries) {
		reg_split((unsigned long long)entry->dma_handle, &address_high,
			&address_low);
		prev = list_entry(entry->list.prev, struct tsi148_dma_entry,
			list);
		prev->descriptor.dnlau = cpu_to_be32(address_high);
		prev->descriptor.dnlal = cpu_to_be32(address_low);
	}

	return 0;

err_dma:
	list_del(&entry->list);
err_dest:
err_source:
err_align:
	kfree(entry);
err_mem:
	return retval;
}
/*
* Check to see if the provided DMA channel is busy.
*/
/*
 * Return 1 when the given DMA channel is idle, 0 while it is still busy
 * (the DSTA busy bit is set). Used as a wait_event() condition.
 */
static int tsi148_dma_busy(struct vme_bridge *tsi148_bridge, int channel)
{
	struct tsi148_driver *bridge = tsi148_bridge->driver_priv;
	u32 dsta;

	dsta = ioread32be(bridge->base + TSI148_LCSR_DMA[channel] +
		TSI148_LCSR_OFFSET_DSTA);

	return (dsta & TSI148_LCSR_DSTA_BSY) ? 0 : 1;
}
/*
* Execute a previously generated link list
*
* XXX Need to provide control register configuration.
*/
/*
 * tsi148_dma_list_exec - run a previously built DMA link list.
 *
 * Only one list may run per channel: if the channel's running list is
 * non-empty, -EBUSY is returned. Otherwise the first descriptor's bus
 * address is programmed, the channel is started, and the caller sleeps
 * (interruptibly) until the channel goes idle. On interruption the
 * transfer is aborted and -EINTR returned; a latched VME bus error
 * yields -EIO.
 */
static int tsi148_dma_list_exec(struct vme_dma_list *list)
{
	struct vme_dma_resource *ctrlr;
	int channel, retval;
	struct tsi148_dma_entry *entry;
	u32 bus_addr_high, bus_addr_low;
	u32 val, dctlreg = 0;
	struct vme_bridge *tsi148_bridge;
	struct tsi148_driver *bridge;

	ctrlr = list->parent;

	tsi148_bridge = ctrlr->parent;
	bridge = tsi148_bridge->driver_priv;

	mutex_lock(&ctrlr->mtx);

	channel = ctrlr->number;

	if (!list_empty(&ctrlr->running)) {
		/*
		 * XXX We have an active DMA transfer and currently haven't
		 * sorted out the mechanism for "pending" DMA transfers.
		 * Return busy.
		 */
		/* Need to add to pending here */
		mutex_unlock(&ctrlr->mtx);
		return -EBUSY;
	}
	list_add(&list->list, &ctrlr->running);

	/* Get first bus address and write into registers */
	entry = list_first_entry(&list->entries, struct tsi148_dma_entry,
		list);

	mutex_unlock(&ctrlr->mtx);

	reg_split(entry->dma_handle, &bus_addr_high, &bus_addr_low);

	iowrite32be(bus_addr_high, bridge->base +
		TSI148_LCSR_DMA[channel] + TSI148_LCSR_OFFSET_DNLAU);
	iowrite32be(bus_addr_low, bridge->base +
		TSI148_LCSR_DMA[channel] + TSI148_LCSR_OFFSET_DNLAL);

	dctlreg = ioread32be(bridge->base + TSI148_LCSR_DMA[channel] +
		TSI148_LCSR_OFFSET_DCTL);

	/* Start the operation */
	iowrite32be(dctlreg | TSI148_LCSR_DCTL_DGO, bridge->base +
		TSI148_LCSR_DMA[channel] + TSI148_LCSR_OFFSET_DCTL);

	/* Wait (interruptibly) for the channel to go idle */
	retval = wait_event_interruptible(bridge->dma_queue[channel],
		tsi148_dma_busy(ctrlr->parent, channel));

	if (retval) {
		/* Interrupted: abort and wait (uninterruptibly) for idle */
		iowrite32be(dctlreg | TSI148_LCSR_DCTL_ABT, bridge->base +
			TSI148_LCSR_DMA[channel] + TSI148_LCSR_OFFSET_DCTL);
		/* Wait for the operation to abort */
		wait_event(bridge->dma_queue[channel],
			tsi148_dma_busy(ctrlr->parent, channel));
		retval = -EINTR;
		goto exit;
	}

	/*
	 * Read status register, this register is valid until we kick off a
	 * new transfer.
	 */
	val = ioread32be(bridge->base + TSI148_LCSR_DMA[channel] +
		TSI148_LCSR_OFFSET_DSTA);

	if (val & TSI148_LCSR_DSTA_VBE) {
		dev_err(tsi148_bridge->parent, "DMA Error. DSTA=%08X\n", val);
		retval = -EIO;
	}

exit:
	/* Remove list from running list */
	mutex_lock(&ctrlr->mtx);
	list_del(&list->list);
	mutex_unlock(&ctrlr->mtx);

	return retval;
}
/*
* Clean up a previously generated link list
*
* We have a separate function, don't assume that the chain can't be reused.
*/
/*
 * tsi148_dma_list_empty - tear down a previously built DMA link list.
 *
 * Unlinks every entry, unmaps its DMA-mapped descriptor and frees it.
 * Always returns 0.
 *
 * Uses list_for_each_entry_safe() rather than the open-coded
 * list_for_each_safe() + list_entry() pair.
 */
static int tsi148_dma_list_empty(struct vme_dma_list *list)
{
	struct tsi148_dma_entry *entry, *tmp;
	struct vme_bridge *tsi148_bridge = list->parent->parent;

	/* detach and free each entry */
	list_for_each_entry_safe(entry, tmp, &list->entries, list) {
		list_del(&entry->list);
		dma_unmap_single(tsi148_bridge->parent, entry->dma_handle,
			sizeof(struct tsi148_dma_descriptor), DMA_TO_DEVICE);
		kfree(entry);
	}

	return 0;
}
/*
* All 4 location monitors reside at the same base - this is therefore a
* system wide configuration.
*
* This does not enable the LM monitor - that should be done when the first
* callback is attached and disabled when the last callback is removed.
*/
static int tsi148_lm_set(struct vme_lm_resource *lm, unsigned long long lm_base,
	u32 aspace, u32 cycle)
{
	u32 lm_base_high, lm_base_low, lm_ctl = 0;
	int i;
	struct vme_bridge *tsi148_bridge;
	struct tsi148_driver *bridge;

	tsi148_bridge = lm->parent;
	bridge = tsi148_bridge->driver_priv;

	mutex_lock(&lm->mtx);

	/* If we already have a callback attached, we can't move it! */
	for (i = 0; i < lm->monitors; i++) {
		if (bridge->lm_callback[i]) {
			mutex_unlock(&lm->mtx);
			dev_err(tsi148_bridge->parent, "Location monitor callback attached, can't reset\n");
			return -EBUSY;
		}
	}

	/* Only A16/A24/A32/A64 are valid for the location monitor */
	switch (aspace) {
	case VME_A16:
		lm_ctl |= TSI148_LCSR_LMAT_AS_A16;
		break;
	case VME_A24:
		lm_ctl |= TSI148_LCSR_LMAT_AS_A24;
		break;
	case VME_A32:
		lm_ctl |= TSI148_LCSR_LMAT_AS_A32;
		break;
	case VME_A64:
		lm_ctl |= TSI148_LCSR_LMAT_AS_A64;
		break;
	default:
		mutex_unlock(&lm->mtx);
		dev_err(tsi148_bridge->parent, "Invalid address space\n");
		return -EINVAL;
	}

	/* Privilege and program/data qualifiers */
	if (cycle & VME_SUPER)
		lm_ctl |= TSI148_LCSR_LMAT_SUPR;
	if (cycle & VME_USER)
		lm_ctl |= TSI148_LCSR_LMAT_NPRIV;
	if (cycle & VME_PROG)
		lm_ctl |= TSI148_LCSR_LMAT_PGM;
	if (cycle & VME_DATA)
		lm_ctl |= TSI148_LCSR_LMAT_DATA;

	/* Program base address (split across two 32-bit registers) and
	 * attributes. Note the enable bit is NOT set here - see the
	 * comment above: enabling happens on first callback attach.
	 */
	reg_split(lm_base, &lm_base_high, &lm_base_low);
	iowrite32be(lm_base_high, bridge->base + TSI148_LCSR_LMBAU);
	iowrite32be(lm_base_low, bridge->base + TSI148_LCSR_LMBAL);
	iowrite32be(lm_ctl, bridge->base + TSI148_LCSR_LMAT);

	mutex_unlock(&lm->mtx);

	return 0;
}
/* Get configuration of the callback monitor and return whether it is enabled
* or disabled.
*/
/*
 * tsi148_lm_get - read back the location monitor configuration.
 *
 * Fills *lm_base, *aspace and *cycle from the LMBAU/LMBAL/LMAT
 * registers and returns 1 if the monitor is enabled, 0 otherwise.
 *
 * Fix: *aspace and *cycle were OR-ed into without being zeroed first,
 * so stale caller-provided bits could leak into the result (the
 * analogous __tsi148_master_get zeroes its outputs). Zero them before
 * decoding.
 */
static int tsi148_lm_get(struct vme_lm_resource *lm,
	unsigned long long *lm_base, u32 *aspace, u32 *cycle)
{
	u32 lm_base_high, lm_base_low, lm_ctl, enabled = 0;
	struct tsi148_driver *bridge;

	bridge = lm->parent->driver_priv;

	mutex_lock(&lm->mtx);

	lm_base_high = ioread32be(bridge->base + TSI148_LCSR_LMBAU);
	lm_base_low = ioread32be(bridge->base + TSI148_LCSR_LMBAL);
	lm_ctl = ioread32be(bridge->base + TSI148_LCSR_LMAT);

	reg_join(lm_base_high, lm_base_low, lm_base);

	/* Start from a clean slate before OR-ing in the decoded flags */
	*aspace = 0;
	*cycle = 0;

	if (lm_ctl & TSI148_LCSR_LMAT_EN)
		enabled = 1;

	if ((lm_ctl & TSI148_LCSR_LMAT_AS_M) == TSI148_LCSR_LMAT_AS_A16)
		*aspace |= VME_A16;
	if ((lm_ctl & TSI148_LCSR_LMAT_AS_M) == TSI148_LCSR_LMAT_AS_A24)
		*aspace |= VME_A24;
	if ((lm_ctl & TSI148_LCSR_LMAT_AS_M) == TSI148_LCSR_LMAT_AS_A32)
		*aspace |= VME_A32;
	if ((lm_ctl & TSI148_LCSR_LMAT_AS_M) == TSI148_LCSR_LMAT_AS_A64)
		*aspace |= VME_A64;

	if (lm_ctl & TSI148_LCSR_LMAT_SUPR)
		*cycle |= VME_SUPER;
	if (lm_ctl & TSI148_LCSR_LMAT_NPRIV)
		*cycle |= VME_USER;
	if (lm_ctl & TSI148_LCSR_LMAT_PGM)
		*cycle |= VME_PROG;
	if (lm_ctl & TSI148_LCSR_LMAT_DATA)
		*cycle |= VME_DATA;

	mutex_unlock(&lm->mtx);

	return enabled;
}
/*
* Attach a callback to a specific location monitor.
*
* Callback will be passed the monitor triggered.
*/
static int tsi148_lm_attach(struct vme_lm_resource *lm, int monitor,
	void (*callback)(void *), void *data)
{
	u32 lm_ctl, tmp;
	struct vme_bridge *tsi148_bridge;
	struct tsi148_driver *bridge;

	tsi148_bridge = lm->parent;
	bridge = tsi148_bridge->driver_priv;

	mutex_lock(&lm->mtx);

	/* Ensure that the location monitor is configured - need PGM or DATA */
	lm_ctl = ioread32be(bridge->base + TSI148_LCSR_LMAT);
	if ((lm_ctl & (TSI148_LCSR_LMAT_PGM | TSI148_LCSR_LMAT_DATA)) == 0) {
		mutex_unlock(&lm->mtx);
		dev_err(tsi148_bridge->parent, "Location monitor not properly configured\n");
		return -EINVAL;
	}

	/* Check that a callback isn't already attached */
	if (bridge->lm_callback[monitor]) {
		mutex_unlock(&lm->mtx);
		dev_err(tsi148_bridge->parent, "Existing callback attached\n");
		return -EBUSY;
	}

	/* Attach callback before enabling the interrupt source */
	bridge->lm_callback[monitor] = callback;
	bridge->lm_data[monitor] = data;

	/* Enable Location Monitor interrupt (both enable and enable-out) */
	tmp = ioread32be(bridge->base + TSI148_LCSR_INTEN);
	tmp |= TSI148_LCSR_INTEN_LMEN[monitor];
	iowrite32be(tmp, bridge->base + TSI148_LCSR_INTEN);

	tmp = ioread32be(bridge->base + TSI148_LCSR_INTEO);
	tmp |= TSI148_LCSR_INTEO_LMEO[monitor];
	iowrite32be(tmp, bridge->base + TSI148_LCSR_INTEO);

	/* Ensure that global Location Monitor Enable set */
	if ((lm_ctl & TSI148_LCSR_LMAT_EN) == 0) {
		lm_ctl |= TSI148_LCSR_LMAT_EN;
		iowrite32be(lm_ctl, bridge->base + TSI148_LCSR_LMAT);
	}

	mutex_unlock(&lm->mtx);

	return 0;
}
/*
* Detach a callback function forn a specific location monitor.
*/
static int tsi148_lm_detach(struct vme_lm_resource *lm, int monitor)
{
	u32 lm_en, tmp;
	struct tsi148_driver *bridge;

	bridge = lm->parent->driver_priv;

	mutex_lock(&lm->mtx);

	/* Disable Location Monitor and ensure previous interrupts are clear */
	lm_en = ioread32be(bridge->base + TSI148_LCSR_INTEN);
	lm_en &= ~TSI148_LCSR_INTEN_LMEN[monitor];
	iowrite32be(lm_en, bridge->base + TSI148_LCSR_INTEN);

	tmp = ioread32be(bridge->base + TSI148_LCSR_INTEO);
	tmp &= ~TSI148_LCSR_INTEO_LMEO[monitor];
	iowrite32be(tmp, bridge->base + TSI148_LCSR_INTEO);

	/* Acknowledge/clear any pending interrupt for this monitor */
	iowrite32be(TSI148_LCSR_INTC_LMC[monitor],
		bridge->base + TSI148_LCSR_INTC);

	/* Detach callback */
	bridge->lm_callback[monitor] = NULL;
	bridge->lm_data[monitor] = NULL;

	/* If all location monitors disabled, disable global Location Monitor */
	/* NOTE(review): lm_en holds an INTEN value but is masked with
	 * TSI148_LCSR_INTS_LM*S constants - presumably the LM bit positions
	 * coincide across INTEN/INTS; verify against the TSI148 register map.
	 */
	if ((lm_en & (TSI148_LCSR_INTS_LM0S | TSI148_LCSR_INTS_LM1S |
			TSI148_LCSR_INTS_LM2S | TSI148_LCSR_INTS_LM3S)) == 0) {
		tmp = ioread32be(bridge->base + TSI148_LCSR_LMAT);
		tmp &= ~TSI148_LCSR_LMAT_EN;
		iowrite32be(tmp, bridge->base + TSI148_LCSR_LMAT);
	}

	mutex_unlock(&lm->mtx);

	return 0;
}
/*
 * Determine the VME slot number via geographical addressing, unless the
 * "geoid" module parameter overrides the hardware-reported value.
 */
static int tsi148_slot_get(struct vme_bridge *tsi148_bridge)
{
	struct tsi148_driver *bridge = tsi148_bridge->driver_priv;
	u32 slot;

	/* A non-zero geoid module parameter takes precedence */
	if (geoid)
		return geoid;

	/* Read the geographical address field from the VSTAT register */
	slot = ioread32be(bridge->base + TSI148_LCSR_VSTAT);

	return (int)(slot & TSI148_LCSR_VSTAT_GA_M);
}
/* Allocate a coherent DMA buffer on behalf of the bridge's PCI function. */
static void *tsi148_alloc_consistent(struct device *parent, size_t size,
				     dma_addr_t *dma)
{
	/* The parent device is the underlying PCI function */
	struct pci_dev *pdev = to_pci_dev(parent);

	return dma_alloc_coherent(&pdev->dev, size, dma, GFP_KERNEL);
}
/* Release a buffer obtained via tsi148_alloc_consistent(). */
static void tsi148_free_consistent(struct device *parent, size_t size,
				   void *vaddr, dma_addr_t dma)
{
	/* The parent device is the underlying PCI function */
	struct pci_dev *pdev = to_pci_dev(parent);

	dma_free_coherent(&pdev->dev, size, vaddr, dma);
}
/*
 * Configure CR/CSR space
 *
 * Access to the CR/CSR can be configured at power-up. The location of the
 * CR/CSR registers in the CR/CSR address space is determined by the boards
 * Auto-ID or Geographic address. This function ensures that the window is
 * enabled at an offset consistent with the boards geographic address.
 *
 * Each board has a 512kB window, with the highest 4kB being used for the
 * boards registers, this means there is a fixed-length 508kB window which
 * must be mapped onto PCI memory.
 *
 * Returns 0 on success, -ENOMEM if the CR/CSR image cannot be allocated.
 */
static int tsi148_crcsr_init(struct vme_bridge *tsi148_bridge,
			     struct pci_dev *pdev)
{
	u32 cbar, crat, vstat;
	u32 crcsr_bus_high, crcsr_bus_low;
	int retval;
	struct tsi148_driver *bridge;

	bridge = tsi148_bridge->driver_priv;

	/* Allocate mem for CR/CSR image */
	bridge->crcsr_kernel = dma_alloc_coherent(&pdev->dev,
						  VME_CRCSR_BUF_SIZE,
						  &bridge->crcsr_bus, GFP_KERNEL);
	if (!bridge->crcsr_kernel) {
		dev_err(tsi148_bridge->parent, "Failed to allocate memory for CR/CSR image\n");
		return -ENOMEM;
	}

	/* Program the bus address of the image into the CR/CSR base registers */
	reg_split(bridge->crcsr_bus, &crcsr_bus_high, &crcsr_bus_low);
	iowrite32be(crcsr_bus_high, bridge->base + TSI148_LCSR_CROU);
	iowrite32be(crcsr_bus_low, bridge->base + TSI148_LCSR_CROL);

	/* Ensure that the CR/CSR is configured at the correct offset */
	cbar = ioread32be(bridge->base + TSI148_CBAR);
	cbar = (cbar & TSI148_CRCSR_CBAR_M) >> 3;

	vstat = tsi148_slot_get(tsi148_bridge);

	/* Rewrite CBAR if it disagrees with the geographical address */
	if (cbar != vstat) {
		cbar = vstat;
		dev_info(tsi148_bridge->parent, "Setting CR/CSR offset\n");
		iowrite32be(cbar << 3, bridge->base + TSI148_CBAR);
	}
	dev_info(tsi148_bridge->parent, "CR/CSR Offset: %d\n", cbar);

	/* Enable the CR/CSR space if firmware has not already done so */
	crat = ioread32be(bridge->base + TSI148_LCSR_CRAT);
	if (crat & TSI148_LCSR_CRAT_EN)
		dev_info(tsi148_bridge->parent, "CR/CSR already enabled\n");
	else {
		dev_info(tsi148_bridge->parent, "Enabling CR/CSR space\n");
		iowrite32be(crat | TSI148_LCSR_CRAT_EN,
			    bridge->base + TSI148_LCSR_CRAT);
	}

	/* If we want flushed, error-checked writes, set up a window
	 * over the CR/CSR registers. We read from here to safely flush
	 * through VME writes.
	 */
	if (err_chk) {
		/* Each slot's 512kB (0x80000) CR/CSR window starts at slot * 0x80000 */
		retval = tsi148_master_set(bridge->flush_image, 1,
			(vstat * 0x80000), 0x80000, VME_CRCSR, VME_SCT,
			VME_D16);
		if (retval)
			dev_err(tsi148_bridge->parent, "Configuring flush image failed\n");
	}

	return 0;
}
/*
 * Undo tsi148_crcsr_init(): disable the CR/CSR window, clear the base
 * address registers, then release the backing coherent DMA buffer.
 */
static void tsi148_crcsr_exit(struct vme_bridge *tsi148_bridge,
			      struct pci_dev *pdev)
{
	u32 crat;
	struct tsi148_driver *bridge;

	bridge = tsi148_bridge->driver_priv;

	/* Turn off CR/CSR space */
	crat = ioread32be(bridge->base + TSI148_LCSR_CRAT);
	iowrite32be(crat & ~TSI148_LCSR_CRAT_EN,
		    bridge->base + TSI148_LCSR_CRAT);

	/* Free image (clear base registers before releasing the memory) */
	iowrite32be(0, bridge->base + TSI148_LCSR_CROU);
	iowrite32be(0, bridge->base + TSI148_LCSR_CROL);

	dma_free_coherent(&pdev->dev, VME_CRCSR_BUF_SIZE,
			  bridge->crcsr_kernel, bridge->crcsr_bus);
}
/*
 * PCI probe: map the TSI148 CRG registers (BAR 0), set up the IRQ handling,
 * build the master/slave/DMA/location-monitor resource lists, configure
 * CR/CSR space and register the bridge with the VME core.
 *
 * Returns 0 on success or a negative errno; all partially-acquired
 * resources are unwound via the goto ladder at the bottom.
 */
static int tsi148_probe(struct pci_dev *pdev, const struct pci_device_id *id)
{
	int retval, i, master_num;
	u32 data;
	struct list_head *pos = NULL, *n;
	struct vme_bridge *tsi148_bridge;
	struct tsi148_driver *tsi148_device;
	struct vme_master_resource *master_image;
	struct vme_slave_resource *slave_image;
	struct vme_dma_resource *dma_ctrlr;
	struct vme_lm_resource *lm;

	/* If we want to support more than one of each bridge, we need to
	 * dynamically generate this so we get one per device
	 */
	tsi148_bridge = kzalloc(sizeof(*tsi148_bridge), GFP_KERNEL);
	if (!tsi148_bridge) {
		retval = -ENOMEM;
		goto err_struct;
	}
	vme_init_bridge(tsi148_bridge);

	tsi148_device = kzalloc(sizeof(*tsi148_device), GFP_KERNEL);
	if (!tsi148_device) {
		retval = -ENOMEM;
		goto err_driver;
	}
	tsi148_bridge->driver_priv = tsi148_device;

	/* Enable the device */
	retval = pci_enable_device(pdev);
	if (retval) {
		dev_err(&pdev->dev, "Unable to enable device\n");
		goto err_enable;
	}

	/* Map Registers */
	retval = pci_request_regions(pdev, driver_name);
	if (retval) {
		dev_err(&pdev->dev, "Unable to reserve resources\n");
		goto err_resource;
	}

	/* map registers in BAR 0 */
	tsi148_device->base = ioremap(pci_resource_start(pdev, 0),
				      4096);
	if (!tsi148_device->base) {
		dev_err(&pdev->dev, "Unable to remap CRG region\n");
		retval = -EIO;
		goto err_remap;
	}

	/* Check to see if the mapping worked out: the PCI config ID register
	 * visible through the CRG must report the Tundra vendor ID.
	 */
	data = ioread32(tsi148_device->base + TSI148_PCFS_ID) & 0x0000FFFF;
	if (data != PCI_VENDOR_ID_TUNDRA) {
		dev_err(&pdev->dev, "CRG region check failed\n");
		retval = -EIO;
		goto err_test;
	}

	/* Initialize wait queues & mutual exclusion flags */
	init_waitqueue_head(&tsi148_device->dma_queue[0]);
	init_waitqueue_head(&tsi148_device->dma_queue[1]);
	init_waitqueue_head(&tsi148_device->iack_queue);
	mutex_init(&tsi148_device->vme_int);
	mutex_init(&tsi148_device->vme_rmw);

	tsi148_bridge->parent = &pdev->dev;
	strcpy(tsi148_bridge->name, driver_name);

	/* Setup IRQ */
	retval = tsi148_irq_init(tsi148_bridge);
	if (retval != 0) {
		dev_err(&pdev->dev, "Chip Initialization failed.\n");
		goto err_irq;
	}

	/* If we are going to flush writes, we need to read from the VME bus.
	 * We need to do this safely, thus we read the devices own CR/CSR
	 * register. To do this we must set up a window in CR/CSR space and
	 * hence have one less master window resource available.
	 */
	master_num = TSI148_MAX_MASTER;
	if (err_chk) {
		master_num--;

		tsi148_device->flush_image =
			kmalloc(sizeof(*tsi148_device->flush_image),
				GFP_KERNEL);
		if (!tsi148_device->flush_image) {
			retval = -ENOMEM;
			goto err_master;
		}
		/* The flush window is reserved for the driver, hence locked */
		tsi148_device->flush_image->parent = tsi148_bridge;
		spin_lock_init(&tsi148_device->flush_image->lock);
		tsi148_device->flush_image->locked = 1;
		tsi148_device->flush_image->number = master_num;
		memset(&tsi148_device->flush_image->bus_resource, 0,
		       sizeof(tsi148_device->flush_image->bus_resource));
		tsi148_device->flush_image->kern_base = NULL;
	}

	/* Add master windows to list */
	for (i = 0; i < master_num; i++) {
		master_image = kmalloc(sizeof(*master_image), GFP_KERNEL);
		if (!master_image) {
			retval = -ENOMEM;
			goto err_master;
		}
		master_image->parent = tsi148_bridge;
		spin_lock_init(&master_image->lock);
		master_image->locked = 0;
		master_image->number = i;
		master_image->address_attr = VME_A16 | VME_A24 | VME_A32 |
			VME_A64 | VME_CRCSR | VME_USER1 | VME_USER2 |
			VME_USER3 | VME_USER4;
		master_image->cycle_attr = VME_SCT | VME_BLT | VME_MBLT |
			VME_2eVME | VME_2eSST | VME_2eSSTB | VME_2eSST160 |
			VME_2eSST267 | VME_2eSST320 | VME_SUPER | VME_USER |
			VME_PROG | VME_DATA;
		master_image->width_attr = VME_D16 | VME_D32;
		memset(&master_image->bus_resource, 0,
		       sizeof(master_image->bus_resource));
		master_image->kern_base = NULL;
		list_add_tail(&master_image->list,
			      &tsi148_bridge->master_resources);
	}

	/* Add slave windows to list */
	for (i = 0; i < TSI148_MAX_SLAVE; i++) {
		slave_image = kmalloc(sizeof(*slave_image), GFP_KERNEL);
		if (!slave_image) {
			retval = -ENOMEM;
			goto err_slave;
		}
		slave_image->parent = tsi148_bridge;
		mutex_init(&slave_image->mtx);
		slave_image->locked = 0;
		slave_image->number = i;
		slave_image->address_attr = VME_A16 | VME_A24 | VME_A32 |
			VME_A64;
		slave_image->cycle_attr = VME_SCT | VME_BLT | VME_MBLT |
			VME_2eVME | VME_2eSST | VME_2eSSTB | VME_2eSST160 |
			VME_2eSST267 | VME_2eSST320 | VME_SUPER | VME_USER |
			VME_PROG | VME_DATA;
		list_add_tail(&slave_image->list,
			      &tsi148_bridge->slave_resources);
	}

	/* Add dma engines to list */
	for (i = 0; i < TSI148_MAX_DMA; i++) {
		dma_ctrlr = kmalloc(sizeof(*dma_ctrlr), GFP_KERNEL);
		if (!dma_ctrlr) {
			retval = -ENOMEM;
			goto err_dma;
		}
		dma_ctrlr->parent = tsi148_bridge;
		mutex_init(&dma_ctrlr->mtx);
		dma_ctrlr->locked = 0;
		dma_ctrlr->number = i;
		dma_ctrlr->route_attr = VME_DMA_VME_TO_MEM |
			VME_DMA_MEM_TO_VME | VME_DMA_VME_TO_VME |
			VME_DMA_MEM_TO_MEM | VME_DMA_PATTERN_TO_VME |
			VME_DMA_PATTERN_TO_MEM;
		INIT_LIST_HEAD(&dma_ctrlr->pending);
		INIT_LIST_HEAD(&dma_ctrlr->running);
		list_add_tail(&dma_ctrlr->list,
			      &tsi148_bridge->dma_resources);
	}

	/* Add location monitor to list */
	lm = kmalloc(sizeof(*lm), GFP_KERNEL);
	if (!lm) {
		retval = -ENOMEM;
		goto err_lm;
	}
	lm->parent = tsi148_bridge;
	mutex_init(&lm->mtx);
	lm->locked = 0;
	lm->number = 1;
	lm->monitors = 4;
	list_add_tail(&lm->list, &tsi148_bridge->lm_resources);

	/* Hook up the bridge operations consumed by the VME core */
	tsi148_bridge->slave_get = tsi148_slave_get;
	tsi148_bridge->slave_set = tsi148_slave_set;
	tsi148_bridge->master_get = tsi148_master_get;
	tsi148_bridge->master_set = tsi148_master_set;
	tsi148_bridge->master_read = tsi148_master_read;
	tsi148_bridge->master_write = tsi148_master_write;
	tsi148_bridge->master_rmw = tsi148_master_rmw;
	tsi148_bridge->dma_list_add = tsi148_dma_list_add;
	tsi148_bridge->dma_list_exec = tsi148_dma_list_exec;
	tsi148_bridge->dma_list_empty = tsi148_dma_list_empty;
	tsi148_bridge->irq_set = tsi148_irq_set;
	tsi148_bridge->irq_generate = tsi148_irq_generate;
	tsi148_bridge->lm_set = tsi148_lm_set;
	tsi148_bridge->lm_get = tsi148_lm_get;
	tsi148_bridge->lm_attach = tsi148_lm_attach;
	tsi148_bridge->lm_detach = tsi148_lm_detach;
	tsi148_bridge->slot_get = tsi148_slot_get;
	tsi148_bridge->alloc_consistent = tsi148_alloc_consistent;
	tsi148_bridge->free_consistent = tsi148_free_consistent;

	data = ioread32be(tsi148_device->base + TSI148_LCSR_VSTAT);
	dev_info(&pdev->dev, "Board is%s the VME system controller\n",
		 (data & TSI148_LCSR_VSTAT_SCONS) ? "" : " not");
	if (!geoid)
		dev_info(&pdev->dev, "VME geographical address is %d\n",
			 data & TSI148_LCSR_VSTAT_GA_M);
	else
		dev_info(&pdev->dev, "VME geographical address is set to %d\n",
			 geoid);

	dev_info(&pdev->dev, "VME Write and flush and error check is %s\n",
		 err_chk ? "enabled" : "disabled");

	retval = tsi148_crcsr_init(tsi148_bridge, pdev);
	if (retval) {
		dev_err(&pdev->dev, "CR/CSR configuration failed.\n");
		goto err_crcsr;
	}

	retval = vme_register_bridge(tsi148_bridge);
	if (retval != 0) {
		dev_err(&pdev->dev, "Chip Registration failed.\n");
		goto err_reg;
	}

	pci_set_drvdata(pdev, tsi148_bridge);

	/* Clear VME bus "board fail", and "power-up reset" lines */
	data = ioread32be(tsi148_device->base + TSI148_LCSR_VSTAT);
	data &= ~TSI148_LCSR_VSTAT_BRDFL;
	data |= TSI148_LCSR_VSTAT_CPURST;
	iowrite32be(data, tsi148_device->base + TSI148_LCSR_VSTAT);

	return 0;

err_reg:
	tsi148_crcsr_exit(tsi148_bridge, pdev);
err_crcsr:
err_lm:
	/* resources are stored in link list */
	list_for_each_safe(pos, n, &tsi148_bridge->lm_resources) {
		lm = list_entry(pos, struct vme_lm_resource, list);
		list_del(pos);
		kfree(lm);
	}
err_dma:
	/* resources are stored in link list */
	list_for_each_safe(pos, n, &tsi148_bridge->dma_resources) {
		dma_ctrlr = list_entry(pos, struct vme_dma_resource, list);
		list_del(pos);
		kfree(dma_ctrlr);
	}
err_slave:
	/* resources are stored in link list */
	list_for_each_safe(pos, n, &tsi148_bridge->slave_resources) {
		slave_image = list_entry(pos, struct vme_slave_resource, list);
		list_del(pos);
		kfree(slave_image);
	}
err_master:
	/*
	 * NOTE(review): tsi148_device->flush_image (allocated above when
	 * err_chk is set) is not in master_resources and does not appear
	 * to be freed on this path - possible leak; confirm against
	 * upstream before changing.
	 */
	/* resources are stored in link list */
	list_for_each_safe(pos, n, &tsi148_bridge->master_resources) {
		master_image = list_entry(pos, struct vme_master_resource,
					  list);
		list_del(pos);
		kfree(master_image);
	}

	tsi148_irq_exit(tsi148_bridge, pdev);
err_irq:
err_test:
	iounmap(tsi148_device->base);
err_remap:
	pci_release_regions(pdev);
err_resource:
	pci_disable_device(pdev);
err_enable:
	kfree(tsi148_device);
err_driver:
	kfree(tsi148_bridge);
err_struct:
	return retval;
}
/*
 * PCI remove: quiesce the TSI148 hardware (windows, location monitors,
 * CRG mapping, error/interrupt state), then tear down software resources
 * in reverse order of tsi148_probe().
 */
static void tsi148_remove(struct pci_dev *pdev)
{
	struct list_head *pos = NULL;
	struct list_head *tmplist;
	struct vme_master_resource *master_image;
	struct vme_slave_resource *slave_image;
	struct vme_dma_resource *dma_ctrlr;
	int i;
	struct tsi148_driver *bridge;
	struct vme_bridge *tsi148_bridge = pci_get_drvdata(pdev);

	bridge = tsi148_bridge->driver_priv;

	dev_dbg(&pdev->dev, "Driver is being unloaded.\n");

	/*
	 * Shutdown all inbound and outbound windows.
	 */
	for (i = 0; i < 8; i++) {
		iowrite32be(0, bridge->base + TSI148_LCSR_IT[i] +
			    TSI148_LCSR_OFFSET_ITAT);
		iowrite32be(0, bridge->base + TSI148_LCSR_OT[i] +
			    TSI148_LCSR_OFFSET_OTAT);
	}

	/*
	 * Shutdown Location monitor.
	 */
	iowrite32be(0, bridge->base + TSI148_LCSR_LMAT);

	/*
	 * Shutdown CRG map.
	 */
	iowrite32be(0, bridge->base + TSI148_LCSR_CSRAT);

	/*
	 * Clear error status.
	 */
	iowrite32be(0xFFFFFFFF, bridge->base + TSI148_LCSR_EDPAT);
	iowrite32be(0xFFFFFFFF, bridge->base + TSI148_LCSR_VEAT);
	iowrite32be(0x07000700, bridge->base + TSI148_LCSR_PSTAT);

	/*
	 * Remove VIRQ interrupt (if any)
	 */
	if (ioread32be(bridge->base + TSI148_LCSR_VICR) & 0x800)
		iowrite32be(0x8000, bridge->base + TSI148_LCSR_VICR);

	/*
	 * Map all Interrupts to PCI INTA
	 */
	iowrite32be(0x0, bridge->base + TSI148_LCSR_INTM1);
	iowrite32be(0x0, bridge->base + TSI148_LCSR_INTM2);

	tsi148_irq_exit(tsi148_bridge, pdev);

	vme_unregister_bridge(tsi148_bridge);

	tsi148_crcsr_exit(tsi148_bridge, pdev);

	/*
	 * NOTE(review): the location monitor resources added to
	 * lm_resources in tsi148_probe() are not freed below, and
	 * neither is flush_image (allocated when err_chk is set) -
	 * possible memory leaks; confirm against upstream.
	 */

	/* resources are stored in link list */
	list_for_each_safe(pos, tmplist, &tsi148_bridge->dma_resources) {
		dma_ctrlr = list_entry(pos, struct vme_dma_resource, list);
		list_del(pos);
		kfree(dma_ctrlr);
	}

	/* resources are stored in link list */
	list_for_each_safe(pos, tmplist, &tsi148_bridge->slave_resources) {
		slave_image = list_entry(pos, struct vme_slave_resource, list);
		list_del(pos);
		kfree(slave_image);
	}

	/* resources are stored in link list */
	list_for_each_safe(pos, tmplist, &tsi148_bridge->master_resources) {
		master_image = list_entry(pos, struct vme_master_resource,
					  list);
		list_del(pos);
		kfree(master_image);
	}

	iounmap(bridge->base);

	pci_release_regions(pdev);

	pci_disable_device(pdev);

	kfree(tsi148_bridge->driver_priv);

	kfree(tsi148_bridge);
}
module_pci_driver(tsi148_driver);

/* Module parameters, settable at load time only (perm 0: not in sysfs) */
MODULE_PARM_DESC(err_chk, "Check for VME errors on reads and writes");
module_param(err_chk, bool, 0);

MODULE_PARM_DESC(geoid, "Override geographical addressing");
module_param(geoid, int, 0);

MODULE_DESCRIPTION("VME driver for the Tundra Tempe VME bridge");
MODULE_LICENSE("GPL");
| linux-master | drivers/staging/vme_user/vme_tsi148.c |
// SPDX-License-Identifier: GPL-2.0
/* Staging board support for KZM9D. Enable not-yet-DT-capable devices here. */
#include <linux/kernel.h>
#include <linux/platform_device.h>
#include "board.h"
/* Register window and interrupt line of the USBS1 (emxx_udc) controller */
static struct resource usbs1_res[] __initdata = {
	DEFINE_RES_MEM(0xe2800000, 0x2000),
	DEFINE_RES_IRQ(159),
};
/*
 * Board init hook: register the emxx_udc USB device controller unless a
 * device-tree node already covers its register window.
 */
static void __init kzm9d_init(void)
{
	board_staging_gic_setup_xlate("arm,pl390", 32);

	/* Skip registration if the device is already described in DT */
	if (board_staging_dt_node_available(usbs1_res, ARRAY_SIZE(usbs1_res)))
		return;

	board_staging_gic_fixup_resources(usbs1_res, ARRAY_SIZE(usbs1_res));
	platform_device_register_simple("emxx_udc", -1, usbs1_res,
					ARRAY_SIZE(usbs1_res));
}
board_staging("renesas,kzm9d", kzm9d_init);
| linux-master | drivers/staging/board/kzm9d.c |
// SPDX-License-Identifier: GPL-2.0
/*
* Copyright (C) 2014 Magnus Damm
* Copyright (C) 2015 Glider bvba
*/
#define pr_fmt(fmt) "board_staging: " fmt
#include <linux/clkdev.h>
#include <linux/init.h>
#include <linux/irq.h>
#include <linux/device.h>
#include <linux/kernel.h>
#include <linux/of.h>
#include <linux/of_address.h>
#include <linux/of_irq.h>
#include <linux/platform_device.h>
#include <linux/pm_domain.h>
#include "board.h"
static struct device_node *irqc_node __initdata;
static unsigned int irqc_base __initdata;
/*
 * Scan all device-tree nodes and report whether any node's first memory
 * resource starts at @base_address.
 */
static bool find_by_address(u64 base_address)
{
	struct device_node *dn;
	struct resource res;

	for (dn = of_find_all_nodes(NULL); dn; dn = of_find_all_nodes(dn)) {
		if (of_address_to_resource(dn, 0, &res))
			continue;
		if (res.start != base_address)
			continue;
		/* Match found - drop the reference before returning */
		of_node_put(dn);
		return true;
	}

	return false;
}
/*
 * Return true if any memory resource in @resource already has a matching
 * device-tree node, i.e. the device is described in DT.
 */
bool __init board_staging_dt_node_available(const struct resource *resource,
					    unsigned int num_resources)
{
	const struct resource *r;
	unsigned int i;

	for (i = 0, r = resource; i < num_resources; i++, r++) {
		if (resource_type(r) != IORESOURCE_MEM)
			continue;
		if (find_by_address(r->start))
			return true; /* DT node available */
	}

	return false; /* Nothing found */
}
/*
 * Look up the interrupt controller node matching @gic_match and record it
 * together with @base (the hardware IRQ offset subtracted when translating
 * legacy IRQ numbers in gic_fixup_resource()).
 *
 * Returns 0 on success, -ENOENT if no matching node exists.
 */
int __init board_staging_gic_setup_xlate(const char *gic_match,
					 unsigned int base)
{
	/* Must only be set up once */
	WARN_ON(irqc_node);

	irqc_node = of_find_compatible_node(NULL, NULL, gic_match);

	WARN_ON(!irqc_node);
	if (!irqc_node)
		return -ENOENT;

	irqc_base = base;
	return 0;
}
/*
 * Translate a legacy hardware IRQ resource into a Linux virq using the
 * interrupt controller registered via board_staging_gic_setup_xlate(),
 * rewriting res->start in place. Non-IRQ resources are left untouched.
 */
static void __init gic_fixup_resource(struct resource *res)
{
	struct of_phandle_args irq_data;
	unsigned int hwirq = res->start;
	unsigned int virq;

	/* Nothing to do without an IRQ resource or a registered controller */
	if (resource_type(res) != IORESOURCE_IRQ || !irqc_node)
		return;

	/* Build a three-cell interrupt specifier for the controller */
	irq_data.np = irqc_node;
	irq_data.args_count = 3;
	irq_data.args[0] = 0; /* presumably the SPI class cell - confirm against the GIC binding */
	irq_data.args[1] = hwirq - irqc_base;

	/* Map the resource trigger flags onto DT trigger types */
	switch (res->flags &
		(IORESOURCE_IRQ_LOWEDGE | IORESOURCE_IRQ_HIGHEDGE |
		 IORESOURCE_IRQ_LOWLEVEL | IORESOURCE_IRQ_HIGHLEVEL)) {
	case IORESOURCE_IRQ_LOWEDGE:
		irq_data.args[2] = IRQ_TYPE_EDGE_FALLING;
		break;
	case IORESOURCE_IRQ_HIGHEDGE:
		irq_data.args[2] = IRQ_TYPE_EDGE_RISING;
		break;
	case IORESOURCE_IRQ_LOWLEVEL:
		irq_data.args[2] = IRQ_TYPE_LEVEL_LOW;
		break;
	case IORESOURCE_IRQ_HIGHLEVEL:
	default:
		/* Default to active-high level trigger */
		irq_data.args[2] = IRQ_TYPE_LEVEL_HIGH;
		break;
	}

	virq = irq_create_of_mapping(&irq_data);
	if (WARN_ON(!virq))
		return;

	pr_debug("hwirq %u -> virq %u\n", hwirq, virq);
	res->start = virq;
}
/* Apply gic_fixup_resource() to each entry of the @res array. */
void __init board_staging_gic_fixup_resources(struct resource *res,
					      unsigned int nres)
{
	struct resource *r;

	for (r = res; r < res + nres; r++)
		gic_fixup_resource(r);
}
/*
 * Register a clkdev alias (con_id, dev_id) for clock @bsc->clk so the
 * staging device can look its clock up by name.
 *
 * Returns 0 on success or a negative errno from clk_add_alias().
 */
int __init board_staging_register_clock(const struct board_staging_clk *bsc)
{
	int error;

	pr_debug("Aliasing clock %s for con_id %s dev_id %s\n", bsc->clk,
		 bsc->con_id, bsc->dev_id);
	error = clk_add_alias(bsc->con_id, bsc->dev_id, bsc->clk, NULL);
	if (error)
		pr_err("Failed to alias clock %s (%d)\n", bsc->clk, error);

	return error;
}
#ifdef CONFIG_PM_GENERIC_DOMAINS_OF
/*
 * Attach @pdev to the generic PM domain found at device-tree path @domain.
 * Returns 0 on success or a negative errno.
 */
static int board_staging_add_dev_domain(struct platform_device *pdev,
					const char *domain)
{
	struct device *dev = &pdev->dev;
	struct of_phandle_args pd_args;
	struct device_node *np;

	np = of_find_node_by_path(domain);
	if (!np) {
		pr_err("Cannot find domain node %s\n", domain);
		return -ENOENT;
	}

	pd_args.np = np;
	pd_args.args_count = 0;

	/* Initialization similar to device_pm_init_common() */
	spin_lock_init(&dev->power.lock);
	dev->power.early_init = true;

	return of_genpd_add_device(&pd_args, dev);
}
#else
/* PM domains compiled out: attaching is a successful no-op */
static inline int board_staging_add_dev_domain(struct platform_device *pdev,
					       const char *domain)
{
	return 0;
}
#endif
/*
 * Register one staging platform device: skip it when DT already describes
 * it, otherwise fix up its IRQ resources, alias its clocks, attach the PM
 * domain (if any) and register the platform device.
 *
 * Returns 0 on success, -EEXIST when the device is already in DT, or the
 * error from platform_device_register().
 */
int __init board_staging_register_device(const struct board_staging_dev *dev)
{
	struct platform_device *pdev = dev->pdev;
	unsigned int i;
	int error;

	pr_debug("Trying to register device %s\n", pdev->name);
	if (board_staging_dt_node_available(pdev->resource,
					    pdev->num_resources)) {
		pr_warn("Skipping %s, already in DT\n", pdev->name);
		return -EEXIST;
	}

	board_staging_gic_fixup_resources(pdev->resource, pdev->num_resources);

	for (i = 0; i < dev->nclocks; i++)
		board_staging_register_clock(&dev->clocks[i]);

	if (dev->domain)
		board_staging_add_dev_domain(pdev, dev->domain);

	error = platform_device_register(pdev);
	if (error)
		pr_err("Failed to register device %s (%d)\n", pdev->name,
		       error);

	return error;
}
/* Register every entry of a board's staging device table. */
void __init board_staging_register_devices(const struct board_staging_dev *devs,
					   unsigned int ndevs)
{
	const struct board_staging_dev *d;

	for (d = devs; d < devs + ndevs; d++)
		board_staging_register_device(d);
}
| linux-master | drivers/staging/board/board.c |
// SPDX-License-Identifier: GPL-2.0
/*
* Staging board support for Armadillo 800 eva.
* Enable not-yet-DT-capable devices here.
*
* Based on board-armadillo800eva.c
*
* Copyright (C) 2012 Renesas Solutions Corp.
* Copyright (C) 2012 Kuninori Morimoto <[email protected]>
*/
#include <linux/dma-mapping.h>
#include <linux/fb.h>
#include <linux/kernel.h>
#include <linux/platform_device.h>
#include <linux/videodev2.h>
#include <video/sh_mobile_lcdc.h>
#include "board.h"
/* 800x480 panel timing for the AMPIER AM-800480 LCD */
static struct fb_videomode lcdc0_mode = {
	.name = "AMPIER/AM-800480",
	.xres = 800,
	.yres = 480,
	.left_margin = 88,
	.right_margin = 40,
	.hsync_len = 128,
	.upper_margin = 20,
	.lower_margin = 5,
	.vsync_len = 5,
	.sync = 0,
};

/* Platform data for the sh_mobile LCDC framebuffer, main LCD channel */
static struct sh_mobile_lcdc_info lcdc0_info = {
	.clock_source = LCDC_CLK_BUS,
	.ch[0] = {
		.chan = LCDC_CHAN_MAINLCD,
		.fourcc = V4L2_PIX_FMT_RGB565,
		.interface_type = RGB24,
		.clock_divider = 5,
		.flags = 0,
		.lcd_modes = &lcdc0_mode,
		.num_modes = 1,
		.panel_cfg = {
			/* panel dimensions - presumably millimetres; confirm */
			.width = 111,
			.height = 68,
		},
	},
};

/* LCDC0 register window and interrupt (hwirq 177, GIC SPI base 32) */
static struct resource lcdc0_resources[] = {
	DEFINE_RES_MEM_NAMED(0xfe940000, 0x4000, "LCD0"),
	DEFINE_RES_IRQ(177 + 32),
};

static struct platform_device lcdc0_device = {
	.name = "sh_mobile_lcdc_fb",
	.num_resources = ARRAY_SIZE(lcdc0_resources),
	.resource = lcdc0_resources,
	.id = 0,
	.dev = {
		.platform_data = &lcdc0_info,
		.coherent_dma_mask = DMA_BIT_MASK(32),
	},
};

/* Clock alias so sh_mobile_lcdc_fb.0 can claim the "lcdc0" clock */
static const struct board_staging_clk lcdc0_clocks[] __initconst = {
	{ "lcdc0", NULL, "sh_mobile_lcdc_fb.0" },
};

/* Devices not yet supported by DT on this board */
static const struct board_staging_dev armadillo800eva_devices[] __initconst = {
	{
		.pdev = &lcdc0_device,
		.clocks = lcdc0_clocks,
		.nclocks = ARRAY_SIZE(lcdc0_clocks),
		.domain = "/system-controller@e6180000/pm-domains/c5/a4lc@1"
	},
};
/* Board init hook: register the not-yet-DT-capable LCDC0 device */
static void __init armadillo800eva_init(void)
{
	board_staging_gic_setup_xlate("arm,pl390", 32);
	board_staging_register_devices(armadillo800eva_devices,
				       ARRAY_SIZE(armadillo800eva_devices));
}
board_staging("renesas,armadillo800eva", armadillo800eva_init);
| linux-master | drivers/staging/board/armadillo800eva.c |
// SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause
/*
* Copyright (c) 2014 Raspberry Pi (Trading) Ltd. All rights reserved.
* Copyright (c) 2010-2012 Broadcom. All rights reserved.
*/
#include <linux/debugfs.h>
#include "vchiq_core.h"
#include "vchiq_arm.h"
#include "vchiq_debugfs.h"
#ifdef CONFIG_DEBUG_FS
#define DEBUGFS_WRITE_BUF_SIZE 256
#define VCHIQ_LOG_ERROR_STR "error"
#define VCHIQ_LOG_WARNING_STR "warning"
#define VCHIQ_LOG_INFO_STR "info"
#define VCHIQ_LOG_TRACE_STR "trace"
/* Global 'vchiq' debugfs and clients entry used by all instances */
static struct dentry *vchiq_dbg_dir;
static struct dentry *vchiq_dbg_clients;
/* Log category debugfs entries */
struct vchiq_debugfs_log_entry {
	const char *name;	/* file name created under vchiq/log/ */
	void *plevel;		/* pointer to the int log level it controls */
};

/* One entry per runtime-tunable log category */
static struct vchiq_debugfs_log_entry vchiq_debugfs_log_entries[] = {
	{ "core", &vchiq_core_log_level },
	{ "msg",  &vchiq_core_msg_log_level },
	{ "sync", &vchiq_sync_log_level },
	{ "susp", &vchiq_susp_log_level },
	{ "arm",  &vchiq_arm_log_level },
};
/* Render the current log level of one category as its textual name. */
static int debugfs_log_show(struct seq_file *f, void *offset)
{
	const int *level = f->private;
	const char *name = NULL;

	switch (*level) {
	case VCHIQ_LOG_ERROR:
		name = VCHIQ_LOG_ERROR_STR;
		break;
	case VCHIQ_LOG_WARNING:
		name = VCHIQ_LOG_WARNING_STR;
		break;
	case VCHIQ_LOG_INFO:
		name = VCHIQ_LOG_INFO_STR;
		break;
	case VCHIQ_LOG_TRACE:
		name = VCHIQ_LOG_TRACE_STR;
		break;
	default:
		/* Unknown numeric level: fall through and print "(null)" */
		break;
	}

	seq_printf(f, "%s\n", name ? name : "(null)");

	return 0;
}
/* Open hook: forward the log-level pointer (i_private) to the show fn */
static int debugfs_log_open(struct inode *inode, struct file *file)
{
	return single_open(file, debugfs_log_show, inode->i_private);
}
/*
 * Parse a log-level name ("error", "warning", "info", "trace") written to
 * the category's debugfs file; any unrecognised text resets the level to
 * VCHIQ_LOG_DEFAULT. Returns the number of bytes consumed or -EFAULT.
 */
static ssize_t debugfs_log_write(struct file *file,
				 const char __user *buffer,
				 size_t count, loff_t *ppos)
{
	struct seq_file *f = (struct seq_file *)file->private_data;
	int *levp = f->private;
	char kbuf[DEBUGFS_WRITE_BUF_SIZE + 1];

	/*
	 * Reject zero-length writes: the kbuf[count - 1] truncation below
	 * would otherwise index kbuf[-1], writing before the buffer.
	 */
	if (count == 0)
		return 0;

	memset(kbuf, 0, DEBUGFS_WRITE_BUF_SIZE + 1);
	if (count >= DEBUGFS_WRITE_BUF_SIZE)
		count = DEBUGFS_WRITE_BUF_SIZE;

	if (copy_from_user(kbuf, buffer, count))
		return -EFAULT;
	/* Drop the last byte (typically the newline appended by echo(1)) */
	kbuf[count - 1] = 0;

	if (strncmp("error", kbuf, strlen("error")) == 0)
		*levp = VCHIQ_LOG_ERROR;
	else if (strncmp("warning", kbuf, strlen("warning")) == 0)
		*levp = VCHIQ_LOG_WARNING;
	else if (strncmp("info", kbuf, strlen("info")) == 0)
		*levp = VCHIQ_LOG_INFO;
	else if (strncmp("trace", kbuf, strlen("trace")) == 0)
		*levp = VCHIQ_LOG_TRACE;
	else
		*levp = VCHIQ_LOG_DEFAULT;

	*ppos += count;

	return count;
}
/* File operations for the per-category vchiq/log/<name> entries */
static const struct file_operations debugfs_log_fops = {
	.owner = THIS_MODULE,
	.open = debugfs_log_open,
	.write = debugfs_log_write,
	.read = seq_read,
	.llseek = seq_lseek,
	.release = single_release,
};
/* Print the instance's current videocore use count. */
static int debugfs_usecount_show(struct seq_file *f, void *offset)
{
	struct vchiq_instance *instance = f->private;

	seq_printf(f, "%d\n", vchiq_instance_get_use_count(instance));

	return 0;
}
DEFINE_SHOW_ATTRIBUTE(debugfs_usecount);
/* Show whether tracing is enabled for this instance ("Y" or "N"). */
static int debugfs_trace_show(struct seq_file *f, void *offset)
{
	struct vchiq_instance *instance = f->private;

	seq_printf(f, "%s\n",
		   vchiq_instance_get_trace(instance) ? "Y" : "N");

	return 0;
}
/* Open hook: forward the vchiq instance (i_private) to the show fn */
static int debugfs_trace_open(struct inode *inode, struct file *file)
{
	return single_open(file, debugfs_trace_show, inode->i_private);
}
/*
 * Toggle per-instance tracing from the first character written:
 * 'Y'/'y'/'1' enables, 'N'/'n'/'0' disables, anything else is ignored.
 * Returns the number of bytes consumed or -EFAULT.
 */
static ssize_t debugfs_trace_write(struct file *file,
				   const char __user *buffer,
				   size_t count, loff_t *ppos)
{
	struct seq_file *f = (struct seq_file *)file->private_data;
	struct vchiq_instance *instance = f->private;
	char firstchar;

	/*
	 * Guard against zero-length writes: the code below always copies
	 * one byte, which would read past the caller's stated length.
	 */
	if (count == 0)
		return 0;

	if (copy_from_user(&firstchar, buffer, 1))
		return -EFAULT;

	switch (firstchar) {
	case 'Y':
	case 'y':
	case '1':
		vchiq_instance_set_trace(instance, 1);
		break;
	case 'N':
	case 'n':
	case '0':
		vchiq_instance_set_trace(instance, 0);
		break;
	default:
		break;
	}

	*ppos += count;

	return count;
}
/* File operations for the per-instance "trace" debugfs entry */
static const struct file_operations debugfs_trace_fops = {
	.owner = THIS_MODULE,
	.open = debugfs_trace_open,
	.write = debugfs_trace_write,
	.read = seq_read,
	.llseek = seq_lseek,
	.release = single_release,
};
/* add an instance (process) to the debugfs entries */
void vchiq_debugfs_add_instance(struct vchiq_instance *instance)
{
	char pidstr[16];
	struct dentry *top;

	/* Directory is named after the owning process id */
	snprintf(pidstr, sizeof(pidstr), "%d",
		 vchiq_instance_get_pid(instance));

	top = debugfs_create_dir(pidstr, vchiq_dbg_clients);

	debugfs_create_file("use_count", 0444, top, instance,
			    &debugfs_usecount_fops);
	debugfs_create_file("trace", 0644, top, instance, &debugfs_trace_fops);

	/* Remember the root dentry so the subtree can be removed later */
	vchiq_instance_get_debugfs_node(instance)->dentry = top;
}
void vchiq_debugfs_remove_instance(struct vchiq_instance *instance)
{
struct vchiq_debugfs_node *node =
vchiq_instance_get_debugfs_node(instance);
debugfs_remove_recursive(node->dentry);
}
void vchiq_debugfs_init(void)
{
struct dentry *dir;
int i;
vchiq_dbg_dir = debugfs_create_dir("vchiq", NULL);
vchiq_dbg_clients = debugfs_create_dir("clients", vchiq_dbg_dir);
/* create an entry under <debugfs>/vchiq/log for each log category */
dir = debugfs_create_dir("log", vchiq_dbg_dir);
for (i = 0; i < ARRAY_SIZE(vchiq_debugfs_log_entries); i++)
debugfs_create_file(vchiq_debugfs_log_entries[i].name, 0644,
dir, vchiq_debugfs_log_entries[i].plevel,
&debugfs_log_fops);
}
/* remove all the debugfs entries (clients/ and log/ go with the root) */
void vchiq_debugfs_deinit(void)
{
	debugfs_remove_recursive(vchiq_dbg_dir);
}
#else /* CONFIG_DEBUG_FS */

/* No-op stubs so callers build unchanged when debugfs is disabled */
void vchiq_debugfs_init(void)
{
}

void vchiq_debugfs_deinit(void)
{
}

void vchiq_debugfs_add_instance(struct vchiq_instance *instance)
{
}

void vchiq_debugfs_remove_instance(struct vchiq_instance *instance)
{
}

#endif /* CONFIG_DEBUG_FS */
| linux-master | drivers/staging/vc04_services/interface/vchiq_arm/vchiq_debugfs.c |
// SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause
/*
* Copyright (c) 2014 Raspberry Pi (Trading) Ltd. All rights reserved.
* Copyright (c) 2010-2012 Broadcom. All rights reserved.
*/
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/sched/signal.h>
#include <linux/types.h>
#include <linux/errno.h>
#include <linux/cdev.h>
#include <linux/fs.h>
#include <linux/device.h>
#include <linux/mm.h>
#include <linux/highmem.h>
#include <linux/pagemap.h>
#include <linux/bug.h>
#include <linux/completion.h>
#include <linux/list.h>
#include <linux/of.h>
#include <linux/platform_device.h>
#include <linux/compat.h>
#include <linux/dma-mapping.h>
#include <linux/rcupdate.h>
#include <linux/delay.h>
#include <linux/slab.h>
#include <linux/interrupt.h>
#include <linux/io.h>
#include <linux/uaccess.h>
#include <soc/bcm2835/raspberrypi-firmware.h>
#include "vchiq_core.h"
#include "vchiq_ioctl.h"
#include "vchiq_arm.h"
#include "vchiq_debugfs.h"
#include "vchiq_connected.h"
#include "vchiq_pagelist.h"
#define DEVICE_NAME "vchiq"
#define TOTAL_SLOTS (VCHIQ_SLOT_ZERO_SLOTS + 2 * 32)
#define MAX_FRAGMENTS (VCHIQ_NUM_CURRENT_BULKS * 2)
#define VCHIQ_PLATFORM_FRAGMENTS_OFFSET_IDX 0
#define VCHIQ_PLATFORM_FRAGMENTS_COUNT_IDX 1
#define BELL0 0x00
#define BELL2 0x08
#define ARM_DS_ACTIVE BIT(2)
/* Override the default prefix, which would be vchiq_arm (from the filename) */
#undef MODULE_PARAM_PREFIX
#define MODULE_PARAM_PREFIX DEVICE_NAME "."
#define KEEPALIVE_VER 1
#define KEEPALIVE_VER_MIN KEEPALIVE_VER
/* Run time control of log level, based on KERN_XXX level. */
int vchiq_arm_log_level = VCHIQ_LOG_DEFAULT;
int vchiq_susp_log_level = VCHIQ_LOG_ERROR;
DEFINE_SPINLOCK(msg_queue_spinlock);
struct vchiq_state g_state;
static struct platform_device *bcm2835_camera;
static struct platform_device *bcm2835_audio;
/* Per-SoC configuration selected from the OF match data */
struct vchiq_drvdata {
	const unsigned int cache_line_size;	/* VPU cache line size, bytes */
	struct rpi_firmware *fw;
};

static struct vchiq_drvdata bcm2835_drvdata = {
	.cache_line_size = 32,
};

static struct vchiq_drvdata bcm2836_drvdata = {
	.cache_line_size = 64,
};
struct vchiq_arm_state {
/* Keepalive-related data */
struct task_struct *ka_thread;
struct completion ka_evt;
atomic_t ka_use_count;
atomic_t ka_use_ack_count;
atomic_t ka_release_count;
rwlock_t susp_res_lock;
struct vchiq_state *state;
/*
* Global use count for videocore.
* This is equal to the sum of the use counts for all services. When
* this hits zero the videocore suspend procedure will be initiated.
*/
int videocore_use_count;
/*
* Use count to track requests from videocore peer.
* This use count is not associated with a service, so needs to be
* tracked separately with the state.
*/
int peer_use_count;
/*
* Flag to indicate that the first vchiq connect has made it through.
* This means that both sides should be fully ready, and we should
* be able to suspend after this point.
*/
int first_connect;
};
/* Platform state wrapper; inited flags one-time arm_state setup */
struct vchiq_2835_state {
	int inited;
	struct vchiq_arm_state arm_state;
};

/* Bookkeeping for one buffer described to the VPU via a pagelist */
struct vchiq_pagelist_info {
	struct pagelist *pagelist;		/* coherent descriptor buffer */
	size_t pagelist_buffer_size;
	dma_addr_t dma_addr;
	enum dma_data_direction dma_dir;
	unsigned int num_pages;
	unsigned int pages_need_release;	/* nonzero: unpin pages on cleanup */
	struct page **pages;
	struct scatterlist *scatterlist;
	unsigned int scatterlist_mapped;	/* nonzero: dma_unmap_sg on cleanup */
};
static void __iomem *g_regs;
/* This value is the size of the L2 cache lines as understood by the
* VPU firmware, which determines the required alignment of the
* offsets/sizes in pagelists.
*
* Modern VPU firmware looks for a DT "cache-line-size" property in
* the VCHIQ node and will overwrite it with the actual L2 cache size,
* which the kernel must then respect. That property was rejected
* upstream, so we have to use the VPU firmware's compatibility value
* of 32.
*/
static unsigned int g_cache_line_size = 32;
static unsigned int g_fragments_size;
static char *g_fragments_base;
static char *g_free_fragments;
static struct semaphore g_free_fragments_sema;
static DEFINE_SEMAPHORE(g_free_fragments_mutex, 1);
static int
vchiq_blocking_bulk_transfer(struct vchiq_instance *instance, unsigned int handle, void *data,
unsigned int size, enum vchiq_bulk_dir dir);
/*
 * Doorbell interrupt handler: reading BELL0 clears the doorbell; if the
 * VPU actually rang it, poll all remote events.
 */
static irqreturn_t
vchiq_doorbell_irq(int irq, void *dev_id)
{
	struct vchiq_state *state = dev_id;
	unsigned int status;

	/* Read (and clear) the doorbell */
	status = readl(g_regs + BELL0);

	/* Not our interrupt unless the doorbell was rung */
	if (!(status & ARM_DS_ACTIVE))
		return IRQ_NONE;

	remote_event_pollall(state);

	return IRQ_HANDLED;
}
/*
 * Release everything create_pagelist() acquired: DMA-unmap the
 * scatterlist (if mapped), unpin the user pages (if pinned), then free
 * the coherent pagelist buffer itself.
 */
static void
cleanup_pagelistinfo(struct vchiq_instance *instance, struct vchiq_pagelist_info *pagelistinfo)
{
	struct device *dev = instance->state->dev;

	if (pagelistinfo->scatterlist_mapped)
		dma_unmap_sg(dev, pagelistinfo->scatterlist,
			     pagelistinfo->num_pages, pagelistinfo->dma_dir);

	if (pagelistinfo->pages_need_release)
		unpin_user_pages(pagelistinfo->pages, pagelistinfo->num_pages);

	dma_free_coherent(dev, pagelistinfo->pagelist_buffer_size,
			  pagelistinfo->pagelist, pagelistinfo->dma_addr);
}
/*
 * addrs[] packs a page-aligned base address with (page count - 1) in the
 * low bits.  Return true when @addr starts exactly where block k-1 ends,
 * so the two can be merged.
 */
static inline bool
is_adjacent_block(u32 *addrs, u32 addr, unsigned int k)
{
	u32 end_of_prev;

	if (k == 0)
		return false;

	end_of_prev = (addrs[k - 1] & PAGE_MASK) +
		      (((addrs[k - 1] & ~PAGE_MASK) + 1) << PAGE_SHIFT);

	return (addr & PAGE_MASK) == end_of_prev;
}
/* There is a potential problem with partial cache lines (pages?)
 * at the ends of the block when reading. If the CPU accessed anything in
 * the same line (page?) then it may have pulled old data into the cache,
 * obscuring the new data underneath. We can solve this by transferring the
 * partial cache lines separately, and allowing the ARM to copy into the
 * cached area.
 */

/*
 * Build a pagelist describing the kernel buffer (@buf) or user buffer
 * (@ubuf) so the VPU can DMA to/from it; exactly one of buf/ubuf is
 * non-NULL.  Returns the bookkeeping struct on success, NULL on any
 * failure (overflow, allocation, page lookup/pinning, DMA mapping,
 * or an interrupted wait for a fragment).
 */
static struct vchiq_pagelist_info *
create_pagelist(struct vchiq_instance *instance, char *buf, char __user *ubuf,
		size_t count, unsigned short type)
{
	struct pagelist *pagelist;
	struct vchiq_pagelist_info *pagelistinfo;
	struct page **pages;
	u32 *addrs;
	unsigned int num_pages, offset, i, k;
	int actual_pages;
	size_t pagelist_size;
	struct scatterlist *scatterlist, *sg;
	int dma_buffers;
	dma_addr_t dma_addr;

	/* Reject sizes large enough to overflow the arithmetic below. */
	if (count >= INT_MAX - PAGE_SIZE)
		return NULL;

	/* Offset of the data within its first page. */
	if (buf)
		offset = (uintptr_t)buf & (PAGE_SIZE - 1);
	else
		offset = (uintptr_t)ubuf & (PAGE_SIZE - 1);
	num_pages = DIV_ROUND_UP(count + offset, PAGE_SIZE);

	/* Guard the pagelist_size computation against size_t overflow. */
	if ((size_t)num_pages > (SIZE_MAX - sizeof(struct pagelist) -
				 sizeof(struct vchiq_pagelist_info)) /
				(sizeof(u32) + sizeof(pages[0]) +
				 sizeof(struct scatterlist)))
		return NULL;

	pagelist_size = sizeof(struct pagelist) +
			(num_pages * sizeof(u32)) +
			(num_pages * sizeof(pages[0]) +
			(num_pages * sizeof(struct scatterlist))) +
			sizeof(struct vchiq_pagelist_info);

	/* Allocate enough storage to hold the page pointers and the page
	 * list
	 */
	pagelist = dma_alloc_coherent(instance->state->dev, pagelist_size, &dma_addr,
				      GFP_KERNEL);

	vchiq_log_trace(vchiq_arm_log_level, "%s - %pK", __func__, pagelist);

	if (!pagelist)
		return NULL;

	/* Carve the single coherent allocation into its sub-arrays:
	 * addrs[], pages[], scatterlist[], then the info struct itself.
	 */
	addrs = pagelist->addrs;
	pages = (struct page **)(addrs + num_pages);
	scatterlist = (struct scatterlist *)(pages + num_pages);
	pagelistinfo = (struct vchiq_pagelist_info *)
		       (scatterlist + num_pages);

	pagelist->length = count;
	pagelist->type = type;
	pagelist->offset = offset;

	/* Populate the fields of the pagelistinfo structure */
	pagelistinfo->pagelist = pagelist;
	pagelistinfo->pagelist_buffer_size = pagelist_size;
	pagelistinfo->dma_addr = dma_addr;
	pagelistinfo->dma_dir = (type == PAGELIST_WRITE) ?
				 DMA_TO_DEVICE : DMA_FROM_DEVICE;
	pagelistinfo->num_pages = num_pages;
	pagelistinfo->pages_need_release = 0;
	pagelistinfo->pages = pages;
	pagelistinfo->scatterlist = scatterlist;
	pagelistinfo->scatterlist_mapped = 0;

	if (buf) {
		/* Kernel (vmalloc) buffer: look up each backing page. */
		unsigned long length = count;
		unsigned int off = offset;

		for (actual_pages = 0; actual_pages < num_pages;
		     actual_pages++) {
			struct page *pg =
				vmalloc_to_page((buf +
						 (actual_pages * PAGE_SIZE)));
			size_t bytes = PAGE_SIZE - off;

			if (!pg) {
				cleanup_pagelistinfo(instance, pagelistinfo);
				return NULL;
			}

			if (bytes > length)
				bytes = length;
			pages[actual_pages] = pg;
			length -= bytes;
			off = 0;
		}
		/* do not try and release vmalloc pages */
	} else {
		/* User buffer: pin the pages so they cannot move under DMA. */
		actual_pages = pin_user_pages_fast((unsigned long)ubuf & PAGE_MASK, num_pages,
						   type == PAGELIST_READ, pages);

		if (actual_pages != num_pages) {
			vchiq_log_info(vchiq_arm_log_level,
				       "%s - only %d/%d pages locked",
				       __func__, actual_pages, num_pages);

			/* This is probably due to the process being killed */
			if (actual_pages > 0)
				unpin_user_pages(pages, actual_pages);
			cleanup_pagelistinfo(instance, pagelistinfo);
			return NULL;
		}
		/* release user pages */
		pagelistinfo->pages_need_release = 1;
	}

	/*
	 * Initialize the scatterlist so that the magic cookie
	 * is filled if debugging is enabled
	 */
	sg_init_table(scatterlist, num_pages);
	/* Now set the pages for each scatterlist */
	for (i = 0; i < num_pages; i++) {
		unsigned int len = PAGE_SIZE - offset;

		if (len > count)
			len = count;
		sg_set_page(scatterlist + i, pages[i], len, offset);
		/* Only the first entry carries the sub-page offset. */
		offset = 0;
		count -= len;
	}

	dma_buffers = dma_map_sg(instance->state->dev,
				 scatterlist,
				 num_pages,
				 pagelistinfo->dma_dir);

	if (dma_buffers == 0) {
		cleanup_pagelistinfo(instance, pagelistinfo);
		return NULL;
	}

	pagelistinfo->scatterlist_mapped = 1;

	/* Combine adjacent blocks for performance */
	k = 0;
	for_each_sg(scatterlist, sg, dma_buffers, i) {
		u32 len = sg_dma_len(sg);
		u32 addr = sg_dma_address(sg);

		/* Note: addrs is the address + page_count - 1
		 * The firmware expects blocks after the first to be page-
		 * aligned and a multiple of the page size
		 */
		WARN_ON(len == 0);
		WARN_ON(i && (i != (dma_buffers - 1)) && (len & ~PAGE_MASK));
		WARN_ON(i && (addr & ~PAGE_MASK));
		if (is_adjacent_block(addrs, addr, k))
			addrs[k - 1] += ((len + PAGE_SIZE - 1) >> PAGE_SHIFT);
		else
			addrs[k++] = (addr & PAGE_MASK) |
				     (((len + PAGE_SIZE - 1) >> PAGE_SHIFT) - 1);
	}

	/* Partial cache lines (fragments) require special measures */
	if ((type == PAGELIST_READ) &&
	    ((pagelist->offset & (g_cache_line_size - 1)) ||
	     ((pagelist->offset + pagelist->length) &
	      (g_cache_line_size - 1)))) {
		char *fragments;

		/* Block until a fragment pair is free; bail on a signal. */
		if (down_interruptible(&g_free_fragments_sema)) {
			cleanup_pagelistinfo(instance, pagelistinfo);
			return NULL;
		}

		WARN_ON(!g_free_fragments);

		down(&g_free_fragments_mutex);
		fragments = g_free_fragments;
		WARN_ON(!fragments);
		g_free_fragments = *(char **)g_free_fragments;
		up(&g_free_fragments_mutex);
		/* Encode the fragment index into the type so the firmware
		 * and free_pagelist() can locate the fragment pair.
		 */
		pagelist->type = PAGELIST_READ_WITH_FRAGMENTS +
				 (fragments - g_fragments_base) / g_fragments_size;
	}

	return pagelistinfo;
}
/*
 * Tear down a transfer built by create_pagelist(): unmap the scatterlist,
 * copy any partial-cache-line fragments back into the buffer (reads only),
 * dirty the pages, and release all resources.  @actual is the number of
 * bytes actually transferred, or negative on error.
 */
static void
free_pagelist(struct vchiq_instance *instance, struct vchiq_pagelist_info *pagelistinfo,
	      int actual)
{
	struct pagelist *pagelist = pagelistinfo->pagelist;
	struct page **pages = pagelistinfo->pages;
	unsigned int num_pages = pagelistinfo->num_pages;

	vchiq_log_trace(vchiq_arm_log_level, "%s - %pK, %d",
			__func__, pagelistinfo->pagelist, actual);

	/*
	 * NOTE: dma_unmap_sg must be called before the
	 * cpu can touch any of the data/pages.
	 */
	dma_unmap_sg(instance->state->dev, pagelistinfo->scatterlist,
		     pagelistinfo->num_pages, pagelistinfo->dma_dir);
	pagelistinfo->scatterlist_mapped = 0;

	/* Deal with any partial cache lines (fragments) */
	if (pagelist->type >= PAGELIST_READ_WITH_FRAGMENTS && g_fragments_base) {
		/* create_pagelist() encoded the fragment index into the
		 * type field; recover the fragment pair's address.
		 */
		char *fragments = g_fragments_base +
			(pagelist->type - PAGELIST_READ_WITH_FRAGMENTS) *
			g_fragments_size;
		int head_bytes, tail_bytes;

		head_bytes = (g_cache_line_size - pagelist->offset) &
			     (g_cache_line_size - 1);
		tail_bytes = (pagelist->offset + actual) &
			     (g_cache_line_size - 1);

		/* Copy the head fragment into the first page. */
		if ((actual >= 0) && (head_bytes != 0)) {
			if (head_bytes > actual)
				head_bytes = actual;

			memcpy_to_page(pages[0],
				       pagelist->offset,
				       fragments,
				       head_bytes);
		}
		/* Copy the tail fragment into the last page. */
		if ((actual >= 0) && (head_bytes < actual) &&
		    (tail_bytes != 0))
			memcpy_to_page(pages[num_pages - 1],
				       (pagelist->offset + actual) &
				       (PAGE_SIZE - 1) & ~(g_cache_line_size - 1),
				       fragments + g_cache_line_size,
				       tail_bytes);

		/* Return the fragment pair to the shared free list and
		 * wake any waiter in create_pagelist().
		 */
		down(&g_free_fragments_mutex);
		*(char **)fragments = g_free_fragments;
		g_free_fragments = fragments;
		up(&g_free_fragments_mutex);
		up(&g_free_fragments_sema);
	}

	/* Need to mark all the pages dirty. */
	if (pagelist->type != PAGELIST_WRITE &&
	    pagelistinfo->pages_need_release) {
		unsigned int i;

		for (i = 0; i < num_pages; i++)
			set_page_dirty(pages[i]);
	}

	cleanup_pagelistinfo(instance, pagelistinfo);
}
/*
 * Platform bring-up: allocate the shared slot memory and the fragment
 * pool, initialise the core state, map the doorbell registers, hook the
 * IRQ, and finally hand the slot base address to the VideoCore firmware.
 * Returns 0 on success or a negative errno.
 */
static int vchiq_platform_init(struct platform_device *pdev, struct vchiq_state *state)
{
	struct device *dev = &pdev->dev;
	struct vchiq_drvdata *drvdata = platform_get_drvdata(pdev);
	struct rpi_firmware *fw = drvdata->fw;
	struct vchiq_slot_zero *vchiq_slot_zero;
	void *slot_mem;
	dma_addr_t slot_phys;
	u32 channelbase;
	int slot_mem_size, frag_mem_size;
	int err, irq, i;

	/*
	 * VCHI messages between the CPU and firmware use
	 * 32-bit bus addresses.
	 */
	err = dma_set_mask_and_coherent(dev, DMA_BIT_MASK(32));

	if (err < 0)
		return err;

	g_cache_line_size = drvdata->cache_line_size;
	/* Each fragment is a head+tail cache-line pair. */
	g_fragments_size = 2 * g_cache_line_size;

	/* Allocate space for the channels in coherent memory */
	slot_mem_size = PAGE_ALIGN(TOTAL_SLOTS * VCHIQ_SLOT_SIZE);
	frag_mem_size = PAGE_ALIGN(g_fragments_size * MAX_FRAGMENTS);

	slot_mem = dmam_alloc_coherent(dev, slot_mem_size + frag_mem_size,
				       &slot_phys, GFP_KERNEL);
	if (!slot_mem) {
		dev_err(dev, "could not allocate DMA memory\n");
		return -ENOMEM;
	}

	WARN_ON(((unsigned long)slot_mem & (PAGE_SIZE - 1)) != 0);

	vchiq_slot_zero = vchiq_init_slots(slot_mem, slot_mem_size);
	if (!vchiq_slot_zero)
		return -ENOMEM;

	/* Tell the firmware where the fragment pool lives (just past the
	 * slots, in the same allocation).
	 */
	vchiq_slot_zero->platform_data[VCHIQ_PLATFORM_FRAGMENTS_OFFSET_IDX] =
		(int)slot_phys + slot_mem_size;
	vchiq_slot_zero->platform_data[VCHIQ_PLATFORM_FRAGMENTS_COUNT_IDX] =
		MAX_FRAGMENTS;

	/* Thread the free list through the fragments themselves. */
	g_fragments_base = (char *)slot_mem + slot_mem_size;

	g_free_fragments = g_fragments_base;
	for (i = 0; i < (MAX_FRAGMENTS - 1); i++) {
		*(char **)&g_fragments_base[i * g_fragments_size] =
			&g_fragments_base[(i + 1) * g_fragments_size];
	}
	*(char **)&g_fragments_base[i * g_fragments_size] = NULL;
	sema_init(&g_free_fragments_sema, MAX_FRAGMENTS);

	err = vchiq_init_state(state, vchiq_slot_zero, dev);
	if (err)
		return err;

	g_regs = devm_platform_ioremap_resource(pdev, 0);
	if (IS_ERR(g_regs))
		return PTR_ERR(g_regs);

	irq = platform_get_irq(pdev, 0);
	if (irq <= 0)
		return irq;

	err = devm_request_irq(dev, irq, vchiq_doorbell_irq, IRQF_IRQPOLL,
			       "VCHIQ doorbell", state);
	if (err) {
		dev_err(dev, "failed to register irq=%d\n", irq);
		return err;
	}

	/* Send the base address of the slots to VideoCore */
	channelbase = slot_phys;
	err = rpi_firmware_property(fw, RPI_FIRMWARE_VCHIQ_INIT,
				    &channelbase, sizeof(channelbase));
	if (err) {
		dev_err(dev, "failed to send firmware property: %d\n", err);
		return err;
	}

	/* The firmware writes 0 back on success. */
	if (channelbase) {
		dev_err(dev, "failed to set channelbase (response: %x)\n",
			channelbase);
		return -ENXIO;
	}

	vchiq_log_info(vchiq_arm_log_level, "vchiq_init - done (slots %pK, phys %pad)",
		       vchiq_slot_zero, &slot_phys);

	vchiq_call_connected_callbacks();

	return 0;
}
/*
 * Initialise the ARM-side state: keepalive bookkeeping, the suspend/
 * resume lock, and the back-pointer to the core state.  A NULL
 * @arm_state is tolerated and ignored.
 */
static void
vchiq_arm_init_state(struct vchiq_state *state,
		     struct vchiq_arm_state *arm_state)
{
	if (!arm_state)
		return;

	rwlock_init(&arm_state->susp_res_lock);

	init_completion(&arm_state->ka_evt);
	atomic_set(&arm_state->ka_use_count, 0);
	atomic_set(&arm_state->ka_use_ack_count, 0);
	atomic_set(&arm_state->ka_release_count, 0);

	arm_state->state = state;
	arm_state->first_connect = 0;
}
/*
 * Allocate and initialise the platform-private state for @state.
 * Returns 0 on success, -ENOMEM on allocation failure.
 */
int
vchiq_platform_init_state(struct vchiq_state *state)
{
	struct vchiq_2835_state *ps;

	state->platform_state = kzalloc(sizeof(*ps), GFP_KERNEL);
	if (!state->platform_state)
		return -ENOMEM;

	ps = (struct vchiq_2835_state *)state->platform_state;
	ps->inited = 1;
	vchiq_arm_init_state(state, &ps->arm_state);

	return 0;
}
/* Fetch the ARM-side state; must not run before vchiq_platform_init_state(). */
static struct vchiq_arm_state *vchiq_platform_get_arm_state(struct vchiq_state *state)
{
	struct vchiq_2835_state *ps =
		(struct vchiq_2835_state *)state->platform_state;

	WARN_ON_ONCE(!ps->inited);

	return &ps->arm_state;
}
/*
 * Signal a remote (VPU-side) event.  The barrier ordering here is
 * deliberate: publish shared data, set the fired flag, make the flag
 * visible, then ring the doorbell — do not reorder these statements.
 */
void
remote_event_signal(struct remote_event *event)
{
	/*
	 * Ensure that all writes to shared data structures have completed
	 * before signalling the peer.
	 */
	wmb();

	event->fired = 1;

	dsb(sy);         /* data barrier operation */

	/* Only ring the doorbell if the peer has armed the event. */
	if (event->armed)
		writel(0, g_regs + BELL2); /* trigger vc interrupt */
}
/*
 * Build the pagelist for a bulk transfer and attach it to @bulk.
 * Exactly one of @offset (kernel) / @uoffset (user) identifies the
 * buffer.  Returns 0 on success, -ENOMEM on failure.
 */
int
vchiq_prepare_bulk_data(struct vchiq_instance *instance, struct vchiq_bulk *bulk, void *offset,
			void __user *uoffset, int size, int dir)
{
	unsigned short type = (dir == VCHIQ_BULK_RECEIVE) ? PAGELIST_READ
							  : PAGELIST_WRITE;
	struct vchiq_pagelist_info *info;

	info = create_pagelist(instance, offset, uoffset, size, type);
	if (!info)
		return -ENOMEM;

	bulk->data = info->dma_addr;

	/*
	 * Store the pagelistinfo address in remote_data,
	 * which isn't used by the slave.
	 */
	bulk->remote_data = info;

	return 0;
}
/*
 * Complete a bulk transfer: release the pagelist stashed in remote_data.
 * Silently does nothing when there is no bulk, no pagelist, or no bytes
 * were transferred.
 */
void
vchiq_complete_bulk(struct vchiq_instance *instance, struct vchiq_bulk *bulk)
{
	struct vchiq_pagelist_info *info;

	if (!bulk || !bulk->remote_data || !bulk->actual)
		return;

	info = (struct vchiq_pagelist_info *)bulk->remote_data;
	free_pagelist(instance, info, bulk->actual);
}
/* Emit the one-line platform identification into the dump stream. */
int vchiq_dump_platform_state(void *dump_context)
{
	char buf[80];
	int len;

	len = snprintf(buf, sizeof(buf), " Platform: 2835 (VC master)");

	/* +1 to include the terminating NUL (turned into '\n' by vchiq_dump) */
	return vchiq_dump(dump_context, buf, len + 1);
}
#define VCHIQ_INIT_RETRIES 10
/*
 * Create a new VCHIQ instance bound to the (already initialised) core
 * state.  Polls briefly for the core state to appear.  Returns 0 and
 * stores the instance in *instance_out, or -ENOTCONN / -ENOMEM.
 */
int vchiq_initialise(struct vchiq_instance **instance_out)
{
	struct vchiq_state *state;
	struct vchiq_instance *instance = NULL;
	int attempt, ret;

	/*
	 * VideoCore may not be ready due to boot up timing.
	 * It may never be ready if kernel and firmware are mismatched,so don't
	 * block forever.
	 */
	for (attempt = 0; attempt < VCHIQ_INIT_RETRIES; attempt++) {
		state = vchiq_get_state();
		if (state)
			break;
		usleep_range(500, 600);
	}

	if (attempt == VCHIQ_INIT_RETRIES) {
		vchiq_log_error(vchiq_core_log_level, "%s: videocore not initialized\n", __func__);
		ret = -ENOTCONN;
		goto failed;
	}

	if (attempt > 0)
		vchiq_log_warning(vchiq_core_log_level,
				  "%s: videocore initialized after %d retries\n", __func__, attempt);

	instance = kzalloc(sizeof(*instance), GFP_KERNEL);
	if (!instance) {
		vchiq_log_error(vchiq_core_log_level,
				"%s: error allocating vchiq instance\n", __func__);
		ret = -ENOMEM;
		goto failed;
	}

	instance->connected = 0;
	instance->state = state;
	mutex_init(&instance->bulk_waiter_list_mutex);
	INIT_LIST_HEAD(&instance->bulk_waiter_list);

	*instance_out = instance;
	ret = 0;

failed:
	vchiq_log_trace(vchiq_core_log_level, "%s(%p): returning %d", __func__, instance, ret);
	return ret;
}
EXPORT_SYMBOL(vchiq_initialise);
/* Free every bulk waiter still queued on @instance. */
void free_bulk_waiter(struct vchiq_instance *instance)
{
	struct bulk_waiter_node *node, *tmp;

	list_for_each_entry_safe(node, tmp,
				 &instance->bulk_waiter_list, list) {
		list_del(&node->list);
		vchiq_log_info(vchiq_arm_log_level, "bulk_waiter - cleaned up %pK for pid %d",
			       node, node->pid);
		kfree(node);
	}
}
/*
 * Shut an instance down: remove its services, free its bulk waiters and
 * release the instance itself.  Returns 0, or -EAGAIN if interrupted
 * while taking the state mutex.
 */
int vchiq_shutdown(struct vchiq_instance *instance)
{
	struct vchiq_state *state = instance->state;
	int ret = 0;

	if (mutex_lock_killable(&state->mutex))
		return -EAGAIN;

	/* Remove all services */
	vchiq_shutdown_internal(state, instance);

	mutex_unlock(&state->mutex);

	vchiq_log_trace(vchiq_core_log_level, "%s(%p): returning %d", __func__, instance, ret);

	free_bulk_waiter(instance);
	kfree(instance);

	return ret;
}
EXPORT_SYMBOL(vchiq_shutdown);
/* Return non-zero once vchiq_connect() has succeeded for @instance. */
static int vchiq_is_connected(struct vchiq_instance *instance)
{
	return instance->connected;
}
/*
 * Connect the instance to the core state.  Returns 0 on success,
 * -EAGAIN if interrupted while taking the state mutex, or the error
 * from vchiq_connect_internal().
 */
int vchiq_connect(struct vchiq_instance *instance)
{
	struct vchiq_state *state = instance->state;
	int ret;

	if (mutex_lock_killable(&state->mutex)) {
		vchiq_log_trace(vchiq_core_log_level, "%s: call to mutex_lock failed", __func__);
		ret = -EAGAIN;
	} else {
		ret = vchiq_connect_internal(state, instance);
		if (!ret)
			instance->connected = 1;
		mutex_unlock(&state->mutex);
	}

	vchiq_log_trace(vchiq_core_log_level, "%s(%p): returning %d", __func__, instance, ret);
	return ret;
}
EXPORT_SYMBOL(vchiq_connect);
/*
 * Register a new service on @instance.  Services added before the
 * instance is connected start HIDDEN; afterwards they start LISTENING.
 * On success the handle is stored in *phandle and 0 is returned;
 * otherwise *phandle is VCHIQ_SERVICE_HANDLE_INVALID and -EINVAL.
 */
static int
vchiq_add_service(struct vchiq_instance *instance,
		  const struct vchiq_service_params_kernel *params,
		  unsigned int *phandle)
{
	struct vchiq_state *state = instance->state;
	struct vchiq_service *service;
	int srvstate;
	int ret = -EINVAL;

	*phandle = VCHIQ_SERVICE_HANDLE_INVALID;

	srvstate = vchiq_is_connected(instance) ? VCHIQ_SRVSTATE_LISTENING
						: VCHIQ_SRVSTATE_HIDDEN;

	service = vchiq_add_service_internal(state, params, srvstate, instance, NULL);
	if (service) {
		*phandle = service->handle;
		ret = 0;
	}

	vchiq_log_trace(vchiq_core_log_level, "%s(%p): returning %d", __func__, instance, ret);

	return ret;
}
/*
 * Open a service towards the peer.  Requires a connected instance.
 * On success *phandle receives the handle and 0 is returned; on any
 * failure *phandle is VCHIQ_SERVICE_HANDLE_INVALID and an error code
 * (-EINVAL when not connected or the service cannot be created).
 */
int
vchiq_open_service(struct vchiq_instance *instance,
		   const struct vchiq_service_params_kernel *params,
		   unsigned int *phandle)
{
	struct vchiq_service *service;
	int ret = -EINVAL;

	*phandle = VCHIQ_SERVICE_HANDLE_INVALID;

	if (vchiq_is_connected(instance)) {
		service = vchiq_add_service_internal(instance->state, params,
						     VCHIQ_SRVSTATE_OPENING,
						     instance, NULL);
		if (service) {
			*phandle = service->handle;
			ret = vchiq_open_service_internal(service, current->pid);
			if (ret) {
				/* Open failed: tear the service back down. */
				vchiq_remove_service(instance, service->handle);
				*phandle = VCHIQ_SERVICE_HANDLE_INVALID;
			}
		}
	}

	vchiq_log_trace(vchiq_core_log_level, "%s(%p): returning %d", __func__, instance, ret);

	return ret;
}
EXPORT_SYMBOL(vchiq_open_service);
/*
 * Queue a bulk transmit on @handle, retrying while the core reports
 * -EAGAIN so the call blocks until the transfer is queued.  Returns 0
 * or a negative errno; -EINVAL for an unknown mode.
 */
int
vchiq_bulk_transmit(struct vchiq_instance *instance, unsigned int handle, const void *data,
		    unsigned int size, void *userdata, enum vchiq_bulk_mode mode)
{
	int status;

	do {
		switch (mode) {
		case VCHIQ_BULK_MODE_NOCALLBACK:
		case VCHIQ_BULK_MODE_CALLBACK:
			status = vchiq_bulk_transfer(instance, handle,
						     (void *)data, NULL,
						     size, userdata, mode,
						     VCHIQ_BULK_TRANSMIT);
			break;
		case VCHIQ_BULK_MODE_BLOCKING:
			status = vchiq_blocking_bulk_transfer(instance, handle, (void *)data, size,
							      VCHIQ_BULK_TRANSMIT);
			break;
		default:
			return -EINVAL;
		}

		/*
		 * vchiq_*_bulk_transfer() may return -EAGAIN, so we need
		 * to implement a retry mechanism since this function is
		 * supposed to block until queued
		 */
		if (status != -EAGAIN)
			break;

		msleep(1);
	} while (1);

	return status;
}
EXPORT_SYMBOL(vchiq_bulk_transmit);
/*
 * Queue a bulk receive on @handle, retrying while the core reports
 * -EAGAIN so the call blocks until the transfer is queued.  Returns 0
 * or a negative errno; -EINVAL for an unknown mode.
 *
 * Fix: dropped the pointless (void *) cast of @data in the blocking
 * path — @data is already a void *.
 */
int vchiq_bulk_receive(struct vchiq_instance *instance, unsigned int handle,
		       void *data, unsigned int size, void *userdata,
		       enum vchiq_bulk_mode mode)
{
	int status;

	while (1) {
		switch (mode) {
		case VCHIQ_BULK_MODE_NOCALLBACK:
		case VCHIQ_BULK_MODE_CALLBACK:
			status = vchiq_bulk_transfer(instance, handle, data, NULL,
						     size, userdata,
						     mode, VCHIQ_BULK_RECEIVE);
			break;
		case VCHIQ_BULK_MODE_BLOCKING:
			status = vchiq_blocking_bulk_transfer(instance, handle, data, size,
							      VCHIQ_BULK_RECEIVE);
			break;
		default:
			return -EINVAL;
		}

		/*
		 * vchiq_*_bulk_transfer() may return -EAGAIN, so we need
		 * to implement a retry mechanism since this function is
		 * supposed to block until queued
		 */
		if (status != -EAGAIN)
			break;

		msleep(1);
	}

	return status;
}
EXPORT_SYMBOL(vchiq_bulk_receive);
/*
 * Perform a blocking bulk transfer.  A per-pid bulk_waiter_node is kept
 * on the instance so that a transfer interrupted by a signal can be
 * resumed (retried) by the same process; a retry with the same data/size
 * reuses the saved waiter.  Returns 0, -EINVAL for a bad handle,
 * -ENOMEM, or -EAGAIN if interrupted again.
 */
static int
vchiq_blocking_bulk_transfer(struct vchiq_instance *instance, unsigned int handle, void *data,
			     unsigned int size, enum vchiq_bulk_dir dir)
{
	struct vchiq_service *service;
	int status;
	struct bulk_waiter_node *waiter = NULL, *iter;

	/* Validate the handle only; the reference is not kept. */
	service = find_service_by_handle(instance, handle);
	if (!service)
		return -EINVAL;

	vchiq_service_put(service);

	/* Look for a waiter saved by a previous, interrupted call from
	 * this process.
	 */
	mutex_lock(&instance->bulk_waiter_list_mutex);
	list_for_each_entry(iter, &instance->bulk_waiter_list, list) {
		if (iter->pid == current->pid) {
			list_del(&iter->list);
			waiter = iter;
			break;
		}
	}
	mutex_unlock(&instance->bulk_waiter_list_mutex);

	if (waiter) {
		struct vchiq_bulk *bulk = waiter->bulk_waiter.bulk;

		if (bulk) {
			/* This thread has an outstanding bulk transfer. */
			/* FIXME: why compare a dma address to a pointer? */
			if ((bulk->data != (dma_addr_t)(uintptr_t)data) || (bulk->size != size)) {
				/*
				 * This is not a retry of the previous one.
				 * Cancel the signal when the transfer completes.
				 */
				spin_lock(&bulk_waiter_spinlock);
				bulk->userdata = NULL;
				spin_unlock(&bulk_waiter_spinlock);
			}
		}
	} else {
		waiter = kzalloc(sizeof(*waiter), GFP_KERNEL);
		if (!waiter) {
			vchiq_log_error(vchiq_core_log_level, "%s - out of memory", __func__);
			return -ENOMEM;
		}
	}

	status = vchiq_bulk_transfer(instance, handle, data, NULL, size,
				     &waiter->bulk_waiter,
				     VCHIQ_BULK_MODE_BLOCKING, dir);
	if ((status != -EAGAIN) || fatal_signal_pending(current) || !waiter->bulk_waiter.bulk) {
		/* Finished (or fatally interrupted): the waiter is not
		 * needed any more.
		 */
		struct vchiq_bulk *bulk = waiter->bulk_waiter.bulk;

		if (bulk) {
			/* Cancel the signal when the transfer completes. */
			spin_lock(&bulk_waiter_spinlock);
			bulk->userdata = NULL;
			spin_unlock(&bulk_waiter_spinlock);
		}
		kfree(waiter);
	} else {
		/* Interrupted by a non-fatal signal: park the waiter so a
		 * retry by this pid can pick it up again.
		 */
		waiter->pid = current->pid;
		mutex_lock(&instance->bulk_waiter_list_mutex);
		list_add(&waiter->list, &instance->bulk_waiter_list);
		mutex_unlock(&instance->bulk_waiter_list_mutex);
		vchiq_log_info(vchiq_arm_log_level, "saved bulk_waiter %pK for pid %d", waiter,
			       current->pid);
	}

	return status;
}
/*
 * Append a completion record to the instance's circular completion
 * queue for delivery to user space, blocking while the queue is full.
 * Returns 0 on success (or when the instance is closing), -EAGAIN if
 * interrupted while waiting for space.
 */
static int
add_completion(struct vchiq_instance *instance, enum vchiq_reason reason,
	       struct vchiq_header *header, struct user_service *user_service,
	       void *bulk_userdata)
{
	struct vchiq_completion_data_kernel *completion;
	int insert;

	DEBUG_INITIALISE(g_state.local);

	insert = instance->completion_insert;
	while ((insert - instance->completion_remove) >= MAX_COMPLETIONS) {
		/* Out of space - wait for the client */
		DEBUG_TRACE(SERVICE_CALLBACK_LINE);
		vchiq_log_trace(vchiq_arm_log_level, "%s - completion queue full", __func__);
		DEBUG_COUNT(COMPLETION_QUEUE_FULL_COUNT);
		if (wait_for_completion_interruptible(&instance->remove_event)) {
			vchiq_log_info(vchiq_arm_log_level, "service_callback interrupted");
			return -EAGAIN;
		} else if (instance->closing) {
			vchiq_log_info(vchiq_arm_log_level, "service_callback closing");
			return 0;
		}
		DEBUG_TRACE(SERVICE_CALLBACK_LINE);
	}

	/* MAX_COMPLETIONS is a power of two, so mask to index the ring. */
	completion = &instance->completions[insert & (MAX_COMPLETIONS - 1)];

	completion->header = header;
	completion->reason = reason;
	/* N.B. service_userdata is updated while processing AWAIT_COMPLETION */
	completion->service_userdata = user_service->service;
	completion->bulk_userdata = bulk_userdata;

	if (reason == VCHIQ_SERVICE_CLOSED) {
		/*
		 * Take an extra reference, to be held until
		 * this CLOSED notification is delivered.
		 */
		vchiq_service_get(user_service->service);
		if (instance->use_close_delivered)
			user_service->close_pending = 1;
	}

	/*
	 * A write barrier is needed here to ensure that the entire completion
	 * record is written out before the insert point.
	 */
	wmb();

	if (reason == VCHIQ_MESSAGE_AVAILABLE)
		user_service->message_available_pos = insert;

	insert++;
	instance->completion_insert = insert;

	/* Wake any reader blocked in AWAIT_COMPLETION. */
	complete(&instance->insert_event);

	return 0;
}
/*
 * Core callback for services owned by user-space clients.
 *
 * How do we ensure the callback goes to the right client?
 * The service_user data points to a user_service record
 * containing the original callback and the user state structure, which
 * contains a circular buffer for completion records.
 *
 * For VCHI services, data messages are delivered through a per-service
 * message queue; everything else (and queue-full notifications) goes
 * through the instance's completion queue via add_completion().
 *
 * Fix: the NULL/closing check on @instance previously ran only after
 * @instance had already been passed to handle_to_service(), which uses
 * it — making the NULL check unreachable in the case it guards against.
 * The guard is now performed first.
 *
 * Returns 0, or a negative errno (-EAGAIN if interrupted, -EINVAL if the
 * instance closes while waiting for message-queue space).
 */
int
service_callback(struct vchiq_instance *instance, enum vchiq_reason reason,
		 struct vchiq_header *header, unsigned int handle, void *bulk_userdata)
{
	struct user_service *user_service;
	struct vchiq_service *service;
	bool skip_completion = false;

	DEBUG_INITIALISE(g_state.local);

	DEBUG_TRACE(SERVICE_CALLBACK_LINE);

	/* Guard before handle_to_service() dereferences the instance. */
	if (!instance || instance->closing)
		return 0;

	rcu_read_lock();
	service = handle_to_service(instance, handle);
	if (WARN_ON(!service)) {
		rcu_read_unlock();
		return 0;
	}

	user_service = (struct user_service *)service->base.userdata;

	/*
	 * As hopping around different synchronization mechanism,
	 * taking an extra reference results in simpler implementation.
	 */
	vchiq_service_get(service);
	rcu_read_unlock();

	vchiq_log_trace(vchiq_arm_log_level,
			"%s - service %lx(%d,%p), reason %d, header %lx, instance %lx, bulk_userdata %lx",
			__func__, (unsigned long)user_service, service->localport,
			user_service->userdata, reason, (unsigned long)header,
			(unsigned long)instance, (unsigned long)bulk_userdata);

	if (header && user_service->is_vchi) {
		spin_lock(&msg_queue_spinlock);
		while (user_service->msg_insert ==
			(user_service->msg_remove + MSG_QUEUE_SIZE)) {
			spin_unlock(&msg_queue_spinlock);
			DEBUG_TRACE(SERVICE_CALLBACK_LINE);
			DEBUG_COUNT(MSG_QUEUE_FULL_COUNT);
			vchiq_log_trace(vchiq_arm_log_level, "%s - msg queue full", __func__);
			/*
			 * If there is no MESSAGE_AVAILABLE in the completion
			 * queue, add one
			 */
			if ((user_service->message_available_pos -
				instance->completion_remove) < 0) {
				int status;

				vchiq_log_info(vchiq_arm_log_level,
					       "Inserting extra MESSAGE_AVAILABLE");
				DEBUG_TRACE(SERVICE_CALLBACK_LINE);
				status = add_completion(instance, reason, NULL, user_service,
							bulk_userdata);
				if (status) {
					DEBUG_TRACE(SERVICE_CALLBACK_LINE);
					vchiq_service_put(service);
					return status;
				}
			}

			DEBUG_TRACE(SERVICE_CALLBACK_LINE);
			if (wait_for_completion_interruptible(&user_service->remove_event)) {
				vchiq_log_info(vchiq_arm_log_level, "%s interrupted", __func__);
				DEBUG_TRACE(SERVICE_CALLBACK_LINE);
				vchiq_service_put(service);
				return -EAGAIN;
			} else if (instance->closing) {
				vchiq_log_info(vchiq_arm_log_level, "%s closing", __func__);
				DEBUG_TRACE(SERVICE_CALLBACK_LINE);
				vchiq_service_put(service);
				return -EINVAL;
			}
			DEBUG_TRACE(SERVICE_CALLBACK_LINE);
			spin_lock(&msg_queue_spinlock);
		}

		/* MSG_QUEUE_SIZE is a power of two: mask to index the ring. */
		user_service->msg_queue[user_service->msg_insert &
			(MSG_QUEUE_SIZE - 1)] = header;
		user_service->msg_insert++;

		/*
		 * If there is a thread waiting in DEQUEUE_MESSAGE, or if
		 * there is a MESSAGE_AVAILABLE in the completion queue then
		 * bypass the completion queue.
		 */
		if (((user_service->message_available_pos -
			instance->completion_remove) >= 0) ||
			user_service->dequeue_pending) {
			user_service->dequeue_pending = 0;
			skip_completion = true;
		}

		spin_unlock(&msg_queue_spinlock);
		complete(&user_service->insert_event);

		/* The header has been consumed by the message queue. */
		header = NULL;
	}
	DEBUG_TRACE(SERVICE_CALLBACK_LINE);
	vchiq_service_put(service);

	if (skip_completion)
		return 0;

	return add_completion(instance, reason, header, user_service,
			      bulk_userdata);
}
/*
 * Copy a chunk of dump text to the user buffer described by
 * @dump_context, honouring the caller-requested skip offset and the
 * remaining space.  A trailing NUL included in @len marks end-of-line
 * and is rewritten as '\n'.  Returns 0 or -EFAULT.
 */
int vchiq_dump(void *dump_context, const char *str, int len)
{
	struct dump_context *context = (struct dump_context *)dump_context;
	int copy_bytes;

	/* Buffer already full: silently drop further output. */
	if (context->actual >= context->space)
		return 0;

	if (context->offset > 0) {
		/* Still skipping bytes the reader has already consumed. */
		int skip_bytes = min_t(int, len, context->offset);

		str += skip_bytes;
		len -= skip_bytes;
		context->offset -= skip_bytes;
		if (context->offset > 0)
			return 0;
	}

	copy_bytes = min_t(int, len, context->space - context->actual);
	if (copy_bytes == 0)
		return 0;
	if (copy_to_user(context->buf + context->actual, str,
			 copy_bytes))
		return -EFAULT;
	context->actual += copy_bytes;
	len -= copy_bytes;

	/*
	 * If the terminating NUL is included in the length, then it
	 * marks the end of a line and should be replaced with a
	 * carriage return.
	 */
	if ((len == 0) && (str[copy_bytes - 1] == '\0')) {
		char cr = '\n';

		if (copy_to_user(context->buf + context->actual - 1,
				 &cr, 1))
			return -EFAULT;
	}
	return 0;
}
/*
 * Dump one line per instance.  There is no instance list, so all
 * services are scanned and each instance is marked once dumped.
 *
 * Fix: snprintf() returns the length that WOULD have been written, which
 * can exceed sizeof(buf) on truncation; vchiq_dump(..., len + 1) would
 * then read past the buffer.  Use scnprintf(), which returns the number
 * of bytes actually stored (consistent with
 * vchiq_dump_platform_service_state()).
 *
 * Returns 0, -ENOTCONN when the core state is absent, or an error from
 * vchiq_dump().
 */
int vchiq_dump_platform_instances(void *dump_context)
{
	struct vchiq_state *state = vchiq_get_state();
	char buf[80];
	int len;
	int i;

	if (!state)
		return -ENOTCONN;

	/*
	 * There is no list of instances, so instead scan all services,
	 * marking those that have been dumped.
	 */
	rcu_read_lock();
	for (i = 0; i < state->unused_service; i++) {
		struct vchiq_service *service;
		struct vchiq_instance *instance;

		service = rcu_dereference(state->services[i]);
		if (!service || service->base.callback != service_callback)
			continue;

		instance = service->instance;
		if (instance)
			instance->mark = 0;
	}
	rcu_read_unlock();

	for (i = 0; i < state->unused_service; i++) {
		struct vchiq_service *service;
		struct vchiq_instance *instance;
		int err;

		rcu_read_lock();
		service = rcu_dereference(state->services[i]);
		if (!service || service->base.callback != service_callback) {
			rcu_read_unlock();
			continue;
		}

		instance = service->instance;
		if (!instance || instance->mark) {
			rcu_read_unlock();
			continue;
		}
		rcu_read_unlock();

		/* scnprintf bounds len so len + 1 never exceeds the buffer */
		len = scnprintf(buf, sizeof(buf),
				"Instance %pK: pid %d,%s completions %d/%d",
				instance, instance->pid,
				instance->connected ? " connected, " :
				"",
				instance->completion_insert -
				instance->completion_remove,
				MAX_COMPLETIONS);
		err = vchiq_dump(dump_context, buf, len + 1);
		if (err)
			return err;
		instance->mark = 1;
	}
	return 0;
}
/*
 * Append per-service platform details (owning instance and, for VCHI
 * services, message-queue occupancy) to the dump.  scnprintf() keeps
 * len within the buffer, so len + 1 is always safe for vchiq_dump().
 */
int vchiq_dump_platform_service_state(void *dump_context,
				      struct vchiq_service *service)
{
	struct user_service *user_service =
			(struct user_service *)service->base.userdata;
	char buf[80];
	int len;

	len = scnprintf(buf, sizeof(buf), "  instance %pK", service->instance);

	if ((service->base.callback == service_callback) && user_service->is_vchi) {
		len += scnprintf(buf + len, sizeof(buf) - len, ", %d/%d messages",
				 user_service->msg_insert - user_service->msg_remove,
				 MSG_QUEUE_SIZE);

		if (user_service->dequeue_pending)
			len += scnprintf(buf + len, sizeof(buf) - len,
					 " (dequeue pending)");
	}

	return vchiq_dump(dump_context, buf, len + 1);
}
/*
 * Return the global core state, or NULL if the remote (VPU) side has not
 * yet appeared or has not finished initialising.
 */
struct vchiq_state *
vchiq_get_state(void)
{
	if (!g_state.remote) {
		pr_err("%s: g_state.remote == NULL\n", __func__);
		return NULL;
	}

	if (g_state.remote->initialised != 1) {
		pr_notice("%s: g_state.remote->initialised != 1 (%d)\n",
			  __func__, g_state.remote->initialised);
		return NULL;
	}

	return &g_state;
}
/*
* Autosuspend related functionality
*/
/*
 * Callback for the keepalive ("KEEP") service.  Nothing is expected
 * here, so any invocation is logged as an error and ignored.
 */
static int
vchiq_keepalive_vchiq_callback(struct vchiq_instance *instance,
			       enum vchiq_reason reason,
			       struct vchiq_header *header,
			       unsigned int service_user, void *bulk_user)
{
	vchiq_log_error(vchiq_susp_log_level, "%s callback reason %d", __func__, reason);
	return 0;
}
/*
 * Kernel thread that forwards remote use/release notifications to the
 * firmware via a private "KEEP" service.  It sleeps on ka_evt and then
 * replays the accumulated use/release counts as vchiq_use_service() /
 * vchiq_release_service() calls.
 */
static int
vchiq_keepalive_thread_func(void *v)
{
	struct vchiq_state *state = (struct vchiq_state *)v;
	struct vchiq_arm_state *arm_state = vchiq_platform_get_arm_state(state);

	int status;
	struct vchiq_instance *instance;
	unsigned int ka_handle;
	int ret;

	struct vchiq_service_params_kernel params = {
		.fourcc      = VCHIQ_MAKE_FOURCC('K', 'E', 'E', 'P'),
		.callback    = vchiq_keepalive_vchiq_callback,
		.version     = KEEPALIVE_VER,
		.version_min = KEEPALIVE_VER_MIN
	};

	ret = vchiq_initialise(&instance);
	if (ret) {
		vchiq_log_error(vchiq_susp_log_level, "%s vchiq_initialise failed %d", __func__,
				ret);
		goto exit;
	}

	status = vchiq_connect(instance);
	if (status) {
		vchiq_log_error(vchiq_susp_log_level, "%s vchiq_connect failed %d", __func__,
				status);
		goto shutdown;
	}

	status = vchiq_add_service(instance, &params, &ka_handle);
	if (status) {
		vchiq_log_error(vchiq_susp_log_level, "%s vchiq_open_service failed %d", __func__,
				status);
		goto shutdown;
	}

	while (1) {
		long rc = 0, uc = 0;

		if (wait_for_completion_interruptible(&arm_state->ka_evt)) {
			vchiq_log_error(vchiq_susp_log_level, "%s interrupted", __func__);
			flush_signals(current);
			continue;
		}

		/*
		 * read and clear counters. Do release_count then use_count to
		 * prevent getting more releases than uses
		 */
		rc = atomic_xchg(&arm_state->ka_release_count, 0);
		uc = atomic_xchg(&arm_state->ka_use_count, 0);

		/*
		 * Call use/release service the requisite number of times.
		 * Process use before release so use counts don't go negative
		 */
		while (uc--) {
			atomic_inc(&arm_state->ka_use_ack_count);
			status = vchiq_use_service(instance, ka_handle);
			if (status) {
				vchiq_log_error(vchiq_susp_log_level,
						"%s vchiq_use_service error %d", __func__, status);
			}
		}
		while (rc--) {
			status = vchiq_release_service(instance, ka_handle);
			if (status) {
				vchiq_log_error(vchiq_susp_log_level,
						"%s vchiq_release_service error %d", __func__,
						status);
			}
		}
	}

shutdown:
	vchiq_shutdown(instance);
exit:
	return 0;
}
/*
 * Take a "use" reference on the VideoCore on behalf of either the VCHIQ
 * entity itself (USE_TYPE_VCHIQ) or a specific service, and flush any
 * pending remote-use acknowledgements to the firmware.  Returns 0 or
 * -EINVAL on a missing arm_state/service.
 */
int
vchiq_use_internal(struct vchiq_state *state, struct vchiq_service *service,
		   enum USE_TYPE_E use_type)
{
	struct vchiq_arm_state *arm_state = vchiq_platform_get_arm_state(state);
	int ret = 0;
	char entity[16];
	int *entity_uc;
	int local_uc;

	if (!arm_state) {
		ret = -EINVAL;
		goto out;
	}

	if (use_type == USE_TYPE_VCHIQ) {
		sprintf(entity, "VCHIQ:   ");
		entity_uc = &arm_state->peer_use_count;
	} else if (service) {
		/* NOTE(review): entity[16] holds "xxxx:NNN"; this assumes
		 * client_id formats to at most ~10 chars — verify its range.
		 */
		sprintf(entity, "%c%c%c%c:%03d",
			VCHIQ_FOURCC_AS_4CHARS(service->base.fourcc),
			service->client_id);
		entity_uc = &service->service_use_count;
	} else {
		vchiq_log_error(vchiq_susp_log_level, "%s null service ptr", __func__);
		ret = -EINVAL;
		goto out;
	}

	write_lock_bh(&arm_state->susp_res_lock);
	local_uc = ++arm_state->videocore_use_count;
	++(*entity_uc);

	vchiq_log_trace(vchiq_susp_log_level, "%s %s count %d, state count %d", __func__, entity,
			*entity_uc, local_uc);

	write_unlock_bh(&arm_state->susp_res_lock);

	if (!ret) {
		int status = 0;
		/* Drain pending keepalive acks; on failure put the
		 * remainder back for a later attempt.
		 */
		long ack_cnt = atomic_xchg(&arm_state->ka_use_ack_count, 0);

		while (ack_cnt && !status) {
			/* Send the use notify to videocore */
			status = vchiq_send_remote_use_active(state);
			if (!status)
				ack_cnt--;
			else
				atomic_add(ack_cnt, &arm_state->ka_use_ack_count);
		}
	}

out:
	vchiq_log_trace(vchiq_susp_log_level, "%s exit %d", __func__, ret);
	return ret;
}
/*
 * Drop a "use" reference taken by vchiq_use_internal(), for @service or
 * (when NULL) for the peer.  Refuses to underflow either counter.
 * Returns 0 or -EINVAL.
 */
int
vchiq_release_internal(struct vchiq_state *state, struct vchiq_service *service)
{
	struct vchiq_arm_state *arm_state = vchiq_platform_get_arm_state(state);
	int ret = 0;
	char entity[16];
	int *entity_uc;

	if (!arm_state) {
		ret = -EINVAL;
		goto out;
	}

	if (service) {
		sprintf(entity, "%c%c%c%c:%03d",
			VCHIQ_FOURCC_AS_4CHARS(service->base.fourcc),
			service->client_id);
		entity_uc = &service->service_use_count;
	} else {
		sprintf(entity, "PEER:   ");
		entity_uc = &arm_state->peer_use_count;
	}

	write_lock_bh(&arm_state->susp_res_lock);
	if (!arm_state->videocore_use_count || !(*entity_uc)) {
		/* Don't use BUG_ON - don't allow user thread to crash kernel */
		WARN_ON(!arm_state->videocore_use_count);
		WARN_ON(!(*entity_uc));
		ret = -EINVAL;
		goto unlock;
	}
	--arm_state->videocore_use_count;
	--(*entity_uc);

	vchiq_log_trace(vchiq_susp_log_level, "%s %s count %d, state count %d", __func__, entity,
			*entity_uc, arm_state->videocore_use_count);

unlock:
	write_unlock_bh(&arm_state->susp_res_lock);

out:
	vchiq_log_trace(vchiq_susp_log_level, "%s exit %d", __func__, ret);
	return ret;
}
/* VPU requested a "use": count it and wake the keepalive thread. */
void
vchiq_on_remote_use(struct vchiq_state *state)
{
	struct vchiq_arm_state *arm_state = vchiq_platform_get_arm_state(state);

	atomic_inc(&arm_state->ka_use_count);
	complete(&arm_state->ka_evt);
}
/*
 * The peer sent a REMOTE_RELEASE message: record it and complete ka_evt so
 * the keepalive worker can perform the release accounting in task context.
 */
void
vchiq_on_remote_release(struct vchiq_state *state)
{
	struct vchiq_arm_state *arm_state = vchiq_platform_get_arm_state(state);

	atomic_inc(&arm_state->ka_release_count);
	complete(&arm_state->ka_evt);
}
/* Take a use count on @service's behalf (kernel-internal convenience). */
int
vchiq_use_service_internal(struct vchiq_service *service)
{
	return vchiq_use_internal(service->state, service, USE_TYPE_SERVICE);
}
/* Release a use count on @service's behalf (kernel-internal convenience). */
int
vchiq_release_service_internal(struct vchiq_service *service)
{
	return vchiq_release_internal(service->state, service);
}
/* Accessor for the per-instance debugfs bookkeeping node. */
struct vchiq_debugfs_node *
vchiq_instance_get_debugfs_node(struct vchiq_instance *instance)
{
	return &instance->debugfs_node;
}
/*
 * Sum the use counts of every service belonging to @instance.
 * Walks the service table under RCU, so the result is a best-effort
 * snapshot rather than an exact figure.
 */
int
vchiq_instance_get_use_count(struct vchiq_instance *instance)
{
	struct vchiq_service *service;
	int total = 0;
	int idx = 0;

	rcu_read_lock();
	for (service = __next_service_by_instance(instance->state, instance, &idx);
	     service;
	     service = __next_service_by_instance(instance->state, instance, &idx))
		total += service->service_use_count;
	rcu_read_unlock();

	return total;
}
/* Return the PID recorded for the process that owns @instance. */
int
vchiq_instance_get_pid(struct vchiq_instance *instance)
{
	return instance->pid;
}
/* Return the instance-wide message tracing flag. */
int
vchiq_instance_get_trace(struct vchiq_instance *instance)
{
	return instance->trace;
}
/*
 * Set message tracing on @instance and propagate the flag to every service
 * currently owned by it. The instance flag itself is normalised to 0/1.
 */
void
vchiq_instance_set_trace(struct vchiq_instance *instance, int trace)
{
	struct vchiq_service *service;
	int idx = 0;

	rcu_read_lock();
	for (service = __next_service_by_instance(instance->state, instance, &idx);
	     service;
	     service = __next_service_by_instance(instance->state, instance, &idx))
		service->trace = trace;
	rcu_read_unlock();

	instance->trace = (trace != 0);
}
/*
 * Public API: take a use count on the service identified by @handle.
 * Returns 0 on success, or -EINVAL if the handle does not resolve.
 */
int
vchiq_use_service(struct vchiq_instance *instance, unsigned int handle)
{
	struct vchiq_service *service = find_service_by_handle(instance, handle);
	int ret = -EINVAL;

	if (!service)
		return ret;

	ret = vchiq_use_internal(service->state, service, USE_TYPE_SERVICE);
	vchiq_service_put(service);
	return ret;
}
EXPORT_SYMBOL(vchiq_use_service);
/*
 * Public API: release a use count on the service identified by @handle.
 * Returns 0 on success, or -EINVAL if the handle does not resolve.
 */
int
vchiq_release_service(struct vchiq_instance *instance, unsigned int handle)
{
	struct vchiq_service *service = find_service_by_handle(instance, handle);
	int ret = -EINVAL;

	if (!service)
		return ret;

	ret = vchiq_release_internal(service->state, service);
	vchiq_service_put(service);
	return ret;
}
EXPORT_SYMBOL(vchiq_release_service);
/* Snapshot of one service's identity and use count, taken under lock so
 * the dump below can log without holding susp_res_lock.
 */
struct service_data_struct {
	int fourcc;	/* service fourcc identifier */
	int clientid;	/* client id of the opener */
	int use_count;	/* service_use_count at snapshot time */
};
/*
 * Log the use counts of all active services, plus the peer and overall
 * videocore counts. Counts are snapshotted into a heap array under
 * susp_res_lock/RCU first, so all logging happens with no locks held.
 */
void
vchiq_dump_service_use_state(struct vchiq_state *state)
{
	struct vchiq_arm_state *arm_state = vchiq_platform_get_arm_state(state);
	struct service_data_struct *service_data;
	int i, found = 0;
	/*
	 * If there's more than 64 services, only dump ones with
	 * non-zero counts
	 */
	int only_nonzero = 0;
	static const char *nz = "<-- preventing suspend";

	int peer_count;
	int vc_use_count;
	int active_services;

	if (!arm_state)
		return;

	service_data = kmalloc_array(MAX_SERVICES, sizeof(*service_data),
				     GFP_KERNEL);
	if (!service_data)
		return;

	read_lock_bh(&arm_state->susp_res_lock);

	/* Snapshot the global counters while the lock is held. */
	peer_count = arm_state->peer_use_count;
	vc_use_count = arm_state->videocore_use_count;
	active_services = state->unused_service;
	if (active_services > MAX_SERVICES)
		only_nonzero = 1;

	rcu_read_lock();
	for (i = 0; i < active_services; i++) {
		struct vchiq_service *service_ptr =
			rcu_dereference(state->services[i]);

		if (!service_ptr)
			continue;

		if (only_nonzero && !service_ptr->service_use_count)
			continue;

		if (service_ptr->srvstate == VCHIQ_SRVSTATE_FREE)
			continue;

		/* Copy out what we need; service_ptr is only valid under RCU. */
		service_data[found].fourcc = service_ptr->base.fourcc;
		service_data[found].clientid = service_ptr->client_id;
		service_data[found].use_count = service_ptr->service_use_count;
		found++;
		if (found >= MAX_SERVICES)
			break;
	}
	rcu_read_unlock();

	read_unlock_bh(&arm_state->susp_res_lock);

	if (only_nonzero)
		vchiq_log_warning(vchiq_susp_log_level, "Too many active services (%d). Only dumping up to first %d services with non-zero use-count",
				  active_services, found);

	for (i = 0; i < found; i++) {
		vchiq_log_warning(vchiq_susp_log_level, "----- %c%c%c%c:%d service count %d %s",
				  VCHIQ_FOURCC_AS_4CHARS(service_data[i].fourcc),
				  service_data[i].clientid, service_data[i].use_count,
				  service_data[i].use_count ? nz : "");
	}
	vchiq_log_warning(vchiq_susp_log_level, "----- VCHIQ use count %d", peer_count);
	vchiq_log_warning(vchiq_susp_log_level, "--- Overall vchiq instance use count %d",
			  vc_use_count);

	kfree(service_data);
}
/*
 * Verify that @service holds at least one use count. Returns 0 if it does;
 * otherwise logs an error, dumps the full use-count state to aid debugging,
 * and returns -EINVAL.
 */
int
vchiq_check_service(struct vchiq_service *service)
{
	struct vchiq_arm_state *arm_state;
	int ret = -EINVAL;

	if (!service || !service->state)
		goto out;

	arm_state = vchiq_platform_get_arm_state(service->state);

	read_lock_bh(&arm_state->susp_res_lock);
	if (service->service_use_count)
		ret = 0;
	read_unlock_bh(&arm_state->susp_res_lock);

	if (ret) {
		vchiq_log_error(vchiq_susp_log_level,
				"%s ERROR - %c%c%c%c:%d service count %d, state count %d", __func__,
				VCHIQ_FOURCC_AS_4CHARS(service->base.fourcc), service->client_id,
				service->service_use_count, arm_state->videocore_use_count);
		vchiq_dump_service_use_state(service->state);
	}
out:
	return ret;
}
/*
 * Connection-state transition hook. On the first transition into the
 * CONNECTED state only, spawn the keepalive thread. The first_connect
 * flag is tested and set under susp_res_lock so concurrent transitions
 * cannot start the thread twice.
 */
void vchiq_platform_conn_state_changed(struct vchiq_state *state,
				       enum vchiq_connstate oldstate,
				       enum vchiq_connstate newstate)
{
	struct vchiq_arm_state *arm_state = vchiq_platform_get_arm_state(state);
	char threadname[16];

	vchiq_log_info(vchiq_susp_log_level, "%d: %s->%s", state->id,
		       get_conn_state_name(oldstate), get_conn_state_name(newstate));
	if (state->conn_state != VCHIQ_CONNSTATE_CONNECTED)
		return;

	write_lock_bh(&arm_state->susp_res_lock);
	if (arm_state->first_connect) {
		/* Keepalive thread already started on a previous connect. */
		write_unlock_bh(&arm_state->susp_res_lock);
		return;
	}

	arm_state->first_connect = 1;
	write_unlock_bh(&arm_state->susp_res_lock);
	snprintf(threadname, sizeof(threadname), "vchiq-keep/%d",
		 state->id);
	arm_state->ka_thread = kthread_create(&vchiq_keepalive_thread_func,
					      (void *)state,
					      threadname);
	if (IS_ERR(arm_state->ka_thread)) {
		/* Nothing resets first_connect here, so no retry occurs. */
		vchiq_log_error(vchiq_susp_log_level,
				"vchiq: FATAL: couldn't create thread %s",
				threadname);
	} else {
		wake_up_process(arm_state->ka_thread);
	}
}
/* Devicetree match table; .data supplies the per-SoC driver data. */
static const struct of_device_id vchiq_of_match[] = {
	{ .compatible = "brcm,bcm2835-vchiq", .data = &bcm2835_drvdata },
	{ .compatible = "brcm,bcm2836-vchiq", .data = &bcm2836_drvdata },
	{},
};
MODULE_DEVICE_TABLE(of, vchiq_of_match);
static struct platform_device *
vchiq_register_child(struct platform_device *pdev, const char *name)
{
struct platform_device_info pdevinfo;
struct platform_device *child;
memset(&pdevinfo, 0, sizeof(pdevinfo));
pdevinfo.parent = &pdev->dev;
pdevinfo.name = name;
pdevinfo.id = PLATFORM_DEVID_NONE;
pdevinfo.dma_mask = DMA_BIT_MASK(32);
child = platform_device_register_full(&pdevinfo);
if (IS_ERR(child)) {
dev_warn(&pdev->dev, "%s not registered\n", name);
child = NULL;
}
return child;
}
/*
 * Probe the bcm2835/bcm2836 vchiq platform device: locate the firmware
 * node, initialise the platform layer and global state, register the
 * character device, then register the dependent camera/audio children
 * (child failures are only warned about inside vchiq_register_child()).
 */
static int vchiq_probe(struct platform_device *pdev)
{
	struct device_node *fw_node;
	const struct of_device_id *of_id;
	struct vchiq_drvdata *drvdata;
	int err;

	/*
	 * NOTE(review): of_id is not NULL-checked; probe only runs for
	 * devices matched against vchiq_of_match, so a match is assumed.
	 */
	of_id = of_match_node(vchiq_of_match, pdev->dev.of_node);
	drvdata = (struct vchiq_drvdata *)of_id->data;
	if (!drvdata)
		return -EINVAL;

	fw_node = of_find_compatible_node(NULL, NULL,
					  "raspberrypi,bcm2835-firmware");
	if (!fw_node) {
		dev_err(&pdev->dev, "Missing firmware node\n");
		return -ENOENT;
	}

	/* Firmware interface may not be up yet; ask to be probed again. */
	drvdata->fw = devm_rpi_firmware_get(&pdev->dev, fw_node);
	of_node_put(fw_node);
	if (!drvdata->fw)
		return -EPROBE_DEFER;

	platform_set_drvdata(pdev, drvdata);

	err = vchiq_platform_init(pdev, &g_state);
	if (err)
		goto failed_platform_init;

	vchiq_debugfs_init();

	vchiq_log_info(vchiq_arm_log_level,
		       "vchiq: platform initialised - version %d (min %d)",
		       VCHIQ_VERSION, VCHIQ_VERSION_MIN);

	/*
	 * Simply exit on error since the function handles cleanup in
	 * cases of failure.
	 */
	err = vchiq_register_chrdev(&pdev->dev);
	if (err) {
		vchiq_log_warning(vchiq_arm_log_level,
				  "Failed to initialize vchiq cdev");
		goto error_exit;
	}

	bcm2835_camera = vchiq_register_child(pdev, "bcm2835-camera");
	bcm2835_audio = vchiq_register_child(pdev, "bcm2835_audio");

	return 0;

failed_platform_init:
	vchiq_log_warning(vchiq_arm_log_level, "could not initialize vchiq platform");
error_exit:
	return err;
}
/* Tear down in reverse order of probe: children, debugfs, then chrdev. */
static void vchiq_remove(struct platform_device *pdev)
{
	platform_device_unregister(bcm2835_audio);
	platform_device_unregister(bcm2835_camera);
	vchiq_debugfs_deinit();
	vchiq_deregister_chrdev();
}
/* Platform driver glue; matched via the devicetree table above. */
static struct platform_driver vchiq_driver = {
	.driver = {
		.name = "bcm2835_vchiq",
		.of_match_table = vchiq_of_match,
	},
	.probe = vchiq_probe,
	.remove_new = vchiq_remove,
};
/* Module entry point: register the platform driver. */
static int __init vchiq_driver_init(void)
{
	int ret;

	ret = platform_driver_register(&vchiq_driver);
	if (ret)
		pr_err("Failed to register vchiq driver\n");

	return ret;
}
module_init(vchiq_driver_init);
/* Module exit point: unregister the platform driver. */
static void __exit vchiq_driver_exit(void)
{
	platform_driver_unregister(&vchiq_driver);
}
module_exit(vchiq_driver_exit);

MODULE_LICENSE("Dual BSD/GPL");
MODULE_DESCRIPTION("Videocore VCHIQ driver");
MODULE_AUTHOR("Broadcom Corporation");
| linux-master | drivers/staging/vc04_services/interface/vchiq_arm/vchiq_arm.c |
// SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause
/* Copyright (c) 2010-2012 Broadcom. All rights reserved. */
#include <linux/types.h>
#include <linux/completion.h>
#include <linux/mutex.h>
#include <linux/bitops.h>
#include <linux/kthread.h>
#include <linux/wait.h>
#include <linux/delay.h>
#include <linux/slab.h>
#include <linux/kref.h>
#include <linux/rcupdate.h>
#include <linux/sched/signal.h>
#include "vchiq_arm.h"
#include "vchiq_core.h"
/* Stack size for the slot handler thread. */
#define VCHIQ_SLOT_HANDLER_STACK 8192

/*
 * On-wire message types, stored in the top byte of the message id.
 * The trailing comments list the port fields and payload each carries.
 */
#define VCHIQ_MSG_PADDING            0  /* - */
#define VCHIQ_MSG_CONNECT            1  /* - */
#define VCHIQ_MSG_OPEN               2  /* + (srcport, -), fourcc, client_id */
#define VCHIQ_MSG_OPENACK            3  /* + (srcport, dstport) */
#define VCHIQ_MSG_CLOSE              4  /* + (srcport, dstport) */
#define VCHIQ_MSG_DATA               5  /* + (srcport, dstport) */
#define VCHIQ_MSG_BULK_RX            6  /* + (srcport, dstport), data, size */
#define VCHIQ_MSG_BULK_TX            7  /* + (srcport, dstport), data, size */
#define VCHIQ_MSG_BULK_RX_DONE       8  /* + (srcport, dstport), actual */
#define VCHIQ_MSG_BULK_TX_DONE       9  /* + (srcport, dstport), actual */
#define VCHIQ_MSG_PAUSE             10  /* - */
#define VCHIQ_MSG_RESUME            11  /* - */
#define VCHIQ_MSG_REMOTE_USE        12  /* - */
#define VCHIQ_MSG_REMOTE_RELEASE    13  /* - */
#define VCHIQ_MSG_REMOTE_USE_ACTIVE 14  /* - */

/* Message id layout: | type:8 | srcport:12 | dstport:12 | */
#define TYPE_SHIFT 24

#define VCHIQ_PORT_MAX                 (VCHIQ_MAX_SERVICES - 1)
#define VCHIQ_PORT_FREE                0x1000
#define VCHIQ_PORT_IS_VALID(port)      ((port) < VCHIQ_PORT_FREE)
#define VCHIQ_MAKE_MSG(type, srcport, dstport) \
	(((type) << TYPE_SHIFT) | ((srcport) << 12) | ((dstport) << 0))
#define VCHIQ_MSG_TYPE(msgid)          ((unsigned int)(msgid) >> TYPE_SHIFT)
#define VCHIQ_MSG_SRCPORT(msgid) \
	(unsigned short)(((unsigned int)(msgid) >> 12) & 0xfff)
#define VCHIQ_MSG_DSTPORT(msgid) \
	((unsigned short)(msgid) & 0xfff)

/* Shorthand constructors for the common message ids. */
#define MAKE_CONNECT			(VCHIQ_MSG_CONNECT << TYPE_SHIFT)
#define MAKE_OPEN(srcport) \
	((VCHIQ_MSG_OPEN << TYPE_SHIFT) | ((srcport) << 12))
#define MAKE_OPENACK(srcport, dstport) \
	((VCHIQ_MSG_OPENACK << TYPE_SHIFT) | ((srcport) << 12) | ((dstport) << 0))
#define MAKE_CLOSE(srcport, dstport) \
	((VCHIQ_MSG_CLOSE << TYPE_SHIFT) | ((srcport) << 12) | ((dstport) << 0))
#define MAKE_DATA(srcport, dstport) \
	((VCHIQ_MSG_DATA << TYPE_SHIFT) | ((srcport) << 12) | ((dstport) << 0))
#define MAKE_PAUSE			(VCHIQ_MSG_PAUSE << TYPE_SHIFT)
#define MAKE_RESUME			(VCHIQ_MSG_RESUME << TYPE_SHIFT)
#define MAKE_REMOTE_USE			(VCHIQ_MSG_REMOTE_USE << TYPE_SHIFT)
#define MAKE_REMOTE_USE_ACTIVE		(VCHIQ_MSG_REMOTE_USE_ACTIVE << TYPE_SHIFT)

/* Ensure the fields are wide enough */
static_assert(VCHIQ_MSG_SRCPORT(VCHIQ_MAKE_MSG(0, 0, VCHIQ_PORT_MAX))
	== 0);
static_assert(VCHIQ_MSG_TYPE(VCHIQ_MAKE_MSG(0, VCHIQ_PORT_MAX, 0)) == 0);
static_assert((unsigned int)VCHIQ_PORT_MAX <
	(unsigned int)VCHIQ_PORT_FREE);

#define VCHIQ_MSGID_PADDING            VCHIQ_MAKE_MSG(VCHIQ_MSG_PADDING, 0, 0)
#define VCHIQ_MSGID_CLAIMED            0x40000000

#define VCHIQ_FOURCC_INVALID           0x00000000
#define VCHIQ_FOURCC_IS_LEGAL(fourcc)  ((fourcc) != VCHIQ_FOURCC_INVALID)

/* Sentinel 'actual' value reported for an aborted bulk transfer. */
#define VCHIQ_BULK_ACTUAL_ABORTED -1
/*
 * Statistics accounting - compiled to no-ops when VCHIQ_ENABLE_STATS is
 * disabled, so call sites need no #ifdefs of their own.
 */
#if VCHIQ_ENABLE_STATS
#define VCHIQ_STATS_INC(state, stat) (state->stats. stat++)
#define VCHIQ_SERVICE_STATS_INC(service, stat) (service->stats. stat++)
#define VCHIQ_SERVICE_STATS_ADD(service, stat, addend) \
	(service->stats. stat += addend)
#else
#define VCHIQ_STATS_INC(state, stat) ((void)0)
#define VCHIQ_SERVICE_STATS_INC(service, stat) ((void)0)
#define VCHIQ_SERVICE_STATS_ADD(service, stat, addend) ((void)0)
#endif

#define HANDLE_STATE_SHIFT 12

/* Conversions between slot indices, slot data pointers and tx/rx positions. */
#define SLOT_INFO_FROM_INDEX(state, index) (state->slot_info + (index))
#define SLOT_DATA_FROM_INDEX(state, index) (state->slot_data + (index))
#define SLOT_INDEX_FROM_DATA(state, data) \
	(((unsigned int)((char *)data - (char *)state->slot_data)) / \
	VCHIQ_SLOT_SIZE)
#define SLOT_INDEX_FROM_INFO(state, info) \
	((unsigned int)(info - state->slot_info))
#define SLOT_QUEUE_INDEX_FROM_POS(pos) \
	((int)((unsigned int)(pos) / VCHIQ_SLOT_SIZE))
#define SLOT_QUEUE_INDEX_FROM_POS_MASKED(pos) \
	(SLOT_QUEUE_INDEX_FROM_POS(pos) & VCHIQ_SLOT_QUEUE_MASK)

#define BULK_INDEX(x) ((x) & (VCHIQ_NUM_SERVICE_BULKS - 1))

/* Per-service message tracing: a service's trace flag overrides the level. */
#define SRVTRACE_LEVEL(srv) \
	(((srv) && (srv)->trace) ? VCHIQ_LOG_TRACE : vchiq_core_msg_log_level)
#define SRVTRACE_ENABLED(srv, lev) \
	(((srv) && (srv)->trace) || (vchiq_core_msg_log_level >= (lev)))

/* Boolean-style flag values used by the close and poll code paths. */
#define NO_CLOSE_RECVD 0
#define CLOSE_RECVD 1

#define NO_RETRY_POLL 0
#define RETRY_POLL 1
/*
 * Payload of a VCHIQ_MSG_OPEN message: which service to open, who is
 * opening it, and the version range the opener supports.
 */
struct vchiq_open_payload {
	int fourcc;
	int client_id;
	short version;
	short version_min;
};
/* Payload of a VCHIQ_MSG_OPENACK message: the version agreed by the peer. */
struct vchiq_openack_payload {
	short version;
};
/* Flags controlling queue_message()'s blocking and slot_mutex handling. */
enum {
	QMFLAGS_IS_BLOCKING = BIT(0),
	QMFLAGS_NO_MUTEX_LOCK = BIT(1),
	QMFLAGS_NO_MUTEX_UNLOCK = BIT(2)
};

/* Poll operations requested via request_poll(). */
enum {
	VCHIQ_POLL_TERMINATE,
	VCHIQ_POLL_REMOVE,
	VCHIQ_POLL_TXNOTIFY,
	VCHIQ_POLL_RXNOTIFY,
	VCHIQ_POLL_COUNT
};
/* we require this for consistency between endpoints */
static_assert(sizeof(struct vchiq_header) == 8);
static_assert(VCHIQ_VERSION >= VCHIQ_VERSION_MIN);

/*
 * Compile-time checks: the slot index/stride arithmetic throughout this
 * file relies on all of these being powers of two.
 */
static inline void check_sizes(void)
{
	BUILD_BUG_ON_NOT_POWER_OF_2(VCHIQ_SLOT_SIZE);
	BUILD_BUG_ON_NOT_POWER_OF_2(VCHIQ_MAX_SLOTS);
	BUILD_BUG_ON_NOT_POWER_OF_2(VCHIQ_MAX_SLOTS_PER_SIDE);
	BUILD_BUG_ON_NOT_POWER_OF_2(sizeof(struct vchiq_header));
	BUILD_BUG_ON_NOT_POWER_OF_2(VCHIQ_NUM_CURRENT_BULKS);
	BUILD_BUG_ON_NOT_POWER_OF_2(VCHIQ_NUM_SERVICE_BULKS);
	BUILD_BUG_ON_NOT_POWER_OF_2(VCHIQ_MAX_SERVICES);
}
/* Run time control of log level, based on KERN_XXX level. */
int vchiq_core_log_level = VCHIQ_LOG_DEFAULT;
int vchiq_core_msg_log_level = VCHIQ_LOG_DEFAULT;
int vchiq_sync_log_level = VCHIQ_LOG_DEFAULT;

DEFINE_SPINLOCK(bulk_waiter_spinlock);
/* Protects all quota and use-count bookkeeping in this file. */
static DEFINE_SPINLOCK(quota_spinlock);

static unsigned int handle_seq;
/* Human-readable names indexed by the VCHIQ_SRVSTATE_* service states. */
static const char *const srvstate_names[] = {
	"FREE",
	"HIDDEN",
	"LISTENING",
	"OPENING",
	"OPEN",
	"OPENSYNC",
	"CLOSESENT",
	"CLOSERECVD",
	"CLOSEWAIT",
	"CLOSED"
};

/* Human-readable names indexed by enum vchiq_reason callback reasons. */
static const char *const reason_names[] = {
	"SERVICE_OPENED",
	"SERVICE_CLOSED",
	"MESSAGE_AVAILABLE",
	"BULK_TRANSMIT_DONE",
	"BULK_RECEIVE_DONE",
	"BULK_TRANSMIT_ABORTED",
	"BULK_RECEIVE_ABORTED"
};

/* Human-readable names indexed by enum vchiq_connstate. */
static const char *const conn_state_names[] = {
	"DISCONNECTED",
	"CONNECTING",
	"CONNECTED",
	"PAUSING",
	"PAUSE_SENT",
	"PAUSED",
	"RESUMING",
	"PAUSE_TIMEOUT",
	"RESUME_TIMEOUT"
};
/* Forward declaration - the definition appears later in this file. */
static void
release_message_sync(struct vchiq_state *state, struct vchiq_header *header);
static const char *msg_type_str(unsigned int msg_type)
{
switch (msg_type) {
case VCHIQ_MSG_PADDING: return "PADDING";
case VCHIQ_MSG_CONNECT: return "CONNECT";
case VCHIQ_MSG_OPEN: return "OPEN";
case VCHIQ_MSG_OPENACK: return "OPENACK";
case VCHIQ_MSG_CLOSE: return "CLOSE";
case VCHIQ_MSG_DATA: return "DATA";
case VCHIQ_MSG_BULK_RX: return "BULK_RX";
case VCHIQ_MSG_BULK_TX: return "BULK_TX";
case VCHIQ_MSG_BULK_RX_DONE: return "BULK_RX_DONE";
case VCHIQ_MSG_BULK_TX_DONE: return "BULK_TX_DONE";
case VCHIQ_MSG_PAUSE: return "PAUSE";
case VCHIQ_MSG_RESUME: return "RESUME";
case VCHIQ_MSG_REMOTE_USE: return "REMOTE_USE";
case VCHIQ_MSG_REMOTE_RELEASE: return "REMOTE_RELEASE";
case VCHIQ_MSG_REMOTE_USE_ACTIVE: return "REMOTE_USE_ACTIVE";
}
return "???";
}
/* Change a service's state, logging the old->new transition. */
static inline void
set_service_state(struct vchiq_service *service, int newstate)
{
	vchiq_log_info(vchiq_core_log_level, "%d: srv:%d %s->%s",
		       service->state->id, service->localport,
		       srvstate_names[service->srvstate],
		       srvstate_names[newstate]);
	service->srvstate = newstate;
}
/*
 * Raw handle-to-service slot lookup (low bits of the handle index the
 * service table). Caller must hold the RCU read lock and validate the
 * result; no reference is taken here.
 */
struct vchiq_service *handle_to_service(struct vchiq_instance *instance, unsigned int handle)
{
	int idx = handle & (VCHIQ_MAX_SERVICES - 1);

	return rcu_dereference(instance->state->services[idx]);
}
/*
 * Look up a live service by handle and take a reference to it.
 * Returns the service with its refcount elevated (balance with
 * vchiq_service_put()), or NULL if the handle is stale, the slot is
 * free, or the service is already being torn down.
 */
struct vchiq_service *
find_service_by_handle(struct vchiq_instance *instance, unsigned int handle)
{
	struct vchiq_service *service;

	rcu_read_lock();
	service = handle_to_service(instance, handle);
	if (service && service->srvstate != VCHIQ_SRVSTATE_FREE &&
	    service->handle == handle &&
	    kref_get_unless_zero(&service->ref_count)) {
		service = rcu_pointer_handoff(service);
		rcu_read_unlock();
		return service;
	}
	rcu_read_unlock();
	vchiq_log_info(vchiq_core_log_level,
		       "Invalid service handle 0x%x", handle);
	return NULL;
}
/*
 * Look up a live service by local port number and take a reference.
 * Returns NULL (after logging) for out-of-range ports, free slots, or
 * services being torn down. Balance with vchiq_service_put().
 */
struct vchiq_service *
find_service_by_port(struct vchiq_state *state, unsigned int localport)
{
	if (localport <= VCHIQ_PORT_MAX) {
		struct vchiq_service *service;

		rcu_read_lock();
		service = rcu_dereference(state->services[localport]);
		if (service && service->srvstate != VCHIQ_SRVSTATE_FREE &&
		    kref_get_unless_zero(&service->ref_count)) {
			service = rcu_pointer_handoff(service);
			rcu_read_unlock();
			return service;
		}
		rcu_read_unlock();
	}
	vchiq_log_info(vchiq_core_log_level,
		       "Invalid port %u", localport);
	return NULL;
}
/*
 * As find_service_by_handle(), but additionally requires the service to
 * belong to @instance. Balance with vchiq_service_put().
 */
struct vchiq_service *
find_service_for_instance(struct vchiq_instance *instance, unsigned int handle)
{
	struct vchiq_service *service;

	rcu_read_lock();
	service = handle_to_service(instance, handle);
	if (service && service->srvstate != VCHIQ_SRVSTATE_FREE &&
	    service->handle == handle &&
	    service->instance == instance &&
	    kref_get_unless_zero(&service->ref_count)) {
		service = rcu_pointer_handoff(service);
		rcu_read_unlock();
		return service;
	}
	rcu_read_unlock();
	vchiq_log_info(vchiq_core_log_level,
		       "Invalid service handle 0x%x", handle);
	return NULL;
}
struct vchiq_service *
find_closed_service_for_instance(struct vchiq_instance *instance, unsigned int handle)
{
struct vchiq_service *service;
rcu_read_lock();
service = handle_to_service(instance, handle);
if (service &&
(service->srvstate == VCHIQ_SRVSTATE_FREE ||
service->srvstate == VCHIQ_SRVSTATE_CLOSED) &&
service->handle == handle &&
service->instance == instance &&
kref_get_unless_zero(&service->ref_count)) {
service = rcu_pointer_handoff(service);
rcu_read_unlock();
return service;
}
rcu_read_unlock();
vchiq_log_info(vchiq_core_log_level,
"Invalid service handle 0x%x", handle);
return service;
}
/*
 * Iterator core: return the next non-free service owned by @instance at or
 * after *pidx, advancing *pidx past it; NULL when the table is exhausted.
 * Caller must hold the RCU read lock; no reference is taken.
 */
struct vchiq_service *
__next_service_by_instance(struct vchiq_state *state,
			   struct vchiq_instance *instance,
			   int *pidx)
{
	int idx = *pidx;

	while (idx < state->unused_service) {
		struct vchiq_service *srv =
			rcu_dereference(state->services[idx++]);

		if (srv && srv->srvstate != VCHIQ_SRVSTATE_FREE &&
		    srv->instance == instance) {
			*pidx = idx;
			return srv;
		}
	}

	*pidx = idx;
	return NULL;
}
/*
 * Referenced iterator: like __next_service_by_instance() but takes a
 * reference on the returned service (balance with vchiq_service_put()).
 * Services whose refcount has already dropped to zero are skipped.
 */
struct vchiq_service *
next_service_by_instance(struct vchiq_state *state,
			 struct vchiq_instance *instance,
			 int *pidx)
{
	struct vchiq_service *service;

	rcu_read_lock();
	while (1) {
		service = __next_service_by_instance(state, instance, pidx);
		if (!service)
			break;
		if (kref_get_unless_zero(&service->ref_count)) {
			service = rcu_pointer_handoff(service);
			break;
		}
	}
	rcu_read_unlock();
	return service;
}
/* Take an additional reference on @service; WARNs (once per call) on NULL. */
void
vchiq_service_get(struct vchiq_service *service)
{
	if (service) {
		kref_get(&service->ref_count);
		return;
	}

	WARN(1, "%s service is NULL\n", __func__);
}
/*
 * kref release callback: final teardown once the last reference is gone.
 * Clears the table slot, invokes the user-data destructor if provided,
 * and frees the service after an RCU grace period.
 */
static void service_release(struct kref *kref)
{
	struct vchiq_service *service =
		container_of(kref, struct vchiq_service, ref_count);
	struct vchiq_state *state = service->state;

	WARN_ON(service->srvstate != VCHIQ_SRVSTATE_FREE);
	rcu_assign_pointer(state->services[service->localport], NULL);
	if (service->userdata_term)
		service->userdata_term(service->base.userdata);
	kfree_rcu(service, rcu);
}
/* Drop a reference on @service, freeing it via service_release() when the
 * count reaches zero; WARNs on NULL.
 */
void
vchiq_service_put(struct vchiq_service *service)
{
	if (service)
		kref_put(&service->ref_count, service_release);
	else
		WARN(1, "%s: service is NULL\n", __func__);
}
/*
 * Return the client id of the service identified by @handle, or 0 if the
 * handle does not resolve. The value is read under RCU without taking a
 * reference.
 */
int
vchiq_get_client_id(struct vchiq_instance *instance, unsigned int handle)
{
	struct vchiq_service *service;
	int id = 0;

	rcu_read_lock();
	service = handle_to_service(instance, handle);
	if (service)
		id = service->client_id;
	rcu_read_unlock();

	return id;
}
/*
 * Return the user data pointer of the service identified by @handle, or
 * NULL if the handle does not resolve. Read under RCU; no reference taken.
 */
void *
vchiq_get_service_userdata(struct vchiq_instance *instance, unsigned int handle)
{
	struct vchiq_service *service;
	void *userdata = NULL;

	rcu_read_lock();
	service = handle_to_service(instance, handle);
	if (service)
		userdata = service->base.userdata;
	rcu_read_unlock();

	return userdata;
}
EXPORT_SYMBOL(vchiq_get_service_userdata);
/*
 * Mark @service as closing and unblock anything waiting on it.
 *
 * The empty lock/unlock pairs act as barriers: any thread already inside
 * the recycle or slot critical sections finishes before we proceed, so it
 * will observe service->closing on its next pass.
 */
static void
mark_service_closing_internal(struct vchiq_service *service, int sh_thread)
{
	struct vchiq_state *state = service->state;
	struct vchiq_service_quota *quota;

	service->closing = 1;

	/* Synchronise with other threads. */
	mutex_lock(&state->recycle_mutex);
	mutex_unlock(&state->recycle_mutex);
	if (!sh_thread || (state->conn_state != VCHIQ_CONNSTATE_PAUSE_SENT)) {
		/*
		 * If we're pausing then the slot_mutex is held until resume
		 * by the slot handler. Therefore don't try to acquire this
		 * mutex if we're the slot handler and in the pause sent state.
		 * We don't need to in this case anyway.
		 */
		mutex_lock(&state->slot_mutex);
		mutex_unlock(&state->slot_mutex);
	}

	/* Unblock any sending thread. */
	quota = &state->service_quotas[service->localport];
	complete(&quota->quota_event);
}
/* Non-slot-handler wrapper for mark_service_closing_internal(). */
static void
mark_service_closing(struct vchiq_service *service)
{
	mark_service_closing_internal(service, 0);
}
/*
 * Deliver @reason/@header to the service's registered callback.
 * Any callback error other than -EAGAIN is logged and swallowed
 * (returned as 0). For every reason except MESSAGE_AVAILABLE the header
 * is released here on the service's behalf.
 */
static inline int
make_service_callback(struct vchiq_service *service, enum vchiq_reason reason,
		      struct vchiq_header *header, void *bulk_userdata)
{
	int status;

	vchiq_log_trace(vchiq_core_log_level, "%d: callback:%d (%s, %pK, %pK)",
			service->state->id, service->localport, reason_names[reason],
			header, bulk_userdata);
	status = service->base.callback(service->instance, reason, header, service->handle,
					bulk_userdata);
	if (status && (status != -EAGAIN)) {
		vchiq_log_warning(vchiq_core_log_level,
				  "%d: ignoring ERROR from callback to service %x",
				  service->state->id, service->handle);
		status = 0;
	}

	if (reason != VCHIQ_MESSAGE_AVAILABLE)
		vchiq_release_message(service->instance, service->handle, header);

	return status;
}
/* Change the connection state, logging the transition and notifying the
 * platform layer (which starts the keepalive thread on first connect).
 */
inline void
vchiq_set_conn_state(struct vchiq_state *state, enum vchiq_connstate newstate)
{
	enum vchiq_connstate oldstate = state->conn_state;

	vchiq_log_info(vchiq_core_log_level, "%d: %s->%s", state->id, conn_state_names[oldstate],
		       conn_state_names[newstate]);
	state->conn_state = newstate;
	vchiq_platform_conn_state_changed(state, oldstate, newstate);
}
/* This initialises a single remote_event, and the associated wait_queue. */
static inline void
remote_event_create(wait_queue_head_t *wq, struct remote_event *event)
{
	event->armed = 0;
	/*
	 * Don't clear the 'fired' flag because it may already have been set
	 * by the other side.
	 */
	init_waitqueue_head(wq);
}
/*
 * All the event waiting routines in VCHIQ used a custom semaphore
 * implementation that filtered most signals. This achieved a behaviour
 * similar to the "killable" family of functions. While cleaning up this
 * code all the routines were switched to the "interruptible" family of
 * functions, as the former was deemed unjustified and the use of
 * "killable" put all VCHIQ's threads in D state.
 */
/*
 * Wait (interruptibly) for the peer to fire @event.
 * Returns 1 once the event has fired, 0 if interrupted by a signal.
 * 'armed' advertises to the peer that a waiter exists; the dsb/wmb
 * barriers publish the flag before sleeping / after waking.
 */
static inline int
remote_event_wait(wait_queue_head_t *wq, struct remote_event *event)
{
	if (!event->fired) {
		event->armed = 1;
		dsb(sy);
		if (wait_event_interruptible(*wq, event->fired)) {
			/* Signal received - report failure without consuming the event. */
			event->armed = 0;
			return 0;
		}
		event->armed = 0;
		/* Ensure that the peer sees that we are not waiting (armed == 0). */
		wmb();
	}

	event->fired = 0;
	return 1;
}
/*
 * Acknowledge that the event has been signalled, and wake any waiters. Usually
 * called as a result of the doorbell being rung.
 */
static inline void
remote_event_signal_local(wait_queue_head_t *wq, struct remote_event *event)
{
	event->fired = 1;
	event->armed = 0;
	wake_up_all(wq);
}
/* Check if a single event has been signalled, waking the waiters if it has. */
static inline void
remote_event_poll(wait_queue_head_t *wq, struct remote_event *event)
{
	/* Only propagate when someone is actually waiting (armed). */
	if (event->fired && event->armed)
		remote_event_signal_local(wq, event);
}
/*
 * VCHIQ used a small, fixed number of remote events. It is simplest to
 * enumerate them here for polling.
 */
void
remote_event_pollall(struct vchiq_state *state)
{
	remote_event_poll(&state->sync_trigger_event, &state->local->sync_trigger);
	remote_event_poll(&state->sync_release_event, &state->local->sync_release);
	remote_event_poll(&state->trigger_event, &state->local->trigger);
	remote_event_poll(&state->recycle_event, &state->local->recycle);
}
/*
 * Round up message sizes so that any space at the end of a slot is always
 * big enough for a header. Adds room for the header itself, then rounds up
 * to a multiple of the header size (a power of two, verified earlier by a
 * static assertion).
 */
static inline size_t
calc_stride(size_t size)
{
	const size_t hdr = sizeof(struct vchiq_header);

	/* header + payload, rounded up to the next header-size boundary */
	return (size + (2 * hdr) - 1) & ~(hdr - 1);
}
/* Called by the slot handler thread */
/*
 * Find a service advertising @fourcc that can accept an OPEN: either one
 * in the LISTENING state, or an OPEN one whose remote port is still free.
 * Returns the service with an elevated refcount, or NULL.
 */
static struct vchiq_service *
get_listening_service(struct vchiq_state *state, int fourcc)
{
	int i;

	WARN_ON(fourcc == VCHIQ_FOURCC_INVALID);

	rcu_read_lock();
	for (i = 0; i < state->unused_service; i++) {
		struct vchiq_service *service;

		service = rcu_dereference(state->services[i]);
		if (service &&
		    service->public_fourcc == fourcc &&
		    (service->srvstate == VCHIQ_SRVSTATE_LISTENING ||
		     (service->srvstate == VCHIQ_SRVSTATE_OPEN &&
		      service->remoteport == VCHIQ_PORT_FREE)) &&
		    kref_get_unless_zero(&service->ref_count)) {
			service = rcu_pointer_handoff(service);
			rcu_read_unlock();
			return service;
		}
	}
	rcu_read_unlock();
	return NULL;
}
/* Called by the slot handler thread */
/*
 * Find the OPEN service connected to remote @port.
 * Returns the service with an elevated refcount, or NULL.
 */
static struct vchiq_service *
get_connected_service(struct vchiq_state *state, unsigned int port)
{
	int i;

	rcu_read_lock();
	for (i = 0; i < state->unused_service; i++) {
		struct vchiq_service *service =
			rcu_dereference(state->services[i]);

		if (service && service->srvstate == VCHIQ_SRVSTATE_OPEN &&
		    service->remoteport == port &&
		    kref_get_unless_zero(&service->ref_count)) {
			service = rcu_pointer_handoff(service);
			rcu_read_unlock();
			return service;
		}
	}
	rcu_read_unlock();
	return NULL;
}
/*
 * Record a poll request of @poll_type for @service (or just flag the state
 * when service is NULL) and kick the slot handler thread. The cmpxchg
 * loops set the per-service and per-word bits atomically without a lock.
 */
inline void
request_poll(struct vchiq_state *state, struct vchiq_service *service,
	     int poll_type)
{
	u32 value;
	int index;

	if (!service)
		goto skip_service;

	/* Mark which poll operation is wanted on this service. */
	do {
		value = atomic_read(&service->poll_flags);
	} while (atomic_cmpxchg(&service->poll_flags, value,
		 value | BIT(poll_type)) != value);

	/* Mark the service itself as needing a poll in the state bitset. */
	index = BITSET_WORD(service->localport);
	do {
		value = atomic_read(&state->poll_services[index]);
	} while (atomic_cmpxchg(&state->poll_services[index],
		 value, value | BIT(service->localport & 0x1f)) != value);

skip_service:
	state->poll_needed = 1;

	/* Ensure the slot handler thread sees the poll_needed flag. */
	wmb();

	/* ... and ensure the slot handler runs. */
	remote_event_signal_local(&state->trigger_event, &state->local->trigger);
}
/*
 * Called from queue_message, by the slot handler and application threads,
 * with slot_mutex held
 */
/*
 * Reserve @space bytes of transmit slot space, padding out the current
 * slot and acquiring a new one if needed. Returns a pointer to the header
 * location, or NULL if no space is available (non-blocking callers, a
 * signal while waiting, or an inconsistent tx position).
 */
static struct vchiq_header *
reserve_space(struct vchiq_state *state, size_t space, int is_blocking)
{
	struct vchiq_shared_state *local = state->local;
	int tx_pos = state->local_tx_pos;
	int slot_space = VCHIQ_SLOT_SIZE - (tx_pos & VCHIQ_SLOT_MASK);

	if (space > slot_space) {
		struct vchiq_header *header;

		/* Fill the remaining space with padding */
		WARN_ON(!state->tx_data);
		header = (struct vchiq_header *)
			(state->tx_data + (tx_pos & VCHIQ_SLOT_MASK));
		header->msgid = VCHIQ_MSGID_PADDING;
		header->size = slot_space - sizeof(struct vchiq_header);

		tx_pos += slot_space;
	}

	/* If necessary, get the next slot. */
	if ((tx_pos & VCHIQ_SLOT_MASK) == 0) {
		int slot_index;

		/* If there is no free slot... */

		if (!try_wait_for_completion(&state->slot_available_event)) {
			/* ...wait for one. */

			VCHIQ_STATS_INC(state, slot_stalls);

			/* But first, flush through the last slot. */
			state->local_tx_pos = tx_pos;
			local->tx_pos = tx_pos;
			remote_event_signal(&state->remote->trigger);

			if (!is_blocking ||
			    (wait_for_completion_interruptible(&state->slot_available_event)))
				return NULL; /* No space available */
		}

		if (tx_pos == (state->slot_queue_available * VCHIQ_SLOT_SIZE)) {
			/* Give back the consumed completion before bailing. */
			complete(&state->slot_available_event);
			pr_warn("%s: invalid tx_pos: %d\n", __func__, tx_pos);
			return NULL;
		}

		slot_index = local->slot_queue[SLOT_QUEUE_INDEX_FROM_POS_MASKED(tx_pos)];
		state->tx_data =
			(char *)SLOT_DATA_FROM_INDEX(state, slot_index);
	}

	state->local_tx_pos = tx_pos + space;

	return (struct vchiq_header *)(state->tx_data +
		(tx_pos & VCHIQ_SLOT_MASK));
}
/*
 * Account for one freed DATA message in a recycled slot: decrement the
 * sending service's message quota, and (once per slot per service, tracked
 * via @service_found) its slot quota, completing quota_event whenever a
 * count drops back to its quota so blocked senders can resume.
 */
static void
process_free_data_message(struct vchiq_state *state, u32 *service_found,
			  struct vchiq_header *header)
{
	int msgid = header->msgid;
	int port = VCHIQ_MSG_SRCPORT(msgid);
	struct vchiq_service_quota *quota = &state->service_quotas[port];
	int count;

	spin_lock(&quota_spinlock);
	count = quota->message_use_count;
	if (count > 0)
		quota->message_use_count = count - 1;
	spin_unlock(&quota_spinlock);

	if (count == quota->message_quota) {
		/*
		 * Signal the service that it
		 * has dropped below its quota
		 */
		complete(&quota->quota_event);
	} else if (count == 0) {
		/* Use count was already zero - accounting has gone wrong. */
		vchiq_log_error(vchiq_core_log_level,
				"service %d message_use_count=%d (header %pK, msgid %x, header->msgid %x, header->size %x)",
				port, quota->message_use_count, header, msgid, header->msgid,
				header->size);
		WARN(1, "invalid message use count\n");
	}
	if (!BITSET_IS_SET(service_found, port)) {
		/* Set the found bit for this service */
		BITSET_SET(service_found, port);

		spin_lock(&quota_spinlock);
		count = quota->slot_use_count;
		if (count > 0)
			quota->slot_use_count = count - 1;
		spin_unlock(&quota_spinlock);

		if (count > 0) {
			/*
			 * Signal the service in case
			 * it has dropped below its quota
			 */
			complete(&quota->quota_event);
			vchiq_log_trace(vchiq_core_log_level, "%d: pfq:%d %x@%pK - slot_use->%d",
					state->id, port, header->size, header, count - 1);
		} else {
			vchiq_log_error(vchiq_core_log_level,
					"service %d slot_use_count=%d (header %pK, msgid %x, header->msgid %x, header->size %x)",
					port, count, header, msgid, header->msgid, header->size);
			WARN(1, "bad slot use count\n");
		}
	}
}
/* Called by the recycle thread. */
/*
 * Walk the slots the peer has recycled, account for every DATA message
 * they contained (via process_free_data_message()), and return each slot
 * to the available pool, waking any sender blocked on slot space.
 * @service_found is a caller-supplied bitmask buffer of @length bytes.
 */
static void
process_free_queue(struct vchiq_state *state, u32 *service_found,
		   size_t length)
{
	struct vchiq_shared_state *local = state->local;
	int slot_queue_available;

	/*
	 * Find slots which have been freed by the other side, and return them
	 * to the available queue.
	 */
	slot_queue_available = state->slot_queue_available;

	/*
	 * Use a memory barrier to ensure that any state that may have been
	 * modified by another thread is not masked by stale prefetched
	 * values.
	 */
	mb();

	while (slot_queue_available != local->slot_queue_recycle) {
		unsigned int pos;
		int slot_index = local->slot_queue[slot_queue_available &
			VCHIQ_SLOT_QUEUE_MASK];
		char *data = (char *)SLOT_DATA_FROM_INDEX(state, slot_index);
		int data_found = 0;

		slot_queue_available++;

		/*
		 * Beware of the address dependency - data is calculated
		 * using an index written by the other side.
		 */
		rmb();

		vchiq_log_trace(vchiq_core_log_level, "%d: pfq %d=%pK %x %x",
				state->id, slot_index, data, local->slot_queue_recycle,
				slot_queue_available);

		/* Initialise the bitmask for services which have used this slot */
		memset(service_found, 0, length);

		pos = 0;

		while (pos < VCHIQ_SLOT_SIZE) {
			struct vchiq_header *header =
				(struct vchiq_header *)(data + pos);
			int msgid = header->msgid;

			if (VCHIQ_MSG_TYPE(msgid) == VCHIQ_MSG_DATA) {
				process_free_data_message(state, service_found,
							  header);
				data_found = 1;
			}

			pos += calc_stride(header->size);
			if (pos > VCHIQ_SLOT_SIZE) {
				/* A message overran the slot - corrupt contents. */
				vchiq_log_error(vchiq_core_log_level,
						"pfq - pos %x: header %pK, msgid %x, header->msgid %x, header->size %x",
						pos, header, msgid, header->msgid, header->size);
				WARN(1, "invalid slot position\n");
			}
		}

		if (data_found) {
			int count;

			/* One slot's worth of data messages has been freed. */
			spin_lock(&quota_spinlock);
			count = state->data_use_count;
			if (count > 0)
				state->data_use_count = count - 1;
			spin_unlock(&quota_spinlock);
			if (count == state->data_quota)
				complete(&state->data_quota_event);
		}

		/*
		 * Don't allow the slot to be reused until we are no
		 * longer interested in it.
		 */
		mb();

		state->slot_queue_available = slot_queue_available;
		complete(&state->slot_available_event);
	}
}
static ssize_t
memcpy_copy_callback(void *context, void *dest, size_t offset, size_t maxsize)
{
	/*
	 * Trivial copy callback for queue_message(): the context is a plain
	 * buffer, so copy the requested window straight across and report
	 * that every byte was consumed.
	 */
	memcpy((char *)dest + offset, (const char *)context + offset, maxsize);
	return maxsize;
}
static ssize_t
copy_message_data(ssize_t (*copy_callback)(void *context, void *dest, size_t offset,
					   size_t maxsize),
		  void *context,
		  void *dest,
		  size_t size)
{
	size_t copied = 0;

	/* Drive the callback until the whole payload has been transferred. */
	while (copied < size) {
		size_t remaining = size - copied;
		ssize_t chunk = copy_callback(context, (char *)dest + copied,
					      copied, remaining);

		/* Propagate callback errors verbatim. */
		if (chunk < 0)
			return chunk;

		/*
		 * Zero progress would loop forever, and overshooting the
		 * remaining space would corrupt the slot - treat both as I/O
		 * errors.
		 */
		if (chunk == 0 || chunk > remaining)
			return -EIO;

		copied += chunk;
	}

	return size;
}
/* Called by the slot handler and application threads */
/*
 * queue_message() - copy a message into the shared transmit slots
 * @state:         vchiq state
 * @service:       sending service; required for VCHIQ_MSG_DATA, may be NULL
 *                 for control messages
 * @msgid:         encoded message type / source port / destination port
 * @copy_callback: copies the payload into the slot (unused when size == 0)
 * @context:       opaque cookie passed through to @copy_callback
 * @size:          payload size in bytes
 * @flags:         QMFLAGS_* bits controlling slot_mutex handling / blocking
 *
 * Data messages are throttled against both the global data quota and the
 * per-service message/slot quotas before space is reserved.
 *
 * Return: 0 on success; -EAGAIN if interrupted or no space; -EINVAL on a
 * NULL data service or payload copy failure; -EHOSTDOWN if the service is
 * closing/closed.
 */
static int
queue_message(struct vchiq_state *state, struct vchiq_service *service,
	      int msgid,
	      ssize_t (*copy_callback)(void *context, void *dest,
				       size_t offset, size_t maxsize),
	      void *context, size_t size, int flags)
{
	struct vchiq_shared_state *local;
	struct vchiq_service_quota *quota = NULL;
	struct vchiq_header *header;
	int type = VCHIQ_MSG_TYPE(msgid);
	size_t stride;

	local = state->local;
	stride = calc_stride(size);
	/* A message (header + payload) must never span slots. */
	WARN_ON(stride > VCHIQ_SLOT_SIZE);
	if (!(flags & QMFLAGS_NO_MUTEX_LOCK) &&
	    mutex_lock_killable(&state->slot_mutex))
		return -EAGAIN;
	if (type == VCHIQ_MSG_DATA) {
		int tx_end_index;

		if (!service) {
			WARN(1, "%s: service is NULL\n", __func__);
			mutex_unlock(&state->slot_mutex);
			return -EINVAL;
		}
		/* Data messages manage slot_mutex themselves. */
		WARN_ON(flags & (QMFLAGS_NO_MUTEX_LOCK |
				 QMFLAGS_NO_MUTEX_UNLOCK));
		if (service->closing) {
			/* The service has been closed */
			mutex_unlock(&state->slot_mutex);
			return -EHOSTDOWN;
		}
		quota = &state->service_quotas[service->localport];
		spin_lock(&quota_spinlock);
		/*
		 * Ensure this service doesn't use more than its quota of
		 * messages or slots
		 */
		tx_end_index = SLOT_QUEUE_INDEX_FROM_POS(state->local_tx_pos + stride - 1);
		/*
		 * Ensure data messages don't use more than their quota of
		 * slots
		 */
		while ((tx_end_index != state->previous_data_index) &&
		       (state->data_use_count == state->data_quota)) {
			VCHIQ_STATS_INC(state, data_stalls);
			/* Drop both locks while sleeping for quota. */
			spin_unlock(&quota_spinlock);
			mutex_unlock(&state->slot_mutex);
			if (wait_for_completion_interruptible(&state->data_quota_event))
				return -EAGAIN;
			mutex_lock(&state->slot_mutex);
			spin_lock(&quota_spinlock);
			/* Recompute: tx_pos may have moved while we slept. */
			tx_end_index = SLOT_QUEUE_INDEX_FROM_POS(state->local_tx_pos + stride - 1);
			if ((tx_end_index == state->previous_data_index) ||
			    (state->data_use_count < state->data_quota)) {
				/* Pass the signal on to other waiters */
				complete(&state->data_quota_event);
				break;
			}
		}
		/* Now wait for per-service message/slot quota headroom. */
		while ((quota->message_use_count == quota->message_quota) ||
		       ((tx_end_index != quota->previous_tx_index) &&
			(quota->slot_use_count == quota->slot_quota))) {
			spin_unlock(&quota_spinlock);
			vchiq_log_trace(vchiq_core_log_level,
					"%d: qm:%d %s,%zx - quota stall (msg %d, slot %d)",
					state->id, service->localport, msg_type_str(type), size,
					quota->message_use_count, quota->slot_use_count);
			VCHIQ_SERVICE_STATS_INC(service, quota_stalls);
			mutex_unlock(&state->slot_mutex);
			if (wait_for_completion_interruptible(&quota->quota_event))
				return -EAGAIN;
			if (service->closing)
				return -EHOSTDOWN;
			if (mutex_lock_killable(&state->slot_mutex))
				return -EAGAIN;
			if (service->srvstate != VCHIQ_SRVSTATE_OPEN) {
				/* The service has been closed */
				mutex_unlock(&state->slot_mutex);
				return -EHOSTDOWN;
			}
			spin_lock(&quota_spinlock);
			tx_end_index = SLOT_QUEUE_INDEX_FROM_POS(state->local_tx_pos + stride - 1);
		}
		spin_unlock(&quota_spinlock);
	}
	header = reserve_space(state, stride, flags & QMFLAGS_IS_BLOCKING);
	if (!header) {
		if (service)
			VCHIQ_SERVICE_STATS_INC(service, slot_stalls);
		/*
		 * In the event of a failure, return the mutex to the
		 * state it was in
		 */
		if (!(flags & QMFLAGS_NO_MUTEX_LOCK))
			mutex_unlock(&state->slot_mutex);
		return -EAGAIN;
	}
	if (type == VCHIQ_MSG_DATA) {
		ssize_t callback_result;
		int tx_end_index;
		int slot_use_count;

		vchiq_log_info(vchiq_core_log_level, "%d: qm %s@%pK,%zx (%d->%d)", state->id,
			       msg_type_str(VCHIQ_MSG_TYPE(msgid)), header, size,
			       VCHIQ_MSG_SRCPORT(msgid), VCHIQ_MSG_DSTPORT(msgid));
		WARN_ON(flags & (QMFLAGS_NO_MUTEX_LOCK |
				 QMFLAGS_NO_MUTEX_UNLOCK));
		callback_result =
			copy_message_data(copy_callback, context,
					  header->data, size);
		if (callback_result < 0) {
			mutex_unlock(&state->slot_mutex);
			VCHIQ_SERVICE_STATS_INC(service, error_count);
			return -EINVAL;
		}
		if (SRVTRACE_ENABLED(service,
				     VCHIQ_LOG_INFO))
			vchiq_log_dump_mem("Sent", 0,
					   header->data,
					   min_t(size_t, 16, callback_result));
		/* Charge the message against the quotas just checked above. */
		spin_lock(&quota_spinlock);
		quota->message_use_count++;
		tx_end_index =
			SLOT_QUEUE_INDEX_FROM_POS(state->local_tx_pos - 1);
		/*
		 * If this transmission can't fit in the last slot used by any
		 * service, the data_use_count must be increased.
		 */
		if (tx_end_index != state->previous_data_index) {
			state->previous_data_index = tx_end_index;
			state->data_use_count++;
		}
		/*
		 * If this isn't the same slot last used by this service,
		 * the service's slot_use_count must be increased.
		 */
		if (tx_end_index != quota->previous_tx_index) {
			quota->previous_tx_index = tx_end_index;
			slot_use_count = ++quota->slot_use_count;
		} else {
			slot_use_count = 0;
		}
		spin_unlock(&quota_spinlock);
		if (slot_use_count)
			vchiq_log_trace(vchiq_core_log_level,
					"%d: qm:%d %s,%zx - slot_use->%d (hdr %p)", state->id,
					service->localport, msg_type_str(VCHIQ_MSG_TYPE(msgid)),
					size, slot_use_count, header);
		VCHIQ_SERVICE_STATS_INC(service, ctrl_tx_count);
		VCHIQ_SERVICE_STATS_ADD(service, ctrl_tx_bytes, size);
	} else {
		vchiq_log_info(vchiq_core_log_level, "%d: qm %s@%pK,%zx (%d->%d)", state->id,
			       msg_type_str(VCHIQ_MSG_TYPE(msgid)), header, size,
			       VCHIQ_MSG_SRCPORT(msgid), VCHIQ_MSG_DSTPORT(msgid));
		if (size != 0) {
			/*
			 * It is assumed for now that this code path
			 * only happens from calls inside this file.
			 *
			 * External callers are through the vchiq_queue_message
			 * path which always sets the type to be VCHIQ_MSG_DATA
			 *
			 * At first glance this appears to be correct but
			 * more review is needed.
			 */
			copy_message_data(copy_callback, context,
					  header->data, size);
		}
		VCHIQ_STATS_INC(state, ctrl_tx_count);
	}
	/* Publish the payload before msgid; see the wmb() below. */
	header->msgid = msgid;
	header->size = size;
	{
		int svc_fourcc;

		svc_fourcc = service
			? service->base.fourcc
			: VCHIQ_MAKE_FOURCC('?', '?', '?', '?');
		vchiq_log_info(SRVTRACE_LEVEL(service),
			       "Sent Msg %s(%u) to %c%c%c%c s:%u d:%d len:%zu",
			       msg_type_str(VCHIQ_MSG_TYPE(msgid)), VCHIQ_MSG_TYPE(msgid),
			       VCHIQ_FOURCC_AS_4CHARS(svc_fourcc), VCHIQ_MSG_SRCPORT(msgid),
			       VCHIQ_MSG_DSTPORT(msgid), size);
	}
	/* Make sure the new header is visible to the peer. */
	wmb();
	/* Make the new tx_pos visible to the peer. */
	local->tx_pos = state->local_tx_pos;
	wmb();
	if (service && (type == VCHIQ_MSG_CLOSE))
		set_service_state(service, VCHIQ_SRVSTATE_CLOSESENT);
	if (!(flags & QMFLAGS_NO_MUTEX_UNLOCK))
		mutex_unlock(&state->slot_mutex);
	/* Wake the peer's slot handler to consume the message. */
	remote_event_signal(&state->remote->trigger);
	return 0;
}
/* Called by the slot handler and application threads */
/*
 * queue_message_sync() - send a message through the single synchronous slot
 * @state:         vchiq state
 * @service:       sending service, or NULL for control messages
 * @msgid:         encoded message type / source port / destination port
 * @copy_callback: copies the payload into the sync slot
 * @context:       opaque cookie passed through to @copy_callback
 * @size:          payload size in bytes
 * @is_blocking:   unused; kept for signature symmetry with queue_message()
 *
 * Serialised by state->sync_mutex; a RESUME message arrives with the mutex
 * already held by the pause path, and a PAUSE leaves it held on return.
 *
 * Return: 0 on success, -EAGAIN if interrupted while taking sync_mutex,
 * -EINVAL if the payload copy callback fails.
 */
static int
queue_message_sync(struct vchiq_state *state, struct vchiq_service *service,
		   int msgid,
		   ssize_t (*copy_callback)(void *context, void *dest,
					    size_t offset, size_t maxsize),
		   void *context, int size, int is_blocking)
{
	struct vchiq_shared_state *local;
	struct vchiq_header *header;
	ssize_t callback_result;

	local = state->local;

	/* RESUME is sent with sync_mutex already held - don't re-acquire. */
	if (VCHIQ_MSG_TYPE(msgid) != VCHIQ_MSG_RESUME &&
	    mutex_lock_killable(&state->sync_mutex))
		return -EAGAIN;

	/* Wait for the peer to release the (single) sync slot. */
	remote_event_wait(&state->sync_release_event, &local->sync_release);

	/* Ensure that reads don't overtake the remote_event_wait. */
	rmb();

	header = (struct vchiq_header *)SLOT_DATA_FROM_INDEX(state,
							     local->slot_sync);

	{
		int oldmsgid = header->msgid;

		/* A released sync slot should have been marked PADDING. */
		if (oldmsgid != VCHIQ_MSGID_PADDING)
			vchiq_log_error(vchiq_core_log_level, "%d: qms - msgid %x, not PADDING",
					state->id, oldmsgid);
	}

	vchiq_log_info(vchiq_sync_log_level,
		       "%d: qms %s@%pK,%x (%d->%d)", state->id,
		       msg_type_str(VCHIQ_MSG_TYPE(msgid)),
		       header, size, VCHIQ_MSG_SRCPORT(msgid),
		       VCHIQ_MSG_DSTPORT(msgid));

	callback_result =
		copy_message_data(copy_callback, context,
				  header->data, size);

	if (callback_result < 0) {
		/*
		 * Fix: this function holds sync_mutex (taken above), not
		 * slot_mutex. The previous code unlocked slot_mutex here,
		 * which was never acquired on this path, leaving sync_mutex
		 * held forever and corrupting slot_mutex state.
		 */
		mutex_unlock(&state->sync_mutex);
		VCHIQ_SERVICE_STATS_INC(service, error_count);
		return -EINVAL;
	}

	if (service) {
		if (SRVTRACE_ENABLED(service,
				     VCHIQ_LOG_INFO))
			vchiq_log_dump_mem("Sent", 0,
					   header->data,
					   min_t(size_t, 16, callback_result));
		VCHIQ_SERVICE_STATS_INC(service, ctrl_tx_count);
		VCHIQ_SERVICE_STATS_ADD(service, ctrl_tx_bytes, size);
	} else {
		VCHIQ_STATS_INC(state, ctrl_tx_count);
	}

	header->size = size;
	header->msgid = msgid;

	if (vchiq_sync_log_level >= VCHIQ_LOG_TRACE) {
		int svc_fourcc;

		svc_fourcc = service
			? service->base.fourcc
			: VCHIQ_MAKE_FOURCC('?', '?', '?', '?');

		vchiq_log_trace(vchiq_sync_log_level,
				"Sent Sync Msg %s(%u) to %c%c%c%c s:%u d:%d len:%d",
				msg_type_str(VCHIQ_MSG_TYPE(msgid)), VCHIQ_MSG_TYPE(msgid),
				VCHIQ_FOURCC_AS_4CHARS(svc_fourcc), VCHIQ_MSG_SRCPORT(msgid),
				VCHIQ_MSG_DSTPORT(msgid), size);
	}

	remote_event_signal(&state->remote->sync_trigger);

	/* A PAUSE keeps sync_mutex held until the matching RESUME is sent. */
	if (VCHIQ_MSG_TYPE(msgid) != VCHIQ_MSG_PAUSE)
		mutex_unlock(&state->sync_mutex);

	return 0;
}
static inline void
claim_slot(struct vchiq_slot_info *slot)
{
	/*
	 * Record one more outstanding reference to this slot; it is freed
	 * when release_count catches up with use_count (see release_slot).
	 */
	slot->use_count += 1;
}
/*
 * release_slot() - drop a reference to a receive slot, recycling it when the
 * last reference goes.
 *
 * @state:     vchiq state
 * @slot_info: accounting record of the slot being released
 * @header:    claimed message header being released, or NULL when dropping
 *             the implicit per-slot reference (see parse_rx_slots())
 * @service:   owning service, or NULL
 *
 * When release_count catches up with use_count the slot index is pushed onto
 * the remote side's recycle queue and the peer is signalled.
 * Serialised by state->recycle_mutex.
 */
static void
release_slot(struct vchiq_state *state, struct vchiq_slot_info *slot_info,
	     struct vchiq_header *header, struct vchiq_service *service)
{
	mutex_lock(&state->recycle_mutex);
	if (header) {
		int msgid = header->msgid;

		/* Nothing to do if not claimed, or the service is closing. */
		if (((msgid & VCHIQ_MSGID_CLAIMED) == 0) || (service && service->closing)) {
			mutex_unlock(&state->recycle_mutex);
			return;
		}
		/* Rewrite the message header to prevent a double release */
		header->msgid = msgid & ~VCHIQ_MSGID_CLAIMED;
	}
	slot_info->release_count++;
	if (slot_info->release_count == slot_info->use_count) {
		int slot_queue_recycle;
		/* Add to the freed queue */
		/*
		 * A read barrier is necessary here to prevent speculative
		 * fetches of remote->slot_queue_recycle from overtaking the
		 * mutex.
		 */
		rmb();
		slot_queue_recycle = state->remote->slot_queue_recycle;
		state->remote->slot_queue[slot_queue_recycle &
			VCHIQ_SLOT_QUEUE_MASK] =
			SLOT_INDEX_FROM_INFO(state, slot_info);
		state->remote->slot_queue_recycle = slot_queue_recycle + 1;
		vchiq_log_info(vchiq_core_log_level, "%d: %s %d - recycle->%x", state->id, __func__,
			       SLOT_INDEX_FROM_INFO(state, slot_info),
			       state->remote->slot_queue_recycle);
		/*
		 * A write barrier is necessary, but remote_event_signal
		 * contains one.
		 */
		remote_event_signal(&state->remote->recycle);
	}
	mutex_unlock(&state->recycle_mutex);
}
static inline enum vchiq_reason
get_bulk_reason(struct vchiq_bulk *bulk)
{
	/* Map (direction, aborted?) onto the matching callback reason. */
	int aborted = (bulk->actual == VCHIQ_BULK_ACTUAL_ABORTED);

	if (bulk->dir == VCHIQ_BULK_TRANSMIT)
		return aborted ? VCHIQ_BULK_TRANSMIT_ABORTED
			       : VCHIQ_BULK_TRANSMIT_DONE;

	return aborted ? VCHIQ_BULK_RECEIVE_ABORTED : VCHIQ_BULK_RECEIVE_DONE;
}
/* Called by the slot handler - don't hold the bulk mutex */
/*
 * notify_bulks() - deliver completion notifications for finished bulks
 * @service:    service owning the queue
 * @queue:      the service's bulk_tx or bulk_rx queue
 * @retry_poll: when non-zero, a failed callback schedules a re-poll and the
 *              error is returned; when zero, errors are swallowed
 *
 * Walks the queue from 'remove' up to 'process', updating stats and waking
 * blocking waiters or invoking service callbacks as each bulk dictates.
 *
 * Return: 0, or -EAGAIN if a callback could not be delivered (only when
 * @retry_poll is set).
 */
static int
notify_bulks(struct vchiq_service *service, struct vchiq_bulk_queue *queue,
	     int retry_poll)
{
	int status = 0;

	vchiq_log_trace(vchiq_core_log_level, "%d: nb:%d %cx - p=%x rn=%x r=%x", service->state->id,
			service->localport, (queue == &service->bulk_tx) ? 't' : 'r',
			queue->process, queue->remote_notify, queue->remove);
	queue->remote_notify = queue->process;
	while (queue->remove != queue->remote_notify) {
		struct vchiq_bulk *bulk =
			&queue->bulks[BULK_INDEX(queue->remove)];
		/*
		 * Only generate callbacks for non-dummy bulk
		 * requests, and non-terminated services
		 */
		if (bulk->data && service->instance) {
			if (bulk->actual != VCHIQ_BULK_ACTUAL_ABORTED) {
				if (bulk->dir == VCHIQ_BULK_TRANSMIT) {
					VCHIQ_SERVICE_STATS_INC(service, bulk_tx_count);
					VCHIQ_SERVICE_STATS_ADD(service, bulk_tx_bytes,
								bulk->actual);
				} else {
					VCHIQ_SERVICE_STATS_INC(service, bulk_rx_count);
					VCHIQ_SERVICE_STATS_ADD(service, bulk_rx_bytes,
								bulk->actual);
				}
			} else {
				VCHIQ_SERVICE_STATS_INC(service, bulk_aborted_count);
			}
			if (bulk->mode == VCHIQ_BULK_MODE_BLOCKING) {
				struct bulk_waiter *waiter;

				/*
				 * The spinlock guards against the waiter
				 * being freed concurrently; it may already
				 * have given up and gone (waiter == NULL).
				 */
				spin_lock(&bulk_waiter_spinlock);
				waiter = bulk->userdata;
				if (waiter) {
					waiter->actual = bulk->actual;
					complete(&waiter->event);
				}
				spin_unlock(&bulk_waiter_spinlock);
			} else if (bulk->mode == VCHIQ_BULK_MODE_CALLBACK) {
				enum vchiq_reason reason =
					get_bulk_reason(bulk);
				status = make_service_callback(service, reason, NULL,
							       bulk->userdata);
				/* Stop here; 'remove' is not advanced, so
				 * this bulk is retried on the next poll. */
				if (status == -EAGAIN)
					break;
			}
		}
		queue->remove++;
		complete(&service->bulk_remove_event);
	}
	if (!retry_poll)
		status = 0;
	if (status == -EAGAIN)
		request_poll(service->state, service, (queue == &service->bulk_tx) ?
			     VCHIQ_POLL_TXNOTIFY : VCHIQ_POLL_RXNOTIFY);
	return status;
}
/*
 * poll_services_of_group() - service the poll requests for one 32-service
 * group.
 *
 * @state: vchiq state
 * @group: index into state->poll_services; service port is (group << 5) + i
 *
 * Atomically claims the group's pending flags, then for each flagged service
 * claims its per-service poll_flags and performs the requested remove /
 * terminate / bulk-notify actions. Failed close attempts are re-queued via
 * request_poll().
 */
static void
poll_services_of_group(struct vchiq_state *state, int group)
{
	/* xchg claims all pending bits so concurrent setters aren't lost. */
	u32 flags = atomic_xchg(&state->poll_services[group], 0);
	int i;

	for (i = 0; flags; i++) {
		struct vchiq_service *service;
		u32 service_flags;

		if ((flags & BIT(i)) == 0)
			continue;

		/* find_service_by_port takes a reference; put below. */
		service = find_service_by_port(state, (group << 5) + i);
		flags &= ~BIT(i);

		if (!service)
			continue;

		service_flags = atomic_xchg(&service->poll_flags, 0);
		if (service_flags & BIT(VCHIQ_POLL_REMOVE)) {
			vchiq_log_info(vchiq_core_log_level, "%d: ps - remove %d<->%d",
				       state->id, service->localport,
				       service->remoteport);
			/*
			 * Make it look like a client, because
			 * it must be removed and not left in
			 * the LISTENING state.
			 */
			service->public_fourcc = VCHIQ_FOURCC_INVALID;
			if (vchiq_close_service_internal(service, NO_CLOSE_RECVD))
				request_poll(state, service, VCHIQ_POLL_REMOVE);
		} else if (service_flags & BIT(VCHIQ_POLL_TERMINATE)) {
			vchiq_log_info(vchiq_core_log_level, "%d: ps - terminate %d<->%d",
				       state->id, service->localport, service->remoteport);
			if (vchiq_close_service_internal(service, NO_CLOSE_RECVD))
				request_poll(state, service, VCHIQ_POLL_TERMINATE);
		}
		if (service_flags & BIT(VCHIQ_POLL_TXNOTIFY))
			notify_bulks(service, &service->bulk_tx, RETRY_POLL);
		if (service_flags & BIT(VCHIQ_POLL_RXNOTIFY))
			notify_bulks(service, &service->bulk_rx, RETRY_POLL);
		vchiq_service_put(service);
	}
}
/* Called by the slot handler thread */
static void
poll_services(struct vchiq_state *state)
{
	int grp;

	/* Visit every 32-service group that may have pending poll flags. */
	for (grp = 0; grp < BITSET_SIZE(state->unused_service); grp++)
		poll_services_of_group(state, grp);
}
/* Called with the bulk_mutex held */
/*
 * abort_outstanding_bulks() - abort every bulk transfer still in flight on
 * one queue, pairing up local and remote halves with dummies as needed.
 *
 * @service: service whose queue is being flushed
 * @queue:   the service's bulk_tx or bulk_rx queue
 *
 * Advances 'process' until it has caught up with both local_insert and
 * remote_insert, fabricating a dummy remote half (remote_data/remote_size)
 * or a dummy local half (data/size, actual = ABORTED) for unmatched entries
 * so every slot ends up consistent.
 */
static void
abort_outstanding_bulks(struct vchiq_service *service,
			struct vchiq_bulk_queue *queue)
{
	int is_tx = (queue == &service->bulk_tx);

	vchiq_log_trace(vchiq_core_log_level, "%d: aob:%d %cx - li=%x ri=%x p=%x",
			service->state->id, service->localport, is_tx ? 't' : 'r',
			queue->local_insert, queue->remote_insert, queue->process);
	/* process must never be ahead of either insert index. */
	WARN_ON((int)(queue->local_insert - queue->process) < 0);
	WARN_ON((int)(queue->remote_insert - queue->process) < 0);
	while ((queue->process != queue->local_insert) ||
	       (queue->process != queue->remote_insert)) {
		struct vchiq_bulk *bulk = &queue->bulks[BULK_INDEX(queue->process)];

		if (queue->process == queue->remote_insert) {
			/* fabricate a matching dummy bulk */
			bulk->remote_data = NULL;
			bulk->remote_size = 0;
			queue->remote_insert++;
		}
		if (queue->process != queue->local_insert) {
			/* A real local request: complete it as-is. */
			vchiq_complete_bulk(service->instance, bulk);
			vchiq_log_info(SRVTRACE_LEVEL(service),
				       "%s %c%c%c%c d:%d ABORTED - tx len:%d, rx len:%d",
				       is_tx ? "Send Bulk to" : "Recv Bulk from",
				       VCHIQ_FOURCC_AS_4CHARS(service->base.fourcc),
				       service->remoteport, bulk->size, bulk->remote_size);
		} else {
			/* fabricate a matching dummy bulk */
			bulk->data = 0;
			bulk->size = 0;
			bulk->actual = VCHIQ_BULK_ACTUAL_ABORTED;
			bulk->dir = is_tx ? VCHIQ_BULK_TRANSMIT :
				VCHIQ_BULK_RECEIVE;
			queue->local_insert++;
		}
		queue->process++;
	}
}
/*
 * parse_open() - handle an incoming OPEN request from the peer.
 *
 * @state:  vchiq state
 * @header: the OPEN message (payload: fourcc, version, version_min)
 *
 * Looks for a local service listening on the requested fourcc, checks the
 * version ranges overlap, and replies with an OPENACK (sync or async as the
 * service requires). An invalid request or missing service is answered with
 * a CLOSE.
 *
 * Return: 1 if the message was fully dealt with (including the failure
 * replies), 0 if a reply could not be queued yet and the caller must retry.
 */
static int
parse_open(struct vchiq_state *state, struct vchiq_header *header)
{
	const struct vchiq_open_payload *payload;
	struct vchiq_service *service = NULL;
	int msgid, size;
	unsigned int localport, remoteport, fourcc;
	short version, version_min;

	msgid = header->msgid;
	size = header->size;
	localport = VCHIQ_MSG_DSTPORT(msgid);
	remoteport = VCHIQ_MSG_SRCPORT(msgid);
	if (size < sizeof(struct vchiq_open_payload))
		goto fail_open;
	payload = (struct vchiq_open_payload *)header->data;
	fourcc = payload->fourcc;
	vchiq_log_info(vchiq_core_log_level, "%d: prs OPEN@%pK (%d->'%c%c%c%c')",
		       state->id, header, localport, VCHIQ_FOURCC_AS_4CHARS(fourcc));
	service = get_listening_service(state, fourcc);
	if (!service)
		goto fail_open;
	/* A matching service exists */
	version = payload->version;
	version_min = payload->version_min;
	if ((service->version < version_min) || (version < service->version_min)) {
		/* Version mismatch */
		vchiq_loud_error_header();
		vchiq_loud_error("%d: service %d (%c%c%c%c) version mismatch - local (%d, min %d) vs. remote (%d, min %d)",
				 state->id, service->localport, VCHIQ_FOURCC_AS_4CHARS(fourcc),
				 service->version, service->version_min, version, version_min);
		vchiq_loud_error_footer();
		vchiq_service_put(service);
		service = NULL;
		goto fail_open;
	}
	service->peer_version = version;
	if (service->srvstate == VCHIQ_SRVSTATE_LISTENING) {
		struct vchiq_openack_payload ack_payload = {
			service->version
		};
		int openack_id = MAKE_OPENACK(service->localport, remoteport);

		/* Old peers don't understand synchronous mode. */
		if (state->version_common <
		    VCHIQ_VERSION_SYNCHRONOUS_MODE)
			service->sync = 0;
		/* Acknowledge the OPEN */
		if (service->sync) {
			if (queue_message_sync(state, NULL, openack_id, memcpy_copy_callback,
					       &ack_payload, sizeof(ack_payload), 0) == -EAGAIN)
				goto bail_not_ready;
			/* The service is now open */
			set_service_state(service, VCHIQ_SRVSTATE_OPENSYNC);
		} else {
			if (queue_message(state, NULL, openack_id, memcpy_copy_callback,
					  &ack_payload, sizeof(ack_payload), 0) == -EAGAIN)
				goto bail_not_ready;
			/* The service is now open */
			set_service_state(service, VCHIQ_SRVSTATE_OPEN);
		}
	}
	/* Success - the message has been dealt with */
	vchiq_service_put(service);
	return 1;
fail_open:
	/* No available service, or an invalid request - send a CLOSE */
	if (queue_message(state, NULL, MAKE_CLOSE(0, VCHIQ_MSG_SRCPORT(msgid)),
			  NULL, NULL, 0, 0) == -EAGAIN)
		goto bail_not_ready;
	return 1;
bail_not_ready:
	if (service)
		vchiq_service_put(service);
	return 0;
}
/**
 * parse_message() - parses a single message from the rx slot
 * @state:  vchiq state struct
 * @header: message header
 *
 * Context: Process context
 *
 * Return:
 * * >= 0 - size of the parsed message payload (without header)
 * * -EINVAL - fatal error occurred, bail out is required
 */
static int
parse_message(struct vchiq_state *state, struct vchiq_header *header)
{
	struct vchiq_service *service = NULL;
	unsigned int localport, remoteport;
	int msgid, size, type, ret = -EINVAL;

	DEBUG_INITIALISE(state->local);
	DEBUG_VALUE(PARSE_HEADER, (int)(long)header);
	msgid = header->msgid;
	DEBUG_VALUE(PARSE_MSGID, msgid);
	size = header->size;
	type = VCHIQ_MSG_TYPE(msgid);
	localport = VCHIQ_MSG_DSTPORT(msgid);
	remoteport = VCHIQ_MSG_SRCPORT(msgid);
	/* DATA messages are accounted per-service in the DATA case below. */
	if (type != VCHIQ_MSG_DATA)
		VCHIQ_STATS_INC(state, ctrl_rx_count);
	/* Resolve the destination service for message types that need one. */
	switch (type) {
	case VCHIQ_MSG_OPENACK:
	case VCHIQ_MSG_CLOSE:
	case VCHIQ_MSG_DATA:
	case VCHIQ_MSG_BULK_RX:
	case VCHIQ_MSG_BULK_TX:
	case VCHIQ_MSG_BULK_RX_DONE:
	case VCHIQ_MSG_BULK_TX_DONE:
		service = find_service_by_port(state, localport);
		if ((!service ||
		     ((service->remoteport != remoteport) &&
		      (service->remoteport != VCHIQ_PORT_FREE))) &&
		    (localport == 0) &&
		    (type == VCHIQ_MSG_CLOSE)) {
			/*
			 * This could be a CLOSE from a client which
			 * hadn't yet received the OPENACK - look for
			 * the connected service
			 */
			if (service)
				vchiq_service_put(service);
			service = get_connected_service(state, remoteport);
			if (service)
				vchiq_log_warning(vchiq_core_log_level,
						  "%d: prs %s@%pK (%d->%d) - found connected service %d",
						  state->id, msg_type_str(type), header,
						  remoteport, localport, service->localport);
		}
		if (!service) {
			vchiq_log_error(vchiq_core_log_level,
					"%d: prs %s@%pK (%d->%d) - invalid/closed service %d",
					state->id, msg_type_str(type), header, remoteport,
					localport, localport);
			goto skip_message;
		}
		break;
	default:
		break;
	}
	if (SRVTRACE_ENABLED(service, VCHIQ_LOG_INFO)) {
		int svc_fourcc;

		svc_fourcc = service
			? service->base.fourcc
			: VCHIQ_MAKE_FOURCC('?', '?', '?', '?');
		vchiq_log_info(SRVTRACE_LEVEL(service),
			       "Rcvd Msg %s(%u) from %c%c%c%c s:%d d:%d len:%d",
			       msg_type_str(type), type, VCHIQ_FOURCC_AS_4CHARS(svc_fourcc),
			       remoteport, localport, size);
		if (size > 0)
			vchiq_log_dump_mem("Rcvd", 0, header->data, min(16, size));
	}
	/* Sanity check: the whole message must lie within its slot. */
	if (((unsigned long)header & VCHIQ_SLOT_MASK) +
	    calc_stride(size) > VCHIQ_SLOT_SIZE) {
		vchiq_log_error(vchiq_core_log_level,
				"header %pK (msgid %x) - size %x too big for slot",
				header, (unsigned int)msgid, (unsigned int)size);
		WARN(1, "oversized for slot\n");
	}
	switch (type) {
	case VCHIQ_MSG_OPEN:
		WARN_ON(VCHIQ_MSG_DSTPORT(msgid));
		if (!parse_open(state, header))
			goto bail_not_ready;
		break;
	case VCHIQ_MSG_OPENACK:
		if (size >= sizeof(struct vchiq_openack_payload)) {
			const struct vchiq_openack_payload *payload =
				(struct vchiq_openack_payload *)
				header->data;
			service->peer_version = payload->version;
		}
		vchiq_log_info(vchiq_core_log_level, "%d: prs OPENACK@%pK,%x (%d->%d) v:%d",
			       state->id, header, size, remoteport, localport,
			       service->peer_version);
		if (service->srvstate == VCHIQ_SRVSTATE_OPENING) {
			service->remoteport = remoteport;
			set_service_state(service, VCHIQ_SRVSTATE_OPEN);
			/* Wake the thread blocked in the open request. */
			complete(&service->remove_event);
		} else {
			vchiq_log_error(vchiq_core_log_level, "OPENACK received in state %s",
					srvstate_names[service->srvstate]);
		}
		break;
	case VCHIQ_MSG_CLOSE:
		WARN_ON(size); /* There should be no data */
		vchiq_log_info(vchiq_core_log_level, "%d: prs CLOSE@%pK (%d->%d)",
			       state->id, header, remoteport, localport);
		mark_service_closing_internal(service, 1);
		if (vchiq_close_service_internal(service, CLOSE_RECVD) == -EAGAIN)
			goto bail_not_ready;
		vchiq_log_info(vchiq_core_log_level, "Close Service %c%c%c%c s:%u d:%d",
			       VCHIQ_FOURCC_AS_4CHARS(service->base.fourcc),
			       service->localport, service->remoteport);
		break;
	case VCHIQ_MSG_DATA:
		vchiq_log_info(vchiq_core_log_level, "%d: prs DATA@%pK,%x (%d->%d)",
			       state->id, header, size, remoteport, localport);
		if ((service->remoteport == remoteport) &&
		    (service->srvstate == VCHIQ_SRVSTATE_OPEN)) {
			/*
			 * Claim the slot so it isn't recycled until the
			 * service releases the message.
			 */
			header->msgid = msgid | VCHIQ_MSGID_CLAIMED;
			claim_slot(state->rx_info);
			DEBUG_TRACE(PARSE_LINE);
			if (make_service_callback(service, VCHIQ_MESSAGE_AVAILABLE, header,
						  NULL) == -EAGAIN) {
				DEBUG_TRACE(PARSE_LINE);
				goto bail_not_ready;
			}
			VCHIQ_SERVICE_STATS_INC(service, ctrl_rx_count);
			VCHIQ_SERVICE_STATS_ADD(service, ctrl_rx_bytes, size);
		} else {
			VCHIQ_STATS_INC(state, error_count);
		}
		break;
	case VCHIQ_MSG_CONNECT:
		vchiq_log_info(vchiq_core_log_level, "%d: prs CONNECT@%pK", state->id, header);
		/* Latch the common protocol version negotiated in slot zero. */
		state->version_common = ((struct vchiq_slot_zero *)
					 state->slot_data)->version;
		complete(&state->connect);
		break;
	case VCHIQ_MSG_BULK_RX:
	case VCHIQ_MSG_BULK_TX:
		/*
		 * We should never receive a bulk request from the
		 * other side since we're not setup to perform as the
		 * master.
		 */
		WARN_ON(1);
		break;
	case VCHIQ_MSG_BULK_RX_DONE:
	case VCHIQ_MSG_BULK_TX_DONE:
		if ((service->remoteport == remoteport) &&
		    (service->srvstate != VCHIQ_SRVSTATE_FREE)) {
			struct vchiq_bulk_queue *queue;
			struct vchiq_bulk *bulk;

			queue = (type == VCHIQ_MSG_BULK_RX_DONE) ?
				&service->bulk_rx : &service->bulk_tx;
			DEBUG_TRACE(PARSE_LINE);
			if (mutex_lock_killable(&service->bulk_mutex)) {
				DEBUG_TRACE(PARSE_LINE);
				goto bail_not_ready;
			}
			/* A DONE without a matching outstanding bulk. */
			if ((int)(queue->remote_insert -
				queue->local_insert) >= 0) {
				vchiq_log_error(vchiq_core_log_level,
						"%d: prs %s@%pK (%d->%d) unexpected (ri=%d,li=%d)",
						state->id, msg_type_str(type), header, remoteport,
						localport, queue->remote_insert,
						queue->local_insert);
				mutex_unlock(&service->bulk_mutex);
				break;
			}
			if (queue->process != queue->remote_insert) {
				pr_err("%s: p %x != ri %x\n",
				       __func__,
				       queue->process,
				       queue->remote_insert);
				mutex_unlock(&service->bulk_mutex);
				goto bail_not_ready;
			}
			bulk = &queue->bulks[BULK_INDEX(queue->remote_insert)];
			/* The payload is the number of bytes transferred. */
			bulk->actual = *(int *)header->data;
			queue->remote_insert++;
			vchiq_log_info(vchiq_core_log_level, "%d: prs %s@%pK (%d->%d) %x@%pad",
				       state->id, msg_type_str(type), header, remoteport, localport,
				       bulk->actual, &bulk->data);
			vchiq_log_trace(vchiq_core_log_level, "%d: prs:%d %cx li=%x ri=%x p=%x",
					state->id, localport,
					(type == VCHIQ_MSG_BULK_RX_DONE) ? 'r' : 't',
					queue->local_insert, queue->remote_insert, queue->process);
			DEBUG_TRACE(PARSE_LINE);
			WARN_ON(queue->process == queue->local_insert);
			vchiq_complete_bulk(service->instance, bulk);
			queue->process++;
			mutex_unlock(&service->bulk_mutex);
			DEBUG_TRACE(PARSE_LINE);
			notify_bulks(service, queue, RETRY_POLL);
			DEBUG_TRACE(PARSE_LINE);
		}
		break;
	case VCHIQ_MSG_PADDING:
		vchiq_log_trace(vchiq_core_log_level, "%d: prs PADDING@%pK,%x",
				state->id, header, size);
		break;
	case VCHIQ_MSG_PAUSE:
		/* If initiated, signal the application thread */
		vchiq_log_trace(vchiq_core_log_level, "%d: prs PAUSE@%pK,%x",
				state->id, header, size);
		if (state->conn_state == VCHIQ_CONNSTATE_PAUSED) {
			vchiq_log_error(vchiq_core_log_level, "%d: PAUSE received in state PAUSED",
					state->id);
			break;
		}
		if (state->conn_state != VCHIQ_CONNSTATE_PAUSE_SENT) {
			/* Send a PAUSE in response */
			if (queue_message(state, NULL, MAKE_PAUSE, NULL, NULL, 0,
					  QMFLAGS_NO_MUTEX_UNLOCK) == -EAGAIN)
				goto bail_not_ready;
		}
		/* At this point slot_mutex is held */
		vchiq_set_conn_state(state, VCHIQ_CONNSTATE_PAUSED);
		break;
	case VCHIQ_MSG_RESUME:
		vchiq_log_trace(vchiq_core_log_level, "%d: prs RESUME@%pK,%x",
				state->id, header, size);
		/* Release the slot mutex */
		mutex_unlock(&state->slot_mutex);
		vchiq_set_conn_state(state, VCHIQ_CONNSTATE_CONNECTED);
		break;
	case VCHIQ_MSG_REMOTE_USE:
		vchiq_on_remote_use(state);
		break;
	case VCHIQ_MSG_REMOTE_RELEASE:
		vchiq_on_remote_release(state);
		break;
	case VCHIQ_MSG_REMOTE_USE_ACTIVE:
		break;
	default:
		vchiq_log_error(vchiq_core_log_level, "%d: prs invalid msgid %x@%pK,%x",
				state->id, msgid, header, size);
		WARN(1, "invalid message\n");
		break;
	}
skip_message:
	/* Unroutable messages are consumed, not fatal: return their size. */
	ret = size;
bail_not_ready:
	if (service)
		vchiq_service_put(service);
	return ret;
}
/* Called by the slot handler thread */
/*
 * parse_rx_slots() - consume messages from the receive slots until rx_pos
 * catches up with the peer's published tx_pos.
 *
 * @state: vchiq state
 *
 * Maps in a new slot whenever rx_pos crosses a slot boundary, parses each
 * message in turn, and releases the slot's bookkeeping reference when the
 * end of the slot is reached. Bails out (without advancing) if
 * parse_message() reports it is not ready.
 */
static void
parse_rx_slots(struct vchiq_state *state)
{
	struct vchiq_shared_state *remote = state->remote;
	int tx_pos;

	DEBUG_INITIALISE(state->local);
	tx_pos = remote->tx_pos;
	while (state->rx_pos != tx_pos) {
		struct vchiq_header *header;
		int size;

		DEBUG_TRACE(PARSE_LINE);
		/* rx_data is NULL at a slot boundary - map the next slot. */
		if (!state->rx_data) {
			int rx_index;

			WARN_ON(state->rx_pos & VCHIQ_SLOT_MASK);
			rx_index = remote->slot_queue[
				SLOT_QUEUE_INDEX_FROM_POS_MASKED(state->rx_pos)];
			state->rx_data = (char *)SLOT_DATA_FROM_INDEX(state,
								      rx_index);
			state->rx_info = SLOT_INFO_FROM_INDEX(state, rx_index);
			/*
			 * Initialise use_count to one, and increment
			 * release_count at the end of the slot to avoid
			 * releasing the slot prematurely.
			 */
			state->rx_info->use_count = 1;
			state->rx_info->release_count = 0;
		}
		header = (struct vchiq_header *)(state->rx_data +
			(state->rx_pos & VCHIQ_SLOT_MASK));
		size = parse_message(state, header);
		if (size < 0)
			return;
		state->rx_pos += calc_stride(size);
		DEBUG_TRACE(PARSE_LINE);
		/*
		 * Perform some housekeeping when the end of the slot is
		 * reached.
		 */
		if ((state->rx_pos & VCHIQ_SLOT_MASK) == 0) {
			/* Remove the extra reference count. */
			release_slot(state, state->rx_info, NULL, NULL);
			state->rx_data = NULL;
		}
	}
}
/**
 * handle_poll() - handle service polling and other rare conditions
 * @state: vchiq state struct
 *
 * Context: Process context
 *
 * Return:
 * * 0 - poll handled successful
 * * -EAGAIN - retry later
 */
static int
handle_poll(struct vchiq_state *state)
{
	switch (state->conn_state) {
	case VCHIQ_CONNSTATE_CONNECTED:
		/* Poll the services as requested */
		poll_services(state);
		break;

	case VCHIQ_CONNSTATE_PAUSING:
		/* Failure to queue the PAUSE means the caller must retry. */
		if (queue_message(state, NULL, MAKE_PAUSE, NULL, NULL, 0,
				  QMFLAGS_NO_MUTEX_UNLOCK) == -EAGAIN)
			return -EAGAIN;
		vchiq_set_conn_state(state, VCHIQ_CONNSTATE_PAUSE_SENT);
		break;

	case VCHIQ_CONNSTATE_RESUMING:
		if (queue_message(state, NULL, MAKE_RESUME, NULL, NULL, 0,
				  QMFLAGS_NO_MUTEX_LOCK) == -EAGAIN) {
			/*
			 * This should really be impossible,
			 * since the PAUSE should have flushed
			 * through outstanding messages.
			 */
			vchiq_log_error(vchiq_core_log_level, "Failed to send RESUME message");
		} else {
			vchiq_set_conn_state(state, VCHIQ_CONNSTATE_CONNECTED);
		}
		break;

	default:
		break;
	}

	return 0;
}
/* Called by the slot handler thread */
/*
 * slot_handler_func() - main loop of the slot handler kthread.
 *
 * @v: the struct vchiq_state for this connection
 *
 * Waits for the peer's trigger event, then services any pending polls
 * (re-arming poll_needed if handle_poll() asks for a retry) and parses the
 * newly arrived messages. Never returns in normal operation.
 */
static int
slot_handler_func(void *v)
{
	struct vchiq_state *state = v;
	struct vchiq_shared_state *local = state->local;

	DEBUG_INITIALISE(local);
	while (1) {
		DEBUG_COUNT(SLOT_HANDLER_COUNT);
		DEBUG_TRACE(SLOT_HANDLER_LINE);
		remote_event_wait(&state->trigger_event, &local->trigger);
		/* Ensure that reads don't overtake the remote_event_wait. */
		rmb();
		DEBUG_TRACE(SLOT_HANDLER_LINE);
		if (state->poll_needed) {
			state->poll_needed = 0;
			/*
			 * Handle service polling and other rare conditions here
			 * out of the mainline code
			 */
			if (handle_poll(state) == -EAGAIN)
				state->poll_needed = 1;
		}
		DEBUG_TRACE(SLOT_HANDLER_LINE);
		parse_rx_slots(state);
	}
	return 0;
}
/* Called by the recycle thread */
static int
recycle_func(void *v)
{
struct vchiq_state *state = v;
struct vchiq_shared_state *local = state->local;
u32 *found;
size_t length;
length = sizeof(*found) * BITSET_SIZE(VCHIQ_MAX_SERVICES);
found = kmalloc_array(BITSET_SIZE(VCHIQ_MAX_SERVICES), sizeof(*found),
GFP_KERNEL);
if (!found)
return -ENOMEM;
while (1) {
remote_event_wait(&state->recycle_event, &local->recycle);
process_free_queue(state, found, length);
}
return 0;
}
/* Called by the sync thread */
/*
 * sync_func() - kthread servicing the peer's single synchronous slot.
 *
 * @v: the struct vchiq_state for this connection
 *
 * Waits for the sync trigger, decodes the one message in the remote sync
 * slot and dispatches OPENACK and DATA messages to the owning service.
 * Unhandled or unroutable messages are released immediately; DATA messages
 * are released later by the service via release_message_sync().
 */
static int
sync_func(void *v)
{
	struct vchiq_state *state = v;
	struct vchiq_shared_state *local = state->local;
	/* The sync slot index is fixed; map its header once. */
	struct vchiq_header *header =
		(struct vchiq_header *)SLOT_DATA_FROM_INDEX(state,
							    state->remote->slot_sync);

	while (1) {
		struct vchiq_service *service;
		int msgid, size;
		int type;
		unsigned int localport, remoteport;

		remote_event_wait(&state->sync_trigger_event, &local->sync_trigger);
		/* Ensure that reads don't overtake the remote_event_wait. */
		rmb();
		msgid = header->msgid;
		size = header->size;
		type = VCHIQ_MSG_TYPE(msgid);
		localport = VCHIQ_MSG_DSTPORT(msgid);
		remoteport = VCHIQ_MSG_SRCPORT(msgid);
		service = find_service_by_port(state, localport);
		if (!service) {
			vchiq_log_error(vchiq_sync_log_level,
					"%d: sf %s@%pK (%d->%d) - invalid/closed service %d",
					state->id, msg_type_str(type), header,
					remoteport, localport, localport);
			release_message_sync(state, header);
			continue;
		}
		if (vchiq_sync_log_level >= VCHIQ_LOG_TRACE) {
			int svc_fourcc;

			svc_fourcc = service
				? service->base.fourcc
				: VCHIQ_MAKE_FOURCC('?', '?', '?', '?');
			vchiq_log_trace(vchiq_sync_log_level,
					"Rcvd Msg %s from %c%c%c%c s:%d d:%d len:%d",
					msg_type_str(type), VCHIQ_FOURCC_AS_4CHARS(svc_fourcc),
					remoteport, localport, size);
			if (size > 0)
				vchiq_log_dump_mem("Rcvd", 0, header->data, min(16, size));
		}
		switch (type) {
		case VCHIQ_MSG_OPENACK:
			if (size >= sizeof(struct vchiq_openack_payload)) {
				const struct vchiq_openack_payload *payload =
					(struct vchiq_openack_payload *)
					header->data;
				service->peer_version = payload->version;
			}
			vchiq_log_info(vchiq_sync_log_level, "%d: sf OPENACK@%pK,%x (%d->%d) v:%d",
				       state->id, header, size, remoteport, localport,
				       service->peer_version);
			if (service->srvstate == VCHIQ_SRVSTATE_OPENING) {
				service->remoteport = remoteport;
				set_service_state(service, VCHIQ_SRVSTATE_OPENSYNC);
				service->sync = 1;
				/* Wake the thread blocked in the open request. */
				complete(&service->remove_event);
			}
			release_message_sync(state, header);
			break;
		case VCHIQ_MSG_DATA:
			vchiq_log_trace(vchiq_sync_log_level, "%d: sf DATA@%pK,%x (%d->%d)",
					state->id, header, size, remoteport, localport);
			if ((service->remoteport == remoteport) &&
			    (service->srvstate == VCHIQ_SRVSTATE_OPENSYNC)) {
				/* The service releases the slot when done. */
				if (make_service_callback(service, VCHIQ_MESSAGE_AVAILABLE, header,
							  NULL) == -EAGAIN)
					vchiq_log_error(vchiq_sync_log_level,
							"synchronous callback to service %d returns -EAGAIN",
							localport);
			}
			break;
		default:
			vchiq_log_error(vchiq_sync_log_level, "%d: sf unexpected msgid %x@%pK,%x",
					state->id, msgid, header, size);
			release_message_sync(state, header);
			break;
		}
		vchiq_service_put(service);
	}
	return 0;
}
inline const char *
get_conn_state_name(enum vchiq_connstate conn_state)
{
	/* Simple table lookup of the connection state's display name. */
	const char *name = conn_state_names[conn_state];

	return name;
}
/*
 * vchiq_init_slots() - lay out the shared slot area inside @mem_base.
 *
 * Aligns the base to a slot boundary, initialises slot zero, and splits the
 * remaining data slots evenly between the master and slave sides. Returns
 * the slot-zero descriptor, or NULL if the area is too small.
 */
struct vchiq_slot_zero *
vchiq_init_slots(void *mem_base, int mem_size)
{
	int align = (int)((VCHIQ_SLOT_SIZE - (long)mem_base) & VCHIQ_SLOT_MASK);
	struct vchiq_slot_zero *zero =
		(struct vchiq_slot_zero *)(mem_base + align);
	int data_first = VCHIQ_SLOT_ZERO_SLOTS;
	int usable = (mem_size - align) / VCHIQ_SLOT_SIZE - data_first;
	int half;

	check_sizes();

	/* Ensure there is enough memory to run an absolutely minimum system */
	if (usable < 4) {
		vchiq_log_error(vchiq_core_log_level, "%s - insufficient memory %x bytes",
				__func__, mem_size);
		return NULL;
	}

	memset(zero, 0, sizeof(struct vchiq_slot_zero));

	zero->magic = VCHIQ_MAGIC;
	zero->version = VCHIQ_VERSION;
	zero->version_min = VCHIQ_VERSION_MIN;
	zero->slot_zero_size = sizeof(struct vchiq_slot_zero);
	zero->slot_size = VCHIQ_SLOT_SIZE;
	zero->max_slots = VCHIQ_MAX_SLOTS;
	zero->max_slots_per_side = VCHIQ_MAX_SLOTS_PER_SIDE;

	/* Give each side half of the data slots; master sync slot first. */
	half = usable / 2;
	zero->master.slot_sync = data_first;
	zero->master.slot_first = data_first + 1;
	zero->master.slot_last = data_first + half - 1;
	zero->slave.slot_sync = data_first + half;
	zero->slave.slot_first = data_first + half + 1;
	zero->slave.slot_last = data_first + usable - 1;

	return zero;
}
int
vchiq_init_state(struct vchiq_state *state, struct vchiq_slot_zero *slot_zero, struct device *dev)
{
struct vchiq_shared_state *local;
struct vchiq_shared_state *remote;
char threadname[16];
int i, ret;
local = &slot_zero->slave;
remote = &slot_zero->master;
if (local->initialised) {
vchiq_loud_error_header();
if (remote->initialised)
vchiq_loud_error("local state has already been initialised");
else
vchiq_loud_error("master/slave mismatch two slaves");
vchiq_loud_error_footer();
return -EINVAL;
}
memset(state, 0, sizeof(struct vchiq_state));
state->dev = dev;
/*
* initialize shared state pointers
*/
state->local = local;
state->remote = remote;
state->slot_data = (struct vchiq_slot *)slot_zero;
/*
* initialize events and mutexes
*/
init_completion(&state->connect);
mutex_init(&state->mutex);
mutex_init(&state->slot_mutex);
mutex_init(&state->recycle_mutex);
mutex_init(&state->sync_mutex);
mutex_init(&state->bulk_transfer_mutex);
init_completion(&state->slot_available_event);
init_completion(&state->slot_remove_event);
init_completion(&state->data_quota_event);
state->slot_queue_available = 0;
for (i = 0; i < VCHIQ_MAX_SERVICES; i++) {
struct vchiq_service_quota *quota = &state->service_quotas[i];
init_completion("a->quota_event);
}
for (i = local->slot_first; i <= local->slot_last; i++) {
local->slot_queue[state->slot_queue_available] = i;
state->slot_queue_available++;
complete(&state->slot_available_event);
}
state->default_slot_quota = state->slot_queue_available / 2;
state->default_message_quota =
min_t(unsigned short, state->default_slot_quota * 256, ~0);
state->previous_data_index = -1;
state->data_use_count = 0;
state->data_quota = state->slot_queue_available - 1;
remote_event_create(&state->trigger_event, &local->trigger);
local->tx_pos = 0;
remote_event_create(&state->recycle_event, &local->recycle);
local->slot_queue_recycle = state->slot_queue_available;
remote_event_create(&state->sync_trigger_event, &local->sync_trigger);
remote_event_create(&state->sync_release_event, &local->sync_release);
/* At start-of-day, the slot is empty and available */
((struct vchiq_header *)
SLOT_DATA_FROM_INDEX(state, local->slot_sync))->msgid =
VCHIQ_MSGID_PADDING;
remote_event_signal_local(&state->sync_release_event, &local->sync_release);
local->debug[DEBUG_ENTRIES] = DEBUG_MAX;
ret = vchiq_platform_init_state(state);
if (ret)
return ret;
/*
* bring up slot handler thread
*/
snprintf(threadname, sizeof(threadname), "vchiq-slot/%d", state->id);
state->slot_handler_thread = kthread_create(&slot_handler_func, (void *)state, threadname);
if (IS_ERR(state->slot_handler_thread)) {
vchiq_loud_error_header();
vchiq_loud_error("couldn't create thread %s", threadname);
vchiq_loud_error_footer();
return PTR_ERR(state->slot_handler_thread);
}
set_user_nice(state->slot_handler_thread, -19);
snprintf(threadname, sizeof(threadname), "vchiq-recy/%d", state->id);
state->recycle_thread = kthread_create(&recycle_func, (void *)state, threadname);
if (IS_ERR(state->recycle_thread)) {
vchiq_loud_error_header();
vchiq_loud_error("couldn't create thread %s", threadname);
vchiq_loud_error_footer();
ret = PTR_ERR(state->recycle_thread);
goto fail_free_handler_thread;
}
set_user_nice(state->recycle_thread, -19);
snprintf(threadname, sizeof(threadname), "vchiq-sync/%d", state->id);
state->sync_thread = kthread_create(&sync_func, (void *)state, threadname);
if (IS_ERR(state->sync_thread)) {
vchiq_loud_error_header();
vchiq_loud_error("couldn't create thread %s", threadname);
vchiq_loud_error_footer();
ret = PTR_ERR(state->sync_thread);
goto fail_free_recycle_thread;
}
set_user_nice(state->sync_thread, -20);
wake_up_process(state->slot_handler_thread);
wake_up_process(state->recycle_thread);
wake_up_process(state->sync_thread);
/* Indicate readiness to the other side */
local->initialised = 1;
return 0;
fail_free_recycle_thread:
kthread_stop(state->recycle_thread);
fail_free_handler_thread:
kthread_stop(state->slot_handler_thread);
return ret;
}
/*
 * Append @header to the per-service message queue, blocking (and
 * flushing signals) while the queue is full.  The queue holds at most
 * VCHIQ_MAX_SLOTS entries and is indexed with a power-of-two mask.
 */
void vchiq_msg_queue_push(struct vchiq_instance *instance, unsigned int handle,
			  struct vchiq_header *header)
{
	struct vchiq_service *service = find_service_by_handle(instance, handle);
	int pos;

	if (!service)
		return;

	/* Wait for space; a signal merely restarts the wait. */
	while (service->msg_queue_write == service->msg_queue_read +
		VCHIQ_MAX_SLOTS) {
		if (wait_for_completion_interruptible(&service->msg_queue_pop))
			flush_signals(current);
	}

	pos = service->msg_queue_write & (VCHIQ_MAX_SLOTS - 1);
	service->msg_queue_write++;
	service->msg_queue[pos] = header;

	complete(&service->msg_queue_push);

	/*
	 * FIX: drop the reference taken by find_service_by_handle();
	 * every other lookup in this file is balanced by a put.
	 */
	vchiq_service_put(service);
}
EXPORT_SYMBOL(vchiq_msg_queue_push);
/*
 * Pop the oldest held message from the per-service queue, or NULL if the
 * service is unknown or the queue is empty.  The returned header remains
 * valid until released via vchiq_release_message().
 */
struct vchiq_header *vchiq_msg_hold(struct vchiq_instance *instance, unsigned int handle)
{
	struct vchiq_service *service = find_service_by_handle(instance, handle);
	struct vchiq_header *header;
	int pos;

	if (!service)
		return NULL;

	/*
	 * Non-blocking: an empty queue is reported to the caller.
	 * FIX: the old code leaked the service reference here, and also
	 * carried an unreachable wait loop (the empty-queue case already
	 * returned above, and msg_queue_write only ever grows).
	 */
	if (service->msg_queue_write == service->msg_queue_read) {
		vchiq_service_put(service);
		return NULL;
	}

	pos = service->msg_queue_read & (VCHIQ_MAX_SLOTS - 1);
	service->msg_queue_read++;

	header = service->msg_queue[pos];

	complete(&service->msg_queue_pop);

	/* FIX: drop the reference taken by find_service_by_handle(). */
	vchiq_service_put(service);

	return header;
}
EXPORT_SYMBOL(vchiq_msg_hold);
/* A usable service needs both a callback and a non-zero fourcc. */
static int vchiq_validate_params(const struct vchiq_service_params_kernel *params)
{
	if (params->callback && params->fourcc)
		return 0;

	vchiq_loud_error("Can't add service, invalid params\n");
	return -EINVAL;
}
/* Called from application thread when a client or server service is created. */
/*
 * Allocate a vchiq_service, find it a free local port slot (under
 * state->mutex), assign a unique handle and bring it to @srvstate.
 * Returns the new service with an initial ref_count of 1, or NULL on
 * invalid params, allocation failure, or a conflicting server fourcc.
 */
struct vchiq_service *
vchiq_add_service_internal(struct vchiq_state *state,
			   const struct vchiq_service_params_kernel *params,
			   int srvstate, struct vchiq_instance *instance,
			   void (*userdata_term)(void *userdata))
{
	struct vchiq_service *service;
	struct vchiq_service __rcu **pservice = NULL;
	struct vchiq_service_quota *quota;
	int ret;
	int i;

	ret = vchiq_validate_params(params);
	if (ret)
		return NULL;

	service = kzalloc(sizeof(*service), GFP_KERNEL);
	if (!service)
		return service;

	/* Populate the service from the caller's parameters. */
	service->base.fourcc = params->fourcc;
	service->base.callback = params->callback;
	service->base.userdata = params->userdata;
	service->handle = VCHIQ_SERVICE_HANDLE_INVALID;
	kref_init(&service->ref_count);
	service->srvstate = VCHIQ_SRVSTATE_FREE;
	service->userdata_term = userdata_term;
	service->localport = VCHIQ_PORT_FREE;
	service->remoteport = VCHIQ_PORT_FREE;

	/* Clients (OPENING) keep their fourcc private; servers publish it. */
	service->public_fourcc = (srvstate == VCHIQ_SRVSTATE_OPENING) ?
		VCHIQ_FOURCC_INVALID : params->fourcc;
	service->auto_close = 1;
	atomic_set(&service->poll_flags, 0);
	service->version = params->version;
	service->version_min = params->version_min;
	service->state = state;
	service->instance = instance;

	init_completion(&service->remove_event);
	init_completion(&service->bulk_remove_event);
	init_completion(&service->msg_queue_pop);
	init_completion(&service->msg_queue_push);
	mutex_init(&service->bulk_mutex);

	/*
	 * Although it is perfectly possible to use a spinlock
	 * to protect the creation of services, it is overkill as it
	 * disables interrupts while the array is searched.
	 * The only danger is of another thread trying to create a
	 * service - service deletion is safe.
	 * Therefore it is preferable to use state->mutex which,
	 * although slower to claim, doesn't block interrupts while
	 * it is held.
	 */
	mutex_lock(&state->mutex);

	/* Prepare to use a previously unused service */
	if (state->unused_service < VCHIQ_MAX_SERVICES)
		pservice = &state->services[state->unused_service];

	if (srvstate == VCHIQ_SRVSTATE_OPENING) {
		/* Clients take the lowest free slot. */
		for (i = 0; i < state->unused_service; i++) {
			if (!rcu_access_pointer(state->services[i])) {
				pservice = &state->services[i];
				break;
			}
		}
	} else {
		/*
		 * Servers scan from the top: reuse a free slot, but refuse
		 * to register if another server already owns this fourcc
		 * with a different instance/callback.
		 */
		rcu_read_lock();
		for (i = (state->unused_service - 1); i >= 0; i--) {
			struct vchiq_service *srv;

			srv = rcu_dereference(state->services[i]);
			if (!srv) {
				pservice = &state->services[i];
			} else if ((srv->public_fourcc == params->fourcc) &&
				   ((srv->instance != instance) ||
				   (srv->base.callback != params->callback))) {
				/*
				 * There is another server using this
				 * fourcc which doesn't match.
				 */
				pservice = NULL;
				break;
			}
		}
		rcu_read_unlock();
	}

	if (pservice) {
		service->localport = (pservice - state->services);
		/* handle_seq keeps handles unique across reuse of ports. */
		if (!handle_seq)
			handle_seq = VCHIQ_MAX_STATES *
				 VCHIQ_MAX_SERVICES;
		service->handle = handle_seq |
			(state->id * VCHIQ_MAX_SERVICES) |
			service->localport;
		handle_seq += VCHIQ_MAX_STATES * VCHIQ_MAX_SERVICES;
		rcu_assign_pointer(*pservice, service);
		if (pservice == &state->services[state->unused_service])
			state->unused_service++;
	}

	mutex_unlock(&state->mutex);

	if (!pservice) {
		kfree(service);
		return NULL;
	}

	/* Give the service the state-wide default quotas. */
	quota = &state->service_quotas[service->localport];
	quota->slot_quota = state->default_slot_quota;
	quota->message_quota = state->default_message_quota;
	if (quota->slot_use_count == 0)
		quota->previous_tx_index =
			SLOT_QUEUE_INDEX_FROM_POS(state->local_tx_pos)
			- 1;

	/* Bring this service online */
	set_service_state(service, srvstate);

	vchiq_log_info(vchiq_core_msg_log_level, "%s Service %c%c%c%c SrcPort:%d",
		       (srvstate == VCHIQ_SRVSTATE_OPENING) ? "Open" : "Add",
		       VCHIQ_FOURCC_AS_4CHARS(params->fourcc), service->localport);

	/* Don't unlock the service - leave it with a ref_count of 1. */
	return service;
}
/*
 * Send an OPEN request for @service to the peer and wait for the
 * ACK/NAK.  Returns 0 on success, -EAGAIN if interrupted by a signal,
 * or -EINVAL if the peer refused the open.
 */
int
vchiq_open_service_internal(struct vchiq_service *service, int client_id)
{
	/* OPEN payload: fourcc plus the supported version range. */
	struct vchiq_open_payload payload = {
		service->base.fourcc,
		client_id,
		service->version,
		service->version_min
	};
	int status = 0;

	service->client_id = client_id;
	/* Hold a use count for the duration of the handshake. */
	vchiq_use_service_internal(service);

	status = queue_message(service->state,
			       NULL, MAKE_OPEN(service->localport),
			       memcpy_copy_callback,
			       &payload,
			       sizeof(payload),
			       QMFLAGS_IS_BLOCKING);

	if (status)
		return status;

	/* Wait for the ACK/NAK */
	if (wait_for_completion_interruptible(&service->remove_event)) {
		/* Interrupted by a signal - the caller may retry. */
		status = -EAGAIN;
		vchiq_release_service_internal(service);
	} else if ((service->srvstate != VCHIQ_SRVSTATE_OPEN) &&
		   (service->srvstate != VCHIQ_SRVSTATE_OPENSYNC)) {
		/* CLOSEWAIT here means a plain rejection - don't log it. */
		if (service->srvstate != VCHIQ_SRVSTATE_CLOSEWAIT)
			vchiq_log_error(vchiq_core_log_level,
					"%d: osi - srvstate = %s (ref %u)",
					service->state->id,
					srvstate_names[service->srvstate],
					kref_read(&service->ref_count));
		status = -EINVAL;
		VCHIQ_SERVICE_STATS_INC(service, error_count);
		vchiq_release_service_internal(service);
	}

	return status;
}
/*
 * During close, release any received messages still claimed by
 * @service.  Synchronous services use a single dedicated sync slot;
 * otherwise every receive slot with outstanding claims is scanned.
 */
static void
release_service_messages(struct vchiq_service *service)
{
	struct vchiq_state *state = service->state;
	int slot_last = state->remote->slot_last;
	int i;

	/* Release any claimed messages aimed at this service */

	if (service->sync) {
		struct vchiq_header *header =
			(struct vchiq_header *)SLOT_DATA_FROM_INDEX(state,
								    state->remote->slot_sync);
		if (VCHIQ_MSG_DSTPORT(header->msgid) == service->localport)
			release_message_sync(state, header);

		return;
	}

	for (i = state->remote->slot_first; i <= slot_last; i++) {
		struct vchiq_slot_info *slot_info =
			SLOT_INFO_FROM_INDEX(state, i);
		unsigned int pos, end;
		char *data;

		/* Fully released slots need no scan. */
		if (slot_info->release_count == slot_info->use_count)
			continue;

		data = (char *)SLOT_DATA_FROM_INDEX(state, i);
		end = VCHIQ_SLOT_SIZE;
		if (data == state->rx_data)
			/*
			 * This buffer is still being read from - stop
			 * at the current read position
			 */
			end = state->rx_pos & VCHIQ_SLOT_MASK;

		pos = 0;

		/* Walk headers in the slot, releasing ones claimed by us. */
		while (pos < end) {
			struct vchiq_header *header =
				(struct vchiq_header *)(data + pos);
			int msgid = header->msgid;
			int port = VCHIQ_MSG_DSTPORT(msgid);

			if ((port == service->localport) && (msgid & VCHIQ_MSGID_CLAIMED)) {
				vchiq_log_info(vchiq_core_log_level, " fsi - hdr %pK", header);
				release_slot(state, slot_info, header, NULL);
			}
			pos += calc_stride(header->size);
			if (pos > VCHIQ_SLOT_SIZE) {
				/* Corrupt header - report before walking off the slot. */
				vchiq_log_error(vchiq_core_log_level,
						"fsi - pos %x: header %pK, msgid %x, header->msgid %x, header->size %x",
						pos, header, msgid, header->msgid, header->size);
				WARN(1, "invalid slot position\n");
			}
		}
	}
}
/*
 * Abort all outstanding bulk transfers on @service and notify their
 * owners.  Returns 1 on success, 0 if the lock or either notification
 * failed (the caller retries later).
 */
static int
do_abort_bulks(struct vchiq_service *service)
{
	/* Abort any outstanding bulk transfers */
	if (mutex_lock_killable(&service->bulk_mutex))
		return 0;
	abort_outstanding_bulks(service, &service->bulk_tx);
	abort_outstanding_bulks(service, &service->bulk_rx);
	mutex_unlock(&service->bulk_mutex);

	if (notify_bulks(service, &service->bulk_tx, NO_RETRY_POLL))
		return 0;

	return !notify_bulks(service, &service->bulk_rx, NO_RETRY_POLL);
}
/*
 * Finish closing @service: move it to its post-close state, deliver the
 * SERVICE_CLOSED callback, and release all outstanding use counts.  If
 * the callback returns -EAGAIN the service is parked in @failstate so
 * the close can be retried.
 */
static int
close_service_complete(struct vchiq_service *service, int failstate)
{
	int status;
	int is_server = (service->public_fourcc != VCHIQ_FOURCC_INVALID);
	int newstate;

	switch (service->srvstate) {
	case VCHIQ_SRVSTATE_OPEN:
	case VCHIQ_SRVSTATE_CLOSESENT:
	case VCHIQ_SRVSTATE_CLOSERECVD:
		if (is_server) {
			if (service->auto_close) {
				/* Auto-closing servers return to listening. */
				service->client_id = 0;
				service->remoteport = VCHIQ_PORT_FREE;
				newstate = VCHIQ_SRVSTATE_LISTENING;
			} else {
				newstate = VCHIQ_SRVSTATE_CLOSEWAIT;
			}
		} else {
			newstate = VCHIQ_SRVSTATE_CLOSED;
		}
		set_service_state(service, newstate);
		break;
	case VCHIQ_SRVSTATE_LISTENING:
		break;
	default:
		vchiq_log_error(vchiq_core_log_level, "%s(%x) called in state %s", __func__,
				service->handle, srvstate_names[service->srvstate]);
		WARN(1, "%s in unexpected state\n", __func__);
		return -EINVAL;
	}

	status = make_service_callback(service, VCHIQ_SERVICE_CLOSED, NULL, NULL);

	if (status != -EAGAIN) {
		int uc = service->service_use_count;
		int i;

		/* Complete the close process */
		for (i = 0; i < uc; i++)
			/*
			 * cater for cases where close is forced and the
			 * client may not close all it's handles
			 */
			vchiq_release_service_internal(service);

		service->client_id = 0;
		service->remoteport = VCHIQ_PORT_FREE;

		if (service->srvstate == VCHIQ_SRVSTATE_CLOSED) {
			vchiq_free_service_internal(service);
		} else if (service->srvstate != VCHIQ_SRVSTATE_CLOSEWAIT) {
			if (is_server)
				service->closing = 0;

			complete(&service->remove_event);
		}
	} else {
		/* Callback busy - record where to resume the close. */
		set_service_state(service, failstate);
	}

	return status;
}
/* Called by the slot handler */
/*
 * Drive the close state machine for @service.  @close_recvd is set when
 * the peer's CLOSE message has already arrived, clear when the close was
 * initiated locally.  Returns 0 on success, -EAGAIN when the close must
 * be retried, or -EINVAL for an invalid request.
 */
int
vchiq_close_service_internal(struct vchiq_service *service, int close_recvd)
{
	struct vchiq_state *state = service->state;
	int status = 0;
	int is_server = (service->public_fourcc != VCHIQ_FOURCC_INVALID);
	int close_id = MAKE_CLOSE(service->localport,
				  VCHIQ_MSG_DSTPORT(service->remoteport));

	vchiq_log_info(vchiq_core_log_level, "%d: csi:%d,%d (%s)", service->state->id,
		       service->localport, close_recvd, srvstate_names[service->srvstate]);

	switch (service->srvstate) {
	case VCHIQ_SRVSTATE_CLOSED:
	case VCHIQ_SRVSTATE_HIDDEN:
	case VCHIQ_SRVSTATE_LISTENING:
	case VCHIQ_SRVSTATE_CLOSEWAIT:
		/* Nothing is open - a CLOSE from the peer is unexpected here. */
		if (close_recvd) {
			vchiq_log_error(vchiq_core_log_level, "%s(1) called in state %s",
					__func__, srvstate_names[service->srvstate]);
		} else if (is_server) {
			if (service->srvstate == VCHIQ_SRVSTATE_LISTENING) {
				status = -EINVAL;
			} else {
				service->client_id = 0;
				service->remoteport = VCHIQ_PORT_FREE;
				if (service->srvstate == VCHIQ_SRVSTATE_CLOSEWAIT)
					set_service_state(service, VCHIQ_SRVSTATE_LISTENING);
			}
			complete(&service->remove_event);
		} else {
			vchiq_free_service_internal(service);
		}
		break;
	case VCHIQ_SRVSTATE_OPENING:
		if (close_recvd) {
			/* The open was rejected - tell the user */
			set_service_state(service, VCHIQ_SRVSTATE_CLOSEWAIT);
			complete(&service->remove_event);
		} else {
			/* Shutdown mid-open - let the other side know */
			status = queue_message(state, service, close_id, NULL, NULL, 0, 0);
		}
		break;

	case VCHIQ_SRVSTATE_OPENSYNC:
		mutex_lock(&state->sync_mutex);
		fallthrough;
	case VCHIQ_SRVSTATE_OPEN:
		if (close_recvd) {
			if (!do_abort_bulks(service))
				status = -EAGAIN;
		}

		release_service_messages(service);

		/*
		 * Send our CLOSE; QMFLAGS_NO_MUTEX_UNLOCK leaves slot_mutex
		 * held so the state change below is atomic with the send.
		 */
		if (!status)
			status = queue_message(state, service, close_id, NULL,
					       NULL, 0, QMFLAGS_NO_MUTEX_UNLOCK);

		if (status) {
			if (service->srvstate == VCHIQ_SRVSTATE_OPENSYNC)
				mutex_unlock(&state->sync_mutex);
			break;
		}

		if (!close_recvd) {
			/* Change the state while the mutex is still held */
			set_service_state(service, VCHIQ_SRVSTATE_CLOSESENT);
			mutex_unlock(&state->slot_mutex);
			if (service->sync)
				mutex_unlock(&state->sync_mutex);
			break;
		}

		/* Change the state while the mutex is still held */
		set_service_state(service, VCHIQ_SRVSTATE_CLOSERECVD);
		mutex_unlock(&state->slot_mutex);
		if (service->sync)
			mutex_unlock(&state->sync_mutex);

		status = close_service_complete(service, VCHIQ_SRVSTATE_CLOSERECVD);
		break;

	case VCHIQ_SRVSTATE_CLOSESENT:
		if (!close_recvd)
			/* This happens when a process is killed mid-close */
			break;

		if (!do_abort_bulks(service)) {
			status = -EAGAIN;
			break;
		}

		if (!status)
			status = close_service_complete(service, VCHIQ_SRVSTATE_CLOSERECVD);
		break;

	case VCHIQ_SRVSTATE_CLOSERECVD:
		if (!close_recvd && is_server)
			/* Force into LISTENING mode */
			set_service_state(service, VCHIQ_SRVSTATE_LISTENING);
		status = close_service_complete(service, VCHIQ_SRVSTATE_CLOSERECVD);
		break;

	default:
		vchiq_log_error(vchiq_core_log_level, "%s(%d) called in state %s", __func__,
				close_recvd, srvstate_names[service->srvstate]);
		break;
	}

	return status;
}
/* Called from the application process upon process death */
void
vchiq_terminate_service_internal(struct vchiq_service *service)
{
	vchiq_log_info(vchiq_core_log_level, "%d: tsi - (%d<->%d)",
		       service->state->id, service->localport,
		       service->remoteport);

	mark_service_closing(service);

	/* Hand the actual teardown to the slot handler thread. */
	request_poll(service->state, service, VCHIQ_POLL_REMOVE);
}
/* Called from the slot handler */
void
vchiq_free_service_internal(struct vchiq_service *service)
{
	struct vchiq_state *state = service->state;
	int srvstate = service->srvstate;

	vchiq_log_info(vchiq_core_log_level, "%d: fsi - (%d)", state->id, service->localport);

	/* Only quiescent services may be freed. */
	if (srvstate != VCHIQ_SRVSTATE_OPENING &&
	    srvstate != VCHIQ_SRVSTATE_CLOSED &&
	    srvstate != VCHIQ_SRVSTATE_HIDDEN &&
	    srvstate != VCHIQ_SRVSTATE_LISTENING &&
	    srvstate != VCHIQ_SRVSTATE_CLOSEWAIT) {
		vchiq_log_error(vchiq_core_log_level, "%d: fsi - (%d) in state %s", state->id,
				service->localport, srvstate_names[srvstate]);
		return;
	}

	set_service_state(service, VCHIQ_SRVSTATE_FREE);

	complete(&service->remove_event);

	/* Release the initial lock */
	vchiq_service_put(service);
}
/*
 * Enable all of @instance's hidden services and perform the CONNECT
 * handshake with the peer if it hasn't happened yet.  Returns 0 once
 * connected, -EAGAIN on signal or queue contention.
 */
int
vchiq_connect_internal(struct vchiq_state *state, struct vchiq_instance *instance)
{
	struct vchiq_service *service;
	int i;

	/* Find all services registered to this client and enable them. */
	i = 0;
	while ((service = next_service_by_instance(state, instance, &i)) != NULL) {
		if (service->srvstate == VCHIQ_SRVSTATE_HIDDEN)
			set_service_state(service, VCHIQ_SRVSTATE_LISTENING);
		vchiq_service_put(service);
	}

	if (state->conn_state == VCHIQ_CONNSTATE_DISCONNECTED) {
		if (queue_message(state, NULL, MAKE_CONNECT, NULL, NULL, 0,
				  QMFLAGS_IS_BLOCKING) == -EAGAIN)
			return -EAGAIN;
		vchiq_set_conn_state(state, VCHIQ_CONNSTATE_CONNECTING);
	}

	if (state->conn_state == VCHIQ_CONNSTATE_CONNECTING) {
		if (wait_for_completion_interruptible(&state->connect))
			return -EAGAIN;

		vchiq_set_conn_state(state, VCHIQ_CONNSTATE_CONNECTED);
		/* Re-signal so any other waiter also sees the connection. */
		complete(&state->connect);
	}

	return 0;
}
/* Remove every service this instance registered with @state. */
void
vchiq_shutdown_internal(struct vchiq_state *state, struct vchiq_instance *instance)
{
	struct vchiq_service *service;
	int idx = 0;

	for (service = next_service_by_instance(state, instance, &idx);
	     service;
	     service = next_service_by_instance(state, instance, &idx)) {
		(void)vchiq_remove_service(instance, service->handle);
		vchiq_service_put(service);
	}
}
/*
 * Public API: close an open service and wait until the close completes.
 * Returns 0 on success, -EAGAIN if interrupted by a signal, or -EINVAL
 * if the service is unknown or not in a closable state.
 */
int
vchiq_close_service(struct vchiq_instance *instance, unsigned int handle)
{
	/* Unregister the service */
	struct vchiq_service *service = find_service_by_handle(instance, handle);
	int status = 0;

	if (!service)
		return -EINVAL;

	vchiq_log_info(vchiq_core_log_level, "%d: close_service:%d",
		       service->state->id, service->localport);

	if ((service->srvstate == VCHIQ_SRVSTATE_FREE) ||
	    (service->srvstate == VCHIQ_SRVSTATE_LISTENING) ||
	    (service->srvstate == VCHIQ_SRVSTATE_HIDDEN)) {
		vchiq_service_put(service);
		return -EINVAL;
	}

	mark_service_closing(service);

	if (current == service->state->slot_handler_thread) {
		/* On the slot handler thread the close can run synchronously. */
		status = vchiq_close_service_internal(service, NO_CLOSE_RECVD);
		WARN_ON(status == -EAGAIN);
	} else {
		/* Mark the service for termination by the slot handler */
		request_poll(service->state, service, VCHIQ_POLL_TERMINATE);
	}

	/* Wait until the service settles in a final state. */
	while (1) {
		if (wait_for_completion_interruptible(&service->remove_event)) {
			status = -EAGAIN;
			break;
		}

		if ((service->srvstate == VCHIQ_SRVSTATE_FREE) ||
		    (service->srvstate == VCHIQ_SRVSTATE_LISTENING) ||
		    (service->srvstate == VCHIQ_SRVSTATE_OPEN))
			break;

		vchiq_log_warning(vchiq_core_log_level,
				  "%d: close_service:%d - waiting in state %s",
				  service->state->id, service->localport,
				  srvstate_names[service->srvstate]);
	}

	if (!status &&
	    (service->srvstate != VCHIQ_SRVSTATE_FREE) &&
	    (service->srvstate != VCHIQ_SRVSTATE_LISTENING))
		status = -EINVAL;

	vchiq_service_put(service);

	return status;
}
EXPORT_SYMBOL(vchiq_close_service);
/*
 * Public API: remove a service entirely (servers are not left in the
 * LISTENING state).  Returns 0 on success, -EAGAIN if interrupted, or
 * -EINVAL if the service is unknown or already free.
 */
int
vchiq_remove_service(struct vchiq_instance *instance, unsigned int handle)
{
	/* Unregister the service */
	struct vchiq_service *service = find_service_by_handle(instance, handle);
	int status = 0;

	if (!service)
		return -EINVAL;

	vchiq_log_info(vchiq_core_log_level, "%d: remove_service:%d",
		       service->state->id, service->localport);

	if (service->srvstate == VCHIQ_SRVSTATE_FREE) {
		vchiq_service_put(service);
		return -EINVAL;
	}

	mark_service_closing(service);

	if ((service->srvstate == VCHIQ_SRVSTATE_HIDDEN) ||
	    (current == service->state->slot_handler_thread)) {
		/*
		 * Make it look like a client, because it must be removed and
		 * not left in the LISTENING state.
		 */
		service->public_fourcc = VCHIQ_FOURCC_INVALID;

		status = vchiq_close_service_internal(service, NO_CLOSE_RECVD);
		WARN_ON(status == -EAGAIN);
	} else {
		/* Mark the service for removal by the slot handler */
		request_poll(service->state, service, VCHIQ_POLL_REMOVE);
	}

	/* Wait until the service settles in a final state. */
	while (1) {
		if (wait_for_completion_interruptible(&service->remove_event)) {
			status = -EAGAIN;
			break;
		}

		if ((service->srvstate == VCHIQ_SRVSTATE_FREE) ||
		    (service->srvstate == VCHIQ_SRVSTATE_OPEN))
			break;

		vchiq_log_warning(vchiq_core_log_level,
				  "%d: remove_service:%d - waiting in state %s",
				  service->state->id, service->localport,
				  srvstate_names[service->srvstate]);
	}

	if (!status && (service->srvstate != VCHIQ_SRVSTATE_FREE))
		status = -EINVAL;

	vchiq_service_put(service);

	return status;
}
/*
 * This function may be called by kernel threads or user threads.
 * User threads may receive -EAGAIN to indicate that a signal has been
 * received and the call should be retried after being returned to user
 * context.
 * When called in blocking mode, the userdata field points to a bulk_waiter
 * structure.
 */
int vchiq_bulk_transfer(struct vchiq_instance *instance, unsigned int handle,
			void *offset, void __user *uoffset, int size, void *userdata,
			enum vchiq_bulk_mode mode, enum vchiq_bulk_dir dir)
{
	struct vchiq_service *service = find_service_by_handle(instance, handle);
	struct vchiq_bulk_queue *queue;
	struct vchiq_bulk *bulk;
	struct vchiq_state *state;
	struct bulk_waiter *bulk_waiter = NULL;
	const char dir_char = (dir == VCHIQ_BULK_TRANSMIT) ? 't' : 'r';
	const int dir_msgtype = (dir == VCHIQ_BULK_TRANSMIT) ?
		VCHIQ_MSG_BULK_TX : VCHIQ_MSG_BULK_RX;
	int status = -EINVAL;
	int payload[2];

	if (!service)
		goto error_exit;

	if (service->srvstate != VCHIQ_SRVSTATE_OPEN)
		goto error_exit;

	/* Exactly one of the kernel/user buffer pointers must be supplied. */
	if (!offset && !uoffset)
		goto error_exit;

	if (vchiq_check_service(service))
		goto error_exit;

	switch (mode) {
	case VCHIQ_BULK_MODE_NOCALLBACK:
	case VCHIQ_BULK_MODE_CALLBACK:
		break;
	case VCHIQ_BULK_MODE_BLOCKING:
		/* Caller supplies a bulk_waiter we initialise and wait on. */
		bulk_waiter = userdata;
		init_completion(&bulk_waiter->event);
		bulk_waiter->actual = 0;
		bulk_waiter->bulk = NULL;
		break;
	case VCHIQ_BULK_MODE_WAITING:
		/* Resume waiting on a bulk submitted by an earlier call. */
		bulk_waiter = userdata;
		bulk = bulk_waiter->bulk;
		goto waiting;
	default:
		goto error_exit;
	}

	state = service->state;

	queue = (dir == VCHIQ_BULK_TRANSMIT) ?
		&service->bulk_tx : &service->bulk_rx;

	if (mutex_lock_killable(&service->bulk_mutex)) {
		status = -EAGAIN;
		goto error_exit;
	}

	/* Wait for a free entry in the bulk queue, dropping the lock while asleep. */
	if (queue->local_insert == queue->remove + VCHIQ_NUM_SERVICE_BULKS) {
		VCHIQ_SERVICE_STATS_INC(service, bulk_stalls);
		do {
			mutex_unlock(&service->bulk_mutex);
			if (wait_for_completion_interruptible(&service->bulk_remove_event)) {
				status = -EAGAIN;
				goto error_exit;
			}
			if (mutex_lock_killable(&service->bulk_mutex)) {
				status = -EAGAIN;
				goto error_exit;
			}
		} while (queue->local_insert == queue->remove +
				VCHIQ_NUM_SERVICE_BULKS);
	}

	bulk = &queue->bulks[BULK_INDEX(queue->local_insert)];

	bulk->mode = mode;
	bulk->dir = dir;
	bulk->userdata = userdata;
	bulk->size = size;
	bulk->actual = VCHIQ_BULK_ACTUAL_ABORTED;

	if (vchiq_prepare_bulk_data(instance, bulk, offset, uoffset, size, dir))
		goto unlock_error_exit;

	/*
	 * Ensure that the bulk data record is visible to the peer
	 * before proceeding.
	 */
	wmb();

	vchiq_log_info(vchiq_core_log_level, "%d: bt (%d->%d) %cx %x@%pad %pK",
		       state->id, service->localport, service->remoteport,
		       dir_char, size, &bulk->data, userdata);

	/*
	 * The slot mutex must be held when the service is being closed, so
	 * claim it here to ensure that isn't happening
	 */
	if (mutex_lock_killable(&state->slot_mutex)) {
		status = -EAGAIN;
		goto cancel_bulk_error_exit;
	}

	if (service->srvstate != VCHIQ_SRVSTATE_OPEN)
		goto unlock_both_error_exit;

	payload[0] = lower_32_bits(bulk->data);
	payload[1] = bulk->size;
	status = queue_message(state,
			       NULL,
			       VCHIQ_MAKE_MSG(dir_msgtype,
					      service->localport,
					      service->remoteport),
			       memcpy_copy_callback,
			       &payload,
			       sizeof(payload),
			       QMFLAGS_IS_BLOCKING |
			       QMFLAGS_NO_MUTEX_LOCK |
			       QMFLAGS_NO_MUTEX_UNLOCK);
	if (status)
		goto unlock_both_error_exit;

	queue->local_insert++;

	mutex_unlock(&state->slot_mutex);
	mutex_unlock(&service->bulk_mutex);

	vchiq_log_trace(vchiq_core_log_level, "%d: bt:%d %cx li=%x ri=%x p=%x",
			state->id, service->localport, dir_char, queue->local_insert,
			queue->remote_insert, queue->process);

waiting:
	vchiq_service_put(service);

	status = 0;

	/* Blocking/waiting modes sleep until the bulk completes or aborts. */
	if (bulk_waiter) {
		bulk_waiter->bulk = bulk;
		if (wait_for_completion_interruptible(&bulk_waiter->event))
			status = -EAGAIN;
		else if (bulk_waiter->actual == VCHIQ_BULK_ACTUAL_ABORTED)
			status = -EINVAL;
	}

	return status;

unlock_both_error_exit:
	mutex_unlock(&state->slot_mutex);
cancel_bulk_error_exit:
	vchiq_complete_bulk(service->instance, bulk);
unlock_error_exit:
	mutex_unlock(&service->bulk_mutex);

error_exit:
	if (service)
		vchiq_service_put(service);
	return status;
}
/*
 * Queue a message on @handle, copying the payload via @copy_callback.
 * Returns 0 on success, -EAGAIN on transient contention, or -EINVAL for
 * an unknown service, a bad size, or a service that isn't open.
 */
int
vchiq_queue_message(struct vchiq_instance *instance, unsigned int handle,
		    ssize_t (*copy_callback)(void *context, void *dest,
					     size_t offset, size_t maxsize),
		    void *context,
		    size_t size)
{
	struct vchiq_service *service = find_service_by_handle(instance, handle);
	int ret = -EINVAL;
	int data_id;

	if (!service)
		goto out;

	if (vchiq_check_service(service))
		goto out;

	/* Reject empty and oversized messages. */
	if (!size || size > VCHIQ_MAX_MSG_SIZE) {
		VCHIQ_SERVICE_STATS_INC(service, error_count);
		goto out;
	}

	data_id = MAKE_DATA(service->localport, service->remoteport);

	/* Synchronous services use the dedicated sync path. */
	if (service->srvstate == VCHIQ_SRVSTATE_OPEN)
		ret = queue_message(service->state, service, data_id,
				    copy_callback, context, size, 1);
	else if (service->srvstate == VCHIQ_SRVSTATE_OPENSYNC)
		ret = queue_message_sync(service->state, service, data_id,
					 copy_callback, context, size, 1);
	else
		ret = -EINVAL;

out:
	if (service)
		vchiq_service_put(service);

	return ret;
}
/*
 * Kernel-mode convenience wrapper: queue @data, retrying until the
 * message is accepted (this entry point is expected to block).
 */
int vchiq_queue_kernel_message(struct vchiq_instance *instance, unsigned int handle, void *data,
			       unsigned int size)
{
	int ret;

	do {
		ret = vchiq_queue_message(instance, handle, memcpy_copy_callback,
					  data, size);

		/* -EAGAIN is transient contention; back off briefly and retry. */
		if (ret != -EAGAIN)
			break;

		msleep(1);
	} while (1);

	return ret;
}
EXPORT_SYMBOL(vchiq_queue_kernel_message);
/*
 * Return a received message to its slot once the caller is done with
 * @header.  Handles both ordinary slot messages and synchronous-mode
 * messages; headers outside both ranges are ignored.
 */
void
vchiq_release_message(struct vchiq_instance *instance, unsigned int handle,
		      struct vchiq_header *header)
{
	struct vchiq_service *service = find_service_by_handle(instance, handle);
	struct vchiq_shared_state *remote;
	struct vchiq_state *state;
	int slot_index;

	if (!service)
		return;

	state = service->state;
	remote = state->remote;

	slot_index = SLOT_INDEX_FROM_DATA(state, (void *)header);

	if ((slot_index >= remote->slot_first) &&
	    (slot_index <= remote->slot_last)) {
		int msgid = header->msgid;

		/* Only claimed messages need their slot released. */
		if (msgid & VCHIQ_MSGID_CLAIMED) {
			struct vchiq_slot_info *slot_info =
				SLOT_INFO_FROM_INDEX(state, slot_index);

			release_slot(state, slot_info, header, service);
		}
	} else if (slot_index == remote->slot_sync) {
		release_message_sync(state, header);
	}

	vchiq_service_put(service);
}
EXPORT_SYMBOL(vchiq_release_message);
/*
 * Hand a synchronous-mode message slot back to the peer: mark the
 * header as padding and signal the remote sync_release event.
 */
static void
release_message_sync(struct vchiq_state *state, struct vchiq_header *header)
{
	header->msgid = VCHIQ_MSGID_PADDING;
	remote_event_signal(&state->remote->sync_release);
}
/*
 * Report the peer's negotiated version for @handle via @peer_version.
 * Returns 0 on success, -EINVAL for an unknown/invalid service or a
 * NULL output pointer.
 */
int
vchiq_get_peer_version(struct vchiq_instance *instance, unsigned int handle, short *peer_version)
{
	struct vchiq_service *service = find_service_by_handle(instance, handle);
	int ret = -EINVAL;

	if (service && !vchiq_check_service(service) && peer_version) {
		*peer_version = service->peer_version;
		ret = 0;
	}

	if (service)
		vchiq_service_put(service);

	return ret;
}
EXPORT_SYMBOL(vchiq_get_peer_version);
/*
 * Report this implementation's compile-time limits (message size, bulk
 * limits, service count, supported version range) to the caller.
 */
void vchiq_get_config(struct vchiq_config *config)
{
	config->max_msg_size = VCHIQ_MAX_MSG_SIZE;
	config->bulk_threshold = VCHIQ_MAX_MSG_SIZE;
	config->max_outstanding_bulks = VCHIQ_NUM_SERVICE_BULKS;
	config->max_services = VCHIQ_MAX_SERVICES;
	config->version = VCHIQ_VERSION;
	config->version_min = VCHIQ_VERSION_MIN;
}
/*
 * Set a per-service option (autoclose, quotas, sync mode, tracing).
 * Quota changes wake the service if it is now back under quota.
 * Returns 0 on success, -EINVAL for an unknown service, a bad option,
 * or an out-of-range value.
 */
int
vchiq_set_service_option(struct vchiq_instance *instance, unsigned int handle,
			 enum vchiq_service_option option, int value)
{
	struct vchiq_service *service = find_service_by_handle(instance, handle);
	struct vchiq_service_quota *quota;
	int ret = -EINVAL;

	if (!service)
		return -EINVAL;

	switch (option) {
	case VCHIQ_SERVICE_OPTION_AUTOCLOSE:
		service->auto_close = value;
		ret = 0;
		break;

	case VCHIQ_SERVICE_OPTION_SLOT_QUOTA:
		quota = &service->state->service_quotas[service->localport];
		if (value == 0)
			value = service->state->default_slot_quota;
		if ((value >= quota->slot_use_count) &&
		    (value < (unsigned short)~0)) {
			quota->slot_quota = value;
			if ((value >= quota->slot_use_count) &&
			    (quota->message_quota >= quota->message_use_count))
				/*
				 * Signal the service that it may have
				 * dropped below its quota
				 * (FIX: '&' of the completion had been
				 * mangled into a '"' character.)
				 */
				complete(&quota->quota_event);
			ret = 0;
		}
		break;

	case VCHIQ_SERVICE_OPTION_MESSAGE_QUOTA:
		quota = &service->state->service_quotas[service->localport];
		if (value == 0)
			value = service->state->default_message_quota;
		if ((value >= quota->message_use_count) &&
		    (value < (unsigned short)~0)) {
			quota->message_quota = value;
			if ((value >= quota->message_use_count) &&
			    (quota->slot_quota >= quota->slot_use_count))
				/*
				 * Signal the service that it may have
				 * dropped below its quota
				 * (FIX: same mangled '&' as above.)
				 */
				complete(&quota->quota_event);
			ret = 0;
		}
		break;

	case VCHIQ_SERVICE_OPTION_SYNCHRONOUS:
		/* Only allowed before the service is opened. */
		if ((service->srvstate == VCHIQ_SRVSTATE_HIDDEN) ||
		    (service->srvstate == VCHIQ_SRVSTATE_LISTENING)) {
			service->sync = value;
			ret = 0;
		}
		break;

	case VCHIQ_SERVICE_OPTION_TRACE:
		service->trace = value;
		ret = 0;
		break;

	default:
		break;
	}
	vchiq_service_put(service);

	return ret;
}
/*
 * Dump one side's shared state (@shared, labelled @label): slot range,
 * positions, per-slot claim counts and the firmware debug counters.
 * Returns 0 on success or the first error from vchiq_dump().
 */
static int
vchiq_dump_shared_state(void *dump_context, struct vchiq_state *state,
			struct vchiq_shared_state *shared, const char *label)
{
	/* Names for the shared->debug[] counter slots (index 0 = count). */
	static const char *const debug_names[] = {
		"<entries>",
		"SLOT_HANDLER_COUNT",
		"SLOT_HANDLER_LINE",
		"PARSE_LINE",
		"PARSE_HEADER",
		"PARSE_MSGID",
		"AWAIT_COMPLETION_LINE",
		"DEQUEUE_MESSAGE_LINE",
		"SERVICE_CALLBACK_LINE",
		"MSG_QUEUE_FULL_COUNT",
		"COMPLETION_QUEUE_FULL_COUNT"
	};
	int i;
	char buf[80];
	int len;
	int err;

	len = scnprintf(buf, sizeof(buf), " %s: slots %d-%d tx_pos=%x recycle=%x",
			label, shared->slot_first, shared->slot_last,
			shared->tx_pos, shared->slot_queue_recycle);
	err = vchiq_dump(dump_context, buf, len + 1);
	if (err)
		return err;

	len = scnprintf(buf, sizeof(buf), " Slots claimed:");
	err = vchiq_dump(dump_context, buf, len + 1);
	if (err)
		return err;

	/* Report only slots with outstanding (unreleased) claims. */
	for (i = shared->slot_first; i <= shared->slot_last; i++) {
		struct vchiq_slot_info slot_info =
			*SLOT_INFO_FROM_INDEX(state, i);
		if (slot_info.use_count != slot_info.release_count) {
			len = scnprintf(buf, sizeof(buf), " %d: %d/%d", i, slot_info.use_count,
					slot_info.release_count);
			err = vchiq_dump(dump_context, buf, len + 1);
			if (err)
				return err;
		}
	}

	for (i = 1; i < shared->debug[DEBUG_ENTRIES]; i++) {
		len = scnprintf(buf, sizeof(buf), " DEBUG: %s = %d(%x)",
				debug_names[i], shared->debug[i], shared->debug[i]);
		err = vchiq_dump(dump_context, buf, len + 1);
		if (err)
			return err;
	}
	return 0;
}
/*
 * Dump the complete state of a VCHIQ connection: connection state,
 * positions, versions, optional statistics, slot accounting, platform
 * state, both shared-state halves, and every live service.
 * Returns 0, or the first error from a vchiq_dump* helper.
 */
int vchiq_dump_state(void *dump_context, struct vchiq_state *state)
{
	char buf[80];
	int len;
	int i;
	int err;

	len = scnprintf(buf, sizeof(buf), "State %d: %s", state->id,
			conn_state_names[state->conn_state]);
	err = vchiq_dump(dump_context, buf, len + 1);
	if (err)
		return err;

	len = scnprintf(buf, sizeof(buf), " tx_pos=%x(@%pK), rx_pos=%x(@%pK)",
			state->local->tx_pos,
			state->tx_data + (state->local_tx_pos & VCHIQ_SLOT_MASK),
			state->rx_pos,
			state->rx_data + (state->rx_pos & VCHIQ_SLOT_MASK));
	err = vchiq_dump(dump_context, buf, len + 1);
	if (err)
		return err;

	len = scnprintf(buf, sizeof(buf), " Version: %d (min %d)",
			VCHIQ_VERSION, VCHIQ_VERSION_MIN);
	err = vchiq_dump(dump_context, buf, len + 1);
	if (err)
		return err;

	/* Statistics are compiled in or out via VCHIQ_ENABLE_STATS. */
	if (VCHIQ_ENABLE_STATS) {
		len = scnprintf(buf, sizeof(buf),
				" Stats: ctrl_tx_count=%d, ctrl_rx_count=%d, error_count=%d",
				state->stats.ctrl_tx_count, state->stats.ctrl_rx_count,
				state->stats.error_count);
		err = vchiq_dump(dump_context, buf, len + 1);
		if (err)
			return err;
	}

	len = scnprintf(buf, sizeof(buf),
			" Slots: %d available (%d data), %d recyclable, %d stalls (%d data)",
			((state->slot_queue_available * VCHIQ_SLOT_SIZE) -
			 state->local_tx_pos) / VCHIQ_SLOT_SIZE,
			state->data_quota - state->data_use_count,
			state->local->slot_queue_recycle - state->slot_queue_available,
			state->stats.slot_stalls, state->stats.data_stalls);
	err = vchiq_dump(dump_context, buf, len + 1);
	if (err)
		return err;

	err = vchiq_dump_platform_state(dump_context);
	if (err)
		return err;

	/* Dump both halves of the shared state. */
	err = vchiq_dump_shared_state(dump_context,
				      state,
				      state->local,
				      "Local");
	if (err)
		return err;
	err = vchiq_dump_shared_state(dump_context,
				      state,
				      state->remote,
				      "Remote");
	if (err)
		return err;

	err = vchiq_dump_platform_instances(dump_context);
	if (err)
		return err;

	/* Walk every allocated port; find_service_by_port takes a reference. */
	for (i = 0; i < state->unused_service; i++) {
		struct vchiq_service *service = find_service_by_port(state, i);

		if (service) {
			err = vchiq_dump_service_state(dump_context, service);
			vchiq_service_put(service);
			if (err)
				return err;
		}
	}
	return 0;
}
/*
 * Dump the state of a single service: state name and refcount, and for
 * non-free services the fourcc/remote port, quotas, pending bulks and
 * (when enabled) statistics.  Note that buf is reused across branches:
 * whichever line was formatted last is flushed by the vchiq_dump() call
 * after the big if-block.
 */
int vchiq_dump_service_state(void *dump_context, struct vchiq_service *service)
{
	char buf[80];
	int len;
	int err;
	unsigned int ref_count;

	/*Don't include the lock just taken*/
	ref_count = kref_read(&service->ref_count) - 1;
	len = scnprintf(buf, sizeof(buf), "Service %u: %s (ref %u)",
			service->localport, srvstate_names[service->srvstate],
			ref_count);

	if (service->srvstate != VCHIQ_SRVSTATE_FREE) {
		char remoteport[30];
		struct vchiq_service_quota *quota =
			&service->state->service_quotas[service->localport];
		int fourcc = service->base.fourcc;
		int tx_pending, rx_pending;

		/* Render the remote port, with client id if published. */
		if (service->remoteport != VCHIQ_PORT_FREE) {
			int len2 = scnprintf(remoteport, sizeof(remoteport),
					     "%u", service->remoteport);

			if (service->public_fourcc != VCHIQ_FOURCC_INVALID)
				scnprintf(remoteport + len2, sizeof(remoteport) - len2,
					  " (client %x)", service->client_id);
		} else {
			strscpy(remoteport, "n/a", sizeof(remoteport));
		}

		len += scnprintf(buf + len, sizeof(buf) - len,
				 " '%c%c%c%c' remote %s (msg use %d/%d, slot use %d/%d)",
				 VCHIQ_FOURCC_AS_4CHARS(fourcc), remoteport,
				 quota->message_use_count, quota->message_quota,
				 quota->slot_use_count, quota->slot_quota);

		err = vchiq_dump(dump_context, buf, len + 1);
		if (err)
			return err;

		/* Pending bulk counts: inserts not yet matched by the remote. */
		tx_pending = service->bulk_tx.local_insert -
			service->bulk_tx.remote_insert;
		rx_pending = service->bulk_rx.local_insert -
			service->bulk_rx.remote_insert;

		len = scnprintf(buf, sizeof(buf),
				" Bulk: tx_pending=%d (size %d), rx_pending=%d (size %d)",
				tx_pending,
				tx_pending ?
				service->bulk_tx.bulks[BULK_INDEX(service->bulk_tx.remove)].size :
				0, rx_pending, rx_pending ?
				service->bulk_rx.bulks[BULK_INDEX(service->bulk_rx.remove)].size :
				0);

		if (VCHIQ_ENABLE_STATS) {
			err = vchiq_dump(dump_context, buf, len + 1);
			if (err)
				return err;

			len = scnprintf(buf, sizeof(buf),
					" Ctrl: tx_count=%d, tx_bytes=%llu, rx_count=%d, rx_bytes=%llu",
					service->stats.ctrl_tx_count, service->stats.ctrl_tx_bytes,
					service->stats.ctrl_rx_count, service->stats.ctrl_rx_bytes);
			err = vchiq_dump(dump_context, buf, len + 1);
			if (err)
				return err;

			len = scnprintf(buf, sizeof(buf),
					" Bulk: tx_count=%d, tx_bytes=%llu, rx_count=%d, rx_bytes=%llu",
					service->stats.bulk_tx_count, service->stats.bulk_tx_bytes,
					service->stats.bulk_rx_count, service->stats.bulk_rx_bytes);
			err = vchiq_dump(dump_context, buf, len + 1);
			if (err)
				return err;

			len = scnprintf(buf, sizeof(buf),
					" %d quota stalls, %d slot stalls, %d bulk stalls, %d aborted, %d errors",
					service->stats.quota_stalls, service->stats.slot_stalls,
					service->stats.bulk_stalls,
					service->stats.bulk_aborted_count,
					service->stats.error_count);
		}
	}

	/* Flush the last formatted line (service header, bulk or stats). */
	err = vchiq_dump(dump_context, buf, len + 1);
	if (err)
		return err;

	if (service->srvstate != VCHIQ_SRVSTATE_FREE)
		err = vchiq_dump_platform_service_state(dump_context, service);
	return err;
}
/* Open a hard-to-miss banner in the error log. */
void
vchiq_loud_error_header(void)
{
	int i;

	for (i = 0; i < 2; i++)
		vchiq_log_error(vchiq_core_log_level,
				"============================================================================");
	vchiq_log_error(vchiq_core_log_level, "=====");
}
/* Close the banner opened by vchiq_loud_error_header(). */
void
vchiq_loud_error_footer(void)
{
	int i;

	vchiq_log_error(vchiq_core_log_level, "=====");
	for (i = 0; i < 2; i++)
		vchiq_log_error(vchiq_core_log_level,
				"============================================================================");
}
/* Queue a REMOTE_USE message; fails with -ENOTCONN before connection. */
int vchiq_send_remote_use(struct vchiq_state *state)
{
	return (state->conn_state == VCHIQ_CONNSTATE_DISCONNECTED) ?
		-ENOTCONN :
		queue_message(state, NULL, MAKE_REMOTE_USE, NULL, NULL, 0, 0);
}
/* Queue a REMOTE_USE_ACTIVE message; fails with -ENOTCONN before connection. */
int vchiq_send_remote_use_active(struct vchiq_state *state)
{
	return (state->conn_state == VCHIQ_CONNSTATE_DISCONNECTED) ?
		-ENOTCONN :
		queue_message(state, NULL, MAKE_REMOTE_USE_ACTIVE,
			      NULL, NULL, 0, 0);
}
/*
 * Trace-level hex + ASCII dump of @num_bytes at @void_mem, 16 bytes per
 * line.  @addr is the address printed at the start of each line; it is a
 * caller-chosen label and need not equal the kernel pointer @void_mem.
 */
void vchiq_log_dump_mem(const char *label, u32 addr, const void *void_mem, size_t num_bytes)
{
	const u8 *mem = void_mem;
	size_t offset;
	char line_buf[100];
	char *s;

	while (num_bytes > 0) {
		s = line_buf;

		/* Hex column: each scnprintf writes at most 3 chars + NUL. */
		for (offset = 0; offset < 16; offset++) {
			if (offset < num_bytes)
				s += scnprintf(s, 4, "%02x ", mem[offset]);
			else
				s += scnprintf(s, 4, " ");
		}

		/* ASCII column: non-printable bytes become '.'. */
		for (offset = 0; offset < 16; offset++) {
			if (offset < num_bytes) {
				u8 ch = mem[offset];

				if ((ch < ' ') || (ch > '~'))
					ch = '.';
				*s++ = (char)ch;
			}
		}
		*s++ = '\0';

		if (label && (*label != '\0'))
			vchiq_log_trace(VCHIQ_LOG_TRACE, "%s: %08x: %s", label, addr, line_buf);
		else
			vchiq_log_trace(VCHIQ_LOG_TRACE, "%08x: %s", addr, line_buf);

		/* Advance to the next 16-byte row, clamping the final one. */
		addr += 16;
		mem += 16;
		if (num_bytes > 16)
			num_bytes -= 16;
		else
			num_bytes = 0;
	}
}
| linux-master | drivers/staging/vc04_services/interface/vchiq_arm/vchiq_core.c |
// SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause
/*
* Copyright (c) 2014 Raspberry Pi (Trading) Ltd. All rights reserved.
* Copyright (c) 2010-2012 Broadcom. All rights reserved.
*/
#include <linux/cdev.h>
#include <linux/fs.h>
#include <linux/device.h>
#include <linux/slab.h>
#include <linux/compat.h>
#include <linux/miscdevice.h>
#include "vchiq_core.h"
#include "vchiq_ioctl.h"
#include "vchiq_arm.h"
#include "vchiq_debugfs.h"
/*
 * Printable names of the VCHIQ ioctls, indexed by _IOC_NR(cmd); used
 * only for trace and error logging.  The ordering must match the ioctl
 * numbering in vchiq_ioctl.h — enforced by the static_assert below.
 */
static const char *const ioctl_names[] = {
	"CONNECT",
	"SHUTDOWN",
	"CREATE_SERVICE",
	"REMOVE_SERVICE",
	"QUEUE_MESSAGE",
	"QUEUE_BULK_TRANSMIT",
	"QUEUE_BULK_RECEIVE",
	"AWAIT_COMPLETION",
	"DEQUEUE_MESSAGE",
	"GET_CLIENT_ID",
	"GET_CONFIG",
	"CLOSE_SERVICE",
	"USE_SERVICE",
	"RELEASE_SERVICE",
	"SET_SERVICE_OPTION",
	"DUMP_PHYS_MEM",
	"LIB_VERSION",
	"CLOSE_DELIVERED"
};

static_assert(ARRAY_SIZE(ioctl_names) == (VCHIQ_IOC_MAX + 1));
/* Destructor passed to vchiq_add_service_internal() for the user_service. */
static void
user_service_free(void *userdata)
{
	kfree(userdata);
}
/*
 * Acknowledge that a CLOSED completion has been delivered to user space:
 * drop the reference that kept the service alive and wake any thread
 * blocked waiting for the close to be delivered.
 */
static void close_delivered(struct user_service *user_service)
{
	vchiq_log_info(vchiq_arm_log_level,
		       "%s(handle=%x)",
		       __func__, user_service->service->handle);

	if (user_service->close_pending) {
		/* Allow the underlying service to be culled */
		vchiq_service_put(user_service->service);

		/* Wake the user-thread blocked in close_ or remove_service */
		complete(&user_service->close_event);

		user_service->close_pending = 0;
	}
}
/* Cursor over a user-space element array during a queued-message copy. */
struct vchiq_io_copy_callback_context {
	struct vchiq_element *element;	/* element currently being copied */
	size_t element_offset;		/* bytes of it already consumed */
	unsigned long elements_to_go;	/* elements not yet fully copied */
};
/*
 * Copy callback for vchiq_queue_message(): gathers up to @maxsize bytes
 * from the user-space element array tracked in @context into @dest.
 * @offset is unused because the context keeps its own cursor.
 * Returns the number of bytes copied, or -EFAULT on a bad user pointer.
 */
static ssize_t vchiq_ioc_copy_element_data(void *context, void *dest,
					   size_t offset, size_t maxsize)
{
	struct vchiq_io_copy_callback_context *cc = context;
	size_t total_bytes_copied = 0;
	size_t bytes_this_round;

	while (total_bytes_copied < maxsize) {
		if (!cc->elements_to_go)
			return total_bytes_copied;

		/* Skip zero-length elements without copying. */
		if (!cc->element->size) {
			cc->elements_to_go--;
			cc->element++;
			cc->element_offset = 0;
			continue;
		}

		bytes_this_round = min(cc->element->size - cc->element_offset,
				       maxsize - total_bytes_copied);

		if (copy_from_user(dest + total_bytes_copied,
				   cc->element->data + cc->element_offset,
				   bytes_this_round))
			return -EFAULT;

		cc->element_offset += bytes_this_round;
		total_bytes_copied += bytes_this_round;

		/* Element fully consumed — advance the cursor. */
		if (cc->element_offset == cc->element->size) {
			cc->elements_to_go--;
			cc->element++;
			cc->element_offset = 0;
		}
	}

	return maxsize;
}
static int
vchiq_ioc_queue_message(struct vchiq_instance *instance, unsigned int handle,
struct vchiq_element *elements, unsigned long count)
{
struct vchiq_io_copy_callback_context context;
int status = 0;
unsigned long i;
size_t total_size = 0;
context.element = elements;
context.element_offset = 0;
context.elements_to_go = count;
for (i = 0; i < count; i++) {
if (!elements[i].data && elements[i].size != 0)
return -EFAULT;
total_size += elements[i].size;
}
status = vchiq_queue_message(instance, handle, vchiq_ioc_copy_element_data,
&context, total_size);
if (status == -EINVAL)
return -EIO;
else if (status == -EAGAIN)
return -EINTR;
return 0;
}
/*
 * VCHIQ_IOC_CREATE_SERVICE handler: allocate the per-service user state,
 * register the service with the core, and (for opens) perform the open
 * handshake.  On success args->handle is filled in for the caller.
 *
 * Fix: the second argument to vchiq_add_service_internal() had been
 * mangled by an HTML-entity corruption ("¶ms"); it must be &params.
 */
static int vchiq_ioc_create_service(struct vchiq_instance *instance,
				    struct vchiq_create_service *args)
{
	struct user_service *user_service = NULL;
	struct vchiq_service *service;
	int status = 0;
	struct vchiq_service_params_kernel params;
	int srvstate;

	/* Opening a service requires an established connection. */
	if (args->is_open && !instance->connected)
		return -ENOTCONN;

	user_service = kmalloc(sizeof(*user_service), GFP_KERNEL);
	if (!user_service)
		return -ENOMEM;

	if (args->is_open) {
		srvstate = VCHIQ_SRVSTATE_OPENING;
	} else {
		srvstate = instance->connected ?
			 VCHIQ_SRVSTATE_LISTENING : VCHIQ_SRVSTATE_HIDDEN;
	}

	params = (struct vchiq_service_params_kernel) {
		.fourcc = args->params.fourcc,
		.callback = service_callback,
		.userdata = user_service,
		.version = args->params.version,
		.version_min = args->params.version_min,
	};
	service = vchiq_add_service_internal(instance->state, &params,
					     srvstate, instance,
					     user_service_free);
	if (!service) {
		kfree(user_service);
		return -EEXIST;
	}

	/* user_service is now owned by the service (freed via destructor). */
	user_service->service = service;
	user_service->userdata = args->params.userdata;
	user_service->instance = instance;
	user_service->is_vchi = (args->is_vchi != 0);
	user_service->dequeue_pending = 0;
	user_service->close_pending = 0;
	user_service->message_available_pos = instance->completion_remove - 1;
	user_service->msg_insert = 0;
	user_service->msg_remove = 0;
	init_completion(&user_service->insert_event);
	init_completion(&user_service->remove_event);
	init_completion(&user_service->close_event);

	if (args->is_open) {
		status = vchiq_open_service_internal(service, instance->pid);
		if (status) {
			vchiq_remove_service(instance, service->handle);
			return (status == -EAGAIN) ?
				-EINTR : -EIO;
		}
	}
	args->handle = service->handle;

	return 0;
}
/*
 * VCHIQ_IOC_DEQUEUE_MESSAGE handler: remove one queued message from a
 * VCHI-style service and copy it to the user buffer (if non-NULL).
 * Returns the message size on success, -EWOULDBLOCK when non-blocking
 * with an empty queue, or a negative errno.
 */
static int vchiq_ioc_dequeue_message(struct vchiq_instance *instance,
				     struct vchiq_dequeue_message *args)
{
	struct user_service *user_service;
	struct vchiq_service *service;
	struct vchiq_header *header;
	int ret;

	DEBUG_INITIALISE(g_state.local);
	DEBUG_TRACE(DEQUEUE_MESSAGE_LINE);
	service = find_service_for_instance(instance, args->handle);
	if (!service)
		return -EINVAL;

	user_service = (struct user_service *)service->base.userdata;
	/* Only VCHI-style services queue messages for this ioctl. */
	if (user_service->is_vchi == 0) {
		ret = -EINVAL;
		goto out;
	}

	spin_lock(&msg_queue_spinlock);
	if (user_service->msg_remove == user_service->msg_insert) {
		if (!args->blocking) {
			spin_unlock(&msg_queue_spinlock);
			DEBUG_TRACE(DEQUEUE_MESSAGE_LINE);
			ret = -EWOULDBLOCK;
			goto out;
		}
		user_service->dequeue_pending = 1;
		ret = 0;
		/* Drop the lock while sleeping; retake it to re-check. */
		do {
			spin_unlock(&msg_queue_spinlock);
			DEBUG_TRACE(DEQUEUE_MESSAGE_LINE);
			if (wait_for_completion_interruptible(&user_service->insert_event)) {
				vchiq_log_info(vchiq_arm_log_level,
					       "DEQUEUE_MESSAGE interrupted");
				ret = -EINTR;
				break;
			}
			spin_lock(&msg_queue_spinlock);
		} while (user_service->msg_remove == user_service->msg_insert);

		/* Interrupted: lock is not held on this path. */
		if (ret)
			goto out;
	}

	/* Sanity check: remove must never overtake insert. */
	if (WARN_ON_ONCE((int)(user_service->msg_insert -
			       user_service->msg_remove) < 0)) {
		spin_unlock(&msg_queue_spinlock);
		ret = -EINVAL;
		goto out;
	}

	header = user_service->msg_queue[user_service->msg_remove &
					 (MSG_QUEUE_SIZE - 1)];
	user_service->msg_remove++;
	spin_unlock(&msg_queue_spinlock);

	/* Wake any producer blocked on a full message queue. */
	complete(&user_service->remove_event);
	if (!header) {
		/* A NULL entry marks the service as closed. */
		ret = -ENOTCONN;
	} else if (header->size <= args->bufsize) {
		/* Copy to user space if msgbuf is not NULL */
		if (!args->buf || (copy_to_user(args->buf, header->data, header->size) == 0)) {
			ret = header->size;
			vchiq_release_message(instance, service->handle, header);
		} else {
			ret = -EFAULT;
		}
	} else {
		vchiq_log_error(vchiq_arm_log_level,
				"header %pK: bufsize %x < size %x",
				header, args->bufsize, header->size);
		WARN(1, "invalid size\n");
		ret = -EMSGSIZE;
	}
	DEBUG_TRACE(DEQUEUE_MESSAGE_LINE);
out:
	vchiq_service_put(service);
	return ret;
}
/*
 * Handler for the QUEUE_BULK_TRANSMIT/RECEIVE ioctls.  For BLOCKING mode
 * a bulk_waiter is allocated here; if the wait is interrupted the waiter
 * is parked on the instance's list (keyed by pid) and the mode reported
 * back to user space is changed to WAITING so a later WAITING-mode call
 * from the same process can resume it.
 */
static int vchiq_irq_queue_bulk_tx_rx(struct vchiq_instance *instance,
				      struct vchiq_queue_bulk_transfer *args,
				      enum vchiq_bulk_dir dir,
				      enum vchiq_bulk_mode __user *mode)
{
	struct vchiq_service *service;
	struct bulk_waiter_node *waiter = NULL, *iter;
	void *userdata;
	int status = 0;
	int ret;

	service = find_service_for_instance(instance, args->handle);
	if (!service)
		return -EINVAL;

	if (args->mode == VCHIQ_BULK_MODE_BLOCKING) {
		waiter = kzalloc(sizeof(*waiter), GFP_KERNEL);
		if (!waiter) {
			ret = -ENOMEM;
			goto out;
		}

		userdata = &waiter->bulk_waiter;
	} else if (args->mode == VCHIQ_BULK_MODE_WAITING) {
		/* Resume a waiter parked by a previous interrupted call. */
		mutex_lock(&instance->bulk_waiter_list_mutex);
		list_for_each_entry(iter, &instance->bulk_waiter_list,
				    list) {
			if (iter->pid == current->pid) {
				list_del(&iter->list);
				waiter = iter;
				break;
			}
		}
		mutex_unlock(&instance->bulk_waiter_list_mutex);
		if (!waiter) {
			vchiq_log_error(vchiq_arm_log_level,
					"no bulk_waiter found for pid %d", current->pid);
			ret = -ESRCH;
			goto out;
		}
		vchiq_log_info(vchiq_arm_log_level,
			       "found bulk_waiter %pK for pid %d", waiter, current->pid);
		userdata = &waiter->bulk_waiter;
	} else {
		/* Callback/no-callback modes pass the user cookie through. */
		userdata = args->userdata;
	}

	status = vchiq_bulk_transfer(instance, args->handle, NULL, args->data, args->size,
				     userdata, args->mode, dir);

	if (!waiter) {
		ret = 0;
		goto out;
	}

	/* Transfer finished (or failed, or no bulk outstanding): free waiter. */
	if ((status != -EAGAIN) || fatal_signal_pending(current) ||
	    !waiter->bulk_waiter.bulk) {
		if (waiter->bulk_waiter.bulk) {
			/* Cancel the signal when the transfer completes. */
			spin_lock(&bulk_waiter_spinlock);
			waiter->bulk_waiter.bulk->userdata = NULL;
			spin_unlock(&bulk_waiter_spinlock);
		}
		kfree(waiter);
		ret = 0;
	} else {
		/* Interrupted: park the waiter and tell user space WAITING. */
		const enum vchiq_bulk_mode mode_waiting =
			VCHIQ_BULK_MODE_WAITING;
		waiter->pid = current->pid;
		mutex_lock(&instance->bulk_waiter_list_mutex);
		list_add(&waiter->list, &instance->bulk_waiter_list);
		mutex_unlock(&instance->bulk_waiter_list_mutex);
		vchiq_log_info(vchiq_arm_log_level,
			       "saved bulk_waiter %pK for pid %d", waiter, current->pid);

		ret = put_user(mode_waiting, mode);
	}
out:
	vchiq_service_put(service);
	if (ret)
		return ret;
	else if (status == -EINVAL)
		return -EIO;
	else if (status == -EAGAIN)
		return -EINTR;
	return 0;
}
/*
 * read a user pointer value from an array pointers in user space
 *
 * For compat callers the array holds 32-bit pointers (compat_uptr_t);
 * for native callers it holds full-width uintptr_t values.
 * Returns 0 on success or the get_user() error.
 */
static inline int vchiq_get_user_ptr(void __user **buf, void __user *ubuf, int index)
{
	int ret;

	if (in_compat_syscall()) {
		compat_uptr_t ptr32;
		compat_uptr_t __user *uptr = ubuf;

		ret = get_user(ptr32, uptr + index);
		if (ret)
			return ret;

		*buf = compat_ptr(ptr32);
	} else {
		uintptr_t ptr, __user *uptr = ubuf;

		ret = get_user(ptr, uptr + index);

		if (ret)
			return ret;

		*buf = (void __user *)ptr;
	}

	return 0;
}
/* 32-bit (compat) layout of struct vchiq_completion_data. */
struct vchiq_completion_data32 {
	enum vchiq_reason reason;
	compat_uptr_t header;
	compat_uptr_t service_userdata;
	compat_uptr_t bulk_userdata;
};
/*
 * Write one completion record to slot @index of the user-supplied array,
 * converting to the 32-bit layout for compat callers.
 * Returns 0 or -EFAULT.
 */
static int vchiq_put_completion(struct vchiq_completion_data __user *buf,
				struct vchiq_completion_data *completion,
				int index)
{
	struct vchiq_completion_data32 __user *buf32 = (void __user *)buf;

	if (in_compat_syscall()) {
		struct vchiq_completion_data32 tmp = {
			.reason = completion->reason,
			.header = ptr_to_compat(completion->header),
			.service_userdata = ptr_to_compat(completion->service_userdata),
			.bulk_userdata = ptr_to_compat(completion->bulk_userdata),
		};
		if (copy_to_user(&buf32[index], &tmp, sizeof(tmp)))
			return -EFAULT;
	} else {
		if (copy_to_user(&buf[index], completion, sizeof(*completion)))
			return -EFAULT;
	}

	return 0;
}
/*
 * VCHIQ_IOC_AWAIT_COMPLETION handler: block until completions are
 * available (or the instance is closing), then drain up to args->count
 * of them into the user's buffer, copying message bodies into the
 * user-supplied msgbuf pointers as needed.
 *
 * Returns the number of completions delivered, or a negative errno.  A
 * failure part-way through returns the error only if nothing was
 * delivered yet; otherwise the partial count is returned.
 */
static int vchiq_ioc_await_completion(struct vchiq_instance *instance,
				      struct vchiq_await_completion *args,
				      int __user *msgbufcountp)
{
	int msgbufcount;
	int remove;
	int ret;

	DEBUG_INITIALISE(g_state.local);
	DEBUG_TRACE(AWAIT_COMPLETION_LINE);
	if (!instance->connected)
		return -ENOTCONN;

	mutex_lock(&instance->completion_mutex);

	DEBUG_TRACE(AWAIT_COMPLETION_LINE);
	/* Sleep (mutex dropped) until a completion arrives or we close. */
	while ((instance->completion_remove == instance->completion_insert) && !instance->closing) {
		int rc;

		DEBUG_TRACE(AWAIT_COMPLETION_LINE);
		mutex_unlock(&instance->completion_mutex);
		rc = wait_for_completion_interruptible(&instance->insert_event);
		mutex_lock(&instance->completion_mutex);
		if (rc) {
			DEBUG_TRACE(AWAIT_COMPLETION_LINE);
			vchiq_log_info(vchiq_arm_log_level,
				       "AWAIT_COMPLETION interrupted");
			ret = -EINTR;
			goto out;
		}
	}
	DEBUG_TRACE(AWAIT_COMPLETION_LINE);

	msgbufcount = args->msgbufcount;
	remove = instance->completion_remove;

	/* ret doubles as the count of completions delivered so far. */
	for (ret = 0; ret < args->count; ret++) {
		struct vchiq_completion_data_kernel *completion;
		struct vchiq_completion_data user_completion;
		struct vchiq_service *service;
		struct user_service *user_service;
		struct vchiq_header *header;

		if (remove == instance->completion_insert)
			break;

		completion = &instance->completions[remove & (MAX_COMPLETIONS - 1)];

		/*
		 * A read memory barrier is needed to stop
		 * prefetch of a stale completion record
		 */
		rmb();

		service = completion->service_userdata;
		user_service = service->base.userdata;

		memset(&user_completion, 0, sizeof(user_completion));
		user_completion = (struct vchiq_completion_data) {
			.reason = completion->reason,
			.service_userdata = user_service->userdata,
		};

		header = completion->header;
		if (header) {
			void __user *msgbuf;
			int msglen;

			msglen = header->size + sizeof(struct vchiq_header);
			/* This must be a VCHIQ-style service */
			if (args->msgbufsize < msglen) {
				vchiq_log_error(vchiq_arm_log_level,
						"header %pK: msgbufsize %x < msglen %x",
						header, args->msgbufsize, msglen);
				WARN(1, "invalid message size\n");
				if (ret == 0)
					ret = -EMSGSIZE;
				break;
			}
			if (msgbufcount <= 0)
				/* Stall here for lack of a buffer for the message. */
				break;
			/* Get the pointer from user space */
			msgbufcount--;
			if (vchiq_get_user_ptr(&msgbuf, args->msgbufs,
					       msgbufcount)) {
				if (ret == 0)
					ret = -EFAULT;
				break;
			}

			/* Copy the message to user space */
			if (copy_to_user(msgbuf, header, msglen)) {
				if (ret == 0)
					ret = -EFAULT;
				break;
			}

			/* Now it has been copied, the message can be released. */
			vchiq_release_message(instance, service->handle, header);

			/* The completion must point to the msgbuf. */
			user_completion.header = msgbuf;
		}

		/* Drop the ref taken at close unless CLOSE_DELIVERED is used. */
		if ((completion->reason == VCHIQ_SERVICE_CLOSED) &&
		    !instance->use_close_delivered)
			vchiq_service_put(service);

		/*
		 * FIXME: address space mismatch, does bulk_userdata
		 * actually point to user or kernel memory?
		 */
		user_completion.bulk_userdata = completion->bulk_userdata;

		if (vchiq_put_completion(args->buf, &user_completion, ret)) {
			if (ret == 0)
				ret = -EFAULT;
			break;
		}

		/*
		 * Ensure that the above copy has completed
		 * before advancing the remove pointer.
		 */
		mb();
		remove++;
		instance->completion_remove = remove;
	}

	/* Report how many msgbufs remain unconsumed. */
	if (msgbufcount != args->msgbufcount) {
		if (put_user(msgbufcount, msgbufcountp))
			ret = -EFAULT;
	}
out:
	/* Wake producers blocked on a full completion queue. */
	if (ret)
		complete(&instance->remove_event);
	mutex_unlock(&instance->completion_mutex);
	DEBUG_TRACE(AWAIT_COMPLETION_LINE);

	return ret;
}
/*
 * Top-level ioctl dispatcher for the vchiq character device.  Decodes
 * @cmd, acts on the caller's instance, and translates internal core
 * status codes (-EINVAL/-EAGAIN) to the user ABI's -EIO/-EINTR.
 */
static long
vchiq_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
{
	struct vchiq_instance *instance = file->private_data;
	int status = 0;
	struct vchiq_service *service = NULL;
	long ret = 0;
	int i, rc;

	vchiq_log_trace(vchiq_arm_log_level,
			"%s - instance %pK, cmd %s, arg %lx", __func__, instance,
			((_IOC_TYPE(cmd) == VCHIQ_IOC_MAGIC) && (_IOC_NR(cmd) <= VCHIQ_IOC_MAX)) ?
			ioctl_names[_IOC_NR(cmd)] : "<invalid>", arg);

	switch (cmd) {
	case VCHIQ_IOC_SHUTDOWN:
		if (!instance->connected)
			break;

		/* Remove all services */
		i = 0;
		while ((service = next_service_by_instance(instance->state,
							   instance, &i))) {
			status = vchiq_remove_service(instance, service->handle);
			vchiq_service_put(service);
			if (status)
				break;
		}
		service = NULL;

		if (!status) {
			/* Wake the completion thread and ask it to exit */
			instance->closing = 1;
			complete(&instance->insert_event);
		}
		break;

	case VCHIQ_IOC_CONNECT:
		if (instance->connected) {
			ret = -EINVAL;
			break;
		}
		rc = mutex_lock_killable(&instance->state->mutex);
		if (rc) {
			vchiq_log_error(vchiq_arm_log_level,
					"vchiq: connect: could not lock mutex for state %d: %d",
					instance->state->id, rc);
			ret = -EINTR;
			break;
		}
		status = vchiq_connect_internal(instance->state, instance);
		mutex_unlock(&instance->state->mutex);

		if (!status)
			instance->connected = 1;
		else
			vchiq_log_error(vchiq_arm_log_level,
					"vchiq: could not connect: %d", status);
		break;

	case VCHIQ_IOC_CREATE_SERVICE: {
		struct vchiq_create_service __user *argp;
		struct vchiq_create_service args;

		argp = (void __user *)arg;
		if (copy_from_user(&args, argp, sizeof(args))) {
			ret = -EFAULT;
			break;
		}

		ret = vchiq_ioc_create_service(instance, &args);
		if (ret < 0)
			break;

		/* Return the new handle; undo the create if that fails. */
		if (put_user(args.handle, &argp->handle)) {
			vchiq_remove_service(instance, args.handle);
			ret = -EFAULT;
		}
	} break;

	case VCHIQ_IOC_CLOSE_SERVICE:
	case VCHIQ_IOC_REMOVE_SERVICE: {
		unsigned int handle = (unsigned int)arg;
		struct user_service *user_service;

		service = find_service_for_instance(instance, handle);
		if (!service) {
			ret = -EINVAL;
			break;
		}

		user_service = service->base.userdata;

		/*
		 * close_pending is false on first entry, and when the
		 * wait in vchiq_close_service has been interrupted.
		 */
		if (!user_service->close_pending) {
			status = (cmd == VCHIQ_IOC_CLOSE_SERVICE) ?
				 vchiq_close_service(instance, service->handle) :
				 vchiq_remove_service(instance, service->handle);
			if (status)
				break;
		}

		/*
		 * close_pending is true once the underlying service
		 * has been closed until the client library calls the
		 * CLOSE_DELIVERED ioctl, signalling close_event.
		 */
		if (user_service->close_pending &&
		    wait_for_completion_interruptible(&user_service->close_event))
			status = -EAGAIN;
		break;
	}

	case VCHIQ_IOC_USE_SERVICE:
	case VCHIQ_IOC_RELEASE_SERVICE: {
		unsigned int handle = (unsigned int)arg;

		service = find_service_for_instance(instance, handle);
		if (service) {
			ret = (cmd == VCHIQ_IOC_USE_SERVICE) ?
				vchiq_use_service_internal(service) :
				vchiq_release_service_internal(service);
			if (ret) {
				vchiq_log_error(vchiq_susp_log_level,
						"%s: cmd %s returned error %ld for service %c%c%c%c:%03d",
						__func__, (cmd == VCHIQ_IOC_USE_SERVICE) ?
						"VCHIQ_IOC_USE_SERVICE" :
						"VCHIQ_IOC_RELEASE_SERVICE",
						ret,
						VCHIQ_FOURCC_AS_4CHARS(service->base.fourcc),
						service->client_id);
			}
		} else {
			ret = -EINVAL;
		}
	} break;

	case VCHIQ_IOC_QUEUE_MESSAGE: {
		struct vchiq_queue_message args;

		if (copy_from_user(&args, (const void __user *)arg,
				   sizeof(args))) {
			ret = -EFAULT;
			break;
		}

		service = find_service_for_instance(instance, args.handle);

		if (service && (args.count <= MAX_ELEMENTS)) {
			/* Copy elements into kernel space */
			struct vchiq_element elements[MAX_ELEMENTS];

			if (copy_from_user(elements, args.elements,
					   args.count * sizeof(struct vchiq_element)) == 0)
				ret = vchiq_ioc_queue_message(instance, args.handle, elements,
							      args.count);
			else
				ret = -EFAULT;
		} else {
			ret = -EINVAL;
		}
	} break;

	case VCHIQ_IOC_QUEUE_BULK_TRANSMIT:
	case VCHIQ_IOC_QUEUE_BULK_RECEIVE: {
		struct vchiq_queue_bulk_transfer args;
		struct vchiq_queue_bulk_transfer __user *argp;

		enum vchiq_bulk_dir dir =
			(cmd == VCHIQ_IOC_QUEUE_BULK_TRANSMIT) ?
			VCHIQ_BULK_TRANSMIT : VCHIQ_BULK_RECEIVE;

		argp = (void __user *)arg;
		if (copy_from_user(&args, argp, sizeof(args))) {
			ret = -EFAULT;
			break;
		}

		ret = vchiq_irq_queue_bulk_tx_rx(instance, &args,
						 dir, &argp->mode);
	} break;

	case VCHIQ_IOC_AWAIT_COMPLETION: {
		struct vchiq_await_completion args;
		struct vchiq_await_completion __user *argp;

		argp = (void __user *)arg;
		if (copy_from_user(&args, argp, sizeof(args))) {
			ret = -EFAULT;
			break;
		}

		ret = vchiq_ioc_await_completion(instance, &args,
						 &argp->msgbufcount);
	} break;

	case VCHIQ_IOC_DEQUEUE_MESSAGE: {
		struct vchiq_dequeue_message args;

		if (copy_from_user(&args, (const void __user *)arg,
				   sizeof(args))) {
			ret = -EFAULT;
			break;
		}

		ret = vchiq_ioc_dequeue_message(instance, &args);
	} break;

	case VCHIQ_IOC_GET_CLIENT_ID: {
		unsigned int handle = (unsigned int)arg;

		ret = vchiq_get_client_id(instance, handle);
	} break;

	case VCHIQ_IOC_GET_CONFIG: {
		struct vchiq_get_config args;
		struct vchiq_config config;

		if (copy_from_user(&args, (const void __user *)arg,
				   sizeof(args))) {
			ret = -EFAULT;
			break;
		}
		/* Allow shorter (older) config structures, never longer. */
		if (args.config_size > sizeof(config)) {
			ret = -EINVAL;
			break;
		}

		vchiq_get_config(&config);
		if (copy_to_user(args.pconfig, &config, args.config_size)) {
			ret = -EFAULT;
			break;
		}
	} break;

	case VCHIQ_IOC_SET_SERVICE_OPTION: {
		struct vchiq_set_service_option args;

		if (copy_from_user(&args, (const void __user *)arg,
				   sizeof(args))) {
			ret = -EFAULT;
			break;
		}

		service = find_service_for_instance(instance, args.handle);
		if (!service) {
			ret = -EINVAL;
			break;
		}

		ret = vchiq_set_service_option(instance, args.handle, args.option,
					       args.value);
	} break;

	case VCHIQ_IOC_LIB_VERSION: {
		unsigned int lib_version = (unsigned int)arg;

		if (lib_version < VCHIQ_VERSION_MIN)
			ret = -EINVAL;
		else if (lib_version >= VCHIQ_VERSION_CLOSE_DELIVERED)
			/* Newer libraries acknowledge closes explicitly. */
			instance->use_close_delivered = 1;
	} break;

	case VCHIQ_IOC_CLOSE_DELIVERED: {
		unsigned int handle = (unsigned int)arg;

		service = find_closed_service_for_instance(instance, handle);
		if (service) {
			struct user_service *user_service =
				(struct user_service *)service->base.userdata;
			close_delivered(user_service);
		} else {
			ret = -EINVAL;
		}
	} break;

	default:
		ret = -ENOTTY;
		break;
	}

	if (service)
		vchiq_service_put(service);

	/* Map internal core status codes onto user-visible errnos. */
	if (ret == 0) {
		if (status == -EINVAL)
			ret = -EIO;
		else if (status == -EAGAIN)
			ret = -EINTR;
	}

	if (!status && (ret < 0) && (ret != -EINTR) && (ret != -EWOULDBLOCK))
		vchiq_log_info(vchiq_arm_log_level,
			       "  ioctl instance %pK, cmd %s -> status %d, %ld",
			       instance, (_IOC_NR(cmd) <= VCHIQ_IOC_MAX) ?
			       ioctl_names[_IOC_NR(cmd)] : "<invalid>", status, ret);
	else
		vchiq_log_trace(vchiq_arm_log_level,
				"  ioctl instance %pK, cmd %s -> status %d, %ld",
				instance, (_IOC_NR(cmd) <= VCHIQ_IOC_MAX) ?
				ioctl_names[_IOC_NR(cmd)] : "<invalid>", status, ret);

	return ret;
}
#if defined(CONFIG_COMPAT)
/* 32-bit (compat) layout of struct vchiq_service_params. */
struct vchiq_service_params32 {
	int fourcc;
	compat_uptr_t callback;
	compat_uptr_t userdata;
	short version; /* Increment for non-trivial changes */
	short version_min; /* Update for incompatible changes */
};

/* 32-bit (compat) layout of struct vchiq_create_service. */
struct vchiq_create_service32 {
	struct vchiq_service_params32 params;
	int is_open;
	int is_vchi;
	unsigned int handle; /* OUT */
};

#define VCHIQ_IOC_CREATE_SERVICE32 \
	_IOWR(VCHIQ_IOC_MAGIC, 2, struct vchiq_create_service32)
/*
 * Compat shim for VCHIQ_IOC_CREATE_SERVICE: widen the 32-bit argument
 * struct, create the service, and write the handle back to user space.
 */
static long
vchiq_compat_ioctl_create_service(struct file *file, unsigned int cmd,
				  struct vchiq_create_service32 __user *ptrargs32)
{
	struct vchiq_instance *instance = file->private_data;
	struct vchiq_create_service32 args32;
	struct vchiq_create_service args;
	long ret;

	if (copy_from_user(&args32, ptrargs32, sizeof(args32)))
		return -EFAULT;

	memset(&args, 0, sizeof(args));
	args.params.fourcc = args32.params.fourcc;
	args.params.callback = compat_ptr(args32.params.callback);
	args.params.userdata = compat_ptr(args32.params.userdata);
	args.params.version = args32.params.version;
	args.params.version_min = args32.params.version_min;
	args.is_open = args32.is_open;
	args.is_vchi = args32.is_vchi;
	args.handle = args32.handle;

	ret = vchiq_ioc_create_service(instance, &args);
	if (ret < 0)
		return ret;

	/* Hand the new handle back; undo the create on failure. */
	if (put_user(args.handle, &ptrargs32->handle)) {
		vchiq_remove_service(instance, args.handle);
		return -EFAULT;
	}

	return 0;
}
/* 32-bit (compat) layout of struct vchiq_element. */
struct vchiq_element32 {
	compat_uptr_t data;
	unsigned int size;
};

/* 32-bit (compat) layout of struct vchiq_queue_message. */
struct vchiq_queue_message32 {
	unsigned int handle;
	unsigned int count;
	compat_uptr_t elements;
};

#define VCHIQ_IOC_QUEUE_MESSAGE32 \
	_IOW(VCHIQ_IOC_MAGIC, 4, struct vchiq_queue_message32)
/*
 * Compat shim for VCHIQ_IOC_QUEUE_MESSAGE: widen the 32-bit element
 * array and queue the message.
 *
 * Fix: only copy the @count elements the caller supplied instead of the
 * full MAX_ELEMENTS-sized array — copying sizeof(element32) could read
 * past the end of an exactly-sized user buffer and return a spurious
 * -EFAULT for valid requests.
 */
static long
vchiq_compat_ioctl_queue_message(struct file *file,
				 unsigned int cmd,
				 struct vchiq_queue_message32 __user *arg)
{
	struct vchiq_queue_message args;
	struct vchiq_queue_message32 args32;
	struct vchiq_service *service;
	struct vchiq_instance *instance = file->private_data;
	int ret;

	if (copy_from_user(&args32, arg, sizeof(args32)))
		return -EFAULT;

	args = (struct vchiq_queue_message) {
		.handle = args32.handle,
		.count = args32.count,
		.elements = compat_ptr(args32.elements),
	};

	if (args32.count > MAX_ELEMENTS)
		return -EINVAL;

	service = find_service_for_instance(instance, args.handle);
	if (!service)
		return -EINVAL;

	if (args32.elements && args32.count) {
		struct vchiq_element32 element32[MAX_ELEMENTS];
		struct vchiq_element elements[MAX_ELEMENTS];
		unsigned int count;

		/* Copy only the elements the caller actually passed. */
		if (copy_from_user(element32, args.elements,
				   args32.count * sizeof(element32[0]))) {
			vchiq_service_put(service);
			return -EFAULT;
		}

		/* Widen the 32-bit data pointers to native width. */
		for (count = 0; count < args32.count; count++) {
			elements[count].data =
				compat_ptr(element32[count].data);
			elements[count].size = element32[count].size;
		}
		ret = vchiq_ioc_queue_message(instance, args.handle, elements,
					      args.count);
	} else {
		ret = -EINVAL;
	}
	vchiq_service_put(service);

	return ret;
}
/* 32-bit (compat) layout of struct vchiq_queue_bulk_transfer. */
struct vchiq_queue_bulk_transfer32 {
	unsigned int handle;
	compat_uptr_t data;
	unsigned int size;
	compat_uptr_t userdata;
	enum vchiq_bulk_mode mode;
};

#define VCHIQ_IOC_QUEUE_BULK_TRANSMIT32 \
	_IOWR(VCHIQ_IOC_MAGIC, 5, struct vchiq_queue_bulk_transfer32)
#define VCHIQ_IOC_QUEUE_BULK_RECEIVE32 \
	_IOWR(VCHIQ_IOC_MAGIC, 6, struct vchiq_queue_bulk_transfer32)
/*
 * Compat shim for the bulk transmit/receive ioctls: widen the 32-bit
 * argument struct and forward to the native handler.
 */
static long
vchiq_compat_ioctl_queue_bulk(struct file *file,
			      unsigned int cmd,
			      struct vchiq_queue_bulk_transfer32 __user *argp)
{
	struct vchiq_queue_bulk_transfer32 args32;
	struct vchiq_queue_bulk_transfer args;
	enum vchiq_bulk_dir dir;

	/* The ioctl number encodes the transfer direction. */
	dir = (cmd == VCHIQ_IOC_QUEUE_BULK_TRANSMIT32) ?
		VCHIQ_BULK_TRANSMIT : VCHIQ_BULK_RECEIVE;

	if (copy_from_user(&args32, argp, sizeof(args32)))
		return -EFAULT;

	memset(&args, 0, sizeof(args));
	args.handle = args32.handle;
	args.data = compat_ptr(args32.data);
	args.size = args32.size;
	args.userdata = compat_ptr(args32.userdata);
	args.mode = args32.mode;

	return vchiq_irq_queue_bulk_tx_rx(file->private_data, &args,
					  dir, &argp->mode);
}
/* 32-bit (compat) layout of struct vchiq_await_completion. */
struct vchiq_await_completion32 {
	unsigned int count;
	compat_uptr_t buf;
	unsigned int msgbufsize;
	unsigned int msgbufcount; /* IN/OUT */
	compat_uptr_t msgbufs;
};

#define VCHIQ_IOC_AWAIT_COMPLETION32 \
	_IOWR(VCHIQ_IOC_MAGIC, 7, struct vchiq_await_completion32)
/*
 * Compat handler for AWAIT_COMPLETION32: widen the 32-bit request and
 * forward it. msgbufcount is IN/OUT, so the user-space field is handed
 * down for write-back.
 */
static long
vchiq_compat_ioctl_await_completion(struct file *file,
				    unsigned int cmd,
				    struct vchiq_await_completion32 __user *argp)
{
	struct vchiq_await_completion32 req32;
	struct vchiq_await_completion req = { 0 };

	if (copy_from_user(&req32, argp, sizeof(req32)))
		return -EFAULT;

	/* Widen compat pointers, copy scalar fields verbatim. */
	req.count = req32.count;
	req.buf = compat_ptr(req32.buf);
	req.msgbufsize = req32.msgbufsize;
	req.msgbufcount = req32.msgbufcount;
	req.msgbufs = compat_ptr(req32.msgbufs);

	return vchiq_ioc_await_completion(file->private_data, &req,
					  &argp->msgbufcount);
}
/*
 * 32-bit ABI layout of struct vchiq_dequeue_message.
 * Field order is ABI - do not reorder.
 */
struct vchiq_dequeue_message32 {
	unsigned int handle;
	int blocking;
	unsigned int bufsize;
	compat_uptr_t buf;
};

#define VCHIQ_IOC_DEQUEUE_MESSAGE32 \
	_IOWR(VCHIQ_IOC_MAGIC, 8, struct vchiq_dequeue_message32)
/*
 * Compat handler for DEQUEUE_MESSAGE32: widen the 32-bit request into the
 * native layout and forward it to the common implementation.
 */
static long
vchiq_compat_ioctl_dequeue_message(struct file *file,
				   unsigned int cmd,
				   struct vchiq_dequeue_message32 __user *arg)
{
	struct vchiq_dequeue_message32 req32;
	struct vchiq_dequeue_message req = { 0 };

	if (copy_from_user(&req32, arg, sizeof(req32)))
		return -EFAULT;

	/* Widen the compat pointer, copy scalar fields verbatim. */
	req.handle = req32.handle;
	req.blocking = req32.blocking;
	req.bufsize = req32.bufsize;
	req.buf = compat_ptr(req32.buf);

	return vchiq_ioc_dequeue_message(file->private_data, &req);
}
/*
 * 32-bit ABI layout of struct vchiq_get_config.
 * Field order is ABI - do not reorder.
 */
struct vchiq_get_config32 {
	unsigned int config_size;
	compat_uptr_t pconfig;
};

#define VCHIQ_IOC_GET_CONFIG32 \
	_IOWR(VCHIQ_IOC_MAGIC, 10, struct vchiq_get_config32)
/*
 * Compat handler for GET_CONFIG32: copy up to the requested number of
 * bytes of the driver configuration out to userspace.
 */
static long
vchiq_compat_ioctl_get_config(struct file *file,
			      unsigned int cmd,
			      struct vchiq_get_config32 __user *arg)
{
	struct vchiq_get_config32 req;
	struct vchiq_config config;
	void __user *dest;

	if (copy_from_user(&req, arg, sizeof(req)))
		return -EFAULT;

	/* Userspace may request a prefix of the config, never more. */
	if (req.config_size > sizeof(config))
		return -EINVAL;

	vchiq_get_config(&config);
	dest = compat_ptr(req.pconfig);
	if (copy_to_user(dest, &config, req.config_size))
		return -EFAULT;

	return 0;
}
/*
 * Top-level compat_ioctl entry point: dispatch 32-bit ioctl numbers to
 * their marshalling handlers, falling through to the native handler for
 * commands whose layout is the same on 32- and 64-bit userspace.
 */
static long
vchiq_compat_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
{
	void __user *argp = compat_ptr(arg);

	switch (cmd) {
	case VCHIQ_IOC_CREATE_SERVICE32:
		return vchiq_compat_ioctl_create_service(file, cmd, argp);
	case VCHIQ_IOC_QUEUE_MESSAGE32:
		return vchiq_compat_ioctl_queue_message(file, cmd, argp);
	case VCHIQ_IOC_QUEUE_BULK_TRANSMIT32:
	case VCHIQ_IOC_QUEUE_BULK_RECEIVE32:
		return vchiq_compat_ioctl_queue_bulk(file, cmd, argp);
	case VCHIQ_IOC_AWAIT_COMPLETION32:
		return vchiq_compat_ioctl_await_completion(file, cmd, argp);
	case VCHIQ_IOC_DEQUEUE_MESSAGE32:
		return vchiq_compat_ioctl_dequeue_message(file, cmd, argp);
	case VCHIQ_IOC_GET_CONFIG32:
		return vchiq_compat_ioctl_get_config(file, cmd, argp);
	default:
		/* Layout-compatible command: use the native path. */
		return vchiq_ioctl(file, cmd, (unsigned long)argp);
	}
}
#endif
/*
 * Char-device open: allocate and initialize a per-fd vchiq instance.
 * Fails with -ENOTCONN before the VideoCore link is up, -ENOMEM on
 * allocation failure.
 */
static int vchiq_open(struct inode *inode, struct file *file)
{
	struct vchiq_state *state = vchiq_get_state();
	struct vchiq_instance *instance;

	vchiq_log_info(vchiq_arm_log_level, "vchiq_open");

	/* No point opening the device before VideoCore is reachable. */
	if (!state) {
		vchiq_log_error(vchiq_arm_log_level,
				"vchiq has no connection to VideoCore");
		return -ENOTCONN;
	}

	instance = kzalloc(sizeof(*instance), GFP_KERNEL);
	if (!instance)
		return -ENOMEM;

	instance->state = state;
	instance->pid = current->tgid;

	vchiq_debugfs_add_instance(instance);

	init_completion(&instance->insert_event);
	init_completion(&instance->remove_event);
	mutex_init(&instance->completion_mutex);
	mutex_init(&instance->bulk_waiter_list_mutex);
	INIT_LIST_HEAD(&instance->bulk_waiter_list);

	/* Instance lives for the lifetime of the fd; freed in release. */
	file->private_data = instance;

	return 0;
}
/*
 * Char-device release: tear down a per-fd vchiq instance.
 *
 * Wakes the completion thread, terminates every service created on this
 * instance, drains queued messages and pending completions, then frees
 * the instance. The statement order below is deliberate - services must
 * be terminated and reaped before the completion queue is drained.
 */
static int vchiq_release(struct inode *inode, struct file *file)
{
	struct vchiq_instance *instance = file->private_data;
	struct vchiq_state *state = vchiq_get_state();
	struct vchiq_service *service;
	int ret = 0;
	int i;

	vchiq_log_info(vchiq_arm_log_level, "%s: instance=%lx", __func__,
		       (unsigned long)instance);

	if (!state) {
		ret = -EPERM;
		goto out;
	}

	/* Ensure videocore is awake to allow termination. */
	vchiq_use_internal(instance->state, NULL, USE_TYPE_VCHIQ);

	mutex_lock(&instance->completion_mutex);

	/* Wake the completion thread and ask it to exit */
	instance->closing = 1;
	complete(&instance->insert_event);

	mutex_unlock(&instance->completion_mutex);

	/* Wake the slot handler if the completion queue is full. */
	complete(&instance->remove_event);

	/* Mark all services for termination... */
	i = 0;
	while ((service = next_service_by_instance(state, instance, &i))) {
		struct user_service *user_service = service->base.userdata;

		/* Wake the slot handler if the msg queue is full. */
		complete(&user_service->remove_event);

		vchiq_terminate_service_internal(service);
		vchiq_service_put(service);
	}

	/* ...and wait for them to die */
	i = 0;
	while ((service = next_service_by_instance(state, instance, &i))) {
		struct user_service *user_service = service->base.userdata;

		wait_for_completion(&service->remove_event);

		if (WARN_ON(service->srvstate != VCHIQ_SRVSTATE_FREE)) {
			vchiq_service_put(service);
			break;
		}

		/* Release every message still queued for userspace; drop
		 * the spinlock around vchiq_release_message as it may sleep.
		 */
		spin_lock(&msg_queue_spinlock);

		while (user_service->msg_remove != user_service->msg_insert) {
			struct vchiq_header *header;
			int m = user_service->msg_remove & (MSG_QUEUE_SIZE - 1);

			header = user_service->msg_queue[m];
			user_service->msg_remove++;
			spin_unlock(&msg_queue_spinlock);

			if (header)
				vchiq_release_message(instance, service->handle, header);
			spin_lock(&msg_queue_spinlock);
		}

		spin_unlock(&msg_queue_spinlock);

		vchiq_service_put(service);
	}

	/* Release any closed services */
	while (instance->completion_remove != instance->completion_insert) {
		struct vchiq_completion_data_kernel *completion;
		struct vchiq_service *service;

		completion = &instance->completions[instance->completion_remove
			& (MAX_COMPLETIONS - 1)];
		service = completion->service_userdata;
		if (completion->reason == VCHIQ_SERVICE_CLOSED) {
			struct user_service *user_service =
							service->base.userdata;

			/* Wake any blocked user-thread */
			if (instance->use_close_delivered)
				complete(&user_service->close_event);
			vchiq_service_put(service);
		}
		instance->completion_remove++;
	}

	/* Release the PEER service count. */
	vchiq_release_internal(instance->state, NULL);

	free_bulk_waiter(instance);

	vchiq_debugfs_remove_instance(instance);

	kfree(instance);
	file->private_data = NULL;

out:
	return ret;
}
/*
 * Char-device read: dump the global vchiq state as text into the user
 * buffer, honouring the file offset so the dump can be read in chunks.
 */
static ssize_t
vchiq_read(struct file *file, char __user *buf, size_t count, loff_t *ppos)
{
	int err;
	struct dump_context context = {
		.buf = buf,
		.actual = 0,
		.space = count,
		.offset = *ppos,
	};

	err = vchiq_dump_state(&context, &g_state);
	if (err)
		return err;

	/* Advance the file position by what the dump actually produced. */
	*ppos += context.actual;

	return context.actual;
}
/* File operations backing /dev/vchiq. */
static const struct file_operations
vchiq_fops = {
	.owner = THIS_MODULE,
	.unlocked_ioctl = vchiq_ioctl,
#if defined(CONFIG_COMPAT)
	.compat_ioctl = vchiq_compat_ioctl,
#endif
	.open = vchiq_open,
	.release = vchiq_release,
	.read = vchiq_read
};
/* Misc character device exposed to userspace as /dev/vchiq. */
static struct miscdevice vchiq_miscdev = {
	.fops = &vchiq_fops,
	.minor = MISC_DYNAMIC_MINOR,
	.name = "vchiq",
};
/**
 * vchiq_register_chrdev - Register the char driver for vchiq
 *			   and create the necessary class and
 *			   device files in userspace.
 * @parent: The parent of the char device.
 *
 * Returns 0 on success else returns the error code.
 */
int vchiq_register_chrdev(struct device *parent)
{
	vchiq_miscdev.parent = parent;
	return misc_register(&vchiq_miscdev);
}
/**
 * vchiq_deregister_chrdev - Deregister and cleanup the vchiq char
 *			     driver and device files
 */
void vchiq_deregister_chrdev(void)
{
	misc_deregister(&vchiq_miscdev);
}
| linux-master | drivers/staging/vc04_services/interface/vchiq_arm/vchiq_dev.c |
// SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause
/* Copyright (c) 2010-2012 Broadcom. All rights reserved. */
#include "vchiq_connected.h"
#include "vchiq_core.h"
#include <linux/module.h>
#include <linux/mutex.h>
#define MAX_CALLBACKS 10

/* Set once the vchiq stack has connected; guarded by g_connected_mutex. */
static int g_connected;
/* Number of callbacks currently queued in g_deferred_callback[]. */
static int g_num_deferred_callbacks;
/* Callbacks to run when the connection comes up. */
static void (*g_deferred_callback[MAX_CALLBACKS])(void);
/* One-shot flag used by connected_init(). */
static int g_once_init;
static DEFINE_MUTEX(g_connected_mutex);
/* Function to initialize our lock */
/*
 * NOTE(review): the mutex is statically initialized via DEFINE_MUTEX
 * above, so this only sets a flag that nothing else reads - the function
 * is effectively vestigial and a candidate for removal.
 */
static void connected_init(void)
{
	if (!g_once_init)
		g_once_init = 1;
}
/*
 * This function is used to defer initialization until the vchiq stack is
 * initialized. If the stack is already initialized, then the callback will
 * be made immediately, otherwise it will be deferred until
 * vchiq_call_connected_callbacks is called.
 */
void vchiq_add_connected_callback(void (*callback)(void))
{
	connected_init();

	if (mutex_lock_killable(&g_connected_mutex))
		return;

	if (g_connected) {
		/* We're already connected. Call the callback immediately. */
		callback();
	} else if (g_num_deferred_callbacks >= MAX_CALLBACKS) {
		/* Queue full: the callback is dropped, so shout about it. */
		vchiq_log_error(vchiq_core_log_level,
				"There are already %d callbacks registered - please increase MAX_CALLBACKS",
				g_num_deferred_callbacks);
	} else {
		g_deferred_callback[g_num_deferred_callbacks] = callback;
		g_num_deferred_callbacks++;
	}

	mutex_unlock(&g_connected_mutex);
}
EXPORT_SYMBOL(vchiq_add_connected_callback);
/*
 * Called by the vchiq stack once it is connected to the videocore.
 * Runs every deferred callback in registration order, empties the queue,
 * and marks the stack as connected so later registrations fire at once.
 */
void vchiq_call_connected_callbacks(void)
{
	int idx;

	connected_init();

	if (mutex_lock_killable(&g_connected_mutex))
		return;

	/* Invoke in FIFO order, then reset the queue. */
	for (idx = 0; idx < g_num_deferred_callbacks; idx++)
		g_deferred_callback[idx]();

	g_num_deferred_callbacks = 0;
	g_connected = 1;

	mutex_unlock(&g_connected_mutex);
}
| linux-master | drivers/staging/vc04_services/interface/vchiq_arm/vchiq_connected.c |
// SPDX-License-Identifier: GPL-2.0
/*
* Broadcom BCM2835 V4L2 driver
*
* Copyright © 2013 Raspberry Pi (Trading) Ltd.
*
* Authors: Vincent Sanders @ Collabora
* Dave Stevenson @ Broadcom
* (now [email protected])
* Simon Mellor @ Broadcom
* Luke Diamand @ Broadcom
*/
#include <linux/errno.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/slab.h>
#include <media/videobuf2-vmalloc.h>
#include <media/videobuf2-dma-contig.h>
#include <media/v4l2-device.h>
#include <media/v4l2-ioctl.h>
#include <media/v4l2-ctrls.h>
#include <media/v4l2-fh.h>
#include <media/v4l2-event.h>
#include <media/v4l2-common.h>
#include <linux/delay.h>
#include <linux/platform_device.h>
#include "../vchiq-mmal/mmal-common.h"
#include "../vchiq-mmal/mmal-encodings.h"
#include "../vchiq-mmal/mmal-vchiq.h"
#include "../vchiq-mmal/mmal-msg.h"
#include "../vchiq-mmal/mmal-parameters.h"
#include "bcm2835-camera.h"
/* Minimum capture geometry accepted from userspace. */
#define MIN_WIDTH 32
#define MIN_HEIGHT 32
#define MIN_BUFFER_SIZE (80 * 1024)

/* Default thresholds above which the still-capture path is used. */
#define MAX_VIDEO_MODE_WIDTH 1280
#define MAX_VIDEO_MODE_HEIGHT 720

#define MAX_BCM2835_CAMERAS 2

int bcm2835_v4l2_debug;
module_param_named(debug, bcm2835_v4l2_debug, int, 0644);
MODULE_PARM_DESC(bcm2835_v4l2_debug, "Debug level 0-2");

#define UNSET (-1)
static int video_nr[] = {[0 ... (MAX_BCM2835_CAMERAS - 1)] = UNSET };
module_param_array(video_nr, int, NULL, 0644);
MODULE_PARM_DESC(video_nr, "videoX start numbers, -1 is autodetect");

/* Runtime-tunable copies of the video-mode thresholds. */
static int max_video_width = MAX_VIDEO_MODE_WIDTH;
static int max_video_height = MAX_VIDEO_MODE_HEIGHT;
module_param(max_video_width, int, 0644);
MODULE_PARM_DESC(max_video_width, "Threshold for video mode");
module_param(max_video_height, int, 0644);
MODULE_PARM_DESC(max_video_height, "Threshold for video mode");

/* camera instance counter */
static atomic_t camera_instance = ATOMIC_INIT(0);

/* global device data array */
static struct bcm2835_mmal_dev *gdev[MAX_BCM2835_CAMERAS];

/* Frame-rate limits advertised to userspace. */
#define FPS_MIN 1
#define FPS_MAX 90

/* timeperframe: min/max and default */
static const struct v4l2_fract
	tpf_min = {.numerator = 1, .denominator = FPS_MAX},
	tpf_max = {.numerator = 1, .denominator = FPS_MIN},
	tpf_default = {.numerator = 1000, .denominator = 30000};

/* Container for MMAL and VB2 buffers*/
/* The mmal member is embedded so container_of() recovers the vb2 buffer
 * from the MMAL buffer pointer in buffer_cb().
 */
struct vb2_mmal_buffer {
	struct vb2_v4l2_buffer vb;
	struct mmal_buffer mmal;
};
/* video formats */
/*
 * Table of V4L2 pixel formats and their MMAL equivalents.
 * .ybbp is used as bytes-per-pixel when computing bytesperline (see
 * vidioc_try_fmt_vid_cap); .mmal_component selects which firmware
 * component produces the format; .remove_padding asks the GPU to strip
 * line padding (set only for the planar YUV formats here).
 */
static struct mmal_fmt formats[] = {
	{
		.fourcc = V4L2_PIX_FMT_YUV420,
		.mmal = MMAL_ENCODING_I420,
		.depth = 12,
		.mmal_component = COMP_CAMERA,
		.ybbp = 1,
		.remove_padding = true,
	}, {
		.fourcc = V4L2_PIX_FMT_YUYV,
		.mmal = MMAL_ENCODING_YUYV,
		.depth = 16,
		.mmal_component = COMP_CAMERA,
		.ybbp = 2,
		.remove_padding = false,
	}, {
		.fourcc = V4L2_PIX_FMT_RGB24,
		.mmal = MMAL_ENCODING_RGB24,
		.depth = 24,
		.mmal_component = COMP_CAMERA,
		.ybbp = 3,
		.remove_padding = false,
	}, {
		.fourcc = V4L2_PIX_FMT_JPEG,
		.flags = V4L2_FMT_FLAG_COMPRESSED,
		.mmal = MMAL_ENCODING_JPEG,
		.depth = 8,
		.mmal_component = COMP_IMAGE_ENCODE,
		.ybbp = 0,
		.remove_padding = false,
	}, {
		.fourcc = V4L2_PIX_FMT_H264,
		.flags = V4L2_FMT_FLAG_COMPRESSED,
		.mmal = MMAL_ENCODING_H264,
		.depth = 8,
		.mmal_component = COMP_VIDEO_ENCODE,
		.ybbp = 0,
		.remove_padding = false,
	}, {
		.fourcc = V4L2_PIX_FMT_MJPEG,
		.flags = V4L2_FMT_FLAG_COMPRESSED,
		.mmal = MMAL_ENCODING_MJPEG,
		.depth = 8,
		.mmal_component = COMP_VIDEO_ENCODE,
		.ybbp = 0,
		.remove_padding = false,
	}, {
		.fourcc = V4L2_PIX_FMT_YVYU,
		.mmal = MMAL_ENCODING_YVYU,
		.depth = 16,
		.mmal_component = COMP_CAMERA,
		.ybbp = 2,
		.remove_padding = false,
	}, {
		.fourcc = V4L2_PIX_FMT_VYUY,
		.mmal = MMAL_ENCODING_VYUY,
		.depth = 16,
		.mmal_component = COMP_CAMERA,
		.ybbp = 2,
		.remove_padding = false,
	}, {
		.fourcc = V4L2_PIX_FMT_UYVY,
		.mmal = MMAL_ENCODING_UYVY,
		.depth = 16,
		.mmal_component = COMP_CAMERA,
		.ybbp = 2,
		.remove_padding = false,
	}, {
		.fourcc = V4L2_PIX_FMT_NV12,
		.mmal = MMAL_ENCODING_NV12,
		.depth = 12,
		.mmal_component = COMP_CAMERA,
		.ybbp = 1,
		.remove_padding = true,
	}, {
		.fourcc = V4L2_PIX_FMT_BGR24,
		.mmal = MMAL_ENCODING_BGR24,
		.depth = 24,
		.mmal_component = COMP_CAMERA,
		.ybbp = 3,
		.remove_padding = false,
	}, {
		.fourcc = V4L2_PIX_FMT_YVU420,
		.mmal = MMAL_ENCODING_YV12,
		.depth = 12,
		.mmal_component = COMP_CAMERA,
		.ybbp = 1,
		.remove_padding = true,
	}, {
		.fourcc = V4L2_PIX_FMT_NV21,
		.mmal = MMAL_ENCODING_NV21,
		.depth = 12,
		.mmal_component = COMP_CAMERA,
		.ybbp = 1,
		.remove_padding = true,
	}, {
		.fourcc = V4L2_PIX_FMT_BGR32,
		.mmal = MMAL_ENCODING_BGRA,
		.depth = 32,
		.mmal_component = COMP_CAMERA,
		.ybbp = 4,
		.remove_padding = false,
	},
};
/* Look up the MMAL format table entry for a V4L2 pixelformat, or NULL. */
static struct mmal_fmt *get_format(struct v4l2_format *f)
{
	unsigned int i;

	for (i = 0; i < ARRAY_SIZE(formats); i++) {
		if (formats[i].fourcc == f->fmt.pix.pixelformat)
			return &formats[i];
	}

	return NULL;
}
/* ------------------------------------------------------------------
* Videobuf queue operations
* ------------------------------------------------------------------
*/
/*
 * vb2 queue_setup: validate or derive the buffer count and plane size.
 * Refuses setup before a capture port is configured, enforces the port's
 * current buffer size for CREATE_BUFS, and rounds the REQBUFS count up
 * to the port's minimum.
 */
static int queue_setup(struct vb2_queue *vq,
		       unsigned int *nbuffers, unsigned int *nplanes,
		       unsigned int sizes[], struct device *alloc_ctxs[])
{
	struct bcm2835_mmal_dev *dev = vb2_get_drv_priv(vq);
	unsigned long size;

	/* refuse queue setup if port is not configured */
	if (!dev->capture.port) {
		v4l2_err(&dev->v4l2_dev,
			 "%s: capture port not configured\n", __func__);
		return -EINVAL;
	}

	/* Handle CREATE_BUFS situation - *nplanes != 0 */
	if (*nplanes) {
		if (*nplanes != 1 ||
		    sizes[0] < dev->capture.port->current_buffer.size) {
			v4l2_dbg(1, bcm2835_v4l2_debug, &dev->v4l2_dev,
				 "%s: dev:%p Invalid buffer request from CREATE_BUFS, size %u < %u, nplanes %u != 1\n",
				 __func__, dev, sizes[0],
				 dev->capture.port->current_buffer.size,
				 *nplanes);
			return -EINVAL;
		} else {
			return 0;
		}
	}

	/* Handle REQBUFS situation */
	size = dev->capture.port->current_buffer.size;
	if (size == 0) {
		v4l2_err(&dev->v4l2_dev,
			 "%s: capture port buffer size is zero\n", __func__);
		return -EINVAL;
	}

	if (*nbuffers < dev->capture.port->minimum_buffer.num)
		*nbuffers = dev->capture.port->minimum_buffer.num;

	dev->capture.port->current_buffer.num = *nbuffers;

	*nplanes = 1;
	sizes[0] = size;

	/*
	 * videobuf2-vmalloc allocator is context-less so no need to set
	 * alloc_ctxs array.
	 */

	v4l2_dbg(1, bcm2835_v4l2_debug, &dev->v4l2_dev, "%s: dev:%p\n",
		 __func__, dev);

	return 0;
}
/* vb2 buf_init: point the MMAL descriptor at the vb2 plane and register it. */
static int buffer_init(struct vb2_buffer *vb)
{
	struct bcm2835_mmal_dev *dev = vb2_get_drv_priv(vb->vb2_queue);
	struct vb2_v4l2_buffer *vbuf = to_vb2_v4l2_buffer(vb);
	struct vb2_mmal_buffer *mmal_buf =
		container_of(vbuf, struct vb2_mmal_buffer, vb);

	v4l2_dbg(1, bcm2835_v4l2_debug, &dev->v4l2_dev, "%s: dev:%p, vb %p\n",
		 __func__, dev, vb);

	/* Mirror the vb2 plane into the embedded MMAL buffer. */
	mmal_buf->mmal.buffer_size = vb2_plane_size(&mmal_buf->vb.vb2_buf, 0);
	mmal_buf->mmal.buffer = vb2_plane_vaddr(&mmal_buf->vb.vb2_buf, 0);

	return mmal_vchi_buffer_init(dev->instance, &mmal_buf->mmal);
}
/* vb2 buf_prepare: check the plane can hold a full frame at the current
 * stride/height, and that a capture format has been configured.
 */
static int buffer_prepare(struct vb2_buffer *vb)
{
	struct bcm2835_mmal_dev *dev = vb2_get_drv_priv(vb->vb2_queue);
	unsigned long frame_bytes;

	v4l2_dbg(1, bcm2835_v4l2_debug, &dev->v4l2_dev, "%s: dev:%p, vb %p\n",
		 __func__, dev, vb);

	if (!dev->capture.port || !dev->capture.fmt)
		return -ENODEV;

	frame_bytes = dev->capture.stride * dev->capture.height;
	if (vb2_plane_size(vb, 0) < frame_bytes) {
		v4l2_err(&dev->v4l2_dev,
			 "%s data will not fit into plane (%lu < %lu)\n",
			 __func__, vb2_plane_size(vb, 0), frame_bytes);
		return -EINVAL;
	}

	return 0;
}
/* vb2 buf_cleanup: release the MMAL resources tied to this buffer. */
static void buffer_cleanup(struct vb2_buffer *vb)
{
	struct bcm2835_mmal_dev *dev = vb2_get_drv_priv(vb->vb2_queue);
	struct vb2_v4l2_buffer *vbuf = to_vb2_v4l2_buffer(vb);
	struct vb2_mmal_buffer *mmal_buf =
		container_of(vbuf, struct vb2_mmal_buffer, vb);

	v4l2_dbg(1, bcm2835_v4l2_debug, &dev->v4l2_dev, "%s: dev:%p, vb %p\n",
		 __func__, dev, vb);

	mmal_vchi_buffer_cleanup(&mmal_buf->mmal);
}
/* True when the capture path is wired to the still-capture camera port. */
static inline bool is_capturing(struct bcm2835_mmal_dev *dev)
{
	struct vchiq_mmal_port *still_port =
		&dev->component[COMP_CAMERA]->output[CAM_PORT_CAPTURE];

	return dev->capture.camera_port == still_port;
}
/*
 * MMAL completion callback for the capture port.
 *
 * Invoked for every buffer the VPU returns: handles transfer errors,
 * zero-length (EOS / stop) buffers, frame timestamping, and handing
 * completed frames back to videobuf2.
 */
static void buffer_cb(struct vchiq_mmal_instance *instance,
		      struct vchiq_mmal_port *port,
		      int status,
		      struct mmal_buffer *mmal_buf)
{
	struct bcm2835_mmal_dev *dev = port->cb_ctx;
	struct vb2_mmal_buffer *buf =
		container_of(mmal_buf, struct vb2_mmal_buffer, mmal);

	v4l2_dbg(1, bcm2835_v4l2_debug, &dev->v4l2_dev,
		 "%s: status:%d, buf:%p, length:%lu, flags %u, pts %lld\n",
		 __func__, status, buf, mmal_buf->length, mmal_buf->mmal_flags,
		 mmal_buf->pts);

	if (status) {
		/* error in transfer */
		if (buf) {
			/* there was a buffer with the error so return it */
			vb2_buffer_done(&buf->vb.vb2_buf, VB2_BUF_STATE_ERROR);
		}
		return;
	}

	if (mmal_buf->length == 0) {
		/* stream ended */
		if (dev->capture.frame_count) {
			/* empty buffer whilst capturing - expected to be an
			 * EOS, so grab another frame
			 */
			if (is_capturing(dev)) {
				v4l2_dbg(1, bcm2835_v4l2_debug, &dev->v4l2_dev,
					 "Grab another frame");
				vchiq_mmal_port_parameter_set(
					instance,
					dev->capture.camera_port,
					MMAL_PARAMETER_CAPTURE,
					&dev->capture.frame_count,
					sizeof(dev->capture.frame_count));
			}
			/* Requeue the empty buffer so the next frame has one. */
			if (vchiq_mmal_submit_buffer(instance, port,
						     &buf->mmal))
				v4l2_dbg(1, bcm2835_v4l2_debug, &dev->v4l2_dev,
					 "Failed to return EOS buffer");
		} else {
			/* stopping streaming.
			 * return buffer, and signal frame completion
			 */
			vb2_buffer_done(&buf->vb.vb2_buf, VB2_BUF_STATE_ERROR);
			complete(&dev->capture.frame_cmplt);
		}
		return;
	}

	if (!dev->capture.frame_count) {
		/* signal frame completion */
		vb2_buffer_done(&buf->vb.vb2_buf, VB2_BUF_STATE_ERROR);
		complete(&dev->capture.frame_cmplt);
		return;
	}

	if (dev->capture.vc_start_timestamp != -1 && mmal_buf->pts) {
		/* Translate the VC PTS onto the kernel monotonic clock
		 * using the start-of-stream reference pair.
		 */
		ktime_t timestamp;
		s64 runtime_us = mmal_buf->pts -
		    dev->capture.vc_start_timestamp;
		timestamp = ktime_add_us(dev->capture.kernel_start_ts,
					 runtime_us);
		v4l2_dbg(1, bcm2835_v4l2_debug, &dev->v4l2_dev,
			 "Convert start time %llu and %llu with offset %llu to %llu\n",
			 ktime_to_ns(dev->capture.kernel_start_ts),
			 dev->capture.vc_start_timestamp, mmal_buf->pts,
			 ktime_to_ns(timestamp));
		buf->vb.vb2_buf.timestamp = ktime_to_ns(timestamp);
	} else {
		/* No VC reference time: fall back to kernel timestamps. */
		buf->vb.vb2_buf.timestamp = ktime_get_ns();
	}
	buf->vb.sequence = dev->capture.sequence++;
	buf->vb.field = V4L2_FIELD_NONE;

	vb2_set_plane_payload(&buf->vb.vb2_buf, 0, mmal_buf->length);
	if (mmal_buf->mmal_flags & MMAL_BUFFER_HEADER_FLAG_KEYFRAME)
		buf->vb.flags |= V4L2_BUF_FLAG_KEYFRAME;

	vb2_buffer_done(&buf->vb.vb2_buf, VB2_BUF_STATE_DONE);

	if (mmal_buf->mmal_flags & MMAL_BUFFER_HEADER_FLAG_EOS &&
	    is_capturing(dev)) {
		v4l2_dbg(1, bcm2835_v4l2_debug, &dev->v4l2_dev,
			 "Grab another frame as buffer has EOS");
		vchiq_mmal_port_parameter_set(
			instance,
			dev->capture.camera_port,
			MMAL_PARAMETER_CAPTURE,
			&dev->capture.frame_count,
			sizeof(dev->capture.frame_count));
	}
}
/*
 * Take a reference on the camera component, enabling it on first use.
 * Returns 0 on success or -EINVAL on firmware errors.
 */
static int enable_camera(struct bcm2835_mmal_dev *dev)
{
	int ret;

	if (!dev->camera_use_count) {
		/* First user: select the camera number, then enable it. */
		ret = vchiq_mmal_port_parameter_set(
			dev->instance,
			&dev->component[COMP_CAMERA]->control,
			MMAL_PARAMETER_CAMERA_NUM, &dev->camera_num,
			sizeof(dev->camera_num));
		if (ret < 0) {
			v4l2_err(&dev->v4l2_dev,
				 "Failed setting camera num, ret %d\n", ret);
			return -EINVAL;
		}

		ret = vchiq_mmal_component_enable(dev->instance,
						  dev->component[COMP_CAMERA]);
		if (ret < 0) {
			v4l2_err(&dev->v4l2_dev,
				 "Failed enabling camera, ret %d\n", ret);
			return -EINVAL;
		}
	}

	dev->camera_use_count++;
	v4l2_dbg(1, bcm2835_v4l2_debug,
		 &dev->v4l2_dev, "enabled camera (refcount %d)\n",
		 dev->camera_use_count);

	return 0;
}
/*
 * Drop a reference on the camera component, fully disabling it when the
 * last user goes away. Returns -EINVAL on refcount underflow or firmware
 * errors.
 */
static int disable_camera(struct bcm2835_mmal_dev *dev)
{
	int ret;

	if (!dev->camera_use_count) {
		v4l2_err(&dev->v4l2_dev,
			 "Disabled the camera when already disabled\n");
		return -EINVAL;
	}
	dev->camera_use_count--;
	if (!dev->camera_use_count) {
		/* 0xFFFFFFFF mirrors the CAMERA_NUM set in enable_camera;
		 * presumably it deselects the camera - confirm against the
		 * firmware documentation.
		 */
		unsigned int i = 0xFFFFFFFF;

		v4l2_dbg(1, bcm2835_v4l2_debug, &dev->v4l2_dev,
			 "Disabling camera\n");

		ret = vchiq_mmal_component_disable(dev->instance,
						   dev->component[COMP_CAMERA]);
		if (ret < 0) {
			v4l2_err(&dev->v4l2_dev,
				 "Failed disabling camera, ret %d\n", ret);
			return -EINVAL;
		}

		vchiq_mmal_port_parameter_set(
			dev->instance,
			&dev->component[COMP_CAMERA]->control,
			MMAL_PARAMETER_CAMERA_NUM, &i,
			sizeof(i));
	}
	v4l2_dbg(1, bcm2835_v4l2_debug, &dev->v4l2_dev,
		 "Camera refcount now %d\n", dev->camera_use_count);

	return 0;
}
/* vb2 buf_queue: hand a buffer to the VPU via the active capture port. */
static void buffer_queue(struct vb2_buffer *vb)
{
	struct bcm2835_mmal_dev *dev = vb2_get_drv_priv(vb->vb2_queue);
	struct vb2_v4l2_buffer *vbuf = to_vb2_v4l2_buffer(vb);
	struct vb2_mmal_buffer *mmal_buf =
		container_of(vbuf, struct vb2_mmal_buffer, vb);

	v4l2_dbg(1, bcm2835_v4l2_debug, &dev->v4l2_dev,
		 "%s: dev:%p buf:%p, idx %u\n",
		 __func__, dev, mmal_buf, vbuf->vb2_buf.index);

	/* Submission failure is only logged; vb2 will reclaim the buffer. */
	if (vchiq_mmal_submit_buffer(dev->instance, dev->capture.port,
				     &mmal_buf->mmal) < 0)
		v4l2_err(&dev->v4l2_dev, "%s: error submitting buffer\n",
			 __func__);
}
/*
 * vb2 start_streaming: power up the camera, enable the capture path and
 * request the first frame.
 *
 * Returns 0 on success or a negative errno. On failure the camera use
 * count taken here is released again so a retry starts from a clean
 * state. (Previously the failure paths returned a bare -1 and leaked a
 * camera reference when enabling the encode tunnel failed.)
 */
static int start_streaming(struct vb2_queue *vq, unsigned int count)
{
	struct bcm2835_mmal_dev *dev = vb2_get_drv_priv(vq);
	int ret;
	u32 parameter_size;

	v4l2_dbg(1, bcm2835_v4l2_debug, &dev->v4l2_dev, "%s: dev:%p\n",
		 __func__, dev);

	/* ensure a format has actually been set */
	if (!dev->capture.port)
		return -EINVAL;

	if (enable_camera(dev) < 0) {
		v4l2_err(&dev->v4l2_dev, "Failed to enable camera\n");
		return -EINVAL;
	}

	/* enable frame capture */
	dev->capture.frame_count = 1;

	/* reset sequence number */
	dev->capture.sequence = 0;

	/* if the preview is not already running, wait for a few frames for AGC
	 * to settle down.
	 */
	if (!dev->component[COMP_PREVIEW]->enabled)
		msleep(300);

	/* enable the connection from camera to encoder (if applicable) */
	if (dev->capture.camera_port != dev->capture.port &&
	    dev->capture.camera_port) {
		ret = vchiq_mmal_port_enable(dev->instance,
					     dev->capture.camera_port, NULL);
		if (ret) {
			v4l2_err(&dev->v4l2_dev,
				 "Failed to enable encode tunnel - error %d\n",
				 ret);
			/* Undo the reference taken above and propagate the
			 * real error rather than a bare -1.
			 */
			if (disable_camera(dev) < 0)
				v4l2_err(&dev->v4l2_dev,
					 "Failed to disable camera\n");
			return ret;
		}
	}

	/* Get VC timestamp at this point in time */
	parameter_size = sizeof(dev->capture.vc_start_timestamp);
	if (vchiq_mmal_port_parameter_get(dev->instance,
					  dev->capture.camera_port,
					  MMAL_PARAMETER_SYSTEM_TIME,
					  &dev->capture.vc_start_timestamp,
					  &parameter_size)) {
		v4l2_err(&dev->v4l2_dev,
			 "Failed to get VC start time - update your VC f/w\n");

		/* Flag to indicate just to rely on kernel timestamps */
		dev->capture.vc_start_timestamp = -1;
	} else {
		v4l2_dbg(1, bcm2835_v4l2_debug, &dev->v4l2_dev,
			 "Start time %lld size %d\n",
			 dev->capture.vc_start_timestamp, parameter_size);
	}

	dev->capture.kernel_start_ts = ktime_get();

	/* enable the camera port */
	dev->capture.port->cb_ctx = dev;
	ret = vchiq_mmal_port_enable(dev->instance, dev->capture.port,
				     buffer_cb);
	if (ret) {
		v4l2_err(&dev->v4l2_dev,
			 "Failed to enable capture port - error %d. Disabling camera port again\n",
			 ret);

		vchiq_mmal_port_disable(dev->instance,
					dev->capture.camera_port);
		if (disable_camera(dev) < 0) {
			v4l2_err(&dev->v4l2_dev, "Failed to disable camera\n");
			return -EINVAL;
		}
		/* Propagate the real error rather than a bare -1. */
		return ret;
	}

	/* capture the first frame */
	vchiq_mmal_port_parameter_set(dev->instance,
				      dev->capture.camera_port,
				      MMAL_PARAMETER_CAPTURE,
				      &dev->capture.frame_count,
				      sizeof(dev->capture.frame_count));

	return 0;
}
/* abort streaming and wait for last buffer */
/*
 * vb2 stop_streaming: stop frame capture, tear down the port connection
 * and wait (with a 1s-per-iteration timeout) for every buffer still held
 * by the VPU to be returned via buffer_cb.
 */
static void stop_streaming(struct vb2_queue *vq)
{
	int ret;
	unsigned long timeout;
	struct bcm2835_mmal_dev *dev = vb2_get_drv_priv(vq);
	struct vchiq_mmal_port *port = dev->capture.port;

	v4l2_dbg(1, bcm2835_v4l2_debug, &dev->v4l2_dev, "%s: dev:%p\n",
		 __func__, dev);

	/* frame_count = 0 makes buffer_cb treat returns as "stopping". */
	init_completion(&dev->capture.frame_cmplt);
	dev->capture.frame_count = 0;

	/* ensure a format has actually been set */
	if (!port) {
		v4l2_err(&dev->v4l2_dev,
			 "no capture port - stream not started?\n");
		return;
	}

	v4l2_dbg(1, bcm2835_v4l2_debug, &dev->v4l2_dev, "stopping capturing\n");

	/* stop capturing frames */
	vchiq_mmal_port_parameter_set(dev->instance,
				      dev->capture.camera_port,
				      MMAL_PARAMETER_CAPTURE,
				      &dev->capture.frame_count,
				      sizeof(dev->capture.frame_count));

	v4l2_dbg(1, bcm2835_v4l2_debug, &dev->v4l2_dev,
		 "disabling connection\n");

	/* disable the connection from camera to encoder */
	ret = vchiq_mmal_port_disable(dev->instance, dev->capture.camera_port);
	if (!ret && dev->capture.camera_port != port) {
		v4l2_dbg(1, bcm2835_v4l2_debug, &dev->v4l2_dev,
			 "disabling port\n");
		ret = vchiq_mmal_port_disable(dev->instance, port);
	} else if (dev->capture.camera_port != port) {
		v4l2_err(&dev->v4l2_dev, "port_disable failed, error %d\n",
			 ret);
	}
	/* NOTE(review): when camera_port == port a disable failure is
	 * silently ignored here - consider logging it as well.
	 */

	/* wait for all buffers to be returned */
	while (atomic_read(&port->buffers_with_vpu)) {
		v4l2_dbg(1, bcm2835_v4l2_debug, &dev->v4l2_dev,
			 "%s: Waiting for buffers to be returned - %d outstanding\n",
			 __func__, atomic_read(&port->buffers_with_vpu));
		timeout = wait_for_completion_timeout(&dev->capture.frame_cmplt,
						      HZ);
		if (timeout == 0) {
			v4l2_err(&dev->v4l2_dev, "%s: Timeout waiting for buffers to be returned - %d outstanding\n",
				 __func__,
				 atomic_read(&port->buffers_with_vpu));
			break;
		}
	}

	if (disable_camera(dev) < 0)
		v4l2_err(&dev->v4l2_dev, "Failed to disable camera\n");
}
/* videobuf2 queue operations for the capture queue. */
static const struct vb2_ops bcm2835_mmal_video_qops = {
	.queue_setup = queue_setup,
	.buf_init = buffer_init,
	.buf_prepare = buffer_prepare,
	.buf_cleanup = buffer_cleanup,
	.buf_queue = buffer_queue,
	.start_streaming = start_streaming,
	.stop_streaming = stop_streaming,
	.wait_prepare = vb2_ops_wait_prepare,
	.wait_finish = vb2_ops_wait_finish,
};
/* ------------------------------------------------------------------
* IOCTL operations
* ------------------------------------------------------------------
*/
static int set_overlay_params(struct bcm2835_mmal_dev *dev,
struct vchiq_mmal_port *port)
{
struct mmal_parameter_displayregion prev_config = {
.set = MMAL_DISPLAY_SET_LAYER |
MMAL_DISPLAY_SET_ALPHA |
MMAL_DISPLAY_SET_DEST_RECT |
MMAL_DISPLAY_SET_FULLSCREEN,
.layer = 2,
.alpha = dev->overlay.global_alpha,
.fullscreen = 0,
.dest_rect = {
.x = dev->overlay.w.left,
.y = dev->overlay.w.top,
.width = dev->overlay.w.width,
.height = dev->overlay.w.height,
},
};
return vchiq_mmal_port_parameter_set(dev->instance, port,
MMAL_PARAMETER_DISPLAYREGION,
&prev_config, sizeof(prev_config));
}
/* overlay ioctl */
/* Enumerate supported overlay pixel formats from the shared table. */
static int vidioc_enum_fmt_vid_overlay(struct file *file, void *priv,
				       struct v4l2_fmtdesc *f)
{
	if (f->index >= ARRAY_SIZE(formats))
		return -EINVAL;

	f->pixelformat = formats[f->index].fourcc;

	return 0;
}
/* Report the currently stored overlay window. */
static int vidioc_g_fmt_vid_overlay(struct file *file, void *priv,
				    struct v4l2_format *f)
{
	struct bcm2835_mmal_dev *mmal_dev = video_drvdata(file);

	f->fmt.win = mmal_dev->overlay;

	return 0;
}
/*
 * Validate/adjust an overlay format request: force progressive, clear
 * chromakey/clipping/bitmap (unsupported), and clamp both the window
 * size and its position to the sensor limits.
 */
static int vidioc_try_fmt_vid_overlay(struct file *file, void *priv,
				      struct v4l2_format *f)
{
	struct bcm2835_mmal_dev *dev = video_drvdata(file);

	f->fmt.win.field = V4L2_FIELD_NONE;
	f->fmt.win.chromakey = 0;
	f->fmt.win.clips = NULL;
	f->fmt.win.clipcount = 0;
	f->fmt.win.bitmap = NULL;

	v4l_bound_align_image(&f->fmt.win.w.width, MIN_WIDTH, dev->max_width, 1,
			      &f->fmt.win.w.height, MIN_HEIGHT, dev->max_height,
			      1, 0);
	/* Position is clamped with the same bounds as the size. */
	v4l_bound_align_image(&f->fmt.win.w.left, MIN_WIDTH, dev->max_width, 1,
			      &f->fmt.win.w.top, MIN_HEIGHT, dev->max_height,
			      1, 0);

	v4l2_dbg(1, bcm2835_v4l2_debug, &dev->v4l2_dev,
		 "Overlay: Now w/h %dx%d l/t %dx%d\n",
		 f->fmt.win.w.width, f->fmt.win.w.height,
		 f->fmt.win.w.left, f->fmt.win.w.top);

	v4l2_dump_win_format(1,
			     bcm2835_v4l2_debug,
			     &dev->v4l2_dev,
			     &f->fmt.win,
			     __func__);
	return 0;
}
/* Apply a new overlay window; live-update the preview if it is running. */
static int vidioc_s_fmt_vid_overlay(struct file *file, void *priv,
				    struct v4l2_format *f)
{
	struct bcm2835_mmal_dev *dev = video_drvdata(file);

	/* Normalise the request first, then store it. */
	vidioc_try_fmt_vid_overlay(file, priv, f);
	dev->overlay = f->fmt.win;

	if (dev->component[COMP_PREVIEW]->enabled)
		set_overlay_params(dev,
				   &dev->component[COMP_PREVIEW]->input[0]);

	return 0;
}
/*
 * Enable or disable the on-screen preview by (dis)connecting the camera
 * preview output port to the preview renderer component's input.
 */
static int vidioc_overlay(struct file *file, void *f, unsigned int on)
{
	int ret;
	struct bcm2835_mmal_dev *dev = video_drvdata(file);
	struct vchiq_mmal_port *src;
	struct vchiq_mmal_port *dst;

	if ((on && dev->component[COMP_PREVIEW]->enabled) ||
	    (!on && !dev->component[COMP_PREVIEW]->enabled))
		return 0;	/* already in requested state */

	src = &dev->component[COMP_CAMERA]->output[CAM_PORT_PREVIEW];

	if (!on) {
		/* disconnect preview ports and disable component */
		ret = vchiq_mmal_port_disable(dev->instance, src);
		if (!ret)
			ret = vchiq_mmal_port_connect_tunnel(dev->instance, src,
							     NULL);
		if (ret >= 0)
			ret = vchiq_mmal_component_disable(
					dev->instance,
					dev->component[COMP_PREVIEW]);

		/* Drop the camera reference taken when enabling. */
		disable_camera(dev);
		return ret;
	}

	/* set preview port format and connect it to output */
	dst = &dev->component[COMP_PREVIEW]->input[0];

	ret = vchiq_mmal_port_set_format(dev->instance, src);
	if (ret < 0)
		return ret;

	ret = set_overlay_params(dev, dst);
	if (ret < 0)
		return ret;

	if (enable_camera(dev) < 0)
		return -EINVAL;

	ret = vchiq_mmal_component_enable(dev->instance,
					  dev->component[COMP_PREVIEW]);
	if (ret < 0)
		return ret;

	v4l2_dbg(1, bcm2835_v4l2_debug, &dev->v4l2_dev, "connecting %p to %p\n",
		 src, dst);
	ret = vchiq_mmal_port_connect_tunnel(dev->instance, src, dst);
	if (ret)
		return ret;

	return vchiq_mmal_port_enable(dev->instance, src, NULL);
}
/*
 * Report the (virtual) framebuffer the overlay renders into, derived
 * from the preview port's current video dimensions.
 */
static int vidioc_g_fbuf(struct file *file, void *fh,
			 struct v4l2_framebuffer *a)
{
	/* The video overlay must stay within the framebuffer and can't be
	 * positioned independently.
	 */
	struct bcm2835_mmal_dev *dev = video_drvdata(file);
	struct vchiq_mmal_port *preview_port =
		&dev->component[COMP_CAMERA]->output[CAM_PORT_PREVIEW];

	a->capability = V4L2_FBUF_CAP_EXTERNOVERLAY |
			V4L2_FBUF_CAP_GLOBAL_ALPHA;
	a->flags = V4L2_FBUF_FLAG_OVERLAY;
	a->fmt.width = preview_port->es.video.width;
	a->fmt.height = preview_port->es.video.height;
	a->fmt.pixelformat = V4L2_PIX_FMT_YUV420;
	a->fmt.bytesperline = preview_port->es.video.width;
	/* I420 is 12 bits per pixel: width * height * 3 / 2 bytes. */
	a->fmt.sizeimage = (preview_port->es.video.width *
			    preview_port->es.video.height * 3) >> 1;
	a->fmt.colorspace = V4L2_COLORSPACE_SMPTE170M;

	return 0;
}
/* input ioctls */
/*
 * Enumerate video inputs. Each device node exposes exactly one input:
 * the camera itself.
 */
static int vidioc_enum_input(struct file *file, void *priv,
			     struct v4l2_input *inp)
{
	/* only a single camera input */
	if (inp->index)
		return -EINVAL;

	inp->type = V4L2_INPUT_TYPE_CAMERA;
	/* Bounded write into the fixed-size name field (was sprintf). */
	snprintf((char *)inp->name, sizeof(inp->name), "Camera %u",
		 inp->index);

	return 0;
}
/* Only input 0 exists, so the current input is always 0. */
static int vidioc_g_input(struct file *file, void *priv, unsigned int *i)
{
	*i = 0;

	return 0;
}
/* Select an input: only input 0 is valid. */
static int vidioc_s_input(struct file *file, void *priv, unsigned int i)
{
	return i ? -EINVAL : 0;
}
/* capture ioctls */
/* Report driver/card identification, including the MMAL firmware version. */
static int vidioc_querycap(struct file *file, void *priv,
			   struct v4l2_capability *cap)
{
	struct bcm2835_mmal_dev *dev = video_drvdata(file);
	u32 major;
	u32 minor;

	vchiq_mmal_version(dev->instance, &major, &minor);

	strscpy(cap->driver, "bcm2835 mmal", sizeof(cap->driver));
	snprintf((char *)cap->card, sizeof(cap->card), "mmal service %d.%d", major, minor);
	snprintf((char *)cap->bus_info, sizeof(cap->bus_info), "platform:%s", dev->v4l2_dev.name);

	return 0;
}
/* Enumerate supported capture pixel formats from the shared table. */
static int vidioc_enum_fmt_vid_cap(struct file *file, void *priv,
				   struct v4l2_fmtdesc *f)
{
	if (f->index >= ARRAY_SIZE(formats))
		return -EINVAL;

	f->pixelformat = formats[f->index].fourcc;

	return 0;
}
/*
 * VIDIOC_G_FMT handler: report the currently configured capture format
 * from dev->capture state.
 */
static int vidioc_g_fmt_vid_cap(struct file *file, void *priv,
				struct v4l2_format *f)
{
	struct bcm2835_mmal_dev *dev = video_drvdata(file);

	f->fmt.pix.width = dev->capture.width;
	f->fmt.pix.height = dev->capture.height;
	f->fmt.pix.field = V4L2_FIELD_NONE;
	f->fmt.pix.pixelformat = dev->capture.fmt->fourcc;
	f->fmt.pix.bytesperline = dev->capture.stride;
	f->fmt.pix.sizeimage = dev->capture.buffersize;

	/* Colorspace follows the pixel format: sRGB for RGB24, JPEG for
	 * JPEG, SMPTE170M for everything else (same mapping as try_fmt).
	 */
	if (dev->capture.fmt->fourcc == V4L2_PIX_FMT_RGB24)
		f->fmt.pix.colorspace = V4L2_COLORSPACE_SRGB;
	else if (dev->capture.fmt->fourcc == V4L2_PIX_FMT_JPEG)
		f->fmt.pix.colorspace = V4L2_COLORSPACE_JPEG;
	else
		f->fmt.pix.colorspace = V4L2_COLORSPACE_SMPTE170M;
	f->fmt.pix.priv = 0;

	v4l2_dump_pix_format(1, bcm2835_v4l2_debug, &dev->v4l2_dev, &f->fmt.pix,
			     __func__);
	return 0;
}
/*
 * VIDIOC_TRY_FMT handler: clip and align the requested capture format
 * to what the hardware can produce, without changing any device state.
 *
 * Unknown fourccs are silently replaced by formats[0] (per the V4L2
 * requirement to pick a supported format rather than fail). Width and
 * height are bounded to [MIN_WIDTH..max_width]/[MIN_HEIGHT..max_height],
 * and stride/sizeimage are padded to the alignments the GPU requires.
 */
static int vidioc_try_fmt_vid_cap(struct file *file, void *priv,
				  struct v4l2_format *f)
{
	struct bcm2835_mmal_dev *dev = video_drvdata(file);
	struct mmal_fmt *mfmt;

	mfmt = get_format(f);
	if (!mfmt) {
		v4l2_dbg(1, bcm2835_v4l2_debug, &dev->v4l2_dev,
			 "Fourcc format (0x%08x) unknown.\n",
			 f->fmt.pix.pixelformat);
		f->fmt.pix.pixelformat = formats[0].fourcc;
		mfmt = get_format(f);
	}

	f->fmt.pix.field = V4L2_FIELD_NONE;

	v4l2_dbg(1, bcm2835_v4l2_debug, &dev->v4l2_dev,
		 "Clipping/aligning %dx%d format %08X\n",
		 f->fmt.pix.width, f->fmt.pix.height, f->fmt.pix.pixelformat);

	v4l_bound_align_image(&f->fmt.pix.width, MIN_WIDTH, dev->max_width, 1,
			      &f->fmt.pix.height, MIN_HEIGHT, dev->max_height,
			      1, 0);
	f->fmt.pix.bytesperline = f->fmt.pix.width * mfmt->ybbp;
	if (!mfmt->remove_padding) {
		if (mfmt->depth == 24) {
			/*
			 * 24bpp is a pain as we can't use simple masking.
			 * Min stride is width aligned to 16, times 24bpp.
			 */
			f->fmt.pix.bytesperline =
				((f->fmt.pix.width + 15) & ~15) * 3;
		} else {
			/*
			 * GPU isn't removing padding, so stride is aligned to
			 * 32
			 */
			int align_mask = ((32 * mfmt->depth) >> 3) - 1;

			f->fmt.pix.bytesperline =
				(f->fmt.pix.bytesperline + align_mask) &
				~align_mask;
		}
		v4l2_dbg(1, bcm2835_v4l2_debug, &dev->v4l2_dev,
			 "Not removing padding, so bytes/line = %d\n",
			 f->fmt.pix.bytesperline);
	}

	/* Image buffer has to be padded to allow for alignment, even though
	 * we sometimes then remove that padding before delivering the buffer.
	 */
	f->fmt.pix.sizeimage = ((f->fmt.pix.height + 15) & ~15) *
			(((f->fmt.pix.width + 31) & ~31) * mfmt->depth) >> 3;

	/* Compressed formats get at least MIN_BUFFER_SIZE regardless of
	 * the computed raw-image size.
	 */
	if ((mfmt->flags & V4L2_FMT_FLAG_COMPRESSED) &&
	    f->fmt.pix.sizeimage < MIN_BUFFER_SIZE)
		f->fmt.pix.sizeimage = MIN_BUFFER_SIZE;

	if (f->fmt.pix.pixelformat == V4L2_PIX_FMT_RGB24)
		f->fmt.pix.colorspace = V4L2_COLORSPACE_SRGB;
	else if (f->fmt.pix.pixelformat == V4L2_PIX_FMT_JPEG)
		f->fmt.pix.colorspace = V4L2_COLORSPACE_JPEG;
	else
		f->fmt.pix.colorspace = V4L2_COLORSPACE_SMPTE170M;
	f->fmt.pix.priv = 0;

	v4l2_dbg(1, bcm2835_v4l2_debug, &dev->v4l2_dev,
		 "Now %dx%d format %08X\n",
		 f->fmt.pix.width, f->fmt.pix.height, f->fmt.pix.pixelformat);

	v4l2_dump_pix_format(1, bcm2835_v4l2_debug, &dev->v4l2_dev, &f->fmt.pix,
			     __func__);
	return 0;
}
/*
 * Reconfigure the camera preview port to match the new capture format.
 *
 * The preview and encode ports must run at the same resolution, so when
 * the overlay is live it is disabled and disconnected first, the preview
 * port format updated, and then the tunnel to the preview renderer is
 * re-established and re-enabled.
 *
 * Returns 0 on success or a vchiq/mmal error code.
 */
static int mmal_setup_video_component(struct bcm2835_mmal_dev *dev,
				      struct v4l2_format *f)
{
	bool overlay_enabled = !!dev->component[COMP_PREVIEW]->enabled;
	struct vchiq_mmal_port *preview_port;
	int ret;

	preview_port = &dev->component[COMP_CAMERA]->output[CAM_PORT_PREVIEW];

	/* Preview and encode ports need to match on resolution */
	if (overlay_enabled) {
		/* Need to disable the overlay before we can update
		 * the resolution
		 */
		ret = vchiq_mmal_port_disable(dev->instance, preview_port);
		if (!ret) {
			ret = vchiq_mmal_port_connect_tunnel(dev->instance,
							     preview_port,
							     NULL);
		}
	}
	preview_port->es.video.width = f->fmt.pix.width;
	preview_port->es.video.height = f->fmt.pix.height;
	preview_port->es.video.crop.x = 0;
	preview_port->es.video.crop.y = 0;
	preview_port->es.video.crop.width = f->fmt.pix.width;
	preview_port->es.video.crop.height = f->fmt.pix.height;
	/* Frame rate is the reciprocal of the configured time-per-frame,
	 * hence numerator and denominator are swapped.
	 */
	preview_port->es.video.frame_rate.numerator =
				  dev->capture.timeperframe.denominator;
	preview_port->es.video.frame_rate.denominator =
				  dev->capture.timeperframe.numerator;
	ret = vchiq_mmal_port_set_format(dev->instance, preview_port);

	if (overlay_enabled) {
		ret = vchiq_mmal_port_connect_tunnel(dev->instance,
						     preview_port,
						     &dev->component[COMP_PREVIEW]->input[0]);
		if (ret)
			return ret;

		ret = vchiq_mmal_port_enable(dev->instance, preview_port, NULL);
	}

	return ret;
}
/*
 * Connect the camera output to an encoder component (image or video)
 * and configure the encoder output port for the requested format.
 *
 * @port:        encoder output port to configure.
 * @camera_port: camera output port to tunnel into the encoder input.
 * @component:   the encoder component itself.
 *
 * On any failure that would leave capture unusable, dev->capture.port
 * is cleared so a later start won't be attempted.
 */
static int mmal_setup_encode_component(struct bcm2835_mmal_dev *dev,
				       struct v4l2_format *f,
				       struct vchiq_mmal_port *port,
				       struct vchiq_mmal_port *camera_port,
				       struct vchiq_mmal_component *component)
{
	struct mmal_fmt *mfmt = get_format(f);
	int ret;

	v4l2_dbg(1, bcm2835_v4l2_debug, &dev->v4l2_dev,
		 "vid_cap - set up encode comp\n");

	/* configure buffering */
	camera_port->current_buffer.size = camera_port->recommended_buffer.size;
	camera_port->current_buffer.num = camera_port->recommended_buffer.num;

	ret = vchiq_mmal_port_connect_tunnel(dev->instance, camera_port,
					     &component->input[0]);
	if (ret) {
		v4l2_dbg(1, bcm2835_v4l2_debug, &dev->v4l2_dev,
			 "%s failed to create connection\n", __func__);
		/* ensure capture is not going to be tried */
		dev->capture.port = NULL;
		return ret;
	}

	port->es.video.width = f->fmt.pix.width;
	port->es.video.height = f->fmt.pix.height;
	port->es.video.crop.x = 0;
	port->es.video.crop.y = 0;
	port->es.video.crop.width = f->fmt.pix.width;
	port->es.video.crop.height = f->fmt.pix.height;
	/* Frame rate is the reciprocal of timeperframe, hence the swap. */
	port->es.video.frame_rate.numerator =
		  dev->capture.timeperframe.denominator;
	port->es.video.frame_rate.denominator =
		  dev->capture.timeperframe.numerator;

	port->format.encoding = mfmt->mmal;
	port->format.encoding_variant = 0;
	/* Set any encoding specific parameters */
	switch (mfmt->mmal_component) {
	case COMP_VIDEO_ENCODE:
		port->format.bitrate = dev->capture.encode_bitrate;
		break;
	case COMP_IMAGE_ENCODE:
		/* Could set EXIF parameters here */
		break;
	default:
		break;
	}

	ret = vchiq_mmal_port_set_format(dev->instance, port);
	if (ret) {
		v4l2_dbg(1, bcm2835_v4l2_debug, &dev->v4l2_dev,
			 "%s failed to set format %dx%d fmt %08X\n",
			 __func__,
			 f->fmt.pix.width,
			 f->fmt.pix.height,
			 f->fmt.pix.pixelformat);
		return ret;
	}

	ret = vchiq_mmal_component_enable(dev->instance, component);
	if (ret) {
		v4l2_dbg(1, bcm2835_v4l2_debug, &dev->v4l2_dev,
			 "%s Failed to enable encode components\n", __func__);
		return ret;
	}

	/* configure buffering */
	port->current_buffer.num = 1;
	port->current_buffer.size = f->fmt.pix.sizeimage;
	if (port->format.encoding == MMAL_ENCODING_JPEG) {
		v4l2_dbg(1, bcm2835_v4l2_debug, &dev->v4l2_dev,
			 "JPG - buf size now %d was %d\n",
			 f->fmt.pix.sizeimage,
			 port->current_buffer.size);
		/* JPEG buffers get a 100KiB floor */
		port->current_buffer.size =
		    (f->fmt.pix.sizeimage < (100 << 10)) ?
		    (100 << 10) : f->fmt.pix.sizeimage;
	}
	v4l2_dbg(1, bcm2835_v4l2_debug, &dev->v4l2_dev,
		 "vid_cap - cur_buf.size set to %d\n", f->fmt.pix.sizeimage);
	port->current_buffer.alignment = 0;

	return 0;
}
/*
 * (Re)build the MMAL component graph for the requested capture format.
 *
 * Tears down any previous encoder connection, then, depending on the
 * format's target component:
 *  - COMP_CAMERA:       capture raw frames straight off a camera port
 *                       (video port if within max_video_* limits,
 *                       otherwise the stills capture port);
 *  - COMP_IMAGE_ENCODE: camera stills port tunnelled into the image
 *                       encoder;
 *  - COMP_VIDEO_ENCODE: camera video port tunnelled into the video
 *                       encoder.
 *
 * Finally records the chosen ports/format in dev->capture so the
 * streaming code knows where to pull buffers from. On failure,
 * dev->capture.port is cleared so capture will not be attempted.
 */
static int mmal_setup_components(struct bcm2835_mmal_dev *dev,
				 struct v4l2_format *f)
{
	int ret;
	struct vchiq_mmal_port *port = NULL, *camera_port = NULL;
	struct vchiq_mmal_component *encode_component = NULL;
	struct mmal_fmt *mfmt = get_format(f);
	bool remove_padding;

	if (!mfmt)
		return -EINVAL;

	if (dev->capture.encode_component) {
		v4l2_dbg(1, bcm2835_v4l2_debug, &dev->v4l2_dev,
			 "vid_cap - disconnect previous tunnel\n");

		/* Disconnect any previous connection */
		vchiq_mmal_port_connect_tunnel(dev->instance,
					       dev->capture.camera_port, NULL);
		dev->capture.camera_port = NULL;
		ret = vchiq_mmal_component_disable(dev->instance,
						   dev->capture.encode_component);
		if (ret)
			v4l2_err(&dev->v4l2_dev,
				 "Failed to disable encode component %d\n",
				 ret);

		dev->capture.encode_component = NULL;
	}
	/* format dependent port setup */
	switch (mfmt->mmal_component) {
	case COMP_CAMERA:
		/* Make a further decision on port based on resolution */
		if (f->fmt.pix.width <= max_video_width &&
		    f->fmt.pix.height <= max_video_height)
			camera_port =
			    &dev->component[COMP_CAMERA]->output[CAM_PORT_VIDEO];
		else
			camera_port =
			    &dev->component[COMP_CAMERA]->output[CAM_PORT_CAPTURE];
		port = camera_port;
		break;
	case COMP_IMAGE_ENCODE:
		encode_component = dev->component[COMP_IMAGE_ENCODE];
		port = &dev->component[COMP_IMAGE_ENCODE]->output[0];
		camera_port =
		    &dev->component[COMP_CAMERA]->output[CAM_PORT_CAPTURE];
		break;
	case COMP_VIDEO_ENCODE:
		encode_component = dev->component[COMP_VIDEO_ENCODE];
		port = &dev->component[COMP_VIDEO_ENCODE]->output[0];
		camera_port =
		    &dev->component[COMP_CAMERA]->output[CAM_PORT_VIDEO];
		break;
	default:
		break;
	}

	if (!port)
		return -EINVAL;

	/* When going through an encoder the camera emits opaque buffers;
	 * otherwise it emits the requested raw format directly.
	 */
	if (encode_component)
		camera_port->format.encoding = MMAL_ENCODING_OPAQUE;
	else
		camera_port->format.encoding = mfmt->mmal;

	/* Work around old firmware that swapped RGB/BGR (see mmal_init). */
	if (dev->rgb_bgr_swapped) {
		if (camera_port->format.encoding == MMAL_ENCODING_RGB24)
			camera_port->format.encoding = MMAL_ENCODING_BGR24;
		else if (camera_port->format.encoding == MMAL_ENCODING_BGR24)
			camera_port->format.encoding = MMAL_ENCODING_RGB24;
	}

	remove_padding = mfmt->remove_padding;
	vchiq_mmal_port_parameter_set(dev->instance, camera_port,
				      MMAL_PARAMETER_NO_IMAGE_PADDING,
				      &remove_padding, sizeof(remove_padding));

	camera_port->format.encoding_variant = 0;
	camera_port->es.video.width = f->fmt.pix.width;
	camera_port->es.video.height = f->fmt.pix.height;
	camera_port->es.video.crop.x = 0;
	camera_port->es.video.crop.y = 0;
	camera_port->es.video.crop.width = f->fmt.pix.width;
	camera_port->es.video.crop.height = f->fmt.pix.height;
	/* 0/1 means "rely on the separately-set fps range" */
	camera_port->es.video.frame_rate.numerator = 0;
	camera_port->es.video.frame_rate.denominator = 1;
	camera_port->es.video.color_space = MMAL_COLOR_SPACE_JPEG_JFIF;

	ret = vchiq_mmal_port_set_format(dev->instance, camera_port);

	/* The video port shares its resolution with the preview port, so
	 * the preview path must be reconfigured too.
	 */
	if (!ret &&
	    camera_port ==
	    &dev->component[COMP_CAMERA]->output[CAM_PORT_VIDEO]) {
		ret = mmal_setup_video_component(dev, f);
	}

	if (ret) {
		v4l2_dbg(1, bcm2835_v4l2_debug, &dev->v4l2_dev,
			 "%s failed to set format %dx%d %08X\n", __func__,
			 f->fmt.pix.width, f->fmt.pix.height,
			 f->fmt.pix.pixelformat);
		/* ensure capture is not going to be tried */
		dev->capture.port = NULL;
	} else {
		if (encode_component) {
			ret = mmal_setup_encode_component(dev, f, port,
							  camera_port,
							  encode_component);

			if (ret)
				return ret;
		} else {
			/* configure buffering */
			camera_port->current_buffer.num = 1;
			camera_port->current_buffer.size = f->fmt.pix.sizeimage;
			camera_port->current_buffer.alignment = 0;
		}

		dev->capture.fmt = mfmt;
		dev->capture.stride = f->fmt.pix.bytesperline;
		dev->capture.width = camera_port->es.video.crop.width;
		dev->capture.height = camera_port->es.video.crop.height;
		dev->capture.buffersize = port->current_buffer.size;

		/* select port for capture */
		dev->capture.port = port;
		dev->capture.camera_port = camera_port;
		dev->capture.encode_component = encode_component;
		v4l2_dbg(1, bcm2835_v4l2_debug,
			 &dev->v4l2_dev,
			 "Set dev->capture.fmt %08X, %dx%d, stride %d, size %d",
			 port->format.encoding,
			 dev->capture.width, dev->capture.height,
			 dev->capture.stride, dev->capture.buffersize);
	}

	/* todo: Need to convert the vchiq/mmal error into a v4l2 error. */
	return ret;
}
/*
 * VIDIOC_S_FMT handler: validate the requested format via try_fmt, then
 * rebuild the MMAL component graph for it.
 *
 * Refuses with -EBUSY while a capture queue is active. Per the V4L2
 * spec, an unsupported fourcc is replaced with a supported one rather
 * than returning an error.
 */
static int vidioc_s_fmt_vid_cap(struct file *file, void *priv,
				struct v4l2_format *f)
{
	int ret;
	struct bcm2835_mmal_dev *dev = video_drvdata(file);
	struct mmal_fmt *mfmt;

	/* try the format to set valid parameters */
	ret = vidioc_try_fmt_vid_cap(file, priv, f);
	if (ret) {
		v4l2_err(&dev->v4l2_dev,
			 "vid_cap - vidioc_try_fmt_vid_cap failed\n");
		return ret;
	}

	/* if a capture is running refuse to set format */
	if (vb2_is_busy(&dev->capture.vb_vidq)) {
		v4l2_info(&dev->v4l2_dev, "%s device busy\n", __func__);
		return -EBUSY;
	}

	/* If the format is unsupported v4l2 says we should switch to
	 * a supported one and not return an error.
	 */
	mfmt = get_format(f);
	if (!mfmt) {
		v4l2_dbg(1, bcm2835_v4l2_debug, &dev->v4l2_dev,
			 "Fourcc format (0x%08x) unknown.\n",
			 f->fmt.pix.pixelformat);
		f->fmt.pix.pixelformat = formats[0].fourcc;
		mfmt = get_format(f);
	}

	ret = mmal_setup_components(dev, f);
	if (ret) {
		v4l2_err(&dev->v4l2_dev,
			 "%s: failed to setup mmal components: %d\n",
			 __func__, ret);
		/* mmal/vchiq errors are not v4l2 errors; report -EINVAL */
		ret = -EINVAL;
	}

	return ret;
}
/*
 * VIDIOC_ENUM_FRAMESIZES handler: any even size between the driver
 * minimum and the per-camera maximum is supported, reported as a single
 * stepwise range (hence only index 0 is valid).
 */
static int vidioc_enum_framesizes(struct file *file, void *fh,
				  struct v4l2_frmsizeenum *fsize)
{
	struct bcm2835_mmal_dev *dev = video_drvdata(file);
	/* max_width/max_height are filled in per-device below */
	static const struct v4l2_frmsize_stepwise sizes = {
		MIN_WIDTH, 0, 2,
		MIN_HEIGHT, 0, 2
	};
	int i;

	if (fsize->index)
		return -EINVAL;
	/* The pixel format must be one we support */
	for (i = 0; i < ARRAY_SIZE(formats); i++)
		if (formats[i].fourcc == fsize->pixel_format)
			break;
	if (i == ARRAY_SIZE(formats))
		return -EINVAL;
	fsize->type = V4L2_FRMSIZE_TYPE_STEPWISE;
	fsize->stepwise = sizes;
	fsize->stepwise.max_width = dev->max_width;
	fsize->stepwise.max_height = dev->max_height;
	return 0;
}
/* timeperframe is arbitrary and continuous */
/*
 * VIDIOC_ENUM_FRAMEINTERVALS handler: any interval between tpf_min and
 * tpf_max is accepted for any supported format/resolution, so a single
 * continuous range is reported (only index 0 is valid).
 */
static int vidioc_enum_frameintervals(struct file *file, void *priv,
				      struct v4l2_frmivalenum *fival)
{
	struct bcm2835_mmal_dev *dev = video_drvdata(file);
	int i;

	if (fival->index)
		return -EINVAL;

	for (i = 0; i < ARRAY_SIZE(formats); i++)
		if (formats[i].fourcc == fival->pixel_format)
			break;
	if (i == ARRAY_SIZE(formats))
		return -EINVAL;

	/* regarding width & height - we support any within range */
	if (fival->width < MIN_WIDTH || fival->width > dev->max_width ||
	    fival->height < MIN_HEIGHT || fival->height > dev->max_height)
		return -EINVAL;

	fival->type = V4L2_FRMIVAL_TYPE_CONTINUOUS;

	/* fill in stepwise (step=1.0 is required by V4L2 spec) */
	fival->stepwise.min = tpf_min;
	fival->stepwise.max = tpf_max;
	fival->stepwise.step = (struct v4l2_fract) {1, 1};

	return 0;
}
static int vidioc_g_parm(struct file *file, void *priv,
struct v4l2_streamparm *parm)
{
struct bcm2835_mmal_dev *dev = video_drvdata(file);
if (parm->type != V4L2_BUF_TYPE_VIDEO_CAPTURE)
return -EINVAL;
parm->parm.capture.capability = V4L2_CAP_TIMEPERFRAME;
parm->parm.capture.timeperframe = dev->capture.timeperframe;
parm->parm.capture.readbuffers = 1;
return 0;
}
/*
 * VIDIOC_S_PARM handler: set the capture time-per-frame.
 *
 * A zero denominator resets to the default; otherwise the value is
 * clamped to [tpf_min, tpf_max]. The actual value applied is written
 * back into parm, and the new rate is pushed to the firmware.
 */
static int vidioc_s_parm(struct file *file, void *priv,
			 struct v4l2_streamparm *parm)
{
	struct bcm2835_mmal_dev *dev = video_drvdata(file);
	struct v4l2_fract tpf;

	if (parm->type != V4L2_BUF_TYPE_VIDEO_CAPTURE)
		return -EINVAL;

	tpf = parm->parm.capture.timeperframe;

	/* tpf: {*, 0} resets timing; clip to [min, max]*/
	tpf = tpf.denominator ? tpf : tpf_default;
	tpf = V4L2_FRACT_COMPARE(tpf, <, tpf_min) ? tpf_min : tpf;
	tpf = V4L2_FRACT_COMPARE(tpf, >, tpf_max) ? tpf_max : tpf;

	dev->capture.timeperframe = tpf;
	parm->parm.capture.timeperframe = tpf;
	parm->parm.capture.readbuffers = 1;
	parm->parm.capture.capability = V4L2_CAP_TIMEPERFRAME;

	set_framerate_params(dev);

	return 0;
}
/* V4L2 ioctl dispatch table for the camera video device. */
static const struct v4l2_ioctl_ops camera0_ioctl_ops = {
	/* overlay */
	.vidioc_enum_fmt_vid_overlay = vidioc_enum_fmt_vid_overlay,
	.vidioc_g_fmt_vid_overlay = vidioc_g_fmt_vid_overlay,
	.vidioc_try_fmt_vid_overlay = vidioc_try_fmt_vid_overlay,
	.vidioc_s_fmt_vid_overlay = vidioc_s_fmt_vid_overlay,
	.vidioc_overlay = vidioc_overlay,
	.vidioc_g_fbuf = vidioc_g_fbuf,

	/* inputs */
	.vidioc_enum_input = vidioc_enum_input,
	.vidioc_g_input = vidioc_g_input,
	.vidioc_s_input = vidioc_s_input,

	/* capture */
	.vidioc_querycap = vidioc_querycap,
	.vidioc_enum_fmt_vid_cap = vidioc_enum_fmt_vid_cap,
	.vidioc_g_fmt_vid_cap = vidioc_g_fmt_vid_cap,
	.vidioc_try_fmt_vid_cap = vidioc_try_fmt_vid_cap,
	.vidioc_s_fmt_vid_cap = vidioc_s_fmt_vid_cap,

	/* buffer management: delegated to videobuf2 */
	.vidioc_reqbufs = vb2_ioctl_reqbufs,
	.vidioc_create_bufs = vb2_ioctl_create_bufs,
	.vidioc_prepare_buf = vb2_ioctl_prepare_buf,
	.vidioc_querybuf = vb2_ioctl_querybuf,
	.vidioc_qbuf = vb2_ioctl_qbuf,
	.vidioc_dqbuf = vb2_ioctl_dqbuf,
	.vidioc_enum_framesizes = vidioc_enum_framesizes,
	.vidioc_enum_frameintervals = vidioc_enum_frameintervals,
	.vidioc_g_parm = vidioc_g_parm,
	.vidioc_s_parm = vidioc_s_parm,
	.vidioc_streamon = vb2_ioctl_streamon,
	.vidioc_streamoff = vb2_ioctl_streamoff,

	.vidioc_log_status = v4l2_ctrl_log_status,
	.vidioc_subscribe_event = v4l2_ctrl_subscribe_event,
	.vidioc_unsubscribe_event = v4l2_event_unsubscribe,
};
/* ------------------------------------------------------------------
* Driver init/finalise
* ------------------------------------------------------------------
*/
/* File operations: all buffer-related fops are delegated to videobuf2. */
static const struct v4l2_file_operations camera0_fops = {
	.owner = THIS_MODULE,
	.open = v4l2_fh_open,
	.release = vb2_fop_release,
	.read = vb2_fop_read,
	.poll = vb2_fop_poll,
	.unlocked_ioctl = video_ioctl2, /* V4L2 ioctl handler */
	.mmap = vb2_fop_mmap,
};
/* Template copied into each per-camera video_device at registration. */
static const struct video_device vdev_template = {
	.name = "camera0",
	.fops = &camera0_fops,
	.ioctl_ops = &camera0_ioctl_ops,
	.release = video_device_release_empty,
	.device_caps = V4L2_CAP_VIDEO_CAPTURE | V4L2_CAP_VIDEO_OVERLAY |
		       V4L2_CAP_STREAMING | V4L2_CAP_READWRITE,
};
/* Returns the number of cameras, and also the max resolution supported
 * by those cameras.
 *
 * Queries the firmware "camera_info" component; if the component can't
 * even be created, one camera is assumed. At most num_resolutions
 * entries of @resolutions are filled in.
 */
static int get_num_cameras(struct vchiq_mmal_instance *instance,
			   unsigned int resolutions[][2], int num_resolutions)
{
	int ret;
	struct vchiq_mmal_component *cam_info_component;
	struct mmal_parameter_camera_info cam_info = {0};
	u32 param_size = sizeof(cam_info);
	int i;

	/* create a camera_info component */
	ret = vchiq_mmal_component_init(instance, "camera_info",
					&cam_info_component);
	if (ret < 0)
		/* Unusual failure - let's guess one camera. */
		return 1;

	/*
	 * NOTE: was "&param_size" mangled to "¶m_size" by an HTML-entity
	 * corruption; restored so this compiles.
	 */
	if (vchiq_mmal_port_parameter_get(instance,
					  &cam_info_component->control,
					  MMAL_PARAMETER_CAMERA_INFO,
					  &cam_info,
					  &param_size)) {
		pr_info("Failed to get camera info\n");
	}
	for (i = 0;
	     i < min_t(unsigned int, cam_info.num_cameras, num_resolutions);
	     i++) {
		resolutions[i][0] = cam_info.cameras[i].max_width;
		resolutions[i][1] = cam_info.cameras[i].max_height;
	}

	vchiq_mmal_component_finalise(instance,
				      cam_info_component);

	return cam_info.num_cameras;
}
/*
 * Push the static camera configuration (max resolutions, stills mode,
 * timestamping) to the firmware camera component's control port.
 *
 * Preview/video maxima are floored at 1920x1088 even if the module
 * parameters ask for less.
 */
static int set_camera_parameters(struct vchiq_mmal_instance *instance,
				 struct vchiq_mmal_component *camera,
				 struct bcm2835_mmal_dev *dev)
{
	struct mmal_parameter_camera_config cam_config = {
		.max_stills_w = dev->max_width,
		.max_stills_h = dev->max_height,
		.stills_yuv422 = 1,
		.one_shot_stills = 1,
		.max_preview_video_w = (max_video_width > 1920) ?
						max_video_width : 1920,
		.max_preview_video_h = (max_video_height > 1088) ?
						max_video_height : 1088,
		.num_preview_video_frames = 3,
		.stills_capture_circular_buffer_height = 0,
		.fast_preview_resume = 0,
		.use_stc_timestamp = MMAL_PARAM_TIMESTAMP_MODE_RAW_STC
	};

	return vchiq_mmal_port_parameter_set(instance, &camera->control,
					     MMAL_PARAMETER_CAMERA_CONFIG,
					     &cam_config, sizeof(cam_config));
}
#define MAX_SUPPORTED_ENCODINGS 20
/* MMAL instance and component init */
static int mmal_init(struct bcm2835_mmal_dev *dev)
{
int ret;
struct mmal_es_format_local *format;
u32 supported_encodings[MAX_SUPPORTED_ENCODINGS];
u32 param_size;
struct vchiq_mmal_component *camera;
ret = vchiq_mmal_init(&dev->instance);
if (ret < 0) {
v4l2_err(&dev->v4l2_dev, "%s: vchiq mmal init failed %d\n",
__func__, ret);
return ret;
}
/* get the camera component ready */
ret = vchiq_mmal_component_init(dev->instance, "ril.camera",
&dev->component[COMP_CAMERA]);
if (ret < 0)
goto unreg_mmal;
camera = dev->component[COMP_CAMERA];
if (camera->outputs < CAM_PORT_COUNT) {
v4l2_err(&dev->v4l2_dev, "%s: too few camera outputs %d needed %d\n",
__func__, camera->outputs, CAM_PORT_COUNT);
ret = -EINVAL;
goto unreg_camera;
}
ret = set_camera_parameters(dev->instance,
camera,
dev);
if (ret < 0) {
v4l2_err(&dev->v4l2_dev, "%s: unable to set camera parameters: %d\n",
__func__, ret);
goto unreg_camera;
}
/* There was an error in the firmware that meant the camera component
* produced BGR instead of RGB.
* This is now fixed, but in order to support the old firmwares, we
* have to check.
*/
dev->rgb_bgr_swapped = true;
param_size = sizeof(supported_encodings);
ret = vchiq_mmal_port_parameter_get(dev->instance,
&camera->output[CAM_PORT_CAPTURE],
MMAL_PARAMETER_SUPPORTED_ENCODINGS,
&supported_encodings,
¶m_size);
if (ret == 0) {
int i;
for (i = 0; i < param_size / sizeof(u32); i++) {
if (supported_encodings[i] == MMAL_ENCODING_BGR24) {
/* Found BGR24 first - old firmware. */
break;
}
if (supported_encodings[i] == MMAL_ENCODING_RGB24) {
/* Found RGB24 first
* new firmware, so use RGB24.
*/
dev->rgb_bgr_swapped = false;
break;
}
}
}
format = &camera->output[CAM_PORT_PREVIEW].format;
format->encoding = MMAL_ENCODING_OPAQUE;
format->encoding_variant = MMAL_ENCODING_I420;
format->es->video.width = 1024;
format->es->video.height = 768;
format->es->video.crop.x = 0;
format->es->video.crop.y = 0;
format->es->video.crop.width = 1024;
format->es->video.crop.height = 768;
format->es->video.frame_rate.numerator = 0; /* Rely on fps_range */
format->es->video.frame_rate.denominator = 1;
format = &camera->output[CAM_PORT_VIDEO].format;
format->encoding = MMAL_ENCODING_OPAQUE;
format->encoding_variant = MMAL_ENCODING_I420;
format->es->video.width = 1024;
format->es->video.height = 768;
format->es->video.crop.x = 0;
format->es->video.crop.y = 0;
format->es->video.crop.width = 1024;
format->es->video.crop.height = 768;
format->es->video.frame_rate.numerator = 0; /* Rely on fps_range */
format->es->video.frame_rate.denominator = 1;
format = &camera->output[CAM_PORT_CAPTURE].format;
format->encoding = MMAL_ENCODING_OPAQUE;
format->es->video.width = 2592;
format->es->video.height = 1944;
format->es->video.crop.x = 0;
format->es->video.crop.y = 0;
format->es->video.crop.width = 2592;
format->es->video.crop.height = 1944;
format->es->video.frame_rate.numerator = 0; /* Rely on fps_range */
format->es->video.frame_rate.denominator = 1;
dev->capture.width = format->es->video.width;
dev->capture.height = format->es->video.height;
dev->capture.fmt = &formats[0];
dev->capture.encode_component = NULL;
dev->capture.timeperframe = tpf_default;
dev->capture.enc_profile = V4L2_MPEG_VIDEO_H264_PROFILE_HIGH;
dev->capture.enc_level = V4L2_MPEG_VIDEO_H264_LEVEL_4_0;
/* get the preview component ready */
ret = vchiq_mmal_component_init(dev->instance, "ril.video_render",
&dev->component[COMP_PREVIEW]);
if (ret < 0)
goto unreg_camera;
if (dev->component[COMP_PREVIEW]->inputs < 1) {
ret = -EINVAL;
v4l2_err(&dev->v4l2_dev, "%s: too few input ports %d needed %d\n",
__func__, dev->component[COMP_PREVIEW]->inputs, 1);
goto unreg_preview;
}
/* get the image encoder component ready */
ret = vchiq_mmal_component_init(dev->instance, "ril.image_encode",
&dev->component[COMP_IMAGE_ENCODE]);
if (ret < 0)
goto unreg_preview;
if (dev->component[COMP_IMAGE_ENCODE]->inputs < 1) {
ret = -EINVAL;
v4l2_err(&dev->v4l2_dev, "%s: too few input ports %d needed %d\n",
__func__, dev->component[COMP_IMAGE_ENCODE]->inputs,
1);
goto unreg_image_encoder;
}
/* get the video encoder component ready */
ret = vchiq_mmal_component_init(dev->instance, "ril.video_encode",
&dev->component[COMP_VIDEO_ENCODE]);
if (ret < 0)
goto unreg_image_encoder;
if (dev->component[COMP_VIDEO_ENCODE]->inputs < 1) {
ret = -EINVAL;
v4l2_err(&dev->v4l2_dev, "%s: too few input ports %d needed %d\n",
__func__, dev->component[COMP_VIDEO_ENCODE]->inputs,
1);
goto unreg_vid_encoder;
}
{
struct vchiq_mmal_port *encoder_port =
&dev->component[COMP_VIDEO_ENCODE]->output[0];
encoder_port->format.encoding = MMAL_ENCODING_H264;
ret = vchiq_mmal_port_set_format(dev->instance,
encoder_port);
}
{
unsigned int enable = 1;
vchiq_mmal_port_parameter_set(
dev->instance,
&dev->component[COMP_VIDEO_ENCODE]->control,
MMAL_PARAMETER_VIDEO_IMMUTABLE_INPUT,
&enable, sizeof(enable));
vchiq_mmal_port_parameter_set(dev->instance,
&dev->component[COMP_VIDEO_ENCODE]->control,
MMAL_PARAMETER_MINIMISE_FRAGMENTATION,
&enable,
sizeof(enable));
}
ret = bcm2835_mmal_set_all_camera_controls(dev);
if (ret < 0) {
v4l2_err(&dev->v4l2_dev, "%s: failed to set all camera controls: %d\n",
__func__, ret);
goto unreg_vid_encoder;
}
return 0;
unreg_vid_encoder:
pr_err("Cleanup: Destroy video encoder\n");
vchiq_mmal_component_finalise(dev->instance,
dev->component[COMP_VIDEO_ENCODE]);
unreg_image_encoder:
pr_err("Cleanup: Destroy image encoder\n");
vchiq_mmal_component_finalise(dev->instance,
dev->component[COMP_IMAGE_ENCODE]);
unreg_preview:
pr_err("Cleanup: Destroy video render\n");
vchiq_mmal_component_finalise(dev->instance,
dev->component[COMP_PREVIEW]);
unreg_camera:
pr_err("Cleanup: Destroy camera\n");
vchiq_mmal_component_finalise(dev->instance,
dev->component[COMP_CAMERA]);
unreg_mmal:
vchiq_mmal_finalise(dev->instance);
return ret;
}
/*
 * Initialise and register one V4L2 video device from the template,
 * wiring it up to the per-camera device state and vb2 queue.
 */
static int bcm2835_mmal_init_device(struct bcm2835_mmal_dev *dev, struct video_device *vfd)
{
	int ret;

	*vfd = vdev_template;

	vfd->v4l2_dev = &dev->v4l2_dev;

	vfd->lock = &dev->mutex;

	vfd->queue = &dev->capture.vb_vidq;

	/* video device needs to be able to access instance data */
	video_set_drvdata(vfd, dev);

	ret = video_register_device(vfd, VFL_TYPE_VIDEO,
				    video_nr[dev->camera_num]);
	if (ret < 0)
		return ret;

	v4l2_info(vfd->v4l2_dev,
		  "V4L2 device registered as %s - stills mode > %dx%d\n",
		  video_device_node_name(vfd),
		  max_video_width, max_video_height);

	return 0;
}
/*
 * Tear down one per-camera device: unregister the video node,
 * disconnect and disable any active encoder pipeline, destroy all MMAL
 * components, and free the device state. Safe to call with NULL.
 */
static void bcm2835_cleanup_instance(struct bcm2835_mmal_dev *dev)
{
	if (!dev)
		return;

	v4l2_info(&dev->v4l2_dev, "unregistering %s\n",
		  video_device_node_name(&dev->vdev));

	video_unregister_device(&dev->vdev);

	if (dev->capture.encode_component) {
		v4l2_dbg(1, bcm2835_v4l2_debug, &dev->v4l2_dev,
			 "mmal_exit - disconnect tunnel\n");
		vchiq_mmal_port_connect_tunnel(dev->instance,
					       dev->capture.camera_port, NULL);
		vchiq_mmal_component_disable(dev->instance,
					     dev->capture.encode_component);
	}
	vchiq_mmal_component_disable(dev->instance,
				     dev->component[COMP_CAMERA]);

	vchiq_mmal_component_finalise(dev->instance,
				      dev->component[COMP_VIDEO_ENCODE]);

	vchiq_mmal_component_finalise(dev->instance,
				      dev->component[COMP_IMAGE_ENCODE]);

	vchiq_mmal_component_finalise(dev->instance,
				      dev->component[COMP_PREVIEW]);

	vchiq_mmal_component_finalise(dev->instance,
				      dev->component[COMP_CAMERA]);

	v4l2_ctrl_handler_free(&dev->ctrl_handler);

	v4l2_device_unregister(&dev->v4l2_dev);

	kfree(dev);
}
/* Initial capture format applied at probe time: 1024x768 JPEG. */
static struct v4l2_format default_v4l2_format = {
	.fmt.pix.pixelformat = V4L2_PIX_FMT_JPEG,
	.fmt.pix.width = 1024,
	.fmt.pix.bytesperline = 0,
	.fmt.pix.height = 768,
	.fmt.pix.sizeimage = 1024 * 768,
};
/*
 * Platform probe: enumerate the firmware cameras, then for each one
 * allocate a device, register its V4L2 device and controls, bring up
 * its MMAL pipeline, init its vb2 queue, register the video node, and
 * apply the default capture format.
 *
 * On failure mid-loop, the partially initialised device is unwound via
 * the goto ladder and all previously completed cameras are cleaned up.
 */
static int bcm2835_mmal_probe(struct platform_device *pdev)
{
	int ret;
	struct bcm2835_mmal_dev *dev;
	struct vb2_queue *q;
	int camera;
	unsigned int num_cameras;
	struct vchiq_mmal_instance *instance;
	unsigned int resolutions[MAX_BCM2835_CAMERAS][2];
	int i;

	/* Temporary instance used only for the camera_info query */
	ret = vchiq_mmal_init(&instance);
	if (ret < 0)
		return ret;

	num_cameras = get_num_cameras(instance,
				      resolutions,
				      MAX_BCM2835_CAMERAS);

	if (num_cameras < 1) {
		ret = -ENODEV;
		goto cleanup_mmal;
	}

	if (num_cameras > MAX_BCM2835_CAMERAS)
		num_cameras = MAX_BCM2835_CAMERAS;

	for (camera = 0; camera < num_cameras; camera++) {
		dev = kzalloc(sizeof(*dev), GFP_KERNEL);
		if (!dev) {
			ret = -ENOMEM;
			goto cleanup_gdev;
		}

		/* v4l2 core mutex used to protect all fops and v4l2 ioctls. */
		mutex_init(&dev->mutex);
		dev->max_width = resolutions[camera][0];
		dev->max_height = resolutions[camera][1];

		/* setup device defaults */
		dev->overlay.w.left = 150;
		dev->overlay.w.top = 50;
		dev->overlay.w.width = 1024;
		dev->overlay.w.height = 768;
		dev->overlay.clipcount = 0;
		dev->overlay.field = V4L2_FIELD_NONE;
		dev->overlay.global_alpha = 255;

		dev->capture.fmt = &formats[3]; /* JPEG */

		/* v4l device registration */
		dev->camera_num = v4l2_device_set_name(&dev->v4l2_dev, KBUILD_MODNAME,
						       &camera_instance);
		ret = v4l2_device_register(NULL, &dev->v4l2_dev);
		if (ret) {
			dev_err(&pdev->dev, "%s: could not register V4L2 device: %d\n",
				__func__, ret);
			goto free_dev;
		}

		/* setup v4l controls */
		ret = bcm2835_mmal_init_controls(dev, &dev->ctrl_handler);
		if (ret < 0) {
			v4l2_err(&dev->v4l2_dev, "%s: could not init controls: %d\n",
				 __func__, ret);
			goto unreg_dev;
		}
		dev->v4l2_dev.ctrl_handler = &dev->ctrl_handler;

		/* mmal init */
		dev->instance = instance;
		ret = mmal_init(dev);
		if (ret < 0) {
			v4l2_err(&dev->v4l2_dev, "%s: mmal init failed: %d\n",
				 __func__, ret);
			goto unreg_dev;
		}
		/* initialize queue */
		q = &dev->capture.vb_vidq;
		memset(q, 0, sizeof(*q));
		q->type = V4L2_BUF_TYPE_VIDEO_CAPTURE;
		q->io_modes = VB2_MMAP | VB2_USERPTR | VB2_READ;
		q->drv_priv = dev;
		q->buf_struct_size = sizeof(struct vb2_mmal_buffer);
		q->ops = &bcm2835_mmal_video_qops;
		q->mem_ops = &vb2_vmalloc_memops;
		q->timestamp_flags = V4L2_BUF_FLAG_TIMESTAMP_MONOTONIC;
		q->lock = &dev->mutex;
		ret = vb2_queue_init(q);
		if (ret < 0)
			goto unreg_dev;

		/* initialise video devices */
		ret = bcm2835_mmal_init_device(dev, &dev->vdev);
		if (ret < 0) {
			v4l2_err(&dev->v4l2_dev, "%s: could not init device: %d\n",
				 __func__, ret);
			goto unreg_dev;
		}

		/* Really want to call vidioc_s_fmt_vid_cap with the default
		 * format, but currently the APIs don't join up.
		 */
		ret = mmal_setup_components(dev, &default_v4l2_format);
		if (ret < 0) {
			v4l2_err(&dev->v4l2_dev, "%s: could not setup components: %d\n",
				 __func__, ret);
			goto unreg_dev;
		}

		v4l2_info(&dev->v4l2_dev, "Broadcom 2835 MMAL video capture loaded.\n");

		gdev[camera] = dev;
	}

	return 0;

unreg_dev:
	v4l2_ctrl_handler_free(&dev->ctrl_handler);
	v4l2_device_unregister(&dev->v4l2_dev);

free_dev:
	kfree(dev);

cleanup_gdev:
	for (i = 0; i < camera; i++) {
		bcm2835_cleanup_instance(gdev[i]);
		gdev[i] = NULL;
	}

cleanup_mmal:
	vchiq_mmal_finalise(instance);

	return ret;
}
/*
 * Platform remove: clean up every camera instance and finalise the
 * shared MMAL instance (taken from gdev[0], which probe guarantees is
 * populated when remove can run).
 */
static void bcm2835_mmal_remove(struct platform_device *pdev)
{
	int camera;
	struct vchiq_mmal_instance *instance = gdev[0]->instance;

	for (camera = 0; camera < MAX_BCM2835_CAMERAS; camera++) {
		bcm2835_cleanup_instance(gdev[camera]);
		gdev[camera] = NULL;
	}
	vchiq_mmal_finalise(instance);
}
/* Platform driver glue; bound by name to the "bcm2835-camera" device. */
static struct platform_driver bcm2835_camera_driver = {
	.probe		= bcm2835_mmal_probe,
	.remove_new	= bcm2835_mmal_remove,
	.driver		= {
		.name	= "bcm2835-camera",
	},
};
module_platform_driver(bcm2835_camera_driver)
MODULE_DESCRIPTION("Broadcom 2835 MMAL video capture");
MODULE_AUTHOR("Vincent Sanders");
MODULE_LICENSE("GPL");
MODULE_ALIAS("platform:bcm2835-camera");
| linux-master | drivers/staging/vc04_services/bcm2835-camera/bcm2835-camera.c |
// SPDX-License-Identifier: GPL-2.0
/*
* Broadcom BCM2835 V4L2 driver
*
* Copyright © 2013 Raspberry Pi (Trading) Ltd.
*
* Authors: Vincent Sanders @ Collabora
* Dave Stevenson @ Broadcom
* (now [email protected])
* Simon Mellor @ Broadcom
* Luke Diamand @ Broadcom
*/
#include <linux/errno.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/slab.h>
#include <media/videobuf2-vmalloc.h>
#include <media/v4l2-device.h>
#include <media/v4l2-ioctl.h>
#include <media/v4l2-ctrls.h>
#include <media/v4l2-fh.h>
#include <media/v4l2-event.h>
#include <media/v4l2-common.h>
#include "../vchiq-mmal/mmal-common.h"
#include "../vchiq-mmal/mmal-vchiq.h"
#include "../vchiq-mmal/mmal-parameters.h"
#include "bcm2835-camera.h"
/* The supported V4L2_CID_AUTO_EXPOSURE_BIAS values are from -4.0 to +4.0.
 * MMAL values are in 1/6th increments so the MMAL range is -24 to +24.
 * V4L2 docs say value "is expressed in terms of EV, drivers should interpret
 * the values as 0.001 EV units, where the value 1000 stands for +1 EV."
 * V4L2 is limited to a max of 32 values in a menu, so count in 1/3rds from
 * -4 to +4
 */
static const s64 ev_bias_qmenu[] = {
	-4000, -3667, -3333,
	-3000, -2667, -2333,
	-2000, -1667, -1333,
	-1000, -667, -333,
	0, 333, 667,
	1000, 1333, 1667,
	2000, 2333, 2667,
	3000, 3333, 3667,
	4000
};
/* Supported ISO values (*1000)
 * ISO 0 = auto ISO
 */
static const s64 iso_qmenu[] = {
	0, 100000, 200000, 400000, 800000,
};
/* Raw ISO values sent to the firmware, parallel to iso_qmenu above. */
static const u32 iso_values[] = {
	0, 100, 200, 400, 800,
};
/* How a bcm2835_mmal_v4l2_ctrl entry should be registered with V4L2. */
enum bcm2835_mmal_ctrl_type {
	MMAL_CONTROL_TYPE_STD,
	MMAL_CONTROL_TYPE_STD_MENU,
	MMAL_CONTROL_TYPE_INT_MENU,
	MMAL_CONTROL_TYPE_CLUSTER, /* special cluster entry */
};
/* Table entry describing one V4L2 control and how to map it to MMAL. */
struct bcm2835_mmal_v4l2_ctrl {
	u32 id; /* v4l2 control identifier */
	enum bcm2835_mmal_ctrl_type type;
	/* control minimum value or
	 * mask for MMAL_CONTROL_TYPE_STD_MENU
	 */
	s64 min;
	s64 max; /* maximum value of control */
	s64 def; /* default value of control */
	u64 step; /* step size of the control */
	const s64 *imenu; /* integer menu array */
	u32 mmal_id; /* mmal parameter id */
	/* callback that pushes the control value to the firmware */
	int (*setter)(struct bcm2835_mmal_dev *dev, struct v4l2_ctrl *ctrl,
		      const struct bcm2835_mmal_v4l2_ctrl *mmal_ctrl);
};
/* Mapping of a V4L2 colorfx to an MMAL image effect plus any fixed
 * colour-balance (CbCr) overrides and effect parameters it needs.
 */
struct v4l2_to_mmal_effects_setting {
	u32 v4l2_effect;
	u32 mmal_effect;
	s32 col_fx_enable;	/* enable fixed CbCr colour effect */
	s32 col_fx_fixed_cbcr;	/* use caller-supplied CbCr, not u/v below */
	u32 u;			/* fixed Cb value */
	u32 v;			/* fixed Cr value */
	u32 num_effect_params;
	u32 effect_params[MMAL_MAX_IMAGEFX_PARAMETERS];
};
/* V4L2 colour-effect -> MMAL image-effect translation table, searched
 * linearly on v4l2_effect by ctrl_set_image_effect().
 */
static const struct v4l2_to_mmal_effects_setting
	v4l2_to_mmal_effects_values[] = {
	{  V4L2_COLORFX_NONE,         MMAL_PARAM_IMAGEFX_NONE,
		0,   0,    0,    0,   0, {0, 0, 0, 0, 0} },
	{  V4L2_COLORFX_BW,           MMAL_PARAM_IMAGEFX_NONE,
		1,   0,  128,  128,   0, {0, 0, 0, 0, 0} },
	{  V4L2_COLORFX_SEPIA,        MMAL_PARAM_IMAGEFX_NONE,
		1,   0,   87,  151,   0, {0, 0, 0, 0, 0} },
	{  V4L2_COLORFX_NEGATIVE,     MMAL_PARAM_IMAGEFX_NEGATIVE,
		0,   0,    0,    0,   0, {0, 0, 0, 0, 0} },
	{  V4L2_COLORFX_EMBOSS,       MMAL_PARAM_IMAGEFX_EMBOSS,
		0,   0,    0,    0,   0, {0, 0, 0, 0, 0} },
	{  V4L2_COLORFX_SKETCH,       MMAL_PARAM_IMAGEFX_SKETCH,
		0,   0,    0,    0,   0, {0, 0, 0, 0, 0} },
	{  V4L2_COLORFX_SKY_BLUE,     MMAL_PARAM_IMAGEFX_PASTEL,
		0,   0,    0,    0,   0, {0, 0, 0, 0, 0} },
	{  V4L2_COLORFX_GRASS_GREEN,  MMAL_PARAM_IMAGEFX_WATERCOLOUR,
		0,   0,    0,    0,   0, {0, 0, 0, 0, 0} },
	{  V4L2_COLORFX_SKIN_WHITEN,  MMAL_PARAM_IMAGEFX_WASHEDOUT,
		0,   0,    0,    0,   0, {0, 0, 0, 0, 0} },
	{  V4L2_COLORFX_VIVID,        MMAL_PARAM_IMAGEFX_SATURATION,
		0,   0,    0,    0,   0, {0, 0, 0, 0, 0} },
	{  V4L2_COLORFX_AQUA,         MMAL_PARAM_IMAGEFX_NONE,
		1,   0,  171,  121,   0, {0, 0, 0, 0, 0} },
	{  V4L2_COLORFX_ART_FREEZE,   MMAL_PARAM_IMAGEFX_HATCH,
		0,   0,    0,    0,   0, {0, 0, 0, 0, 0} },
	{  V4L2_COLORFX_SILHOUETTE,   MMAL_PARAM_IMAGEFX_FILM,
		0,   0,    0,    0,   0, {0, 0, 0, 0, 0} },
	{  V4L2_COLORFX_SOLARIZATION, MMAL_PARAM_IMAGEFX_SOLARIZE,
		0,   0,    0,    0,   5, {1, 128, 160, 160, 48} },
	{  V4L2_COLORFX_ANTIQUE,      MMAL_PARAM_IMAGEFX_COLOURBALANCE,
		0,   0,    0,    0,   3, {108, 274, 238, 0, 0} },
	{  V4L2_COLORFX_SET_CBCR,     MMAL_PARAM_IMAGEFX_NONE,
		1,   1,    0,    0,   0, {0, 0, 0, 0, 0} }
};
/* Exposure/metering preset applied when a V4L2 scene mode is selected */
struct v4l2_mmal_scene_config {
	enum v4l2_scene_mode v4l2_scene;
	enum mmal_parameter_exposuremode exposure_mode;
	enum mmal_parameter_exposuremeteringmode metering_mode;
};
/* Scene presets supported by ctrl_set_scene_mode(); also used at control
 * registration time to build the V4L2_CID_SCENE_MODE menu skip mask.
 */
static const struct v4l2_mmal_scene_config scene_configs[] = {
	/* V4L2_SCENE_MODE_NONE automatically added */
	{
		V4L2_SCENE_MODE_NIGHT,
		MMAL_PARAM_EXPOSUREMODE_NIGHT,
		MMAL_PARAM_EXPOSUREMETERINGMODE_AVERAGE
	},
	{
		V4L2_SCENE_MODE_SPORTS,
		MMAL_PARAM_EXPOSUREMODE_SPORTS,
		MMAL_PARAM_EXPOSUREMETERINGMODE_AVERAGE
	},
};
/* control handlers*/
static int ctrl_set_rational(struct bcm2835_mmal_dev *dev,
struct v4l2_ctrl *ctrl,
const struct bcm2835_mmal_v4l2_ctrl *mmal_ctrl)
{
struct s32_fract rational_value;
struct vchiq_mmal_port *control;
control = &dev->component[COMP_CAMERA]->control;
rational_value.numerator = ctrl->val;
rational_value.denominator = 100;
return vchiq_mmal_port_parameter_set(dev->instance, control,
mmal_ctrl->mmal_id,
&rational_value,
sizeof(rational_value));
}
/* Apply a plain u32 control value to the camera control port. */
static int ctrl_set_value(struct bcm2835_mmal_dev *dev,
			  struct v4l2_ctrl *ctrl,
			  const struct bcm2835_mmal_v4l2_ctrl *mmal_ctrl)
{
	struct vchiq_mmal_port *camera_ctl =
		&dev->component[COMP_CAMERA]->control;
	u32 value = ctrl->val;

	return vchiq_mmal_port_parameter_set(dev->instance, camera_ctl,
					     mmal_ctrl->mmal_id,
					     &value, sizeof(value));
}
static int ctrl_set_iso(struct bcm2835_mmal_dev *dev,
struct v4l2_ctrl *ctrl,
const struct bcm2835_mmal_v4l2_ctrl *mmal_ctrl)
{
u32 u32_value;
struct vchiq_mmal_port *control;
if (ctrl->val > mmal_ctrl->max || ctrl->val < mmal_ctrl->min)
return 1;
if (ctrl->id == V4L2_CID_ISO_SENSITIVITY)
dev->iso = iso_values[ctrl->val];
else if (ctrl->id == V4L2_CID_ISO_SENSITIVITY_AUTO)
dev->manual_iso_enabled =
(ctrl->val == V4L2_ISO_SENSITIVITY_MANUAL);
control = &dev->component[COMP_CAMERA]->control;
if (dev->manual_iso_enabled)
u32_value = dev->iso;
else
u32_value = 0;
return vchiq_mmal_port_parameter_set(dev->instance, control,
MMAL_PARAMETER_ISO,
&u32_value, sizeof(u32_value));
}
/* Convert an ev_bias_qmenu index (0..24, 12 = 0 EV) into the firmware's
 * 1/6th-EV units and apply it to the camera control port.
 */
static int ctrl_set_value_ev(struct bcm2835_mmal_dev *dev,
			     struct v4l2_ctrl *ctrl,
			     const struct bcm2835_mmal_v4l2_ctrl *mmal_ctrl)
{
	struct vchiq_mmal_port *camera_ctl =
		&dev->component[COMP_CAMERA]->control;
	s32 ev_sixths = (ctrl->val - 12) * 2; /* index -> 1/6th EV steps */

	return vchiq_mmal_port_parameter_set(dev->instance, camera_ctl,
					     mmal_ctrl->mmal_id,
					     &ev_sixths, sizeof(ev_sixths));
}
/* Set the rotation (snapped to a multiple of 90 degrees) on all three
 * camera output ports.
 */
static int ctrl_set_rotate(struct bcm2835_mmal_dev *dev,
			   struct v4l2_ctrl *ctrl,
			   const struct bcm2835_mmal_v4l2_ctrl *mmal_ctrl)
{
	struct vchiq_mmal_component *camera = dev->component[COMP_CAMERA];
	u32 rotation = ((ctrl->val % 360) / 90) * 90;
	int i, ret = 0;

	/* The same rotation applies to each of the three output ports. */
	for (i = 0; i < 3; i++) {
		ret = vchiq_mmal_port_parameter_set(dev->instance,
						    &camera->output[i],
						    mmal_ctrl->mmal_id,
						    &rotation,
						    sizeof(rotation));
		if (ret < 0)
			return ret;
	}

	return ret;
}
/* Record the new H/V flip state and program the combined mirror mode on
 * all three camera output ports.
 */
static int ctrl_set_flip(struct bcm2835_mmal_dev *dev,
			 struct v4l2_ctrl *ctrl,
			 const struct bcm2835_mmal_v4l2_ctrl *mmal_ctrl)
{
	struct vchiq_mmal_component *camera;
	u32 mirror;
	int i, ret = 0;

	if (ctrl->id == V4L2_CID_HFLIP)
		dev->hflip = ctrl->val;
	else
		dev->vflip = ctrl->val;

	camera = dev->component[COMP_CAMERA];

	/* MMAL takes a single mirror mode combining both flips. */
	if (dev->hflip && dev->vflip)
		mirror = MMAL_PARAM_MIRROR_BOTH;
	else if (dev->hflip)
		mirror = MMAL_PARAM_MIRROR_HORIZONTAL;
	else if (dev->vflip)
		mirror = MMAL_PARAM_MIRROR_VERTICAL;
	else
		mirror = MMAL_PARAM_MIRROR_NONE;

	for (i = 0; i < 3; i++) {
		ret = vchiq_mmal_port_parameter_set(dev->instance,
						    &camera->output[i],
						    mmal_ctrl->mmal_id,
						    &mirror, sizeof(mirror));
		if (ret < 0)
			return ret;
	}

	return ret;
}
/* Handler shared by V4L2_CID_EXPOSURE_AUTO, V4L2_CID_EXPOSURE_ABSOLUTE and
 * V4L2_CID_EXPOSURE_AUTO_PRIORITY.  Records the user's selection in dev and,
 * when no scene mode is overriding it, pushes shutter speed and exposure
 * mode to the camera control port.  The framerate range is always
 * recalculated.
 *
 * Returns 0, or an accumulated non-zero value if any firmware set failed.
 */
static int ctrl_set_exposure(struct bcm2835_mmal_dev *dev,
			     struct v4l2_ctrl *ctrl,
			     const struct bcm2835_mmal_v4l2_ctrl *mmal_ctrl)
{
	enum mmal_parameter_exposuremode exp_mode = dev->exposure_mode_user;
	u32 shutter_speed = 0;
	struct vchiq_mmal_port *control;
	int ret = 0;

	control = &dev->component[COMP_CAMERA]->control;

	if (mmal_ctrl->mmal_id == MMAL_PARAMETER_SHUTTER_SPEED) {
		/* V4L2 is in 100usec increments.
		 * MMAL is 1usec.
		 */
		dev->manual_shutter_speed = ctrl->val * 100;
	} else if (mmal_ctrl->mmal_id == MMAL_PARAMETER_EXPOSURE_MODE) {
		switch (ctrl->val) {
		case V4L2_EXPOSURE_AUTO:
			exp_mode = MMAL_PARAM_EXPOSUREMODE_AUTO;
			break;
		case V4L2_EXPOSURE_MANUAL:
			exp_mode = MMAL_PARAM_EXPOSUREMODE_OFF;
			break;
		}
		dev->exposure_mode_user = exp_mode;
		dev->exposure_mode_v4l2_user = ctrl->val;
	} else if (mmal_ctrl->id == V4L2_CID_EXPOSURE_AUTO_PRIORITY) {
		dev->exp_auto_priority = ctrl->val;
	}

	if (dev->scene_mode == V4L2_SCENE_MODE_NONE) {
		/* Only send a manual shutter speed when exposure is manual;
		 * otherwise leave the initialised 0.
		 */
		if (exp_mode == MMAL_PARAM_EXPOSUREMODE_OFF)
			shutter_speed = dev->manual_shutter_speed;

		ret = vchiq_mmal_port_parameter_set(dev->instance,
						    control,
						    MMAL_PARAMETER_SHUTTER_SPEED,
						    &shutter_speed,
						    sizeof(shutter_speed));
		ret += vchiq_mmal_port_parameter_set(dev->instance,
						     control,
						     MMAL_PARAMETER_EXPOSURE_MODE,
						     &exp_mode,
						     sizeof(u32));
		dev->exposure_mode_active = exp_mode;
	}
	/* exposure_dynamic_framerate (V4L2_CID_EXPOSURE_AUTO_PRIORITY) should
	 * always apply irrespective of scene mode.
	 */
	ret += set_framerate_params(dev);

	return ret;
}
/* Record the requested exposure metering mode and, unless a scene mode is
 * currently overriding it, push the mode to the camera control port.
 */
static int ctrl_set_metering_mode(struct bcm2835_mmal_dev *dev,
				  struct v4l2_ctrl *ctrl,
				  const struct bcm2835_mmal_v4l2_ctrl *mmal_ctrl)
{
	u32 mode;

	switch (ctrl->val) {
	case V4L2_EXPOSURE_METERING_AVERAGE:
		dev->metering_mode = MMAL_PARAM_EXPOSUREMETERINGMODE_AVERAGE;
		break;
	case V4L2_EXPOSURE_METERING_CENTER_WEIGHTED:
		dev->metering_mode = MMAL_PARAM_EXPOSUREMETERINGMODE_BACKLIT;
		break;
	case V4L2_EXPOSURE_METERING_SPOT:
		dev->metering_mode = MMAL_PARAM_EXPOSUREMETERINGMODE_SPOT;
		break;
	case V4L2_EXPOSURE_METERING_MATRIX:
		dev->metering_mode = MMAL_PARAM_EXPOSUREMETERINGMODE_MATRIX;
		break;
	}

	/* While a scene mode is active it owns the metering setting; the
	 * recorded mode is restored when the scene mode is cleared.
	 */
	if (dev->scene_mode != V4L2_SCENE_MODE_NONE)
		return 0;

	mode = dev->metering_mode;
	return vchiq_mmal_port_parameter_set(dev->instance,
					     &dev->component[COMP_CAMERA]->control,
					     mmal_ctrl->mmal_id,
					     &mode, sizeof(mode));
}
static int ctrl_set_flicker_avoidance(struct bcm2835_mmal_dev *dev,
struct v4l2_ctrl *ctrl,
const struct bcm2835_mmal_v4l2_ctrl *mmal_ctrl)
{
u32 u32_value;
struct vchiq_mmal_port *control;
control = &dev->component[COMP_CAMERA]->control;
switch (ctrl->val) {
case V4L2_CID_POWER_LINE_FREQUENCY_DISABLED:
u32_value = MMAL_PARAM_FLICKERAVOID_OFF;
break;
case V4L2_CID_POWER_LINE_FREQUENCY_50HZ:
u32_value = MMAL_PARAM_FLICKERAVOID_50HZ;
break;
case V4L2_CID_POWER_LINE_FREQUENCY_60HZ:
u32_value = MMAL_PARAM_FLICKERAVOID_60HZ;
break;
case V4L2_CID_POWER_LINE_FREQUENCY_AUTO:
u32_value = MMAL_PARAM_FLICKERAVOID_AUTO;
break;
}
return vchiq_mmal_port_parameter_set(dev->instance, control,
mmal_ctrl->mmal_id,
&u32_value, sizeof(u32_value));
}
static int ctrl_set_awb_mode(struct bcm2835_mmal_dev *dev,
struct v4l2_ctrl *ctrl,
const struct bcm2835_mmal_v4l2_ctrl *mmal_ctrl)
{
u32 u32_value;
struct vchiq_mmal_port *control;
control = &dev->component[COMP_CAMERA]->control;
switch (ctrl->val) {
case V4L2_WHITE_BALANCE_MANUAL:
u32_value = MMAL_PARAM_AWBMODE_OFF;
break;
case V4L2_WHITE_BALANCE_AUTO:
u32_value = MMAL_PARAM_AWBMODE_AUTO;
break;
case V4L2_WHITE_BALANCE_INCANDESCENT:
u32_value = MMAL_PARAM_AWBMODE_INCANDESCENT;
break;
case V4L2_WHITE_BALANCE_FLUORESCENT:
u32_value = MMAL_PARAM_AWBMODE_FLUORESCENT;
break;
case V4L2_WHITE_BALANCE_FLUORESCENT_H:
u32_value = MMAL_PARAM_AWBMODE_TUNGSTEN;
break;
case V4L2_WHITE_BALANCE_HORIZON:
u32_value = MMAL_PARAM_AWBMODE_HORIZON;
break;
case V4L2_WHITE_BALANCE_DAYLIGHT:
u32_value = MMAL_PARAM_AWBMODE_SUNLIGHT;
break;
case V4L2_WHITE_BALANCE_FLASH:
u32_value = MMAL_PARAM_AWBMODE_FLASH;
break;
case V4L2_WHITE_BALANCE_CLOUDY:
u32_value = MMAL_PARAM_AWBMODE_CLOUDY;
break;
case V4L2_WHITE_BALANCE_SHADE:
u32_value = MMAL_PARAM_AWBMODE_SHADE;
break;
}
return vchiq_mmal_port_parameter_set(dev->instance, control,
mmal_ctrl->mmal_id,
&u32_value, sizeof(u32_value));
}
/* Update the cached red or blue gain (milli-units) and send both gains to
 * the firmware as a pair of rationals over 1000.
 */
static int ctrl_set_awb_gains(struct bcm2835_mmal_dev *dev,
			      struct v4l2_ctrl *ctrl,
			      const struct bcm2835_mmal_v4l2_ctrl *mmal_ctrl)
{
	struct mmal_parameter_awbgains gains;

	if (ctrl->id == V4L2_CID_RED_BALANCE)
		dev->red_gain = ctrl->val;
	else if (ctrl->id == V4L2_CID_BLUE_BALANCE)
		dev->blue_gain = ctrl->val;

	/* Both gains are always sent together, whichever one changed. */
	gains.r_gain.numerator = dev->red_gain;
	gains.r_gain.denominator = 1000;
	gains.b_gain.numerator = dev->blue_gain;
	gains.b_gain.denominator = 1000;

	return vchiq_mmal_port_parameter_set(dev->instance,
					     &dev->component[COMP_CAMERA]->control,
					     mmal_ctrl->mmal_id,
					     &gains, sizeof(gains));
}
static int ctrl_set_image_effect(struct bcm2835_mmal_dev *dev,
struct v4l2_ctrl *ctrl,
const struct bcm2835_mmal_v4l2_ctrl *mmal_ctrl)
{
int ret = -EINVAL;
int i, j;
struct vchiq_mmal_port *control;
struct mmal_parameter_imagefx_parameters imagefx;
for (i = 0; i < ARRAY_SIZE(v4l2_to_mmal_effects_values); i++) {
if (ctrl->val != v4l2_to_mmal_effects_values[i].v4l2_effect)
continue;
imagefx.effect =
v4l2_to_mmal_effects_values[i].mmal_effect;
imagefx.num_effect_params =
v4l2_to_mmal_effects_values[i].num_effect_params;
if (imagefx.num_effect_params > MMAL_MAX_IMAGEFX_PARAMETERS)
imagefx.num_effect_params = MMAL_MAX_IMAGEFX_PARAMETERS;
for (j = 0; j < imagefx.num_effect_params; j++)
imagefx.effect_parameter[j] =
v4l2_to_mmal_effects_values[i].effect_params[j];
dev->colourfx.enable =
v4l2_to_mmal_effects_values[i].col_fx_enable;
if (!v4l2_to_mmal_effects_values[i].col_fx_fixed_cbcr) {
dev->colourfx.u = v4l2_to_mmal_effects_values[i].u;
dev->colourfx.v = v4l2_to_mmal_effects_values[i].v;
}
control = &dev->component[COMP_CAMERA]->control;
ret = vchiq_mmal_port_parameter_set(
dev->instance, control,
MMAL_PARAMETER_IMAGE_EFFECT_PARAMETERS,
&imagefx, sizeof(imagefx));
if (ret)
goto exit;
ret = vchiq_mmal_port_parameter_set(
dev->instance, control,
MMAL_PARAMETER_COLOUR_EFFECT,
&dev->colourfx, sizeof(dev->colourfx));
}
exit:
v4l2_dbg(1, bcm2835_v4l2_debug, &dev->v4l2_dev,
"mmal_ctrl:%p ctrl id:0x%x ctrl val:%d imagefx:0x%x color_effect:%s u:%d v:%d ret %d(%d)\n",
mmal_ctrl, ctrl->id, ctrl->val, imagefx.effect,
dev->colourfx.enable ? "true" : "false",
dev->colourfx.u, dev->colourfx.v,
ret, (ret == 0 ? 0 : -EINVAL));
return (ret == 0 ? 0 : -EINVAL);
}
/* Program the fixed chroma values from V4L2_CID_COLORFX_CBCR: the high byte
 * of the control value goes to u, the low byte to v.
 */
static int ctrl_set_colfx(struct bcm2835_mmal_dev *dev,
			  struct v4l2_ctrl *ctrl,
			  const struct bcm2835_mmal_v4l2_ctrl *mmal_ctrl)
{
	struct vchiq_mmal_port *camera_ctl =
		&dev->component[COMP_CAMERA]->control;
	int ret;

	dev->colourfx.u = (ctrl->val >> 8) & 0xff;
	dev->colourfx.v = ctrl->val & 0xff;

	ret = vchiq_mmal_port_parameter_set(dev->instance, camera_ctl,
					    MMAL_PARAMETER_COLOUR_EFFECT,
					    &dev->colourfx,
					    sizeof(dev->colourfx));

	v4l2_dbg(1, bcm2835_v4l2_debug, &dev->v4l2_dev,
		 "%s: After: mmal_ctrl:%p ctrl id:0x%x ctrl val:%d ret %d(%d)\n",
		 __func__, mmal_ctrl, ctrl->id, ctrl->val, ret,
		 (ret == 0 ? 0 : -EINVAL));

	return (ret == 0 ? 0 : -EINVAL);
}
/* Set the video encode bitrate on the encoder output port.  Deliberately
 * always returns 0 - see the firmware note below.
 */
static int ctrl_set_bitrate(struct bcm2835_mmal_dev *dev,
			    struct v4l2_ctrl *ctrl,
			    const struct bcm2835_mmal_v4l2_ctrl *mmal_ctrl)
{
	int ret;
	struct vchiq_mmal_port *encoder_out;

	/* Cache the requested bitrate for later capture setup. */
	dev->capture.encode_bitrate = ctrl->val;

	encoder_out = &dev->component[COMP_VIDEO_ENCODE]->output[0];

	ret = vchiq_mmal_port_parameter_set(dev->instance, encoder_out,
					    mmal_ctrl->mmal_id, &ctrl->val,
					    sizeof(ctrl->val));

	v4l2_dbg(1, bcm2835_v4l2_debug, &dev->v4l2_dev,
		 "%s: After: mmal_ctrl:%p ctrl id:0x%x ctrl val:%d ret %d(%d)\n",
		 __func__, mmal_ctrl, ctrl->id, ctrl->val, ret,
		 (ret == 0 ? 0 : -EINVAL));

	/*
	 * Older firmware versions (pre July 2019) have a bug in handling
	 * MMAL_PARAMETER_VIDEO_BIT_RATE that result in the call
	 * returning -MMAL_MSG_STATUS_EINVAL. So ignore errors from this call.
	 */
	return 0;
}
/* Select constant vs variable rate control on the video encoder output
 * port.  Anything other than CBR is treated as VBR, matching the control's
 * default.  Always returns 0.
 */
static int ctrl_set_bitrate_mode(struct bcm2835_mmal_dev *dev,
				 struct v4l2_ctrl *ctrl,
				 const struct bcm2835_mmal_v4l2_ctrl *mmal_ctrl)
{
	struct vchiq_mmal_port *encoder_out =
		&dev->component[COMP_VIDEO_ENCODE]->output[0];
	u32 rc_mode;

	dev->capture.encode_bitrate_mode = ctrl->val;

	if (ctrl->val == V4L2_MPEG_VIDEO_BITRATE_MODE_CBR)
		rc_mode = MMAL_VIDEO_RATECONTROL_CONSTANT;
	else
		rc_mode = MMAL_VIDEO_RATECONTROL_VARIABLE;

	vchiq_mmal_port_parameter_set(dev->instance, encoder_out,
				      mmal_ctrl->mmal_id,
				      &rc_mode,
				      sizeof(rc_mode));
	return 0;
}
/* Apply a u32 control value to the JPEG (image encode) output port. */
static int ctrl_set_image_encode_output(struct bcm2835_mmal_dev *dev,
					struct v4l2_ctrl *ctrl,
					const struct bcm2835_mmal_v4l2_ctrl *mmal_ctrl)
{
	struct vchiq_mmal_port *jpeg_out =
		&dev->component[COMP_IMAGE_ENCODE]->output[0];
	u32 value = ctrl->val;

	return vchiq_mmal_port_parameter_set(dev->instance, jpeg_out,
					     mmal_ctrl->mmal_id,
					     &value, sizeof(value));
}
/* Apply a u32 control value to the video encoder output port. */
static int ctrl_set_video_encode_param_output(struct bcm2835_mmal_dev *dev,
					      struct v4l2_ctrl *ctrl,
					      const struct bcm2835_mmal_v4l2_ctrl *mmal_ctrl)
{
	struct vchiq_mmal_port *encoder_out =
		&dev->component[COMP_VIDEO_ENCODE]->output[0];
	u32 value = ctrl->val;

	return vchiq_mmal_port_parameter_set(dev->instance, encoder_out,
					     mmal_ctrl->mmal_id,
					     &value, sizeof(value));
}
/* Validate and apply V4L2_CID_MPEG_VIDEO_H264_PROFILE and
 * V4L2_CID_MPEG_VIDEO_H264_LEVEL.  The cached profile and level are always
 * sent together to the encoder as a single MMAL_PARAMETER_PROFILE struct.
 *
 * Returns 0 on success or -EINVAL for an unsupported value.
 */
static int ctrl_set_video_encode_profile_level(struct bcm2835_mmal_dev *dev,
					       struct v4l2_ctrl *ctrl,
					       const struct bcm2835_mmal_v4l2_ctrl *mmal_ctrl)
{
	struct mmal_parameter_video_profile param;
	int ret = 0;

	/* First validate the new value and cache it in dev->capture. */
	if (ctrl->id == V4L2_CID_MPEG_VIDEO_H264_PROFILE) {
		switch (ctrl->val) {
		case V4L2_MPEG_VIDEO_H264_PROFILE_BASELINE:
		case V4L2_MPEG_VIDEO_H264_PROFILE_CONSTRAINED_BASELINE:
		case V4L2_MPEG_VIDEO_H264_PROFILE_MAIN:
		case V4L2_MPEG_VIDEO_H264_PROFILE_HIGH:
			dev->capture.enc_profile = ctrl->val;
			break;
		default:
			ret = -EINVAL;
			break;
		}
	} else if (ctrl->id == V4L2_CID_MPEG_VIDEO_H264_LEVEL) {
		switch (ctrl->val) {
		case V4L2_MPEG_VIDEO_H264_LEVEL_1_0:
		case V4L2_MPEG_VIDEO_H264_LEVEL_1B:
		case V4L2_MPEG_VIDEO_H264_LEVEL_1_1:
		case V4L2_MPEG_VIDEO_H264_LEVEL_1_2:
		case V4L2_MPEG_VIDEO_H264_LEVEL_1_3:
		case V4L2_MPEG_VIDEO_H264_LEVEL_2_0:
		case V4L2_MPEG_VIDEO_H264_LEVEL_2_1:
		case V4L2_MPEG_VIDEO_H264_LEVEL_2_2:
		case V4L2_MPEG_VIDEO_H264_LEVEL_3_0:
		case V4L2_MPEG_VIDEO_H264_LEVEL_3_1:
		case V4L2_MPEG_VIDEO_H264_LEVEL_3_2:
		case V4L2_MPEG_VIDEO_H264_LEVEL_4_0:
			dev->capture.enc_level = ctrl->val;
			break;
		default:
			ret = -EINVAL;
			break;
		}
	}

	if (!ret) {
		/* Translate the cached V4L2 profile to the MMAL enum. */
		switch (dev->capture.enc_profile) {
		case V4L2_MPEG_VIDEO_H264_PROFILE_BASELINE:
			param.profile = MMAL_VIDEO_PROFILE_H264_BASELINE;
			break;
		case V4L2_MPEG_VIDEO_H264_PROFILE_CONSTRAINED_BASELINE:
			param.profile =
				MMAL_VIDEO_PROFILE_H264_CONSTRAINED_BASELINE;
			break;
		case V4L2_MPEG_VIDEO_H264_PROFILE_MAIN:
			param.profile = MMAL_VIDEO_PROFILE_H264_MAIN;
			break;
		case V4L2_MPEG_VIDEO_H264_PROFILE_HIGH:
			param.profile = MMAL_VIDEO_PROFILE_H264_HIGH;
			break;
		default:
			/* Should never get here */
			break;
		}

		/* Translate the cached V4L2 level to the MMAL enum. */
		switch (dev->capture.enc_level) {
		case V4L2_MPEG_VIDEO_H264_LEVEL_1_0:
			param.level = MMAL_VIDEO_LEVEL_H264_1;
			break;
		case V4L2_MPEG_VIDEO_H264_LEVEL_1B:
			param.level = MMAL_VIDEO_LEVEL_H264_1b;
			break;
		case V4L2_MPEG_VIDEO_H264_LEVEL_1_1:
			param.level = MMAL_VIDEO_LEVEL_H264_11;
			break;
		case V4L2_MPEG_VIDEO_H264_LEVEL_1_2:
			param.level = MMAL_VIDEO_LEVEL_H264_12;
			break;
		case V4L2_MPEG_VIDEO_H264_LEVEL_1_3:
			param.level = MMAL_VIDEO_LEVEL_H264_13;
			break;
		case V4L2_MPEG_VIDEO_H264_LEVEL_2_0:
			param.level = MMAL_VIDEO_LEVEL_H264_2;
			break;
		case V4L2_MPEG_VIDEO_H264_LEVEL_2_1:
			param.level = MMAL_VIDEO_LEVEL_H264_21;
			break;
		case V4L2_MPEG_VIDEO_H264_LEVEL_2_2:
			param.level = MMAL_VIDEO_LEVEL_H264_22;
			break;
		case V4L2_MPEG_VIDEO_H264_LEVEL_3_0:
			param.level = MMAL_VIDEO_LEVEL_H264_3;
			break;
		case V4L2_MPEG_VIDEO_H264_LEVEL_3_1:
			param.level = MMAL_VIDEO_LEVEL_H264_31;
			break;
		case V4L2_MPEG_VIDEO_H264_LEVEL_3_2:
			param.level = MMAL_VIDEO_LEVEL_H264_32;
			break;
		case V4L2_MPEG_VIDEO_H264_LEVEL_4_0:
			param.level = MMAL_VIDEO_LEVEL_H264_4;
			break;
		default:
			/* Should never get here */
			break;
		}

		ret = vchiq_mmal_port_parameter_set(dev->instance,
						    &dev->component[COMP_VIDEO_ENCODE]->output[0],
						    mmal_ctrl->mmal_id,
						    &param, sizeof(param));
	}
	return ret;
}
/* Change the scene mode: either restore the user's manual exposure /
 * metering selections (V4L2_SCENE_MODE_NONE) or apply one of the presets
 * from scene_configs[].
 *
 * Always returns 0 on an accepted mode (firmware failures are only logged),
 * or -EINVAL for a scene mode not present in scene_configs[].
 */
static int ctrl_set_scene_mode(struct bcm2835_mmal_dev *dev,
			       struct v4l2_ctrl *ctrl,
			       const struct bcm2835_mmal_v4l2_ctrl *mmal_ctrl)
{
	int ret = 0;
	int shutter_speed;
	struct vchiq_mmal_port *control;

	v4l2_dbg(0, bcm2835_v4l2_debug, &dev->v4l2_dev,
		 "scene mode selected %d, was %d\n", ctrl->val,
		 dev->scene_mode);
	control = &dev->component[COMP_CAMERA]->control;

	if (ctrl->val == dev->scene_mode)
		return 0;

	if (ctrl->val == V4L2_SCENE_MODE_NONE) {
		/* Restore all user selections */
		dev->scene_mode = V4L2_SCENE_MODE_NONE;

		if (dev->exposure_mode_user == MMAL_PARAM_EXPOSUREMODE_OFF)
			shutter_speed = dev->manual_shutter_speed;
		else
			shutter_speed = 0;

		v4l2_dbg(0, bcm2835_v4l2_debug, &dev->v4l2_dev,
			 "%s: scene mode none: shut_speed %d, exp_mode %d, metering %d\n",
			 __func__, shutter_speed, dev->exposure_mode_user,
			 dev->metering_mode);
		ret = vchiq_mmal_port_parameter_set(dev->instance,
						    control,
						    MMAL_PARAMETER_SHUTTER_SPEED,
						    &shutter_speed,
						    sizeof(shutter_speed));
		ret += vchiq_mmal_port_parameter_set(dev->instance,
						     control,
						     MMAL_PARAMETER_EXPOSURE_MODE,
						     &dev->exposure_mode_user,
						     sizeof(u32));
		dev->exposure_mode_active = dev->exposure_mode_user;
		ret += vchiq_mmal_port_parameter_set(dev->instance,
						     control,
						     MMAL_PARAMETER_EXP_METERING_MODE,
						     &dev->metering_mode,
						     sizeof(u32));
		ret += set_framerate_params(dev);
	} else {
		/* Set up scene mode */
		int i;
		const struct v4l2_mmal_scene_config *scene = NULL;
		enum mmal_parameter_exposuremode exposure_mode;
		enum mmal_parameter_exposuremeteringmode metering_mode;

		for (i = 0; i < ARRAY_SIZE(scene_configs); i++) {
			if (scene_configs[i].v4l2_scene == ctrl->val) {
				scene = &scene_configs[i];
				break;
			}
		}
		if (!scene)
			return -EINVAL;

		/* Set all the values */
		dev->scene_mode = ctrl->val;

		if (scene->exposure_mode == MMAL_PARAM_EXPOSUREMODE_OFF)
			shutter_speed = dev->manual_shutter_speed;
		else
			shutter_speed = 0;
		exposure_mode = scene->exposure_mode;
		metering_mode = scene->metering_mode;

		v4l2_dbg(1, bcm2835_v4l2_debug, &dev->v4l2_dev,
			 "%s: scene mode set: shut_speed %d, exp_mode %d, metering %d\n",
			 __func__, shutter_speed, exposure_mode,
			 metering_mode);

		ret = vchiq_mmal_port_parameter_set(dev->instance, control,
						    MMAL_PARAMETER_SHUTTER_SPEED,
						    &shutter_speed,
						    sizeof(shutter_speed));
		ret += vchiq_mmal_port_parameter_set(dev->instance, control,
						     MMAL_PARAMETER_EXPOSURE_MODE,
						     &exposure_mode,
						     sizeof(u32));
		dev->exposure_mode_active = exposure_mode;
		ret += vchiq_mmal_port_parameter_set(dev->instance, control,
						     MMAL_PARAMETER_EXP_METERING_MODE,
						     &metering_mode,
						     sizeof(u32));
		ret += set_framerate_params(dev);
	}
	if (ret) {
		v4l2_dbg(1, bcm2835_v4l2_debug, &dev->v4l2_dev,
			 "%s: Setting scene to %d, ret=%d\n",
			 __func__, ctrl->val, ret);
		ret = -EINVAL;
	}
	return 0;
}
/* Central V4L2 s_ctrl hook: fetches the table entry stashed in ctrl->priv
 * at registration time and dispatches to its setter.
 */
static int bcm2835_mmal_s_ctrl(struct v4l2_ctrl *ctrl)
{
	struct bcm2835_mmal_dev *dev =
		container_of(ctrl->handler, struct bcm2835_mmal_dev,
			     ctrl_handler);
	const struct bcm2835_mmal_v4l2_ctrl *entry = ctrl->priv;
	int ret;

	if (!entry || entry->id != ctrl->id || !entry->setter) {
		pr_warn("mmal_ctrl:%p ctrl id:%d\n", entry, ctrl->id);
		return -EINVAL;
	}

	ret = entry->setter(dev, ctrl, entry);
	if (ret)
		pr_warn("ctrl id:%d/MMAL param %08X- returned ret %d\n",
			ctrl->id, entry->mmal_id, ret);

	return ret;
}
/* All controls funnel through the single s_ctrl dispatcher above. */
static const struct v4l2_ctrl_ops bcm2835_mmal_ctrl_ops = {
	.s_ctrl = bcm2835_mmal_s_ctrl,
};
/* Every V4L2 control exposed by the driver and its mapping onto an MMAL
 * parameter.  Indexed in parallel with dev->ctrls[] (V4L2_CTRL_COUNT slots);
 * for STD_MENU entries .min holds the menu skip mask, not a minimum.
 */
static const struct bcm2835_mmal_v4l2_ctrl v4l2_ctrls[V4L2_CTRL_COUNT] = {
	{
		.id = V4L2_CID_SATURATION,
		.type = MMAL_CONTROL_TYPE_STD,
		.min = -100,
		.max = 100,
		.def = 0,
		.step = 1,
		.imenu = NULL,
		.mmal_id = MMAL_PARAMETER_SATURATION,
		.setter = ctrl_set_rational,
	},
	{
		.id = V4L2_CID_SHARPNESS,
		.type = MMAL_CONTROL_TYPE_STD,
		.min = -100,
		.max = 100,
		.def = 0,
		.step = 1,
		.imenu = NULL,
		.mmal_id = MMAL_PARAMETER_SHARPNESS,
		.setter = ctrl_set_rational,
	},
	{
		.id = V4L2_CID_CONTRAST,
		.type = MMAL_CONTROL_TYPE_STD,
		.min = -100,
		.max = 100,
		.def = 0,
		.step = 1,
		.imenu = NULL,
		.mmal_id = MMAL_PARAMETER_CONTRAST,
		.setter = ctrl_set_rational,
	},
	{
		.id = V4L2_CID_BRIGHTNESS,
		.type = MMAL_CONTROL_TYPE_STD,
		.min = 0,
		.max = 100,
		.def = 50,
		.step = 1,
		.imenu = NULL,
		.mmal_id = MMAL_PARAMETER_BRIGHTNESS,
		.setter = ctrl_set_rational,
	},
	{
		.id = V4L2_CID_ISO_SENSITIVITY,
		.type = MMAL_CONTROL_TYPE_INT_MENU,
		.min = 0,
		.max = ARRAY_SIZE(iso_qmenu) - 1,
		.def = 0,
		.step = 1,
		.imenu = iso_qmenu,
		.mmal_id = MMAL_PARAMETER_ISO,
		.setter = ctrl_set_iso,
	},
	{
		.id = V4L2_CID_ISO_SENSITIVITY_AUTO,
		.type = MMAL_CONTROL_TYPE_STD_MENU,
		.min = 0,
		.max = V4L2_ISO_SENSITIVITY_AUTO,
		.def = V4L2_ISO_SENSITIVITY_AUTO,
		.step = 1,
		.imenu = NULL,
		.mmal_id = MMAL_PARAMETER_ISO,
		.setter = ctrl_set_iso,
	},
	{
		.id = V4L2_CID_IMAGE_STABILIZATION,
		.type = MMAL_CONTROL_TYPE_STD,
		.min = 0,
		.max = 1,
		.def = 0,
		.step = 1,
		.imenu = NULL,
		.mmal_id = MMAL_PARAMETER_VIDEO_STABILISATION,
		.setter = ctrl_set_value,
	},
	{
		.id = V4L2_CID_EXPOSURE_AUTO,
		.type = MMAL_CONTROL_TYPE_STD_MENU,
		.min = ~0x03,
		.max = V4L2_EXPOSURE_APERTURE_PRIORITY,
		.def = V4L2_EXPOSURE_AUTO,
		.step = 0,
		.imenu = NULL,
		.mmal_id = MMAL_PARAMETER_EXPOSURE_MODE,
		.setter = ctrl_set_exposure,
	},
	{
		.id = V4L2_CID_EXPOSURE_ABSOLUTE,
		.type = MMAL_CONTROL_TYPE_STD,
		/* Units of 100usecs */
		.min = 1,
		.max = 1 * 1000 * 10,
		.def = 100 * 10,
		.step = 1,
		.imenu = NULL,
		.mmal_id = MMAL_PARAMETER_SHUTTER_SPEED,
		.setter = ctrl_set_exposure,
	},
	{
		.id = V4L2_CID_AUTO_EXPOSURE_BIAS,
		.type = MMAL_CONTROL_TYPE_INT_MENU,
		.min = 0,
		.max = ARRAY_SIZE(ev_bias_qmenu) - 1,
		/* default is the middle entry, i.e. 0 EV */
		.def = (ARRAY_SIZE(ev_bias_qmenu) + 1) / 2 - 1,
		.step = 0,
		.imenu = ev_bias_qmenu,
		.mmal_id = MMAL_PARAMETER_EXPOSURE_COMP,
		.setter = ctrl_set_value_ev,
	},
	{
		.id = V4L2_CID_EXPOSURE_AUTO_PRIORITY,
		.type = MMAL_CONTROL_TYPE_STD,
		.min = 0,
		.max = 1,
		.def = 0,
		.step = 1,
		.imenu = NULL,
		/* Dummy MMAL ID as it gets mapped into FPS range */
		.mmal_id = 0,
		.setter = ctrl_set_exposure,
	},
	{
		.id = V4L2_CID_EXPOSURE_METERING,
		.type = MMAL_CONTROL_TYPE_STD_MENU,
		.min = ~0xf,
		.max = V4L2_EXPOSURE_METERING_MATRIX,
		.def = V4L2_EXPOSURE_METERING_AVERAGE,
		.step = 0,
		.imenu = NULL,
		.mmal_id = MMAL_PARAMETER_EXP_METERING_MODE,
		.setter = ctrl_set_metering_mode,
	},
	{
		.id = V4L2_CID_AUTO_N_PRESET_WHITE_BALANCE,
		.type = MMAL_CONTROL_TYPE_STD_MENU,
		.min = ~0x3ff,
		.max = V4L2_WHITE_BALANCE_SHADE,
		.def = V4L2_WHITE_BALANCE_AUTO,
		.step = 0,
		.imenu = NULL,
		.mmal_id = MMAL_PARAMETER_AWB_MODE,
		.setter = ctrl_set_awb_mode,
	},
	{
		.id = V4L2_CID_RED_BALANCE,
		.type = MMAL_CONTROL_TYPE_STD,
		.min = 1,
		.max = 7999,
		.def = 1000,
		.step = 1,
		.imenu = NULL,
		.mmal_id = MMAL_PARAMETER_CUSTOM_AWB_GAINS,
		.setter = ctrl_set_awb_gains,
	},
	{
		.id = V4L2_CID_BLUE_BALANCE,
		.type = MMAL_CONTROL_TYPE_STD,
		.min = 1,
		.max = 7999,
		.def = 1000,
		.step = 1,
		.imenu = NULL,
		.mmal_id = MMAL_PARAMETER_CUSTOM_AWB_GAINS,
		.setter = ctrl_set_awb_gains,
	},
	{
		.id = V4L2_CID_COLORFX,
		.type = MMAL_CONTROL_TYPE_STD_MENU,
		.min = 0,
		.max = V4L2_COLORFX_SET_CBCR,
		.def = V4L2_COLORFX_NONE,
		.step = 0,
		.imenu = NULL,
		.mmal_id = MMAL_PARAMETER_IMAGE_EFFECT,
		.setter = ctrl_set_image_effect,
	},
	{
		.id = V4L2_CID_COLORFX_CBCR,
		.type = MMAL_CONTROL_TYPE_STD,
		.min = 0,
		.max = 0xffff,
		.def = 0x8080,
		.step = 1,
		.imenu = NULL,
		.mmal_id = MMAL_PARAMETER_COLOUR_EFFECT,
		.setter = ctrl_set_colfx,
	},
	{
		.id = V4L2_CID_ROTATE,
		.type = MMAL_CONTROL_TYPE_STD,
		.min = 0,
		.max = 360,
		.def = 0,
		.step = 90,
		.imenu = NULL,
		.mmal_id = MMAL_PARAMETER_ROTATION,
		.setter = ctrl_set_rotate,
	},
	{
		.id = V4L2_CID_HFLIP,
		.type = MMAL_CONTROL_TYPE_STD,
		.min = 0,
		.max = 1,
		.def = 0,
		.step = 1,
		.imenu = NULL,
		.mmal_id = MMAL_PARAMETER_MIRROR,
		.setter = ctrl_set_flip,
	},
	{
		.id = V4L2_CID_VFLIP,
		.type = MMAL_CONTROL_TYPE_STD,
		.min = 0,
		.max = 1,
		.def = 0,
		.step = 1,
		.imenu = NULL,
		.mmal_id = MMAL_PARAMETER_MIRROR,
		.setter = ctrl_set_flip,
	},
	{
		.id = V4L2_CID_MPEG_VIDEO_BITRATE_MODE,
		.type = MMAL_CONTROL_TYPE_STD_MENU,
		.min = 0,
		.max = V4L2_MPEG_VIDEO_BITRATE_MODE_CBR,
		.def = 0,
		.step = 0,
		.imenu = NULL,
		.mmal_id = MMAL_PARAMETER_RATECONTROL,
		.setter = ctrl_set_bitrate_mode,
	},
	{
		.id = V4L2_CID_MPEG_VIDEO_BITRATE,
		.type = MMAL_CONTROL_TYPE_STD,
		.min = 25 * 1000,
		.max = 25 * 1000 * 1000,
		.def = 10 * 1000 * 1000,
		.step = 25 * 1000,
		.imenu = NULL,
		.mmal_id = MMAL_PARAMETER_VIDEO_BIT_RATE,
		.setter = ctrl_set_bitrate,
	},
	{
		.id = V4L2_CID_JPEG_COMPRESSION_QUALITY,
		.type = MMAL_CONTROL_TYPE_STD,
		.min = 1,
		.max = 100,
		.def = 30,
		.step = 1,
		.imenu = NULL,
		.mmal_id = MMAL_PARAMETER_JPEG_Q_FACTOR,
		.setter = ctrl_set_image_encode_output,
	},
	{
		.id = V4L2_CID_POWER_LINE_FREQUENCY,
		.type = MMAL_CONTROL_TYPE_STD_MENU,
		.min = 0,
		.max = V4L2_CID_POWER_LINE_FREQUENCY_AUTO,
		.def = 1,
		.step = 1,
		.imenu = NULL,
		.mmal_id = MMAL_PARAMETER_FLICKER_AVOID,
		.setter = ctrl_set_flicker_avoidance,
	},
	{
		.id = V4L2_CID_MPEG_VIDEO_REPEAT_SEQ_HEADER,
		.type = MMAL_CONTROL_TYPE_STD,
		.min = 0,
		.max = 1,
		.def = 0,
		.step = 1,
		.imenu = NULL,
		.mmal_id = MMAL_PARAMETER_VIDEO_ENCODE_INLINE_HEADER,
		.setter = ctrl_set_video_encode_param_output,
	},
	{
		.id = V4L2_CID_MPEG_VIDEO_H264_PROFILE,
		.type = MMAL_CONTROL_TYPE_STD_MENU,
		.min = ~(BIT(V4L2_MPEG_VIDEO_H264_PROFILE_BASELINE) |
			 BIT(V4L2_MPEG_VIDEO_H264_PROFILE_CONSTRAINED_BASELINE) |
			 BIT(V4L2_MPEG_VIDEO_H264_PROFILE_MAIN) |
			 BIT(V4L2_MPEG_VIDEO_H264_PROFILE_HIGH)),
		.max = V4L2_MPEG_VIDEO_H264_PROFILE_HIGH,
		.def = V4L2_MPEG_VIDEO_H264_PROFILE_HIGH,
		.step = 1,
		.imenu = NULL,
		.mmal_id = MMAL_PARAMETER_PROFILE,
		.setter = ctrl_set_video_encode_profile_level,
	},
	{
		.id = V4L2_CID_MPEG_VIDEO_H264_LEVEL,
		.type = MMAL_CONTROL_TYPE_STD_MENU,
		.min = ~(BIT(V4L2_MPEG_VIDEO_H264_LEVEL_1_0) |
			 BIT(V4L2_MPEG_VIDEO_H264_LEVEL_1B) |
			 BIT(V4L2_MPEG_VIDEO_H264_LEVEL_1_1) |
			 BIT(V4L2_MPEG_VIDEO_H264_LEVEL_1_2) |
			 BIT(V4L2_MPEG_VIDEO_H264_LEVEL_1_3) |
			 BIT(V4L2_MPEG_VIDEO_H264_LEVEL_2_0) |
			 BIT(V4L2_MPEG_VIDEO_H264_LEVEL_2_1) |
			 BIT(V4L2_MPEG_VIDEO_H264_LEVEL_2_2) |
			 BIT(V4L2_MPEG_VIDEO_H264_LEVEL_3_0) |
			 BIT(V4L2_MPEG_VIDEO_H264_LEVEL_3_1) |
			 BIT(V4L2_MPEG_VIDEO_H264_LEVEL_3_2) |
			 BIT(V4L2_MPEG_VIDEO_H264_LEVEL_4_0)),
		.max = V4L2_MPEG_VIDEO_H264_LEVEL_4_0,
		.def = V4L2_MPEG_VIDEO_H264_LEVEL_4_0,
		.step = 1,
		.imenu = NULL,
		.mmal_id = MMAL_PARAMETER_PROFILE,
		.setter = ctrl_set_video_encode_profile_level,
	},
	{
		.id = V4L2_CID_SCENE_MODE,
		.type = MMAL_CONTROL_TYPE_STD_MENU,
		/* mask is computed at runtime */
		.min = -1,
		.max = V4L2_SCENE_MODE_TEXT,
		.def = V4L2_SCENE_MODE_NONE,
		.step = 1,
		.imenu = NULL,
		.mmal_id = MMAL_PARAMETER_PROFILE,
		.setter = ctrl_set_scene_mode,
	},
	{
		.id = V4L2_CID_MPEG_VIDEO_H264_I_PERIOD,
		.type = MMAL_CONTROL_TYPE_STD,
		.min = 0,
		.max = 0x7FFFFFFF,
		.def = 60,
		.step = 1,
		.imenu = NULL,
		.mmal_id = MMAL_PARAMETER_INTRAPERIOD,
		.setter = ctrl_set_video_encode_param_output,
	},
};
/* Push the current value of every registered control down to the firmware,
 * stopping at the first failure.
 *
 * Returns 0 on success or the failing setter's return value.
 */
int bcm2835_mmal_set_all_camera_controls(struct bcm2835_mmal_dev *dev)
{
	int ret = 0;
	int c;

	for (c = 0; c < V4L2_CTRL_COUNT; c++) {
		if (!dev->ctrls[c] || !v4l2_ctrls[c].setter)
			continue;

		ret = v4l2_ctrls[c].setter(dev, dev->ctrls[c],
					   &v4l2_ctrls[c]);
		if (ret) {
			v4l2_dbg(1, bcm2835_v4l2_debug, &dev->v4l2_dev,
				 "Failed when setting default values for ctrl %d\n",
				 c);
			break;
		}
	}
	return ret;
}
/* Recompute and program the FPS range on all three camera output ports
 * from the configured timeperframe and the exposure/auto-priority state.
 *
 * Returns 0 on success; failures from the three sets are accumulated.
 */
int set_framerate_params(struct bcm2835_mmal_dev *dev)
{
	static const int cam_ports[] = {
		CAM_PORT_PREVIEW, CAM_PORT_VIDEO, CAM_PORT_CAPTURE
	};
	struct mmal_parameter_fps_range fps_range;
	int i, ret = 0;

	/* MMAL wants frames/sec, i.e. the inverse of timeperframe. */
	fps_range.fps_high.numerator = dev->capture.timeperframe.denominator;
	fps_range.fps_high.denominator = dev->capture.timeperframe.numerator;

	if (dev->exposure_mode_active != MMAL_PARAM_EXPOSUREMODE_OFF &&
	    dev->exp_auto_priority) {
		/* Variable FPS. Define min FPS as 1fps. */
		fps_range.fps_low.numerator = 1;
		fps_range.fps_low.denominator = 1;
	} else {
		/* Fixed FPS - set min and max to be the same */
		fps_range.fps_low = fps_range.fps_high;
	}

	v4l2_dbg(1, bcm2835_v4l2_debug, &dev->v4l2_dev,
		 "Set fps range to %d/%d to %d/%d\n",
		 fps_range.fps_low.numerator,
		 fps_range.fps_low.denominator,
		 fps_range.fps_high.numerator,
		 fps_range.fps_high.denominator);

	for (i = 0; i < ARRAY_SIZE(cam_ports); i++)
		ret += vchiq_mmal_port_parameter_set(dev->instance,
						     &dev->component[COMP_CAMERA]->output[cam_ports[i]],
						     MMAL_PARAMETER_FPS_RANGE,
						     &fps_range,
						     sizeof(fps_range));

	if (ret)
		v4l2_dbg(0, bcm2835_v4l2_debug, &dev->v4l2_dev,
			 "Failed to set fps ret %d\n", ret);

	return ret;
}
/* Register every entry of v4l2_ctrls[] with the V4L2 control framework,
 * stash a pointer to the table entry in ctrl->priv for the s_ctrl
 * dispatcher, then make a second pass to wire up any auto-clusters.
 *
 * Returns 0 on success, or the handler's error if registration failed.
 */
int bcm2835_mmal_init_controls(struct bcm2835_mmal_dev *dev, struct v4l2_ctrl_handler *hdl)
{
	int c;
	const struct bcm2835_mmal_v4l2_ctrl *ctrl;

	v4l2_ctrl_handler_init(hdl, V4L2_CTRL_COUNT);

	for (c = 0; c < V4L2_CTRL_COUNT; c++) {
		ctrl = &v4l2_ctrls[c];

		switch (ctrl->type) {
		case MMAL_CONTROL_TYPE_STD:
			dev->ctrls[c] = v4l2_ctrl_new_std(hdl, &bcm2835_mmal_ctrl_ops,
							  ctrl->id, ctrl->min, ctrl->max,
							  ctrl->step, ctrl->def);
			break;

		case MMAL_CONTROL_TYPE_STD_MENU:
		{
			/* for menu controls the table's .min is the mask */
			u64 mask = ctrl->min;

			if (ctrl->id == V4L2_CID_SCENE_MODE) {
				/* Special handling to work out the mask
				 * value based on the scene_configs array
				 * at runtime. Reduces the chance of
				 * mismatches.
				 */
				int i;

				mask = BIT(V4L2_SCENE_MODE_NONE);
				for (i = 0;
				     i < ARRAY_SIZE(scene_configs);
				     i++) {
					mask |= BIT(scene_configs[i].v4l2_scene);
				}
				/* invert: the skip mask lists *unsupported*
				 * menu entries
				 */
				mask = ~mask;
			}

			dev->ctrls[c] = v4l2_ctrl_new_std_menu(hdl, &bcm2835_mmal_ctrl_ops,
							       ctrl->id, ctrl->max, mask,
							       ctrl->def);
			break;
		}

		case MMAL_CONTROL_TYPE_INT_MENU:
			dev->ctrls[c] = v4l2_ctrl_new_int_menu(hdl, &bcm2835_mmal_ctrl_ops,
							       ctrl->id, ctrl->max,
							       ctrl->def, ctrl->imenu);
			break;

		case MMAL_CONTROL_TYPE_CLUSTER:
			/* skip this entry when constructing controls */
			continue;
		}

		if (hdl->error)
			break;

		/* the s_ctrl dispatcher finds the table entry via priv */
		dev->ctrls[c]->priv = (void *)ctrl;
	}

	if (hdl->error) {
		pr_err("error adding control %d/%d id 0x%x\n", c,
		       V4L2_CTRL_COUNT, ctrl->id);
		return hdl->error;
	}

	/* Second pass: set up any auto-clusters described by the table. */
	for (c = 0; c < V4L2_CTRL_COUNT; c++) {
		ctrl = &v4l2_ctrls[c];

		switch (ctrl->type) {
		case MMAL_CONTROL_TYPE_CLUSTER:
			v4l2_ctrl_auto_cluster(ctrl->min,
					       &dev->ctrls[c + 1],
					       ctrl->max,
					       ctrl->def);
			break;

		case MMAL_CONTROL_TYPE_STD:
		case MMAL_CONTROL_TYPE_STD_MENU:
		case MMAL_CONTROL_TYPE_INT_MENU:
			break;
		}
	}

	return 0;
}
| linux-master | drivers/staging/vc04_services/bcm2835-camera/controls.c |
// SPDX-License-Identifier: GPL-2.0
/*
* Broadcom BCM2835 V4L2 driver
*
* Copyright © 2013 Raspberry Pi (Trading) Ltd.
*
* Authors: Vincent Sanders @ Collabora
* Dave Stevenson @ Broadcom
* (now [email protected])
* Simon Mellor @ Broadcom
* Luke Diamand @ Broadcom
*
* V4L2 driver MMAL vchiq interface code
*/
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
#include <linux/errno.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/mutex.h>
#include <linux/mm.h>
#include <linux/slab.h>
#include <linux/completion.h>
#include <linux/vmalloc.h>
#include <media/videobuf2-vmalloc.h>
#include "../include/linux/raspberrypi/vchiq.h"
#include "mmal-common.h"
#include "mmal-vchiq.h"
#include "mmal-msg.h"
/*
 * maximum number of components supported.
 * This matches the maximum permitted by default on the VPU
 */
#define VCHIQ_MMAL_MAX_COMPONENTS 64

/*
 * Timeout for synchronous msg responses in seconds.
 * Helpful to increase this if stopping in the VPU debugger.
 */
#define SYNC_MSG_TIMEOUT 3

/* Uncomment to hex-dump entire messages in the debug log. */
/*#define FULL_MSG_DUMP 1*/

#ifdef DEBUG
/* Human-readable names indexed by mmal msg type, for DBG_DUMP_MSG. */
static const char *const msg_type_names[] = {
	"UNKNOWN",
	"QUIT",
	"SERVICE_CLOSED",
	"GET_VERSION",
	"COMPONENT_CREATE",
	"COMPONENT_DESTROY",
	"COMPONENT_ENABLE",
	"COMPONENT_DISABLE",
	"PORT_INFO_GET",
	"PORT_INFO_SET",
	"PORT_ACTION",
	"BUFFER_FROM_HOST",
	"BUFFER_TO_HOST",
	"GET_STATS",
	"PORT_PARAMETER_SET",
	"PORT_PARAMETER_GET",
	"EVENT_TO_HOST",
	"GET_CORE_STATS_FOR_PORT",
	"OPAQUE_ALLOCATOR",
	"CONSUME_MEM",
	"LMK",
	"OPAQUE_ALLOCATOR_DESC",
	"DRM_GET_LHS32",
	"DRM_GET_TIME",
	"BUFFER_FROM_HOST_ZEROLEN",
	"PORT_FLUSH",
	"HOST_LOG",
};
#endif

/* Names indexed by enum mmal_msg_port_action_type, used in the
 * port_action_* debug prints.
 */
static const char *const port_action_type_names[] = {
	"UNKNOWN",
	"ENABLE",
	"DISABLE",
	"FLUSH",
	"CONNECT",
	"DISCONNECT",
	"SET_REQUIREMENTS",
};

/* DBG_DUMP_MSG(MSG, MSG_LEN, TITLE): log a message's type and length;
 * with FULL_MSG_DUMP also hex-dump header and payload.  Expands to
 * nothing unless DEBUG is defined.
 */
#if defined(DEBUG)
#if defined(FULL_MSG_DUMP)
#define DBG_DUMP_MSG(MSG, MSG_LEN, TITLE)				\
	do {								\
		pr_debug(TITLE" type:%s(%d) length:%d\n",		\
			 msg_type_names[(MSG)->h.type],			\
			 (MSG)->h.type, (MSG_LEN));			\
		print_hex_dump(KERN_DEBUG, "<<h: ", DUMP_PREFIX_OFFSET,	\
			       16, 4, (MSG),				\
			       sizeof(struct mmal_msg_header), 1);	\
		print_hex_dump(KERN_DEBUG, "<<p: ", DUMP_PREFIX_OFFSET,	\
			       16, 4,					\
			       ((u8 *)(MSG)) + sizeof(struct mmal_msg_header),\
			       (MSG_LEN) - sizeof(struct mmal_msg_header), 1); \
	} while (0)
#else
#define DBG_DUMP_MSG(MSG, MSG_LEN, TITLE)				\
	{								\
		pr_debug(TITLE" type:%s(%d) length:%d\n",		\
			 msg_type_names[(MSG)->h.type],			\
			 (MSG)->h.type, (MSG_LEN));			\
	}
#endif
#else
#define DBG_DUMP_MSG(MSG, MSG_LEN, TITLE)
#endif
struct vchiq_mmal_instance;

/* normal message context
 *
 * One per in-flight message.  Located from the instance's context_map
 * idr via 'handle'.  The union carries either bulk-transfer state or
 * synchronous-reply state depending on how the message is used.
 */
struct mmal_msg_context {
	struct vchiq_mmal_instance *instance;

	/* Index in the context_map idr so that we can find the
	 * mmal_msg_context again when servicing the VCHI reply.
	 */
	int handle;

	union {
		struct {
			/* work struct for buffer_cb callback */
			struct work_struct work;
			/* work struct for deferred callback */
			struct work_struct buffer_to_host_work;
			/* mmal instance */
			struct vchiq_mmal_instance *instance;
			/* mmal port */
			struct vchiq_mmal_port *port;
			/* actual buffer used to store bulk reply */
			struct mmal_buffer *buffer;
			/* amount of buffer used */
			unsigned long buffer_used;
			/* MMAL buffer flags */
			u32 mmal_flags;
			/* Presentation and Decode timestamps */
			s64 pts;
			s64 dts;
			int status;	/* context status */
		} bulk;			/* bulk data */

		struct {
			/* message handle to release */
			struct vchiq_header *msg_handle;
			/* pointer to received message */
			struct mmal_msg *msg;
			/* received message length */
			u32 msg_len;
			/* completion upon reply */
			struct completion cmplt;
		} sync;			/* synchronous response */
	} u;
};

/* Per-connection state for talking to the VPU's MMAL service over vchiq. */
struct vchiq_mmal_instance {
	unsigned int service_handle;

	/* ensure serialised access to service */
	struct mutex vchiq_mutex;

	struct idr context_map;
	/* protect accesses to context_map */
	struct mutex context_map_lock;

	struct vchiq_mmal_component component[VCHIQ_MMAL_MAX_COMPONENTS];

	/* ordered workqueue to process all bulk operations */
	struct workqueue_struct *bulk_wq;

	/* handle for a vchiq instance */
	struct vchiq_instance *vchiq_instance;
};
/* Allocate a message context and register it in the instance's idr.
 *
 * The returned context's handle is sent with the outgoing message so the
 * VCHI reply handler can find the context again.  Returns an ERR_PTR on
 * allocation or idr failure.
 */
static struct mmal_msg_context *
get_msg_context(struct vchiq_mmal_instance *instance)
{
	struct mmal_msg_context *ctx;
	int id;

	/* todo: should this be allocated from a pool to avoid kzalloc */
	ctx = kzalloc(sizeof(*ctx), GFP_KERNEL);
	if (!ctx)
		return ERR_PTR(-ENOMEM);

	/* Create an ID that will be passed along with our message so
	 * that when we service the VCHI reply, we can look up what
	 * message is being replied to.
	 */
	mutex_lock(&instance->context_map_lock);
	id = idr_alloc(&instance->context_map, ctx, 0, 0, GFP_KERNEL);
	mutex_unlock(&instance->context_map_lock);

	if (id < 0) {
		kfree(ctx);
		return ERR_PTR(id);
	}

	ctx->instance = instance;
	ctx->handle = id;

	return ctx;
}
/* Map a message handle (carried in mmal_msg_header.context) back to its
 * mmal_msg_context.  Returns NULL if the handle is not in the map.
 */
static struct mmal_msg_context *
lookup_msg_context(struct vchiq_mmal_instance *instance, int handle)
{
	return idr_find(&instance->context_map, handle);
}
/* Remove a message context from the idr map and free it. */
static void
release_msg_context(struct mmal_msg_context *msg_context)
{
	struct vchiq_mmal_instance *inst = msg_context->instance;

	/* drop the idr mapping under the map lock before freeing */
	mutex_lock(&inst->context_map_lock);
	idr_remove(&inst->context_map, msg_context->handle);
	mutex_unlock(&inst->context_map_lock);

	kfree(msg_context);
}
/* deals with receipt of event to host message
 *
 * Currently only logs the event details; no events are acted upon.
 */
static void event_to_host_cb(struct vchiq_mmal_instance *instance,
			     struct mmal_msg *msg, u32 msg_len)
{
	pr_debug("unhandled event\n");
	pr_debug("component:%u port type:%d num:%d cmd:0x%x length:%d\n",
		 msg->u.event_to_host.client_component,
		 msg->u.event_to_host.port_type,
		 msg->u.event_to_host.port_num,
		 msg->u.event_to_host.cmd, msg->u.event_to_host.length);
}
/* workqueue scheduled callback
*
* we do this because it is important we do not call any other vchiq
* sync calls from within the message delivery thread
*/
static void buffer_work_cb(struct work_struct *work)
{
struct mmal_msg_context *msg_context =
container_of(work, struct mmal_msg_context, u.bulk.work);
struct mmal_buffer *buffer = msg_context->u.bulk.buffer;
if (!buffer) {
pr_err("%s: ctx: %p, No mmal buffer to pass details\n",
__func__, msg_context);
return;
}
buffer->length = msg_context->u.bulk.buffer_used;
buffer->mmal_flags = msg_context->u.bulk.mmal_flags;
buffer->dts = msg_context->u.bulk.dts;
buffer->pts = msg_context->u.bulk.pts;
atomic_dec(&msg_context->u.bulk.port->buffers_with_vpu);
msg_context->u.bulk.port->buffer_cb(msg_context->u.bulk.instance,
msg_context->u.bulk.port,
msg_context->u.bulk.status,
msg_context->u.bulk.buffer);
}
/* workqueue scheduled callback to handle receiving buffers
 *
 * VCHI will allow up to 4 bulk receives to be scheduled before blocking.
 * If we block in the service_callback context then we can't process the
 * VCHI_CALLBACK_BULK_RECEIVED message that would otherwise allow the blocked
 * vchiq_bulk_receive() call to complete.
 */
static void buffer_to_host_work_cb(struct work_struct *work)
{
	struct mmal_msg_context *msg_context =
		container_of(work, struct mmal_msg_context,
			     u.bulk.buffer_to_host_work);
	struct vchiq_mmal_instance *instance = msg_context->instance;
	unsigned long len = msg_context->u.bulk.buffer_used;
	int ret;

	if (!len)
		/* Dummy receive to ensure the buffers remain in order */
		len = 8;

	/* queue the bulk submission; completion arrives via the
	 * VCHIQ_BULK_RECEIVE_DONE/ABORTED service callback with
	 * msg_context as the bulk userdata
	 */
	vchiq_use_service(instance->vchiq_instance, instance->service_handle);
	ret = vchiq_bulk_receive(instance->vchiq_instance, instance->service_handle,
				 msg_context->u.bulk.buffer->buffer,
				 /* Actual receive needs to be a multiple
				  * of 4 bytes
				  */
				 (len + 3) & ~3,
				 msg_context,
				 VCHIQ_BULK_MODE_CALLBACK);

	vchiq_release_service(instance->vchiq_instance, instance->service_handle);

	if (ret != 0)
		pr_err("%s: ctx: %p, vchiq_bulk_receive failed %d\n",
		       __func__, msg_context, ret);
}
/* enqueue a bulk receive for a given message context
 *
 * Records the reply length and timestamps in the context (clamping the
 * length to the client buffer size) and queues buffer_to_host_work_cb()
 * on the ordered bulk workqueue to perform the actual receive.
 * Returns 0 on success or -EINVAL when no buffer is attached.
 */
static int bulk_receive(struct vchiq_mmal_instance *instance,
			struct mmal_msg *msg,
			struct mmal_msg_context *msg_context)
{
	unsigned long rd_len;

	rd_len = msg->u.buffer_from_host.buffer_header.length;

	if (!msg_context->u.bulk.buffer) {
		pr_err("bulk.buffer not configured - error in buffer_from_host\n");
		/* todo: this is a serious error, we should never have
		 * committed a buffer_to_host operation to the mmal
		 * port without the buffer to back it up (underflow
		 * handling) and there is no obvious way to deal with
		 * this - how is the mmal servie going to react when
		 * we fail to do the xfer and reschedule a buffer when
		 * it arrives? perhaps a starved flag to indicate a
		 * waiting bulk receive?
		 */
		return -EINVAL;
	}

	/* ensure we do not overrun the available buffer */
	if (rd_len > msg_context->u.bulk.buffer->buffer_size) {
		rd_len = msg_context->u.bulk.buffer->buffer_size;
		pr_warn("short read as not enough receive buffer space\n");
		/* todo: is this the correct response, what happens to
		 * the rest of the message data?
		 */
	}

	/* store length */
	msg_context->u.bulk.buffer_used = rd_len;
	msg_context->u.bulk.dts = msg->u.buffer_from_host.buffer_header.dts;
	msg_context->u.bulk.pts = msg->u.buffer_from_host.buffer_header.pts;

	queue_work(msg_context->instance->bulk_wq,
		   &msg_context->u.bulk.buffer_to_host_work);

	return 0;
}
/* data in message, memcpy from packet into output buffer */
static int inline_receive(struct vchiq_mmal_instance *instance,
struct mmal_msg *msg,
struct mmal_msg_context *msg_context)
{
memcpy(msg_context->u.bulk.buffer->buffer,
msg->u.buffer_from_host.short_data,
msg->u.buffer_from_host.payload_in_message);
msg_context->u.bulk.buffer_used =
msg->u.buffer_from_host.payload_in_message;
return 0;
}
/* queue the buffer availability with MMAL_MSG_TYPE_BUFFER_FROM_HOST
 *
 * Hands an empty host buffer to the VPU for filling.  The buffer's
 * msg_context tracks the in-flight buffer; buffers_with_vpu is
 * incremented before sending and rolled back if queuing fails.
 * Returns 0 on success or a negative errno.
 */
static int
buffer_from_host(struct vchiq_mmal_instance *instance,
		 struct vchiq_mmal_port *port, struct mmal_buffer *buf)
{
	struct mmal_msg_context *msg_context;
	struct mmal_msg m;
	int ret;

	if (!port->enabled)
		return -EINVAL;

	pr_debug("instance:%u buffer:%p\n", instance->service_handle, buf);

	/* get context */
	if (!buf->msg_context) {
		pr_err("%s: msg_context not allocated, buf %p\n", __func__,
		       buf);
		return -EINVAL;
	}
	msg_context = buf->msg_context;

	/* store bulk message context for when data arrives */
	msg_context->u.bulk.instance = instance;
	msg_context->u.bulk.port = port;
	msg_context->u.bulk.buffer = buf;
	msg_context->u.bulk.buffer_used = 0;

	/* initialise work structure ready to schedule callback */
	INIT_WORK(&msg_context->u.bulk.work, buffer_work_cb);
	INIT_WORK(&msg_context->u.bulk.buffer_to_host_work,
		  buffer_to_host_work_cb);

	atomic_inc(&port->buffers_with_vpu);

	/* prep the buffer from host message */
	memset(&m, 0xbc, sizeof(m));	/* just to make debug clearer */

	m.h.type = MMAL_MSG_TYPE_BUFFER_FROM_HOST;
	m.h.magic = MMAL_MAGIC;
	m.h.context = msg_context->handle;
	m.h.status = 0;

	/* drvbuf is our private data passed back */
	m.u.buffer_from_host.drvbuf.magic = MMAL_MAGIC;
	m.u.buffer_from_host.drvbuf.component_handle = port->component->handle;
	m.u.buffer_from_host.drvbuf.port_handle = port->handle;
	m.u.buffer_from_host.drvbuf.client_context = msg_context->handle;

	/* buffer header */
	m.u.buffer_from_host.buffer_header.cmd = 0;
	m.u.buffer_from_host.buffer_header.data =
		(u32)(unsigned long)buf->buffer;
	m.u.buffer_from_host.buffer_header.alloc_size = buf->buffer_size;
	m.u.buffer_from_host.buffer_header.length = 0;	/* nothing used yet */
	m.u.buffer_from_host.buffer_header.offset = 0;	/* no offset */
	m.u.buffer_from_host.buffer_header.flags = 0;	/* no flags */
	m.u.buffer_from_host.buffer_header.pts = MMAL_TIME_UNKNOWN;
	m.u.buffer_from_host.buffer_header.dts = MMAL_TIME_UNKNOWN;

	/* clear buffer type specific data */
	memset(&m.u.buffer_from_host.buffer_header_type_specific, 0,
	       sizeof(m.u.buffer_from_host.buffer_header_type_specific));

	/* no payload in message */
	m.u.buffer_from_host.payload_in_message = 0;

	vchiq_use_service(instance->vchiq_instance, instance->service_handle);

	ret = vchiq_queue_kernel_message(instance->vchiq_instance, instance->service_handle, &m,
					 sizeof(struct mmal_msg_header) +
					 sizeof(m.u.buffer_from_host));
	if (ret)
		/* message not in flight; undo the buffer accounting */
		atomic_dec(&port->buffers_with_vpu);

	vchiq_release_service(instance->vchiq_instance, instance->service_handle);

	return ret;
}
/* deals with receipt of buffer to host message
 *
 * Looks up the in-flight msg_context via drvbuf.client_context, then
 * either queues a bulk receive for out-of-band payload, copies an inline
 * payload, or records an error status.  Unless a bulk receive was
 * successfully queued (in which case its completion callback takes
 * over), buffer_work_cb() is scheduled to run the port callback.
 */
static void buffer_to_host_cb(struct vchiq_mmal_instance *instance,
			      struct mmal_msg *msg, u32 msg_len)
{
	struct mmal_msg_context *msg_context;
	u32 handle;

	pr_debug("%s: instance:%p msg:%p msg_len:%d\n",
		 __func__, instance, msg, msg_len);

	if (msg->u.buffer_from_host.drvbuf.magic == MMAL_MAGIC) {
		handle = msg->u.buffer_from_host.drvbuf.client_context;
		msg_context = lookup_msg_context(instance, handle);

		if (!msg_context) {
			pr_err("drvbuf.client_context(%u) is invalid\n",
			       handle);
			return;
		}
	} else {
		pr_err("MMAL_MSG_TYPE_BUFFER_TO_HOST with bad magic\n");
		return;
	}

	msg_context->u.bulk.mmal_flags =
		msg->u.buffer_from_host.buffer_header.flags;

	if (msg->h.status != MMAL_MSG_STATUS_SUCCESS) {
		/* message reception had an error */
		pr_warn("error %d in reply\n", msg->h.status);

		msg_context->u.bulk.status = msg->h.status;

	} else if (msg->u.buffer_from_host.buffer_header.length == 0) {
		/* empty buffer */
		if (msg->u.buffer_from_host.buffer_header.flags &
		    MMAL_BUFFER_HEADER_FLAG_EOS) {
			/* EOS: dummy bulk receive keeps buffer ordering */
			msg_context->u.bulk.status =
				bulk_receive(instance, msg, msg_context);
			if (msg_context->u.bulk.status == 0)
				return;	/* successful bulk submission, bulk
					 * completion will trigger callback
					 */
		} else {
			/* do callback with empty buffer - not EOS though */
			msg_context->u.bulk.status = 0;
			msg_context->u.bulk.buffer_used = 0;
		}
	} else if (msg->u.buffer_from_host.payload_in_message == 0) {
		/* data is not in message, queue a bulk receive */
		msg_context->u.bulk.status =
			bulk_receive(instance, msg, msg_context);
		if (msg_context->u.bulk.status == 0)
			return;	/* successful bulk submission, bulk
				 * completion will trigger callback
				 */

		/* failed to submit buffer, this will end badly */
		pr_err("error %d on bulk submission\n",
		       msg_context->u.bulk.status);

	} else if (msg->u.buffer_from_host.payload_in_message <=
		   MMAL_VC_SHORT_DATA) {
		/* data payload within message */
		msg_context->u.bulk.status = inline_receive(instance, msg,
							    msg_context);
	} else {
		pr_err("message with invalid short payload\n");

		/* signal error */
		msg_context->u.bulk.status = -EINVAL;
		msg_context->u.bulk.buffer_used =
			msg->u.buffer_from_host.payload_in_message;
	}

	/* schedule the port callback */
	schedule_work(&msg_context->u.bulk.work);
}
/* Bulk receive completed successfully: hand off to buffer_work_cb(). */
static void bulk_receive_cb(struct vchiq_mmal_instance *instance,
			    struct mmal_msg_context *msg_context)
{
	msg_context->u.bulk.status = 0;

	/* schedule the port callback */
	schedule_work(&msg_context->u.bulk.work);
}
/* Bulk receive was aborted: record -EINTR and run the port callback. */
static void bulk_abort_cb(struct vchiq_mmal_instance *instance,
			  struct mmal_msg_context *msg_context)
{
	pr_err("%s: bulk ABORTED msg_context:%p\n", __func__, msg_context);

	msg_context->u.bulk.status = -EINTR;

	schedule_work(&msg_context->u.bulk.work);
}
/* incoming event service callback
 *
 * Dispatches vchiq events for the MMAL service.  Buffer and event
 * messages are handled (and their headers released) directly; all other
 * message types are treated as replies to synchronous requests and
 * complete the waiting sender via the context carried in the header.
 * Note: for the sync path the header is NOT released here - the waiter
 * in send_synchronous_mmal_msg()'s caller releases it.
 * Bulk completion/abort events carry the msg_context as bulk_ctx.
 */
static int service_callback(struct vchiq_instance *vchiq_instance,
			    enum vchiq_reason reason, struct vchiq_header *header,
			    unsigned int handle, void *bulk_ctx)
{
	struct vchiq_mmal_instance *instance = vchiq_get_service_userdata(vchiq_instance, handle);
	u32 msg_len;
	struct mmal_msg *msg;
	struct mmal_msg_context *msg_context;

	if (!instance) {
		pr_err("Message callback passed NULL instance\n");
		return 0;
	}

	switch (reason) {
	case VCHIQ_MESSAGE_AVAILABLE:
		msg = (void *)header->data;
		msg_len = header->size;

		DBG_DUMP_MSG(msg, msg_len, "<<< reply message");

		/* handling is different for buffer messages */
		switch (msg->h.type) {
		case MMAL_MSG_TYPE_BUFFER_FROM_HOST:
			/* echo of our own submission; nothing to do */
			vchiq_release_message(vchiq_instance, handle, header);
			break;

		case MMAL_MSG_TYPE_EVENT_TO_HOST:
			event_to_host_cb(instance, msg, msg_len);
			vchiq_release_message(vchiq_instance, handle, header);
			break;

		case MMAL_MSG_TYPE_BUFFER_TO_HOST:
			buffer_to_host_cb(instance, msg, msg_len);
			vchiq_release_message(vchiq_instance, handle, header);
			break;

		default:
			/* messages dependent on header context to complete */
			if (!msg->h.context) {
				pr_err("received message context was null!\n");
				vchiq_release_message(vchiq_instance, handle, header);
				break;
			}

			msg_context = lookup_msg_context(instance,
							 msg->h.context);
			if (!msg_context) {
				pr_err("received invalid message context %u!\n",
				       msg->h.context);
				vchiq_release_message(vchiq_instance, handle, header);
				break;
			}

			/* fill in context values */
			msg_context->u.sync.msg_handle = header;
			msg_context->u.sync.msg = msg;
			msg_context->u.sync.msg_len = msg_len;

			/* todo: should this check (completion_done()
			 * == 1) for no one waiting? or do we need a
			 * flag to tell us the completion has been
			 * interrupted so we can free the message and
			 * its context. This probably also solves the
			 * message arriving after interruption todo
			 * below
			 */

			/* complete message so caller knows it happened */
			complete(&msg_context->u.sync.cmplt);
			break;
		}

		break;

	case VCHIQ_BULK_RECEIVE_DONE:
		bulk_receive_cb(instance, bulk_ctx);
		break;

	case VCHIQ_BULK_RECEIVE_ABORTED:
		bulk_abort_cb(instance, bulk_ctx);
		break;

	case VCHIQ_SERVICE_CLOSED:
		/* TODO: consider if this requires action if received when
		 * driver is not explicitly closing the service
		 */
		break;

	default:
		pr_err("Received unhandled message reason %d\n", reason);
		break;
	}

	return 0;
}
/* Send a message and wait (up to SYNC_MSG_TIMEOUT seconds) for its reply.
 *
 * On success returns 0 and hands back the reply message and its vchiq
 * header in *msg_out / *msg_handle; the caller must release the header
 * with vchiq_release_message().  Returns -EINVAL for oversize payloads,
 * -ETIME on timeout, or the error from queuing the message.
 */
static int send_synchronous_mmal_msg(struct vchiq_mmal_instance *instance,
				     struct mmal_msg *msg,
				     unsigned int payload_len,
				     struct mmal_msg **msg_out,
				     struct vchiq_header **msg_handle)
{
	struct mmal_msg_context *msg_context;
	int ret;
	unsigned long timeout;

	/* payload size must not cause message to exceed max size */
	if (payload_len >
	    (MMAL_MSG_MAX_SIZE - sizeof(struct mmal_msg_header))) {
		pr_err("payload length %d exceeds max:%d\n", payload_len,
		       (int)(MMAL_MSG_MAX_SIZE -
			     sizeof(struct mmal_msg_header)));
		return -EINVAL;
	}

	msg_context = get_msg_context(instance);
	if (IS_ERR(msg_context))
		return PTR_ERR(msg_context);

	init_completion(&msg_context->u.sync.cmplt);

	msg->h.magic = MMAL_MAGIC;
	msg->h.context = msg_context->handle;
	msg->h.status = 0;

	DBG_DUMP_MSG(msg, (sizeof(struct mmal_msg_header) + payload_len),
		     ">>> sync message");

	vchiq_use_service(instance->vchiq_instance, instance->service_handle);

	ret = vchiq_queue_kernel_message(instance->vchiq_instance, instance->service_handle, msg,
					 sizeof(struct mmal_msg_header) +
					 payload_len);

	vchiq_release_service(instance->vchiq_instance, instance->service_handle);

	if (ret) {
		pr_err("error %d queuing message\n", ret);
		release_msg_context(msg_context);
		return ret;
	}

	timeout = wait_for_completion_timeout(&msg_context->u.sync.cmplt,
					      SYNC_MSG_TIMEOUT * HZ);
	if (timeout == 0) {
		pr_err("timed out waiting for sync completion\n");
		ret = -ETIME;
		/* todo: what happens if the message arrives after aborting */
		release_msg_context(msg_context);
		return ret;
	}

	/* reply arrived; hand it (and its header) to the caller */
	*msg_out = msg_context->u.sync.msg;
	*msg_handle = msg_context->u.sync.msg_handle;
	release_msg_context(msg_context);

	return 0;
}
/* Log a port's buffer requirements and stream format at debug level. */
static void dump_port_info(struct vchiq_mmal_port *port)
{
	pr_debug("port handle:0x%x enabled:%d\n", port->handle, port->enabled);

	pr_debug("buffer minimum num:%d size:%d align:%d\n",
		 port->minimum_buffer.num,
		 port->minimum_buffer.size, port->minimum_buffer.alignment);

	pr_debug("buffer recommended num:%d size:%d align:%d\n",
		 port->recommended_buffer.num,
		 port->recommended_buffer.size,
		 port->recommended_buffer.alignment);

	pr_debug("buffer current values num:%d size:%d align:%d\n",
		 port->current_buffer.num,
		 port->current_buffer.size, port->current_buffer.alignment);

	pr_debug("elementary stream: type:%d encoding:0x%x variant:0x%x\n",
		 port->format.type,
		 port->format.encoding, port->format.encoding_variant);

	pr_debug("		    bitrate:%d flags:0x%x\n",
		 port->format.bitrate, port->format.flags);

	if (port->format.type == MMAL_ES_TYPE_VIDEO) {
		pr_debug
		    ("es video format: width:%d height:%d colourspace:0x%x\n",
		     port->es.video.width, port->es.video.height,
		     port->es.video.color_space);

		pr_debug("		 : crop xywh %d,%d,%d,%d\n",
			 port->es.video.crop.x,
			 port->es.video.crop.y,
			 port->es.video.crop.width, port->es.video.crop.height);
		pr_debug("		 : framerate %d/%d aspect %d/%d\n",
			 port->es.video.frame_rate.numerator,
			 port->es.video.frame_rate.denominator,
			 port->es.video.par.numerator, port->es.video.par.denominator);
	}
}
/* Fill a wire-format mmal_port from our cached port state.
 * The userdata field carries the host port pointer back to us in replies.
 */
static void port_to_mmal_msg(struct vchiq_mmal_port *port, struct mmal_port *p)
{
	/* todo do readonly fields need setting at all? */
	p->type = port->type;
	p->index = port->index;
	p->index_all = 0;
	p->is_enabled = port->enabled;
	p->buffer_num_min = port->minimum_buffer.num;
	p->buffer_size_min = port->minimum_buffer.size;
	p->buffer_alignment_min = port->minimum_buffer.alignment;
	p->buffer_num_recommended = port->recommended_buffer.num;
	p->buffer_size_recommended = port->recommended_buffer.size;

	/* only three writable fields in a port */
	p->buffer_num = port->current_buffer.num;
	p->buffer_size = port->current_buffer.size;
	p->userdata = (u32)(unsigned long)port;
}
/* Push the locally-cached port state (buffer counts, stream format and
 * extradata) to the VPU with a PORT_INFO_SET message.
 *
 * Returns 0 on success, -EINVAL on a NULL port or an unexpected reply
 * type, or the negated MMAL status from the firmware.
 */
static int port_info_set(struct vchiq_mmal_instance *instance,
			 struct vchiq_mmal_port *port)
{
	int ret;
	struct mmal_msg m;
	struct mmal_msg *rmsg;
	struct vchiq_header *rmsg_handle;

	pr_debug("setting port info port %p\n", port);
	if (!port)
		return -EINVAL;	/* was a bare -1; report a real errno */
	dump_port_info(port);

	m.h.type = MMAL_MSG_TYPE_PORT_INFO_SET;

	m.u.port_info_set.component_handle = port->component->handle;
	m.u.port_info_set.port_type = port->type;
	m.u.port_info_set.port_index = port->index;

	port_to_mmal_msg(port, &m.u.port_info_set.port);

	/* elementary stream format setup */
	m.u.port_info_set.format.type = port->format.type;
	m.u.port_info_set.format.encoding = port->format.encoding;
	m.u.port_info_set.format.encoding_variant =
	    port->format.encoding_variant;
	m.u.port_info_set.format.bitrate = port->format.bitrate;
	m.u.port_info_set.format.flags = port->format.flags;

	memcpy(&m.u.port_info_set.es, &port->es,
	       sizeof(union mmal_es_specific_format));

	m.u.port_info_set.format.extradata_size = port->format.extradata_size;
	memcpy(&m.u.port_info_set.extradata, port->format.extradata,
	       port->format.extradata_size);

	ret = send_synchronous_mmal_msg(instance, &m,
					sizeof(m.u.port_info_set),
					&rmsg, &rmsg_handle);
	if (ret)
		return ret;

	if (rmsg->h.type != MMAL_MSG_TYPE_PORT_INFO_SET) {
		/* got an unexpected message type in reply */
		ret = -EINVAL;
		goto release_msg;
	}

	/* return operation status (reply layout shared with PORT_INFO_GET) */
	ret = -rmsg->u.port_info_get_reply.status;

	pr_debug("%s:result:%d component:0x%x port:%d\n", __func__, ret,
		 port->component->handle, port->handle);

release_msg:
	vchiq_release_message(instance->vchiq_instance, instance->service_handle, rmsg_handle);

	return ret;
}
/* use port info get message to retrieve port information
 *
 * Sends PORT_INFO_GET and copies the reply (enable state, handles,
 * buffer requirements, stream format and extradata) into *port.
 * Returns 0 on success, -EINVAL on an unexpected reply type, or the
 * negated MMAL status from the firmware.
 */
static int port_info_get(struct vchiq_mmal_instance *instance,
			 struct vchiq_mmal_port *port)
{
	int ret;
	struct mmal_msg m;
	struct mmal_msg *rmsg;
	struct vchiq_header *rmsg_handle;

	/* port info time */
	m.h.type = MMAL_MSG_TYPE_PORT_INFO_GET;
	m.u.port_info_get.component_handle = port->component->handle;
	m.u.port_info_get.port_type = port->type;
	m.u.port_info_get.index = port->index;

	ret = send_synchronous_mmal_msg(instance, &m,
					sizeof(m.u.port_info_get),
					&rmsg, &rmsg_handle);
	if (ret)
		return ret;

	if (rmsg->h.type != MMAL_MSG_TYPE_PORT_INFO_GET) {
		/* got an unexpected message type in reply */
		ret = -EINVAL;
		goto release_msg;
	}

	/* return operation status */
	ret = -rmsg->u.port_info_get_reply.status;
	if (ret != MMAL_MSG_STATUS_SUCCESS)
		goto release_msg;

	if (rmsg->u.port_info_get_reply.port.is_enabled == 0)
		port->enabled = false;
	else
		port->enabled = true;

	/* copy the values out of the message */
	port->handle = rmsg->u.port_info_get_reply.port_handle;

	/* port type and index cached to use on port info set because
	 * it does not use a port handle
	 */
	port->type = rmsg->u.port_info_get_reply.port_type;
	port->index = rmsg->u.port_info_get_reply.port_index;

	port->minimum_buffer.num =
	    rmsg->u.port_info_get_reply.port.buffer_num_min;
	port->minimum_buffer.size =
	    rmsg->u.port_info_get_reply.port.buffer_size_min;
	port->minimum_buffer.alignment =
	    rmsg->u.port_info_get_reply.port.buffer_alignment_min;

	/* NOTE(review): recommended alignment copied from the minimum
	 * alignment field - no recommended-alignment field exists in
	 * the reply
	 */
	port->recommended_buffer.alignment =
	    rmsg->u.port_info_get_reply.port.buffer_alignment_min;
	port->recommended_buffer.num =
	    rmsg->u.port_info_get_reply.port.buffer_num_recommended;

	port->current_buffer.num = rmsg->u.port_info_get_reply.port.buffer_num;
	port->current_buffer.size =
	    rmsg->u.port_info_get_reply.port.buffer_size;

	/* stream format */
	port->format.type = rmsg->u.port_info_get_reply.format.type;
	port->format.encoding = rmsg->u.port_info_get_reply.format.encoding;
	port->format.encoding_variant =
	    rmsg->u.port_info_get_reply.format.encoding_variant;
	port->format.bitrate = rmsg->u.port_info_get_reply.format.bitrate;
	port->format.flags = rmsg->u.port_info_get_reply.format.flags;

	/* elementary stream format */
	memcpy(&port->es,
	       &rmsg->u.port_info_get_reply.es,
	       sizeof(union mmal_es_specific_format));
	port->format.es = &port->es;

	port->format.extradata_size =
	    rmsg->u.port_info_get_reply.format.extradata_size;
	memcpy(port->format.extradata,
	       rmsg->u.port_info_get_reply.extradata,
	       port->format.extradata_size);

	pr_debug("received port info\n");
	dump_port_info(port);

release_msg:

	pr_debug("%s:result:%d component:0x%x port:%d\n",
		 __func__, ret, port->component->handle, port->handle);

	vchiq_release_message(instance->vchiq_instance, instance->service_handle, rmsg_handle);

	return ret;
}
/* create component on vc
 *
 * Sends COMPONENT_CREATE for the named component and, on success, fills
 * in the component's handle and port counts from the reply.
 * Returns 0 on success, -EINVAL on an unexpected reply type, or the
 * negated MMAL status from the firmware.
 */
static int create_component(struct vchiq_mmal_instance *instance,
			    struct vchiq_mmal_component *component,
			    const char *name)
{
	int ret;
	struct mmal_msg m;
	struct mmal_msg *rmsg;
	struct vchiq_header *rmsg_handle;

	/* build component create message */
	m.h.type = MMAL_MSG_TYPE_COMPONENT_CREATE;
	m.u.component_create.client_component = component->client_component;
	/* strncpy() does not guarantee NUL termination when the source
	 * fills the destination; terminate explicitly so the VPU always
	 * receives a valid C string.
	 */
	strncpy(m.u.component_create.name, name,
		sizeof(m.u.component_create.name) - 1);
	m.u.component_create.name[sizeof(m.u.component_create.name) - 1] = '\0';

	ret = send_synchronous_mmal_msg(instance, &m,
					sizeof(m.u.component_create),
					&rmsg, &rmsg_handle);
	if (ret)
		return ret;

	if (rmsg->h.type != m.h.type) {
		/* got an unexpected message type in reply */
		ret = -EINVAL;
		goto release_msg;
	}

	ret = -rmsg->u.component_create_reply.status;
	if (ret != MMAL_MSG_STATUS_SUCCESS)
		goto release_msg;

	/* a valid component response received */
	component->handle = rmsg->u.component_create_reply.component_handle;
	component->inputs = rmsg->u.component_create_reply.input_num;
	component->outputs = rmsg->u.component_create_reply.output_num;
	component->clocks = rmsg->u.component_create_reply.clock_num;

	pr_debug("Component handle:0x%x in:%d out:%d clock:%d\n",
		 component->handle,
		 component->inputs, component->outputs, component->clocks);

release_msg:
	vchiq_release_message(instance->vchiq_instance, instance->service_handle, rmsg_handle);

	return ret;
}
/* destroys a component on vc */
static int destroy_component(struct vchiq_mmal_instance *instance,
struct vchiq_mmal_component *component)
{
int ret;
struct mmal_msg m;
struct mmal_msg *rmsg;
struct vchiq_header *rmsg_handle;
m.h.type = MMAL_MSG_TYPE_COMPONENT_DESTROY;
m.u.component_destroy.component_handle = component->handle;
ret = send_synchronous_mmal_msg(instance, &m,
sizeof(m.u.component_destroy),
&rmsg, &rmsg_handle);
if (ret)
return ret;
if (rmsg->h.type != m.h.type) {
/* got an unexpected message type in reply */
ret = -EINVAL;
goto release_msg;
}
ret = -rmsg->u.component_destroy_reply.status;
release_msg:
vchiq_release_message(instance->vchiq_instance, instance->service_handle, rmsg_handle);
return ret;
}
/* enable a component on vc */
static int enable_component(struct vchiq_mmal_instance *instance,
struct vchiq_mmal_component *component)
{
int ret;
struct mmal_msg m;
struct mmal_msg *rmsg;
struct vchiq_header *rmsg_handle;
m.h.type = MMAL_MSG_TYPE_COMPONENT_ENABLE;
m.u.component_enable.component_handle = component->handle;
ret = send_synchronous_mmal_msg(instance, &m,
sizeof(m.u.component_enable),
&rmsg, &rmsg_handle);
if (ret)
return ret;
if (rmsg->h.type != m.h.type) {
/* got an unexpected message type in reply */
ret = -EINVAL;
goto release_msg;
}
ret = -rmsg->u.component_enable_reply.status;
release_msg:
vchiq_release_message(instance->vchiq_instance, instance->service_handle, rmsg_handle);
return ret;
}
/* disable a component on vc */
static int disable_component(struct vchiq_mmal_instance *instance,
struct vchiq_mmal_component *component)
{
int ret;
struct mmal_msg m;
struct mmal_msg *rmsg;
struct vchiq_header *rmsg_handle;
m.h.type = MMAL_MSG_TYPE_COMPONENT_DISABLE;
m.u.component_disable.component_handle = component->handle;
ret = send_synchronous_mmal_msg(instance, &m,
sizeof(m.u.component_disable),
&rmsg, &rmsg_handle);
if (ret)
return ret;
if (rmsg->h.type != m.h.type) {
/* got an unexpected message type in reply */
ret = -EINVAL;
goto release_msg;
}
ret = -rmsg->u.component_disable_reply.status;
release_msg:
vchiq_release_message(instance->vchiq_instance, instance->service_handle, rmsg_handle);
return ret;
}
/* get version of mmal implementation
 *
 * Sends GET_VERSION and writes the firmware's major/minor version into
 * the output parameters.  Returns 0 on success or -EINVAL on an
 * unexpected reply type.
 */
static int get_version(struct vchiq_mmal_instance *instance,
		       u32 *major_out, u32 *minor_out)
{
	struct mmal_msg req;
	struct mmal_msg *reply;
	struct vchiq_header *reply_handle;
	int err;

	req.h.type = MMAL_MSG_TYPE_GET_VERSION;

	err = send_synchronous_mmal_msg(instance, &req,
					sizeof(req.u.version),
					&reply, &reply_handle);
	if (err)
		return err;

	if (reply->h.type != req.h.type) {
		/* got an unexpected message type in reply */
		err = -EINVAL;
		goto release_msg;
	}

	*major_out = reply->u.version.major;
	*minor_out = reply->u.version.minor;

release_msg:
	vchiq_release_message(instance->vchiq_instance, instance->service_handle,
			      reply_handle);

	return err;
}
/* do a port action with a port as a parameter
 *
 * Used for actions (e.g. ENABLE/DISABLE) that take the full port
 * description rather than connection handles.  Returns the negated
 * firmware status (0 on success) or -EINVAL on an unexpected reply.
 */
static int port_action_port(struct vchiq_mmal_instance *instance,
			    struct vchiq_mmal_port *port,
			    enum mmal_msg_port_action_type action_type)
{
	int ret;
	struct mmal_msg m;
	struct mmal_msg *rmsg;
	struct vchiq_header *rmsg_handle;

	m.h.type = MMAL_MSG_TYPE_PORT_ACTION;
	m.u.port_action_port.component_handle = port->component->handle;
	m.u.port_action_port.port_handle = port->handle;
	m.u.port_action_port.action = action_type;

	port_to_mmal_msg(port, &m.u.port_action_port.port);

	ret = send_synchronous_mmal_msg(instance, &m,
					sizeof(m.u.port_action_port),
					&rmsg, &rmsg_handle);
	if (ret)
		return ret;

	if (rmsg->h.type != MMAL_MSG_TYPE_PORT_ACTION) {
		/* got an unexpected message type in reply */
		ret = -EINVAL;
		goto release_msg;
	}

	ret = -rmsg->u.port_action_reply.status;

	pr_debug("%s:result:%d component:0x%x port:%d action:%s(%d)\n",
		 __func__,
		 ret, port->component->handle, port->handle,
		 port_action_type_names[action_type], action_type);

release_msg:
	vchiq_release_message(instance->vchiq_instance, instance->service_handle, rmsg_handle);

	return ret;
}
/* do a port action with handles as parameters
 *
 * Used for actions (e.g. CONNECT/DISCONNECT) that name a peer component
 * and port by handle.  Returns the negated firmware status (0 on
 * success) or -EINVAL on an unexpected reply.
 */
static int port_action_handle(struct vchiq_mmal_instance *instance,
			      struct vchiq_mmal_port *port,
			      enum mmal_msg_port_action_type action_type,
			      u32 connect_component_handle,
			      u32 connect_port_handle)
{
	int ret;
	struct mmal_msg m;
	struct mmal_msg *rmsg;
	struct vchiq_header *rmsg_handle;

	m.h.type = MMAL_MSG_TYPE_PORT_ACTION;

	m.u.port_action_handle.component_handle = port->component->handle;
	m.u.port_action_handle.port_handle = port->handle;
	m.u.port_action_handle.action = action_type;

	m.u.port_action_handle.connect_component_handle =
	    connect_component_handle;
	m.u.port_action_handle.connect_port_handle = connect_port_handle;

	ret = send_synchronous_mmal_msg(instance, &m,
					sizeof(m.u.port_action_handle),
					&rmsg, &rmsg_handle);
	if (ret)
		return ret;

	if (rmsg->h.type != MMAL_MSG_TYPE_PORT_ACTION) {
		/* got an unexpected message type in reply */
		ret = -EINVAL;
		goto release_msg;
	}

	ret = -rmsg->u.port_action_reply.status;

	pr_debug("%s:result:%d component:0x%x port:%d action:%s(%d) connect component:0x%x connect port:%d\n",
		 __func__,
		 ret, port->component->handle, port->handle,
		 port_action_type_names[action_type],
		 action_type, connect_component_handle, connect_port_handle);

release_msg:
	vchiq_release_message(instance->vchiq_instance, instance->service_handle, rmsg_handle);

	return ret;
}
/* Set a port parameter on the VPU.
 *
 * @value/@value_size: raw parameter payload copied into the message.
 * The wire 'size' field includes the id+size header words (2 * u32);
 * the payload length sent covers the 4 leading u32 fields plus the
 * value.  Returns the negated firmware status (0 on success) or
 * -EINVAL on an unexpected reply.
 */
static int port_parameter_set(struct vchiq_mmal_instance *instance,
			      struct vchiq_mmal_port *port,
			      u32 parameter_id, void *value, u32 value_size)
{
	int ret;
	struct mmal_msg m;
	struct mmal_msg *rmsg;
	struct vchiq_header *rmsg_handle;

	m.h.type = MMAL_MSG_TYPE_PORT_PARAMETER_SET;

	m.u.port_parameter_set.component_handle = port->component->handle;
	m.u.port_parameter_set.port_handle = port->handle;
	m.u.port_parameter_set.id = parameter_id;
	m.u.port_parameter_set.size = (2 * sizeof(u32)) + value_size;

	memcpy(&m.u.port_parameter_set.value, value, value_size);

	ret = send_synchronous_mmal_msg(instance, &m,
					(4 * sizeof(u32)) + value_size,
					&rmsg, &rmsg_handle);
	if (ret)
		return ret;

	if (rmsg->h.type != MMAL_MSG_TYPE_PORT_PARAMETER_SET) {
		/* got an unexpected message type in reply */
		ret = -EINVAL;
		goto release_msg;
	}

	ret = -rmsg->u.port_parameter_set_reply.status;

	pr_debug("%s:result:%d component:0x%x port:%d parameter:%d\n",
		 __func__,
		 ret, port->component->handle, port->handle, parameter_id);

release_msg:
	vchiq_release_message(instance->vchiq_instance, instance->service_handle, rmsg_handle);

	return ret;
}
/* get a parameter from a port.
 * @value_size is in/out: buffer capacity on entry, true parameter size on
 * return (which may exceed the number of bytes actually copied).
 */
static int port_parameter_get(struct vchiq_mmal_instance *instance,
			      struct vchiq_mmal_port *port,
			      u32 parameter_id, void *value, u32 *value_size)
{
	int ret;
	struct mmal_msg m;
	struct mmal_msg *rmsg;
	struct vchiq_header *rmsg_handle;
	m.h.type = MMAL_MSG_TYPE_PORT_PARAMETER_GET;
	m.u.port_parameter_get.component_handle = port->component->handle;
	m.u.port_parameter_get.port_handle = port->handle;
	m.u.port_parameter_get.id = parameter_id;
	/* advertise our buffer capacity including the id/size header words */
	m.u.port_parameter_get.size = (2 * sizeof(u32)) + *value_size;
	ret = send_synchronous_mmal_msg(instance, &m,
					sizeof(struct
					       mmal_msg_port_parameter_get),
					&rmsg, &rmsg_handle);
	if (ret)
		return ret;
	if (rmsg->h.type != MMAL_MSG_TYPE_PORT_PARAMETER_GET) {
		/* got an unexpected message type in reply */
		pr_err("Incorrect reply type %d\n", rmsg->h.type);
		ret = -EINVAL;
		goto release_msg;
	}
	/* NOTE(review): unlike port_parameter_set, the status is not negated
	 * here - confirm callers expect the raw VC status value.
	 */
	ret = rmsg->u.port_parameter_get_reply.status;
	/* port_parameter_get_reply.size includes the header,
	 * whilst *value_size doesn't.
	 */
	rmsg->u.port_parameter_get_reply.size -= (2 * sizeof(u32));
	if (ret || rmsg->u.port_parameter_get_reply.size > *value_size) {
		/* Copy only as much as we have space for
		 * but report true size of parameter
		 */
		memcpy(value, &rmsg->u.port_parameter_get_reply.value,
		       *value_size);
	} else {
		memcpy(value, &rmsg->u.port_parameter_get_reply.value,
		       rmsg->u.port_parameter_get_reply.size);
	}
	/* Always report the size of the returned parameter to the caller */
	*value_size = rmsg->u.port_parameter_get_reply.size;
	pr_debug("%s:result:%d component:0x%x port:%d parameter:%d\n", __func__,
		 ret, port->component->handle, port->handle, parameter_id);
release_msg:
	vchiq_release_message(instance->vchiq_instance, instance->service_handle, rmsg_handle);
	return ret;
}
/* disables a port and drains buffers from it */
static int port_disable(struct vchiq_mmal_instance *instance,
			struct vchiq_mmal_port *port)
{
	int ret;
	struct list_head *q, *buf_head;
	unsigned long flags = 0;
	if (!port->enabled)
		return 0;
	/* mark disabled before the VC round trip so racing submissions
	 * see the port as down
	 */
	port->enabled = false;
	ret = port_action_port(instance, port,
			       MMAL_MSG_PORT_ACTION_TYPE_DISABLE);
	if (ret == 0) {
		/*
		 * Drain all queued buffers on port. This should only
		 * apply to buffers that have been queued before the port
		 * has been enabled. If the port has been enabled and buffers
		 * passed, then the buffers should have been removed from this
		 * list, and we should get the relevant callbacks via VCHIQ
		 * to release the buffers.
		 */
		spin_lock_irqsave(&port->slock, flags);
		list_for_each_safe(buf_head, q, &port->buffers) {
			struct mmal_buffer *mmalbuf;
			mmalbuf = list_entry(buf_head, struct mmal_buffer,
					     list);
			list_del(buf_head);
			/* hand each drained buffer back empty via the
			 * registered callback
			 */
			if (port->buffer_cb) {
				mmalbuf->length = 0;
				mmalbuf->mmal_flags = 0;
				mmalbuf->dts = MMAL_TIME_UNKNOWN;
				mmalbuf->pts = MMAL_TIME_UNKNOWN;
				port->buffer_cb(instance,
						port, 0, mmalbuf);
			}
		}
		spin_unlock_irqrestore(&port->slock, flags);
		/* refresh cached port info now that VC state has changed */
		ret = port_info_get(instance, port);
	}
	return ret;
}
/* enable a port and submit any buffers queued while it was disabled */
static int port_enable(struct vchiq_mmal_instance *instance,
		       struct vchiq_mmal_port *port)
{
	unsigned int hdr_count;
	struct list_head *q, *buf_head;
	int ret;
	if (port->enabled)
		return 0;
	ret = port_action_port(instance, port,
			       MMAL_MSG_PORT_ACTION_TYPE_ENABLE);
	if (ret)
		goto done;
	port->enabled = true;
	if (port->buffer_cb) {
		/* send buffer headers to videocore, but no more than the
		 * port's currently negotiated buffer count
		 */
		hdr_count = 1;
		list_for_each_safe(buf_head, q, &port->buffers) {
			struct mmal_buffer *mmalbuf;
			mmalbuf = list_entry(buf_head, struct mmal_buffer,
					     list);
			ret = buffer_from_host(instance, port, mmalbuf);
			if (ret)
				goto done;
			list_del(buf_head);
			hdr_count++;
			if (hdr_count > port->current_buffer.num)
				break;
		}
	}
	/* refresh cached port info now that VC state has changed */
	ret = port_info_get(instance, port);
done:
	return ret;
}
/* ------------------------------------------------------------------
* Exported API
*------------------------------------------------------------------
*/
/* Push the port's format to VC, then read back what was actually applied.
 * Serialised against other MMAL operations by the instance mutex.
 */
int vchiq_mmal_port_set_format(struct vchiq_mmal_instance *instance,
			       struct vchiq_mmal_port *port)
{
	int ret;

	if (mutex_lock_interruptible(&instance->vchiq_mutex))
		return -EINTR;

	ret = port_info_set(instance, port);
	if (!ret)
		/* read what has actually been set */
		ret = port_info_get(instance, port);

	mutex_unlock(&instance->vchiq_mutex);
	return ret;
}
EXPORT_SYMBOL_GPL(vchiq_mmal_port_set_format);
/* set a port parameter, serialised by the instance mutex */
int vchiq_mmal_port_parameter_set(struct vchiq_mmal_instance *instance,
				  struct vchiq_mmal_port *port,
				  u32 parameter, void *value, u32 value_size)
{
	int ret;
	if (mutex_lock_interruptible(&instance->vchiq_mutex))
		return -EINTR;
	ret = port_parameter_set(instance, port, parameter, value, value_size);
	mutex_unlock(&instance->vchiq_mutex);
	return ret;
}
EXPORT_SYMBOL_GPL(vchiq_mmal_port_parameter_set);
/* get a port parameter, serialised by the instance mutex;
 * *value_size is in/out (see port_parameter_get)
 */
int vchiq_mmal_port_parameter_get(struct vchiq_mmal_instance *instance,
				  struct vchiq_mmal_port *port,
				  u32 parameter, void *value, u32 *value_size)
{
	int ret;
	if (mutex_lock_interruptible(&instance->vchiq_mutex))
		return -EINTR;
	ret = port_parameter_get(instance, port, parameter, value, value_size);
	mutex_unlock(&instance->vchiq_mutex);
	return ret;
}
EXPORT_SYMBOL_GPL(vchiq_mmal_port_parameter_get);
/* enable a port
 *
 * enables a port and queues buffers for satisfying callbacks if we
 * provide a callback handler
 */
int vchiq_mmal_port_enable(struct vchiq_mmal_instance *instance,
			   struct vchiq_mmal_port *port,
			   vchiq_mmal_buffer_cb buffer_cb)
{
	int ret;
	if (mutex_lock_interruptible(&instance->vchiq_mutex))
		return -EINTR;
	/* already enabled - noop */
	if (port->enabled) {
		ret = 0;
		goto unlock;
	}
	/* record the callback before enabling so queued buffers can be
	 * completed through it
	 */
	port->buffer_cb = buffer_cb;
	ret = port_enable(instance, port);
unlock:
	mutex_unlock(&instance->vchiq_mutex);
	return ret;
}
EXPORT_SYMBOL_GPL(vchiq_mmal_port_enable);
/* Disable a port, draining queued buffers; a no-op if the port is
 * already disabled. Serialised by the instance mutex.
 */
int vchiq_mmal_port_disable(struct vchiq_mmal_instance *instance,
			    struct vchiq_mmal_port *port)
{
	int ret = 0;

	if (mutex_lock_interruptible(&instance->vchiq_mutex))
		return -EINTR;

	if (port->enabled)
		ret = port_disable(instance, port);

	mutex_unlock(&instance->vchiq_mutex);
	return ret;
}
EXPORT_SYMBOL_GPL(vchiq_mmal_port_disable);
/* ports will be connected in a tunneled manner so data buffers
 * are not handled by client.
 *
 * Any existing connection on @src is torn down first; passing a NULL
 * @dst therefore just disconnects. The destination port format is
 * copied from the source before connecting.
 */
int vchiq_mmal_port_connect_tunnel(struct vchiq_mmal_instance *instance,
				   struct vchiq_mmal_port *src,
				   struct vchiq_mmal_port *dst)
{
	int ret;
	if (mutex_lock_interruptible(&instance->vchiq_mutex))
		return -EINTR;
	/* disconnect ports if connected */
	if (src->connected) {
		ret = port_disable(instance, src);
		if (ret) {
			pr_err("failed disabling src port(%d)\n", ret);
			goto release_unlock;
		}
		/* do not need to disable the destination port as they
		 * are connected and it is done automatically
		 */
		ret = port_action_handle(instance, src,
					 MMAL_MSG_PORT_ACTION_TYPE_DISCONNECT,
					 src->connected->component->handle,
					 src->connected->handle);
		if (ret < 0) {
			pr_err("failed disconnecting src port\n");
			goto release_unlock;
		}
		src->connected->enabled = false;
		src->connected = NULL;
	}
	if (!dst) {
		/* do not make new connection */
		ret = 0;
		pr_debug("not making new connection\n");
		goto release_unlock;
	}
	/* copy src port format to dst */
	dst->format.encoding = src->format.encoding;
	dst->es.video.width = src->es.video.width;
	dst->es.video.height = src->es.video.height;
	dst->es.video.crop.x = src->es.video.crop.x;
	dst->es.video.crop.y = src->es.video.crop.y;
	dst->es.video.crop.width = src->es.video.crop.width;
	dst->es.video.crop.height = src->es.video.crop.height;
	dst->es.video.frame_rate.numerator = src->es.video.frame_rate.numerator;
	dst->es.video.frame_rate.denominator = src->es.video.frame_rate.denominator;
	/* set new format */
	ret = port_info_set(instance, dst);
	if (ret) {
		pr_debug("setting port info failed\n");
		goto release_unlock;
	}
	/* read what has actually been set */
	ret = port_info_get(instance, dst);
	if (ret) {
		pr_debug("read back port info failed\n");
		goto release_unlock;
	}
	/* connect two ports together */
	ret = port_action_handle(instance, src,
				 MMAL_MSG_PORT_ACTION_TYPE_CONNECT,
				 dst->component->handle, dst->handle);
	if (ret < 0) {
		pr_debug("connecting port %d:%d to %d:%d failed\n",
			 src->component->handle, src->handle,
			 dst->component->handle, dst->handle);
		goto release_unlock;
	}
	src->connected = dst;
release_unlock:
	mutex_unlock(&instance->vchiq_mutex);
	return ret;
}
EXPORT_SYMBOL_GPL(vchiq_mmal_port_connect_tunnel);
/* Submit a buffer to a port.
 *
 * If the port rejects the buffer with -EINVAL (port disabled) the buffer
 * is queued on the port and will be submitted by port_enable(); that case
 * is not an error and returns 0.
 *
 * Fix: the original discarded the return value of buffer_from_host()
 * entirely and always returned 0, silently swallowing genuine failures
 * (e.g. allocation or transport errors). Those are now propagated.
 */
int vchiq_mmal_submit_buffer(struct vchiq_mmal_instance *instance,
			     struct vchiq_mmal_port *port,
			     struct mmal_buffer *buffer)
{
	unsigned long flags = 0;
	int ret;
	ret = buffer_from_host(instance, port, buffer);
	if (ret == -EINVAL) {
		/* Port is disabled. Queue for when it is enabled. */
		spin_lock_irqsave(&port->slock, flags);
		list_add_tail(&buffer->list, &port->buffers);
		spin_unlock_irqrestore(&port->slock, flags);
		ret = 0;
	}
	return ret;
}
EXPORT_SYMBOL_GPL(vchiq_mmal_submit_buffer);
/* Allocate and attach a message context to @buf for later submissions.
 * Returns 0 on success or the negative error from get_msg_context().
 */
int mmal_vchi_buffer_init(struct vchiq_mmal_instance *instance,
			  struct mmal_buffer *buf)
{
	struct mmal_msg_context *ctx;

	ctx = get_msg_context(instance);
	if (IS_ERR(ctx))
		return PTR_ERR(ctx);

	buf->msg_context = ctx;
	return 0;
}
EXPORT_SYMBOL_GPL(mmal_vchi_buffer_init);
/* Release the message context attached to @buf, if any. Always returns 0. */
int mmal_vchi_buffer_cleanup(struct mmal_buffer *buf)
{
	if (buf->msg_context)
		release_msg_context(buf->msg_context);
	buf->msg_context = NULL;
	return 0;
}
EXPORT_SYMBOL_GPL(mmal_vchi_buffer_cleanup);
/* Initialise a mmal component and its ports
 *
 * Claims a free slot in instance->component[], creates the component on
 * VC, then gathers info for the control, input, output and clock ports.
 * On failure the component is destroyed and the slot released.
 */
int vchiq_mmal_component_init(struct vchiq_mmal_instance *instance,
			      const char *name,
			      struct vchiq_mmal_component **component_out)
{
	int ret;
	int idx; /* port index */
	struct vchiq_mmal_component *component = NULL;
	if (mutex_lock_interruptible(&instance->vchiq_mutex))
		return -EINTR;
	/* find a free component slot */
	for (idx = 0; idx < VCHIQ_MMAL_MAX_COMPONENTS; idx++) {
		if (!instance->component[idx].in_use) {
			component = &instance->component[idx];
			component->in_use = true;
			break;
		}
	}
	if (!component) {
		ret = -EINVAL;	/* todo is this correct error? */
		goto unlock;
	}
	/* We need a handle to reference back to our component structure.
	 * Use the array index in instance->component rather than rolling
	 * another IDR.
	 */
	component->client_component = idx;
	ret = create_component(instance, component, name);
	if (ret < 0) {
		pr_err("%s: failed to create component %d (Not enough GPU mem?)\n",
		       __func__, ret);
		goto unlock;
	}
	/* ports info needs gathering */
	component->control.type = MMAL_PORT_TYPE_CONTROL;
	component->control.index = 0;
	component->control.component = component;
	spin_lock_init(&component->control.slock);
	INIT_LIST_HEAD(&component->control.buffers);
	ret = port_info_get(instance, &component->control);
	if (ret < 0)
		goto release_component;
	/* idx is reused as a port index from here on; the component slot
	 * index was saved in client_component above
	 */
	for (idx = 0; idx < component->inputs; idx++) {
		component->input[idx].type = MMAL_PORT_TYPE_INPUT;
		component->input[idx].index = idx;
		component->input[idx].component = component;
		spin_lock_init(&component->input[idx].slock);
		INIT_LIST_HEAD(&component->input[idx].buffers);
		ret = port_info_get(instance, &component->input[idx]);
		if (ret < 0)
			goto release_component;
	}
	for (idx = 0; idx < component->outputs; idx++) {
		component->output[idx].type = MMAL_PORT_TYPE_OUTPUT;
		component->output[idx].index = idx;
		component->output[idx].component = component;
		spin_lock_init(&component->output[idx].slock);
		INIT_LIST_HEAD(&component->output[idx].buffers);
		ret = port_info_get(instance, &component->output[idx]);
		if (ret < 0)
			goto release_component;
	}
	for (idx = 0; idx < component->clocks; idx++) {
		component->clock[idx].type = MMAL_PORT_TYPE_CLOCK;
		component->clock[idx].index = idx;
		component->clock[idx].component = component;
		spin_lock_init(&component->clock[idx].slock);
		INIT_LIST_HEAD(&component->clock[idx].buffers);
		ret = port_info_get(instance, &component->clock[idx]);
		if (ret < 0)
			goto release_component;
	}
	*component_out = component;
	mutex_unlock(&instance->vchiq_mutex);
	return 0;
release_component:
	destroy_component(instance, component);
unlock:
	if (component)
		component->in_use = false;
	mutex_unlock(&instance->vchiq_mutex);
	return ret;
}
EXPORT_SYMBOL_GPL(vchiq_mmal_component_init);
/*
 * cause a mmal component to be destroyed
 */
int vchiq_mmal_component_finalise(struct vchiq_mmal_instance *instance,
				  struct vchiq_mmal_component *component)
{
	int ret;
	if (mutex_lock_interruptible(&instance->vchiq_mutex))
		return -EINTR;
	/* NOTE(review): the disable_component() result is overwritten by
	 * destroy_component() below, so a disable failure is silently
	 * ignored - confirm this is intentional
	 */
	if (component->enabled)
		ret = disable_component(instance, component);
	ret = destroy_component(instance, component);
	component->in_use = false;
	mutex_unlock(&instance->vchiq_mutex);
	return ret;
}
EXPORT_SYMBOL_GPL(vchiq_mmal_component_finalise);
/*
 * cause a mmal component to be enabled; a no-op if already enabled
 */
int vchiq_mmal_component_enable(struct vchiq_mmal_instance *instance,
				struct vchiq_mmal_component *component)
{
	int ret = 0;

	if (mutex_lock_interruptible(&instance->vchiq_mutex))
		return -EINTR;

	if (!component->enabled) {
		ret = enable_component(instance, component);
		if (!ret)
			component->enabled = true;
	}

	mutex_unlock(&instance->vchiq_mutex);
	return ret;
}
EXPORT_SYMBOL_GPL(vchiq_mmal_component_enable);
/*
 * cause a mmal component to be disabled; a no-op if already disabled
 */
int vchiq_mmal_component_disable(struct vchiq_mmal_instance *instance,
				 struct vchiq_mmal_component *component)
{
	int ret = 0;

	if (mutex_lock_interruptible(&instance->vchiq_mutex))
		return -EINTR;

	if (component->enabled) {
		ret = disable_component(instance, component);
		if (!ret)
			component->enabled = false;
	}

	mutex_unlock(&instance->vchiq_mutex);
	return ret;
}
EXPORT_SYMBOL_GPL(vchiq_mmal_component_disable);
/* query the VC MMAL protocol version, serialised by the instance mutex */
int vchiq_mmal_version(struct vchiq_mmal_instance *instance,
		       u32 *major_out, u32 *minor_out)
{
	int ret;
	if (mutex_lock_interruptible(&instance->vchiq_mutex))
		return -EINTR;
	ret = get_version(instance, major_out, minor_out);
	mutex_unlock(&instance->vchiq_mutex);
	return ret;
}
EXPORT_SYMBOL_GPL(vchiq_mmal_version);
/* tear down an MMAL instance: close the VCHIQ service, shut down the
 * transport and free all instance resources
 */
int vchiq_mmal_finalise(struct vchiq_mmal_instance *instance)
{
	int status = 0;
	if (!instance)
		return -EINVAL;
	if (mutex_lock_interruptible(&instance->vchiq_mutex))
		return -EINTR;
	/* take a use count so the service can be closed */
	vchiq_use_service(instance->vchiq_instance, instance->service_handle);
	status = vchiq_close_service(instance->vchiq_instance, instance->service_handle);
	if (status != 0)
		pr_err("mmal-vchiq: VCHIQ close failed\n");
	mutex_unlock(&instance->vchiq_mutex);
	vchiq_shutdown(instance->vchiq_instance);
	destroy_workqueue(instance->bulk_wq);
	idr_destroy(&instance->context_map);
	kfree(instance);
	return status;
}
EXPORT_SYMBOL_GPL(vchiq_mmal_finalise);
int vchiq_mmal_init(struct vchiq_mmal_instance **out_instance)
{
int status;
int err = -ENODEV;
struct vchiq_mmal_instance *instance;
struct vchiq_instance *vchiq_instance;
struct vchiq_service_params_kernel params = {
.version = VC_MMAL_VER,
.version_min = VC_MMAL_MIN_VER,
.fourcc = VCHIQ_MAKE_FOURCC('m', 'm', 'a', 'l'),
.callback = service_callback,
.userdata = NULL,
};
/* compile time checks to ensure structure size as they are
* directly (de)serialised from memory.
*/
/* ensure the header structure has packed to the correct size */
BUILD_BUG_ON(sizeof(struct mmal_msg_header) != 24);
/* ensure message structure does not exceed maximum length */
BUILD_BUG_ON(sizeof(struct mmal_msg) > MMAL_MSG_MAX_SIZE);
/* mmal port struct is correct size */
BUILD_BUG_ON(sizeof(struct mmal_port) != 64);
/* create a vchi instance */
status = vchiq_initialise(&vchiq_instance);
if (status) {
pr_err("Failed to initialise VCHI instance (status=%d)\n",
status);
return -EIO;
}
status = vchiq_connect(vchiq_instance);
if (status) {
pr_err("Failed to connect VCHI instance (status=%d)\n", status);
err = -EIO;
goto err_shutdown_vchiq;
}
instance = kzalloc(sizeof(*instance), GFP_KERNEL);
if (!instance) {
err = -ENOMEM;
goto err_shutdown_vchiq;
}
mutex_init(&instance->vchiq_mutex);
instance->vchiq_instance = vchiq_instance;
mutex_init(&instance->context_map_lock);
idr_init_base(&instance->context_map, 1);
params.userdata = instance;
instance->bulk_wq = alloc_ordered_workqueue("mmal-vchiq",
WQ_MEM_RECLAIM);
if (!instance->bulk_wq)
goto err_free;
status = vchiq_open_service(vchiq_instance, ¶ms,
&instance->service_handle);
if (status) {
pr_err("Failed to open VCHI service connection (status=%d)\n",
status);
goto err_close_services;
}
vchiq_release_service(instance->vchiq_instance, instance->service_handle);
*out_instance = instance;
return 0;
err_close_services:
vchiq_close_service(instance->vchiq_instance, instance->service_handle);
destroy_workqueue(instance->bulk_wq);
err_free:
kfree(instance);
err_shutdown_vchiq:
vchiq_shutdown(vchiq_instance);
return err;
}
EXPORT_SYMBOL_GPL(vchiq_mmal_init);
MODULE_DESCRIPTION("BCM2835 MMAL VCHIQ interface");
MODULE_AUTHOR("Dave Stevenson, <[email protected]>");
MODULE_LICENSE("GPL");
| linux-master | drivers/staging/vc04_services/vchiq-mmal/mmal-vchiq.c |
// SPDX-License-Identifier: GPL-2.0
/* Copyright 2011 Broadcom Corporation. All rights reserved. */
#include <linux/interrupt.h>
#include <linux/slab.h>
#include <sound/asoundef.h>
#include "bcm2835.h"
/* hardware definition for the normal PCM playback device */
static const struct snd_pcm_hardware snd_bcm2835_playback_hw = {
	.info = (SNDRV_PCM_INFO_INTERLEAVED | SNDRV_PCM_INFO_BLOCK_TRANSFER |
		 SNDRV_PCM_INFO_MMAP | SNDRV_PCM_INFO_MMAP_VALID |
		 SNDRV_PCM_INFO_SYNC_APPLPTR | SNDRV_PCM_INFO_BATCH),
	.formats = SNDRV_PCM_FMTBIT_U8 | SNDRV_PCM_FMTBIT_S16_LE,
	.rates = SNDRV_PCM_RATE_CONTINUOUS | SNDRV_PCM_RATE_8000_192000,
	.rate_min = 8000,
	.rate_max = 192000,
	.channels_min = 1,
	.channels_max = 8,
	.buffer_bytes_max = 512 * 1024,
	.period_bytes_min = 1 * 1024,
	.period_bytes_max = 512 * 1024,
	.periods_min = 1,
	.periods_max = 128,
};
/* hardware definition for the SPDIF passthrough device: stereo S16_LE
 * at 44.1/48 kHz only
 */
static const struct snd_pcm_hardware snd_bcm2835_playback_spdif_hw = {
	.info = (SNDRV_PCM_INFO_INTERLEAVED | SNDRV_PCM_INFO_BLOCK_TRANSFER |
		 SNDRV_PCM_INFO_MMAP | SNDRV_PCM_INFO_MMAP_VALID |
		 SNDRV_PCM_INFO_SYNC_APPLPTR | SNDRV_PCM_INFO_BATCH),
	.formats = SNDRV_PCM_FMTBIT_S16_LE,
	.rates = SNDRV_PCM_RATE_CONTINUOUS | SNDRV_PCM_RATE_44100 |
		 SNDRV_PCM_RATE_48000,
	.rate_min = 44100,
	.rate_max = 48000,
	.channels_min = 2,
	.channels_max = 2,
	.buffer_bytes_max = 128 * 1024,
	.period_bytes_min = 1 * 1024,
	.period_bytes_max = 128 * 1024,
	.periods_min = 1,
	.periods_max = 128,
};
/* runtime->private_free callback: frees the bcm2835_alsa_stream */
static void snd_bcm2835_playback_free(struct snd_pcm_runtime *runtime)
{
	kfree(runtime->private_data);
}
/* called from the VC audio service callback when @bytes of audio have
 * been consumed; advances the hardware pointer and signals period
 * completion
 */
void bcm2835_playback_fifo(struct bcm2835_alsa_stream *alsa_stream,
			   unsigned int bytes)
{
	struct snd_pcm_substream *substream = alsa_stream->substream;
	unsigned int pos;
	if (!alsa_stream->period_size)
		return;
	/* consuming a whole buffer's worth means we underran (or finished
	 * draining)
	 */
	if (bytes >= alsa_stream->buffer_size) {
		snd_pcm_stream_lock(substream);
		snd_pcm_stop(substream,
			     alsa_stream->draining ?
			     SNDRV_PCM_STATE_SETUP :
			     SNDRV_PCM_STATE_XRUN);
		snd_pcm_stream_unlock(substream);
		return;
	}
	/* advance the ring-buffer position, wrapping at buffer_size */
	pos = atomic_read(&alsa_stream->pos);
	pos += bytes;
	pos %= alsa_stream->buffer_size;
	atomic_set(&alsa_stream->pos, pos);
	alsa_stream->period_offset += bytes;
	/* restart delay interpolation from this GPU notification */
	alsa_stream->interpolate_start = ktime_get();
	if (alsa_stream->period_offset >= alsa_stream->period_size) {
		alsa_stream->period_offset %= alsa_stream->period_size;
		snd_pcm_period_elapsed(substream);
	}
}
/* open callback shared by the PCM and SPDIF devices.
 * The SPDIF device is exclusive with everything; normal substreams are
 * exclusive per index, tracked in the chip->opened bitmask.
 */
static int snd_bcm2835_playback_open_generic(struct snd_pcm_substream *substream, int spdif)
{
	struct bcm2835_chip *chip = snd_pcm_substream_chip(substream);
	struct snd_pcm_runtime *runtime = substream->runtime;
	struct bcm2835_alsa_stream *alsa_stream;
	int idx;
	int err;
	mutex_lock(&chip->audio_mutex);
	idx = substream->number;
	if (spdif && chip->opened) {
		err = -EBUSY;
		goto out;
	} else if (!spdif && (chip->opened & (1 << idx))) {
		err = -EBUSY;
		goto out;
	}
	if (idx >= MAX_SUBSTREAMS) {
		dev_err(chip->dev,
			"substream(%d) device doesn't exist max(%d) substreams allowed\n",
			idx, MAX_SUBSTREAMS);
		err = -ENODEV;
		goto out;
	}
	alsa_stream = kzalloc(sizeof(*alsa_stream), GFP_KERNEL);
	if (!alsa_stream) {
		err = -ENOMEM;
		goto out;
	}
	/* Initialise alsa_stream */
	alsa_stream->chip = chip;
	alsa_stream->substream = substream;
	alsa_stream->idx = idx;
	err = bcm2835_audio_open(alsa_stream);
	if (err) {
		kfree(alsa_stream);
		goto out;
	}
	runtime->private_data = alsa_stream;
	/* freed by snd_bcm2835_playback_free on runtime teardown */
	runtime->private_free = snd_bcm2835_playback_free;
	if (spdif) {
		runtime->hw = snd_bcm2835_playback_spdif_hw;
	} else {
		/* clear spdif status, as we are not in spdif mode */
		chip->spdif_status = 0;
		runtime->hw = snd_bcm2835_playback_hw;
	}
	/* minimum 16 bytes alignment (for vchiq bulk transfers) */
	snd_pcm_hw_constraint_step(runtime,
				   0,
				   SNDRV_PCM_HW_PARAM_PERIOD_BYTES,
				   16);
	/* position update is in 10ms order */
	snd_pcm_hw_constraint_minmax(runtime,
				     SNDRV_PCM_HW_PARAM_PERIOD_TIME,
				     10 * 1000, UINT_MAX);
	chip->alsa_stream[idx] = alsa_stream;
	chip->opened |= (1 << idx);
out:
	mutex_unlock(&chip->audio_mutex);
	return err;
}
/* open callback for the normal PCM device */
static int snd_bcm2835_playback_open(struct snd_pcm_substream *substream)
{
	return snd_bcm2835_playback_open_generic(substream, 0);
}
/* open callback for the SPDIF passthrough device */
static int snd_bcm2835_playback_spdif_open(struct snd_pcm_substream *substream)
{
	return snd_bcm2835_playback_open_generic(substream, 1);
}
/* close callback: shuts down the VC audio stream and releases the
 * substream's slot in the chip
 */
static int snd_bcm2835_playback_close(struct snd_pcm_substream *substream)
{
	struct bcm2835_alsa_stream *alsa_stream;
	struct snd_pcm_runtime *runtime;
	struct bcm2835_chip *chip;
	chip = snd_pcm_substream_chip(substream);
	mutex_lock(&chip->audio_mutex);
	runtime = substream->runtime;
	alsa_stream = runtime->private_data;
	alsa_stream->period_size = 0;
	alsa_stream->buffer_size = 0;
	bcm2835_audio_close(alsa_stream);
	alsa_stream->chip->alsa_stream[alsa_stream->idx] = NULL;
	/*
	 * Do not free up alsa_stream here, it will be freed up by
	 * runtime->private_free callback we registered in *_open above
	 */
	chip->opened &= ~(1 << substream->number);
	mutex_unlock(&chip->audio_mutex);
	return 0;
}
/* prepare callback: pushes the negotiated parameters to VC and resets
 * the stream's position bookkeeping
 */
static int snd_bcm2835_pcm_prepare(struct snd_pcm_substream *substream)
{
	struct bcm2835_chip *chip = snd_pcm_substream_chip(substream);
	struct snd_pcm_runtime *runtime = substream->runtime;
	struct bcm2835_alsa_stream *alsa_stream = runtime->private_data;
	int channels;
	int err;
	/* notify the vchiq that it should enter spdif passthrough mode by
	 * setting channels=0 (see
	 * https://github.com/raspberrypi/linux/issues/528)
	 */
	if (chip->spdif_status & IEC958_AES0_NONAUDIO)
		channels = 0;
	else
		channels = runtime->channels;
	err = bcm2835_audio_set_params(alsa_stream, channels,
				       runtime->rate,
				       snd_pcm_format_width(runtime->format));
	if (err < 0)
		return err;
	/* indirect buffer mirrors the full ALSA buffer 1:1 */
	memset(&alsa_stream->pcm_indirect, 0, sizeof(alsa_stream->pcm_indirect));
	alsa_stream->pcm_indirect.hw_buffer_size =
		alsa_stream->pcm_indirect.sw_buffer_size =
		snd_pcm_lib_buffer_bytes(substream);
	alsa_stream->buffer_size = snd_pcm_lib_buffer_bytes(substream);
	alsa_stream->period_size = snd_pcm_lib_period_bytes(substream);
	atomic_set(&alsa_stream->pos, 0);
	alsa_stream->period_offset = 0;
	alsa_stream->draining = false;
	alsa_stream->interpolate_start = ktime_get();
	return 0;
}
/* indirect-PCM copy callback: ships @bytes from the DMA area to VC */
static void snd_bcm2835_pcm_transfer(struct snd_pcm_substream *substream,
				     struct snd_pcm_indirect *rec, size_t bytes)
{
	struct snd_pcm_runtime *runtime = substream->runtime;
	struct bcm2835_alsa_stream *alsa_stream = runtime->private_data;
	void *src = (void *)(substream->runtime->dma_area + rec->sw_data);
	bcm2835_audio_write(alsa_stream, bytes, src);
}
/* ack callback: drives the indirect-PCM engine, which calls
 * snd_bcm2835_pcm_transfer for newly available data
 */
static int snd_bcm2835_pcm_ack(struct snd_pcm_substream *substream)
{
	struct snd_pcm_runtime *runtime = substream->runtime;
	struct bcm2835_alsa_stream *alsa_stream = runtime->private_data;
	struct snd_pcm_indirect *pcm_indirect = &alsa_stream->pcm_indirect;
	return snd_pcm_indirect_playback_transfer(substream, pcm_indirect,
						  snd_bcm2835_pcm_transfer);
}
/* trigger callback: start/stop/drain the VC audio stream */
static int snd_bcm2835_pcm_trigger(struct snd_pcm_substream *substream, int cmd)
{
	struct snd_pcm_runtime *runtime = substream->runtime;
	struct bcm2835_alsa_stream *alsa_stream = runtime->private_data;
	switch (cmd) {
	case SNDRV_PCM_TRIGGER_START:
		return bcm2835_audio_start(alsa_stream);
	case SNDRV_PCM_TRIGGER_DRAIN:
		/* draining flag makes the fifo callback stop with
		 * SETUP instead of flagging an XRUN
		 */
		alsa_stream->draining = true;
		return bcm2835_audio_drain(alsa_stream);
	case SNDRV_PCM_TRIGGER_STOP:
		return bcm2835_audio_stop(alsa_stream);
	default:
		return -EINVAL;
	}
}
/* pointer callback: reports the hardware position, with the delay
 * interpolated between GPU notifications
 */
static snd_pcm_uframes_t
snd_bcm2835_pcm_pointer(struct snd_pcm_substream *substream)
{
	struct snd_pcm_runtime *runtime = substream->runtime;
	struct bcm2835_alsa_stream *alsa_stream = runtime->private_data;
	ktime_t now = ktime_get();
	/* Give userspace better delay reporting by interpolating between GPU
	 * notifications, assuming audio speed is close enough to the clock
	 * used for ktime
	 */
	if ((ktime_to_ns(alsa_stream->interpolate_start)) &&
	    (ktime_compare(alsa_stream->interpolate_start, now) < 0)) {
		u64 interval =
			(ktime_to_ns(ktime_sub(now,
					       alsa_stream->interpolate_start)));
		u64 frames_output_in_interval =
			div_u64((interval * runtime->rate), 1000000000);
		/* negative delay: frames estimated to have already played */
		snd_pcm_sframes_t frames_output_in_interval_sized =
			-frames_output_in_interval;
		runtime->delay = frames_output_in_interval_sized;
	}
	return snd_pcm_indirect_playback_pointer(substream,
						 &alsa_stream->pcm_indirect,
						 atomic_read(&alsa_stream->pos));
}
/* operators - PCM and SPDIF share everything except the open callback */
static const struct snd_pcm_ops snd_bcm2835_playback_ops = {
	.open = snd_bcm2835_playback_open,
	.close = snd_bcm2835_playback_close,
	.prepare = snd_bcm2835_pcm_prepare,
	.trigger = snd_bcm2835_pcm_trigger,
	.pointer = snd_bcm2835_pcm_pointer,
	.ack = snd_bcm2835_pcm_ack,
};
static const struct snd_pcm_ops snd_bcm2835_playback_spdif_ops = {
	.open = snd_bcm2835_playback_spdif_open,
	.close = snd_bcm2835_playback_close,
	.prepare = snd_bcm2835_pcm_prepare,
	.trigger = snd_bcm2835_pcm_trigger,
	.pointer = snd_bcm2835_pcm_pointer,
	.ack = snd_bcm2835_pcm_ack,
};
/* create a pcm device
 *
 * Registers either the normal (@spdif false) or passthrough PCM with
 * @numchannels substreams and a managed 128 KiB DMA buffer.
 */
int snd_bcm2835_new_pcm(struct bcm2835_chip *chip, const char *name,
			int idx, enum snd_bcm2835_route route,
			u32 numchannels, bool spdif)
{
	struct snd_pcm *pcm;
	int err;
	err = snd_pcm_new(chip->card, name, idx, numchannels, 0, &pcm);
	if (err)
		return err;
	pcm->private_data = chip;
	/* ops sleep (vchiq), so run them outside atomic context */
	pcm->nonatomic = true;
	strscpy(pcm->name, name, sizeof(pcm->name));
	if (!spdif) {
		chip->dest = route;
		chip->volume = 0;
		chip->mute = CTRL_VOL_UNMUTE;
	}
	snd_pcm_set_ops(pcm, SNDRV_PCM_STREAM_PLAYBACK,
			spdif ? &snd_bcm2835_playback_spdif_ops :
			&snd_bcm2835_playback_ops);
	snd_pcm_set_managed_buffer_all(pcm, SNDRV_DMA_TYPE_DEV,
				       chip->card->dev, 128 * 1024, 128 * 1024);
	if (spdif)
		chip->pcm_spdif = pcm;
	else
		chip->pcm = pcm;
	return 0;
}
| linux-master | drivers/staging/vc04_services/bcm2835-audio/bcm2835-pcm.c |
// SPDX-License-Identifier: GPL-2.0
/* Copyright 2011 Broadcom Corporation. All rights reserved. */
#include <linux/platform_device.h>
#include <linux/init.h>
#include <linux/slab.h>
#include <linux/module.h>
#include "bcm2835.h"
/* module parameters selecting which virtual cards to create and the
 * total substream budget shared between them
 */
static bool enable_hdmi;
static bool enable_headphones = true;
static int num_channels = MAX_SUBSTREAMS;
module_param(enable_hdmi, bool, 0444);
MODULE_PARM_DESC(enable_hdmi, "Enables HDMI virtual audio device");
module_param(enable_headphones, bool, 0444);
MODULE_PARM_DESC(enable_headphones, "Enables Headphones virtual audio device");
module_param(num_channels, int, 0644);
MODULE_PARM_DESC(num_channels, "Number of audio channels (default: 8)");
/* devres release callback for the VCHI context */
static void bcm2835_devm_free_vchi_ctx(struct device *dev, void *res)
{
	struct bcm2835_vchi_ctx *vchi_ctx = res;
	bcm2835_free_vchi_ctx(vchi_ctx);
}
/* allocate a device-managed VCHI context; freed automatically on
 * device teardown via bcm2835_devm_free_vchi_ctx
 */
static int bcm2835_devm_add_vchi_ctx(struct device *dev)
{
	struct bcm2835_vchi_ctx *vchi_ctx;
	int ret;
	vchi_ctx = devres_alloc(bcm2835_devm_free_vchi_ctx, sizeof(*vchi_ctx),
				GFP_KERNEL);
	if (!vchi_ctx)
		return -ENOMEM;
	ret = bcm2835_new_vchi_ctx(dev, vchi_ctx);
	if (ret) {
		devres_free(vchi_ctx);
		return ret;
	}
	devres_add(dev, vchi_ctx);
	return 0;
}
/* per-virtual-card description: naming, minimum channel budget, and
 * constructors for its PCM device(s) and mixer controls
 */
struct bcm2835_audio_driver {
	struct device_driver driver;
	const char *shortname;
	const char *longname;
	int minchannels;
	int (*newpcm)(struct bcm2835_chip *chip, const char *name,
		      enum snd_bcm2835_route route, u32 numchannels);
	int (*newctl)(struct bcm2835_chip *chip);
	enum snd_bcm2835_route route;
};
/* Create the main PCM device followed by its single-substream IEC958
 * (SPDIF) companion; returns the first failure, if any.
 */
static int bcm2835_audio_dual_newpcm(struct bcm2835_chip *chip,
				     const char *name,
				     enum snd_bcm2835_route route,
				     u32 numchannels)
{
	int ret;

	ret = snd_bcm2835_new_pcm(chip, name, 0, route, numchannels, false);
	if (!ret)
		ret = snd_bcm2835_new_pcm(chip, "IEC958", 1, route, 1, true);
	return ret;
}
/* create a single PCM device with no SPDIF companion */
static int bcm2835_audio_simple_newpcm(struct bcm2835_chip *chip,
				       const char *name,
				       enum snd_bcm2835_route route,
				       u32 numchannels)
{
	return snd_bcm2835_new_pcm(chip, name, 0, route, numchannels, false);
}
/* descriptor for the HDMI virtual card (PCM + SPDIF) */
static struct bcm2835_audio_driver bcm2835_audio_hdmi = {
	.driver = {
		.name = "bcm2835_hdmi",
		.owner = THIS_MODULE,
	},
	.shortname = "bcm2835 HDMI",
	.longname = "bcm2835 HDMI",
	.minchannels = 1,
	.newpcm = bcm2835_audio_dual_newpcm,
	.newctl = snd_bcm2835_new_hdmi_ctl,
	.route = AUDIO_DEST_HDMI
};
/* descriptor for the headphone-jack virtual card (PCM only) */
static struct bcm2835_audio_driver bcm2835_audio_headphones = {
	.driver = {
		.name = "bcm2835_headphones",
		.owner = THIS_MODULE,
	},
	.shortname = "bcm2835 Headphones",
	.longname = "bcm2835 Headphones",
	.minchannels = 1,
	.newpcm = bcm2835_audio_simple_newpcm,
	.newctl = snd_bcm2835_new_headphones_ctl,
	.route = AUDIO_DEST_HEADPHONES
};
/* pairs each descriptor with its module-parameter enable flag */
struct bcm2835_audio_drivers {
	struct bcm2835_audio_driver *audio_driver;
	const bool *is_enabled;
};
static struct bcm2835_audio_drivers children_devices[] = {
	{
		.audio_driver = &bcm2835_audio_hdmi,
		.is_enabled = &enable_hdmi,
	},
	{
		.audio_driver = &bcm2835_audio_headphones,
		.is_enabled = &enable_headphones,
	},
};
/* devm action: free the sound card on device teardown */
static void bcm2835_card_free(void *data)
{
	snd_card_free(data);
}
/* create, populate and register one sound card described by
 * @audio_driver, with @numchans playback substreams; card lifetime is
 * tied to @dev via a devm action
 */
static int snd_add_child_device(struct device *dev,
				struct bcm2835_audio_driver *audio_driver,
				u32 numchans)
{
	struct bcm2835_chip *chip;
	struct snd_card *card;
	int err;
	err = snd_card_new(dev, -1, NULL, THIS_MODULE, sizeof(*chip), &card);
	if (err < 0) {
		dev_err(dev, "Failed to create card");
		return err;
	}
	chip = card->private_data;
	chip->card = card;
	chip->dev = dev;
	mutex_init(&chip->audio_mutex);
	/* reuse the VCHI context registered by the probe routine */
	chip->vchi_ctx = devres_find(dev,
				     bcm2835_devm_free_vchi_ctx, NULL, NULL);
	if (!chip->vchi_ctx) {
		err = -ENODEV;
		goto error;
	}
	strscpy(card->driver, audio_driver->driver.name, sizeof(card->driver));
	strscpy(card->shortname, audio_driver->shortname, sizeof(card->shortname));
	strscpy(card->longname, audio_driver->longname, sizeof(card->longname));
	err = audio_driver->newpcm(chip, audio_driver->shortname,
				   audio_driver->route,
				   numchans);
	if (err) {
		dev_err(dev, "Failed to create pcm, error %d\n", err);
		goto error;
	}
	err = audio_driver->newctl(chip);
	if (err) {
		dev_err(dev, "Failed to create controls, error %d\n", err);
		goto error;
	}
	err = snd_card_register(card);
	if (err) {
		dev_err(dev, "Failed to register card, error %d\n", err);
		goto error;
	}
	dev_set_drvdata(dev, chip);
	/* from here the card is freed automatically on device removal */
	err = devm_add_action(dev, bcm2835_card_free, card);
	if (err < 0) {
		dev_err(dev, "Failed to add devm action, err %d\n", err);
		goto error;
	}
	dev_info(dev, "card created with %d channels\n", numchans);
	return 0;
error:
	snd_card_free(card);
	return err;
}
/* create all enabled child cards, distributing @numchans substreams:
 * every card gets its minimum, then the surplus is split evenly with
 * the remainder given to the first card
 */
static int snd_add_child_devices(struct device *device, u32 numchans)
{
	int extrachannels_per_driver = 0;
	int extrachannels_remainder = 0;
	int count_devices = 0;
	int extrachannels = 0;
	int minchannels = 0;
	int i;
	for (i = 0; i < ARRAY_SIZE(children_devices); i++)
		if (*children_devices[i].is_enabled)
			count_devices++;
	if (!count_devices)
		return 0;
	for (i = 0; i < ARRAY_SIZE(children_devices); i++)
		if (*children_devices[i].is_enabled)
			minchannels +=
				children_devices[i].audio_driver->minchannels;
	if (minchannels < numchans) {
		extrachannels = numchans - minchannels;
		extrachannels_per_driver = extrachannels / count_devices;
		extrachannels_remainder = extrachannels % count_devices;
	}
	dev_dbg(device, "minchannels %d\n", minchannels);
	dev_dbg(device, "extrachannels %d\n", extrachannels);
	dev_dbg(device, "extrachannels_per_driver %d\n",
		extrachannels_per_driver);
	dev_dbg(device, "extrachannels_remainder %d\n",
		extrachannels_remainder);
	for (i = 0; i < ARRAY_SIZE(children_devices); i++) {
		struct bcm2835_audio_driver *audio_driver;
		int numchannels_this_device;
		int err;
		if (!*children_devices[i].is_enabled)
			continue;
		audio_driver = children_devices[i].audio_driver;
		/* numchans is the remaining budget at this point */
		if (audio_driver->minchannels > numchans) {
			dev_err(device,
				"Out of channels, needed %d but only %d left\n",
				audio_driver->minchannels,
				numchans);
			continue;
		}
		numchannels_this_device =
			audio_driver->minchannels + extrachannels_per_driver +
			extrachannels_remainder;
		/* remainder is consumed by the first card only */
		extrachannels_remainder = 0;
		numchans -= numchannels_this_device;
		err = snd_add_child_device(device, audio_driver,
					   numchannels_this_device);
		if (err)
			return err;
	}
	return 0;
}
/*
 * Platform probe: sanitize the num_channels module parameter, set up
 * the shared VCHIQ context, then create the child sound cards.
 */
static int snd_bcm2835_alsa_probe(struct platform_device *pdev)
{
	struct device *dev = &pdev->dev;
	int err;

	/* Clamp an out-of-range module parameter to the maximum. */
	if (num_channels <= 0 || num_channels > MAX_SUBSTREAMS) {
		num_channels = MAX_SUBSTREAMS;
		dev_warn(dev, "Illegal num_channels value, will use %u\n",
			 num_channels);
	}

	err = bcm2835_devm_add_vchi_ctx(dev);
	if (err)
		return err;

	return snd_add_child_devices(dev, num_channels);
}
#ifdef CONFIG_PM
/* Suspend stub: nothing to do, always succeeds. */
static int snd_bcm2835_alsa_suspend(struct platform_device *pdev,
				    pm_message_t state)
{
	return 0;
}
/* Resume stub: nothing to do, always succeeds. */
static int snd_bcm2835_alsa_resume(struct platform_device *pdev)
{
	return 0;
}
#endif
/* Platform driver glue; PM callbacks are stubs compiled in with CONFIG_PM. */
static struct platform_driver bcm2835_alsa_driver = {
	.probe = snd_bcm2835_alsa_probe,
#ifdef CONFIG_PM
	.suspend = snd_bcm2835_alsa_suspend,
	.resume = snd_bcm2835_alsa_resume,
#endif
	.driver = {
		.name = "bcm2835_audio",
	},
};
module_platform_driver(bcm2835_alsa_driver);
MODULE_AUTHOR("Dom Cobley");
MODULE_DESCRIPTION("Alsa driver for BCM2835 chip");
MODULE_LICENSE("GPL");
MODULE_ALIAS("platform:bcm2835_audio");
| linux-master | drivers/staging/vc04_services/bcm2835-audio/bcm2835.c |
// SPDX-License-Identifier: GPL-2.0
/* Copyright 2011 Broadcom Corporation. All rights reserved. */
#include <linux/slab.h>
#include <linux/module.h>
#include <linux/completion.h>
#include "bcm2835.h"
#include "vc_vchi_audioserv_defs.h"
/* Per-stream connection to the VideoCore "AUDS" audio service. */
struct bcm2835_audio_instance {
	struct device *dev;			/* device used for diagnostics */
	unsigned int service_handle;		/* VCHIQ service handle */
	struct completion msg_avail_comp;	/* signalled on RESULT reply */
	struct mutex vchi_mutex; /* Serialize vchiq access */
	struct bcm2835_alsa_stream *alsa_stream; /* owning ALSA stream */
	int result;			/* result code of last awaited reply */
	unsigned int max_packet;	/* 0 = bulk transfer, else chunk size */
	short peer_version;		/* service version reported by peer */
};
static bool force_bulk;
module_param(force_bulk, bool, 0444);
MODULE_PARM_DESC(force_bulk, "Force use of vchiq bulk for audio");
static void bcm2835_audio_lock(struct bcm2835_audio_instance *instance)
{
mutex_lock(&instance->vchi_mutex);
vchiq_use_service(instance->alsa_stream->chip->vchi_ctx->instance,
instance->service_handle);
}
static void bcm2835_audio_unlock(struct bcm2835_audio_instance *instance)
{
vchiq_release_service(instance->alsa_stream->chip->vchi_ctx->instance,
instance->service_handle);
mutex_unlock(&instance->vchi_mutex);
}
/*
 * Queue a control message to the audio service.  Caller must hold the
 * instance lock (bcm2835_audio_lock()).
 *
 * When @wait is true, block up to 10 s for the service's
 * VC_AUDIO_MSG_TYPE_RESULT reply; audio_vchi_callback() stores the
 * result and completes msg_avail_comp.
 *
 * Returns 0 on success, -EIO on queueing failure or an error result,
 * or -ETIMEDOUT when no reply arrives in time.
 */
static int bcm2835_audio_send_msg_locked(struct bcm2835_audio_instance *instance,
					 struct vc_audio_msg *m, bool wait)
{
	int status;
	if (wait) {
		/* Arm the completion before queueing so the reply can't race us. */
		instance->result = -1;
		init_completion(&instance->msg_avail_comp);
	}
	status = vchiq_queue_kernel_message(instance->alsa_stream->chip->vchi_ctx->instance,
					    instance->service_handle, m, sizeof(*m));
	if (status) {
		dev_err(instance->dev,
			"vchi message queue failed: %d, msg=%d\n",
			status, m->type);
		return -EIO;
	}
	if (wait) {
		if (!wait_for_completion_timeout(&instance->msg_avail_comp,
						 msecs_to_jiffies(10 * 1000))) {
			dev_err(instance->dev,
				"vchi message timeout, msg=%d\n", m->type);
			return -ETIMEDOUT;
		} else if (instance->result) {
			dev_err(instance->dev,
				"vchi message response error:%d, msg=%d\n",
				instance->result, m->type);
			return -EIO;
		}
	}
	return 0;
}
/* Locked wrapper around bcm2835_audio_send_msg_locked(). */
static int bcm2835_audio_send_msg(struct bcm2835_audio_instance *instance,
				  struct vc_audio_msg *m, bool wait)
{
	int ret;

	bcm2835_audio_lock(instance);
	ret = bcm2835_audio_send_msg_locked(instance, m, wait);
	bcm2835_audio_unlock(instance);

	return ret;
}
/* Send a message that carries only a type and no payload. */
static int bcm2835_audio_send_simple(struct bcm2835_audio_instance *instance,
				     int type, bool wait)
{
	struct vc_audio_msg msg = {};

	msg.type = type;
	return bcm2835_audio_send_msg(instance, &msg, wait);
}
/*
 * VCHIQ service callback: dispatch incoming messages from the audio
 * service.  RESULT replies wake a waiting sender; COMPLETE messages
 * (validated by their cookies) advance the playback FIFO.
 */
static int audio_vchi_callback(struct vchiq_instance *vchiq_instance,
			       enum vchiq_reason reason,
			       struct vchiq_header *header,
			       unsigned int handle, void *userdata)
{
	struct bcm2835_audio_instance *instance =
		vchiq_get_service_userdata(vchiq_instance, handle);
	struct vc_audio_msg *m;

	if (reason != VCHIQ_MESSAGE_AVAILABLE)
		return 0;

	m = (void *)header->data;
	switch (m->type) {
	case VC_AUDIO_MSG_TYPE_RESULT:
		instance->result = m->result.success;
		complete(&instance->msg_avail_comp);
		break;
	case VC_AUDIO_MSG_TYPE_COMPLETE:
		if (m->complete.cookie1 != VC_AUDIO_WRITE_COOKIE1 ||
		    m->complete.cookie2 != VC_AUDIO_WRITE_COOKIE2)
			dev_err(instance->dev, "invalid cookie\n");
		else
			bcm2835_playback_fifo(instance->alsa_stream,
					      m->complete.count);
		break;
	default:
		dev_err(instance->dev, "unexpected callback type=%d\n", m->type);
		break;
	}

	vchiq_release_message(vchiq_instance, instance->service_handle, header);
	return 0;
}
/*
 * Open the "AUDS" VCHIQ service for @instance, registering
 * audio_vchi_callback() as its handler.  The service is released again
 * right away; each user re-acquires it via bcm2835_audio_lock().
 *
 * Returns 0 on success or -EPERM if the service cannot be opened.
 */
static int
vc_vchi_audio_init(struct vchiq_instance *vchiq_instance,
		   struct bcm2835_audio_instance *instance)
{
	struct vchiq_service_params_kernel params = {
		.version = VC_AUDIOSERV_VER,
		.version_min = VC_AUDIOSERV_MIN_VER,
		.fourcc = VCHIQ_MAKE_FOURCC('A', 'U', 'D', 'S'),
		.callback = audio_vchi_callback,
		.userdata = instance,
	};
	int status;
	/* Open the VCHI service connections */
	status = vchiq_open_service(vchiq_instance, &params,
				    &instance->service_handle);
	if (status) {
		dev_err(instance->dev,
			"failed to open VCHI service connection (status=%d)\n",
			status);
		return -EPERM;
	}
	/* Finished with the service for now */
	vchiq_release_service(instance->alsa_stream->chip->vchi_ctx->instance,
			      instance->service_handle);
	return 0;
}
/* Close the instance's VCHIQ service connection under the instance lock. */
static void vc_vchi_audio_deinit(struct bcm2835_audio_instance *instance)
{
	int status;
	mutex_lock(&instance->vchi_mutex);
	vchiq_use_service(instance->alsa_stream->chip->vchi_ctx->instance,
			  instance->service_handle);
	/* Close all VCHI service connections */
	status = vchiq_close_service(instance->alsa_stream->chip->vchi_ctx->instance,
				     instance->service_handle);
	if (status) {
		dev_err(instance->dev,
			"failed to close VCHI service connection (status=%d)\n",
			status);
	}
	mutex_unlock(&instance->vchi_mutex);
}
/*
 * Create and connect the card-wide VCHIQ instance.
 *
 * Returns 0 on success or -EIO on initialise/connect failure; on
 * connect failure the instance allocated by vchiq_initialise() is
 * freed and the pointer cleared.
 */
int bcm2835_new_vchi_ctx(struct device *dev, struct bcm2835_vchi_ctx *vchi_ctx)
{
	int ret;
	/* Initialize and create a VCHI connection */
	ret = vchiq_initialise(&vchi_ctx->instance);
	if (ret) {
		dev_err(dev, "failed to initialise VCHI instance (ret=%d)\n",
			ret);
		return -EIO;
	}
	ret = vchiq_connect(vchi_ctx->instance);
	if (ret) {
		dev_dbg(dev, "failed to connect VCHI instance (ret=%d)\n",
			ret);
		kfree(vchi_ctx->instance);
		vchi_ctx->instance = NULL;
		return -EIO;
	}
	return 0;
}
/* Tear down the card-wide VCHIQ connection created by bcm2835_new_vchi_ctx(). */
void bcm2835_free_vchi_ctx(struct bcm2835_vchi_ctx *vchi_ctx)
{
	/* Close the VCHI connection - it will also free vchi_ctx->instance */
	WARN_ON(vchiq_shutdown(vchi_ctx->instance));
	vchi_ctx->instance = NULL;
}
/*
 * Allocate and initialize the VideoCore audio instance for an ALSA
 * stream: open the VCHIQ service, send the OPEN message, and pick the
 * write transport (bulk transfer vs. max_packet-sized messages) based
 * on the peer's service version and the force_bulk parameter.
 *
 * Returns 0 on success or a negative errno; on failure
 * alsa_stream->instance is left NULL.
 */
int bcm2835_audio_open(struct bcm2835_alsa_stream *alsa_stream)
{
	struct bcm2835_vchi_ctx *vchi_ctx = alsa_stream->chip->vchi_ctx;
	struct bcm2835_audio_instance *instance;
	int err;
	/* Allocate memory for this instance */
	instance = kzalloc(sizeof(*instance), GFP_KERNEL);
	if (!instance)
		return -ENOMEM;
	mutex_init(&instance->vchi_mutex);
	instance->dev = alsa_stream->chip->dev;
	instance->alsa_stream = alsa_stream;
	alsa_stream->instance = instance;
	err = vc_vchi_audio_init(vchi_ctx->instance,
				 instance);
	if (err < 0)
		goto free_instance;
	err = bcm2835_audio_send_simple(instance, VC_AUDIO_MSG_TYPE_OPEN,
					false);
	if (err < 0)
		goto deinit;
	bcm2835_audio_lock(instance);
	vchiq_get_peer_version(vchi_ctx->instance, instance->service_handle,
			       &instance->peer_version);
	bcm2835_audio_unlock(instance);
	/* Peers older than version 2 only support bulk transfers. */
	if (instance->peer_version < 2 || force_bulk)
		instance->max_packet = 0; /* bulk transfer */
	else
		instance->max_packet = 4000;
	return 0;
deinit:
	vc_vchi_audio_deinit(instance);
free_instance:
	alsa_stream->instance = NULL;
	kfree(instance);
	return err;
}
int bcm2835_audio_set_ctls(struct bcm2835_alsa_stream *alsa_stream)
{
struct bcm2835_chip *chip = alsa_stream->chip;
struct vc_audio_msg m = {};
m.type = VC_AUDIO_MSG_TYPE_CONTROL;
m.control.dest = chip->dest;
if (!chip->mute)
m.control.volume = CHIP_MIN_VOLUME;
else
m.control.volume = alsa2chip(chip->volume);
return bcm2835_audio_send_msg(alsa_stream->instance, &m, true);
}
/*
 * Configure the stream's channel count, sample rate and bits per
 * sample, re-sending the mixer controls first.
 *
 * Returns 0 on success or a negative errno.
 */
int bcm2835_audio_set_params(struct bcm2835_alsa_stream *alsa_stream,
			     unsigned int channels, unsigned int samplerate,
			     unsigned int bps)
{
	struct vc_audio_msg m = {
		.type = VC_AUDIO_MSG_TYPE_CONFIG,
		.config.channels = channels,
		.config.samplerate = samplerate,
		.config.bps = bps,
	};
	int err;
	/* resend ctls - alsa_stream may not have been open when first send */
	err = bcm2835_audio_set_ctls(alsa_stream);
	if (err)
		return err;
	return bcm2835_audio_send_msg(alsa_stream->instance, &m, true);
}
/* Tell the audio service to start playback (fire-and-forget). */
int bcm2835_audio_start(struct bcm2835_alsa_stream *alsa_stream)
{
	return bcm2835_audio_send_simple(alsa_stream->instance,
					 VC_AUDIO_MSG_TYPE_START, false);
}
/* Tell the audio service to stop playback (fire-and-forget). */
int bcm2835_audio_stop(struct bcm2835_alsa_stream *alsa_stream)
{
	return bcm2835_audio_send_simple(alsa_stream->instance,
					 VC_AUDIO_MSG_TYPE_STOP, false);
}
/* FIXME: this doesn't seem working as expected for "draining" */
/* Ask the audio service to stop after draining queued samples. */
int bcm2835_audio_drain(struct bcm2835_alsa_stream *alsa_stream)
{
	struct vc_audio_msg m = {
		.type = VC_AUDIO_MSG_TYPE_STOP,
		.stop.draining = 1,
	};
	return bcm2835_audio_send_msg(alsa_stream->instance, &m, false);
}
/*
 * Close the service-side stream (waiting for its reply), tear down the
 * VCHIQ service and free the instance.  alsa_stream->instance is
 * cleared even if the CLOSE message failed.
 */
int bcm2835_audio_close(struct bcm2835_alsa_stream *alsa_stream)
{
	struct bcm2835_audio_instance *instance = alsa_stream->instance;
	int err;
	err = bcm2835_audio_send_simple(alsa_stream->instance,
					VC_AUDIO_MSG_TYPE_CLOSE, true);
	/* Stop the audio service */
	vc_vchi_audio_deinit(instance);
	alsa_stream->instance = NULL;
	kfree(instance);
	return err;
}
/*
 * Send @size bytes of audio data to the VideoCore audio service.
 *
 * A VC_AUDIO_MSG_TYPE_WRITE message announcing the payload is queued
 * first; the data then follows either as one blocking bulk transfer
 * (max_packet == 0) or chopped into max_packet-sized control messages.
 *
 * Fix vs. original: in packet mode, stop at the first queueing failure
 * instead of letting a later successful chunk overwrite the error
 * status and silently lose it.
 *
 * Returns 0 on success or a negative errno.
 */
int bcm2835_audio_write(struct bcm2835_alsa_stream *alsa_stream,
			unsigned int size, void *src)
{
	struct bcm2835_audio_instance *instance = alsa_stream->instance;
	struct bcm2835_vchi_ctx *vchi_ctx = alsa_stream->chip->vchi_ctx;
	struct vchiq_instance *vchiq_instance = vchi_ctx->instance;
	struct vc_audio_msg m = {
		.type = VC_AUDIO_MSG_TYPE_WRITE,
		.write.count = size,
		.write.max_packet = instance->max_packet,
		.write.cookie1 = VC_AUDIO_WRITE_COOKIE1,
		.write.cookie2 = VC_AUDIO_WRITE_COOKIE2,
	};
	unsigned int count;
	int err, status;
	if (!size)
		return 0;
	bcm2835_audio_lock(instance);
	err = bcm2835_audio_send_msg_locked(instance, &m, false);
	if (err < 0)
		goto unlock;
	count = size;
	if (!instance->max_packet) {
		/* Send the message to the videocore */
		status = vchiq_bulk_transmit(vchiq_instance, instance->service_handle, src, count,
					     NULL, VCHIQ_BULK_MODE_BLOCKING);
	} else {
		while (count > 0) {
			int bytes = min(instance->max_packet, count);
			status = vchiq_queue_kernel_message(vchiq_instance,
							    instance->service_handle, src, bytes);
			/* Bail out on the first failed chunk so the error
			 * is not masked by a later success.
			 */
			if (status)
				break;
			src += bytes;
			count -= bytes;
		}
	}
	if (status) {
		dev_err(instance->dev,
			"failed on %d bytes transfer (status=%d)\n",
			size, status);
		err = -EIO;
	}
 unlock:
	bcm2835_audio_unlock(instance);
	return err;
}
| linux-master | drivers/staging/vc04_services/bcm2835-audio/bcm2835-vchiq.c |
// SPDX-License-Identifier: GPL-2.0
/* Copyright 2011 Broadcom Corporation. All rights reserved. */
#include <sound/core.h>
#include <sound/control.h>
#include <sound/tlv.h>
#include <sound/asoundef.h>
#include "bcm2835.h"
/* volume maximum and minimum in terms of 0.01dB */
#define CTRL_VOL_MAX 400
#define CTRL_VOL_MIN -10239 /* originally -10240 */
/*
 * Propagate the chip's control settings to every open substream,
 * stopping at the first failure.
 */
static int bcm2835_audio_set_chip_ctls(struct bcm2835_chip *chip)
{
	int err = 0;
	int i;

	/* change ctls for all substreams */
	for (i = 0; i < MAX_SUBSTREAMS; i++) {
		struct bcm2835_alsa_stream *stream = chip->alsa_stream[i];

		if (!stream)
			continue;
		err = bcm2835_audio_set_ctls(stream);
		if (err < 0)
			break;
	}
	return err;
}
/*
 * Describe one of the three mixer controls (volume, mute, route),
 * selected by the kcontrol's private_value.
 */
static int snd_bcm2835_ctl_info(struct snd_kcontrol *kcontrol,
				struct snd_ctl_elem_info *uinfo)
{
	switch (kcontrol->private_value) {
	case PCM_PLAYBACK_VOLUME:
		uinfo->type = SNDRV_CTL_ELEM_TYPE_INTEGER;
		uinfo->count = 1;
		uinfo->value.integer.min = CTRL_VOL_MIN;
		uinfo->value.integer.max = CTRL_VOL_MAX; /* 2303 */
		break;
	case PCM_PLAYBACK_MUTE:
		uinfo->type = SNDRV_CTL_ELEM_TYPE_BOOLEAN;
		uinfo->count = 1;
		uinfo->value.integer.min = 0;
		uinfo->value.integer.max = 1;
		break;
	case PCM_PLAYBACK_DEVICE:
		uinfo->type = SNDRV_CTL_ELEM_TYPE_INTEGER;
		uinfo->count = 1;
		uinfo->value.integer.min = 0;
		uinfo->value.integer.max = AUDIO_DEST_MAX - 1;
		break;
	}
	return 0;
}
/* Read the cached value of the selected mixer control. */
static int snd_bcm2835_ctl_get(struct snd_kcontrol *kcontrol,
			       struct snd_ctl_elem_value *ucontrol)
{
	struct bcm2835_chip *chip = snd_kcontrol_chip(kcontrol);

	mutex_lock(&chip->audio_mutex);
	switch (kcontrol->private_value) {
	case PCM_PLAYBACK_VOLUME:
		ucontrol->value.integer.value[0] = chip->volume;
		break;
	case PCM_PLAYBACK_MUTE:
		ucontrol->value.integer.value[0] = chip->mute;
		break;
	case PCM_PLAYBACK_DEVICE:
		ucontrol->value.integer.value[0] = chip->dest;
		break;
	}
	mutex_unlock(&chip->audio_mutex);
	return 0;
}
/*
 * Update the selected mixer control; when the value changed, push the
 * new settings to all open substreams.  Returns 1 if the value
 * changed, 0 otherwise, -EINVAL for an unknown control.
 */
static int snd_bcm2835_ctl_put(struct snd_kcontrol *kcontrol,
			       struct snd_ctl_elem_value *ucontrol)
{
	struct bcm2835_chip *chip = snd_kcontrol_chip(kcontrol);
	int changed = 0;
	int *valp;
	int val;

	/* Map the control id to the chip field it mirrors. */
	switch (kcontrol->private_value) {
	case PCM_PLAYBACK_VOLUME:
		valp = &chip->volume;
		break;
	case PCM_PLAYBACK_MUTE:
		valp = &chip->mute;
		break;
	case PCM_PLAYBACK_DEVICE:
		valp = &chip->dest;
		break;
	default:
		return -EINVAL;
	}

	val = ucontrol->value.integer.value[0];
	mutex_lock(&chip->audio_mutex);
	if (val != *valp) {
		*valp = val;
		changed = 1;
		if (bcm2835_audio_set_chip_ctls(chip))
			dev_err(chip->card->dev, "Failed to set ALSA controls..\n");
	}
	mutex_unlock(&chip->audio_mutex);
	return changed;
}
static DECLARE_TLV_DB_SCALE(snd_bcm2835_db_scale, CTRL_VOL_MIN, 1, 1);
/* Mixer controls: master volume (with dB TLV scale) and mute switch. */
static const struct snd_kcontrol_new snd_bcm2835_ctl[] = {
	{
		.iface = SNDRV_CTL_ELEM_IFACE_MIXER,
		.name = "PCM Playback Volume",
		.access = SNDRV_CTL_ELEM_ACCESS_READWRITE | SNDRV_CTL_ELEM_ACCESS_TLV_READ,
		.private_value = PCM_PLAYBACK_VOLUME,
		.info = snd_bcm2835_ctl_info,
		.get = snd_bcm2835_ctl_get,
		.put = snd_bcm2835_ctl_put,
		.tlv = {.p = snd_bcm2835_db_scale}
	},
	{
		.iface = SNDRV_CTL_ELEM_IFACE_MIXER,
		.name = "PCM Playback Switch",
		.access = SNDRV_CTL_ELEM_ACCESS_READWRITE,
		.private_value = PCM_PLAYBACK_MUTE,
		.info = snd_bcm2835_ctl_info,
		.get = snd_bcm2835_ctl_get,
		.put = snd_bcm2835_ctl_put,
	},
};
/* Describe the IEC958 default control: a single IEC958 element. */
static int snd_bcm2835_spdif_default_info(struct snd_kcontrol *kcontrol,
					  struct snd_ctl_elem_info *uinfo)
{
	uinfo->type = SNDRV_CTL_ELEM_TYPE_IEC958;
	uinfo->count = 1;
	return 0;
}
/* Report the cached IEC958 channel-status word, byte by byte. */
static int snd_bcm2835_spdif_default_get(struct snd_kcontrol *kcontrol,
					 struct snd_ctl_elem_value *ucontrol)
{
	struct bcm2835_chip *chip = snd_kcontrol_chip(kcontrol);
	unsigned int status;
	int i;

	mutex_lock(&chip->audio_mutex);
	status = chip->spdif_status;
	mutex_unlock(&chip->audio_mutex);

	/* Unpack the status word into the first four AES bytes. */
	for (i = 0; i < 4; i++)
		ucontrol->value.iec958.status[i] = (status >> (i * 8)) & 0xff;
	return 0;
}
/*
 * Store a new IEC958 channel-status word.  Returns 1 when the value
 * changed, 0 otherwise.
 */
static int snd_bcm2835_spdif_default_put(struct snd_kcontrol *kcontrol,
					 struct snd_ctl_elem_value *ucontrol)
{
	struct bcm2835_chip *chip = snd_kcontrol_chip(kcontrol);
	unsigned int val = 0;
	int change;
	int i;

	/* Pack the first four AES status bytes into one word. */
	for (i = 0; i < 4; i++)
		val |= (unsigned int)ucontrol->value.iec958.status[i] << (i * 8);

	mutex_lock(&chip->audio_mutex);
	change = (val != chip->spdif_status);
	chip->spdif_status = val;
	mutex_unlock(&chip->audio_mutex);
	return change;
}
/* Describe the IEC958 consumer-mask control: a single IEC958 element. */
static int snd_bcm2835_spdif_mask_info(struct snd_kcontrol *kcontrol,
				       struct snd_ctl_elem_info *uinfo)
{
	uinfo->type = SNDRV_CTL_ELEM_TYPE_IEC958;
	uinfo->count = 1;
	return 0;
}
/* Report which channel-status bits userspace may change. */
static int snd_bcm2835_spdif_mask_get(struct snd_kcontrol *kcontrol,
				      struct snd_ctl_elem_value *ucontrol)
{
	/*
	 * bcm2835 supports only consumer mode and sets all other format flags
	 * automatically. So the only thing left is signalling non-audio content
	 */
	ucontrol->value.iec958.status[0] = IEC958_AES0_NONAUDIO;
	return 0;
}
/* IEC958 (S/PDIF) controls: writable default status word and read-only mask. */
static const struct snd_kcontrol_new snd_bcm2835_spdif[] = {
	{
		.iface = SNDRV_CTL_ELEM_IFACE_PCM,
		.name = SNDRV_CTL_NAME_IEC958("", PLAYBACK, DEFAULT),
		.info = snd_bcm2835_spdif_default_info,
		.get = snd_bcm2835_spdif_default_get,
		.put = snd_bcm2835_spdif_default_put
	},
	{
		.access = SNDRV_CTL_ELEM_ACCESS_READ,
		.iface = SNDRV_CTL_ELEM_IFACE_PCM,
		.name = SNDRV_CTL_NAME_IEC958("", PLAYBACK, CON_MASK),
		.info = snd_bcm2835_spdif_mask_info,
		.get = snd_bcm2835_spdif_mask_get,
	},
};
/*
 * Register an array of @size mixer controls with the chip's card.
 *
 * Fix vs. original: the loop index was a signed int compared against a
 * size_t, triggering -Wsign-compare; use size_t for the index.
 *
 * Returns 0 on success or the first negative errno from snd_ctl_add().
 */
static int create_ctls(struct bcm2835_chip *chip, size_t size,
		       const struct snd_kcontrol_new *kctls)
{
	size_t i;
	int err;

	for (i = 0; i < size; i++) {
		err = snd_ctl_add(chip->card, snd_ctl_new1(&kctls[i], chip));
		if (err < 0)
			return err;
	}
	return 0;
}
int snd_bcm2835_new_headphones_ctl(struct bcm2835_chip *chip)
{
strscpy(chip->card->mixername, "Broadcom Mixer", sizeof(chip->card->mixername));
return create_ctls(chip, ARRAY_SIZE(snd_bcm2835_ctl),
snd_bcm2835_ctl);
}
int snd_bcm2835_new_hdmi_ctl(struct bcm2835_chip *chip)
{
int err;
strscpy(chip->card->mixername, "Broadcom Mixer", sizeof(chip->card->mixername));
err = create_ctls(chip, ARRAY_SIZE(snd_bcm2835_ctl), snd_bcm2835_ctl);
if (err < 0)
return err;
return create_ctls(chip, ARRAY_SIZE(snd_bcm2835_spdif),
snd_bcm2835_spdif);
}
| linux-master | drivers/staging/vc04_services/bcm2835-audio/bcm2835-ctl.c |
// SPDX-License-Identifier: GPL-2.0+
/*
* Copyright (c) 1996, 2003 VIA Networking Technologies, Inc.
* All rights reserved.
*
* Purpose: handle WMAC/802.3/802.11 rx & tx functions
*
* Author: Lyndon Chen
*
* Date: May 20, 2003
*
* Functions:
* s_vGenerateTxParameter - Generate tx dma required parameter.
* vGenerateMACHeader - Translate 802.3 to 802.11 header
* cbGetFragCount - Calculate fragment number count
* csBeacon_xmit - beacon tx function
* csMgmt_xmit - management tx function
* s_cbFillTxBufHead - fulfill tx dma buffer header
* s_uGetDataDuration - get tx data required duration
* s_uFillDataHead- fulfill tx data duration header
* s_uGetRTSCTSDuration- get rtx/cts required duration
* get_rtscts_time- get rts/cts reserved time
* s_uGetTxRsvTime- get frame reserved time
* s_vFillCTSHead- fulfill CTS ctl header
* s_vFillFragParameter- Set fragment ctl parameter.
* s_vFillRTSHead- fulfill RTS ctl header
* s_vFillTxKey- fulfill tx encrypt key
* s_vSWencryption- Software encrypt header
* vDMA0_tx_80211- tx 802.11 frame via dma0
* vGenerateFIFOHeader- Generate tx FIFO ctl header
*
* Revision History:
*
*/
#include "device.h"
#include "rxtx.h"
#include "card.h"
#include "mac.h"
#include "baseband.h"
#include "rf.h"
/*--------------------- Static Definitions -------------------------*/
/*--------------------- Static Classes ----------------------------*/
/*--------------------- Static Variables --------------------------*/
/*--------------------- Static Functions --------------------------*/
/*--------------------- Static Definitions -------------------------*/
/* if packet size < 256 -> in-direct send
* vpacket size >= 256 -> direct send
*/
#define CRITICAL_PACKET_LEN 256
static const unsigned short time_stamp_off[2][MAX_RATE] = {
{384, 288, 226, 209, 54, 43, 37, 31, 28, 25, 24, 23}, /* Long Preamble */
{384, 192, 130, 113, 54, 43, 37, 31, 28, 25, 24, 23}, /* Short Preamble */
};
static const unsigned short fb_opt0[2][5] = {
{RATE_12M, RATE_18M, RATE_24M, RATE_36M, RATE_48M}, /* fallback_rate0 */
{RATE_12M, RATE_12M, RATE_18M, RATE_24M, RATE_36M}, /* fallback_rate1 */
};
static const unsigned short fb_opt1[2][5] = {
{RATE_12M, RATE_18M, RATE_24M, RATE_24M, RATE_36M}, /* fallback_rate0 */
{RATE_6M, RATE_6M, RATE_12M, RATE_12M, RATE_18M}, /* fallback_rate1 */
};
#define RTSDUR_BB 0
#define RTSDUR_BA 1
#define RTSDUR_AA 2
#define CTSDUR_BA 3
#define RTSDUR_BA_F0 4
#define RTSDUR_AA_F0 5
#define RTSDUR_BA_F1 6
#define RTSDUR_AA_F1 7
#define CTSDUR_BA_F0 8
#define CTSDUR_BA_F1 9
#define DATADUR_B 10
#define DATADUR_A 11
#define DATADUR_A_F0 12
#define DATADUR_A_F1 13
/*--------------------- Static Functions --------------------------*/
static
void
s_vFillRTSHead(
struct vnt_private *pDevice,
unsigned char byPktType,
void *pvRTS,
unsigned int cbFrameLength,
bool bNeedAck,
bool bDisCRC,
struct ieee80211_hdr *hdr,
unsigned short wCurrentRate,
unsigned char byFBOption
);
static
void
s_vGenerateTxParameter(
struct vnt_private *pDevice,
unsigned char byPktType,
struct vnt_tx_fifo_head *,
void *pvRrvTime,
void *pvRTS,
void *pvCTS,
unsigned int cbFrameSize,
bool bNeedACK,
unsigned int uDMAIdx,
void *psEthHeader,
unsigned short wCurrentRate
);
static unsigned int
s_cbFillTxBufHead(struct vnt_private *pDevice, unsigned char byPktType,
unsigned char *pbyTxBufferAddr,
unsigned int uDMAIdx, struct vnt_tx_desc *pHeadTD,
unsigned int uNodeIndex);
static
__le16
s_uFillDataHead(
struct vnt_private *pDevice,
unsigned char byPktType,
void *pTxDataHead,
unsigned int cbFrameLength,
unsigned int uDMAIdx,
bool bNeedAck,
unsigned int uFragIdx,
unsigned int cbLastFragmentSize,
unsigned int uMACfragNum,
unsigned char byFBOption,
unsigned short wCurrentRate,
bool is_pspoll
);
/*--------------------- Export Variables --------------------------*/
/* Look up the timestamp offset for a rate, keyed by preamble type. */
static __le16 vnt_time_stamp_off(struct vnt_private *priv, u16 rate)
{
	unsigned int preamble = priv->preamble_type % 2;

	return cpu_to_le16(time_stamp_off[preamble][rate % MAX_RATE]);
}
/* byPktType : PK_TYPE_11A 0
* PK_TYPE_11B 1
* PK_TYPE_11GB 2
* PK_TYPE_11GA 3
*/
/*
 * Compute the reserved airtime (units as returned by
 * bb_get_frame_time()) for sending a frame of @cbFrameLength bytes at
 * @wRate, plus the SIFS gap and the peer's 14-byte ACK when
 * @bNeedAck is set.
 */
static
unsigned int
s_uGetTxRsvTime(
	struct vnt_private *pDevice,
	unsigned char byPktType,
	unsigned int cbFrameLength,
	unsigned short wRate,
	bool bNeedAck
)
{
	unsigned int uDataTime, uAckTime;
	uDataTime = bb_get_frame_time(pDevice->preamble_type, byPktType, cbFrameLength, wRate);
	if (!bNeedAck)
		return uDataTime;
	/*
	 * CCK mode - 11b
	 * OFDM mode - 11g 2.4G & 11a 5G
	 */
	uAckTime = bb_get_frame_time(pDevice->preamble_type, byPktType, 14,
				     byPktType == PK_TYPE_11B ?
				     pDevice->byTopCCKBasicRate :
				     pDevice->byTopOFDMBasicRate);
	return uDataTime + pDevice->uSIFS + uAckTime;
}
/* Little-endian wrapper around s_uGetTxRsvTime() for on-wire headers. */
static __le16 vnt_rxtx_rsvtime_le16(struct vnt_private *priv, u8 pkt_type,
				    u32 frame_length, u16 rate, bool need_ack)
{
	return cpu_to_le16((u16)s_uGetTxRsvTime(priv, pkt_type,
						frame_length, rate, need_ack));
}
/* byFreqType: 0=>5GHZ 1=>2.4GHZ */
/*
 * Compute the reserved time for an RTS/CTS protected exchange.
 *
 * @rts_rsvtype selects the scenario:
 *   0 - RTS, both sides CCK
 *   1 - RTS in 2.4 GHz mixed mode (CCK RTS/CTS, OFDM ACK)
 *   2 - RTS, both sides OFDM
 *   3 - CTS-to-self in 2.4 GHz (no RTS, only two SIFS gaps)
 * Any other value reserves data + 3 * SIFS only.
 */
static __le16 get_rtscts_time(struct vnt_private *priv,
			      unsigned char rts_rsvtype,
			      unsigned char pkt_type,
			      unsigned int frame_length,
			      unsigned short current_rate)
{
	unsigned int rrv_time;
	unsigned int rts_time = 0;
	unsigned int cts_time = 0;
	unsigned int ack_time = 0;
	unsigned int data_time;

	data_time = bb_get_frame_time(priv->preamble_type, pkt_type,
				      frame_length, current_rate);

	switch (rts_rsvtype) {
	case 0: /* RTSTxRrvTime_bb */
		rts_time = bb_get_frame_time(priv->preamble_type, pkt_type,
					     20, priv->byTopCCKBasicRate);
		ack_time = bb_get_frame_time(priv->preamble_type, pkt_type,
					     14, priv->byTopCCKBasicRate);
		cts_time = ack_time;
		break;
	case 1: /* RTSTxRrvTime_ba, only in 2.4GHZ */
		rts_time = bb_get_frame_time(priv->preamble_type, pkt_type,
					     20, priv->byTopCCKBasicRate);
		cts_time = bb_get_frame_time(priv->preamble_type, pkt_type,
					     14, priv->byTopCCKBasicRate);
		ack_time = bb_get_frame_time(priv->preamble_type, pkt_type,
					     14, priv->byTopOFDMBasicRate);
		break;
	case 2: /* RTSTxRrvTime_aa */
		rts_time = bb_get_frame_time(priv->preamble_type, pkt_type,
					     20, priv->byTopOFDMBasicRate);
		ack_time = bb_get_frame_time(priv->preamble_type, pkt_type,
					     14, priv->byTopOFDMBasicRate);
		cts_time = ack_time;
		break;
	case 3: /* CTSTxRrvTime_ba, only in 2.4GHZ */
		cts_time = bb_get_frame_time(priv->preamble_type, pkt_type,
					     14, priv->byTopCCKBasicRate);
		ack_time = bb_get_frame_time(priv->preamble_type, pkt_type,
					     14, priv->byTopOFDMBasicRate);
		rrv_time = cts_time + ack_time + data_time + 2 * priv->uSIFS;
		return cpu_to_le16((u16)rrv_time);
	default:
		break;
	}

	/* RTSRrvTime */
	rrv_time = rts_time + cts_time + ack_time + data_time + 3 * priv->uSIFS;
	return cpu_to_le16((u16)rrv_time);
}
/* byFreqType 0: 5GHz, 1:2.4Ghz */
/*
 * Compute the 802.11 Duration field value for a data frame.
 *
 * @byDurType selects the variant (DATADUR_B, DATADUR_A, and the
 * auto-fallback variants DATADUR_A_F0/DATADUR_A_F1, which remap
 * @wRate through the fb_opt0/fb_opt1 tables).  For a fragmented burst
 * the duration additionally reserves airtime for the following
 * fragment (cbLastFragmentSize bytes when the next one is the last).
 *
 * Returns the duration in the units used by bb_get_frame_time(), or 0
 * for an unknown @byDurType or an unacknowledged final fragment.
 */
static
unsigned int
s_uGetDataDuration(
	struct vnt_private *pDevice,
	unsigned char byDurType,
	unsigned int cbFrameLength,
	unsigned char byPktType,
	unsigned short wRate,
	bool bNeedAck,
	unsigned int uFragIdx,
	unsigned int cbLastFragmentSize,
	unsigned int uMACfragNum,
	unsigned char byFBOption
)
{
	bool bLastFrag = false;
	unsigned int uAckTime = 0, uNextPktTime = 0, len;
	if (uFragIdx == (uMACfragNum - 1))
		bLastFrag = true;
	/* The fragment after this one is the last: it has the short length. */
	if (uFragIdx == (uMACfragNum - 2))
		len = cbLastFragmentSize;
	else
		len = cbFrameLength;
	switch (byDurType) {
	case DATADUR_B: /* DATADUR_B */
		if (bNeedAck) {
			uAckTime = bb_get_frame_time(pDevice->preamble_type,
						     byPktType, 14,
						     pDevice->byTopCCKBasicRate);
		}
		/* Non Frag or Last Frag */
		if ((uMACfragNum == 1) || bLastFrag) {
			if (!bNeedAck)
				return 0;
		} else {
			/* First Frag or Mid Frag */
			uNextPktTime = s_uGetTxRsvTime(pDevice, byPktType,
						       len, wRate, bNeedAck);
		}
		return pDevice->uSIFS + uAckTime + uNextPktTime;
	case DATADUR_A: /* DATADUR_A */
		if (bNeedAck) {
			uAckTime = bb_get_frame_time(pDevice->preamble_type,
						     byPktType, 14,
						     pDevice->byTopOFDMBasicRate);
		}
		/* Non Frag or Last Frag */
		if ((uMACfragNum == 1) || bLastFrag) {
			if (!bNeedAck)
				return 0;
		} else {
			/* First Frag or Mid Frag */
			uNextPktTime = s_uGetTxRsvTime(pDevice, byPktType,
						       len, wRate, bNeedAck);
		}
		return pDevice->uSIFS + uAckTime + uNextPktTime;
	case DATADUR_A_F0: /* DATADUR_A_F0 */
	case DATADUR_A_F1: /* DATADUR_A_F1 */
		if (bNeedAck) {
			uAckTime = bb_get_frame_time(pDevice->preamble_type,
						     byPktType, 14,
						     pDevice->byTopOFDMBasicRate);
		}
		/* Non Frag or Last Frag */
		if ((uMACfragNum == 1) || bLastFrag) {
			if (!bNeedAck)
				return 0;
		} else {
			/* First Frag or Mid Frag */
			/* Clamp into the fallback tables' RATE_18M..RATE_54M range. */
			if (wRate < RATE_18M)
				wRate = RATE_18M;
			else if (wRate > RATE_54M)
				wRate = RATE_54M;
			wRate -= RATE_18M;
			if (byFBOption == AUTO_FB_0)
				wRate = fb_opt0[FB_RATE0][wRate];
			else
				wRate = fb_opt1[FB_RATE0][wRate];
			uNextPktTime = s_uGetTxRsvTime(pDevice, byPktType,
						       len, wRate, bNeedAck);
		}
		return pDevice->uSIFS + uAckTime + uNextPktTime;
	default:
		break;
	}
	return 0;
}
/* byFreqType: 0=>5GHZ 1=>2.4GHZ */
/*
 * Compute the Duration field carried in RTS or CTS control frames.
 *
 * @byDurType selects the scenario (see the RTSDUR_*/CTSDUR_* defines);
 * the _F0/_F1 variants substitute the auto-fallback rates from
 * fb_opt0/fb_opt1 (indexed by @wRate - RATE_18M) for the data frame,
 * and yield 0 when @wRate is outside RATE_18M..RATE_54M or
 * @byFBOption matches neither fallback set.
 *
 * Returns the little-endian duration, or 0 for an unknown type.
 */
static
__le16
s_uGetRTSCTSDuration(
	struct vnt_private *pDevice,
	unsigned char byDurType,
	unsigned int cbFrameLength,
	unsigned char byPktType,
	unsigned short wRate,
	bool bNeedAck,
	unsigned char byFBOption
)
{
	unsigned int uCTSTime = 0, uDurTime = 0;
	switch (byDurType) {
	case RTSDUR_BB: /* RTSDuration_bb */
		uCTSTime = bb_get_frame_time(pDevice->preamble_type, byPktType, 14, pDevice->byTopCCKBasicRate);
		uDurTime = uCTSTime + 2 * pDevice->uSIFS + s_uGetTxRsvTime(pDevice, byPktType, cbFrameLength, wRate, bNeedAck);
		break;
	case RTSDUR_BA: /* RTSDuration_ba */
		uCTSTime = bb_get_frame_time(pDevice->preamble_type, byPktType, 14, pDevice->byTopCCKBasicRate);
		uDurTime = uCTSTime + 2 * pDevice->uSIFS + s_uGetTxRsvTime(pDevice, byPktType, cbFrameLength, wRate, bNeedAck);
		break;
	case RTSDUR_AA: /* RTSDuration_aa */
		uCTSTime = bb_get_frame_time(pDevice->preamble_type, byPktType, 14, pDevice->byTopOFDMBasicRate);
		uDurTime = uCTSTime + 2 * pDevice->uSIFS + s_uGetTxRsvTime(pDevice, byPktType, cbFrameLength, wRate, bNeedAck);
		break;
	case CTSDUR_BA: /* CTSDuration_ba */
		uDurTime = pDevice->uSIFS + s_uGetTxRsvTime(pDevice, byPktType, cbFrameLength, wRate, bNeedAck);
		break;
	case RTSDUR_BA_F0: /* RTSDuration_ba_f0 */
		uCTSTime = bb_get_frame_time(pDevice->preamble_type, byPktType, 14, pDevice->byTopCCKBasicRate);
		if ((byFBOption == AUTO_FB_0) && (wRate >= RATE_18M) && (wRate <= RATE_54M))
			uDurTime = uCTSTime + 2 * pDevice->uSIFS + s_uGetTxRsvTime(pDevice, byPktType, cbFrameLength, fb_opt0[FB_RATE0][wRate - RATE_18M], bNeedAck);
		else if ((byFBOption == AUTO_FB_1) && (wRate >= RATE_18M) && (wRate <= RATE_54M))
			uDurTime = uCTSTime + 2 * pDevice->uSIFS + s_uGetTxRsvTime(pDevice, byPktType, cbFrameLength, fb_opt1[FB_RATE0][wRate - RATE_18M], bNeedAck);
		break;
	case RTSDUR_AA_F0: /* RTSDuration_aa_f0 */
		uCTSTime = bb_get_frame_time(pDevice->preamble_type, byPktType, 14, pDevice->byTopOFDMBasicRate);
		if ((byFBOption == AUTO_FB_0) && (wRate >= RATE_18M) && (wRate <= RATE_54M))
			uDurTime = uCTSTime + 2 * pDevice->uSIFS + s_uGetTxRsvTime(pDevice, byPktType, cbFrameLength, fb_opt0[FB_RATE0][wRate - RATE_18M], bNeedAck);
		else if ((byFBOption == AUTO_FB_1) && (wRate >= RATE_18M) && (wRate <= RATE_54M))
			uDurTime = uCTSTime + 2 * pDevice->uSIFS + s_uGetTxRsvTime(pDevice, byPktType, cbFrameLength, fb_opt1[FB_RATE0][wRate - RATE_18M], bNeedAck);
		break;
	case RTSDUR_BA_F1: /* RTSDuration_ba_f1 */
		uCTSTime = bb_get_frame_time(pDevice->preamble_type, byPktType, 14, pDevice->byTopCCKBasicRate);
		if ((byFBOption == AUTO_FB_0) && (wRate >= RATE_18M) && (wRate <= RATE_54M))
			uDurTime = uCTSTime + 2 * pDevice->uSIFS + s_uGetTxRsvTime(pDevice, byPktType, cbFrameLength, fb_opt0[FB_RATE1][wRate - RATE_18M], bNeedAck);
		else if ((byFBOption == AUTO_FB_1) && (wRate >= RATE_18M) && (wRate <= RATE_54M))
			uDurTime = uCTSTime + 2 * pDevice->uSIFS + s_uGetTxRsvTime(pDevice, byPktType, cbFrameLength, fb_opt1[FB_RATE1][wRate - RATE_18M], bNeedAck);
		break;
	case RTSDUR_AA_F1: /* RTSDuration_aa_f1 */
		uCTSTime = bb_get_frame_time(pDevice->preamble_type, byPktType, 14, pDevice->byTopOFDMBasicRate);
		if ((byFBOption == AUTO_FB_0) && (wRate >= RATE_18M) && (wRate <= RATE_54M))
			uDurTime = uCTSTime + 2 * pDevice->uSIFS + s_uGetTxRsvTime(pDevice, byPktType, cbFrameLength, fb_opt0[FB_RATE1][wRate - RATE_18M], bNeedAck);
		else if ((byFBOption == AUTO_FB_1) && (wRate >= RATE_18M) && (wRate <= RATE_54M))
			uDurTime = uCTSTime + 2 * pDevice->uSIFS + s_uGetTxRsvTime(pDevice, byPktType, cbFrameLength, fb_opt1[FB_RATE1][wRate - RATE_18M], bNeedAck);
		break;
	case CTSDUR_BA_F0: /* CTSDuration_ba_f0 */
		if ((byFBOption == AUTO_FB_0) && (wRate >= RATE_18M) && (wRate <= RATE_54M))
			uDurTime = pDevice->uSIFS + s_uGetTxRsvTime(pDevice, byPktType, cbFrameLength, fb_opt0[FB_RATE0][wRate - RATE_18M], bNeedAck);
		else if ((byFBOption == AUTO_FB_1) && (wRate >= RATE_18M) && (wRate <= RATE_54M))
			uDurTime = pDevice->uSIFS + s_uGetTxRsvTime(pDevice, byPktType, cbFrameLength, fb_opt1[FB_RATE0][wRate - RATE_18M], bNeedAck);
		break;
	case CTSDUR_BA_F1: /* CTSDuration_ba_f1 */
		if ((byFBOption == AUTO_FB_0) && (wRate >= RATE_18M) && (wRate <= RATE_54M))
			uDurTime = pDevice->uSIFS + s_uGetTxRsvTime(pDevice, byPktType, cbFrameLength, fb_opt0[FB_RATE1][wRate - RATE_18M], bNeedAck);
		else if ((byFBOption == AUTO_FB_1) && (wRate >= RATE_18M) && (wRate <= RATE_54M))
			uDurTime = pDevice->uSIFS + s_uGetTxRsvTime(pDevice, byPktType, cbFrameLength, fb_opt1[FB_RATE1][wRate - RATE_18M], bNeedAck);
		break;
	default:
		break;
	}
	return cpu_to_le16((u16)uDurTime);
}
static
__le16
s_uFillDataHead(
struct vnt_private *pDevice,
unsigned char byPktType,
void *pTxDataHead,
unsigned int cbFrameLength,
unsigned int uDMAIdx,
bool bNeedAck,
unsigned int uFragIdx,
unsigned int cbLastFragmentSize,
unsigned int uMACfragNum,
unsigned char byFBOption,
unsigned short wCurrentRate,
bool is_pspoll
)
{
struct vnt_tx_datahead_ab *buf = pTxDataHead;
if (!pTxDataHead)
return 0;
if (byPktType == PK_TYPE_11GB || byPktType == PK_TYPE_11GA) {
/* Auto Fallback */
struct vnt_tx_datahead_g_fb *buf = pTxDataHead;
if (byFBOption == AUTO_FB_NONE) {
struct vnt_tx_datahead_g *buf = pTxDataHead;
/* Get SignalField, ServiceField & Length */
vnt_get_phy_field(pDevice, cbFrameLength, wCurrentRate,
byPktType, &buf->a);
vnt_get_phy_field(pDevice, cbFrameLength,
pDevice->byTopCCKBasicRate,
PK_TYPE_11B, &buf->b);
if (is_pspoll) {
__le16 dur = cpu_to_le16(pDevice->current_aid | BIT(14) | BIT(15));
buf->duration_a = dur;
buf->duration_b = dur;
} else {
/* Get Duration and TimeStamp */
buf->duration_a =
cpu_to_le16((u16)s_uGetDataDuration(pDevice, DATADUR_A, cbFrameLength,
byPktType, wCurrentRate, bNeedAck, uFragIdx,
cbLastFragmentSize, uMACfragNum,
byFBOption));
buf->duration_b =
cpu_to_le16((u16)s_uGetDataDuration(pDevice, DATADUR_B, cbFrameLength,
PK_TYPE_11B, pDevice->byTopCCKBasicRate,
bNeedAck, uFragIdx, cbLastFragmentSize,
uMACfragNum, byFBOption));
}
buf->time_stamp_off_a = vnt_time_stamp_off(pDevice, wCurrentRate);
buf->time_stamp_off_b = vnt_time_stamp_off(pDevice, pDevice->byTopCCKBasicRate);
return buf->duration_a;
}
/* Get SignalField, ServiceField & Length */
vnt_get_phy_field(pDevice, cbFrameLength, wCurrentRate,
byPktType, &buf->a);
vnt_get_phy_field(pDevice, cbFrameLength,
pDevice->byTopCCKBasicRate,
PK_TYPE_11B, &buf->b);
/* Get Duration and TimeStamp */
buf->duration_a = cpu_to_le16((u16)s_uGetDataDuration(pDevice, DATADUR_A, cbFrameLength, byPktType,
wCurrentRate, bNeedAck, uFragIdx, cbLastFragmentSize, uMACfragNum, byFBOption));
buf->duration_b = cpu_to_le16((u16)s_uGetDataDuration(pDevice, DATADUR_B, cbFrameLength, PK_TYPE_11B,
pDevice->byTopCCKBasicRate, bNeedAck, uFragIdx, cbLastFragmentSize, uMACfragNum, byFBOption));
buf->duration_a_f0 = cpu_to_le16((u16)s_uGetDataDuration(pDevice, DATADUR_A_F0, cbFrameLength, byPktType,
wCurrentRate, bNeedAck, uFragIdx, cbLastFragmentSize, uMACfragNum, byFBOption));
buf->duration_a_f1 = cpu_to_le16((u16)s_uGetDataDuration(pDevice, DATADUR_A_F1, cbFrameLength, byPktType,
wCurrentRate, bNeedAck, uFragIdx, cbLastFragmentSize, uMACfragNum, byFBOption));
buf->time_stamp_off_a = vnt_time_stamp_off(pDevice, wCurrentRate);
buf->time_stamp_off_b = vnt_time_stamp_off(pDevice, pDevice->byTopCCKBasicRate);
return buf->duration_a;
/* if (byFBOption == AUTO_FB_NONE) */
} else if (byPktType == PK_TYPE_11A) {
struct vnt_tx_datahead_ab *buf = pTxDataHead;
if (byFBOption != AUTO_FB_NONE) {
/* Auto Fallback */
struct vnt_tx_datahead_a_fb *buf = pTxDataHead;
/* Get SignalField, ServiceField & Length */
vnt_get_phy_field(pDevice, cbFrameLength, wCurrentRate,
byPktType, &buf->a);
/* Get Duration and TimeStampOff */
buf->duration = cpu_to_le16((u16)s_uGetDataDuration(pDevice, DATADUR_A, cbFrameLength, byPktType,
wCurrentRate, bNeedAck, uFragIdx, cbLastFragmentSize, uMACfragNum, byFBOption));
buf->duration_f0 = cpu_to_le16((u16)s_uGetDataDuration(pDevice, DATADUR_A_F0, cbFrameLength, byPktType,
wCurrentRate, bNeedAck, uFragIdx, cbLastFragmentSize, uMACfragNum, byFBOption));
buf->duration_f1 = cpu_to_le16((u16)s_uGetDataDuration(pDevice, DATADUR_A_F1, cbFrameLength, byPktType,
wCurrentRate, bNeedAck, uFragIdx, cbLastFragmentSize, uMACfragNum, byFBOption));
buf->time_stamp_off = vnt_time_stamp_off(pDevice, wCurrentRate);
return buf->duration;
}
/* Get SignalField, ServiceField & Length */
vnt_get_phy_field(pDevice, cbFrameLength, wCurrentRate,
byPktType, &buf->ab);
if (is_pspoll) {
__le16 dur = cpu_to_le16(pDevice->current_aid | BIT(14) | BIT(15));
buf->duration = dur;
} else {
/* Get Duration and TimeStampOff */
buf->duration =
cpu_to_le16((u16)s_uGetDataDuration(pDevice, DATADUR_A, cbFrameLength, byPktType,
wCurrentRate, bNeedAck, uFragIdx,
cbLastFragmentSize, uMACfragNum,
byFBOption));
}
buf->time_stamp_off = vnt_time_stamp_off(pDevice, wCurrentRate);
return buf->duration;
}
/* Get SignalField, ServiceField & Length */
vnt_get_phy_field(pDevice, cbFrameLength, wCurrentRate,
byPktType, &buf->ab);
if (is_pspoll) {
__le16 dur = cpu_to_le16(pDevice->current_aid | BIT(14) | BIT(15));
buf->duration = dur;
} else {
/* Get Duration and TimeStampOff */
buf->duration =
cpu_to_le16((u16)s_uGetDataDuration(pDevice, DATADUR_B, cbFrameLength, byPktType,
wCurrentRate, bNeedAck, uFragIdx,
cbLastFragmentSize, uMACfragNum,
byFBOption));
}
buf->time_stamp_off = vnt_time_stamp_off(pDevice, wCurrentRate);
return buf->duration;
}
/*
 * s_vFillRTSHead - build the RTS control-frame header protecting a data frame
 *
 * @pDevice:       adapter state
 * @byPktType:     PHY packet type (PK_TYPE_11GB/11GA/11A/11B)
 * @pvRTS:         destination RTS header buffer; NULL means no RTS wanted
 * @cbFrameLength: length of the data frame the RTS protects
 * @bNeedAck:      whether the protected data frame expects an ACK
 * @bDisCRC:       true when H/W CRC generation is disabled (shrinks frame len)
 * @hdr:           802.11 header of the protected frame (source of RA/TA)
 * @wCurrentRate:  TX rate of the protected frame
 * @byFBOption:    auto-fallback mode (AUTO_FB_NONE/AUTO_FB_0/AUTO_FB_1)
 *
 * Selects one of four header layouts (11g vs 11a/b, with or without auto
 * fallback), fills the PHY signal fields via vnt_get_phy_field(), the
 * precomputed duration words via s_uGetRTSCTSDuration(), and the RTS frame
 * body (frame control, duration, RA, TA).
 */
static
void
s_vFillRTSHead(
	struct vnt_private *pDevice,
	unsigned char byPktType,
	void *pvRTS,
	unsigned int cbFrameLength,
	bool bNeedAck,
	bool bDisCRC,
	struct ieee80211_hdr *hdr,
	unsigned short wCurrentRate,
	unsigned char byFBOption
)
{
	/* RTS frame is 20 bytes including the 4-byte FCS */
	unsigned int uRTSFrameLen = 20;

	if (!pvRTS)
		return;

	if (bDisCRC) {
		/* When CRCDIS bit is on, H/W forgot to generate FCS for
		 * RTS frame, in this case we need to decrease its length by 4.
		 */
		uRTSFrameLen -= 4;
	}

	/* Note: So far RTSHead doesn't appear in ATIM & Beacon DMA,
	 * so we don't need to take them into account.
	 * Otherwise, we need to modify codes for them.
	 */
	if (byPktType == PK_TYPE_11GB || byPktType == PK_TYPE_11GA) {
		if (byFBOption == AUTO_FB_NONE) {
			struct vnt_rts_g *buf = pvRTS;
			/* Get SignalField, ServiceField & Length
			 * (one CCK 'b' header, one OFDM 'a' header)
			 */
			vnt_get_phy_field(pDevice, uRTSFrameLen,
					  pDevice->byTopCCKBasicRate,
					  PK_TYPE_11B, &buf->b);
			vnt_get_phy_field(pDevice, uRTSFrameLen,
					  pDevice->byTopOFDMBasicRate,
					  byPktType, &buf->a);
			/* Get Duration for each basic-rate combination */
			buf->duration_bb =
				s_uGetRTSCTSDuration(pDevice, RTSDUR_BB,
						     cbFrameLength, PK_TYPE_11B,
						     pDevice->byTopCCKBasicRate,
						     bNeedAck, byFBOption);
			buf->duration_aa =
				s_uGetRTSCTSDuration(pDevice, RTSDUR_AA,
						     cbFrameLength, byPktType,
						     wCurrentRate, bNeedAck,
						     byFBOption);
			buf->duration_ba =
				s_uGetRTSCTSDuration(pDevice, RTSDUR_BA,
						     cbFrameLength, byPktType,
						     wCurrentRate, bNeedAck,
						     byFBOption);

			buf->data.duration = buf->duration_aa;
			/* Get RTS Frame body */
			buf->data.frame_control =
				cpu_to_le16(IEEE80211_FTYPE_CTL |
					    IEEE80211_STYPE_RTS);

			ether_addr_copy(buf->data.ra, hdr->addr1);
			ether_addr_copy(buf->data.ta, hdr->addr2);
		} else {
			struct vnt_rts_g_fb *buf = pvRTS;
			/* Get SignalField, ServiceField & Length */
			vnt_get_phy_field(pDevice, uRTSFrameLen,
					  pDevice->byTopCCKBasicRate,
					  PK_TYPE_11B, &buf->b);

			vnt_get_phy_field(pDevice, uRTSFrameLen,
					  pDevice->byTopOFDMBasicRate,
					  byPktType, &buf->a);
			/* Get Duration */
			buf->duration_bb =
				s_uGetRTSCTSDuration(pDevice, RTSDUR_BB,
						     cbFrameLength, PK_TYPE_11B,
						     pDevice->byTopCCKBasicRate,
						     bNeedAck, byFBOption);
			buf->duration_aa =
				s_uGetRTSCTSDuration(pDevice, RTSDUR_AA,
						     cbFrameLength, byPktType,
						     wCurrentRate, bNeedAck,
						     byFBOption);
			buf->duration_ba =
				s_uGetRTSCTSDuration(pDevice, RTSDUR_BA,
						     cbFrameLength, byPktType,
						     wCurrentRate, bNeedAck,
						     byFBOption);
			/* Extra durations for the two fallback rates (F0/F1) */
			buf->rts_duration_ba_f0 =
				s_uGetRTSCTSDuration(pDevice, RTSDUR_BA_F0,
						     cbFrameLength, byPktType,
						     wCurrentRate, bNeedAck,
						     byFBOption);
			buf->rts_duration_aa_f0 =
				s_uGetRTSCTSDuration(pDevice, RTSDUR_AA_F0,
						     cbFrameLength, byPktType,
						     wCurrentRate, bNeedAck,
						     byFBOption);
			buf->rts_duration_ba_f1 =
				s_uGetRTSCTSDuration(pDevice, RTSDUR_BA_F1,
						     cbFrameLength, byPktType,
						     wCurrentRate, bNeedAck,
						     byFBOption);
			buf->rts_duration_aa_f1 =
				s_uGetRTSCTSDuration(pDevice, RTSDUR_AA_F1,
						     cbFrameLength, byPktType,
						     wCurrentRate, bNeedAck,
						     byFBOption);

			buf->data.duration = buf->duration_aa;
			/* Get RTS Frame body */
			buf->data.frame_control =
				cpu_to_le16(IEEE80211_FTYPE_CTL |
					    IEEE80211_STYPE_RTS);

			ether_addr_copy(buf->data.ra, hdr->addr1);
			ether_addr_copy(buf->data.ta, hdr->addr2);
		} /* if (byFBOption == AUTO_FB_NONE) */
	} else if (byPktType == PK_TYPE_11A) {
		if (byFBOption == AUTO_FB_NONE) {
			struct vnt_rts_ab *buf = pvRTS;
			/* Get SignalField, ServiceField & Length */
			vnt_get_phy_field(pDevice, uRTSFrameLen,
					  pDevice->byTopOFDMBasicRate,
					  byPktType, &buf->ab);
			/* Get Duration */
			buf->duration =
				s_uGetRTSCTSDuration(pDevice, RTSDUR_AA,
						     cbFrameLength, byPktType,
						     wCurrentRate, bNeedAck,
						     byFBOption);

			buf->data.duration = buf->duration;
			/* Get RTS Frame body */
			buf->data.frame_control =
				cpu_to_le16(IEEE80211_FTYPE_CTL |
					    IEEE80211_STYPE_RTS);

			ether_addr_copy(buf->data.ra, hdr->addr1);
			ether_addr_copy(buf->data.ta, hdr->addr2);
		} else {
			struct vnt_rts_a_fb *buf = pvRTS;
			/* Get SignalField, ServiceField & Length */
			vnt_get_phy_field(pDevice, uRTSFrameLen,
					  pDevice->byTopOFDMBasicRate,
					  byPktType, &buf->a);
			/* Get Duration */
			buf->duration =
				s_uGetRTSCTSDuration(pDevice, RTSDUR_AA,
						     cbFrameLength, byPktType,
						     wCurrentRate, bNeedAck,
						     byFBOption);
			buf->rts_duration_f0 =
				s_uGetRTSCTSDuration(pDevice, RTSDUR_AA_F0,
						     cbFrameLength, byPktType,
						     wCurrentRate, bNeedAck,
						     byFBOption);
			buf->rts_duration_f1 =
				s_uGetRTSCTSDuration(pDevice, RTSDUR_AA_F1,
						     cbFrameLength, byPktType,
						     wCurrentRate, bNeedAck,
						     byFBOption);
			buf->data.duration = buf->duration;
			/* Get RTS Frame body */
			buf->data.frame_control =
				cpu_to_le16(IEEE80211_FTYPE_CTL |
					    IEEE80211_STYPE_RTS);

			ether_addr_copy(buf->data.ra, hdr->addr1);
			ether_addr_copy(buf->data.ta, hdr->addr2);
		}
	} else if (byPktType == PK_TYPE_11B) {
		struct vnt_rts_ab *buf = pvRTS;
		/* Get SignalField, ServiceField & Length */
		vnt_get_phy_field(pDevice, uRTSFrameLen,
				  pDevice->byTopCCKBasicRate,
				  PK_TYPE_11B, &buf->ab);
		/* Get Duration */
		buf->duration =
			s_uGetRTSCTSDuration(pDevice, RTSDUR_BB, cbFrameLength,
					     byPktType, wCurrentRate, bNeedAck,
					     byFBOption);

		buf->data.duration = buf->duration;
		/* Get RTS Frame body */
		buf->data.frame_control =
			cpu_to_le16(IEEE80211_FTYPE_CTL | IEEE80211_STYPE_RTS);

		ether_addr_copy(buf->data.ra, hdr->addr1);
		ether_addr_copy(buf->data.ta, hdr->addr2);
	}
}
/*
 * s_vFillCTSHead - build the CTS-to-self header protecting a data frame
 *
 * @pDevice:       adapter state
 * @uDMAIdx:       DMA queue index; ATIM/beacon queues never use fallback
 * @byPktType:     PHY packet type (only 11GB/11GA get a CTS header)
 * @pvCTS:         destination CTS header buffer; NULL means no CTS wanted
 * @cbFrameLength: length of the data frame the CTS protects
 * @bNeedAck:      whether the protected data frame expects an ACK
 * @bDisCRC:       true when H/W CRC generation is disabled (shrinks frame len)
 * @wCurrentRate:  TX rate of the protected frame
 * @byFBOption:    auto-fallback mode (AUTO_FB_NONE/AUTO_FB_0/AUTO_FB_1)
 *
 * Fills either the fallback layout (vnt_cts_fb, with extra F0/F1 durations)
 * or the plain layout (vnt_cts). The CTS RA is our own address, i.e. this
 * is CTS-to-self protection.
 */
static
void
s_vFillCTSHead(
	struct vnt_private *pDevice,
	unsigned int uDMAIdx,
	unsigned char byPktType,
	void *pvCTS,
	unsigned int cbFrameLength,
	bool bNeedAck,
	bool bDisCRC,
	unsigned short wCurrentRate,
	unsigned char byFBOption
)
{
	/* CTS frame is 14 bytes including the 4-byte FCS */
	unsigned int uCTSFrameLen = 14;

	if (!pvCTS)
		return;

	if (bDisCRC) {
		/* When CRCDIS bit is on, H/W forgot to generate FCS for
		 * CTS frame, in this case we need to decrease its length by 4.
		 */
		uCTSFrameLen -= 4;
	}

	if (byPktType == PK_TYPE_11GB || byPktType == PK_TYPE_11GA) {
		if (byFBOption != AUTO_FB_NONE && uDMAIdx != TYPE_ATIMDMA && uDMAIdx != TYPE_BEACONDMA) {
			/* Auto Fall back */
			struct vnt_cts_fb *buf = pvCTS;
			/* Get SignalField, ServiceField & Length */
			vnt_get_phy_field(pDevice, uCTSFrameLen,
					  pDevice->byTopCCKBasicRate,
					  PK_TYPE_11B, &buf->b);

			buf->duration_ba =
				s_uGetRTSCTSDuration(pDevice, CTSDUR_BA,
						     cbFrameLength, byPktType,
						     wCurrentRate, bNeedAck,
						     byFBOption);

			/* Get CTSDuration_ba_f0 */
			buf->cts_duration_ba_f0 =
				s_uGetRTSCTSDuration(pDevice, CTSDUR_BA_F0,
						     cbFrameLength, byPktType,
						     wCurrentRate, bNeedAck,
						     byFBOption);

			/* Get CTSDuration_ba_f1 */
			buf->cts_duration_ba_f1 =
				s_uGetRTSCTSDuration(pDevice, CTSDUR_BA_F1,
						     cbFrameLength, byPktType,
						     wCurrentRate, bNeedAck,
						     byFBOption);

			/* Get CTS Frame body */
			buf->data.duration = buf->duration_ba;

			buf->data.frame_control =
				cpu_to_le16(IEEE80211_FTYPE_CTL |
					    IEEE80211_STYPE_CTS);

			buf->reserved2 = 0x0;

			/* CTS-to-self: RA is our own MAC address */
			ether_addr_copy(buf->data.ra,
					pDevice->abyCurrentNetAddr);
		} else { /* if (byFBOption != AUTO_FB_NONE && uDMAIdx != TYPE_ATIMDMA && uDMAIdx != TYPE_BEACONDMA) */
			struct vnt_cts *buf = pvCTS;
			/* Get SignalField, ServiceField & Length */
			vnt_get_phy_field(pDevice, uCTSFrameLen,
					  pDevice->byTopCCKBasicRate,
					  PK_TYPE_11B, &buf->b);

			/* Get CTSDuration_ba */
			buf->duration_ba =
				s_uGetRTSCTSDuration(pDevice, CTSDUR_BA,
						     cbFrameLength, byPktType,
						     wCurrentRate, bNeedAck,
						     byFBOption);

			/* Get CTS Frame body */
			buf->data.duration = buf->duration_ba;

			buf->data.frame_control =
				cpu_to_le16(IEEE80211_FTYPE_CTL |
					    IEEE80211_STYPE_CTS);

			buf->reserved2 = 0x0;
			/* CTS-to-self: RA is our own MAC address */
			ether_addr_copy(buf->data.ra,
					pDevice->abyCurrentNetAddr);
		}
	}
}
/*
*
* Description:
* Generate FIFO control for MAC & Baseband controller
*
* Parameters:
* In:
* pDevice - Pointer to adapter
* pTxDataHead - Transmit Data Buffer
* pTxBufHead - pTxBufHead
* pvRrvTime - pvRrvTime
* pvRTS - RTS Buffer
* pvCTS - CTS Buffer
* cbFrameSize - Transmit Data Length (Hdr+Payload+FCS)
* bNeedACK - If need ACK
* uDescIdx - Desc Index
* Out:
* none
*
* Return Value: none
*
-
* unsigned int cbFrameSize, Hdr+Payload+FCS
*/
/*
 * s_vGenerateTxParameter - fill reserve-time, RTS and CTS areas of a TX buffer
 *
 * @pDevice:         adapter state
 * @byPktType:       PHY packet type (PK_TYPE_11GB/11GA/11A/11B)
 * @tx_buffer_head:  FIFO head; fifo_ctl is read for CRCDIS/auto-fallback flags
 * @pvRrvTime:       reserve-time area to fill; NULL aborts the whole fill
 * @pvRTS:           RTS header area, or NULL when no RTS protection is used
 * @pvCTS:           CTS header area, or NULL (only used on the 11g no-RTS path)
 * @cbFrameSize:     frame length (header + payload + FCS)
 * @bNeedACK:        whether the frame expects an ACK
 * @uDMAIdx:         DMA queue index, forwarded to s_vFillCTSHead()
 * @psEthHeader:     802.11 header of the frame (RA/TA source for the RTS)
 * @wCurrentRate:    TX rate; also written to tx_buffer_head->current_rate
 *
 * Derives bDisCRC and byFBOption from fifo_ctl, then fills the per-PHY
 * reserve-time structure and delegates to s_vFillRTSHead()/s_vFillCTSHead().
 */
static
void
s_vGenerateTxParameter(
	struct vnt_private *pDevice,
	unsigned char byPktType,
	struct vnt_tx_fifo_head *tx_buffer_head,
	void *pvRrvTime,
	void *pvRTS,
	void *pvCTS,
	unsigned int cbFrameSize,
	bool bNeedACK,
	unsigned int uDMAIdx,
	void *psEthHeader,
	unsigned short wCurrentRate
)
{
	u16 fifo_ctl = le16_to_cpu(tx_buffer_head->fifo_ctl);
	bool bDisCRC = false;
	unsigned char byFBOption = AUTO_FB_NONE;

	tx_buffer_head->current_rate = cpu_to_le16(wCurrentRate);

	if (fifo_ctl & FIFOCTL_CRCDIS)
		bDisCRC = true;

	if (fifo_ctl & FIFOCTL_AUTO_FB_0)
		byFBOption = AUTO_FB_0;
	else if (fifo_ctl & FIFOCTL_AUTO_FB_1)
		byFBOption = AUTO_FB_1;

	if (!pvRrvTime)
		return;

	if (byPktType == PK_TYPE_11GB || byPktType == PK_TYPE_11GA) {
		if (pvRTS) { /* RTS_need */
			/* Fill RsvTime */
			struct vnt_rrv_time_rts *buf = pvRrvTime;

			buf->rts_rrv_time_aa = get_rtscts_time(pDevice, 2, byPktType, cbFrameSize, wCurrentRate);
			buf->rts_rrv_time_ba = get_rtscts_time(pDevice, 1, byPktType, cbFrameSize, wCurrentRate);
			buf->rts_rrv_time_bb = get_rtscts_time(pDevice, 0, byPktType, cbFrameSize, wCurrentRate);
			buf->rrv_time_a = vnt_rxtx_rsvtime_le16(pDevice, byPktType, cbFrameSize, wCurrentRate, bNeedACK);
			buf->rrv_time_b = vnt_rxtx_rsvtime_le16(pDevice, PK_TYPE_11B, cbFrameSize, pDevice->byTopCCKBasicRate, bNeedACK);

			s_vFillRTSHead(pDevice, byPktType, pvRTS, cbFrameSize, bNeedACK, bDisCRC, psEthHeader, wCurrentRate, byFBOption);
		} else {/* RTS_needless, PCF mode */
			struct vnt_rrv_time_cts *buf = pvRrvTime;

			buf->rrv_time_a = vnt_rxtx_rsvtime_le16(pDevice, byPktType, cbFrameSize, wCurrentRate, bNeedACK);
			buf->rrv_time_b = vnt_rxtx_rsvtime_le16(pDevice, PK_TYPE_11B, cbFrameSize, pDevice->byTopCCKBasicRate, bNeedACK);
			buf->cts_rrv_time_ba = get_rtscts_time(pDevice, 3, byPktType, cbFrameSize, wCurrentRate);

			/* Fill CTS */
			s_vFillCTSHead(pDevice, uDMAIdx, byPktType, pvCTS, cbFrameSize, bNeedACK, bDisCRC, wCurrentRate, byFBOption);
		}
	} else if (byPktType == PK_TYPE_11A) {
		if (pvRTS) {/* RTS_need, non PCF mode */
			struct vnt_rrv_time_ab *buf = pvRrvTime;

			buf->rts_rrv_time = get_rtscts_time(pDevice, 2, byPktType, cbFrameSize, wCurrentRate);
			buf->rrv_time = vnt_rxtx_rsvtime_le16(pDevice, byPktType, cbFrameSize, wCurrentRate, bNeedACK);

			/* Fill RTS */
			s_vFillRTSHead(pDevice, byPktType, pvRTS, cbFrameSize, bNeedACK, bDisCRC, psEthHeader, wCurrentRate, byFBOption);
		} else {/* RTS_needless, non PCF mode */
			/* NOTE: was "else if (!pvRTS)" — the condition was
			 * always true in the else branch, so it is dropped.
			 */
			struct vnt_rrv_time_ab *buf = pvRrvTime;

			buf->rrv_time = vnt_rxtx_rsvtime_le16(pDevice, PK_TYPE_11A, cbFrameSize, wCurrentRate, bNeedACK);
		}
	} else if (byPktType == PK_TYPE_11B) {
		if (pvRTS) {/* RTS_need, non PCF mode */
			struct vnt_rrv_time_ab *buf = pvRrvTime;

			buf->rts_rrv_time = get_rtscts_time(pDevice, 0, byPktType, cbFrameSize, wCurrentRate);
			buf->rrv_time = vnt_rxtx_rsvtime_le16(pDevice, PK_TYPE_11B, cbFrameSize, wCurrentRate, bNeedACK);

			/* Fill RTS */
			s_vFillRTSHead(pDevice, byPktType, pvRTS, cbFrameSize, bNeedACK, bDisCRC, psEthHeader, wCurrentRate, byFBOption);
		} else { /* RTS_needless, non PCF mode */
			struct vnt_rrv_time_ab *buf = pvRrvTime;

			buf->rrv_time = vnt_rxtx_rsvtime_le16(pDevice, PK_TYPE_11B, cbFrameSize, wCurrentRate, bNeedACK);
		}
	}
}
/*
 * s_cbFillTxBufHead - lay out and fill the TX descriptor buffer for one frame
 *
 * @pDevice:         adapter state
 * @byPktType:       PHY packet type (PK_TYPE_11GB/11GA/11A/11B)
 * @pbyTxBufferAddr: start of the TX buffer (same memory as td_info->buf)
 * @uDMAIdx:         DMA queue index
 * @pHeadTD:         TX descriptor whose td_info holds skb and buffer
 * @is_pspoll:       nonzero for PS-poll style duration handling
 *
 * Computes the offsets of the reserve-time, MIC header, RTS/CTS and data
 * head areas inside the buffer (layout depends on PHY type, auto fallback
 * and whether RTS is requested), zeroes that region, fills it via
 * s_vGenerateTxParameter()/s_uFillDataHead(), then copies the 802.11 frame
 * itself behind the header area.
 *
 * Return: total header length in bytes (offset at which the frame starts).
 */
static unsigned int
s_cbFillTxBufHead(struct vnt_private *pDevice, unsigned char byPktType,
		  unsigned char *pbyTxBufferAddr,
		  unsigned int uDMAIdx, struct vnt_tx_desc *pHeadTD,
		  unsigned int is_pspoll)
{
	struct vnt_td_info *td_info = pHeadTD->td_info;
	struct sk_buff *skb = td_info->skb;
	struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb);
	struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)skb->data;
	struct vnt_tx_fifo_head *tx_buffer_head =
			(struct vnt_tx_fifo_head *)td_info->buf;
	u16 fifo_ctl = le16_to_cpu(tx_buffer_head->fifo_ctl);
	unsigned int cbFrameSize;
	__le16 uDuration;
	unsigned char *pbyBuffer;
	unsigned int uLength = 0;
	unsigned int cbMICHDR = 0;
	unsigned int uMACfragNum = 1;
	unsigned int uPadding = 0;
	unsigned int cbReqCount = 0;
	bool bNeedACK = (bool)(fifo_ctl & FIFOCTL_NEEDACK);
	bool bRTS = (bool)(fifo_ctl & FIFOCTL_RTS);
	struct vnt_tx_desc *ptdCurr;
	unsigned int cbHeaderLength = 0;
	void *pvRrvTime = NULL;
	struct vnt_mic_hdr *pMICHDR = NULL;
	void *pvRTS = NULL;
	void *pvCTS = NULL;
	void *pvTxDataHd = NULL;
	unsigned short wTxBufSize; /* FFinfo size */
	unsigned char byFBOption = AUTO_FB_NONE;

	/* on-air frame size = payload + 4-byte FCS */
	cbFrameSize = skb->len + 4;

	if (info->control.hw_key) {
		/* only CCMP needs a MIC header block in the buffer */
		switch (info->control.hw_key->cipher) {
		case WLAN_CIPHER_SUITE_CCMP:
			cbMICHDR = sizeof(struct vnt_mic_hdr);
			break;
		default:
			break;
		}

		cbFrameSize += info->control.hw_key->icv_len;

		if (pDevice->local_id > REV_ID_VT3253_A1) {
			/* MAC Header should be padding 0 to DW alignment. */
			uPadding = 4 - (ieee80211_get_hdrlen_from_skb(skb) % 4);
			uPadding %= 4;
		}
	}

	/*
	 * Use for AUTO FALL BACK
	 */
	if (fifo_ctl & FIFOCTL_AUTO_FB_0)
		byFBOption = AUTO_FB_0;
	else if (fifo_ctl & FIFOCTL_AUTO_FB_1)
		byFBOption = AUTO_FB_1;

	/* Set RrvTime/RTS/CTS Buffer
	 * (each case computes the running offsets of the sub-areas and the
	 * resulting total header length for that layout)
	 */
	wTxBufSize = sizeof(struct vnt_tx_fifo_head);
	if (byPktType == PK_TYPE_11GB || byPktType == PK_TYPE_11GA) {/* 802.11g packet */
		if (byFBOption == AUTO_FB_NONE) {
			if (bRTS) {/* RTS_need */
				pvRrvTime = (void *)(pbyTxBufferAddr + wTxBufSize);
				pMICHDR = (struct vnt_mic_hdr *)(pbyTxBufferAddr + wTxBufSize + sizeof(struct vnt_rrv_time_rts));
				pvRTS = (void *)(pbyTxBufferAddr + wTxBufSize + sizeof(struct vnt_rrv_time_rts) + cbMICHDR);
				pvCTS = NULL;
				pvTxDataHd = (void *)(pbyTxBufferAddr + wTxBufSize + sizeof(struct vnt_rrv_time_rts) +
							cbMICHDR + sizeof(struct vnt_rts_g));
				cbHeaderLength = wTxBufSize + sizeof(struct vnt_rrv_time_rts) +
							cbMICHDR + sizeof(struct vnt_rts_g) +
							sizeof(struct vnt_tx_datahead_g);
			} else { /* RTS_needless */
				pvRrvTime = (void *)(pbyTxBufferAddr + wTxBufSize);
				pMICHDR = (struct vnt_mic_hdr *)(pbyTxBufferAddr + wTxBufSize + sizeof(struct vnt_rrv_time_cts));
				pvRTS = NULL;
				pvCTS = (void *) (pbyTxBufferAddr + wTxBufSize + sizeof(struct vnt_rrv_time_cts) + cbMICHDR);
				pvTxDataHd = (void *)(pbyTxBufferAddr + wTxBufSize +
							sizeof(struct vnt_rrv_time_cts) + cbMICHDR + sizeof(struct vnt_cts));
				cbHeaderLength = wTxBufSize + sizeof(struct vnt_rrv_time_cts) +
							cbMICHDR + sizeof(struct vnt_cts) + sizeof(struct vnt_tx_datahead_g);
			}
		} else {
			/* Auto Fall Back */
			if (bRTS) {/* RTS_need */
				pvRrvTime = (void *)(pbyTxBufferAddr + wTxBufSize);
				pMICHDR = (struct vnt_mic_hdr *)(pbyTxBufferAddr + wTxBufSize + sizeof(struct vnt_rrv_time_rts));
				pvRTS = (void *) (pbyTxBufferAddr + wTxBufSize + sizeof(struct vnt_rrv_time_rts) + cbMICHDR);
				pvCTS = NULL;
				pvTxDataHd = (void *)(pbyTxBufferAddr + wTxBufSize + sizeof(struct vnt_rrv_time_rts) +
					cbMICHDR + sizeof(struct vnt_rts_g_fb));
				cbHeaderLength = wTxBufSize + sizeof(struct vnt_rrv_time_rts) +
					cbMICHDR + sizeof(struct vnt_rts_g_fb) + sizeof(struct vnt_tx_datahead_g_fb);
			} else { /* RTS_needless */
				pvRrvTime = (void *)(pbyTxBufferAddr + wTxBufSize);
				pMICHDR = (struct vnt_mic_hdr *)(pbyTxBufferAddr + wTxBufSize + sizeof(struct vnt_rrv_time_cts));
				pvRTS = NULL;
				pvCTS = (void *)(pbyTxBufferAddr + wTxBufSize + sizeof(struct vnt_rrv_time_cts) + cbMICHDR);
				pvTxDataHd = (void *)(pbyTxBufferAddr + wTxBufSize + sizeof(struct vnt_rrv_time_cts) +
					cbMICHDR + sizeof(struct vnt_cts_fb));
				cbHeaderLength = wTxBufSize + sizeof(struct vnt_rrv_time_cts) +
					cbMICHDR + sizeof(struct vnt_cts_fb) + sizeof(struct vnt_tx_datahead_g_fb);
			}
		} /* Auto Fall Back */
	} else {/* 802.11a/b packet */
		if (byFBOption == AUTO_FB_NONE) {
			if (bRTS) {
				pvRrvTime = (void *)(pbyTxBufferAddr + wTxBufSize);
				pMICHDR = (struct vnt_mic_hdr *)(pbyTxBufferAddr + wTxBufSize + sizeof(struct vnt_rrv_time_ab));
				pvRTS = (void *)(pbyTxBufferAddr + wTxBufSize + sizeof(struct vnt_rrv_time_ab) + cbMICHDR);
				pvCTS = NULL;
				pvTxDataHd = (void *)(pbyTxBufferAddr + wTxBufSize +
					sizeof(struct vnt_rrv_time_ab) + cbMICHDR + sizeof(struct vnt_rts_ab));
				cbHeaderLength = wTxBufSize + sizeof(struct vnt_rrv_time_ab) +
					cbMICHDR + sizeof(struct vnt_rts_ab) + sizeof(struct vnt_tx_datahead_ab);
			} else { /* RTS_needless, need MICHDR */
				pvRrvTime = (void *)(pbyTxBufferAddr + wTxBufSize);
				pMICHDR = (struct vnt_mic_hdr *)(pbyTxBufferAddr + wTxBufSize + sizeof(struct vnt_rrv_time_ab));
				pvRTS = NULL;
				pvCTS = NULL;
				pvTxDataHd = (void *)(pbyTxBufferAddr + wTxBufSize + sizeof(struct vnt_rrv_time_ab) + cbMICHDR);
				cbHeaderLength = wTxBufSize + sizeof(struct vnt_rrv_time_ab) +
					cbMICHDR + sizeof(struct vnt_tx_datahead_ab);
			}
		} else {
			/* Auto Fall Back */
			if (bRTS) { /* RTS_need */
				pvRrvTime = (void *)(pbyTxBufferAddr + wTxBufSize);
				pMICHDR = (struct vnt_mic_hdr *)(pbyTxBufferAddr + wTxBufSize + sizeof(struct vnt_rrv_time_ab));
				pvRTS = (void *)(pbyTxBufferAddr + wTxBufSize + sizeof(struct vnt_rrv_time_ab) + cbMICHDR);
				pvCTS = NULL;
				pvTxDataHd = (void *)(pbyTxBufferAddr + wTxBufSize +
					sizeof(struct vnt_rrv_time_ab) + cbMICHDR + sizeof(struct vnt_rts_a_fb));
				cbHeaderLength = wTxBufSize + sizeof(struct vnt_rrv_time_ab) +
					cbMICHDR + sizeof(struct vnt_rts_a_fb) + sizeof(struct vnt_tx_datahead_a_fb);
			} else { /* RTS_needless */
				pvRrvTime = (void *)(pbyTxBufferAddr + wTxBufSize);
				pMICHDR = (struct vnt_mic_hdr *)(pbyTxBufferAddr + wTxBufSize + sizeof(struct vnt_rrv_time_ab));
				pvRTS = NULL;
				pvCTS = NULL;
				pvTxDataHd = (void *)(pbyTxBufferAddr + wTxBufSize + sizeof(struct vnt_rrv_time_ab) + cbMICHDR);
				cbHeaderLength = wTxBufSize + sizeof(struct vnt_rrv_time_ab) +
					cbMICHDR + sizeof(struct vnt_tx_datahead_a_fb);
			}
		} /* Auto Fall Back */
	}

	td_info->mic_hdr = pMICHDR;

	/* clear everything after the FIFO head up to the end of the header */
	memset((void *)(pbyTxBufferAddr + wTxBufSize), 0, (cbHeaderLength - wTxBufSize));

	/* Fill FIFO,RrvTime,RTS,and CTS */
	s_vGenerateTxParameter(pDevice, byPktType, tx_buffer_head, pvRrvTime, pvRTS, pvCTS,
			       cbFrameSize, bNeedACK, uDMAIdx, hdr, pDevice->wCurrentRate);
	/* Fill DataHead */
	uDuration = s_uFillDataHead(pDevice, byPktType, pvTxDataHd, cbFrameSize, uDMAIdx, bNeedACK,
				    0, 0, uMACfragNum, byFBOption, pDevice->wCurrentRate, is_pspoll);

	/* the duration computed for the data head also goes into the frame */
	hdr->duration_id = uDuration;

	cbReqCount = cbHeaderLength + uPadding + skb->len;
	pbyBuffer = (unsigned char *)pHeadTD->td_info->buf;
	uLength = cbHeaderLength + uPadding;

	/* Copy the Packet into a tx Buffer */
	memcpy((pbyBuffer + uLength), skb->data, skb->len);

	ptdCurr = pHeadTD;

	ptdCurr->td_info->req_count = (u16)cbReqCount;

	return cbHeaderLength;
}
/*
 * vnt_fill_txkey - write the per-frame key material the hardware expects
 *
 * @hdr:         802.11 header of the outgoing frame (IV follows it)
 * @key_buffer:  destination key area in the TX FIFO head
 * @tx_key:      mac80211 key configuration for this frame
 * @skb:         the frame being transmitted
 * @payload_len: full frame length; header and ICV are subtracted below
 * @mic_hdr:     CCMP MIC header area in the TX buffer (NULL unless CCMP)
 *
 * WEP: copies IV + key (twice for 40-bit keys — presumably the second copy
 * fills the engine's fixed-size slot; TODO confirm against vendor docs).
 * TKIP: asks mac80211 for the phase-2 per-packet key.
 * CCMP: fills the MIC header (big-endian PN, masked frame control/seq) and
 * copies the temporal key.
 */
static void vnt_fill_txkey(struct ieee80211_hdr *hdr, u8 *key_buffer,
			   struct ieee80211_key_conf *tx_key,
			   struct sk_buff *skb, u16 payload_len,
			   struct vnt_mic_hdr *mic_hdr)
{
	u64 pn64;
	/* the IV sits immediately after the 802.11 header */
	u8 *iv = ((u8 *)hdr + ieee80211_get_hdrlen_from_skb(skb));

	/* strip header and icv len from payload */
	payload_len -= ieee80211_get_hdrlen_from_skb(skb);
	payload_len -= tx_key->icv_len;

	switch (tx_key->cipher) {
	case WLAN_CIPHER_SUITE_WEP40:
	case WLAN_CIPHER_SUITE_WEP104:
		/* 3-byte IV followed by the WEP key */
		memcpy(key_buffer, iv, 3);
		memcpy(key_buffer + 3, tx_key->key, tx_key->keylen);

		if (tx_key->keylen == WLAN_KEY_LEN_WEP40) {
			memcpy(key_buffer + 8, iv, 3);
			memcpy(key_buffer + 11,
			       tx_key->key, WLAN_KEY_LEN_WEP40);
		}

		break;
	case WLAN_CIPHER_SUITE_TKIP:
		ieee80211_get_tkip_p2k(tx_key, skb, key_buffer);
		break;
	case WLAN_CIPHER_SUITE_CCMP:
		if (!mic_hdr)
			return;

		mic_hdr->id = 0x59;
		mic_hdr->payload_len = cpu_to_be16(payload_len);
		ether_addr_copy(mic_hdr->mic_addr2, hdr->addr2);

		/* 48-bit packet number, stored big-endian byte by byte */
		pn64 = atomic64_read(&tx_key->tx_pn);
		mic_hdr->ccmp_pn[5] = pn64;
		mic_hdr->ccmp_pn[4] = pn64 >> 8;
		mic_hdr->ccmp_pn[3] = pn64 >> 16;
		mic_hdr->ccmp_pn[2] = pn64 >> 24;
		mic_hdr->ccmp_pn[1] = pn64 >> 32;
		mic_hdr->ccmp_pn[0] = pn64 >> 40;

		/* header length for AAD: 28 with a 4-address header, else 22 */
		if (ieee80211_has_a4(hdr->frame_control))
			mic_hdr->hlen = cpu_to_be16(28);
		else
			mic_hdr->hlen = cpu_to_be16(22);

		ether_addr_copy(mic_hdr->addr1, hdr->addr1);
		ether_addr_copy(mic_hdr->addr2, hdr->addr2);
		ether_addr_copy(mic_hdr->addr3, hdr->addr3);

		/* mask out the bits that are muted for CCMP AAD construction */
		mic_hdr->frame_control = cpu_to_le16(
			le16_to_cpu(hdr->frame_control) & 0xc78f);
		mic_hdr->seq_ctrl = cpu_to_le16(
				le16_to_cpu(hdr->seq_ctrl) & 0xf);

		if (ieee80211_has_a4(hdr->frame_control))
			ether_addr_copy(mic_hdr->addr4, hdr->addr4);

		memcpy(key_buffer, tx_key->key, WLAN_KEY_LEN_CCMP);

		break;
	default:
		break;
	}
}
/*
 * vnt_generate_fifo_header - build the TX FIFO head for an outgoing frame
 *
 * @priv:    adapter state
 * @dma_idx: DMA queue index, forwarded to s_cbFillTxBufHead()
 * @head_td: TX descriptor whose td_info->buf receives the FIFO head
 * @skb:     frame handed down by mac80211
 *
 * Derives the PHY packet type and rate from the mac80211 rate control info,
 * sets all fifo_ctl/frag_ctl flags (ACK, retry, RTS, long header, cipher),
 * then delegates buffer layout to s_cbFillTxBufHead() and key material to
 * vnt_fill_txkey().
 *
 * Return: always 0.
 */
int vnt_generate_fifo_header(struct vnt_private *priv, u32 dma_idx,
			     struct vnt_tx_desc *head_td, struct sk_buff *skb)
{
	struct vnt_td_info *td_info = head_td->td_info;
	struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb);
	struct ieee80211_tx_rate *tx_rate = &info->control.rates[0];
	struct ieee80211_rate *rate;
	struct ieee80211_key_conf *tx_key;
	struct ieee80211_hdr *hdr;
	struct vnt_tx_fifo_head *tx_buffer_head =
			(struct vnt_tx_fifo_head *)td_info->buf;
	u16 tx_body_size = skb->len, current_rate;
	u8 pkt_type;
	bool is_pspoll = false;

	memset(tx_buffer_head, 0, sizeof(*tx_buffer_head));

	hdr = (struct ieee80211_hdr *)(skb->data);

	rate = ieee80211_get_tx_rate(priv->hw, info);

	current_rate = rate->hw_value;
	/* retune TX power when the rate changes (skip while off-channel) */
	if (priv->wCurrentRate != current_rate &&
	    !(priv->hw->conf.flags & IEEE80211_CONF_OFFCHANNEL)) {
		priv->wCurrentRate = current_rate;

		RFbSetPower(priv, priv->wCurrentRate,
			    priv->hw->conf.chandef.chan->hw_value);
	}

	/* classify the PHY packet type from band and protection flags */
	if (current_rate > RATE_11M) {
		if (info->band == NL80211_BAND_5GHZ) {
			pkt_type = PK_TYPE_11A;
		} else {
			if (tx_rate->flags & IEEE80211_TX_RC_USE_CTS_PROTECT)
				pkt_type = PK_TYPE_11GB;
			else
				pkt_type = PK_TYPE_11GA;
		}
	} else {
		pkt_type = PK_TYPE_11B;
	}

	/*Set fifo controls */
	if (pkt_type == PK_TYPE_11A)
		tx_buffer_head->fifo_ctl = 0;
	else if (pkt_type == PK_TYPE_11B)
		tx_buffer_head->fifo_ctl = cpu_to_le16(FIFOCTL_11B);
	else if (pkt_type == PK_TYPE_11GB)
		tx_buffer_head->fifo_ctl = cpu_to_le16(FIFOCTL_11GB);
	else if (pkt_type == PK_TYPE_11GA)
		tx_buffer_head->fifo_ctl = cpu_to_le16(FIFOCTL_11GA);

	/* generate interrupt */
	tx_buffer_head->fifo_ctl |= cpu_to_le16(FIFOCTL_GENINT);

	/* management/control frames go to DMA0 with a shorter lifetime */
	if (!ieee80211_is_data(hdr->frame_control)) {
		tx_buffer_head->fifo_ctl |= cpu_to_le16(FIFOCTL_TMOEN);
		tx_buffer_head->fifo_ctl |= cpu_to_le16(FIFOCTL_ISDMA0);
		tx_buffer_head->time_stamp =
			cpu_to_le16(DEFAULT_MGN_LIFETIME_RES_64us);
	} else {
		tx_buffer_head->time_stamp =
			cpu_to_le16(DEFAULT_MSDU_LIFETIME_RES_64us);
	}

	if (!(info->flags & IEEE80211_TX_CTL_NO_ACK))
		tx_buffer_head->fifo_ctl |= cpu_to_le16(FIFOCTL_NEEDACK);

	if (ieee80211_has_retry(hdr->frame_control))
		tx_buffer_head->fifo_ctl |= cpu_to_le16(FIFOCTL_LRETRY);

	if (tx_rate->flags & IEEE80211_TX_RC_USE_SHORT_PREAMBLE)
		priv->preamble_type = PREAMBLE_SHORT;
	else
		priv->preamble_type = PREAMBLE_LONG;

	if (tx_rate->flags & IEEE80211_TX_RC_USE_RTS_CTS)
		tx_buffer_head->fifo_ctl |= cpu_to_le16(FIFOCTL_RTS);

	if (ieee80211_has_a4(hdr->frame_control)) {
		tx_buffer_head->fifo_ctl |= cpu_to_le16(FIFOCTL_LHEAD);
		priv->bLongHeader = true;
	}

	if (info->flags & IEEE80211_TX_CTL_NO_PS_BUFFER)
		is_pspoll = true;

	/* header length lives in frag_ctl bits 10+ */
	tx_buffer_head->frag_ctl =
			cpu_to_le16(ieee80211_get_hdrlen_from_skb(skb) << 10);

	if (info->control.hw_key) {
		switch (info->control.hw_key->cipher) {
		case WLAN_CIPHER_SUITE_WEP40:
		case WLAN_CIPHER_SUITE_WEP104:
			tx_buffer_head->frag_ctl |= cpu_to_le16(FRAGCTL_LEGACY);
			break;
		case WLAN_CIPHER_SUITE_TKIP:
			tx_buffer_head->frag_ctl |= cpu_to_le16(FRAGCTL_TKIP);
			break;
		case WLAN_CIPHER_SUITE_CCMP:
			tx_buffer_head->frag_ctl |= cpu_to_le16(FRAGCTL_AES);
			break;
		default:
			break;
		}
	}

	tx_buffer_head->current_rate = cpu_to_le16(current_rate);

	/* legacy rates TODO use ieee80211_tx_rate */
	if (current_rate >= RATE_18M && ieee80211_is_data(hdr->frame_control)) {
		if (priv->byAutoFBCtrl == AUTO_FB_0)
			tx_buffer_head->fifo_ctl |=
				cpu_to_le16(FIFOCTL_AUTO_FB_0);
		else if (priv->byAutoFBCtrl == AUTO_FB_1)
			tx_buffer_head->fifo_ctl |=
				cpu_to_le16(FIFOCTL_AUTO_FB_1);
	}

	tx_buffer_head->frag_ctl |= cpu_to_le16(FRAGCTL_NONFRAG);

	s_cbFillTxBufHead(priv, pkt_type, (u8 *)tx_buffer_head,
			  dma_idx, head_td, is_pspoll);

	if (info->control.hw_key) {
		tx_key = info->control.hw_key;
		if (tx_key->keylen > 0)
			vnt_fill_txkey(hdr, tx_buffer_head->tx_key,
				       tx_key, skb, tx_body_size,
				       td_info->mic_hdr);
	}

	return 0;
}
/*
 * vnt_beacon_xmit - copy a beacon into the beacon buffer and start auto TX
 *
 * @priv: adapter state
 * @skb:  beacon frame from mac80211 (not consumed; caller frees it)
 *
 * Fills the short buffer head (PHY fields, duration, timestamp offset) for
 * either 11a (6M) or 11b (1M) basic rate, copies the beacon behind it,
 * zeroes the timestamp, optionally assigns the sequence number, then points
 * the beacon DMA at the buffer and enables automatic beacon transmit.
 *
 * Return: always 0.
 */
static int vnt_beacon_xmit(struct vnt_private *priv,
			   struct sk_buff *skb)
{
	struct vnt_tx_short_buf_head *short_head =
		(struct vnt_tx_short_buf_head *)priv->tx_beacon_bufs;
	struct ieee80211_mgmt *mgmt_hdr = (struct ieee80211_mgmt *)
				(priv->tx_beacon_bufs + sizeof(*short_head));
	struct ieee80211_tx_info *info;
	/* on-air size = payload + 4-byte FCS */
	u32 frame_size = skb->len + 4;
	u16 current_rate;

	memset(priv->tx_beacon_bufs, 0, sizeof(*short_head));

	if (priv->byBBType == BB_TYPE_11A) {
		current_rate = RATE_6M;

		/* Get SignalField,ServiceField,Length */
		vnt_get_phy_field(priv, frame_size, current_rate,
				  PK_TYPE_11A, &short_head->ab);

		/* Get Duration and TimeStampOff
		 * NOTE(review): DATADUR_B is used even on the 11A path —
		 * mirrors the vendor code, but looks suspicious; confirm.
		 */
		short_head->duration =
			cpu_to_le16((u16)s_uGetDataDuration(priv, DATADUR_B,
				    frame_size, PK_TYPE_11A, current_rate,
				    false, 0, 0, 1, AUTO_FB_NONE));

		short_head->time_stamp_off =
				vnt_time_stamp_off(priv, current_rate);
	} else {
		current_rate = RATE_1M;
		short_head->fifo_ctl |= cpu_to_le16(FIFOCTL_11B);

		/* Get SignalField,ServiceField,Length */
		vnt_get_phy_field(priv, frame_size, current_rate,
				  PK_TYPE_11B, &short_head->ab);

		/* Get Duration and TimeStampOff */
		short_head->duration =
			cpu_to_le16((u16)s_uGetDataDuration(priv, DATADUR_B,
				    frame_size, PK_TYPE_11B, current_rate,
				    false, 0, 0, 1, AUTO_FB_NONE));

		short_head->time_stamp_off =
				vnt_time_stamp_off(priv, current_rate);
	}

	short_head->fifo_ctl |= cpu_to_le16(FIFOCTL_GENINT);

	/* Copy Beacon */
	memcpy(mgmt_hdr, skb->data, skb->len);

	/* time stamp always 0 */
	mgmt_hdr->u.beacon.timestamp = 0;

	info = IEEE80211_SKB_CB(skb);
	if (info->flags & IEEE80211_TX_CTL_ASSIGN_SEQ) {
		struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)mgmt_hdr;

		hdr->duration_id = 0;
		hdr->seq_ctrl = cpu_to_le16(priv->wSeqCounter << 4);
	}

	/* 12-bit sequence counter wrap */
	priv->wSeqCounter++;
	if (priv->wSeqCounter > 0x0fff)
		priv->wSeqCounter = 0;

	priv->wBCNBufLen = sizeof(*short_head) + skb->len;

	iowrite32((u32)priv->tx_beacon_dma, priv->port_offset + MAC_REG_BCNDMAPTR);

	iowrite16(priv->wBCNBufLen, priv->port_offset + MAC_REG_BCNDMACTL + 2);
	/* Set auto Transmit on */
	vt6655_mac_reg_bits_on(priv->port_offset, MAC_REG_TCR, TCR_AUTOBCNTX);
	/* Poll Transmit the adapter */
	iowrite8(BEACON_READY, priv->port_offset + MAC_REG_BCNDMACTL);

	return 0;
}
/*
 * vnt_beacon_make - fetch a beacon from mac80211 and hand it to the hardware
 *
 * Return: 0 on success, -ENOMEM when no beacon could be obtained,
 * -ENODEV when the transmit setup failed.
 */
int vnt_beacon_make(struct vnt_private *priv, struct ieee80211_vif *vif)
{
	struct sk_buff *skb = ieee80211_beacon_get(priv->hw, vif, 0);

	if (!skb)
		return -ENOMEM;

	if (vnt_beacon_xmit(priv, skb) != 0) {
		/* the hardware did not take it; release the skb ourselves */
		ieee80211_free_txskb(priv->hw, skb);
		return -ENODEV;
	}

	return 0;
}
int vnt_beacon_enable(struct vnt_private *priv, struct ieee80211_vif *vif,
struct ieee80211_bss_conf *conf)
{
iowrite8(TFTCTL_TSFCNTRST, priv->port_offset + MAC_REG_TFTCTL);
iowrite8(TFTCTL_TSFCNTREN, priv->port_offset + MAC_REG_TFTCTL);
CARDvSetFirstNextTBTT(priv, conf->beacon_int);
CARDbSetBeaconPeriod(priv, conf->beacon_int);
return vnt_beacon_make(priv, vif);
}
| linux-master | drivers/staging/vt6655/rxtx.c |
// SPDX-License-Identifier: GPL-2.0+
/*
* Copyright (c) 1996, 2003 VIA Networking Technologies, Inc.
* All rights reserved.
*
* Purpose: handle dpc rx functions
*
* Author: Lyndon Chen
*
* Date: May 20, 2003
*
* Functions:
*
* Revision History:
*
*/
#include "device.h"
#include "baseband.h"
#include "rf.h"
#include "dpc.h"
/*
 * vnt_rx_data - parse one received frame and hand it to mac80211
 *
 * @priv:           adapter state
 * @skb:            receive buffer; consumed via ieee80211_rx_irqsafe() on
 *                  success, untouched (caller frees) on failure
 * @bytes_received: total bytes in the buffer including the hardware trailer
 *
 * Buffer layout as used below: a 4-byte hardware prefix (rate byte at
 * offset 1, frame length at offset 2), the frame itself, and a trailer
 * whose last 12 bytes hold the TSF time (8 bytes) and, at the very end,
 * new_rsr / rssi / rsr status bytes.
 *
 * Return: true when the frame was accepted and passed up, false when it
 * was rejected (bad length, unknown rate, invalid RSR, decrypt failure).
 */
static bool vnt_rx_data(struct vnt_private *priv, struct sk_buff *skb,
			u16 bytes_received)
{
	struct ieee80211_hw *hw = priv->hw;
	struct ieee80211_supported_band *sband;
	struct ieee80211_rx_status rx_status = { 0 };
	struct ieee80211_hdr *hdr;
	__le16 fc;
	u8 *rsr, *new_rsr, *rssi;
	__le64 *tsf_time;
	u16 frame_size;
	int ii, r;
	u8 *rx_rate;
	u8 *skb_data;
	u8 rate_idx = 0;
	/* hardware rate codes indexed by RATE_* (1M..54M) */
	u8 rate[MAX_RATE] = {2, 4, 11, 22, 12, 18, 24, 36, 48, 72, 96, 108};
	long rx_dbm;

	/* [31:16]RcvByteCount ( not include 4-byte Status ) */
	frame_size = le16_to_cpu(*((__le16 *)(skb->data + 2)));
	if (frame_size > 2346 || frame_size < 14) {
		dev_dbg(&priv->pcid->dev, "------- WRONG Length 1\n");
		return false;
	}

	skb_data = (u8 *)skb->data;

	rx_rate = skb_data + 1;

	sband = hw->wiphy->bands[hw->conf.chandef.chan->band];

	/* map the hardware rate code to a RATE_* index ... */
	for (r = RATE_1M; r < MAX_RATE; r++) {
		if (*rx_rate == rate[r])
			break;
	}

	priv->rx_rate = r;

	/* ... then to the mac80211 bitrate table index */
	for (ii = 0; ii < sband->n_bitrates; ii++) {
		if (sband->bitrates[ii].hw_value == r) {
			rate_idx = ii;
			break;
		}
	}

	if (ii == sband->n_bitrates) {
		dev_dbg(&priv->pcid->dev, "Wrong RxRate %x\n", *rx_rate);
		return false;
	}

	/* hardware trailer: TSF, then status bytes at the very end */
	tsf_time = (__le64 *)(skb_data + bytes_received - 12);
	new_rsr = skb_data + bytes_received - 3;
	rssi = skb_data + bytes_received - 2;
	rsr = skb_data + bytes_received - 1;
	if (*rsr & (RSR_IVLDTYP | RSR_IVLDLEN))
		return false;

	RFvRSSITodBm(priv, *rssi, &rx_dbm);

	priv->byBBPreEDRSSI = (u8)rx_dbm + 1;
	priv->current_rssi = *rssi;

	/* drop the 4-byte hardware prefix and the trailer */
	skb_pull(skb, 4);
	skb_trim(skb, frame_size);

	rx_status.mactime = le64_to_cpu(*tsf_time);
	rx_status.band = hw->conf.chandef.chan->band;
	rx_status.signal = rx_dbm;
	rx_status.flag = 0;
	rx_status.freq = hw->conf.chandef.chan->center_freq;

	if (!(*rsr & RSR_CRCOK))
		rx_status.flag |= RX_FLAG_FAILED_FCS_CRC;

	hdr = (struct ieee80211_hdr *)(skb->data);
	fc = hdr->frame_control;

	rx_status.rate_idx = rate_idx;

	if (ieee80211_has_protected(fc)) {
		if (priv->local_id > REV_ID_VT3253_A1)
			rx_status.flag |= RX_FLAG_DECRYPTED;

		/* Drop packet */
		if (!(*new_rsr & NEWRSR_DECRYPTOK))
			return false;
	}

	memcpy(IEEE80211_SKB_RXCB(skb), &rx_status, sizeof(rx_status));

	ieee80211_rx_irqsafe(priv->hw, skb);

	return true;
}
/*
 * vnt_receive_frame - process one completed RX descriptor
 *
 * Unmaps the DMA buffer, computes the received length from the descriptor,
 * drops out-of-range frames, and otherwise hands the skb to vnt_rx_data().
 * The skb is freed here whenever it is not passed up the stack.
 *
 * Return: always true.
 */
bool vnt_receive_frame(struct vnt_private *priv, struct vnt_rx_desc *curr_rd)
{
	struct vnt_rd_info *rd_info = curr_rd->rd_info;
	struct sk_buff *skb = rd_info->skb;
	u16 frame_size;

	dma_unmap_single(&priv->pcid->dev, rd_info->skb_dma,
			 priv->rx_buf_sz, DMA_FROM_DEVICE);

	frame_size = le16_to_cpu(curr_rd->rd1.req_count) -
		     le16_to_cpu(curr_rd->rd0.res_count);

	if (frame_size < 33 || frame_size > 2364) {
		/* Frame Size error: drop this packet. */
		dev_dbg(&priv->pcid->dev, "Wrong frame size %d\n", frame_size);
		dev_kfree_skb_irq(skb);
		return true;
	}

	if (!vnt_rx_data(priv, skb, frame_size))
		dev_kfree_skb_irq(skb);

	return true;
}
| linux-master | drivers/staging/vt6655/dpc.c |
// SPDX-License-Identifier: GPL-2.0+
/*
* Copyright (c) 1996, 2003 VIA Networking Technologies, Inc.
* All rights reserved.
*
* Purpose: MAC routines
*
* Author: Tevin Chen
*
* Date: May 21, 1996
*
* Functions:
* vt6655_mac_is_reg_bits_off - Test if All test Bits Off
* vt6655_mac_set_short_retry_limit - Set 802.11 Short Retry limit
* MACvSetLongRetryLimit - Set 802.11 Long Retry limit
* vt6655_mac_set_loopback_mode - Set MAC Loopback Mode
* vt6655_mac_save_context - Save Context of MAC Registers
* vt6655_mac_restore_context - Restore Context of MAC Registers
* MACbSoftwareReset - Software Reset MAC
* vt6655_mac_safe_rx_off - Turn Off MAC Rx
* vt6655_mac_safe_tx_off - Turn Off MAC Tx
* vt6655_mac_safe_stop - Stop MAC function
* MACbShutdown - Shut down MAC
* MACvInitialize - Initialize MAC
* MACvSetCurrRxDescAddr - Set Rx Descriptors Address
* MACvSetCurrTx0DescAddr - Set Tx0 Descriptors Address
* MACvSetCurrTx1DescAddr - Set Tx1 Descriptors Address
* MACvTimer0MicroSDelay - Micro Second Delay Loop by MAC
*
* Revision History:
* 08-22-2003 Kyle Hsu : Porting MAC functions from sim53
* 09-03-2003 Bryan YC Fan : Add MACvClearBusSusInd()&
* MACvEnableBusSusEn()
* 09-18-2003 Jerry Chen : Add MACvSetKeyEntry & MACvDisableKeyEntry
*
*/
#include "mac.h"
/* Read-modify-write: set bit_mask in an 8-bit MAC register, keep other bits. */
void vt6655_mac_reg_bits_on(void __iomem *iobase, const u8 reg_offset, const u8 bit_mask)
{
	u8 cur = ioread8(iobase + reg_offset);

	iowrite8(cur | bit_mask, iobase + reg_offset);
}
/* Read-modify-write: set bit_mask in a 16-bit MAC register, keep other bits. */
void vt6655_mac_word_reg_bits_on(void __iomem *iobase, const u8 reg_offset, const u16 bit_mask)
{
	u16 cur = ioread16(iobase + reg_offset);

	iowrite16(cur | bit_mask, iobase + reg_offset);
}
/* Read-modify-write: clear bit_mask in an 8-bit MAC register, keep other bits. */
void vt6655_mac_reg_bits_off(void __iomem *iobase, const u8 reg_offset, const u8 bit_mask)
{
	u8 cur = ioread8(iobase + reg_offset);

	iowrite8(cur & ~bit_mask, iobase + reg_offset);
}
/* Read-modify-write: clear bit_mask in a 16-bit MAC register, keep other bits. */
void vt6655_mac_word_reg_bits_off(void __iomem *iobase, const u8 reg_offset, const u16 bit_mask)
{
	u16 cur = ioread16(iobase + reg_offset);

	iowrite16(cur & ~bit_mask, iobase + reg_offset);
}
/* Clear the two low sticky bits in the STICKHW register (mask 0xFC keeps
 * bits 7..2 intact; from the name these look like the DS bits — the exact
 * hardware meaning is not visible here).
 */
static void vt6655_mac_clear_stck_ds(void __iomem *iobase)
{
	u8 stickhw = ioread8(iobase + MAC_REG_STICKHW);

	iowrite8(stickhw & 0xFC, iobase + MAC_REG_STICKHW);
}
/*
* Description:
* Test if all test bits off
*
* Parameters:
* In:
* io_base - Base Address for MAC
* reg_offset - Offset of MAC Register
* mask - Test bits
* Out:
* none
*
* Return Value: true if all test bits Off; otherwise false
*
*/
/* Return true when none of the bits in @mask are set in the register. */
static bool vt6655_mac_is_reg_bits_off(struct vnt_private *priv,
				       unsigned char reg_offset,
				       unsigned char mask)
{
	return (ioread8(priv->port_offset + reg_offset) & mask) == 0;
}
/*
* Description:
* Set 802.11 Short Retry Limit
*
* Parameters:
* In:
* io_base - Base Address for MAC
* retry_limit - Retry Limit
* Out:
* none
*
* Return Value: none
*
*/
/* Program the 802.11 short retry limit into the SRT register. */
void vt6655_mac_set_short_retry_limit(struct vnt_private *priv, unsigned char retry_limit)
{
	iowrite8(retry_limit, priv->port_offset + MAC_REG_SRT);
}
/*
* Description:
* Set 802.11 Long Retry Limit
*
* Parameters:
* In:
* io_base - Base Address for MAC
* byRetryLimit- Retry Limit
* Out:
* none
*
* Return Value: none
*
*/
/*
 * MACvSetLongRetryLimit - program the 802.11 long retry limit.
 * @priv:        device private data
 * @retry_limit: retry count to write to the LRT register
 *
 * Parameter renamed from Hungarian-notation byRetryLimit for consistency
 * with vt6655_mac_set_short_retry_limit(); behavior is unchanged.
 */
void MACvSetLongRetryLimit(struct vnt_private *priv,
			   unsigned char retry_limit)
{
	void __iomem *io_base = priv->port_offset;

	/* set LRT */
	iowrite8(retry_limit, io_base + MAC_REG_LRT);
}
/*
* Description:
* Set MAC Loopback mode
*
* Parameters:
* In:
* io_base - Base Address for MAC
* loopback_mode - Loopback Mode
* Out:
* none
*
* Return Value: none
*
*/
/* Select the MAC loopback mode; the mode occupies the top two bits of the
 * TEST register, the low six bits are preserved.
 */
static void vt6655_mac_set_loopback_mode(struct vnt_private *priv, u8 loopback_mode)
{
	void __iomem *io_base = priv->port_offset;
	u8 test_reg = ioread8(io_base + MAC_REG_TEST) & 0x3f;

	iowrite8(test_reg | (u8)(loopback_mode << 6), io_base + MAC_REG_TEST);
}
/*
* Description:
* Save MAC registers to context buffer
*
* Parameters:
* In:
* io_base - Base Address for MAC
* Out:
* cxt_buf - Context buffer
*
* Return Value: none
*
*/
/*
 * Snapshot the MAC register file into @cxt_buf: page 0 registers first,
 * then page 1 registers appended after MAC_MAX_CONTEXT_SIZE_PAGE0 bytes.
 * The page-select is restored to page 0 before returning.
 */
static void vt6655_mac_save_context(struct vnt_private *priv, u8 *cxt_buf)
{
	void __iomem *io_base = priv->port_offset;

	/* read page0 register */
	memcpy_fromio(cxt_buf, io_base, MAC_MAX_CONTEXT_SIZE_PAGE0);

	VT6655_MAC_SELECT_PAGE1(io_base);

	/* read page1 register */
	memcpy_fromio(cxt_buf + MAC_MAX_CONTEXT_SIZE_PAGE0, io_base,
		      MAC_MAX_CONTEXT_SIZE_PAGE1);

	VT6655_MAC_SELECT_PAGE0(io_base);
}
/*
* Description:
* Restore MAC registers from context buffer
*
* Parameters:
* In:
* io_base - Base Address for MAC
* cxt_buf - Context buffer
* Out:
* none
*
* Return Value: none
*
*/
/*
 * Write a register snapshot produced by vt6655_mac_save_context() back to
 * the chip. Page 1 is restored wholesale; page 0 is restored piecewise so
 * that status/interrupt registers (ISR region) and the page-select register
 * itself are skipped. The DMA descriptor pointers are written last.
 *
 * The buffer layout mirrors the register map, so cxt_buf + MAC_REG_xxx is
 * the saved value of that register.
 */
static void vt6655_mac_restore_context(struct vnt_private *priv, u8 *cxt_buf)
{
	void __iomem *io_base = priv->port_offset;

	VT6655_MAC_SELECT_PAGE1(io_base);
	/* restore page1 */
	memcpy_toio(io_base, cxt_buf + MAC_MAX_CONTEXT_SIZE_PAGE0,
		    MAC_MAX_CONTEXT_SIZE_PAGE1);
	VT6655_MAC_SELECT_PAGE0(io_base);
	/* restore RCR,TCR,IMR... (stop before the ISR region) */
	memcpy_toio(io_base + MAC_REG_RCR, cxt_buf + MAC_REG_RCR,
		    MAC_REG_ISR - MAC_REG_RCR);
	/* restore MAC Config. (stop before the page-select register) */
	memcpy_toio(io_base + MAC_REG_LRT, cxt_buf + MAC_REG_LRT,
		    MAC_REG_PAGE1SEL - MAC_REG_LRT);
	iowrite8(*(cxt_buf + MAC_REG_CFG), io_base + MAC_REG_CFG);
	/* restore PS Config. */
	memcpy_toio(io_base + MAC_REG_PSCFG, cxt_buf + MAC_REG_PSCFG,
		    MAC_REG_BBREGCTL - MAC_REG_PSCFG);
	/* restore CURR_RX_DESC_ADDR, CURR_TX_DESC_ADDR */
	/* NOTE(review): the u32 casts assume cxt_buf is 4-byte aligned at
	 * these offsets — confirm (callers pass a stack array).
	 */
	iowrite32(*(u32 *)(cxt_buf + MAC_REG_TXDMAPTR0),
		  io_base + MAC_REG_TXDMAPTR0);
	iowrite32(*(u32 *)(cxt_buf + MAC_REG_AC0DMAPTR),
		  io_base + MAC_REG_AC0DMAPTR);
	iowrite32(*(u32 *)(cxt_buf + MAC_REG_BCNDMAPTR),
		  io_base + MAC_REG_BCNDMAPTR);
	iowrite32(*(u32 *)(cxt_buf + MAC_REG_RXDMAPTR0),
		  io_base + MAC_REG_RXDMAPTR0);
	iowrite32(*(u32 *)(cxt_buf + MAC_REG_RXDMAPTR1),
		  io_base + MAC_REG_RXDMAPTR1);
}
/*
* Description:
* Software Reset MAC
*
* Parameters:
* In:
* io_base - Base Address for MAC
* Out:
* none
*
* Return Value: true if Reset Success; otherwise false
*
*/
/*
 * MACbSoftwareReset - trigger a MAC soft reset and wait for completion.
 *
 * Returns true when the hardware clears HOSTCR_SOFTRST within
 * W_MAX_TIMEOUT polls, false on timeout.
 */
bool MACbSoftwareReset(struct vnt_private *priv)
{
	void __iomem *io_base = priv->port_offset;
	unsigned short poll;

	/* Writing 0x01 (HOSTCR_SOFTRST) starts the reset. */
	iowrite8(0x01, io_base + MAC_REG_HOSTCR);

	for (poll = 0; poll < W_MAX_TIMEOUT; poll++) {
		if (!(ioread8(io_base + MAC_REG_HOSTCR) & HOSTCR_SOFTRST))
			return true;
	}

	return false;
}
/*
* Description:
* save some important register's value, then do reset, then restore
* register's value
*
* Parameters:
* In:
* io_base - Base Address for MAC
* Out:
* none
*
* Return Value: true if success; otherwise false
*
*/
/*
 * Soft-reset the MAC without losing configuration: snapshot both register
 * pages, perform the reset, then write the snapshot back (workaround for
 * the reset clobbering configuration registers).
 */
static void vt6655_mac_save_soft_reset(struct vnt_private *priv)
{
	u8 reg_snapshot[MAC_MAX_CONTEXT_SIZE_PAGE0 + MAC_MAX_CONTEXT_SIZE_PAGE1];

	vt6655_mac_save_context(priv, reg_snapshot);
	MACbSoftwareReset(priv);
	vt6655_mac_restore_context(priv, reg_snapshot);
}
/*
* Description:
* Turn Off MAC Rx
*
* Parameters:
* In:
* io_base - Base Address for MAC
* Out:
* none
*
* Return Value: true if success; otherwise false
*
*/
/*
 * Stop both RX DMA engines and the RX state machine, polling each for
 * completion. Returns false (with a debug marker) on any timeout.
 */
static bool vt6655_mac_safe_rx_off(struct vnt_private *priv)
{
	void __iomem *io_base = priv->port_offset;
	unsigned short poll;

	/* Clear the RUN bit of both RX DMA engines. */
	iowrite32(DMACTL_CLRRUN, io_base + MAC_REG_RXDMACTL0);
	iowrite32(DMACTL_CLRRUN, io_base + MAC_REG_RXDMACTL1);

	/* Wait for RX DMA 0 to stop. */
	poll = 0;
	while (ioread32(io_base + MAC_REG_RXDMACTL0) & DMACTL_RUN) {
		if (++poll >= W_MAX_TIMEOUT) {
			pr_debug(" DBG_PORT80(0x10)\n");
			return false;
		}
	}

	/* Wait for RX DMA 1 to stop. */
	poll = 0;
	while (ioread32(io_base + MAC_REG_RXDMACTL1) & DMACTL_RUN) {
		if (++poll >= W_MAX_TIMEOUT) {
			pr_debug(" DBG_PORT80(0x11)\n");
			return false;
		}
	}

	/* Disable RX and wait for the RX-on status bit to drop. */
	vt6655_mac_reg_bits_off(io_base, MAC_REG_HOSTCR, HOSTCR_RXON);
	poll = 0;
	while (ioread8(io_base + MAC_REG_HOSTCR) & HOSTCR_RXONST) {
		if (++poll >= W_MAX_TIMEOUT) {
			pr_debug(" DBG_PORT80(0x12)\n");
			return false;
		}
	}

	return true;
}
/*
* Description:
* Turn Off MAC Tx
*
* Parameters:
* In:
* io_base - Base Address for MAC
* Out:
* none
*
* Return Value: true if success; otherwise false
*
*/
/*
 * Stop both TX DMA engines (TX0 and AC0) and the TX state machine,
 * polling each for completion. Returns false (with a debug marker) on
 * any timeout.
 */
static bool vt6655_mac_safe_tx_off(struct vnt_private *priv)
{
	void __iomem *io_base = priv->port_offset;
	unsigned short poll;

	/* Clear the RUN bit of both TX DMA engines. */
	iowrite32(DMACTL_CLRRUN, io_base + MAC_REG_TXDMACTL0);
	iowrite32(DMACTL_CLRRUN, io_base + MAC_REG_AC0DMACTL);

	/* Wait for TX DMA 0 to stop. */
	poll = 0;
	while (ioread32(io_base + MAC_REG_TXDMACTL0) & DMACTL_RUN) {
		if (++poll >= W_MAX_TIMEOUT) {
			pr_debug(" DBG_PORT80(0x20)\n");
			return false;
		}
	}

	/* Wait for the AC0 DMA engine to stop. */
	poll = 0;
	while (ioread32(io_base + MAC_REG_AC0DMACTL) & DMACTL_RUN) {
		if (++poll >= W_MAX_TIMEOUT) {
			pr_debug(" DBG_PORT80(0x21)\n");
			return false;
		}
	}

	/* Disable TX and wait for the TX-on status bit to drop. */
	vt6655_mac_reg_bits_off(io_base, MAC_REG_HOSTCR, HOSTCR_TXON);
	poll = 0;
	while (ioread8(io_base + MAC_REG_HOSTCR) & HOSTCR_TXONST) {
		if (++poll >= W_MAX_TIMEOUT) {
			pr_debug(" DBG_PORT80(0x24)\n");
			return false;
		}
	}

	return true;
}
/*
* Description:
* Stop MAC function
*
* Parameters:
* In:
* io_base - Base Address for MAC
* Out:
* none
*
* Return Value: true if success; otherwise false
*
*/
/*
 * Bring the MAC to a full stop: disable beacon auto-TX, shut down RX and
 * TX paths, then clear the MAC-enable bit. If either path fails to stop,
 * fall back to a context-preserving soft reset and report failure.
 */
static bool vt6655_mac_safe_stop(struct vnt_private *priv)
{
	void __iomem *io_base = priv->port_offset;

	vt6655_mac_reg_bits_off(io_base, MAC_REG_TCR, TCR_AUTOBCNTX);

	if (!vt6655_mac_safe_rx_off(priv)) {
		pr_debug(" vt6655_mac_safe_rx_off == false)\n");
		goto reset;
	}

	if (!vt6655_mac_safe_tx_off(priv)) {
		pr_debug(" vt6655_mac_safe_tx_off == false)\n");
		goto reset;
	}

	vt6655_mac_reg_bits_off(io_base, MAC_REG_HOSTCR, HOSTCR_MACEN);
	return true;

reset:
	vt6655_mac_save_soft_reset(priv);
	return false;
}
/*
* Description:
* Shut Down MAC
*
* Parameters:
* In:
* io_base - Base Address for MAC
* Out:
* none
*
* Return Value: true if success; otherwise false
*
*/
/*
 * MACbShutdown - disable interrupts and stop the adapter.
 *
 * The MAC is put into internal loopback while stopping, and loopback is
 * always switched back off afterwards. Returns the result of the stop.
 */
bool MACbShutdown(struct vnt_private *priv)
{
	void __iomem *io_base = priv->port_offset;
	bool stopped;

	/* Mask all MAC interrupts. */
	iowrite32(0, io_base + MAC_REG_IMR);

	vt6655_mac_set_loopback_mode(priv, MAC_LB_INTERNAL);
	stopped = vt6655_mac_safe_stop(priv);
	vt6655_mac_set_loopback_mode(priv, MAC_LB_NONE);

	return stopped;
}
/*
* Description:
* Initialize MAC
*
* Parameters:
* In:
* io_base - Base Address for MAC
* Out:
* none
*
* Return Value: none
*
*/
/*
 * MACvInitialize - basic MAC bring-up: clear sticky power bits, disable
 * the PME override, soft-reset the MAC, then reset and enable the TSF
 * counter. No return value; MACbSoftwareReset()'s result is ignored here.
 */
void MACvInitialize(struct vnt_private *priv)
{
	void __iomem *io_base = priv->port_offset;
	/* clear sticky bits */
	vt6655_mac_clear_stck_ds(io_base);
	/* disable force PME-enable */
	iowrite8(PME_OVR, io_base + MAC_REG_PMC1);
	/* only 3253 A */
	/* do reset */
	MACbSoftwareReset(priv);
	/* reset TSF counter */
	iowrite8(TFTCTL_TSFCNTRST, io_base + MAC_REG_TFTCTL);
	/* enable TSF counter */
	iowrite8(TFTCTL_TSFCNTREN, io_base + MAC_REG_TFTCTL);
}
/*
* Description:
* Set the chip with current rx descriptor address
*
* Parameters:
* In:
* io_base - Base Address for MAC
* curr_desc_addr - Descriptor Address
* Out:
* none
*
* Return Value: none
*
*/
/*
 * Update the RX DMA 0 descriptor pointer. If the engine is running it is
 * paused first (write to CTL+2 clears RUN), the new address is written,
 * and the engine is restarted.
 */
void vt6655_mac_set_curr_rx_0_desc_addr(struct vnt_private *priv, u32 curr_desc_addr)
{
	void __iomem *io_base = priv->port_offset;
	unsigned short poll;
	bool was_running;

	was_running = ioread8(io_base + MAC_REG_RXDMACTL0) & DMACTL_RUN;
	if (was_running)
		iowrite8(DMACTL_RUN, io_base + MAC_REG_RXDMACTL0 + 2);

	/* Wait (bounded) for the engine to pause. */
	for (poll = 0; poll < W_MAX_TIMEOUT; poll++) {
		if (!(ioread8(io_base + MAC_REG_RXDMACTL0) & DMACTL_RUN))
			break;
	}

	iowrite32(curr_desc_addr, io_base + MAC_REG_RXDMAPTR0);

	if (was_running)
		iowrite8(DMACTL_RUN, io_base + MAC_REG_RXDMACTL0);
}
/*
* Description:
* Set the chip with current rx descriptor address
*
* Parameters:
* In:
* io_base - Base Address for MAC
* curr_desc_addr - Descriptor Address
* Out:
* none
*
* Return Value: none
*
*/
/*
 * Update the RX DMA 1 descriptor pointer. If the engine is running it is
 * paused first (write to CTL+2 clears RUN), the new address is written,
 * and the engine is restarted.
 */
void vt6655_mac_set_curr_rx_1_desc_addr(struct vnt_private *priv, u32 curr_desc_addr)
{
	void __iomem *io_base = priv->port_offset;
	unsigned short poll;
	bool was_running;

	was_running = ioread8(io_base + MAC_REG_RXDMACTL1) & DMACTL_RUN;
	if (was_running)
		iowrite8(DMACTL_RUN, io_base + MAC_REG_RXDMACTL1 + 2);

	/* Wait (bounded) for the engine to pause. */
	for (poll = 0; poll < W_MAX_TIMEOUT; poll++) {
		if (!(ioread8(io_base + MAC_REG_RXDMACTL1) & DMACTL_RUN))
			break;
	}

	iowrite32(curr_desc_addr, io_base + MAC_REG_RXDMAPTR1);

	if (was_running)
		iowrite8(DMACTL_RUN, io_base + MAC_REG_RXDMACTL1);
}
/*
* Description:
* Set the chip with current tx0 descriptor address
*
* Parameters:
* In:
* io_base - Base Address for MAC
* curr_desc_addr - Descriptor Address
* Out:
* none
*
* Return Value: none
*
*/
/*
 * Update the TX DMA 0 descriptor pointer. If the engine is running it is
 * paused first (write to CTL+2 clears RUN), the new address is written,
 * and the engine is restarted.
 */
static void vt6655_mac_set_curr_tx_0_desc_addr_ex(struct vnt_private *priv, u32 curr_desc_addr)
{
	void __iomem *io_base = priv->port_offset;
	unsigned short poll;
	bool was_running;

	was_running = ioread8(io_base + MAC_REG_TXDMACTL0) & DMACTL_RUN;
	if (was_running)
		iowrite8(DMACTL_RUN, io_base + MAC_REG_TXDMACTL0 + 2);

	/* Wait (bounded) for the engine to pause. */
	for (poll = 0; poll < W_MAX_TIMEOUT; poll++) {
		if (!(ioread8(io_base + MAC_REG_TXDMACTL0) & DMACTL_RUN))
			break;
	}

	iowrite32(curr_desc_addr, io_base + MAC_REG_TXDMAPTR0);

	if (was_running)
		iowrite8(DMACTL_RUN, io_base + MAC_REG_TXDMACTL0);
}
/*
* Description:
* Set the chip with current AC0 descriptor address
*
* Parameters:
* In:
* io_base - Base Address for MAC
* curr_desc_addr - Descriptor Address
* Out:
* none
*
* Return Value: none
*
*/
/* TxDMA1 = AC0DMA */
/* TxDMA1 = AC0DMA */
/*
 * Update the AC0 (TX DMA 1) descriptor pointer. If the engine is running
 * it is paused first (write to CTL+2 clears RUN); a pause timeout is
 * reported via pr_debug but the pointer is written regardless, matching
 * the other descriptor-pointer setters.
 */
static void vt6655_mac_set_curr_ac_0_desc_addr_ex(struct vnt_private *priv, u32 curr_desc_addr)
{
	void __iomem *io_base = priv->port_offset;
	unsigned short poll;
	bool was_running;

	was_running = ioread8(io_base + MAC_REG_AC0DMACTL) & DMACTL_RUN;
	if (was_running)
		iowrite8(DMACTL_RUN, io_base + MAC_REG_AC0DMACTL + 2);

	for (poll = 0; poll < W_MAX_TIMEOUT; poll++) {
		if (!(ioread8(io_base + MAC_REG_AC0DMACTL) & DMACTL_RUN))
			break;
	}
	if (poll == W_MAX_TIMEOUT)
		pr_debug(" DBG_PORT80(0x26)\n");

	iowrite32(curr_desc_addr, io_base + MAC_REG_AC0DMAPTR);

	if (was_running)
		iowrite8(DMACTL_RUN, io_base + MAC_REG_AC0DMACTL);
}
/* Dispatch a descriptor-pointer update to the engine named by tx_type;
 * unknown types are silently ignored.
 */
void vt6655_mac_set_curr_tx_desc_addr(int tx_type, struct vnt_private *priv, u32 curr_desc_addr)
{
	switch (tx_type) {
	case TYPE_AC0DMA:
		vt6655_mac_set_curr_ac_0_desc_addr_ex(priv, curr_desc_addr);
		break;
	case TYPE_TXDMA0:
		vt6655_mac_set_curr_tx_0_desc_addr_ex(priv, curr_desc_addr);
		break;
	default:
		break;
	}
}
/*
* Description:
* Micro Second Delay via MAC
*
* Parameters:
* In:
* io_base - Base Address for MAC
* uDelay - Delay time (timer resolution is 4 us)
* Out:
* none
*
* Return Value: none
*
*/
/*
 * MACvTimer0MicroSDelay - busy-wait delay driven by MAC timer 0.
 * @priv:   device private data
 * @uDelay: delay value programmed into TMDATA0 (timer resolution is 4 us
 *          per the header comment above)
 *
 * Starts timer 0 and spins reading TMCTL0 until the timer expires
 * (register reads 0) or reports suspend (TMCTL_TSUSP). The nested loop is
 * only a bounded spin budget — 66 * uDelay reads — as a safety net; the
 * timer is always stopped (TMCTL0 cleared) before returning.
 */
void MACvTimer0MicroSDelay(struct vnt_private *priv, unsigned int uDelay)
{
	void __iomem *io_base = priv->port_offset;
	unsigned char byValue;
	unsigned int uu, ii;

	/* Stop the timer, load the delay, then start in one-shot mode. */
	iowrite8(0, io_base + MAC_REG_TMCTL0);
	iowrite32(uDelay, io_base + MAC_REG_TMDATA0);
	iowrite8((TMCTL_TMD | TMCTL_TE), io_base + MAC_REG_TMCTL0);
	for (ii = 0; ii < 66; ii++) { /* assume max PCI clock is 66Mhz */
		for (uu = 0; uu < uDelay; uu++) {
			byValue = ioread8(io_base + MAC_REG_TMCTL0);
			/* 0 => timer expired; TSUSP => timer suspended. */
			if ((byValue == 0) ||
			    (byValue & TMCTL_TSUSP)) {
				iowrite8(0, io_base + MAC_REG_TMCTL0);
				return;
			}
		}
	}
	iowrite8(0, io_base + MAC_REG_TMCTL0);
}
/*
* Description:
* Micro Second One shot timer via MAC
*
* Parameters:
* In:
* io_base - Base Address for MAC
* uDelay - Delay time
* Out:
* none
*
* Return Value: none
*
*/
/*
 * MACvOneShotTimer1MicroSec - arm MAC timer 1 as a one-shot.
 * @priv:       device private data
 * @uDelayTime: delay value written to TMDATA1
 *
 * Stops the timer, loads the delay, and starts it in one-shot mode
 * (TMCTL_TMD | TMCTL_TE). Does not wait for expiry.
 */
void MACvOneShotTimer1MicroSec(struct vnt_private *priv,
			       unsigned int uDelayTime)
{
	void __iomem *io_base = priv->port_offset;

	iowrite8(0, io_base + MAC_REG_TMCTL1);
	iowrite32(uDelayTime, io_base + MAC_REG_TMDATA1);
	iowrite8((TMCTL_TMD | TMCTL_TE), io_base + MAC_REG_TMCTL1);
}
/*
 * MACvSetMISCFifo - write one 32-bit word into the MISC FIFO.
 * @priv:   device private data
 * @offset: FIFO index; writes with offset > 273 are silently dropped
 * @data:   value to store
 *
 * Sequence: select the index, load the data, then pulse the write strobe.
 * NOTE(review): 273 looks like the last valid MISC FIFO index — confirm
 * against the chip documentation.
 */
void MACvSetMISCFifo(struct vnt_private *priv, unsigned short offset,
		     u32 data)
{
	void __iomem *io_base = priv->port_offset;

	if (offset > 273)
		return;
	iowrite16(offset, io_base + MAC_REG_MISCFFNDEX);
	iowrite32(data, io_base + MAC_REG_MISCFFDATA);
	iowrite16(MISCFFCTL_WRITE, io_base + MAC_REG_MISCFFCTL);
}
/*
 * MACbPSWakeup - bring the MAC out of power-save mode.
 *
 * Returns true immediately if PS is already off; otherwise disables PS
 * and polls for PSCTL_WAKEDONE. Returns false on timeout.
 */
bool MACbPSWakeup(struct vnt_private *priv)
{
	void __iomem *io_base = priv->port_offset;
	unsigned int poll;

	/* Nothing to do when the PS status bit is already clear. */
	if (vt6655_mac_is_reg_bits_off(priv, MAC_REG_PSCTL, PSCTL_PS))
		return true;

	vt6655_mac_reg_bits_off(io_base, MAC_REG_PSCTL, PSCTL_PSEN);

	for (poll = 0; poll < W_MAX_TIMEOUT; poll++) {
		if (ioread8(io_base + MAC_REG_PSCTL) & PSCTL_WAKEDONE)
			return true;
	}

	pr_debug(" DBG_PORT80(0x33)\n");
	return false;
}
/*
* Description:
* Set the Key by MISCFIFO
*
* Parameters:
* In:
* io_base - Base Address for MAC
*
* Out:
* none
*
* Return Value: none
*
*/
/*
 * MACvSetKeyEntry - load one key entry into the MISC FIFO key table.
 *
 * Entry layout written word by word:
 *   word 0: key control in the high 16 bits, MAC address bytes 4-5 low
 *   word 1: MAC address bytes 0-3, least-significant byte first
 *   words at +2 + uKeyIdx*4: four 32-bit key words (always 128 bits)
 *
 * Nothing is written when local_id <= 1.
 */
void MACvSetKeyEntry(struct vnt_private *priv, unsigned short wKeyCtl,
		     unsigned int uEntryIdx, unsigned int uKeyIdx,
		     unsigned char *pbyAddr, u32 *pdwKey,
		     unsigned char local_id)
{
	void __iomem *io_base = priv->port_offset;
	unsigned short offset;
	u32 data;
	int word;

	if (local_id <= 1)
		return;

	offset = MISCFIFO_KEYETRY0 + uEntryIdx * MISCFIFO_KEYENTRYSIZE;

	data = ((u32)wKeyCtl << 16) | MAKEWORD(pbyAddr[4], pbyAddr[5]);
	pr_debug("1. offset: %d, Data: %X, KeyCtl:%X\n",
		 offset, data, wKeyCtl);

	iowrite16(offset, io_base + MAC_REG_MISCFFNDEX);
	iowrite32(data, io_base + MAC_REG_MISCFFDATA);
	iowrite16(MISCFFCTL_WRITE, io_base + MAC_REG_MISCFFCTL);
	offset++;

	data = ((u32)pbyAddr[3] << 24) | ((u32)pbyAddr[2] << 16) |
	       ((u32)pbyAddr[1] << 8) | pbyAddr[0];
	pr_debug("2. offset: %d, Data: %X\n", offset, data);

	iowrite16(offset, io_base + MAC_REG_MISCFFNDEX);
	iowrite32(data, io_base + MAC_REG_MISCFFDATA);
	iowrite16(MISCFFCTL_WRITE, io_base + MAC_REG_MISCFFCTL);
	offset++;

	offset += uKeyIdx * 4;
	for (word = 0; word < 4; word++) {
		/* always push 128 bits */
		pr_debug("3.(%d) offset: %d, Data: %X\n",
			 word, offset + word, *pdwKey);
		iowrite16(offset + word, io_base + MAC_REG_MISCFFNDEX);
		iowrite32(*pdwKey++, io_base + MAC_REG_MISCFFDATA);
		iowrite16(MISCFFCTL_WRITE, io_base + MAC_REG_MISCFFCTL);
	}
}
/*
* Description:
* Disable the Key Entry by MISCFIFO
*
* Parameters:
* In:
* io_base - Base Address for MAC
*
* Out:
* none
*
* Return Value: none
*
*/
/*
 * MACvDisableKeyEntry - disable a key entry by zeroing its first word
 * in the MISC FIFO key table.
 */
void MACvDisableKeyEntry(struct vnt_private *priv, unsigned int uEntryIdx)
{
	void __iomem *io_base = priv->port_offset;
	unsigned short entry_offset = MISCFIFO_KEYETRY0 +
				      uEntryIdx * MISCFIFO_KEYENTRYSIZE;

	iowrite16(entry_offset, io_base + MAC_REG_MISCFFNDEX);
	iowrite32(0, io_base + MAC_REG_MISCFFDATA);
	iowrite16(MISCFFCTL_WRITE, io_base + MAC_REG_MISCFFCTL);
}
| linux-master | drivers/staging/vt6655/mac.c |
// SPDX-License-Identifier: GPL-2.0+
/*
* Copyright (c) 1996, 2003 VIA Networking Technologies, Inc.
* All rights reserved.
*
* Purpose: Implement functions to access baseband
*
* Author: Kyle Hsu
*
* Date: Aug.22, 2002
*
* Functions:
* bb_get_frame_time - Calculate data frame transmitting time
* bb_read_embedded - Embedded read baseband register via MAC
* bb_write_embedded - Embedded write baseband register via MAC
* bb_vt3253_init - VIA VT3253 baseband chip init code
*
* Revision History:
* 06-10-2003 Bryan YC Fan: Re-write codes to support VT3253 spec.
* 08-07-2003 Bryan YC Fan: Add MAXIM2827/2825 and RFMD2959 support.
* 08-26-2003 Kyle Hsu : Modify BBuGetFrameTime() and
* BBvCalculateParameter().
* cancel the setting of MAC_REG_SOFTPWRCTL on
* BBbVT3253Init().
* Add the comments.
* 09-01-2003 Bryan YC Fan: RF & BB tables updated.
* Modified BBvLoopbackOn & BBvLoopbackOff().
*
*
*/
#include "mac.h"
#include "baseband.h"
#include "srom.h"
#include "rf.h"
/*--------------------- Static Classes ----------------------------*/
/*--------------------- Static Variables --------------------------*/
/*--------------------- Static Functions --------------------------*/
/*--------------------- Export Variables --------------------------*/
/*--------------------- Static Definitions -------------------------*/
/*--------------------- Static Classes ----------------------------*/
/*--------------------- Static Variables --------------------------*/
#define CB_VT3253_INIT_FOR_RFMD 446
static const unsigned char by_vt3253_init_tab_rfmd[CB_VT3253_INIT_FOR_RFMD][2] = {
{0x00, 0x30},
{0x01, 0x00},
{0x02, 0x00},
{0x03, 0x00},
{0x04, 0x00},
{0x05, 0x00},
{0x06, 0x00},
{0x07, 0x00},
{0x08, 0x70},
{0x09, 0x45},
{0x0a, 0x2a},
{0x0b, 0x76},
{0x0c, 0x00},
{0x0d, 0x01},
{0x0e, 0x80},
{0x0f, 0x00},
{0x10, 0x00},
{0x11, 0x00},
{0x12, 0x00},
{0x13, 0x00},
{0x14, 0x00},
{0x15, 0x00},
{0x16, 0x00},
{0x17, 0x00},
{0x18, 0x00},
{0x19, 0x00},
{0x1a, 0x00},
{0x1b, 0x9d},
{0x1c, 0x05},
{0x1d, 0x00},
{0x1e, 0x00},
{0x1f, 0x00},
{0x20, 0x00},
{0x21, 0x00},
{0x22, 0x00},
{0x23, 0x00},
{0x24, 0x00},
{0x25, 0x4a},
{0x26, 0x00},
{0x27, 0x00},
{0x28, 0x00},
{0x29, 0x00},
{0x2a, 0x00},
{0x2b, 0x00},
{0x2c, 0x00},
{0x2d, 0xa8},
{0x2e, 0x1a},
{0x2f, 0x0c},
{0x30, 0x26},
{0x31, 0x5b},
{0x32, 0x00},
{0x33, 0x00},
{0x34, 0x00},
{0x35, 0x00},
{0x36, 0xaa},
{0x37, 0xaa},
{0x38, 0xff},
{0x39, 0xff},
{0x3a, 0x00},
{0x3b, 0x00},
{0x3c, 0x00},
{0x3d, 0x0d},
{0x3e, 0x51},
{0x3f, 0x04},
{0x40, 0x00},
{0x41, 0x08},
{0x42, 0x00},
{0x43, 0x08},
{0x44, 0x06},
{0x45, 0x14},
{0x46, 0x05},
{0x47, 0x08},
{0x48, 0x00},
{0x49, 0x00},
{0x4a, 0x00},
{0x4b, 0x00},
{0x4c, 0x09},
{0x4d, 0x80},
{0x4e, 0x00},
{0x4f, 0xc5},
{0x50, 0x14},
{0x51, 0x19},
{0x52, 0x00},
{0x53, 0x00},
{0x54, 0x00},
{0x55, 0x00},
{0x56, 0x00},
{0x57, 0x00},
{0x58, 0x00},
{0x59, 0xb0},
{0x5a, 0x00},
{0x5b, 0x00},
{0x5c, 0x00},
{0x5d, 0x00},
{0x5e, 0x00},
{0x5f, 0x00},
{0x60, 0x44},
{0x61, 0x04},
{0x62, 0x00},
{0x63, 0x00},
{0x64, 0x00},
{0x65, 0x00},
{0x66, 0x04},
{0x67, 0xb7},
{0x68, 0x00},
{0x69, 0x00},
{0x6a, 0x00},
{0x6b, 0x00},
{0x6c, 0x00},
{0x6d, 0x03},
{0x6e, 0x01},
{0x6f, 0x00},
{0x70, 0x00},
{0x71, 0x00},
{0x72, 0x00},
{0x73, 0x00},
{0x74, 0x00},
{0x75, 0x00},
{0x76, 0x00},
{0x77, 0x00},
{0x78, 0x00},
{0x79, 0x00},
{0x7a, 0x00},
{0x7b, 0x00},
{0x7c, 0x00},
{0x7d, 0x00},
{0x7e, 0x00},
{0x7f, 0x00},
{0x80, 0x0b},
{0x81, 0x00},
{0x82, 0x3c},
{0x83, 0x00},
{0x84, 0x00},
{0x85, 0x00},
{0x86, 0x00},
{0x87, 0x00},
{0x88, 0x08},
{0x89, 0x00},
{0x8a, 0x08},
{0x8b, 0xa6},
{0x8c, 0x84},
{0x8d, 0x47},
{0x8e, 0xbb},
{0x8f, 0x02},
{0x90, 0x21},
{0x91, 0x0c},
{0x92, 0x04},
{0x93, 0x22},
{0x94, 0x00},
{0x95, 0x00},
{0x96, 0x00},
{0x97, 0xeb},
{0x98, 0x00},
{0x99, 0x00},
{0x9a, 0x00},
{0x9b, 0x00},
{0x9c, 0x00},
{0x9d, 0x00},
{0x9e, 0x00},
{0x9f, 0x00},
{0xa0, 0x00},
{0xa1, 0x00},
{0xa2, 0x00},
{0xa3, 0x00},
{0xa4, 0x00},
{0xa5, 0x00},
{0xa6, 0x10},
{0xa7, 0x04},
{0xa8, 0x10},
{0xa9, 0x00},
{0xaa, 0x8f},
{0xab, 0x00},
{0xac, 0x00},
{0xad, 0x00},
{0xae, 0x00},
{0xaf, 0x80},
{0xb0, 0x38},
{0xb1, 0x00},
{0xb2, 0x00},
{0xb3, 0x00},
{0xb4, 0xee},
{0xb5, 0xff},
{0xb6, 0x10},
{0xb7, 0x00},
{0xb8, 0x00},
{0xb9, 0x00},
{0xba, 0x00},
{0xbb, 0x03},
{0xbc, 0x00},
{0xbd, 0x00},
{0xbe, 0x00},
{0xbf, 0x00},
{0xc0, 0x10},
{0xc1, 0x10},
{0xc2, 0x18},
{0xc3, 0x20},
{0xc4, 0x10},
{0xc5, 0x00},
{0xc6, 0x22},
{0xc7, 0x14},
{0xc8, 0x0f},
{0xc9, 0x08},
{0xca, 0xa4},
{0xcb, 0xa7},
{0xcc, 0x3c},
{0xcd, 0x10},
{0xce, 0x20},
{0xcf, 0x00},
{0xd0, 0x00},
{0xd1, 0x10},
{0xd2, 0x00},
{0xd3, 0x00},
{0xd4, 0x10},
{0xd5, 0x33},
{0xd6, 0x70},
{0xd7, 0x01},
{0xd8, 0x00},
{0xd9, 0x00},
{0xda, 0x00},
{0xdb, 0x00},
{0xdc, 0x00},
{0xdd, 0x00},
{0xde, 0x00},
{0xdf, 0x00},
{0xe0, 0x00},
{0xe1, 0x00},
{0xe2, 0xcc},
{0xe3, 0x04},
{0xe4, 0x08},
{0xe5, 0x10},
{0xe6, 0x00},
{0xe7, 0x0e},
{0xe8, 0x88},
{0xe9, 0xd4},
{0xea, 0x05},
{0xeb, 0xf0},
{0xec, 0x79},
{0xed, 0x0f},
{0xee, 0x04},
{0xef, 0x04},
{0xf0, 0x00},
{0xf1, 0x00},
{0xf2, 0x00},
{0xf3, 0x00},
{0xf4, 0x00},
{0xf5, 0x00},
{0xf6, 0x00},
{0xf7, 0x00},
{0xf8, 0x00},
{0xf9, 0x00},
{0xF0, 0x00},
{0xF1, 0xF8},
{0xF0, 0x80},
{0xF0, 0x00},
{0xF1, 0xF4},
{0xF0, 0x81},
{0xF0, 0x01},
{0xF1, 0xF0},
{0xF0, 0x82},
{0xF0, 0x02},
{0xF1, 0xEC},
{0xF0, 0x83},
{0xF0, 0x03},
{0xF1, 0xE8},
{0xF0, 0x84},
{0xF0, 0x04},
{0xF1, 0xE4},
{0xF0, 0x85},
{0xF0, 0x05},
{0xF1, 0xE0},
{0xF0, 0x86},
{0xF0, 0x06},
{0xF1, 0xDC},
{0xF0, 0x87},
{0xF0, 0x07},
{0xF1, 0xD8},
{0xF0, 0x88},
{0xF0, 0x08},
{0xF1, 0xD4},
{0xF0, 0x89},
{0xF0, 0x09},
{0xF1, 0xD0},
{0xF0, 0x8A},
{0xF0, 0x0A},
{0xF1, 0xCC},
{0xF0, 0x8B},
{0xF0, 0x0B},
{0xF1, 0xC8},
{0xF0, 0x8C},
{0xF0, 0x0C},
{0xF1, 0xC4},
{0xF0, 0x8D},
{0xF0, 0x0D},
{0xF1, 0xC0},
{0xF0, 0x8E},
{0xF0, 0x0E},
{0xF1, 0xBC},
{0xF0, 0x8F},
{0xF0, 0x0F},
{0xF1, 0xB8},
{0xF0, 0x90},
{0xF0, 0x10},
{0xF1, 0xB4},
{0xF0, 0x91},
{0xF0, 0x11},
{0xF1, 0xB0},
{0xF0, 0x92},
{0xF0, 0x12},
{0xF1, 0xAC},
{0xF0, 0x93},
{0xF0, 0x13},
{0xF1, 0xA8},
{0xF0, 0x94},
{0xF0, 0x14},
{0xF1, 0xA4},
{0xF0, 0x95},
{0xF0, 0x15},
{0xF1, 0xA0},
{0xF0, 0x96},
{0xF0, 0x16},
{0xF1, 0x9C},
{0xF0, 0x97},
{0xF0, 0x17},
{0xF1, 0x98},
{0xF0, 0x98},
{0xF0, 0x18},
{0xF1, 0x94},
{0xF0, 0x99},
{0xF0, 0x19},
{0xF1, 0x90},
{0xF0, 0x9A},
{0xF0, 0x1A},
{0xF1, 0x8C},
{0xF0, 0x9B},
{0xF0, 0x1B},
{0xF1, 0x88},
{0xF0, 0x9C},
{0xF0, 0x1C},
{0xF1, 0x84},
{0xF0, 0x9D},
{0xF0, 0x1D},
{0xF1, 0x80},
{0xF0, 0x9E},
{0xF0, 0x1E},
{0xF1, 0x7C},
{0xF0, 0x9F},
{0xF0, 0x1F},
{0xF1, 0x78},
{0xF0, 0xA0},
{0xF0, 0x20},
{0xF1, 0x74},
{0xF0, 0xA1},
{0xF0, 0x21},
{0xF1, 0x70},
{0xF0, 0xA2},
{0xF0, 0x22},
{0xF1, 0x6C},
{0xF0, 0xA3},
{0xF0, 0x23},
{0xF1, 0x68},
{0xF0, 0xA4},
{0xF0, 0x24},
{0xF1, 0x64},
{0xF0, 0xA5},
{0xF0, 0x25},
{0xF1, 0x60},
{0xF0, 0xA6},
{0xF0, 0x26},
{0xF1, 0x5C},
{0xF0, 0xA7},
{0xF0, 0x27},
{0xF1, 0x58},
{0xF0, 0xA8},
{0xF0, 0x28},
{0xF1, 0x54},
{0xF0, 0xA9},
{0xF0, 0x29},
{0xF1, 0x50},
{0xF0, 0xAA},
{0xF0, 0x2A},
{0xF1, 0x4C},
{0xF0, 0xAB},
{0xF0, 0x2B},
{0xF1, 0x48},
{0xF0, 0xAC},
{0xF0, 0x2C},
{0xF1, 0x44},
{0xF0, 0xAD},
{0xF0, 0x2D},
{0xF1, 0x40},
{0xF0, 0xAE},
{0xF0, 0x2E},
{0xF1, 0x3C},
{0xF0, 0xAF},
{0xF0, 0x2F},
{0xF1, 0x38},
{0xF0, 0xB0},
{0xF0, 0x30},
{0xF1, 0x34},
{0xF0, 0xB1},
{0xF0, 0x31},
{0xF1, 0x30},
{0xF0, 0xB2},
{0xF0, 0x32},
{0xF1, 0x2C},
{0xF0, 0xB3},
{0xF0, 0x33},
{0xF1, 0x28},
{0xF0, 0xB4},
{0xF0, 0x34},
{0xF1, 0x24},
{0xF0, 0xB5},
{0xF0, 0x35},
{0xF1, 0x20},
{0xF0, 0xB6},
{0xF0, 0x36},
{0xF1, 0x1C},
{0xF0, 0xB7},
{0xF0, 0x37},
{0xF1, 0x18},
{0xF0, 0xB8},
{0xF0, 0x38},
{0xF1, 0x14},
{0xF0, 0xB9},
{0xF0, 0x39},
{0xF1, 0x10},
{0xF0, 0xBA},
{0xF0, 0x3A},
{0xF1, 0x0C},
{0xF0, 0xBB},
{0xF0, 0x3B},
{0xF1, 0x08},
{0xF0, 0x00},
{0xF0, 0x3C},
{0xF1, 0x04},
{0xF0, 0xBD},
{0xF0, 0x3D},
{0xF1, 0x00},
{0xF0, 0xBE},
{0xF0, 0x3E},
{0xF1, 0x00},
{0xF0, 0xBF},
{0xF0, 0x3F},
{0xF1, 0x00},
{0xF0, 0xC0},
{0xF0, 0x00},
};
#define CB_VT3253B0_INIT_FOR_RFMD 256
static const unsigned char vt3253b0_rfmd[CB_VT3253B0_INIT_FOR_RFMD][2] = {
{0x00, 0x31},
{0x01, 0x00},
{0x02, 0x00},
{0x03, 0x00},
{0x04, 0x00},
{0x05, 0x81},
{0x06, 0x00},
{0x07, 0x00},
{0x08, 0x38},
{0x09, 0x45},
{0x0a, 0x2a},
{0x0b, 0x76},
{0x0c, 0x00},
{0x0d, 0x00},
{0x0e, 0x80},
{0x0f, 0x00},
{0x10, 0x00},
{0x11, 0x00},
{0x12, 0x00},
{0x13, 0x00},
{0x14, 0x00},
{0x15, 0x00},
{0x16, 0x00},
{0x17, 0x00},
{0x18, 0x00},
{0x19, 0x00},
{0x1a, 0x00},
{0x1b, 0x8e},
{0x1c, 0x06},
{0x1d, 0x00},
{0x1e, 0x00},
{0x1f, 0x00},
{0x20, 0x00},
{0x21, 0x00},
{0x22, 0x00},
{0x23, 0x00},
{0x24, 0x00},
{0x25, 0x4a},
{0x26, 0x00},
{0x27, 0x00},
{0x28, 0x00},
{0x29, 0x00},
{0x2a, 0x00},
{0x2b, 0x00},
{0x2c, 0x00},
{0x2d, 0x34},
{0x2e, 0x18},
{0x2f, 0x0c},
{0x30, 0x26},
{0x31, 0x5b},
{0x32, 0x00},
{0x33, 0x00},
{0x34, 0x00},
{0x35, 0x00},
{0x36, 0xaa},
{0x37, 0xaa},
{0x38, 0xff},
{0x39, 0xff},
{0x3a, 0xf8},
{0x3b, 0x00},
{0x3c, 0x00},
{0x3d, 0x09},
{0x3e, 0x0d},
{0x3f, 0x04},
{0x40, 0x00},
{0x41, 0x08},
{0x42, 0x00},
{0x43, 0x08},
{0x44, 0x08},
{0x45, 0x14},
{0x46, 0x05},
{0x47, 0x08},
{0x48, 0x00},
{0x49, 0x00},
{0x4a, 0x00},
{0x4b, 0x00},
{0x4c, 0x09},
{0x4d, 0x80},
{0x4e, 0x00},
{0x4f, 0xc5},
{0x50, 0x14},
{0x51, 0x19},
{0x52, 0x00},
{0x53, 0x00},
{0x54, 0x00},
{0x55, 0x00},
{0x56, 0x00},
{0x57, 0x00},
{0x58, 0x00},
{0x59, 0xb0},
{0x5a, 0x00},
{0x5b, 0x00},
{0x5c, 0x00},
{0x5d, 0x00},
{0x5e, 0x00},
{0x5f, 0x00},
{0x60, 0x39},
{0x61, 0x83},
{0x62, 0x00},
{0x63, 0x00},
{0x64, 0x00},
{0x65, 0x00},
{0x66, 0xc0},
{0x67, 0x49},
{0x68, 0x00},
{0x69, 0x00},
{0x6a, 0x00},
{0x6b, 0x00},
{0x6c, 0x00},
{0x6d, 0x03},
{0x6e, 0x01},
{0x6f, 0x00},
{0x70, 0x00},
{0x71, 0x00},
{0x72, 0x00},
{0x73, 0x00},
{0x74, 0x00},
{0x75, 0x00},
{0x76, 0x00},
{0x77, 0x00},
{0x78, 0x00},
{0x79, 0x00},
{0x7a, 0x00},
{0x7b, 0x00},
{0x7c, 0x00},
{0x7d, 0x00},
{0x7e, 0x00},
{0x7f, 0x00},
{0x80, 0x89},
{0x81, 0x00},
{0x82, 0x0e},
{0x83, 0x00},
{0x84, 0x00},
{0x85, 0x00},
{0x86, 0x00},
{0x87, 0x00},
{0x88, 0x08},
{0x89, 0x00},
{0x8a, 0x0e},
{0x8b, 0xa7},
{0x8c, 0x88},
{0x8d, 0x47},
{0x8e, 0xaa},
{0x8f, 0x02},
{0x90, 0x23},
{0x91, 0x0c},
{0x92, 0x06},
{0x93, 0x08},
{0x94, 0x00},
{0x95, 0x00},
{0x96, 0x00},
{0x97, 0xeb},
{0x98, 0x00},
{0x99, 0x00},
{0x9a, 0x00},
{0x9b, 0x00},
{0x9c, 0x00},
{0x9d, 0x00},
{0x9e, 0x00},
{0x9f, 0x00},
{0xa0, 0x00},
{0xa1, 0x00},
{0xa2, 0x00},
{0xa3, 0xcd},
{0xa4, 0x07},
{0xa5, 0x33},
{0xa6, 0x18},
{0xa7, 0x00},
{0xa8, 0x18},
{0xa9, 0x00},
{0xaa, 0x28},
{0xab, 0x00},
{0xac, 0x00},
{0xad, 0x00},
{0xae, 0x00},
{0xaf, 0x18},
{0xb0, 0x38},
{0xb1, 0x30},
{0xb2, 0x00},
{0xb3, 0x00},
{0xb4, 0x00},
{0xb5, 0x00},
{0xb6, 0x84},
{0xb7, 0xfd},
{0xb8, 0x00},
{0xb9, 0x00},
{0xba, 0x00},
{0xbb, 0x03},
{0xbc, 0x00},
{0xbd, 0x00},
{0xbe, 0x00},
{0xbf, 0x00},
{0xc0, 0x10},
{0xc1, 0x20},
{0xc2, 0x18},
{0xc3, 0x20},
{0xc4, 0x10},
{0xc5, 0x2c},
{0xc6, 0x1e},
{0xc7, 0x10},
{0xc8, 0x12},
{0xc9, 0x01},
{0xca, 0x6f},
{0xcb, 0xa7},
{0xcc, 0x3c},
{0xcd, 0x10},
{0xce, 0x00},
{0xcf, 0x22},
{0xd0, 0x00},
{0xd1, 0x10},
{0xd2, 0x00},
{0xd3, 0x00},
{0xd4, 0x10},
{0xd5, 0x33},
{0xd6, 0x80},
{0xd7, 0x21},
{0xd8, 0x00},
{0xd9, 0x00},
{0xda, 0x00},
{0xdb, 0x00},
{0xdc, 0x00},
{0xdd, 0x00},
{0xde, 0x00},
{0xdf, 0x00},
{0xe0, 0x00},
{0xe1, 0xB3},
{0xe2, 0x00},
{0xe3, 0x00},
{0xe4, 0x00},
{0xe5, 0x10},
{0xe6, 0x00},
{0xe7, 0x18},
{0xe8, 0x08},
{0xe9, 0xd4},
{0xea, 0x00},
{0xeb, 0xff},
{0xec, 0x79},
{0xed, 0x10},
{0xee, 0x30},
{0xef, 0x02},
{0xf0, 0x00},
{0xf1, 0x09},
{0xf2, 0x00},
{0xf3, 0x00},
{0xf4, 0x00},
{0xf5, 0x00},
{0xf6, 0x00},
{0xf7, 0x00},
{0xf8, 0x00},
{0xf9, 0x00},
{0xfa, 0x00},
{0xfb, 0x00},
{0xfc, 0x00},
{0xfd, 0x00},
{0xfe, 0x00},
{0xff, 0x00},
};
#define CB_VT3253B0_AGC_FOR_RFMD2959 195
/* For RFMD2959 */
static
unsigned char byVT3253B0_AGC4_RFMD2959[CB_VT3253B0_AGC_FOR_RFMD2959][2] = {
{0xF0, 0x00},
{0xF1, 0x3E},
{0xF0, 0x80},
{0xF0, 0x00},
{0xF1, 0x3E},
{0xF0, 0x81},
{0xF0, 0x01},
{0xF1, 0x3E},
{0xF0, 0x82},
{0xF0, 0x02},
{0xF1, 0x3E},
{0xF0, 0x83},
{0xF0, 0x03},
{0xF1, 0x3B},
{0xF0, 0x84},
{0xF0, 0x04},
{0xF1, 0x39},
{0xF0, 0x85},
{0xF0, 0x05},
{0xF1, 0x38},
{0xF0, 0x86},
{0xF0, 0x06},
{0xF1, 0x37},
{0xF0, 0x87},
{0xF0, 0x07},
{0xF1, 0x36},
{0xF0, 0x88},
{0xF0, 0x08},
{0xF1, 0x35},
{0xF0, 0x89},
{0xF0, 0x09},
{0xF1, 0x35},
{0xF0, 0x8A},
{0xF0, 0x0A},
{0xF1, 0x34},
{0xF0, 0x8B},
{0xF0, 0x0B},
{0xF1, 0x34},
{0xF0, 0x8C},
{0xF0, 0x0C},
{0xF1, 0x33},
{0xF0, 0x8D},
{0xF0, 0x0D},
{0xF1, 0x32},
{0xF0, 0x8E},
{0xF0, 0x0E},
{0xF1, 0x31},
{0xF0, 0x8F},
{0xF0, 0x0F},
{0xF1, 0x30},
{0xF0, 0x90},
{0xF0, 0x10},
{0xF1, 0x2F},
{0xF0, 0x91},
{0xF0, 0x11},
{0xF1, 0x2F},
{0xF0, 0x92},
{0xF0, 0x12},
{0xF1, 0x2E},
{0xF0, 0x93},
{0xF0, 0x13},
{0xF1, 0x2D},
{0xF0, 0x94},
{0xF0, 0x14},
{0xF1, 0x2C},
{0xF0, 0x95},
{0xF0, 0x15},
{0xF1, 0x2B},
{0xF0, 0x96},
{0xF0, 0x16},
{0xF1, 0x2B},
{0xF0, 0x97},
{0xF0, 0x17},
{0xF1, 0x2A},
{0xF0, 0x98},
{0xF0, 0x18},
{0xF1, 0x29},
{0xF0, 0x99},
{0xF0, 0x19},
{0xF1, 0x28},
{0xF0, 0x9A},
{0xF0, 0x1A},
{0xF1, 0x27},
{0xF0, 0x9B},
{0xF0, 0x1B},
{0xF1, 0x26},
{0xF0, 0x9C},
{0xF0, 0x1C},
{0xF1, 0x25},
{0xF0, 0x9D},
{0xF0, 0x1D},
{0xF1, 0x24},
{0xF0, 0x9E},
{0xF0, 0x1E},
{0xF1, 0x24},
{0xF0, 0x9F},
{0xF0, 0x1F},
{0xF1, 0x23},
{0xF0, 0xA0},
{0xF0, 0x20},
{0xF1, 0x22},
{0xF0, 0xA1},
{0xF0, 0x21},
{0xF1, 0x21},
{0xF0, 0xA2},
{0xF0, 0x22},
{0xF1, 0x20},
{0xF0, 0xA3},
{0xF0, 0x23},
{0xF1, 0x20},
{0xF0, 0xA4},
{0xF0, 0x24},
{0xF1, 0x1F},
{0xF0, 0xA5},
{0xF0, 0x25},
{0xF1, 0x1E},
{0xF0, 0xA6},
{0xF0, 0x26},
{0xF1, 0x1D},
{0xF0, 0xA7},
{0xF0, 0x27},
{0xF1, 0x1C},
{0xF0, 0xA8},
{0xF0, 0x28},
{0xF1, 0x1B},
{0xF0, 0xA9},
{0xF0, 0x29},
{0xF1, 0x1B},
{0xF0, 0xAA},
{0xF0, 0x2A},
{0xF1, 0x1A},
{0xF0, 0xAB},
{0xF0, 0x2B},
{0xF1, 0x1A},
{0xF0, 0xAC},
{0xF0, 0x2C},
{0xF1, 0x19},
{0xF0, 0xAD},
{0xF0, 0x2D},
{0xF1, 0x18},
{0xF0, 0xAE},
{0xF0, 0x2E},
{0xF1, 0x17},
{0xF0, 0xAF},
{0xF0, 0x2F},
{0xF1, 0x16},
{0xF0, 0xB0},
{0xF0, 0x30},
{0xF1, 0x15},
{0xF0, 0xB1},
{0xF0, 0x31},
{0xF1, 0x15},
{0xF0, 0xB2},
{0xF0, 0x32},
{0xF1, 0x15},
{0xF0, 0xB3},
{0xF0, 0x33},
{0xF1, 0x14},
{0xF0, 0xB4},
{0xF0, 0x34},
{0xF1, 0x13},
{0xF0, 0xB5},
{0xF0, 0x35},
{0xF1, 0x12},
{0xF0, 0xB6},
{0xF0, 0x36},
{0xF1, 0x11},
{0xF0, 0xB7},
{0xF0, 0x37},
{0xF1, 0x10},
{0xF0, 0xB8},
{0xF0, 0x38},
{0xF1, 0x0F},
{0xF0, 0xB9},
{0xF0, 0x39},
{0xF1, 0x0E},
{0xF0, 0xBA},
{0xF0, 0x3A},
{0xF1, 0x0D},
{0xF0, 0xBB},
{0xF0, 0x3B},
{0xF1, 0x0C},
{0xF0, 0xBC},
{0xF0, 0x3C},
{0xF1, 0x0B},
{0xF0, 0xBD},
{0xF0, 0x3D},
{0xF1, 0x0B},
{0xF0, 0xBE},
{0xF0, 0x3E},
{0xF1, 0x0A},
{0xF0, 0xBF},
{0xF0, 0x3F},
{0xF1, 0x09},
{0xF0, 0x00},
};
#define CB_VT3253B0_INIT_FOR_AIROHA2230 256
/* For AIROHA */
/*
 * VT3253 B0 baseband register initialization table for the Airoha
 * AL2230/AL2230S RF front end.  Each entry is {BB register offset, value};
 * bb_vt3253_init() writes them in order via bb_write_embedded().
 * Values come from the vendor; do not reorder or edit individual entries.
 */
static
unsigned char byVT3253B0_AIROHA2230[CB_VT3253B0_INIT_FOR_AIROHA2230][2] = {
	{0x00, 0x31},
	{0x01, 0x00},
	{0x02, 0x00},
	{0x03, 0x00},
	{0x04, 0x00},
	{0x05, 0x80},
	{0x06, 0x00},
	{0x07, 0x00},
	{0x08, 0x70},
	{0x09, 0x41},
	{0x0a, 0x2A},
	{0x0b, 0x76},
	{0x0c, 0x00},
	{0x0d, 0x00},
	{0x0e, 0x80},
	{0x0f, 0x00},
	{0x10, 0x00},
	{0x11, 0x00},
	{0x12, 0x00},
	{0x13, 0x00},
	{0x14, 0x00},
	{0x15, 0x00},
	{0x16, 0x00},
	{0x17, 0x00},
	{0x18, 0x00},
	{0x19, 0x00},
	{0x1a, 0x00},
	{0x1b, 0x8f},
	{0x1c, 0x09},
	{0x1d, 0x00},
	{0x1e, 0x00},
	{0x1f, 0x00},
	{0x20, 0x00},
	{0x21, 0x00},
	{0x22, 0x00},
	{0x23, 0x00},
	{0x24, 0x00},
	{0x25, 0x4a},
	{0x26, 0x00},
	{0x27, 0x00},
	{0x28, 0x00},
	{0x29, 0x00},
	{0x2a, 0x00},
	{0x2b, 0x00},
	{0x2c, 0x00},
	{0x2d, 0x4a},
	{0x2e, 0x00},
	{0x2f, 0x0a},
	{0x30, 0x26},
	{0x31, 0x5b},
	{0x32, 0x00},
	{0x33, 0x00},
	{0x34, 0x00},
	{0x35, 0x00},
	{0x36, 0xaa},
	{0x37, 0xaa},
	{0x38, 0xff},
	{0x39, 0xff},
	{0x3a, 0x79},
	{0x3b, 0x00},
	{0x3c, 0x00},
	{0x3d, 0x0b},
	{0x3e, 0x48},
	{0x3f, 0x04},
	{0x40, 0x00},
	{0x41, 0x08},
	{0x42, 0x00},
	{0x43, 0x08},
	{0x44, 0x08},
	{0x45, 0x14},
	{0x46, 0x05},
	{0x47, 0x09},
	{0x48, 0x00},
	{0x49, 0x00},
	{0x4a, 0x00},
	{0x4b, 0x00},
	{0x4c, 0x09},
	{0x4d, 0x73},
	{0x4e, 0x00},
	{0x4f, 0xc5},
	{0x50, 0x15},
	{0x51, 0x19},
	{0x52, 0x00},
	{0x53, 0x00},
	{0x54, 0x00},
	{0x55, 0x00},
	{0x56, 0x00},
	{0x57, 0x00},
	{0x58, 0x00},
	{0x59, 0xb0},
	{0x5a, 0x00},
	{0x5b, 0x00},
	{0x5c, 0x00},
	{0x5d, 0x00},
	{0x5e, 0x00},
	{0x5f, 0x00},
	{0x60, 0xe4},
	{0x61, 0x80},
	{0x62, 0x00},
	{0x63, 0x00},
	{0x64, 0x00},
	{0x65, 0x00},
	{0x66, 0x98},
	{0x67, 0x0a},
	{0x68, 0x00},
	{0x69, 0x00},
	{0x6a, 0x00},
	{0x6b, 0x00},
	{0x6c, 0x00}, /* RobertYu:20050125, request by JJSue */
	{0x6d, 0x03},
	{0x6e, 0x01},
	{0x6f, 0x00},
	{0x70, 0x00},
	{0x71, 0x00},
	{0x72, 0x00},
	{0x73, 0x00},
	{0x74, 0x00},
	{0x75, 0x00},
	{0x76, 0x00},
	{0x77, 0x00},
	{0x78, 0x00},
	{0x79, 0x00},
	{0x7a, 0x00},
	{0x7b, 0x00},
	{0x7c, 0x00},
	{0x7d, 0x00},
	{0x7e, 0x00},
	{0x7f, 0x00},
	{0x80, 0x8c},
	{0x81, 0x01},
	{0x82, 0x09},
	{0x83, 0x00},
	{0x84, 0x00},
	{0x85, 0x00},
	{0x86, 0x00},
	{0x87, 0x00},
	{0x88, 0x08},
	{0x89, 0x00},
	{0x8a, 0x0f},
	{0x8b, 0xb7},
	{0x8c, 0x88},
	{0x8d, 0x47},
	{0x8e, 0xaa},
	{0x8f, 0x02},
	{0x90, 0x22},
	{0x91, 0x00},
	{0x92, 0x00},
	{0x93, 0x00},
	{0x94, 0x00},
	{0x95, 0x00},
	{0x96, 0x00},
	{0x97, 0xeb},
	{0x98, 0x00},
	{0x99, 0x00},
	{0x9a, 0x00},
	{0x9b, 0x00},
	{0x9c, 0x00},
	{0x9d, 0x00},
	{0x9e, 0x00},
	{0x9f, 0x01},
	{0xa0, 0x00},
	{0xa1, 0x00},
	{0xa2, 0x00},
	{0xa3, 0x00},
	{0xa4, 0x00},
	{0xa5, 0x00},
	{0xa6, 0x10},
	{0xa7, 0x00},
	{0xa8, 0x18},
	{0xa9, 0x00},
	{0xaa, 0x00},
	{0xab, 0x00},
	{0xac, 0x00},
	{0xad, 0x00},
	{0xae, 0x00},
	{0xaf, 0x18},
	{0xb0, 0x38},
	{0xb1, 0x30},
	{0xb2, 0x00},
	{0xb3, 0x00},
	{0xb4, 0xff},
	{0xb5, 0x0f},
	{0xb6, 0xe4},
	{0xb7, 0xe2},
	{0xb8, 0x00},
	{0xb9, 0x00},
	{0xba, 0x00},
	{0xbb, 0x03},
	{0xbc, 0x01},
	{0xbd, 0x00},
	{0xbe, 0x00},
	{0xbf, 0x00},
	{0xc0, 0x18},
	{0xc1, 0x20},
	{0xc2, 0x07},
	{0xc3, 0x18},
	{0xc4, 0xff},
	{0xc5, 0x2c},
	{0xc6, 0x0c},
	{0xc7, 0x0a},
	{0xc8, 0x0e},
	{0xc9, 0x01},
	{0xca, 0x68},
	{0xcb, 0xa7},
	{0xcc, 0x3c},
	{0xcd, 0x10},
	{0xce, 0x00},
	{0xcf, 0x25},
	{0xd0, 0x40},
	{0xd1, 0x12},
	{0xd2, 0x00},
	{0xd3, 0x00},
	{0xd4, 0x10},
	{0xd5, 0x28},
	{0xd6, 0x80},
	{0xd7, 0x2A},
	{0xd8, 0x00},
	{0xd9, 0x00},
	{0xda, 0x00},
	{0xdb, 0x00},
	{0xdc, 0x00},
	{0xdd, 0x00},
	{0xde, 0x00},
	{0xdf, 0x00},
	{0xe0, 0x00},
	{0xe1, 0xB3},
	{0xe2, 0x00},
	{0xe3, 0x00},
	{0xe4, 0x00},
	{0xe5, 0x10},
	{0xe6, 0x00},
	{0xe7, 0x1C},
	{0xe8, 0x00},
	{0xe9, 0xf4},
	{0xea, 0x00},
	{0xeb, 0xff},
	{0xec, 0x79},
	{0xed, 0x20},
	{0xee, 0x30},
	{0xef, 0x01},
	{0xf0, 0x00},
	{0xf1, 0x3e},
	{0xf2, 0x00},
	{0xf3, 0x00},
	{0xf4, 0x00},
	{0xf5, 0x00},
	{0xf6, 0x00},
	{0xf7, 0x00},
	{0xf8, 0x00},
	{0xf9, 0x00},
	{0xfa, 0x00},
	{0xfb, 0x00},
	{0xfc, 0x00},
	{0xfd, 0x00},
	{0xfe, 0x00},
	{0xff, 0x00},
};
#define CB_VT3253B0_INIT_FOR_UW2451 256
/* For UW2451 */
/*
 * VT3253 B0 baseband register initialization table for the UW2451 RF
 * front end.  Each entry is {BB register offset, value}; written in
 * order by bb_vt3253_init().  Vendor-supplied values — keep exact.
 */
static unsigned char byVT3253B0_UW2451[CB_VT3253B0_INIT_FOR_UW2451][2] = {
	{0x00, 0x31},
	{0x01, 0x00},
	{0x02, 0x00},
	{0x03, 0x00},
	{0x04, 0x00},
	{0x05, 0x81},
	{0x06, 0x00},
	{0x07, 0x00},
	{0x08, 0x38},
	{0x09, 0x45},
	{0x0a, 0x28},
	{0x0b, 0x76},
	{0x0c, 0x00},
	{0x0d, 0x00},
	{0x0e, 0x80},
	{0x0f, 0x00},
	{0x10, 0x00},
	{0x11, 0x00},
	{0x12, 0x00},
	{0x13, 0x00},
	{0x14, 0x00},
	{0x15, 0x00},
	{0x16, 0x00},
	{0x17, 0x00},
	{0x18, 0x00},
	{0x19, 0x00},
	{0x1a, 0x00},
	{0x1b, 0x8f},
	{0x1c, 0x0f},
	{0x1d, 0x00},
	{0x1e, 0x00},
	{0x1f, 0x00},
	{0x20, 0x00},
	{0x21, 0x00},
	{0x22, 0x00},
	{0x23, 0x00},
	{0x24, 0x00},
	{0x25, 0x4a},
	{0x26, 0x00},
	{0x27, 0x00},
	{0x28, 0x00},
	{0x29, 0x00},
	{0x2a, 0x00},
	{0x2b, 0x00},
	{0x2c, 0x00},
	{0x2d, 0x18},
	{0x2e, 0x00},
	{0x2f, 0x0a},
	{0x30, 0x26},
	{0x31, 0x5b},
	{0x32, 0x00},
	{0x33, 0x00},
	{0x34, 0x00},
	{0x35, 0x00},
	{0x36, 0xaa},
	{0x37, 0xaa},
	{0x38, 0xff},
	{0x39, 0xff},
	{0x3a, 0x00},
	{0x3b, 0x00},
	{0x3c, 0x00},
	{0x3d, 0x03},
	{0x3e, 0x1d},
	{0x3f, 0x04},
	{0x40, 0x00},
	{0x41, 0x08},
	{0x42, 0x00},
	{0x43, 0x08},
	{0x44, 0x08},
	{0x45, 0x14},
	{0x46, 0x05},
	{0x47, 0x09},
	{0x48, 0x00},
	{0x49, 0x00},
	{0x4a, 0x00},
	{0x4b, 0x00},
	{0x4c, 0x09},
	{0x4d, 0x90},
	{0x4e, 0x00},
	{0x4f, 0xc5},
	{0x50, 0x15},
	{0x51, 0x19},
	{0x52, 0x00},
	{0x53, 0x00},
	{0x54, 0x00},
	{0x55, 0x00},
	{0x56, 0x00},
	{0x57, 0x00},
	{0x58, 0x00},
	{0x59, 0xb0},
	{0x5a, 0x00},
	{0x5b, 0x00},
	{0x5c, 0x00},
	{0x5d, 0x00},
	{0x5e, 0x00},
	{0x5f, 0x00},
	{0x60, 0xb3},
	{0x61, 0x81},
	{0x62, 0x00},
	{0x63, 0x00},
	{0x64, 0x00},
	{0x65, 0x00},
	{0x66, 0x57},
	{0x67, 0x6c},
	{0x68, 0x00},
	{0x69, 0x00},
	{0x6a, 0x00},
	{0x6b, 0x00},
	{0x6c, 0x00}, /* RobertYu:20050125, request by JJSue */
	{0x6d, 0x03},
	{0x6e, 0x01},
	{0x6f, 0x00},
	{0x70, 0x00},
	{0x71, 0x00},
	{0x72, 0x00},
	{0x73, 0x00},
	{0x74, 0x00},
	{0x75, 0x00},
	{0x76, 0x00},
	{0x77, 0x00},
	{0x78, 0x00},
	{0x79, 0x00},
	{0x7a, 0x00},
	{0x7b, 0x00},
	{0x7c, 0x00},
	{0x7d, 0x00},
	{0x7e, 0x00},
	{0x7f, 0x00},
	{0x80, 0x8c},
	{0x81, 0x00},
	{0x82, 0x0e},
	{0x83, 0x00},
	{0x84, 0x00},
	{0x85, 0x00},
	{0x86, 0x00},
	{0x87, 0x00},
	{0x88, 0x08},
	{0x89, 0x00},
	{0x8a, 0x0e},
	{0x8b, 0xa7},
	{0x8c, 0x88},
	{0x8d, 0x47},
	{0x8e, 0xaa},
	{0x8f, 0x02},
	{0x90, 0x00},
	{0x91, 0x00},
	{0x92, 0x00},
	{0x93, 0x00},
	{0x94, 0x00},
	{0x95, 0x00},
	{0x96, 0x00},
	{0x97, 0xe3},
	{0x98, 0x00},
	{0x99, 0x00},
	{0x9a, 0x00},
	{0x9b, 0x00},
	{0x9c, 0x00},
	{0x9d, 0x00},
	{0x9e, 0x00},
	{0x9f, 0x00},
	{0xa0, 0x00},
	{0xa1, 0x00},
	{0xa2, 0x00},
	{0xa3, 0x00},
	{0xa4, 0x00},
	{0xa5, 0x00},
	{0xa6, 0x10},
	{0xa7, 0x00},
	{0xa8, 0x18},
	{0xa9, 0x00},
	{0xaa, 0x00},
	{0xab, 0x00},
	{0xac, 0x00},
	{0xad, 0x00},
	{0xae, 0x00},
	{0xaf, 0x18},
	{0xb0, 0x18},
	{0xb1, 0x30},
	{0xb2, 0x00},
	{0xb3, 0x00},
	{0xb4, 0x00},
	{0xb5, 0x00},
	{0xb6, 0x00},
	{0xb7, 0x00},
	{0xb8, 0x00},
	{0xb9, 0x00},
	{0xba, 0x00},
	{0xbb, 0x03},
	{0xbc, 0x01},
	{0xbd, 0x00},
	{0xbe, 0x00},
	{0xbf, 0x00},
	{0xc0, 0x10},
	{0xc1, 0x20},
	{0xc2, 0x00},
	{0xc3, 0x20},
	{0xc4, 0x00},
	{0xc5, 0x2c},
	{0xc6, 0x1c},
	{0xc7, 0x10},
	{0xc8, 0x10},
	{0xc9, 0x01},
	{0xca, 0x68},
	{0xcb, 0xa7},
	{0xcc, 0x3c},
	{0xcd, 0x09},
	{0xce, 0x00},
	{0xcf, 0x20},
	{0xd0, 0x40},
	{0xd1, 0x10},
	{0xd2, 0x00},
	{0xd3, 0x00},
	{0xd4, 0x20},
	{0xd5, 0x28},
	{0xd6, 0xa0},
	{0xd7, 0x2a},
	{0xd8, 0x00},
	{0xd9, 0x00},
	{0xda, 0x00},
	{0xdb, 0x00},
	{0xdc, 0x00},
	{0xdd, 0x00},
	{0xde, 0x00},
	{0xdf, 0x00},
	{0xe0, 0x00},
	{0xe1, 0xd3},
	{0xe2, 0xc0},
	{0xe3, 0x00},
	{0xe4, 0x00},
	{0xe5, 0x10},
	{0xe6, 0x00},
	{0xe7, 0x12},
	{0xe8, 0x12},
	{0xe9, 0x34},
	{0xea, 0x00},
	{0xeb, 0xff},
	{0xec, 0x79},
	{0xed, 0x20},
	{0xee, 0x30},
	{0xef, 0x01},
	{0xf0, 0x00},
	{0xf1, 0x3e},
	{0xf2, 0x00},
	{0xf3, 0x00},
	{0xf4, 0x00},
	{0xf5, 0x00},
	{0xf6, 0x00},
	{0xf7, 0x00},
	{0xf8, 0x00},
	{0xf9, 0x00},
	{0xfa, 0x00},
	{0xfb, 0x00},
	{0xfc, 0x00},
	{0xfd, 0x00},
	{0xfe, 0x00},
	{0xff, 0x00},
};
#define CB_VT3253B0_AGC 193
/* For AIROHA */
/*
 * VT3253 B0 AGC table, programmed indirectly through BB registers
 * 0xF0 (index/control) and 0xF1 (data).  The sequence appears to load
 * one AGC entry per {0xF0, idx} / {0xF1, gain} / {0xF0, idx | 0x80}
 * triple — TODO confirm against the VT3253 datasheet.  Written in
 * order by bb_vt3253_init(); do not reorder.
 */
static unsigned char byVT3253B0_AGC[CB_VT3253B0_AGC][2] = {
	{0xF0, 0x00},
	{0xF1, 0x00},
	{0xF0, 0x80},
	{0xF0, 0x01},
	{0xF1, 0x00},
	{0xF0, 0x81},
	{0xF0, 0x02},
	{0xF1, 0x02},
	{0xF0, 0x82},
	{0xF0, 0x03},
	{0xF1, 0x04},
	{0xF0, 0x83},
	{0xF0, 0x03},
	{0xF1, 0x04},
	{0xF0, 0x84},
	{0xF0, 0x04},
	{0xF1, 0x06},
	{0xF0, 0x85},
	{0xF0, 0x05},
	{0xF1, 0x06},
	{0xF0, 0x86},
	{0xF0, 0x06},
	{0xF1, 0x06},
	{0xF0, 0x87},
	{0xF0, 0x07},
	{0xF1, 0x08},
	{0xF0, 0x88},
	{0xF0, 0x08},
	{0xF1, 0x08},
	{0xF0, 0x89},
	{0xF0, 0x09},
	{0xF1, 0x0A},
	{0xF0, 0x8A},
	{0xF0, 0x0A},
	{0xF1, 0x0A},
	{0xF0, 0x8B},
	{0xF0, 0x0B},
	{0xF1, 0x0C},
	{0xF0, 0x8C},
	{0xF0, 0x0C},
	{0xF1, 0x0C},
	{0xF0, 0x8D},
	{0xF0, 0x0D},
	{0xF1, 0x0E},
	{0xF0, 0x8E},
	{0xF0, 0x0E},
	{0xF1, 0x0E},
	{0xF0, 0x8F},
	{0xF0, 0x0F},
	{0xF1, 0x10},
	{0xF0, 0x90},
	{0xF0, 0x10},
	{0xF1, 0x10},
	{0xF0, 0x91},
	{0xF0, 0x11},
	{0xF1, 0x12},
	{0xF0, 0x92},
	{0xF0, 0x12},
	{0xF1, 0x12},
	{0xF0, 0x93},
	{0xF0, 0x13},
	{0xF1, 0x14},
	{0xF0, 0x94},
	{0xF0, 0x14},
	{0xF1, 0x14},
	{0xF0, 0x95},
	{0xF0, 0x15},
	{0xF1, 0x16},
	{0xF0, 0x96},
	{0xF0, 0x16},
	{0xF1, 0x16},
	{0xF0, 0x97},
	{0xF0, 0x17},
	{0xF1, 0x18},
	{0xF0, 0x98},
	{0xF0, 0x18},
	{0xF1, 0x18},
	{0xF0, 0x99},
	{0xF0, 0x19},
	{0xF1, 0x1A},
	{0xF0, 0x9A},
	{0xF0, 0x1A},
	{0xF1, 0x1A},
	{0xF0, 0x9B},
	{0xF0, 0x1B},
	{0xF1, 0x1C},
	{0xF0, 0x9C},
	{0xF0, 0x1C},
	{0xF1, 0x1C},
	{0xF0, 0x9D},
	{0xF0, 0x1D},
	{0xF1, 0x1E},
	{0xF0, 0x9E},
	{0xF0, 0x1E},
	{0xF1, 0x1E},
	{0xF0, 0x9F},
	{0xF0, 0x1F},
	{0xF1, 0x20},
	{0xF0, 0xA0},
	{0xF0, 0x20},
	{0xF1, 0x20},
	{0xF0, 0xA1},
	{0xF0, 0x21},
	{0xF1, 0x22},
	{0xF0, 0xA2},
	{0xF0, 0x22},
	{0xF1, 0x22},
	{0xF0, 0xA3},
	{0xF0, 0x23},
	{0xF1, 0x24},
	{0xF0, 0xA4},
	{0xF0, 0x24},
	{0xF1, 0x24},
	{0xF0, 0xA5},
	{0xF0, 0x25},
	{0xF1, 0x26},
	{0xF0, 0xA6},
	{0xF0, 0x26},
	{0xF1, 0x26},
	{0xF0, 0xA7},
	{0xF0, 0x27},
	{0xF1, 0x28},
	{0xF0, 0xA8},
	{0xF0, 0x28},
	{0xF1, 0x28},
	{0xF0, 0xA9},
	{0xF0, 0x29},
	{0xF1, 0x2A},
	{0xF0, 0xAA},
	{0xF0, 0x2A},
	{0xF1, 0x2A},
	{0xF0, 0xAB},
	{0xF0, 0x2B},
	{0xF1, 0x2C},
	{0xF0, 0xAC},
	{0xF0, 0x2C},
	{0xF1, 0x2C},
	{0xF0, 0xAD},
	{0xF0, 0x2D},
	{0xF1, 0x2E},
	{0xF0, 0xAE},
	{0xF0, 0x2E},
	{0xF1, 0x2E},
	{0xF0, 0xAF},
	{0xF0, 0x2F},
	{0xF1, 0x30},
	{0xF0, 0xB0},
	{0xF0, 0x30},
	{0xF1, 0x30},
	{0xF0, 0xB1},
	{0xF0, 0x31},
	{0xF1, 0x32},
	{0xF0, 0xB2},
	{0xF0, 0x32},
	{0xF1, 0x32},
	{0xF0, 0xB3},
	{0xF0, 0x33},
	{0xF1, 0x34},
	{0xF0, 0xB4},
	{0xF0, 0x34},
	{0xF1, 0x34},
	{0xF0, 0xB5},
	{0xF0, 0x35},
	{0xF1, 0x36},
	{0xF0, 0xB6},
	{0xF0, 0x36},
	{0xF1, 0x36},
	{0xF0, 0xB7},
	{0xF0, 0x37},
	{0xF1, 0x38},
	{0xF0, 0xB8},
	{0xF0, 0x38},
	{0xF1, 0x38},
	{0xF0, 0xB9},
	{0xF0, 0x39},
	{0xF1, 0x3A},
	{0xF0, 0xBA},
	{0xF0, 0x3A},
	{0xF1, 0x3A},
	{0xF0, 0xBB},
	{0xF0, 0x3B},
	{0xF1, 0x3C},
	{0xF0, 0xBC},
	{0xF0, 0x3C},
	{0xF1, 0x3C},
	{0xF0, 0xBD},
	{0xF0, 0x3D},
	{0xF1, 0x3E},
	{0xF0, 0xBE},
	{0xF0, 0x3E},
	{0xF1, 0x3E},
	{0xF0, 0xBF},
	{0xF0, 0x00},
};
/*
 * Rate factors used by bb_get_frame_time(), indexed by RATE_* value.
 * The first four (CCK 1/2/5.5/11M) are the rate in 100 kbit/s units;
 * the remaining eight (OFDM 6..54M) are Mbit/s * 4, i.e. the number of
 * data bits carried per 4 us OFDM symbol.
 */
static const unsigned short awc_frame_time[MAX_RATE] = {
	10, 20, 55, 110, 24, 36, 48, 72, 96, 144, 192, 216
};
/*--------------------- Export Variables --------------------------*/
/*
* Description: Calculate data frame transmitting time
*
* Parameters:
* In:
* preamble_type - Preamble Type
* by_pkt_type - PK_TYPE_11A, PK_TYPE_11B, PK_TYPE_11GB, PK_TYPE_11GA
 *      cb_frame_length - Tx Frame Length in bytes
* tx_rate - Tx Rate
* Out:
*
* Return Value: FrameTime
*
*/
unsigned int bb_get_frame_time(unsigned char preamble_type,
			       unsigned char by_pkt_type,
			       unsigned int cb_frame_length,
			       unsigned short tx_rate)
{
	unsigned int frame_time;
	unsigned int preamble;
	unsigned int tmp;
	unsigned int rate_idx = (unsigned int)tx_rate;
	unsigned int rate = 0;

	/* Reject rate indices outside the awc_frame_time table. */
	if (rate_idx > RATE_54M)
		return 0;

	rate = (unsigned int)awc_frame_time[rate_idx];

	if (rate_idx <= 3) { /* CCK rates 1/2/5.5/11M */
		if (preamble_type == PREAMBLE_SHORT)
			preamble = 96; /* short preamble + PLCP header, us */
		else
			preamble = 192; /* long preamble + PLCP header, us */
		/* bytes * 80 / rate(100 kbit/s) == bits / Mbit-rate -> us */
		frame_time = (cb_frame_length * 80) / rate;
		tmp = (frame_time * rate) / 80;
		if (cb_frame_length != tmp)
			frame_time++; /* round the integer division up */

		return preamble + frame_time;
	}
	/* OFDM: symbols = ceil((8 * bytes + 22 service/tail bits) / bits-per-symbol) */
	frame_time = (cb_frame_length * 8 + 22) / rate;
	tmp = ((frame_time * rate) - 22) / 8;
	if (cb_frame_length != tmp)
		frame_time++; /* round up to a whole symbol */

	frame_time = frame_time * 4; /* 4 us per OFDM symbol */

	if (by_pkt_type != PK_TYPE_11A)
		frame_time += 6; /* 2.4 GHz OFDM signal extension */

	return 20 + frame_time; /* 20 us OFDM preamble + SIGNAL */
}
/*
* Description: Calculate Length, Service, and Signal fields of Phy for Tx
*
* Parameters:
* In:
* priv - Device Structure
* frame_length - Tx Frame Length
* tx_rate - Tx Rate
* Out:
* struct vnt_phy_field *phy
* - pointer to Phy Length field
* - pointer to Phy Service field
* - pointer to Phy Signal field
*
* Return Value: none
*
*/
/*
 * Fill in the PLCP PHY header (signal, service, length) for a frame of
 * frame_length bytes to be sent at tx_rate.  For CCK rates the length
 * field is the transmit duration in microseconds (with the length
 * extension bit in service for 11M); for OFDM it is the byte count.
 * The 0x9X signal codes are used for 11a packets, 0x8X for 11g.
 */
void vnt_get_phy_field(struct vnt_private *priv, u32 frame_length,
		       u16 tx_rate, u8 pkt_type, struct vnt_phy_field *phy)
{
	u32 bit_count;
	u32 count = 0;
	u32 tmp;
	int ext_bit;
	u8 preamble_type = priv->preamble_type;

	bit_count = frame_length * 8;
	ext_bit = false;

	switch (tx_rate) {
	case RATE_1M:
		/* 1 Mbit/s: duration in us equals the bit count */
		count = bit_count;

		phy->signal = 0x00;

		break;
	case RATE_2M:
		count = bit_count / 2;

		if (preamble_type == PREAMBLE_SHORT)
			phy->signal = 0x09;
		else
			phy->signal = 0x01;

		break;
	case RATE_5M:
		/* 5.5 Mbit/s: scale by 10/55 and round up */
		count = (bit_count * 10) / 55;
		tmp = (count * 55) / 10;

		if (tmp != bit_count)
			count++;

		if (preamble_type == PREAMBLE_SHORT)
			phy->signal = 0x0a;
		else
			phy->signal = 0x02;

		break;
	case RATE_11M:
		count = bit_count / 11;
		tmp = count * 11;

		if (tmp != bit_count) {
			count++;

			/* length extension bit when the rounding slack
			 * is 3 bits or fewer (802.11b PLCP rule)
			 */
			if ((bit_count - tmp) <= 3)
				ext_bit = true;
		}

		if (preamble_type == PREAMBLE_SHORT)
			phy->signal = 0x0b;
		else
			phy->signal = 0x03;

		break;
	case RATE_6M:
		if (pkt_type == PK_TYPE_11A)
			phy->signal = 0x9b;
		else
			phy->signal = 0x8b;

		break;
	case RATE_9M:
		if (pkt_type == PK_TYPE_11A)
			phy->signal = 0x9f;
		else
			phy->signal = 0x8f;

		break;
	case RATE_12M:
		if (pkt_type == PK_TYPE_11A)
			phy->signal = 0x9a;
		else
			phy->signal = 0x8a;

		break;
	case RATE_18M:
		if (pkt_type == PK_TYPE_11A)
			phy->signal = 0x9e;
		else
			phy->signal = 0x8e;

		break;
	case RATE_24M:
		if (pkt_type == PK_TYPE_11A)
			phy->signal = 0x99;
		else
			phy->signal = 0x89;

		break;
	case RATE_36M:
		if (pkt_type == PK_TYPE_11A)
			phy->signal = 0x9d;
		else
			phy->signal = 0x8d;

		break;
	case RATE_48M:
		if (pkt_type == PK_TYPE_11A)
			phy->signal = 0x98;
		else
			phy->signal = 0x88;

		break;
	case RATE_54M:
		if (pkt_type == PK_TYPE_11A)
			phy->signal = 0x9c;
		else
			phy->signal = 0x8c;
		break;
	default:
		/* unknown rate: fall back to the 54M signal code */
		if (pkt_type == PK_TYPE_11A)
			phy->signal = 0x9c;
		else
			phy->signal = 0x8c;
		break;
	}

	if (pkt_type == PK_TYPE_11B) {
		phy->service = 0x00;
		if (ext_bit)
			phy->service |= 0x80; /* length extension bit */
		phy->len = cpu_to_le16((u16)count);
	} else {
		phy->service = 0x00;
		phy->len = cpu_to_le16((u16)frame_length);
	}
}
/*
* Description: Read a byte from BASEBAND, by embedded programming
*
* Parameters:
* In:
 *      priv - Device structure (the I/O base is taken from priv->port_offset)
* by_bb_addr - address of register in Baseband
* Out:
* pby_data - data read
*
* Return Value: true if succeeded; false if failed.
*
*/
bool bb_read_embedded(struct vnt_private *priv, unsigned char by_bb_addr,
		      unsigned char *pby_data)
{
	void __iomem *iobase = priv->port_offset;
	unsigned short ww;
	unsigned char by_value;

	/* BB reg offset */
	iowrite8(by_bb_addr, iobase + MAC_REG_BBREGADR);

	/* turn on REGR to start the embedded read cycle */
	vt6655_mac_reg_bits_on(iobase, MAC_REG_BBREGCTL, BBREGCTL_REGR);
	/* W_MAX_TIMEOUT is the timeout period */
	for (ww = 0; ww < W_MAX_TIMEOUT; ww++) {
		by_value = ioread8(iobase + MAC_REG_BBREGCTL);
		if (by_value & BBREGCTL_DONE)
			break;
	}

	/* get BB data; fetched unconditionally, only meaningful when the
	 * DONE bit was observed above
	 */
	*pby_data = ioread8(iobase + MAC_REG_BBREGDATA);

	if (ww == W_MAX_TIMEOUT) {
		pr_debug(" DBG_PORT80(0x30)\n");
		return false;
	}
	return true;
}
/*
* Description: Write a Byte to BASEBAND, by embedded programming
*
* Parameters:
* In:
 *      priv - Device structure (the I/O base is taken from priv->port_offset)
* by_bb_addr - address of register in Baseband
* by_data - data to write
* Out:
* none
*
* Return Value: true if succeeded; false if failed.
*
*/
bool bb_write_embedded(struct vnt_private *priv, unsigned char by_bb_addr,
		       unsigned char by_data)
{
	void __iomem *iobase = priv->port_offset;
	unsigned short ww;
	unsigned char by_value;

	/* BB reg offset */
	iowrite8(by_bb_addr, iobase + MAC_REG_BBREGADR);

	/* set BB data */
	iowrite8(by_data, iobase + MAC_REG_BBREGDATA);

	/* turn on BBREGCTL_REGW to start the embedded write cycle */
	vt6655_mac_reg_bits_on(iobase, MAC_REG_BBREGCTL, BBREGCTL_REGW);
	/* W_MAX_TIMEOUT is the timeout period */
	for (ww = 0; ww < W_MAX_TIMEOUT; ww++) {
		by_value = ioread8(iobase + MAC_REG_BBREGCTL);
		if (by_value & BBREGCTL_DONE)
			break;
	}

	if (ww == W_MAX_TIMEOUT) {
		pr_debug(" DBG_PORT80(0x31)\n");
		return false;
	}
	return true;
}
/*
* Description: VIA VT3253 Baseband chip init function
*
* Parameters:
* In:
 *      priv - Device structure (I/O base, RF type and local revision ID
 *             are read from it)
* Out:
* none
*
* Return Value: true if succeeded; false if failed.
*
*/
bool bb_vt3253_init(struct vnt_private *priv)
{
	/* result accumulates success across every register write below;
	 * a single failed bb_write_embedded() makes the whole init fail
	 */
	bool result = true;
	int ii;
	void __iomem *iobase = priv->port_offset;
	unsigned char by_rf_type = priv->byRFType;
	unsigned char by_local_id = priv->local_id;

	if (by_rf_type == RF_RFMD2959) {
		if (by_local_id <= REV_ID_VT3253_A1) {
			/* VT3253 A0/A1 silicon uses the original RFMD table */
			for (ii = 0; ii < CB_VT3253_INIT_FOR_RFMD; ii++)
				result &= bb_write_embedded(priv,
					by_vt3253_init_tab_rfmd[ii][0],
					by_vt3253_init_tab_rfmd[ii][1]);
		} else {
			/* B0 and later: new init table plus AGC table */
			for (ii = 0; ii < CB_VT3253B0_INIT_FOR_RFMD; ii++)
				result &= bb_write_embedded(priv,
					vt3253b0_rfmd[ii][0],
					vt3253b0_rfmd[ii][1]);

			for (ii = 0; ii < CB_VT3253B0_AGC_FOR_RFMD2959; ii++)
				result &= bb_write_embedded(priv,
					byVT3253B0_AGC4_RFMD2959[ii][0],
					byVT3253B0_AGC4_RFMD2959[ii][1]);

			iowrite32(0x23, iobase + MAC_REG_ITRTMSET);
			vt6655_mac_reg_bits_on(iobase, MAC_REG_PAPEDELAY, BIT(0));
		}
		/* per-RF BBVGA gain steps and matching dBm thresholds */
		priv->abyBBVGA[0] = 0x18;
		priv->abyBBVGA[1] = 0x0A;
		priv->abyBBVGA[2] = 0x0;
		priv->abyBBVGA[3] = 0x0;
		priv->dbm_threshold[0] = -70;
		priv->dbm_threshold[1] = -50;
		priv->dbm_threshold[2] = 0;
		priv->dbm_threshold[3] = 0;
	} else if ((by_rf_type == RF_AIROHA) || (by_rf_type == RF_AL2230S)) {
		for (ii = 0; ii < CB_VT3253B0_INIT_FOR_AIROHA2230; ii++)
			result &= bb_write_embedded(priv,
				byVT3253B0_AIROHA2230[ii][0],
				byVT3253B0_AIROHA2230[ii][1]);

		for (ii = 0; ii < CB_VT3253B0_AGC; ii++)
			result &= bb_write_embedded(priv,
				byVT3253B0_AGC[ii][0], byVT3253B0_AGC[ii][1]);

		priv->abyBBVGA[0] = 0x1C;
		priv->abyBBVGA[1] = 0x10;
		priv->abyBBVGA[2] = 0x0;
		priv->abyBBVGA[3] = 0x0;
		priv->dbm_threshold[0] = -70;
		priv->dbm_threshold[1] = -48;
		priv->dbm_threshold[2] = 0;
		priv->dbm_threshold[3] = 0;
	} else if (by_rf_type == RF_UW2451) {
		for (ii = 0; ii < CB_VT3253B0_INIT_FOR_UW2451; ii++)
			result &= bb_write_embedded(priv,
				byVT3253B0_UW2451[ii][0],
				byVT3253B0_UW2451[ii][1]);

		for (ii = 0; ii < CB_VT3253B0_AGC; ii++)
			result &= bb_write_embedded(priv,
				byVT3253B0_AGC[ii][0],
				byVT3253B0_AGC[ii][1]);

		iowrite8(0x23, iobase + MAC_REG_ITRTMSET);
		vt6655_mac_reg_bits_on(iobase, MAC_REG_PAPEDELAY, BIT(0));

		priv->abyBBVGA[0] = 0x14;
		priv->abyBBVGA[1] = 0x0A;
		priv->abyBBVGA[2] = 0x0;
		priv->abyBBVGA[3] = 0x0;
		priv->dbm_threshold[0] = -60;
		priv->dbm_threshold[1] = -50;
		priv->dbm_threshold[2] = 0;
		priv->dbm_threshold[3] = 0;
	} else if (by_rf_type == RF_VT3226) {
		/* VT3226 shares the Airoha init and AGC tables */
		for (ii = 0; ii < CB_VT3253B0_INIT_FOR_AIROHA2230; ii++)
			result &= bb_write_embedded(priv,
				byVT3253B0_AIROHA2230[ii][0],
				byVT3253B0_AIROHA2230[ii][1]);

		for (ii = 0; ii < CB_VT3253B0_AGC; ii++)
			result &= bb_write_embedded(priv,
				byVT3253B0_AGC[ii][0], byVT3253B0_AGC[ii][1]);

		priv->abyBBVGA[0] = 0x1C;
		priv->abyBBVGA[1] = 0x10;
		priv->abyBBVGA[2] = 0x0;
		priv->abyBBVGA[3] = 0x0;
		priv->dbm_threshold[0] = -70;
		priv->dbm_threshold[1] = -48;
		priv->dbm_threshold[2] = 0;
		priv->dbm_threshold[3] = 0;
		/* Fix VT3226 DFC system timing issue */
		vt6655_mac_word_reg_bits_on(iobase, MAC_REG_SOFTPWRCTL, SOFTPWRCTL_RFLEOPT);
		/* {{ RobertYu: 20050104 */
	} else {
		/* No VGA Table now */
		priv->bUpdateBBVGA = false;
		priv->abyBBVGA[0] = 0x1C;
	}

	if (by_local_id > REV_ID_VT3253_A1) {
		bb_write_embedded(priv, 0x04, 0x7F);
		bb_write_embedded(priv, 0x0D, 0x01);
	}

	return result;
}
/*
* Description: Set ShortSlotTime mode
*
* Parameters:
* In:
* priv - Device Structure
* Out:
* none
*
* Return Value: none
*
*/
void
bb_set_short_slot_time(struct vnt_private *priv)
{
	unsigned char by_bb_rx_conf = 0;
	unsigned char by_bb_vga = 0;

	bb_read_embedded(priv, 0x0A, &by_bb_rx_conf); /* CR10 */

	/* Bit 5 of CR10 selects long-slot behavior: clear it for short slot */
	if (priv->short_slot_time)
		by_bb_rx_conf &= 0xDF; /* 1101 1111 */
	else
		by_bb_rx_conf |= 0x20; /* 0010 0000 */

	/* patch for 3253B0 Baseband with Cardbus module: when the VGA is at
	 * maximum sensitivity, force the bit back on regardless of slot mode
	 */
	bb_read_embedded(priv, 0xE7, &by_bb_vga);
	if (by_bb_vga == priv->abyBBVGA[0])
		by_bb_rx_conf |= 0x20; /* 0010 0000 */

	bb_write_embedded(priv, 0x0A, by_bb_rx_conf); /* CR10 */
}
/*
 * Program a new BBVGA gain (CR231/0xE7) and re-derive the CR10 slot-time
 * bit, which depends on whether the new gain is the maximum-sensitivity
 * setting (abyBBVGA[0]).  Caches the value in priv->byBBVGACurrent.
 */
void bb_set_vga_gain_offset(struct vnt_private *priv, unsigned char by_data)
{
	unsigned char by_bb_rx_conf = 0;

	bb_write_embedded(priv, 0xE7, by_data);

	bb_read_embedded(priv, 0x0A, &by_bb_rx_conf); /* CR10 */
	/* patch for 3253B0 Baseband with Cardbus module */
	if (by_data == priv->abyBBVGA[0])
		by_bb_rx_conf |= 0x20; /* 0010 0000 */
	else if (priv->short_slot_time)
		by_bb_rx_conf &= 0xDF; /* 1101 1111 */
	else
		by_bb_rx_conf |= 0x20; /* 0010 0000 */
	priv->byBBVGACurrent = by_data;
	bb_write_embedded(priv, 0x0A, by_bb_rx_conf); /* CR10 */
}
/*
* Description: Baseband SoftwareReset
*
* Parameters:
* In:
* iobase - I/O base address
* Out:
* none
*
* Return Value: none
*
*/
/*
 * Perform a baseband software reset by pulsing the reset bits in
 * BB registers 0x50 and 0x9C (assert, then release, in order).
 */
void
bb_software_reset(struct vnt_private *priv)
{
	static const unsigned char reset_seq[][2] = {
		{ 0x50, 0x40 },
		{ 0x50, 0x00 },
		{ 0x9C, 0x01 },
		{ 0x9C, 0x00 },
	};
	unsigned int step;

	for (step = 0; step < sizeof(reset_seq) / sizeof(reset_seq[0]); step++)
		bb_write_embedded(priv, reset_seq[step][0], reset_seq[step][1]);
}
/*
* Description: Set Tx Antenna mode
*
* Parameters:
* In:
* priv - Device Structure
* by_antenna_mode - Antenna Mode
* Out:
* none
*
* Return Value: none
*
*/
/*
 * Select the TX antenna by updating CR09: bit 1 enables diversity,
 * bit 2 (ANTSEL) picks antenna B over antenna A.  Unknown modes leave
 * the register value unchanged (it is still written back).
 */
void
bb_set_tx_antenna_mode(struct vnt_private *priv, unsigned char by_antenna_mode)
{
	unsigned char tx_conf;

	bb_read_embedded(priv, 0x09, &tx_conf); /* CR09 */

	switch (by_antenna_mode) {
	case ANT_DIVERSITY:
		tx_conf |= 0x02;	/* set diversity bit */
		break;
	case ANT_A:
		tx_conf &= 0xF9;	/* clear diversity and ANTSEL: 1111 1001 */
		break;
	case ANT_B:
		tx_conf &= 0xFD;	/* clear diversity: 1111 1101 */
		tx_conf |= 0x04;	/* ANTSEL -> antenna B */
		break;
	default:
		break;
	}

	bb_write_embedded(priv, 0x09, tx_conf); /* CR09 */
}
}
/*
* Description: Set Rx Antenna mode
*
* Parameters:
* In:
* priv - Device Structure
* by_antenna_mode - Antenna Mode
* Out:
* none
*
* Return Value: none
*
*/
/*
 * Select the RX antenna by updating CR10: bit 0 enables diversity,
 * bit 1 picks antenna B over antenna A.  Unknown modes leave the
 * register value unchanged (it is still written back).
 */
void
bb_set_rx_antenna_mode(struct vnt_private *priv, unsigned char by_antenna_mode)
{
	unsigned char rx_conf;

	bb_read_embedded(priv, 0x0A, &rx_conf); /* CR10 */

	switch (by_antenna_mode) {
	case ANT_DIVERSITY:
		rx_conf |= 0x01;	/* set diversity bit */
		break;
	case ANT_A:
		rx_conf &= 0xFC;	/* clear both select bits: 1111 1100 */
		break;
	case ANT_B:
		rx_conf &= 0xFE;	/* clear diversity: 1111 1110 */
		rx_conf |= 0x02;	/* select antenna B */
		break;
	default:
		break;
	}

	bb_write_embedded(priv, 0x0A, rx_conf); /* CR10 */
}
}
/*
* Description: bb_set_deep_sleep
*
* Parameters:
* In:
* priv - Device Structure
* Out:
* none
*
* Return Value: none
*
*/
void
bb_set_deep_sleep(struct vnt_private *priv, unsigned char by_local_id)
{
	/* NOTE(review): by_local_id is currently unused — the same CR12/CR13
	 * values are written for every silicon revision; confirm whether a
	 * revision-specific sequence was intended.
	 */
	bb_write_embedded(priv, 0x0C, 0x17); /* CR12 */
	bb_write_embedded(priv, 0x0D, 0xB9); /* CR13 */
}
| linux-master | drivers/staging/vt6655/baseband.c |
// SPDX-License-Identifier: GPL-2.0+
/*
* Copyright (c) 1996, 2003 VIA Networking Technologies, Inc.
* All rights reserved.
*
* Purpose:Implement functions to access eeprom
*
* Author: Jerry Chen
*
* Date: Jan 29, 2003
*
* Functions:
* SROMbyReadEmbedded - Embedded read eeprom via MAC
* SROMbWriteEmbedded - Embedded write eeprom via MAC
* SROMvRegBitsOn - Set Bits On in eeprom
* SROMvRegBitsOff - Clear Bits Off in eeprom
* SROMbIsRegBitsOn - Test if Bits On in eeprom
* SROMbIsRegBitsOff - Test if Bits Off in eeprom
* SROMvReadAllContents - Read all contents in eeprom
* SROMvWriteAllContents - Write all contents in eeprom
* SROMvReadEtherAddress - Read Ethernet Address in eeprom
* SROMvWriteEtherAddress - Write Ethernet Address in eeprom
* SROMvReadSubSysVenId - Read Sub_VID and Sub_SysId in eeprom
* SROMbAutoLoad - Auto Load eeprom to MAC register
*
* Revision History:
*
*/
#include "device.h"
#include "mac.h"
#include "srom.h"
/*--------------------- Static Definitions -------------------------*/
/*--------------------- Static Classes ----------------------------*/
/*--------------------- Static Variables --------------------------*/
/*--------------------- Static Functions --------------------------*/
/*--------------------- Export Variables --------------------------*/
/*--------------------- Export Functions --------------------------*/
/*
* Description: Read a byte from EEPROM, by MAC I2C
*
* Parameters:
* In:
* iobase - I/O base address
* contnt_offset - address of EEPROM
* Out:
* none
*
* Return Value: data read
*
*/
unsigned char SROMbyReadEmbedded(void __iomem *iobase,
				 unsigned char contnt_offset)
{
	unsigned short wDelay, wNoACK;
	unsigned char byWait;
	unsigned char byData;
	unsigned char byOrg;

	byData = 0xFF; /* default in case the read never completes */
	byOrg = ioread8(iobase + MAC_REG_I2MCFG);
	/* turn off hardware retry for getting NACK */
	iowrite8(byOrg & (~I2MCFG_NORETRY), iobase + MAC_REG_I2MCFG);
	/* retry the whole I2C transaction up to W_MAX_I2CRETRY times */
	for (wNoACK = 0; wNoACK < W_MAX_I2CRETRY; wNoACK++) {
		iowrite8(EEP_I2C_DEV_ID, iobase + MAC_REG_I2MTGID);
		iowrite8(contnt_offset, iobase + MAC_REG_I2MTGAD);

		/* issue read command */
		iowrite8(I2MCSR_EEMR, iobase + MAC_REG_I2MCSR);
		/* wait DONE be set */
		for (wDelay = 0; wDelay < W_MAX_TIMEOUT; wDelay++) {
			byWait = ioread8(iobase + MAC_REG_I2MCSR);
			if (byWait & (I2MCSR_DONE | I2MCSR_NACK))
				break;
			udelay(CB_DELAY_LOOP_WAIT);
		}
		/* success: completed in time and the device acknowledged */
		if ((wDelay < W_MAX_TIMEOUT) &&
		    (!(byWait & I2MCSR_NACK))) {
			break;
		}
	}
	/* data register is read unconditionally, even after exhausting
	 * retries; the original I2MCFG setting is then restored
	 */
	byData = ioread8(iobase + MAC_REG_I2MDIPT);
	iowrite8(byOrg, iobase + MAC_REG_I2MCFG);
	return byData;
}
/*
* Description: Read all contents of eeprom to buffer
*
* Parameters:
* In:
* iobase - I/O base address
* Out:
* pbyEepromRegs - EEPROM content Buffer
*
* Return Value: none
*
*/
/*
 * Copy the entire EEPROM (EEP_MAX_CONTEXT_SIZE bytes) into the caller's
 * buffer, one embedded I2C read per byte, in ascending address order.
 */
void SROMvReadAllContents(void __iomem *iobase, unsigned char *pbyEepromRegs)
{
	int addr;

	for (addr = 0; addr < EEP_MAX_CONTEXT_SIZE; addr++)
		pbyEepromRegs[addr] = SROMbyReadEmbedded(iobase,
							 (unsigned char)addr);
}
/*
* Description: Read Ethernet Address from eeprom to buffer
*
* Parameters:
* In:
* iobase - I/O base address
* Out:
* pbyEtherAddress - Ethernet Address buffer
*
* Return Value: none
*
*/
/*
 * Read the device MAC address: the first ETH_ALEN bytes of the EEPROM,
 * fetched in order into the caller's buffer.
 */
void SROMvReadEtherAddress(void __iomem *iobase,
			   unsigned char *pbyEtherAddress)
{
	unsigned char addr;

	for (addr = 0; addr < ETH_ALEN; addr++)
		pbyEtherAddress[addr] = SROMbyReadEmbedded(iobase, addr);
}
| linux-master | drivers/staging/vt6655/srom.c |
// SPDX-License-Identifier: GPL-2.0+
/*
* Copyright (c) 1996, 2003 VIA Networking Technologies, Inc.
* All rights reserved.
*
*/
#include "baseband.h"
#include "channel.h"
#include "device.h"
#include "rf.h"
/* Legacy b/g rate set reported to mac80211; .bitrate is in 100 kbit/s
 * units, .hw_value is the driver's RATE_* index.
 */
static struct ieee80211_rate vnt_rates_bg[] = {
	{ .bitrate = 10,  .hw_value = RATE_1M },
	{ .bitrate = 20,  .hw_value = RATE_2M },
	{ .bitrate = 55,  .hw_value = RATE_5M },
	{ .bitrate = 110, .hw_value = RATE_11M },
	{ .bitrate = 60,  .hw_value = RATE_6M },
	{ .bitrate = 90,  .hw_value = RATE_9M },
	{ .bitrate = 120, .hw_value = RATE_12M },
	{ .bitrate = 180, .hw_value = RATE_18M },
	{ .bitrate = 240, .hw_value = RATE_24M },
	{ .bitrate = 360, .hw_value = RATE_36M },
	{ .bitrate = 480, .hw_value = RATE_48M },
	{ .bitrate = 540, .hw_value = RATE_54M },
};
/* 2.4 GHz channels 1-14; .hw_value is the channel number programmed
 * into the RF synthesizer by set_channel().
 */
static struct ieee80211_channel vnt_channels_2ghz[] = {
	{ .center_freq = 2412, .hw_value = 1 },
	{ .center_freq = 2417, .hw_value = 2 },
	{ .center_freq = 2422, .hw_value = 3 },
	{ .center_freq = 2427, .hw_value = 4 },
	{ .center_freq = 2432, .hw_value = 5 },
	{ .center_freq = 2437, .hw_value = 6 },
	{ .center_freq = 2442, .hw_value = 7 },
	{ .center_freq = 2447, .hw_value = 8 },
	{ .center_freq = 2452, .hw_value = 9 },
	{ .center_freq = 2457, .hw_value = 10 },
	{ .center_freq = 2462, .hw_value = 11 },
	{ .center_freq = 2467, .hw_value = 12 },
	{ .center_freq = 2472, .hw_value = 13 },
	{ .center_freq = 2484, .hw_value = 14 }
};
/* The one band this device supports, registered with mac80211 via
 * vnt_init_bands().
 */
static struct ieee80211_supported_band vnt_supported_2ghz_band = {
	.channels = vnt_channels_2ghz,
	.n_channels = ARRAY_SIZE(vnt_channels_2ghz),
	.bitrates = vnt_rates_bg,
	.n_bitrates = ARRAY_SIZE(vnt_rates_bg),
};
static void vnt_init_band(struct vnt_private *priv,
struct ieee80211_supported_band *supported_band,
enum nl80211_band band)
{
int i;
for (i = 0; i < supported_band->n_channels; i++) {
supported_band->channels[i].max_power = 0x3f;
supported_band->channels[i].flags =
IEEE80211_CHAN_NO_HT40;
}
priv->hw->wiphy->bands[band] = supported_band;
}
/* Register every supported band with mac80211 (2.4 GHz only for this
 * hardware).
 */
void vnt_init_bands(struct vnt_private *priv)
{
	vnt_init_band(priv, &vnt_supported_2ghz_band, NL80211_BAND_2GHZ);
}
/**
* set_channel() - Set NIC media channel
*
* @priv: The adapter to be set
* @ch: Channel to be set
*
* Return Value: true if succeeded; false if failed.
*
*/
bool set_channel(struct vnt_private *priv, struct ieee80211_channel *ch)
{
	bool ret = true;

	/* already on the requested channel: nothing to do */
	if (priv->byCurrentCh == ch->hw_value)
		return ret;

	/* Set VGA to max sensitivity */
	if (priv->bUpdateBBVGA &&
	    priv->byBBVGACurrent != priv->abyBBVGA[0]) {
		priv->byBBVGACurrent = priv->abyBBVGA[0];

		bb_set_vga_gain_offset(priv, priv->byBBVGACurrent);
	}

	/* clear NAV */
	vt6655_mac_reg_bits_on(priv->port_offset, MAC_REG_MACCR, MACCR_CLRNAV);

	/* TX_PE will reserve 3 us for MAX2829 A mode only,
	 * it is for better TX throughput
	 */

	priv->byCurrentCh = ch->hw_value;
	/* ret stays true only if the RF channel switch succeeded */
	ret &= RFbSelectChannel(priv, priv->byRFType,
				ch->hw_value);

	/* Init Synthesizer Table */
	if (priv->bEnablePSMode)
		rf_write_wake_prog_syn(priv, priv->byRFType, ch->hw_value);

	bb_software_reset(priv);

	if (priv->local_id > REV_ID_VT3253_B1) {
		unsigned long flags;

		spin_lock_irqsave(&priv->lock, flags);

		/* set HW default power register (page 1 holds the
		 * CCK/OFDM power registers)
		 */
		VT6655_MAC_SELECT_PAGE1(priv->port_offset);
		RFbSetPower(priv, RATE_1M, priv->byCurrentCh);
		iowrite8(priv->byCurPwr, priv->port_offset + MAC_REG_PWRCCK);
		RFbSetPower(priv, RATE_6M, priv->byCurrentCh);
		iowrite8(priv->byCurPwr, priv->port_offset + MAC_REG_PWROFDM);
		VT6655_MAC_SELECT_PAGE0(priv->port_offset);

		spin_unlock_irqrestore(&priv->lock, flags);
	}

	/* program TX power for the modulation family currently in use */
	if (priv->byBBType == BB_TYPE_11B)
		RFbSetPower(priv, RATE_1M, priv->byCurrentCh);
	else
		RFbSetPower(priv, RATE_6M, priv->byCurrentCh);

	return ret;
}
| linux-master | drivers/staging/vt6655/channel.c |
// SPDX-License-Identifier: GPL-2.0+
/*
* Copyright (c) 1996, 2003 VIA Networking Technologies, Inc.
* All rights reserved.
*
* Purpose: Handles 802.11 power management functions
*
* Author: Lyndon Chen
*
* Date: July 17, 2002
*
* Functions:
* PSvEnablePowerSaving - Enable Power Saving Mode
 *      PSvDisablePowerSaving - Disable Power Saving Mode
* PSbConsiderPowerDown - Decide if we can Power Down
* PSvSendPSPOLL - Send PS-POLL packet
* PSbSendNullPacket - Send Null packet
* PSbIsNextTBTTWakeUp - Decide if we need to wake up at next Beacon
*
* Revision History:
*
*/
#include "mac.h"
#include "device.h"
#include "power.h"
#include "card.h"
/*--------------------- Static Definitions -------------------------*/
/*--------------------- Static Classes ----------------------------*/
/*--------------------- Static Functions --------------------------*/
/*--------------------- Export Variables --------------------------*/
/*--------------------- Export Functions --------------------------*/
/*
*
* Routine Description:
* Enable hw power saving functions
*
* Return Value:
* None.
*
*/
void PSvEnablePowerSaving(struct vnt_private *priv,
			  unsigned short wListenInterval)
{
	/* AID with the two top bits set, as 802.11 encodes the AID field
	 * in PS-Poll frames — TODO confirm against the AIDATIM register spec
	 */
	u16 wAID = priv->current_aid | BIT(14) | BIT(15);

	/* set period of power up before TBTT */
	iowrite16(C_PWBT, priv->port_offset + MAC_REG_PWBT);
	if (priv->op_mode != NL80211_IFTYPE_ADHOC) {
		/* set AID */
		iowrite16(wAID, priv->port_offset + MAC_REG_AIDATIM);
	}

	/* Set AutoSleep */
	vt6655_mac_reg_bits_on(priv->port_offset, MAC_REG_PSCFG, PSCFG_AUTOSLEEP);

	/* Set HWUTSF */
	vt6655_mac_reg_bits_on(priv->port_offset, MAC_REG_TFTCTL, TFTCTL_HWUTSF);

	if (wListenInterval >= 2) {
		/* clear always listen beacon */
		vt6655_mac_reg_bits_off(priv->port_offset, MAC_REG_PSCTL, PSCTL_ALBCN);
		/* first time set listen next beacon */
		vt6655_mac_reg_bits_on(priv->port_offset, MAC_REG_PSCTL, PSCTL_LNBCN);
	} else {
		/* always listen beacon */
		vt6655_mac_reg_bits_on(priv->port_offset, MAC_REG_PSCTL, PSCTL_ALBCN);
	}

	/* enable power saving hw function */
	vt6655_mac_reg_bits_on(priv->port_offset, MAC_REG_PSCTL, PSCTL_PSEN);
	priv->bEnablePSMode = true;

	priv->bPWBitOn = true;
	pr_debug("PS:Power Saving Mode Enable...\n");
}
/*
*
* Routine Description:
* Disable hw power saving functions
*
* Return Value:
* None.
*
*/
void PSvDisablePowerSaving(struct vnt_private *priv)
{
	/* disable power saving hw function
	 * (NOTE(review): PSCTL_PSEN is not cleared here explicitly;
	 * presumably MACbPSWakeup takes the MAC out of PS - confirm.)
	 */
	MACbPSWakeup(priv);
	/* clear AutoSleep */
	vt6655_mac_reg_bits_off(priv->port_offset, MAC_REG_PSCFG, PSCFG_AUTOSLEEP);
	/* clear HWUTSF */
	vt6655_mac_reg_bits_off(priv->port_offset, MAC_REG_TFTCTL, TFTCTL_HWUTSF);
	/* set always listen beacon */
	vt6655_mac_reg_bits_on(priv->port_offset, MAC_REG_PSCTL, PSCTL_ALBCN);
	priv->bEnablePSMode = false;
	priv->bPWBitOn = false;
}
/*
*
* Routine Description:
* Check if Next TBTT must wake up
*
* Return Value:
* None.
*
*/
bool PSbIsNextTBTTWakeUp(struct vnt_private *priv)
{
	struct ieee80211_conf *conf = &priv->hw->conf;

	/* With a listen interval of 0 or 1 every beacon is received
	 * anyway, so no special wake-up is needed.
	 */
	if (conf->listen_interval <= 1)
		return false;

	if (!priv->wake_up_count)
		priv->wake_up_count = conf->listen_interval;

	--priv->wake_up_count;

	if (priv->wake_up_count != 1)
		return false;

	/* Turn on wake up to listen next beacon */
	vt6655_mac_reg_bits_on(priv->port_offset, MAC_REG_PSCTL, PSCTL_LNBCN);
	return true;
}
| linux-master | drivers/staging/vt6655/power.c |
// SPDX-License-Identifier: GPL-2.0+
/*
* Copyright (c) 1996, 2003 VIA Networking Technologies, Inc.
* All rights reserved.
*
* Purpose: rf function code
*
* Author: Jerry Chen
*
* Date: Feb. 19, 2004
*
* Functions:
* IFRFbWriteEmbedded - Embedded write RF register via MAC
*
* Revision History:
* RobertYu 2005
* chester 2008
*
*/
#include "mac.h"
#include "srom.h"
#include "rf.h"
#include "baseband.h"
/* Airoha AL2230 (2.4 GHz) RF programming parameters. */
#define BY_AL2230_REG_LEN 23 /* 24bit */
#define CB_AL2230_INIT_SEQ 15
#define SWITCH_CHANNEL_DELAY_AL2230 200 /* us */
#define AL2230_PWR_IDX_LEN 64
/* Airoha AL7230 (dual-band) parameters - no AL7230 tables appear in this
 * file; presumably kept for completeness. TODO(review): confirm unused.
 */
#define BY_AL7230_REG_LEN 23 /* 24bit */
#define CB_AL7230_INIT_SEQ 16
#define SWITCH_CHANNEL_DELAY_AL7230 200 /* us */
#define AL7230_PWR_IDX_LEN 64
/*
 * AL2230 register initialisation sequence, one command word per entry.
 * Each word appears to pack the register payload in the high bits, the
 * transfer length (BY_AL2230_REG_LEN << 3) and the IFREGCTL_REGW write
 * command in the low bits; it is written via IFRFbWriteEmbedded().
 */
static const unsigned long al2230_init_table[CB_AL2230_INIT_SEQ] = {
	0x03F79000 + (BY_AL2230_REG_LEN << 3) + IFREGCTL_REGW,
	0x03333100 + (BY_AL2230_REG_LEN << 3) + IFREGCTL_REGW,
	0x01A00200 + (BY_AL2230_REG_LEN << 3) + IFREGCTL_REGW,
	0x00FFF300 + (BY_AL2230_REG_LEN << 3) + IFREGCTL_REGW,
	0x0005A400 + (BY_AL2230_REG_LEN << 3) + IFREGCTL_REGW,
	0x0F4DC500 + (BY_AL2230_REG_LEN << 3) + IFREGCTL_REGW,
	0x0805B600 + (BY_AL2230_REG_LEN << 3) + IFREGCTL_REGW,
	0x0146C700 + (BY_AL2230_REG_LEN << 3) + IFREGCTL_REGW,
	0x00068800 + (BY_AL2230_REG_LEN << 3) + IFREGCTL_REGW,
	0x0403B900 + (BY_AL2230_REG_LEN << 3) + IFREGCTL_REGW,
	0x00DBBA00 + (BY_AL2230_REG_LEN << 3) + IFREGCTL_REGW,
	0x00099B00 + (BY_AL2230_REG_LEN << 3) + IFREGCTL_REGW,
	0x0BDFFC00 + (BY_AL2230_REG_LEN << 3) + IFREGCTL_REGW,
	0x00000D00 + (BY_AL2230_REG_LEN << 3) + IFREGCTL_REGW,
	0x00580F00 + (BY_AL2230_REG_LEN << 3) + IFREGCTL_REGW
};
/*
 * Per-channel AL2230 command word (first of two registers per channel);
 * indexed by channel number - 1.
 */
static const unsigned long al2230_channel_table0[CB_MAX_CHANNEL] = {
	0x03F79000 + (BY_AL2230_REG_LEN << 3) + IFREGCTL_REGW, /* channel = 1, Tf = 2412MHz */
	0x03F79000 + (BY_AL2230_REG_LEN << 3) + IFREGCTL_REGW, /* channel = 2, Tf = 2417MHz */
	0x03E79000 + (BY_AL2230_REG_LEN << 3) + IFREGCTL_REGW, /* channel = 3, Tf = 2422MHz */
	0x03E79000 + (BY_AL2230_REG_LEN << 3) + IFREGCTL_REGW, /* channel = 4, Tf = 2427MHz */
	0x03F7A000 + (BY_AL2230_REG_LEN << 3) + IFREGCTL_REGW, /* channel = 5, Tf = 2432MHz */
	0x03F7A000 + (BY_AL2230_REG_LEN << 3) + IFREGCTL_REGW, /* channel = 6, Tf = 2437MHz */
	0x03E7A000 + (BY_AL2230_REG_LEN << 3) + IFREGCTL_REGW, /* channel = 7, Tf = 2442MHz */
	0x03E7A000 + (BY_AL2230_REG_LEN << 3) + IFREGCTL_REGW, /* channel = 8, Tf = 2447MHz */
	0x03F7B000 + (BY_AL2230_REG_LEN << 3) + IFREGCTL_REGW, /* channel = 9, Tf = 2452MHz */
	0x03F7B000 + (BY_AL2230_REG_LEN << 3) + IFREGCTL_REGW, /* channel = 10, Tf = 2457MHz */
	0x03E7B000 + (BY_AL2230_REG_LEN << 3) + IFREGCTL_REGW, /* channel = 11, Tf = 2462MHz */
	0x03E7B000 + (BY_AL2230_REG_LEN << 3) + IFREGCTL_REGW, /* channel = 12, Tf = 2467MHz */
	0x03F7C000 + (BY_AL2230_REG_LEN << 3) + IFREGCTL_REGW, /* channel = 13, Tf = 2472MHz */
	0x03E7C000 + (BY_AL2230_REG_LEN << 3) + IFREGCTL_REGW  /* channel = 14, Tf = 2412M */
};
/*
 * Per-channel AL2230 command word (second of two registers per channel);
 * indexed by channel number - 1.
 */
static const unsigned long al2230_channel_table1[CB_MAX_CHANNEL] = {
	0x03333100 + (BY_AL2230_REG_LEN << 3) + IFREGCTL_REGW, /* channel = 1, Tf = 2412MHz */
	0x0B333100 + (BY_AL2230_REG_LEN << 3) + IFREGCTL_REGW, /* channel = 2, Tf = 2417MHz */
	0x03333100 + (BY_AL2230_REG_LEN << 3) + IFREGCTL_REGW, /* channel = 3, Tf = 2422MHz */
	0x0B333100 + (BY_AL2230_REG_LEN << 3) + IFREGCTL_REGW, /* channel = 4, Tf = 2427MHz */
	0x03333100 + (BY_AL2230_REG_LEN << 3) + IFREGCTL_REGW, /* channel = 5, Tf = 2432MHz */
	0x0B333100 + (BY_AL2230_REG_LEN << 3) + IFREGCTL_REGW, /* channel = 6, Tf = 2437MHz */
	0x03333100 + (BY_AL2230_REG_LEN << 3) + IFREGCTL_REGW, /* channel = 7, Tf = 2442MHz */
	0x0B333100 + (BY_AL2230_REG_LEN << 3) + IFREGCTL_REGW, /* channel = 8, Tf = 2447MHz */
	0x03333100 + (BY_AL2230_REG_LEN << 3) + IFREGCTL_REGW, /* channel = 9, Tf = 2452MHz */
	0x0B333100 + (BY_AL2230_REG_LEN << 3) + IFREGCTL_REGW, /* channel = 10, Tf = 2457MHz */
	0x03333100 + (BY_AL2230_REG_LEN << 3) + IFREGCTL_REGW, /* channel = 11, Tf = 2462MHz */
	0x0B333100 + (BY_AL2230_REG_LEN << 3) + IFREGCTL_REGW, /* channel = 12, Tf = 2467MHz */
	0x03333100 + (BY_AL2230_REG_LEN << 3) + IFREGCTL_REGW, /* channel = 13, Tf = 2472MHz */
	0x06666100 + (BY_AL2230_REG_LEN << 3) + IFREGCTL_REGW  /* channel = 14, Tf = 2412M */
};
/*
 * AL2230 Tx power command words, indexed by power level (0..63).  The
 * table is only ever read (see RFbRawSetPower()), so declare it const
 * like the other RF tables in this file; this lets it live in rodata.
 */
static const unsigned long al2230_power_table[AL2230_PWR_IDX_LEN] = {
	0x04040900 + (BY_AL2230_REG_LEN << 3) + IFREGCTL_REGW,
	0x04041900 + (BY_AL2230_REG_LEN << 3) + IFREGCTL_REGW,
	0x04042900 + (BY_AL2230_REG_LEN << 3) + IFREGCTL_REGW,
	0x04043900 + (BY_AL2230_REG_LEN << 3) + IFREGCTL_REGW,
	0x04044900 + (BY_AL2230_REG_LEN << 3) + IFREGCTL_REGW,
	0x04045900 + (BY_AL2230_REG_LEN << 3) + IFREGCTL_REGW,
	0x04046900 + (BY_AL2230_REG_LEN << 3) + IFREGCTL_REGW,
	0x04047900 + (BY_AL2230_REG_LEN << 3) + IFREGCTL_REGW,
	0x04048900 + (BY_AL2230_REG_LEN << 3) + IFREGCTL_REGW,
	0x04049900 + (BY_AL2230_REG_LEN << 3) + IFREGCTL_REGW,
	0x0404A900 + (BY_AL2230_REG_LEN << 3) + IFREGCTL_REGW,
	0x0404B900 + (BY_AL2230_REG_LEN << 3) + IFREGCTL_REGW,
	0x0404C900 + (BY_AL2230_REG_LEN << 3) + IFREGCTL_REGW,
	0x0404D900 + (BY_AL2230_REG_LEN << 3) + IFREGCTL_REGW,
	0x0404E900 + (BY_AL2230_REG_LEN << 3) + IFREGCTL_REGW,
	0x0404F900 + (BY_AL2230_REG_LEN << 3) + IFREGCTL_REGW,
	0x04050900 + (BY_AL2230_REG_LEN << 3) + IFREGCTL_REGW,
	0x04051900 + (BY_AL2230_REG_LEN << 3) + IFREGCTL_REGW,
	0x04052900 + (BY_AL2230_REG_LEN << 3) + IFREGCTL_REGW,
	0x04053900 + (BY_AL2230_REG_LEN << 3) + IFREGCTL_REGW,
	0x04054900 + (BY_AL2230_REG_LEN << 3) + IFREGCTL_REGW,
	0x04055900 + (BY_AL2230_REG_LEN << 3) + IFREGCTL_REGW,
	0x04056900 + (BY_AL2230_REG_LEN << 3) + IFREGCTL_REGW,
	0x04057900 + (BY_AL2230_REG_LEN << 3) + IFREGCTL_REGW,
	0x04058900 + (BY_AL2230_REG_LEN << 3) + IFREGCTL_REGW,
	0x04059900 + (BY_AL2230_REG_LEN << 3) + IFREGCTL_REGW,
	0x0405A900 + (BY_AL2230_REG_LEN << 3) + IFREGCTL_REGW,
	0x0405B900 + (BY_AL2230_REG_LEN << 3) + IFREGCTL_REGW,
	0x0405C900 + (BY_AL2230_REG_LEN << 3) + IFREGCTL_REGW,
	0x0405D900 + (BY_AL2230_REG_LEN << 3) + IFREGCTL_REGW,
	0x0405E900 + (BY_AL2230_REG_LEN << 3) + IFREGCTL_REGW,
	0x0405F900 + (BY_AL2230_REG_LEN << 3) + IFREGCTL_REGW,
	0x04060900 + (BY_AL2230_REG_LEN << 3) + IFREGCTL_REGW,
	0x04061900 + (BY_AL2230_REG_LEN << 3) + IFREGCTL_REGW,
	0x04062900 + (BY_AL2230_REG_LEN << 3) + IFREGCTL_REGW,
	0x04063900 + (BY_AL2230_REG_LEN << 3) + IFREGCTL_REGW,
	0x04064900 + (BY_AL2230_REG_LEN << 3) + IFREGCTL_REGW,
	0x04065900 + (BY_AL2230_REG_LEN << 3) + IFREGCTL_REGW,
	0x04066900 + (BY_AL2230_REG_LEN << 3) + IFREGCTL_REGW,
	0x04067900 + (BY_AL2230_REG_LEN << 3) + IFREGCTL_REGW,
	0x04068900 + (BY_AL2230_REG_LEN << 3) + IFREGCTL_REGW,
	0x04069900 + (BY_AL2230_REG_LEN << 3) + IFREGCTL_REGW,
	0x0406A900 + (BY_AL2230_REG_LEN << 3) + IFREGCTL_REGW,
	0x0406B900 + (BY_AL2230_REG_LEN << 3) + IFREGCTL_REGW,
	0x0406C900 + (BY_AL2230_REG_LEN << 3) + IFREGCTL_REGW,
	0x0406D900 + (BY_AL2230_REG_LEN << 3) + IFREGCTL_REGW,
	0x0406E900 + (BY_AL2230_REG_LEN << 3) + IFREGCTL_REGW,
	0x0406F900 + (BY_AL2230_REG_LEN << 3) + IFREGCTL_REGW,
	0x04070900 + (BY_AL2230_REG_LEN << 3) + IFREGCTL_REGW,
	0x04071900 + (BY_AL2230_REG_LEN << 3) + IFREGCTL_REGW,
	0x04072900 + (BY_AL2230_REG_LEN << 3) + IFREGCTL_REGW,
	0x04073900 + (BY_AL2230_REG_LEN << 3) + IFREGCTL_REGW,
	0x04074900 + (BY_AL2230_REG_LEN << 3) + IFREGCTL_REGW,
	0x04075900 + (BY_AL2230_REG_LEN << 3) + IFREGCTL_REGW,
	0x04076900 + (BY_AL2230_REG_LEN << 3) + IFREGCTL_REGW,
	0x04077900 + (BY_AL2230_REG_LEN << 3) + IFREGCTL_REGW,
	0x04078900 + (BY_AL2230_REG_LEN << 3) + IFREGCTL_REGW,
	0x04079900 + (BY_AL2230_REG_LEN << 3) + IFREGCTL_REGW,
	0x0407A900 + (BY_AL2230_REG_LEN << 3) + IFREGCTL_REGW,
	0x0407B900 + (BY_AL2230_REG_LEN << 3) + IFREGCTL_REGW,
	0x0407C900 + (BY_AL2230_REG_LEN << 3) + IFREGCTL_REGW,
	0x0407D900 + (BY_AL2230_REG_LEN << 3) + IFREGCTL_REGW,
	0x0407E900 + (BY_AL2230_REG_LEN << 3) + IFREGCTL_REGW,
	0x0407F900 + (BY_AL2230_REG_LEN << 3) + IFREGCTL_REGW
};
/*
* Description: Write to IF/RF, by embedded programming
*
* Parameters:
* In:
* iobase - I/O base address
* dwData - data to write
* Out:
* none
*
* Return Value: true if succeeded; false if failed.
*
*/
bool IFRFbWriteEmbedded(struct vnt_private *priv, unsigned long dwData)
{
void __iomem *iobase = priv->port_offset;
unsigned short ww;
unsigned long dwValue;
iowrite32((u32)dwData, iobase + MAC_REG_IFREGCTL);
/* W_MAX_TIMEOUT is the timeout period */
for (ww = 0; ww < W_MAX_TIMEOUT; ww++) {
dwValue = ioread32(iobase + MAC_REG_IFREGCTL);
if (dwValue & IFREGCTL_DONE)
break;
}
if (ww == W_MAX_TIMEOUT)
return false;
return true;
}
/*
* Description: AIROHA IFRF chip init function
*
* Parameters:
* In:
* iobase - I/O base address
* Out:
* none
*
* Return Value: true if succeeded; false if failed.
*
*/
static bool RFbAL2230Init(struct vnt_private *priv)
{
	void __iomem *iobase = priv->port_offset;
	int ii;
	bool ret;

	ret = true;

	/* 3-wire control for normal mode */
	iowrite8(0, iobase + MAC_REG_SOFTPWRCTL);

	vt6655_mac_word_reg_bits_on(iobase, MAC_REG_SOFTPWRCTL,
				    (SOFTPWRCTL_SWPECTI | SOFTPWRCTL_TXPEINV));
	/* PLL Off */
	vt6655_mac_word_reg_bits_off(iobase, MAC_REG_SOFTPWRCTL, SOFTPWRCTL_SWPE3);

	/* patch abnormal AL2230 frequency output
	 * (NOTE(review): this write's result is deliberately not folded
	 * into ret - presumably a best-effort workaround; confirm.)
	 */
	IFRFbWriteEmbedded(priv, (0x07168700 + (BY_AL2230_REG_LEN << 3) + IFREGCTL_REGW));

	/* Push the whole init sequence; ret stays true only if every
	 * embedded write succeeded.
	 */
	for (ii = 0; ii < CB_AL2230_INIT_SEQ; ii++)
		ret &= IFRFbWriteEmbedded(priv, al2230_init_table[ii]);
	MACvTimer0MicroSDelay(priv, 30); /* delay 30 us */

	/* PLL On */
	vt6655_mac_word_reg_bits_on(iobase, MAC_REG_SOFTPWRCTL, SOFTPWRCTL_SWPE3);

	/* Calibration writes, each followed by a settle delay. */
	MACvTimer0MicroSDelay(priv, 150);/* 150us */
	ret &= IFRFbWriteEmbedded(priv, (0x00d80f00 + (BY_AL2230_REG_LEN << 3) + IFREGCTL_REGW));
	MACvTimer0MicroSDelay(priv, 30);/* 30us */
	ret &= IFRFbWriteEmbedded(priv, (0x00780f00 + (BY_AL2230_REG_LEN << 3) + IFREGCTL_REGW));
	MACvTimer0MicroSDelay(priv, 30);/* 30us */
	ret &= IFRFbWriteEmbedded(priv,
				  al2230_init_table[CB_AL2230_INIT_SEQ - 1]);

	vt6655_mac_word_reg_bits_on(iobase, MAC_REG_SOFTPWRCTL, (SOFTPWRCTL_SWPE3    |
								 SOFTPWRCTL_SWPE2    |
								 SOFTPWRCTL_SWPECTI  |
								 SOFTPWRCTL_TXPEINV));

	/* 3-wire control for power saving mode */
	iowrite8(PSSIG_WPE3 | PSSIG_WPE2, iobase + MAC_REG_PSPWRSIG);

	return ret;
}
/* Tune the AL2230 to @byChannel (1-based); returns false if either
 * embedded RF write failed.
 */
static bool RFbAL2230SelectChannel(struct vnt_private *priv, unsigned char byChannel)
{
	void __iomem *iobase = priv->port_offset;
	bool ret;

	ret = true;

	/* Write both per-channel RF register words. */
	ret &= IFRFbWriteEmbedded(priv, al2230_channel_table0[byChannel - 1]);
	ret &= IFRFbWriteEmbedded(priv, al2230_channel_table1[byChannel - 1]);

	/* Set Channel[7] = 0 to tell H/W channel is changing now. */
	iowrite8(byChannel & 0x7F, iobase + MAC_REG_CHANNEL);
	MACvTimer0MicroSDelay(priv, SWITCH_CHANNEL_DELAY_AL2230);
	/* Set Channel[7] = 1 to tell H/W channel change is done. */
	iowrite8(byChannel | 0x80, iobase + MAC_REG_CHANNEL);

	return ret;
}
/*
* Description: RF init function
*
* Parameters:
* In:
* byBBType
* byRFType
* Out:
* none
*
* Return Value: true if succeeded; false if failed.
*
*/
/* Dispatch RF initialisation on the detected RF chip type. */
bool RFbInit(struct vnt_private *priv)
{
	switch (priv->byRFType) {
	case RF_AIROHA:
	case RF_AL2230S:
		priv->max_pwr_level = AL2230_PWR_IDX_LEN;
		return RFbAL2230Init(priv);
	case RF_NOTHING:
		/* No RF fitted: nothing to initialise. */
		return true;
	default:
		/* Unknown RF chip. */
		return false;
	}
}
/*
* Description: Select channel
*
* Parameters:
* In:
* byRFType
* byChannel - Channel number
* Out:
* none
*
* Return Value: true if succeeded; false if failed.
*
*/
/* Dispatch a channel switch on the RF chip type. */
bool RFbSelectChannel(struct vnt_private *priv, unsigned char byRFType,
		      u16 byChannel)
{
	switch (byRFType) {
	case RF_AIROHA:
	case RF_AL2230S:
		return RFbAL2230SelectChannel(priv, byChannel);
	case RF_NOTHING:
		/* No RF fitted: treat the switch as a no-op success. */
		return true;
	default:
		return false;
	}
}
/*
* Description: Write WakeProgSyn
*
* Parameters:
* In:
* priv - Device Structure
* rf_type - RF type
* channel - Channel number
*
* Return Value: true if succeeded; false if failed.
*
*/
bool rf_write_wake_prog_syn(struct vnt_private *priv, unsigned char rf_type,
			    u16 channel)
{
	void __iomem *iobase = priv->port_offset;
	int i;
	unsigned char init_count = 0;
	unsigned char sleep_count = 0;
	unsigned short idx = MISCFIFO_SYNDATA_IDX;

	/* Rewind the MISC FIFO index before loading the program. */
	iowrite16(0, iobase + MAC_REG_MISCFFNDEX);
	switch (rf_type) {
	case RF_AIROHA:
	case RF_AL2230S:
		/* AL2230 only covers the 2.4 GHz band. */
		if (channel > CB_MAX_CHANNEL_24G)
			return false;

		/* Init Reg + Channel Reg (2) */
		init_count = CB_AL2230_INIT_SEQ + 2;
		sleep_count = 0;
		/* Load the full init sequence followed by the two
		 * per-channel command words.
		 */
		for (i = 0; i < CB_AL2230_INIT_SEQ; i++)
			MACvSetMISCFifo(priv, idx++, al2230_init_table[i]);

		MACvSetMISCFifo(priv, idx++, al2230_channel_table0[channel - 1]);
		MACvSetMISCFifo(priv, idx++, al2230_channel_table1[channel - 1]);
		break;

		/* Need to check, PLLON need to be low for channel setting */
	case RF_NOTHING:
		return true;

	default:
		return false;
	}

	/* Publish the program sizes (sleep/init counts) in the info slot. */
	MACvSetMISCFifo(priv, MISCFIFO_SYNINFO_IDX, (unsigned long)MAKEWORD(sleep_count, init_count));

	return true;
}
/*
* Description: Set Tx power
*
* Parameters:
* In:
* iobase - I/O base address
* dwRFPowerTable - RF Tx Power Setting
* Out:
* none
*
* Return Value: true if succeeded; false if failed.
*
*/
bool RFbSetPower(struct vnt_private *priv, unsigned int rate, u16 uCH)
{
	bool ret;
	unsigned char byPwr = 0;
	unsigned char byDec = 0;

	/* In diagnostic mode the power level must not be touched. */
	if (priv->dwDiagRefCount != 0)
		return true;

	if ((uCH < 1) || (uCH > CB_MAX_CHANNEL))
		return false;

	switch (rate) {
	case RATE_1M:
	case RATE_2M:
	case RATE_5M:
	case RATE_11M:
		/* CCK rates exist only on the 2.4 GHz channels. */
		if (uCH > CB_MAX_CHANNEL_24G)
			return false;

		byPwr = priv->abyCCKPwrTbl[uCH];
		break;
	case RATE_6M:
	case RATE_9M:
	case RATE_12M:
	case RATE_18M:
		/* Low OFDM rates get a +10 boost, clamped to the last
		 * valid power index.
		 */
		byPwr = priv->abyOFDMPwrTbl[uCH];

		byDec = byPwr + 10;

		if (byDec >= priv->max_pwr_level)
			byDec = priv->max_pwr_level - 1;

		byPwr = byDec;
		break;
	case RATE_24M:
	case RATE_36M:
	case RATE_48M:
	case RATE_54M:
		byPwr = priv->abyOFDMPwrTbl[uCH];
		break;
	/* NOTE(review): an unknown rate falls through with byPwr = 0,
	 * i.e. lowest power - confirm this is the intended fallback.
	 */
	}

	/* Skip the RF write when the power level is already current. */
	if (priv->byCurPwr == byPwr)
		return true;

	ret = RFbRawSetPower(priv, byPwr, rate);
	if (ret)
		priv->byCurPwr = byPwr;

	return ret;
}
/*
* Description: Set Tx power
*
* Parameters:
* In:
* iobase - I/O base address
* dwRFPowerTable - RF Tx Power Setting
* Out:
* none
*
* Return Value: true if succeeded; false if failed.
*
*/
bool RFbRawSetPower(struct vnt_private *priv, unsigned char byPwr,
		    unsigned int rate)
{
	bool ret = true;

	/* Reject out-of-range power indices. */
	if (byPwr >= priv->max_pwr_level)
		return false;

	switch (priv->byRFType) {
	case RF_AIROHA:
		ret &= IFRFbWriteEmbedded(priv, al2230_power_table[byPwr]);
		/* CCK (<= 11M) and OFDM rates need different follow-up
		 * register values.
		 */
		if (rate <= RATE_11M)
			ret &= IFRFbWriteEmbedded(priv, 0x0001B400 + (BY_AL2230_REG_LEN << 3) + IFREGCTL_REGW);
		else
			ret &= IFRFbWriteEmbedded(priv, 0x0005A400 + (BY_AL2230_REG_LEN << 3) + IFREGCTL_REGW);

		break;

	case RF_AL2230S:
		ret &= IFRFbWriteEmbedded(priv, al2230_power_table[byPwr]);
		if (rate <= RATE_11M) {
			ret &= IFRFbWriteEmbedded(priv, 0x040C1400 + (BY_AL2230_REG_LEN << 3) + IFREGCTL_REGW);
			ret &= IFRFbWriteEmbedded(priv, 0x00299B00 + (BY_AL2230_REG_LEN << 3) + IFREGCTL_REGW);
		} else {
			ret &= IFRFbWriteEmbedded(priv, 0x0005A400 + (BY_AL2230_REG_LEN << 3) + IFREGCTL_REGW);
			ret &= IFRFbWriteEmbedded(priv, 0x00099B00 + (BY_AL2230_REG_LEN << 3) + IFREGCTL_REGW);
		}

		break;

	default:
		/* Unknown RF: leave power untouched, report success. */
		break;
	}
	return ret;
}
/*
*
* Routine Description:
* Translate RSSI to dBm
*
* Parameters:
* In:
* priv - The adapter to be translated
* byCurrRSSI - RSSI to be translated
* Out:
* pdwdbm - Translated dbm number
*
* Return Value: none
*
*/
/*
 * RFvRSSITodBm - translate a raw RSSI byte into a dBm value.
 * @priv:       adapter (selects the RF-specific offset table)
 * @byCurrRSSI: raw RSSI byte; bits 7..6 select a gain range, bits 5..0
 *              carry the attenuation count
 * @pldBm:      out: resulting (negative) dBm value
 *
 * For non-Airoha RF types the gain-range offset is 0.
 */
void
RFvRSSITodBm(struct vnt_private *priv, unsigned char byCurrRSSI, long *pldBm)
{
	unsigned char byIdx = (((byCurrRSSI & 0xC0) >> 6) & 0x03);
	long b = (byCurrRSSI & 0x3F);
	long a = 0;
	/* Per-gain-range offsets for the Airoha front ends.  static const:
	 * the table is read-only and need not be rebuilt on every call.
	 */
	static const unsigned char abyAIROHARF[4] = {0, 18, 0, 40};

	switch (priv->byRFType) {
	case RF_AIROHA:
	case RF_AL2230S:
		a = abyAIROHARF[byIdx];
		break;
	default:
		break;
	}

	*pldBm = -1 * (a + b * 2);
}
| linux-master | drivers/staging/vt6655/rf.c |
// SPDX-License-Identifier: GPL-2.0+
/*
* Copyright (c) 1996, 2003 VIA Networking Technologies, Inc.
* All rights reserved.
*
* Purpose: driver entry for initial, open, close, tx and rx.
*
* Author: Lyndon Chen
*
* Date: Jan 8, 2003
*
* Functions:
*
* vt6655_probe - module initial (insmod) driver entry
* vt6655_remove - module remove entry
* device_free_info - device structure resource free function
* device_print_info - print out resource
* device_rx_srv - rx service function
* device_alloc_rx_buf - rx buffer pre-allocated function
* device_free_rx_buf - free rx buffer function
* device_free_tx_buf - free tx buffer function
* device_init_rd0_ring - initial rd dma0 ring
* device_init_rd1_ring - initial rd dma1 ring
* device_init_td0_ring - initial tx dma0 ring buffer
* device_init_td1_ring - initial tx dma1 ring buffer
* device_init_registers - initial MAC & BBP & RF internal registers.
* device_init_rings - initial tx/rx ring buffer
* device_free_rings - free all allocated ring buffer
* device_tx_srv - tx interrupt service function
*
* Revision History:
*/
#include <linux/file.h>
#include "device.h"
#include "card.h"
#include "channel.h"
#include "baseband.h"
#include "mac.h"
#include "power.h"
#include "rxtx.h"
#include "dpc.h"
#include "rf.h"
#include <linux/delay.h>
#include <linux/kthread.h>
#include <linux/slab.h>
/*--------------------- Static Definitions -------------------------*/
/*
* Define module options
*/
MODULE_AUTHOR("VIA Networking Technologies, Inc., <[email protected]>");
MODULE_LICENSE("GPL");
MODULE_DESCRIPTION("VIA Networking Solomon-A/B/G Wireless LAN Adapter Driver");

/*
 * DEVICE_PARAM expands to nothing: the declarations below only document
 * each tunable and its legal range; the driver always uses the *_DEF
 * values (see device_get_options()).
 */
#define DEVICE_PARAM(N, D)

/* RX/TX descriptor ring sizes (min/max bounds and compiled-in default). */
#define RX_DESC_MIN0 16
#define RX_DESC_MAX0 128
#define RX_DESC_DEF0 32
DEVICE_PARAM(RxDescriptors0, "Number of receive descriptors0");

#define RX_DESC_MIN1 16
#define RX_DESC_MAX1 128
#define RX_DESC_DEF1 32
DEVICE_PARAM(RxDescriptors1, "Number of receive descriptors1");

#define TX_DESC_MIN0 16
#define TX_DESC_MAX0 128
#define TX_DESC_DEF0 32
DEVICE_PARAM(TxDescriptors0, "Number of transmit descriptors0");

#define TX_DESC_MIN1 16
#define TX_DESC_MAX1 128
#define TX_DESC_DEF1 64
DEVICE_PARAM(TxDescriptors1, "Number of transmit descriptors1");

/* Cap on packets handled in one interrupt-service pass. */
#define INT_WORKS_DEF 20
#define INT_WORKS_MIN 10
#define INT_WORKS_MAX 64
DEVICE_PARAM(int_works, "Number of packets per interrupt services");

/* 802.11 RTS/fragmentation thresholds (defaults disable both). */
#define RTS_THRESH_DEF 2347
#define FRAG_THRESH_DEF 2346

#define SHORT_RETRY_MIN 0
#define SHORT_RETRY_MAX 31
#define SHORT_RETRY_DEF 8
DEVICE_PARAM(ShortRetryLimit, "Short frame retry limits");

#define LONG_RETRY_MIN 0
#define LONG_RETRY_MAX 15
#define LONG_RETRY_DEF 4
DEVICE_PARAM(LongRetryLimit, "long frame retry limits");

/* BasebandType[] baseband type selected
 * 0: indicate 802.11a type
 * 1: indicate 802.11b type
 * 2: indicate 802.11g type
 */
#define BBP_TYPE_MIN 0
#define BBP_TYPE_MAX 2
#define BBP_TYPE_DEF 2
DEVICE_PARAM(BasebandType, "baseband type");

/*
 * Static vars definitions
 */
/* PCI IDs this driver binds to: VIA device 0x3253 (VT6655). */
static const struct pci_device_id vt6655_pci_id_table[] = {
	{ PCI_VDEVICE(VIA, 0x3253) },
	{ 0, }
};
/*--------------------- Static Functions --------------------------*/
static int vt6655_probe(struct pci_dev *pcid, const struct pci_device_id *ent);
static void device_free_info(struct vnt_private *priv);
static void device_print_info(struct vnt_private *priv);
static void vt6655_mac_write_bssid_addr(void __iomem *iobase, const u8 *mac_addr);
static void vt6655_mac_read_ether_addr(void __iomem *iobase, u8 *mac_addr);
static int device_init_rd0_ring(struct vnt_private *priv);
static int device_init_rd1_ring(struct vnt_private *priv);
static int device_init_td0_ring(struct vnt_private *priv);
static int device_init_td1_ring(struct vnt_private *priv);
static int device_rx_srv(struct vnt_private *priv, unsigned int idx);
static int device_tx_srv(struct vnt_private *priv, unsigned int idx);
static bool device_alloc_rx_buf(struct vnt_private *, struct vnt_rx_desc *);
static void device_free_rx_buf(struct vnt_private *priv,
struct vnt_rx_desc *rd);
static void device_init_registers(struct vnt_private *priv);
static void device_free_tx_buf(struct vnt_private *, struct vnt_tx_desc *);
static void device_free_td0_ring(struct vnt_private *priv);
static void device_free_td1_ring(struct vnt_private *priv);
static void device_free_rd0_ring(struct vnt_private *priv);
static void device_free_rd1_ring(struct vnt_private *priv);
static void device_free_rings(struct vnt_private *priv);
/*--------------------- Export Variables --------------------------*/
/*--------------------- Export Functions --------------------------*/
/* PCI remove callback: tear down the adapter bound to @pcid. */
static void vt6655_remove(struct pci_dev *pcid)
{
	struct vnt_private *priv = pci_get_drvdata(pcid);

	if (priv)
		device_free_info(priv);
}
/* Populate every driver tunable with its compile-time default. */
static void device_get_options(struct vnt_private *priv)
{
	priv->opts.rx_descs0 = RX_DESC_DEF0;
	priv->opts.rx_descs1 = RX_DESC_DEF1;
	priv->opts.tx_descs[0] = TX_DESC_DEF0;
	priv->opts.tx_descs[1] = TX_DESC_DEF1;
	priv->opts.int_works = INT_WORKS_DEF;
	priv->opts.short_retry = SHORT_RETRY_DEF;
	priv->opts.long_retry = LONG_RETRY_DEF;
	priv->opts.bbp_type = BBP_TYPE_DEF;
}
/* Copy the selected option values into the driver's working fields and
 * dump them for debugging.
 */
static void
device_set_options(struct vnt_private *priv)
{
	priv->byShortRetryLimit = priv->opts.short_retry;
	priv->byLongRetryLimit = priv->opts.long_retry;
	priv->byBBType = priv->opts.bbp_type;
	priv->byPacketType = priv->byBBType;
	priv->byAutoFBCtrl = AUTO_FB_0;
	priv->bUpdateBBVGA = true;
	priv->preamble_type = 0;

	pr_debug(" byShortRetryLimit= %d\n", (int)priv->byShortRetryLimit);
	pr_debug(" byLongRetryLimit= %d\n", (int)priv->byLongRetryLimit);
	pr_debug(" preamble_type= %d\n", (int)priv->preamble_type);
	pr_debug(" byShortPreamble= %d\n", (int)priv->byShortPreamble);
	pr_debug(" byBBType= %d\n", (int)priv->byBBType);
}
/* Write the 6-byte BSSID into the BSSID registers on MAC page 1. */
static void vt6655_mac_write_bssid_addr(void __iomem *iobase, const u8 *mac_addr)
{
	int i;

	/* The BSSID registers live on register page 1. */
	iowrite8(1, iobase + MAC_REG_PAGE1SEL);

	for (i = 0; i < 6; i++)
		iowrite8(mac_addr[i], iobase + MAC_REG_BSSID0 + i);

	/* Restore register page 0. */
	iowrite8(0, iobase + MAC_REG_PAGE1SEL);
}
/* Read the 6-byte station address from the PAR registers on MAC page 1. */
static void vt6655_mac_read_ether_addr(void __iomem *iobase, u8 *mac_addr)
{
	int i;

	/* The PAR (physical address) registers live on register page 1. */
	iowrite8(1, iobase + MAC_REG_PAGE1SEL);

	for (i = 0; i < 6; i++)
		mac_addr[i] = ioread8(iobase + MAC_REG_PAR0 + i);

	/* Restore register page 0. */
	iowrite8(0, iobase + MAC_REG_PAGE1SEL);
}
/* Kick a DMA engine: wake it if it is already running, else start it. */
static void vt6655_mac_dma_ctl(void __iomem *iobase, u8 reg_index)
{
	if (ioread32(iobase + reg_index) & DMACTL_RUN)
		iowrite32(DMACTL_WAKE, iobase + reg_index);
	else
		iowrite32(DMACTL_RUN, iobase + reg_index);
}
/* Read-modify-write: set @mask bits in the ENCFG register. */
static void vt6655_mac_set_bits(void __iomem *iobase, u32 mask)
{
	u32 encfg;

	encfg = ioread32(iobase + MAC_REG_ENCFG);
	encfg |= mask;
	iowrite32(encfg, iobase + MAC_REG_ENCFG);
}
/* Read-modify-write: clear @mask bits in the ENCFG register. */
static void vt6655_mac_clear_bits(void __iomem *iobase, u32 mask)
{
	u32 encfg;

	encfg = ioread32(iobase + MAC_REG_ENCFG);
	encfg &= ~mask;
	iowrite32(encfg, iobase + MAC_REG_ENCFG);
}
/* Set the protection-mode bit in the ENCFG register. */
static void vt6655_mac_en_protect_md(void __iomem *iobase)
{
	vt6655_mac_set_bits(iobase, ENCFG_PROTECTMD);
}
/* Clear the protection-mode bit in the ENCFG register. */
static void vt6655_mac_dis_protect_md(void __iomem *iobase)
{
	vt6655_mac_clear_bits(iobase, ENCFG_PROTECTMD);
}
/* Set the Barker-preamble bit in the ENCFG register. */
static void vt6655_mac_en_barker_preamble_md(void __iomem *iobase)
{
	vt6655_mac_set_bits(iobase, ENCFG_BARKERPREAM);
}
/* Clear the Barker-preamble bit in the ENCFG register. */
static void vt6655_mac_dis_barker_preamble_md(void __iomem *iobase)
{
	vt6655_mac_clear_bits(iobase, ENCFG_BARKERPREAM);
}
/*
* Initialisation of MAC & BBP registers
*/
static void device_init_registers(struct vnt_private *priv)
{
	unsigned long flags;
	unsigned int ii;
	unsigned char byValue;
	unsigned char byCCKPwrdBm = 0;
	unsigned char byOFDMPwrdBm = 0;

	/* Quiesce and reset MAC and baseband before programming them. */
	MACbShutdown(priv);
	bb_software_reset(priv);

	/* Do MACbSoftwareReset in MACvInitialize */
	MACbSoftwareReset(priv);

	priv->bAES = false;

	/* Only used in 11g type, sync with ERP IE */
	priv->bProtectMode = false;

	priv->bNonERPPresent = false;
	priv->bBarkerPreambleMd = false;
	priv->wCurrentRate = RATE_1M;
	priv->byTopOFDMBasicRate = RATE_24M;
	priv->byTopCCKBasicRate = RATE_1M;

	/* init MAC */
	MACvInitialize(priv);

	/* Get Local ID */
	priv->local_id = ioread8(priv->port_offset + MAC_REG_LOCALID);

	/* Snapshot the whole EEPROM under the device lock. */
	spin_lock_irqsave(&priv->lock, flags);

	SROMvReadAllContents(priv->port_offset, priv->abyEEPROM);

	spin_unlock_irqrestore(&priv->lock, flags);

	/* Get Channel range */
	priv->byMinChannel = 1;
	priv->byMaxChannel = CB_MAX_CHANNEL;

	/* Get antenna configuration from the EEPROM. */
	byValue = SROMbyReadEmbedded(priv->port_offset, EEP_OFS_ANTENNA);
	if (byValue & EEP_ANTINV)
		priv->bTxRxAntInv = true;
	else
		priv->bTxRxAntInv = false;

	byValue &= (EEP_ANTENNA_AUX | EEP_ANTENNA_MAIN);
	/* if not set default is All */
	if (byValue == 0)
		byValue = (EEP_ANTENNA_AUX | EEP_ANTENNA_MAIN);

	/* Both antennas fitted: diversity; otherwise pick the single one,
	 * honouring the Tx/Rx inversion flag in either case.
	 */
	if (byValue == (EEP_ANTENNA_AUX | EEP_ANTENNA_MAIN)) {
		priv->byAntennaCount = 2;
		priv->byTxAntennaMode = ANT_B;
		priv->dwTxAntennaSel = 1;
		priv->dwRxAntennaSel = 1;

		if (priv->bTxRxAntInv)
			priv->byRxAntennaMode = ANT_A;
		else
			priv->byRxAntennaMode = ANT_B;
	} else {
		priv->byAntennaCount = 1;
		priv->dwTxAntennaSel = 0;
		priv->dwRxAntennaSel = 0;

		if (byValue & EEP_ANTENNA_AUX) {
			priv->byTxAntennaMode = ANT_A;

			if (priv->bTxRxAntInv)
				priv->byRxAntennaMode = ANT_B;
			else
				priv->byRxAntennaMode = ANT_A;
		} else {
			priv->byTxAntennaMode = ANT_B;

			if (priv->bTxRxAntInv)
				priv->byRxAntennaMode = ANT_A;
			else
				priv->byRxAntennaMode = ANT_B;
		}
	}

	/* Set initial antenna mode */
	bb_set_tx_antenna_mode(priv, priv->byTxAntennaMode);
	bb_set_rx_antenna_mode(priv, priv->byRxAntennaMode);

	/* zonetype initial */
	priv->byOriginalZonetype = priv->abyEEPROM[EEP_OFS_ZONETYPE];
	if (!priv->bZoneRegExist)
		priv->byZoneType = priv->abyEEPROM[EEP_OFS_ZONETYPE];

	pr_debug("priv->byZoneType = %x\n", priv->byZoneType);

	/* Init RF module */
	RFbInit(priv);

	/* Get Desire Power Value */
	priv->byCurPwr = 0xFF;
	priv->byCCKPwr = SROMbyReadEmbedded(priv->port_offset, EEP_OFS_PWR_CCK);
	priv->byOFDMPwrG = SROMbyReadEmbedded(priv->port_offset,
					      EEP_OFS_PWR_OFDMG);

	/* Load per-channel power tables; a zero EEPROM entry falls back
	 * to the global CCK/OFDM default read above.
	 */
	for (ii = 0; ii < CB_MAX_CHANNEL_24G; ii++) {
		priv->abyCCKPwrTbl[ii + 1] =
			SROMbyReadEmbedded(priv->port_offset,
					   (unsigned char)(ii + EEP_OFS_CCK_PWR_TBL));
		if (priv->abyCCKPwrTbl[ii + 1] == 0)
			priv->abyCCKPwrTbl[ii + 1] = priv->byCCKPwr;

		priv->abyOFDMPwrTbl[ii + 1] =
			SROMbyReadEmbedded(priv->port_offset,
					   (unsigned char)(ii + EEP_OFS_OFDM_PWR_TBL));
		if (priv->abyOFDMPwrTbl[ii + 1] == 0)
			priv->abyOFDMPwrTbl[ii + 1] = priv->byOFDMPwrG;

		priv->abyCCKDefaultPwr[ii + 1] = byCCKPwrdBm;
		priv->abyOFDMDefaultPwr[ii + 1] = byOFDMPwrdBm;
	}

	/* recover channels 12, 13, 14 for EUROPE using channel 11's values */
	for (ii = 11; ii < 14; ii++) {
		priv->abyCCKPwrTbl[ii] = priv->abyCCKPwrTbl[10];
		priv->abyOFDMPwrTbl[ii] = priv->abyOFDMPwrTbl[10];
	}

	/* Load OFDM A Power Table */
	for (ii = 0; ii < CB_MAX_CHANNEL_5G; ii++) {
		priv->abyOFDMPwrTbl[ii + CB_MAX_CHANNEL_24G + 1] =
			SROMbyReadEmbedded(priv->port_offset,
					   (unsigned char)(ii + EEP_OFS_OFDMA_PWR_TBL));
		priv->abyOFDMDefaultPwr[ii + CB_MAX_CHANNEL_24G + 1] =
			SROMbyReadEmbedded(priv->port_offset,
					   (unsigned char)(ii + EEP_OFS_OFDMA_PWR_dBm));
	}

	/* Extra MSRCTL1 setup only exists on chip revs after VT3253 B1. */
	if (priv->local_id > REV_ID_VT3253_B1) {
		VT6655_MAC_SELECT_PAGE1(priv->port_offset);

		iowrite8(MSRCTL1_TXPWR | MSRCTL1_CSAPAREN, priv->port_offset + MAC_REG_MSRCTL + 1);

		VT6655_MAC_SELECT_PAGE0(priv->port_offset);
	}

	/* use relative tx timeout and 802.11i D4 */
	vt6655_mac_word_reg_bits_on(priv->port_offset, MAC_REG_CFG,
				    (CFG_TKIPOPT | CFG_NOTXTIMEOUT));

	/* set performance parameter by registry */
	vt6655_mac_set_short_retry_limit(priv, priv->byShortRetryLimit);
	MACvSetLongRetryLimit(priv, priv->byLongRetryLimit);

	/* reset TSF counter */
	iowrite8(TFTCTL_TSFCNTRST, priv->port_offset + MAC_REG_TFTCTL);
	/* enable TSF counter */
	iowrite8(TFTCTL_TSFCNTREN, priv->port_offset + MAC_REG_TFTCTL);

	/* initialize BBP registers */
	bb_vt3253_init(priv);

	if (priv->bUpdateBBVGA) {
		priv->byBBVGACurrent = priv->abyBBVGA[0];
		priv->byBBVGANew = priv->byBBVGACurrent;
		bb_set_vga_gain_offset(priv, priv->abyBBVGA[0]);
	}

	bb_set_rx_antenna_mode(priv, priv->byRxAntennaMode);
	bb_set_tx_antenna_mode(priv, priv->byTxAntennaMode);

	/* Set BB and packet type at the same time. */
	/* Set Short Slot Time, xIFS, and RSPINF. */
	priv->wCurrentRate = RATE_54M;

	priv->radio_off = false;

	priv->byRadioCtl = SROMbyReadEmbedded(priv->port_offset,
					      EEP_OFS_RADIOCTL);
	priv->hw_radio_off = false;

	/* If the EEPROM enables the hardware radio switch, sample the GPIO
	 * line (honouring the inversion flag) to learn its current state.
	 */
	if (priv->byRadioCtl & EEP_RADIOCTL_ENABLE) {
		/* Get GPIO */
		priv->byGPIO = ioread8(priv->port_offset + MAC_REG_GPIOCTL1);

		if (((priv->byGPIO & GPIO0_DATA) &&
		     !(priv->byRadioCtl & EEP_RADIOCTL_INV)) ||
		    (!(priv->byGPIO & GPIO0_DATA) &&
		     (priv->byRadioCtl & EEP_RADIOCTL_INV)))
			priv->hw_radio_off = true;
	}

	if (priv->hw_radio_off || priv->bRadioControlOff)
		CARDbRadioPowerOff(priv);

	/* get Permanent network address */
	SROMvReadEtherAddress(priv->port_offset, priv->abyCurrentNetAddr);
	pr_debug("Network address = %pM\n", priv->abyCurrentNetAddr);

	/* reset Rx pointer */
	CARDvSafeResetRx(priv);
	/* reset Tx pointer */
	CARDvSafeResetTx(priv);

	if (priv->local_id <= REV_ID_VT3253_A1)
		vt6655_mac_reg_bits_on(priv->port_offset, MAC_REG_RCR, RCR_WPAERR);

	/* Turn On Rx DMA */
	vt6655_mac_dma_ctl(priv->port_offset, MAC_REG_RXDMACTL0);
	vt6655_mac_dma_ctl(priv->port_offset, MAC_REG_RXDMACTL1);

	/* start the adapter */
	iowrite8(HOSTCR_MACEN | HOSTCR_RXON | HOSTCR_TXON, priv->port_offset + MAC_REG_HOSTCR);
}
/* Log the adapter's MAC address, I/O resources and IRQ at probe time. */
static void device_print_info(struct vnt_private *priv)
{
	dev_info(&priv->pcid->dev, "MAC=%pM IO=0x%lx Mem=0x%lx IRQ=%d\n",
		 priv->abyCurrentNetAddr, (unsigned long)priv->ioaddr,
		 (unsigned long)priv->port_offset, priv->pcid->irq);
}
/* Release every resource owned by @priv; safe to call with a partially
 * initialised device (each step is guarded by its own flag/pointer).
 */
static void device_free_info(struct vnt_private *priv)
{
	if (!priv)
		return;

	/* Unregister from mac80211 only if registration succeeded. */
	if (priv->mac_hw)
		ieee80211_unregister_hw(priv->hw);

	if (priv->port_offset)
		iounmap(priv->port_offset);

	if (priv->pcid)
		pci_release_regions(priv->pcid);

	if (priv->hw)
		ieee80211_free_hw(priv->hw);
}
/* Allocate and carve up the two DMA pools: one for all RX/TX descriptor
 * rings, one for TX packet buffers plus the beacon and scratch buffers.
 * Returns false (after undoing partial allocations) on failure.
 */
static bool device_init_rings(struct vnt_private *priv)
{
	void *vir_pool;

	/*allocate all RD/TD rings a single pool*/
	vir_pool = dma_alloc_coherent(&priv->pcid->dev,
				      priv->opts.rx_descs0 * sizeof(struct vnt_rx_desc) +
				      priv->opts.rx_descs1 * sizeof(struct vnt_rx_desc) +
				      priv->opts.tx_descs[0] * sizeof(struct vnt_tx_desc) +
				      priv->opts.tx_descs[1] * sizeof(struct vnt_tx_desc),
				      &priv->pool_dma, GFP_ATOMIC);
	if (!vir_pool) {
		dev_err(&priv->pcid->dev, "allocate desc dma memory failed\n");
		return false;
	}

	/* Carve the descriptor pool: RD0 ring first, then RD1. */
	priv->aRD0Ring = vir_pool;
	priv->aRD1Ring = vir_pool +
		priv->opts.rx_descs0 * sizeof(struct vnt_rx_desc);

	priv->rd0_pool_dma = priv->pool_dma;
	priv->rd1_pool_dma = priv->rd0_pool_dma +
		priv->opts.rx_descs0 * sizeof(struct vnt_rx_desc);

	/* Second pool: TX buffers, beacon buffer, scratch buffer. */
	priv->tx0_bufs = dma_alloc_coherent(&priv->pcid->dev,
					    priv->opts.tx_descs[0] * PKT_BUF_SZ +
					    priv->opts.tx_descs[1] * PKT_BUF_SZ +
					    CB_BEACON_BUF_SIZE +
					    CB_MAX_BUF_SIZE,
					    &priv->tx_bufs_dma0, GFP_ATOMIC);
	if (!priv->tx0_bufs) {
		dev_err(&priv->pcid->dev, "allocate buf dma memory failed\n");

		/* Undo the descriptor-pool allocation made above. */
		dma_free_coherent(&priv->pcid->dev,
				  priv->opts.rx_descs0 * sizeof(struct vnt_rx_desc) +
				  priv->opts.rx_descs1 * sizeof(struct vnt_rx_desc) +
				  priv->opts.tx_descs[0] * sizeof(struct vnt_tx_desc) +
				  priv->opts.tx_descs[1] * sizeof(struct vnt_tx_desc),
				  vir_pool, priv->pool_dma);
		return false;
	}

	/* TD rings follow the two RD rings inside the descriptor pool. */
	priv->td0_pool_dma = priv->rd1_pool_dma +
		priv->opts.rx_descs1 * sizeof(struct vnt_rx_desc);

	priv->td1_pool_dma = priv->td0_pool_dma +
		priv->opts.tx_descs[0] * sizeof(struct vnt_tx_desc);

	/* vir_pool: pvoid type */
	priv->apTD0Rings = vir_pool
		+ priv->opts.rx_descs0 * sizeof(struct vnt_rx_desc)
		+ priv->opts.rx_descs1 * sizeof(struct vnt_rx_desc);

	priv->apTD1Rings = vir_pool
		+ priv->opts.rx_descs0 * sizeof(struct vnt_rx_desc)
		+ priv->opts.rx_descs1 * sizeof(struct vnt_rx_desc)
		+ priv->opts.tx_descs[0] * sizeof(struct vnt_tx_desc);

	/* Carve the buffer pool, keeping virtual and DMA views in step. */
	priv->tx1_bufs = priv->tx0_bufs +
		priv->opts.tx_descs[0] * PKT_BUF_SZ;

	priv->tx_beacon_bufs = priv->tx1_bufs +
		priv->opts.tx_descs[1] * PKT_BUF_SZ;

	priv->pbyTmpBuff = priv->tx_beacon_bufs +
		CB_BEACON_BUF_SIZE;

	priv->tx_bufs_dma1 = priv->tx_bufs_dma0 +
		priv->opts.tx_descs[0] * PKT_BUF_SZ;

	priv->tx_beacon_dma = priv->tx_bufs_dma1 +
		priv->opts.tx_descs[1] * PKT_BUF_SZ;

	return true;
}
static void device_free_rings(struct vnt_private *priv)
{
dma_free_coherent(&priv->pcid->dev,
priv->opts.rx_descs0 * sizeof(struct vnt_rx_desc) +
priv->opts.rx_descs1 * sizeof(struct vnt_rx_desc) +
priv->opts.tx_descs[0] * sizeof(struct vnt_tx_desc) +
priv->opts.tx_descs[1] * sizeof(struct vnt_tx_desc),
priv->aRD0Ring, priv->pool_dma);
dma_free_coherent(&priv->pcid->dev,
priv->opts.tx_descs[0] * PKT_BUF_SZ +
priv->opts.tx_descs[1] * PKT_BUF_SZ +
CB_BEACON_BUF_SIZE +
CB_MAX_BUF_SIZE,
priv->tx0_bufs, priv->tx_bufs_dma0);
}
/*
 * Initialize the RD0 receive descriptor ring.
 *
 * For each descriptor: allocate its host-side rd_info, attach a mapped
 * receive skb, and chain it (both virtually and by DMA address) to the
 * next descriptor; the last descriptor's next_desc is then pointed back
 * at the pool base to close the ring.
 *
 * Returns 0 on success or -ENOMEM, with all previously initialized
 * entries cleaned up.
 *
 * Fix: "i ++" changed to "i++" (kernel coding style; checkpatch flags a
 * space before the postfix operator).
 */
static int device_init_rd0_ring(struct vnt_private *priv)
{
	int i;
	dma_addr_t curr = priv->rd0_pool_dma;
	struct vnt_rx_desc *desc;
	int ret;

	/* Init the RD0 ring entries */
	for (i = 0; i < priv->opts.rx_descs0;
	     i++, curr += sizeof(struct vnt_rx_desc)) {
		desc = &priv->aRD0Ring[i];

		desc->rd_info = kzalloc(sizeof(*desc->rd_info), GFP_KERNEL);
		if (!desc->rd_info) {
			ret = -ENOMEM;
			goto err_free_desc;
		}

		if (!device_alloc_rx_buf(priv, desc)) {
			dev_err(&priv->pcid->dev, "can not alloc rx bufs\n");
			ret = -ENOMEM;
			goto err_free_rd;
		}

		desc->next = &priv->aRD0Ring[(i + 1) % priv->opts.rx_descs0];
		desc->next_desc = cpu_to_le32(curr + sizeof(struct vnt_rx_desc));
	}

	/* close the ring: last entry points back at the pool base */
	if (i > 0)
		priv->aRD0Ring[i - 1].next_desc = cpu_to_le32(priv->rd0_pool_dma);
	priv->pCurrRD[0] = &priv->aRD0Ring[0];

	return 0;

err_free_rd:
	/* entry i has rd_info but no rx buffer */
	kfree(desc->rd_info);

err_free_desc:
	while (i--) {
		desc = &priv->aRD0Ring[i];
		device_free_rx_buf(priv, desc);
		kfree(desc->rd_info);
	}
	return ret;
}
/*
 * Initialize the RD1 receive descriptor ring; mirror image of
 * device_init_rd0_ring() for the second RX DMA channel.
 *
 * Returns 0 on success or -ENOMEM, with all previously initialized
 * entries cleaned up.
 *
 * Fix: "i ++" changed to "i++" (kernel coding style; checkpatch flags a
 * space before the postfix operator).
 */
static int device_init_rd1_ring(struct vnt_private *priv)
{
	int i;
	dma_addr_t curr = priv->rd1_pool_dma;
	struct vnt_rx_desc *desc;
	int ret;

	/* Init the RD1 ring entries */
	for (i = 0; i < priv->opts.rx_descs1;
	     i++, curr += sizeof(struct vnt_rx_desc)) {
		desc = &priv->aRD1Ring[i];

		desc->rd_info = kzalloc(sizeof(*desc->rd_info), GFP_KERNEL);
		if (!desc->rd_info) {
			ret = -ENOMEM;
			goto err_free_desc;
		}

		if (!device_alloc_rx_buf(priv, desc)) {
			dev_err(&priv->pcid->dev, "can not alloc rx bufs\n");
			ret = -ENOMEM;
			goto err_free_rd;
		}

		desc->next = &priv->aRD1Ring[(i + 1) % priv->opts.rx_descs1];
		desc->next_desc = cpu_to_le32(curr + sizeof(struct vnt_rx_desc));
	}

	/* close the ring: last entry points back at the pool base */
	if (i > 0)
		priv->aRD1Ring[i - 1].next_desc = cpu_to_le32(priv->rd1_pool_dma);
	priv->pCurrRD[1] = &priv->aRD1Ring[0];

	return 0;

err_free_rd:
	/* entry i has rd_info but no rx buffer */
	kfree(desc->rd_info);

err_free_desc:
	while (i--) {
		desc = &priv->aRD1Ring[i];
		device_free_rx_buf(priv, desc);
		kfree(desc->rd_info);
	}
	return ret;
}
/* Tear down the RD0 ring: release every rx buffer and its rd_info. */
static void device_free_rd0_ring(struct vnt_private *priv)
{
	struct vnt_rx_desc *desc;
	int i;

	for (i = 0; i < priv->opts.rx_descs0; i++) {
		desc = &priv->aRD0Ring[i];
		device_free_rx_buf(priv, desc);
		kfree(desc->rd_info);
	}
}
/* Tear down the RD1 ring: release every rx buffer and its rd_info. */
static void device_free_rd1_ring(struct vnt_private *priv)
{
	struct vnt_rx_desc *desc;
	int i;

	for (i = 0; i < priv->opts.rx_descs1; i++) {
		desc = &priv->aRD1Ring[i];
		device_free_rx_buf(priv, desc);
		kfree(desc->rd_info);
	}
}
/*
 * Initialize the TD0 transmit descriptor ring.
 *
 * Each descriptor gets a host-side td_info pointing at its PKT_BUF_SZ
 * slice of the tx0 buffer pool (virtual and DMA address), and is chained
 * to its successor; the last entry is then linked back to the pool base
 * to close the ring.  Returns 0 or -ENOMEM (with partial allocations
 * undone).
 */
static int device_init_td0_ring(struct vnt_private *priv)
{
	struct vnt_tx_desc *desc;
	dma_addr_t desc_dma = priv->td0_pool_dma;
	int i = 0;
	int ret;

	while (i < priv->opts.tx_descs[0]) {
		desc = &priv->apTD0Rings[i];

		desc->td_info = kzalloc(sizeof(*desc->td_info), GFP_KERNEL);
		if (!desc->td_info) {
			ret = -ENOMEM;
			goto err_free_desc;
		}

		/* each descriptor owns one PKT_BUF_SZ slice of the pool */
		desc->td_info->buf = priv->tx0_bufs + i * PKT_BUF_SZ;
		desc->td_info->buf_dma = priv->tx_bufs_dma0 + i * PKT_BUF_SZ;

		desc->next = &priv->apTD0Rings[(i + 1) % priv->opts.tx_descs[0]];
		desc->next_desc = cpu_to_le32(desc_dma +
					      sizeof(struct vnt_tx_desc));

		i++;
		desc_dma += sizeof(struct vnt_tx_desc);
	}

	/* close the ring: last entry points back at the pool base */
	if (i > 0)
		priv->apTD0Rings[i - 1].next_desc = cpu_to_le32(priv->td0_pool_dma);
	priv->apTailTD[0] = priv->apCurrTD[0] = &priv->apTD0Rings[0];

	return 0;

err_free_desc:
	while (i--)
		kfree(priv->apTD0Rings[i].td_info);

	return ret;
}
/*
 * Initialize the TD1 transmit descriptor ring; mirror image of
 * device_init_td0_ring() for the second TX DMA channel, using the tx1
 * buffer pool.  Returns 0 or -ENOMEM (with partial allocations undone).
 */
static int device_init_td1_ring(struct vnt_private *priv)
{
	struct vnt_tx_desc *desc;
	dma_addr_t desc_dma = priv->td1_pool_dma;
	int i = 0;
	int ret;

	while (i < priv->opts.tx_descs[1]) {
		desc = &priv->apTD1Rings[i];

		desc->td_info = kzalloc(sizeof(*desc->td_info), GFP_KERNEL);
		if (!desc->td_info) {
			ret = -ENOMEM;
			goto err_free_desc;
		}

		/* each descriptor owns one PKT_BUF_SZ slice of the pool */
		desc->td_info->buf = priv->tx1_bufs + i * PKT_BUF_SZ;
		desc->td_info->buf_dma = priv->tx_bufs_dma1 + i * PKT_BUF_SZ;

		desc->next = &priv->apTD1Rings[(i + 1) % priv->opts.tx_descs[1]];
		desc->next_desc = cpu_to_le32(desc_dma +
					      sizeof(struct vnt_tx_desc));

		i++;
		desc_dma += sizeof(struct vnt_tx_desc);
	}

	/* close the ring: last entry points back at the pool base */
	if (i > 0)
		priv->apTD1Rings[i - 1].next_desc = cpu_to_le32(priv->td1_pool_dma);
	priv->apTailTD[1] = priv->apCurrTD[1] = &priv->apTD1Rings[0];

	return 0;

err_free_desc:
	while (i--)
		kfree(priv->apTD1Rings[i].td_info);

	return ret;
}
/* Tear down the TD0 ring: drop any pending skb and free each td_info. */
static void device_free_td0_ring(struct vnt_private *priv)
{
	int i;

	for (i = 0; i < priv->opts.tx_descs[0]; i++) {
		struct vnt_td_info *td_info = priv->apTD0Rings[i].td_info;

		/* dev_kfree_skb() accepts NULL for never-used descriptors */
		dev_kfree_skb(td_info->skb);
		kfree(td_info);
	}
}
/* Tear down the TD1 ring: drop any pending skb and free each td_info. */
static void device_free_td1_ring(struct vnt_private *priv)
{
	int i;

	for (i = 0; i < priv->opts.tx_descs[1]; i++) {
		struct vnt_td_info *td_info = priv->apTD1Rings[i].td_info;

		/* dev_kfree_skb() accepts NULL for never-used descriptors */
		dev_kfree_skb(td_info->skb);
		kfree(td_info);
	}
}
/*-----------------------------------------------------------------*/
/*
 * Service received frames on RX DMA ring @idx, at most 16 per call.
 *
 * Walks the descriptor chain from the last saved position, passing every
 * host-owned descriptor to vnt_receive_frame().  When that call returns
 * nonzero (presumably "skb was consumed" - confirm against its
 * definition) a fresh receive buffer is allocated before the descriptor
 * is handed back to the NIC; if the refill fails, processing stops so
 * the descriptor is not re-armed without a buffer.
 *
 * Returns the number of descriptors processed, used by the ISR loop to
 * bound the work done per interrupt.
 */
static int device_rx_srv(struct vnt_private *priv, unsigned int idx)
{
	struct vnt_rx_desc *rd;
	int works = 0;

	for (rd = priv->pCurrRD[idx];
	     rd->rd0.owner == OWNED_BY_HOST;
	     rd = rd->next) {
		if (works++ > 15)
			break;

		/* descriptor lost its buffer (earlier refill failure) */
		if (!rd->rd_info->skb)
			break;

		if (vnt_receive_frame(priv, rd)) {
			if (!device_alloc_rx_buf(priv, rd)) {
				dev_err(&priv->pcid->dev,
					"can not allocate rx buf\n");
				break;
			}
		}
		/* hand the descriptor back to the NIC */
		rd->rd0.owner = OWNED_BY_NIC;
	}

	/* remember where to resume on the next interrupt */
	priv->pCurrRD[idx] = rd;

	return works;
}
/*
 * Attach a fresh receive skb to descriptor @rd and map it for DMA.
 *
 * On success the descriptor is reinitialized (status cleared, request
 * and residual counts set to the RX buffer size, ownership handed to
 * the NIC) and true is returned.  On skb allocation or DMA mapping
 * failure the descriptor is left without a buffer and false is
 * returned.
 *
 * Fix: the old "*((unsigned int *)&rd->rd0) = 0" type-punning cast is
 * replaced by memset(); the cast violated strict aliasing and silently
 * hard-coded the rd0 word size to sizeof(unsigned int).
 */
static bool device_alloc_rx_buf(struct vnt_private *priv,
				struct vnt_rx_desc *rd)
{
	struct vnt_rd_info *rd_info = rd->rd_info;

	rd_info->skb = dev_alloc_skb((int)priv->rx_buf_sz);
	if (!rd_info->skb)
		return false;

	rd_info->skb_dma =
		dma_map_single(&priv->pcid->dev,
			       skb_put(rd_info->skb, skb_tailroom(rd_info->skb)),
			       priv->rx_buf_sz, DMA_FROM_DEVICE);
	if (dma_mapping_error(&priv->pcid->dev, rd_info->skb_dma)) {
		dev_kfree_skb(rd_info->skb);
		rd_info->skb = NULL;
		return false;
	}

	/* clear all rd0 status fields before re-arming the descriptor */
	memset(&rd->rd0, 0, sizeof(rd->rd0));

	rd->rd0.res_count = cpu_to_le16(priv->rx_buf_sz);
	rd->rd0.owner = OWNED_BY_NIC;
	rd->rd1.req_count = cpu_to_le16(priv->rx_buf_sz);
	rd->buff_addr = cpu_to_le32(rd_info->skb_dma);

	return true;
}
static void device_free_rx_buf(struct vnt_private *priv,
struct vnt_rx_desc *rd)
{
struct vnt_rd_info *rd_info = rd->rd_info;
dma_unmap_single(&priv->pcid->dev, rd_info->skb_dma,
priv->rx_buf_sz, DMA_FROM_DEVICE);
dev_kfree_skb(rd_info->skb);
}
/*
 * Auto-fallback table 0 (FIFOCTL_AUTO_FB_0): replacement TX rate.
 * Row = original OFDM rate (RATE_18M..RATE_54M), column = retry count
 * (0..4, clamped by the caller).
 */
static const u8 fallback_rate0[5][5] = {
	{RATE_18M, RATE_18M, RATE_12M, RATE_12M, RATE_12M},
	{RATE_24M, RATE_24M, RATE_18M, RATE_12M, RATE_12M},
	{RATE_36M, RATE_36M, RATE_24M, RATE_18M, RATE_18M},
	{RATE_48M, RATE_48M, RATE_36M, RATE_24M, RATE_24M},
	{RATE_54M, RATE_54M, RATE_48M, RATE_36M, RATE_36M}
};
/*
 * Auto-fallback table 1 (FIFOCTL_AUTO_FB_1): more aggressive downgrade
 * than fallback_rate0.  Same indexing: row = original OFDM rate
 * (RATE_18M..RATE_54M), column = retry count (0..4).
 */
static const u8 fallback_rate1[5][5] = {
	{RATE_18M, RATE_18M, RATE_12M, RATE_6M, RATE_6M},
	{RATE_24M, RATE_24M, RATE_18M, RATE_6M, RATE_6M},
	{RATE_36M, RATE_36M, RATE_24M, RATE_12M, RATE_12M},
	{RATE_48M, RATE_48M, RATE_24M, RATE_12M, RATE_12M},
	{RATE_54M, RATE_54M, RATE_36M, RATE_18M, RATE_18M}
};
/*
 * Translate the hardware TX status bytes (TSR0/TSR1) of a completed
 * frame into mac80211 TX status on the frame's skb.
 *
 * @context: per-descriptor TX info holding the skb and FIFO header
 * @tsr0: TSR0 status byte; TSR0_NCR bits give the retry count
 * @tsr1: TSR1 status byte; TSR1_TERR indicates a transmit error
 *
 * If auto-fallback was enabled in the frame's FIFO control word and the
 * frame went out without error, the reported rate index is recomputed
 * from the fallback tables so mac80211 sees the rate actually used.
 *
 * Returns 0 on success, -ENOMEM when @context is missing, -EINVAL when
 * it carries no skb.
 */
static int vnt_int_report_rate(struct vnt_private *priv,
			       struct vnt_td_info *context, u8 tsr0, u8 tsr1)
{
	struct vnt_tx_fifo_head *fifo_head;
	struct ieee80211_tx_info *info;
	struct ieee80211_rate *rate;
	u16 fb_option;
	u8 tx_retry = (tsr0 & TSR0_NCR);
	s8 idx;

	if (!context)
		return -ENOMEM;

	if (!context->skb)
		return -EINVAL;

	fifo_head = (struct vnt_tx_fifo_head *)context->buf;
	fb_option = (le16_to_cpu(fifo_head->fifo_ctl) &
		     (FIFOCTL_AUTO_FB_0 | FIFOCTL_AUTO_FB_1));

	info = IEEE80211_SKB_CB(context->skb);
	idx = info->control.rates[0].idx;

	if (fb_option && !(tsr1 & TSR1_TERR)) {
		u8 tx_rate;
		u8 retry = tx_retry;

		rate = ieee80211_get_tx_rate(priv->hw, info);
		/* fallback tables index from RATE_18M upward.
		 * NOTE(review): assumes hw_value >= RATE_18M whenever
		 * auto-fallback is enabled - confirm against the TX
		 * FIFO header setup path.
		 */
		tx_rate = rate->hw_value - RATE_18M;

		/* tables have 5 retry columns (0..4) */
		if (retry > 4)
			retry = 4;

		if (fb_option & FIFOCTL_AUTO_FB_0)
			tx_rate = fallback_rate0[tx_rate][retry];
		else if (fb_option & FIFOCTL_AUTO_FB_1)
			tx_rate = fallback_rate1[tx_rate][retry];

		/* 5 GHz rate indices start at RATE_6M */
		if (info->band == NL80211_BAND_5GHZ)
			idx = tx_rate - RATE_6M;
		else
			idx = tx_rate;
	}

	ieee80211_tx_info_clear_status(info);

	info->status.rates[0].count = tx_retry;

	if (!(tsr1 & TSR1_TERR)) {
		info->status.rates[0].idx = idx;

		if (info->flags & IEEE80211_TX_CTL_NO_ACK)
			info->flags |= IEEE80211_TX_STAT_NOACK_TRANSMITTED;
		else
			info->flags |= IEEE80211_TX_STAT_ACK;
	}

	return 0;
}
/*
 * Reclaim completed transmit descriptors on TX ring @idx, at most 16
 * per call.  For each completed chain head (TCR_STP set) the TX status
 * is reported to mac80211 and the descriptor is released.
 *
 * Returns the number of descriptors examined (used by the ISR loop to
 * bound the work per interrupt).
 */
static int device_tx_srv(struct vnt_private *priv, unsigned int idx)
{
	struct vnt_tx_desc *desc;
	int works = 0;
	unsigned char byTsr0;
	unsigned char byTsr1;

	for (desc = priv->apTailTD[idx]; priv->iTDUsed[idx] > 0; desc = desc->next) {
		/* still owned by the NIC: nothing further has completed */
		if (desc->td0.owner == OWNED_BY_NIC)
			break;
		if (works++ > 15)
			break;

		byTsr0 = desc->td0.tsr0;
		byTsr1 = desc->td0.tsr1;

		/* Only the status of first TD in the chain is correct */
		if (desc->td1.tcr & TCR_STP) {
			if ((desc->td_info->flags & TD_FLAGS_NETIF_SKB) != 0) {
				if (!(byTsr1 & TSR1_TERR)) {
					if (byTsr0 != 0) {
						pr_debug(" Tx[%d] OK but has error. tsr1[%02X] tsr0[%02X]\n",
							 (int)idx, byTsr1,
							 byTsr0);
					}
				} else {
					pr_debug(" Tx[%d] dropped & tsr1[%02X] tsr0[%02X]\n",
						 (int)idx, byTsr1, byTsr0);
				}
			}

			if (byTsr1 & TSR1_TERR) {
				if ((desc->td_info->flags & TD_FLAGS_PRIV_SKB) != 0) {
					pr_debug(" Tx[%d] fail has error. tsr1[%02X] tsr0[%02X]\n",
						 (int)idx, byTsr1, byTsr0);
				}
			}

			/* report rate/ack status, then release the skb */
			vnt_int_report_rate(priv, desc->td_info, byTsr0, byTsr1);

			device_free_tx_buf(priv, desc);
			priv->iTDUsed[idx]--;
		}
	}

	/* resume reclaiming from here on the next interrupt */
	priv->apTailTD[idx] = desc;

	return works;
}
/* Shut the MAC down on a fatal hardware error (ISR_FETALERR set). */
static void device_error(struct vnt_private *priv, unsigned short status)
{
	if (!(status & ISR_FETALERR))
		return;

	dev_err(&priv->pcid->dev, "Hardware fatal error\n");
	MACbShutdown(priv);
}
/*
 * Complete a transmitted frame: report its status to mac80211 (which
 * consumes the skb) and reset the descriptor's per-frame state.
 */
static void device_free_tx_buf(struct vnt_private *priv,
			       struct vnt_tx_desc *desc)
{
	struct vnt_td_info *info = desc->td_info;

	if (info->skb) {
		ieee80211_tx_status_irqsafe(priv->hw, info->skb);
		info->skb = NULL;
	}

	info->flags = 0;
}
/*
 * Track RSSI and adjust the baseband VGA gain when needed.
 *
 * Picks the gain for the first dBm threshold the current RSSI falls
 * below.  A changed target gain is applied immediately on the first
 * differing check and re-applied once the difference has persisted for
 * BB_VGA_CHANGE_THRESHOLD checks.  Skipped while VGA updating is
 * disabled, off-channel, or without association/RSSI data.
 */
static void vnt_check_bb_vga(struct vnt_private *priv)
{
	long dbm;
	int i;

	if (!priv->bUpdateBBVGA)
		return;

	if (priv->hw->conf.flags & IEEE80211_CONF_OFFCHANNEL)
		return;

	if (!(priv->vif->cfg.assoc && priv->current_rssi))
		return;

	RFvRSSITodBm(priv, (u8)priv->current_rssi, &dbm);

	/* choose the gain matching the first threshold we fall under */
	for (i = 0; i < BB_VGA_LEVEL; i++) {
		if (dbm < priv->dbm_threshold[i]) {
			priv->byBBVGANew = priv->abyBBVGA[i];
			break;
		}
	}

	if (priv->byBBVGANew == priv->byBBVGACurrent) {
		priv->uBBVGADiffCount = 1;
		return;
	}

	priv->uBBVGADiffCount++;

	if (priv->uBBVGADiffCount == 1) {
		/* first VGA diff gain */
		bb_set_vga_gain_offset(priv, priv->byBBVGANew);

		dev_dbg(&priv->pcid->dev,
			"First RSSI[%d] NewGain[%d] OldGain[%d] Count[%d]\n",
			(int)dbm, priv->byBBVGANew,
			priv->byBBVGACurrent,
			(int)priv->uBBVGADiffCount);
	}

	if (priv->uBBVGADiffCount >= BB_VGA_CHANGE_THRESHOLD) {
		dev_dbg(&priv->pcid->dev,
			"RSSI[%d] NewGain[%d] OldGain[%d] Count[%d]\n",
			(int)dbm, priv->byBBVGANew,
			priv->byBBVGACurrent,
			(int)priv->uBBVGADiffCount);

		bb_set_vga_gain_offset(priv, priv->byBBVGANew);
	}
}
/*
 * Core interrupt servicing, called from the work item while device
 * interrupts are masked.  Accumulates the packed MIB counters into the
 * low-level stats, then loops: acknowledge pending ISR bits and service
 * fatal errors, beacon timing (TBTT/BNTX), both RX rings, both TX
 * rings and the beacon soft timer, until no causes remain or the work
 * budget (opts.int_works) is exhausted.
 */
static void vnt_interrupt_process(struct vnt_private *priv)
{
	struct ieee80211_low_level_stats *low_stats = &priv->low_stats;
	int max_count = 0;
	u32 mib_counter;
	u32 isr;
	unsigned long flags;

	isr = ioread32(priv->port_offset + MAC_REG_ISR);

	if (isr == 0)
		return;

	/* all-ones reads back when the device has disappeared */
	if (isr == 0xffffffff) {
		pr_debug("isr = 0xffff\n");
		return;
	}

	spin_lock_irqsave(&priv->lock, flags);

	/* Read low level stats: MIBCNTR packs four 8-bit counters */
	mib_counter = ioread32(priv->port_offset + MAC_REG_MIBCNTR);

	low_stats->dot11RTSSuccessCount += mib_counter & 0xff;
	low_stats->dot11RTSFailureCount += (mib_counter >> 8) & 0xff;
	low_stats->dot11ACKFailureCount += (mib_counter >> 16) & 0xff;
	low_stats->dot11FCSErrorCount += (mib_counter >> 24) & 0xff;

	/*
	 * TBD....
	 * Must do this after doing rx/tx, cause ISR bit is slow
	 * than RD/TD write back
	 * update ISR counter
	 */
	while (isr && priv->vif) {
		/* acknowledge the causes we are about to service */
		iowrite32(isr, priv->port_offset + MAC_REG_ISR);

		if (isr & ISR_FETALERR) {
			pr_debug(" ISR_FETALERR\n");
			iowrite8(0, priv->port_offset + MAC_REG_SOFTPWRCTL);
			iowrite16(SOFTPWRCTL_SWPECTI, priv->port_offset + MAC_REG_SOFTPWRCTL);
			device_error(priv, isr);
		}

		if (isr & ISR_TBTT) {
			/* beacon interval boundary */
			if (priv->op_mode != NL80211_IFTYPE_ADHOC)
				vnt_check_bb_vga(priv);

			priv->bBeaconSent = false;
			if (priv->bEnablePSMode)
				PSbIsNextTBTTWakeUp((void *)priv);

			/* arm the one-shot timer that triggers building
			 * the next beacon (interval minus reserved lead
			 * time, converted to microseconds via << 10)
			 */
			if ((priv->op_mode == NL80211_IFTYPE_AP ||
			     priv->op_mode == NL80211_IFTYPE_ADHOC) &&
			    priv->vif->bss_conf.enable_beacon)
				MACvOneShotTimer1MicroSec(priv,
							  (priv->vif->bss_conf.beacon_int -
							   MAKE_BEACON_RESERVED) << 10);

			/* TODO: adhoc PS mode */
		}

		if (isr & ISR_BNTX) {
			if (priv->op_mode == NL80211_IFTYPE_ADHOC) {
				priv->bIsBeaconBufReadySet = false;
				priv->cbBeaconBufReadySetCnt = 0;
			}

			priv->bBeaconSent = true;
		}

		if (isr & ISR_RXDMA0)
			max_count += device_rx_srv(priv, TYPE_RXDMA0);

		if (isr & ISR_RXDMA1)
			max_count += device_rx_srv(priv, TYPE_RXDMA1);

		if (isr & ISR_TXDMA0)
			max_count += device_tx_srv(priv, TYPE_TXDMA0);

		if (isr & ISR_AC0DMA)
			max_count += device_tx_srv(priv, TYPE_AC0DMA);

		if (isr & ISR_SOFTTIMER1) {
			if (priv->vif->bss_conf.enable_beacon)
				vnt_beacon_make(priv, priv->vif);
		}

		/* If both buffers available wake the queue */
		if (AVAIL_TD(priv, TYPE_TXDMA0) &&
		    AVAIL_TD(priv, TYPE_AC0DMA) &&
		    ieee80211_queue_stopped(priv->hw, 0))
			ieee80211_wake_queues(priv->hw);

		isr = ioread32(priv->port_offset + MAC_REG_ISR);

		/* NOTE(review): presumably re-kicks the RX DMA engines
		 * after refilling descriptors - confirm against
		 * vt6655_mac_dma_ctl()
		 */
		vt6655_mac_dma_ctl(priv->port_offset, MAC_REG_RXDMACTL0);
		vt6655_mac_dma_ctl(priv->port_offset, MAC_REG_RXDMACTL1);

		if (max_count > priv->opts.int_works)
			break;
	}

	spin_unlock_irqrestore(&priv->lock, flags);
}
/*
 * Deferred interrupt handling.  Runs the actual ISR processing (only
 * while an interface is attached) and then re-enables the MAC interrupt
 * mask that vnt_interrupt() cleared.
 */
static void vnt_interrupt_work(struct work_struct *work)
{
	struct vnt_private *priv =
		container_of(work, struct vnt_private, interrupt_work);

	if (priv->vif)
		vnt_interrupt_process(priv);

	/* unmask MAC interrupts again */
	iowrite32(IMR_MASK_VALUE, priv->port_offset + MAC_REG_IMR);
}
/*
 * Hard IRQ handler: defer all processing to the work item and mask
 * further MAC interrupts until vnt_interrupt_work() has run.
 */
static irqreturn_t vnt_interrupt(int irq, void *arg)
{
	struct vnt_private *priv = arg;

	schedule_work(&priv->interrupt_work);

	/* mask all MAC interrupts; the work item re-enables them */
	iowrite32(0, priv->port_offset + MAC_REG_IMR);

	return IRQ_HANDLED;
}
/*
 * Queue one frame on a TX DMA ring and kick the DMA engine.
 *
 * Data frames use the AC0 DMA ring, everything else TX0.  Returns
 * -ENOMEM (after stopping the mac80211 queues) when the chosen ring has
 * no free descriptor; the caller then frees the skb.
 *
 * The descriptor is claimed and later published under priv->lock; the
 * FIFO header generation in between deliberately runs unlocked.
 */
static int vnt_tx_packet(struct vnt_private *priv, struct sk_buff *skb)
{
	struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)skb->data;
	struct vnt_tx_desc *head_td;
	u32 dma_idx;
	unsigned long flags;

	spin_lock_irqsave(&priv->lock, flags);

	if (ieee80211_is_data(hdr->frame_control))
		dma_idx = TYPE_AC0DMA;
	else
		dma_idx = TYPE_TXDMA0;

	if (AVAIL_TD(priv, dma_idx) < 1) {
		spin_unlock_irqrestore(&priv->lock, flags);
		ieee80211_stop_queues(priv->hw);
		return -ENOMEM;
	}

	/* claim the next free descriptor on the chosen ring */
	head_td = priv->apCurrTD[dma_idx];

	head_td->td1.tcr = 0;

	head_td->td_info->skb = skb;

	if (dma_idx == TYPE_AC0DMA)
		head_td->td_info->flags = TD_FLAGS_NETIF_SKB;

	priv->apCurrTD[dma_idx] = head_td->next;

	spin_unlock_irqrestore(&priv->lock, flags);

	/* build the TX FIFO header into the descriptor's buffer */
	vnt_generate_fifo_header(priv, dma_idx, head_td, skb);

	spin_lock_irqsave(&priv->lock, flags);

	priv->bPWBitOn = false;

	/* Set TSR1 & ReqCount in TxDescHead */
	head_td->td1.tcr |= (TCR_STP | TCR_EDP | EDMSDU);
	head_td->td1.req_count = cpu_to_le16(head_td->td_info->req_count);

	head_td->buff_addr = cpu_to_le32(head_td->td_info->buf_dma);

	/* Poll Transmit the adapter */
	wmb();
	/* publish the descriptor to the NIC only after its contents hit memory */
	head_td->td0.owner = OWNED_BY_NIC;
	wmb(); /* second memory barrier */

	if (head_td->td_info->flags & TD_FLAGS_NETIF_SKB)
		vt6655_mac_dma_ctl(priv->port_offset, MAC_REG_AC0DMACTL);
	else
		vt6655_mac_dma_ctl(priv->port_offset, MAC_REG_TXDMACTL0);

	priv->iTDUsed[dma_idx]++;

	spin_unlock_irqrestore(&priv->lock, flags);

	return 0;
}
/*
 * mac80211 .tx callback: hand the frame to the DMA path; on failure the
 * skb ownership stays with us, so release it back to mac80211.
 */
static void vnt_tx_80211(struct ieee80211_hw *hw,
			 struct ieee80211_tx_control *control,
			 struct sk_buff *skb)
{
	struct vnt_private *priv = hw->priv;
	int ret;

	ret = vnt_tx_packet(priv, skb);
	if (ret)
		ieee80211_free_txskb(hw, skb);
}
/*
 * mac80211 .start callback: allocate the DMA pools, install the (shared)
 * interrupt handler, initialize all four descriptor rings, program the
 * MAC registers and enable interrupts.  Unwinds everything in reverse
 * order on failure.
 */
static int vnt_start(struct ieee80211_hw *hw)
{
	struct vnt_private *priv = hw->priv;
	int ret;

	priv->rx_buf_sz = PKT_BUF_SZ;
	if (!device_init_rings(priv))
		return -ENOMEM;

	ret = request_irq(priv->pcid->irq, vnt_interrupt,
			  IRQF_SHARED, "vt6655", priv);
	if (ret) {
		dev_dbg(&priv->pcid->dev, "failed to start irq\n");
		goto err_free_rings;
	}

	dev_dbg(&priv->pcid->dev, "call device init rd0 ring\n");
	ret = device_init_rd0_ring(priv);
	if (ret)
		goto err_free_irq;
	ret = device_init_rd1_ring(priv);
	if (ret)
		goto err_free_rd0_ring;
	ret = device_init_td0_ring(priv);
	if (ret)
		goto err_free_rd1_ring;
	ret = device_init_td1_ring(priv);
	if (ret)
		goto err_free_td0_ring;

	device_init_registers(priv);

	dev_dbg(&priv->pcid->dev, "enable MAC interrupt\n");
	iowrite32(IMR_MASK_VALUE, priv->port_offset + MAC_REG_IMR);

	ieee80211_wake_queues(hw);

	return 0;

err_free_td0_ring:
	device_free_td0_ring(priv);
err_free_rd1_ring:
	device_free_rd1_ring(priv);
err_free_rd0_ring:
	device_free_rd0_ring(priv);
err_free_irq:
	free_irq(priv->pcid->irq, priv);
err_free_rings:
	device_free_rings(priv);
	return ret;
}
/*
 * mac80211 .stop callback: stop the TX queues, flush pending interrupt
 * work, quiesce and reset the MAC, power the radio off, then tear down
 * the descriptor rings, DMA pools and the IRQ - in that order.
 */
static void vnt_stop(struct ieee80211_hw *hw)
{
	struct vnt_private *priv = hw->priv;

	ieee80211_stop_queues(hw);

	cancel_work_sync(&priv->interrupt_work);

	MACbShutdown(priv);
	MACbSoftwareReset(priv);
	CARDbRadioPowerOff(priv);

	device_free_td0_ring(priv);
	device_free_td1_ring(priv);
	device_free_rd0_ring(priv);
	device_free_rd1_ring(priv);
	device_free_rings(priv);

	free_irq(priv->pcid->irq, priv);
}
/*
 * mac80211 .add_interface callback.  Station mode needs no register
 * changes; ad-hoc and AP modes additionally drop the unicast-only RX
 * filter and enable the matching host mode bit.  Other interface types
 * are rejected with -EOPNOTSUPP.
 */
static int vnt_add_interface(struct ieee80211_hw *hw, struct ieee80211_vif *vif)
{
	struct vnt_private *priv = hw->priv;

	priv->vif = vif;

	if (vif->type == NL80211_IFTYPE_ADHOC) {
		vt6655_mac_reg_bits_off(priv->port_offset, MAC_REG_RCR, RCR_UNICAST);
		vt6655_mac_reg_bits_on(priv->port_offset, MAC_REG_HOSTCR, HOSTCR_ADHOC);
	} else if (vif->type == NL80211_IFTYPE_AP) {
		vt6655_mac_reg_bits_off(priv->port_offset, MAC_REG_RCR, RCR_UNICAST);
		vt6655_mac_reg_bits_on(priv->port_offset, MAC_REG_HOSTCR, HOSTCR_AP);
	} else if (vif->type != NL80211_IFTYPE_STATION) {
		return -EOPNOTSUPP;
	}

	priv->op_mode = vif->type;

	return 0;
}
/*
 * mac80211 .remove_interface callback.  For ad-hoc and AP interfaces,
 * stop automatic beacon transmission and the TSF counter and clear the
 * corresponding host mode bit; station mode needs no register changes.
 */
static void vnt_remove_interface(struct ieee80211_hw *hw,
				 struct ieee80211_vif *vif)
{
	struct vnt_private *priv = hw->priv;

	if (vif->type == NL80211_IFTYPE_ADHOC ||
	    vif->type == NL80211_IFTYPE_AP) {
		vt6655_mac_reg_bits_off(priv->port_offset, MAC_REG_TCR,
					TCR_AUTOBCNTX);
		vt6655_mac_reg_bits_off(priv->port_offset,
					MAC_REG_TFTCTL, TFTCTL_TSFCNTREN);
		vt6655_mac_reg_bits_off(priv->port_offset, MAC_REG_HOSTCR,
					vif->type == NL80211_IFTYPE_ADHOC ?
					HOSTCR_ADHOC : HOSTCR_AP);
	}

	priv->op_mode = NL80211_IFTYPE_UNSPECIFIED;
}
/*
 * mac80211 .config callback: apply power-save mode, channel changes
 * (switching the baseband between 11a and 11g as the band dictates) and
 * TX power changes.
 */
static int vnt_config(struct ieee80211_hw *hw, u32 changed)
{
	struct vnt_private *priv = hw->priv;
	struct ieee80211_conf *conf = &hw->conf;
	u8 bb_type;

	if (changed & IEEE80211_CONF_CHANGE_PS) {
		if (conf->flags & IEEE80211_CONF_PS)
			PSvEnablePowerSaving(priv, conf->listen_interval);
		else
			PSvDisablePowerSaving(priv);
	}

	if ((changed & IEEE80211_CONF_CHANGE_CHANNEL) ||
	    (conf->flags & IEEE80211_CONF_OFFCHANNEL)) {
		set_channel(priv, conf->chandef.chan);

		if (conf->chandef.chan->band == NL80211_BAND_5GHZ)
			bb_type = BB_TYPE_11A;
		else
			bb_type = BB_TYPE_11G;

		/* reprogram PHY parameters only when the type changed */
		if (priv->byBBType != bb_type) {
			priv->byBBType = bb_type;

			CARDbSetPhyParameter(priv, priv->byBBType);
		}
	}

	if (changed & IEEE80211_CONF_CHANGE_POWER) {
		/* recompute TX power at the band's top rate */
		if (priv->byBBType == BB_TYPE_11B)
			priv->wCurrentRate = RATE_1M;
		else
			priv->wCurrentRate = RATE_54M;

		RFbSetPower(priv, priv->wCurrentRate,
			    conf->chandef.chan->hw_value);
	}

	return 0;
}
/*
 * mac80211 .bss_info_changed callback: push BSS parameter changes -
 * BSSID, basic rates, ERP preamble/CTS-protection/slot time, TX power,
 * beaconing and (for non-AP modes) association TSF synchronization - to
 * the hardware.
 */
static void vnt_bss_info_changed(struct ieee80211_hw *hw,
				 struct ieee80211_vif *vif,
				 struct ieee80211_bss_conf *conf, u64 changed)
{
	struct vnt_private *priv = hw->priv;

	priv->current_aid = vif->cfg.aid;

	if (changed & BSS_CHANGED_BSSID && conf->bssid) {
		unsigned long flags;

		spin_lock_irqsave(&priv->lock, flags);

		vt6655_mac_write_bssid_addr(priv->port_offset, conf->bssid);

		spin_unlock_irqrestore(&priv->lock, flags);
	}

	if (changed & BSS_CHANGED_BASIC_RATES) {
		priv->basic_rates = conf->basic_rates;

		CARDvUpdateBasicTopRate(priv);

		dev_dbg(&priv->pcid->dev,
			"basic rates %x\n", conf->basic_rates);
	}

	if (changed & BSS_CHANGED_ERP_PREAMBLE) {
		if (conf->use_short_preamble) {
			vt6655_mac_en_barker_preamble_md(priv->port_offset);
			priv->preamble_type = true;
		} else {
			vt6655_mac_dis_barker_preamble_md(priv->port_offset);
			priv->preamble_type = false;
		}
	}

	if (changed & BSS_CHANGED_ERP_CTS_PROT) {
		if (conf->use_cts_prot)
			vt6655_mac_en_protect_md(priv->port_offset);
		else
			vt6655_mac_dis_protect_md(priv->port_offset);
	}

	if (changed & BSS_CHANGED_ERP_SLOT) {
		if (conf->use_short_slot)
			priv->short_slot_time = true;
		else
			priv->short_slot_time = false;

		/* slot time is programmed as part of the PHY parameters */
		CARDbSetPhyParameter(priv, priv->byBBType);
		bb_set_vga_gain_offset(priv, priv->abyBBVGA[0]);
	}

	if (changed & BSS_CHANGED_TXPOWER)
		RFbSetPower(priv, priv->wCurrentRate,
			    conf->chandef.chan->hw_value);

	if (changed & BSS_CHANGED_BEACON_ENABLED) {
		dev_dbg(&priv->pcid->dev,
			"Beacon enable %d\n", conf->enable_beacon);

		if (conf->enable_beacon) {
			vnt_beacon_enable(priv, vif, conf);

			vt6655_mac_reg_bits_on(priv->port_offset, MAC_REG_TCR, TCR_AUTOBCNTX);
		} else {
			vt6655_mac_reg_bits_off(priv->port_offset, MAC_REG_TCR,
						TCR_AUTOBCNTX);
		}
	}

	if (changed & (BSS_CHANGED_ASSOC | BSS_CHANGED_BEACON_INFO) &&
	    priv->op_mode != NL80211_IFTYPE_AP) {
		if (vif->cfg.assoc && conf->beacon_rate) {
			/* sync local TSF/beacon timing to the AP */
			CARDbUpdateTSF(priv, conf->beacon_rate->hw_value,
				       conf->sync_tsf);

			CARDbSetBeaconPeriod(priv, conf->beacon_int);

			CARDvSetFirstNextTBTT(priv, conf->beacon_int);
		} else {
			/* disassociated: restart the free-running TSF */
			iowrite8(TFTCTL_TSFCNTRST, priv->port_offset + MAC_REG_TFTCTL);
			iowrite8(TFTCTL_TSFCNTREN, priv->port_offset + MAC_REG_TFTCTL);
		}
	}
}
/*
 * mac80211 .prepare_multicast callback: build the 64-bit hardware
 * multicast hash.  The top 6 bits of each address CRC select one bit in
 * the filter.  Also records the list length for vnt_configure().
 */
static u64 vnt_prepare_multicast(struct ieee80211_hw *hw,
				 struct netdev_hw_addr_list *mc_list)
{
	struct vnt_private *priv = hw->priv;
	struct netdev_hw_addr *ha;
	u64 mc_filter = 0;

	netdev_hw_addr_list_for_each(ha, mc_list) {
		u32 hash_bit = ether_crc(ETH_ALEN, ha->addr) >> 26;

		mc_filter |= 1ULL << (hash_bit & 0x3f);
	}

	priv->mc_list_count = mc_list->count;

	return mc_filter;
}
/*
 * mac80211 .configure_filter callback: program the hardware RX filter
 * (RCR) and the 64-bit multicast hash (MAR0/MAR1, on register page 1).
 *
 * Fix: dropped the old "multicast = le64_to_cpu(multicast)" call.
 * @multicast is the CPU-order hash built by vnt_prepare_multicast(), so
 * the conversion was a no-op on little-endian and would have byte-
 * swapped the hash on big-endian hosts (it also triggered a sparse
 * restricted-type warning).
 */
static void vnt_configure(struct ieee80211_hw *hw,
			  unsigned int changed_flags,
			  unsigned int *total_flags, u64 multicast)
{
	struct vnt_private *priv = hw->priv;
	u8 rx_mode = 0;

	*total_flags &= FIF_ALLMULTI | FIF_OTHER_BSS | FIF_BCN_PRBRESP_PROMISC;

	rx_mode = ioread8(priv->port_offset + MAC_REG_RCR);

	dev_dbg(&priv->pcid->dev, "rx mode in = %x\n", rx_mode);

	if (changed_flags & FIF_ALLMULTI) {
		if (*total_flags & FIF_ALLMULTI) {
			unsigned long flags;

			spin_lock_irqsave(&priv->lock, flags);

			if (priv->mc_list_count > 2) {
				/* accept all multicast frames */
				VT6655_MAC_SELECT_PAGE1(priv->port_offset);

				iowrite32(0xffffffff, priv->port_offset + MAC_REG_MAR0);
				iowrite32(0xffffffff, priv->port_offset + MAC_REG_MAR0 + 4);

				VT6655_MAC_SELECT_PAGE0(priv->port_offset);
			} else {
				VT6655_MAC_SELECT_PAGE1(priv->port_offset);

				iowrite32((u32)multicast, priv->port_offset + MAC_REG_MAR0);
				iowrite32((u32)(multicast >> 32),
					  priv->port_offset + MAC_REG_MAR0 + 4);

				VT6655_MAC_SELECT_PAGE0(priv->port_offset);
			}

			spin_unlock_irqrestore(&priv->lock, flags);

			rx_mode |= RCR_MULTICAST | RCR_BROADCAST;
		} else {
			rx_mode &= ~(RCR_MULTICAST | RCR_BROADCAST);
		}
	}

	if (changed_flags & (FIF_OTHER_BSS | FIF_BCN_PRBRESP_PROMISC)) {
		rx_mode |= RCR_MULTICAST | RCR_BROADCAST;

		/* in promiscuous-ish modes, stop filtering on BSSID */
		if (*total_flags & (FIF_OTHER_BSS | FIF_BCN_PRBRESP_PROMISC))
			rx_mode &= ~RCR_BSSID;
		else
			rx_mode |= RCR_BSSID;
	}

	iowrite8(rx_mode, priv->port_offset + MAC_REG_RCR);

	dev_dbg(&priv->pcid->dev, "rx mode out= %x\n", rx_mode);
}
/*
 * mac80211 .set_key callback: install a key in hardware or mark its key
 * entry as free again.  Returns -EOPNOTSUPP when the cipher cannot be
 * offloaded.
 */
static int vnt_set_key(struct ieee80211_hw *hw, enum set_key_cmd cmd,
		       struct ieee80211_vif *vif, struct ieee80211_sta *sta,
		       struct ieee80211_key_conf *key)
{
	struct vnt_private *priv = hw->priv;

	if (cmd == SET_KEY) {
		if (vnt_set_keys(hw, sta, vif, key))
			return -EOPNOTSUPP;
	} else if (cmd == DISABLE_KEY) {
		if (test_bit(key->hw_key_idx, &priv->key_entry_inuse))
			clear_bit(key->hw_key_idx, &priv->key_entry_inuse);
	}

	return 0;
}
static int vnt_get_stats(struct ieee80211_hw *hw,
struct ieee80211_low_level_stats *stats)
{
struct vnt_private *priv = hw->priv;
memcpy(stats, &priv->low_stats, sizeof(*stats));
return 0;
}
/* mac80211 .get_tsf callback: read the NIC's current TSF counter. */
static u64 vnt_get_tsf(struct ieee80211_hw *hw, struct ieee80211_vif *vif)
{
	struct vnt_private *priv = hw->priv;

	return vt6655_get_current_tsf(priv);
}
/*
 * mac80211 .set_tsf callback: re-derive the next-TBTT time from the new
 * TSF value and the configured beacon interval.
 */
static void vnt_set_tsf(struct ieee80211_hw *hw, struct ieee80211_vif *vif,
			u64 tsf)
{
	struct vnt_private *priv = hw->priv;

	CARDvUpdateNextTBTT(priv, tsf, vif->bss_conf.beacon_int);
}
/* mac80211 .reset_tsf callback. */
static void vnt_reset_tsf(struct ieee80211_hw *hw, struct ieee80211_vif *vif)
{
	struct vnt_private *priv = hw->priv;

	/* reset TSF counter */
	iowrite8(TFTCTL_TSFCNTRST, priv->port_offset + MAC_REG_TFTCTL);
}
/* mac80211 callbacks implemented by this driver */
static const struct ieee80211_ops vnt_mac_ops = {
	.tx = vnt_tx_80211,
	.wake_tx_queue = ieee80211_handle_wake_tx_queue,
	.start = vnt_start,
	.stop = vnt_stop,
	.add_interface = vnt_add_interface,
	.remove_interface = vnt_remove_interface,
	.config = vnt_config,
	.bss_info_changed = vnt_bss_info_changed,
	.prepare_multicast = vnt_prepare_multicast,
	.configure_filter = vnt_configure,
	.set_key = vnt_set_key,
	.get_stats = vnt_get_stats,
	.get_tsf = vnt_get_tsf,
	.set_tsf = vnt_set_tsf,
	.reset_tsf = vnt_reset_tsf,
};
/*
 * Final bring-up step of probe: publish the permanent MAC address and
 * supported band tables, register with mac80211, then power the radio
 * off again until mac80211 calls vnt_start().  Returns 0 or -ENODEV.
 */
static int vnt_init(struct vnt_private *priv)
{
	SET_IEEE80211_PERM_ADDR(priv->hw, priv->abyCurrentNetAddr);

	vnt_init_bands(priv);

	if (ieee80211_register_hw(priv->hw))
		return -ENODEV;

	priv->mac_hw = true;

	CARDbRadioPowerOff(priv);

	return 0;
}
/*
 * PCI probe: allocate the ieee80211_hw and private state, enable the
 * PCI device and map its registers, reset and initialize the MAC, read
 * the MAC address and RF type from EEPROM, then register with mac80211.
 *
 * NOTE(review): the error paths rely entirely on device_free_info() for
 * unwinding - verify that it also disables the PCI device, releases the
 * regions and unmaps port_offset for the paths that reached those steps.
 */
static int
vt6655_probe(struct pci_dev *pcid, const struct pci_device_id *ent)
{
	struct vnt_private *priv;
	struct ieee80211_hw *hw;
	struct wiphy *wiphy;
	int rc;

	dev_notice(&pcid->dev,
		   "%s Ver. %s\n", DEVICE_FULL_DRV_NAM, DEVICE_VERSION);

	dev_notice(&pcid->dev,
		   "Copyright (c) 2003 VIA Networking Technologies, Inc.\n");

	hw = ieee80211_alloc_hw(sizeof(*priv), &vnt_mac_ops);
	if (!hw) {
		dev_err(&pcid->dev, "could not register ieee80211_hw\n");
		return -ENOMEM;
	}

	priv = hw->priv;
	priv->pcid = pcid;

	spin_lock_init(&priv->lock);

	priv->hw = hw;

	SET_IEEE80211_DEV(priv->hw, &pcid->dev);

	if (pci_enable_device(pcid)) {
		device_free_info(priv);
		return -ENODEV;
	}

	dev_dbg(&pcid->dev,
		"Before get pci_info memaddr is %x\n", priv->memaddr);

	pci_set_master(pcid);

	priv->memaddr = pci_resource_start(pcid, 0);
	priv->ioaddr = pci_resource_start(pcid, 1);
	/* only the first 256 bytes of BAR0 are mapped */
	priv->port_offset = ioremap(priv->memaddr & PCI_BASE_ADDRESS_MEM_MASK,
				    256);
	if (!priv->port_offset) {
		dev_err(&pcid->dev, ": Failed to IO remapping ..\n");
		device_free_info(priv);
		return -ENODEV;
	}

	rc = pci_request_regions(pcid, DEVICE_NAME);
	if (rc) {
		dev_err(&pcid->dev, ": Failed to find PCI device\n");
		device_free_info(priv);
		return -ENODEV;
	}

	/* descriptor/buffer pools are 32-bit DMA addressable only */
	if (dma_set_mask(&pcid->dev, DMA_BIT_MASK(32))) {
		dev_err(&pcid->dev, ": Failed to set dma 32 bit mask\n");
		device_free_info(priv);
		return -ENODEV;
	}

	INIT_WORK(&priv->interrupt_work, vnt_interrupt_work);

	/* do reset */
	if (!MACbSoftwareReset(priv)) {
		dev_err(&pcid->dev, ": Failed to access MAC hardware..\n");
		device_free_info(priv);
		return -ENODEV;
	}

	/* initial to reload eeprom */
	MACvInitialize(priv);
	vt6655_mac_read_ether_addr(priv->port_offset, priv->abyCurrentNetAddr);

	/* Get RFType */
	priv->byRFType = SROMbyReadEmbedded(priv->port_offset, EEP_OFS_RFTYPE);
	priv->byRFType &= RF_MASK;

	dev_dbg(&pcid->dev, "RF Type = %x\n", priv->byRFType);

	device_get_options(priv);
	device_set_options(priv);

	wiphy = priv->hw->wiphy;

	wiphy->frag_threshold = FRAG_THRESH_DEF;
	wiphy->rts_threshold = RTS_THRESH_DEF;
	wiphy->interface_modes = BIT(NL80211_IFTYPE_STATION) |
		BIT(NL80211_IFTYPE_ADHOC) | BIT(NL80211_IFTYPE_AP);

	ieee80211_hw_set(priv->hw, TIMING_BEACON_ONLY);
	ieee80211_hw_set(priv->hw, SIGNAL_DBM);
	ieee80211_hw_set(priv->hw, RX_INCLUDES_FCS);
	ieee80211_hw_set(priv->hw, REPORTS_TX_ACK_STATUS);
	ieee80211_hw_set(priv->hw, SUPPORTS_PS);

	priv->hw->max_signal = 100;

	if (vnt_init(priv)) {
		device_free_info(priv);
		return -ENODEV;
	}

	device_print_info(priv);

	pci_set_drvdata(pcid, priv);

	return 0;
}
/*------------------------------------------------------------------*/
/* PM suspend: quiesce the MAC under the driver lock before sleeping. */
static int __maybe_unused vt6655_suspend(struct device *dev_d)
{
	struct vnt_private *priv = dev_get_drvdata(dev_d);
	unsigned long flags;

	spin_lock_irqsave(&priv->lock, flags);

	MACbShutdown(priv);

	spin_unlock_irqrestore(&priv->lock, flags);

	return 0;
}
/* PM resume: disarm device wakeup signalling. */
static int __maybe_unused vt6655_resume(struct device *dev_d)
{
	device_wakeup_disable(dev_d);

	return 0;
}
MODULE_DEVICE_TABLE(pci, vt6655_pci_id_table);

/* suspend/resume callbacks are only wired up when CONFIG_PM is set */
static SIMPLE_DEV_PM_OPS(vt6655_pm_ops, vt6655_suspend, vt6655_resume);

/* PCI driver glue for the VIA VT6655 WLAN chip */
static struct pci_driver device_driver = {
	.name = DEVICE_NAME,
	.id_table = vt6655_pci_id_table,
	.probe = vt6655_probe,
	.remove = vt6655_remove,
	.driver.pm = &vt6655_pm_ops,
};

module_pci_driver(device_driver);
| linux-master | drivers/staging/vt6655/device_main.c |
// SPDX-License-Identifier: GPL-2.0+
/*
* Copyright (c) 1996, 2003 VIA Networking Technologies, Inc.
* All rights reserved.
*
* Purpose: Provide functions to setup NIC operation mode
* Functions:
* s_vSafeResetTx - Reset Tx
* CARDvSetRSPINF - Set RSPINF
* CARDvUpdateBasicTopRate - Update BasicTopRate
* CARDbAddBasicRate - Add to BasicRateSet
* CARDbIsOFDMinBasicRate - Check if any OFDM rate is in BasicRateSet
* CARDqGetTSFOffset - Calculate TSFOffset
* vt6655_get_current_tsf - Read Current NIC TSF counter
* CARDqGetNextTBTT - Calculate Next Beacon TSF counter
* CARDvSetFirstNextTBTT - Set NIC Beacon time
* CARDvUpdateNextTBTT - Sync. NIC Beacon time
* CARDbRadioPowerOff - Turn Off NIC Radio Power
*
* Revision History:
* 06-10-2003 Bryan YC Fan: Re-write codes to support VT3253 spec.
* 08-26-2003 Kyle Hsu: Modify the definition type of iobase.
* 09-01-2003 Bryan YC Fan: Add vUpdateIFS().
*
*/
#include "card.h"
#include "baseband.h"
#include "mac.h"
#include "desc.h"
#include "rf.h"
#include "power.h"
/*--------------------- Static Definitions -------------------------*/
#define C_SIFS_A 16 /* micro sec. */
#define C_SIFS_BG 10
#define C_EIFS 80 /* micro sec. */
#define C_SLOT_SHORT 9 /* micro sec. */
#define C_SLOT_LONG 20
#define C_CWMIN_A 15 /* slot time */
#define C_CWMIN_B 31
#define C_CWMAX 1023 /* slot time */
#define WAIT_BEACON_TX_DOWN_TMO 3 /* Times */
/*--------------------- Static Variables --------------------------*/
static const unsigned short cwRXBCNTSFOff[MAX_RATE] = {
17, 17, 17, 17, 34, 23, 17, 11, 8, 5, 4, 3};
/*--------------------- Static Functions --------------------------*/
/* Replace the baseband-type field of the ENCFG register with @mask. */
static void vt6655_mac_set_bb_type(void __iomem *iobase, u32 mask)
{
	u32 encfg;

	encfg = ioread32(iobase + MAC_REG_ENCFG);
	encfg = (encfg & ~ENCFG_BBTYPE_MASK) | mask;
	iowrite32(encfg, iobase + MAC_REG_ENCFG);
}
/*--------------------- Export Functions --------------------------*/
/*
* Description: Calculate TxRate and RsvTime fields for RSPINF in OFDM mode.
*
* Parameters:
* In:
* wRate - Tx Rate
* byPktType - Tx Packet type
* Out:
* pbyTxRate - pointer to RSPINF TxRate field
* pbyRsvTime - pointer to RSPINF RsvTime field
*
* Return Value: none
*/
static void s_vCalculateOFDMRParameter(unsigned char rate,
				       u8 bb_type,
				       unsigned char *pbyTxRate,
				       unsigned char *pbyRsvTime)
{
	unsigned char mod_nibble; /* low nibble of the RSPINF TxRate field */
	unsigned char rsv_a;      /* RsvTime for 11a (5 GHz) */

	/*
	 * For every OFDM rate the hardware encoding follows the same
	 * pattern: TxRate is 0x90|nibble on 11a and 0x80|nibble on
	 * 11g/b, and the 2.4 GHz RsvTime is always the 5 GHz value
	 * plus 6 us.  Unknown rates fall back to the RATE_24M values.
	 */
	switch (rate) {
	case RATE_6M:
		mod_nibble = 0x0B;
		rsv_a = 44;
		break;
	case RATE_9M:
		mod_nibble = 0x0F;
		rsv_a = 36;
		break;
	case RATE_12M:
		mod_nibble = 0x0A;
		rsv_a = 32;
		break;
	case RATE_18M:
		mod_nibble = 0x0E;
		rsv_a = 28;
		break;
	case RATE_36M:
		mod_nibble = 0x0D;
		rsv_a = 24;
		break;
	case RATE_48M:
		mod_nibble = 0x08;
		rsv_a = 24;
		break;
	case RATE_54M:
		mod_nibble = 0x0C;
		rsv_a = 24;
		break;
	case RATE_24M:
	default:
		mod_nibble = 0x09;
		rsv_a = 28;
		break;
	}

	if (bb_type == BB_TYPE_11A) { /* 5GHZ */
		*pbyTxRate = 0x90 | mod_nibble;
		*pbyRsvTime = rsv_a;
	} else {
		*pbyTxRate = 0x80 | mod_nibble;
		*pbyRsvTime = rsv_a + 6;
	}
}
/*--------------------- Export Functions --------------------------*/
/*
* Description: Update IFS
*
* Parameters:
* In:
* priv - The adapter to be set
* Out:
* none
*
* Return Value: None.
*/
bool CARDbSetPhyParameter(struct vnt_private *priv, u8 bb_type)
{
	unsigned char byCWMaxMin = 0;
	unsigned char bySlot = 0;
	unsigned char bySIFS = 0;
	unsigned char byDIFS = 0;
	int i;

	/* Set SIFS, DIFS, EIFS, SlotTime, CwMin */
	if (bb_type == BB_TYPE_11A) {
		vt6655_mac_set_bb_type(priv->port_offset, BB_TYPE_11A);
		bb_write_embedded(priv, 0x88, 0x03);
		bySlot = C_SLOT_SHORT;
		bySIFS = C_SIFS_A;
		byDIFS = C_SIFS_A + 2 * C_SLOT_SHORT;
		byCWMaxMin = 0xA4;
	} else if (bb_type == BB_TYPE_11B) {
		vt6655_mac_set_bb_type(priv->port_offset, BB_TYPE_11B);
		bb_write_embedded(priv, 0x88, 0x02);
		bySlot = C_SLOT_LONG;
		bySIFS = C_SIFS_BG;
		byDIFS = C_SIFS_BG + 2 * C_SLOT_LONG;
		byCWMaxMin = 0xA5;
	} else { /* PK_TYPE_11GA & PK_TYPE_11GB */
		vt6655_mac_set_bb_type(priv->port_offset, BB_TYPE_11G);
		bb_write_embedded(priv, 0x88, 0x08);
		bySIFS = C_SIFS_BG;
		/* 11g slot time depends on the short-slot capability */
		if (priv->short_slot_time) {
			bySlot = C_SLOT_SHORT;
			byDIFS = C_SIFS_BG + 2 * C_SLOT_SHORT;
		} else {
			bySlot = C_SLOT_LONG;
			byDIFS = C_SIFS_BG + 2 * C_SLOT_LONG;
		}
		byCWMaxMin = 0xa4;
		/* If any OFDM basic rate is enabled, use the G-only CW set */
		for (i = RATE_54M; i >= RATE_6M; i--) {
			if (priv->basic_rates & ((u32)(0x1 << i))) {
				byCWMaxMin |= 0x1;
				break;
			}
		}
	}
	if (priv->byRFType == RF_RFMD2959) {
		/*
		 * bcs TX_PE will reserve 3 us hardware's processing
		 * time here is 2 us.
		 */
		bySIFS -= 3;
		byDIFS -= 3;
		/*
		 * TX_PE will reserve 3 us for MAX2829 A mode only, it is for
		 * better TX throughput; MAC will need 2 us to process, so the
		 * SIFS, DIFS can be shorter by 2 us.
		 */
	}
	/*
	 * Write each timing register only when the cached value changed,
	 * to avoid redundant MMIO writes.
	 */
	if (priv->bySIFS != bySIFS) {
		priv->bySIFS = bySIFS;
		iowrite8(priv->bySIFS, priv->port_offset + MAC_REG_SIFS);
	}
	if (priv->byDIFS != byDIFS) {
		priv->byDIFS = byDIFS;
		iowrite8(priv->byDIFS, priv->port_offset + MAC_REG_DIFS);
	}
	if (priv->byEIFS != C_EIFS) {
		priv->byEIFS = C_EIFS;
		iowrite8(priv->byEIFS, priv->port_offset + MAC_REG_EIFS);
	}
	if (priv->bySlot != bySlot) {
		priv->bySlot = bySlot;
		iowrite8(priv->bySlot, priv->port_offset + MAC_REG_SLOT);
		/* Slot time changed: let the baseband follow */
		bb_set_short_slot_time(priv);
	}
	if (priv->byCWMaxMin != byCWMaxMin) {
		priv->byCWMaxMin = byCWMaxMin;
		iowrite8(priv->byCWMaxMin, priv->port_offset + MAC_REG_CWMAXMIN0);
	}
	/* Re-derive the packet type and response-frame parameters */
	priv->byPacketType = CARDbyGetPktType(priv);
	CARDvSetRSPINF(priv, bb_type);
	return true;
}
/*
 * Description: Sync. TSF counter to BSS
 *              Get TSF offset and write to HW
 *
 * Parameters:
 *  In:
 *      priv           - The adapter to be sync.
 *      byRxRate       - data rate of receive beacon
 *      qwBSSTimestamp - Rx BCN's TSF
 *                       (the local TSF is read from hardware internally)
 *  Out:
 *      none
 *
 * Return Value: none
 */
bool CARDbUpdateTSF(struct vnt_private *priv, unsigned char byRxRate,
		    u64 qwBSSTimestamp)
{
	u64 local_tsf;
	u64 qwTSFOffset = 0;

	local_tsf = vt6655_get_current_tsf(priv);
	/* Only program an offset when the BSS and local TSF disagree */
	if (qwBSSTimestamp != local_tsf) {
		qwTSFOffset = CARDqGetTSFOffset(byRxRate, qwBSSTimestamp,
						local_tsf);
		/* adjust TSF, HW's TSF add TSF Offset reg */
		/*
		 * NOTE(review): le64_to_cpu() on a CPU-native u64 is a no-op
		 * on little-endian and looks wrong on big-endian hosts -
		 * confirm the intended byte order.
		 */
		qwTSFOffset = le64_to_cpu(qwTSFOffset);
		/* 64-bit offset is written as two 32-bit halves, low first */
		iowrite32((u32)qwTSFOffset, priv->port_offset + MAC_REG_TSFOFST);
		iowrite32((u32)(qwTSFOffset >> 32), priv->port_offset + MAC_REG_TSFOFST + 4);
		/* Tell the MAC to apply the offset to its TSF counter */
		vt6655_mac_reg_bits_on(priv->port_offset, MAC_REG_TFTCTL, TFTCTL_TSFSYNCEN);
	}
	return true;
}
/*
* Description: Set NIC TSF counter for first Beacon time
* Get NEXTTBTT from adjusted TSF and Beacon Interval
*
* Parameters:
* In:
* priv - The adapter to be set.
* wBeaconInterval - Beacon Interval
* Out:
* none
*
* Return Value: true if succeed; otherwise false
*/
bool CARDbSetBeaconPeriod(struct vnt_private *priv,
			  unsigned short wBeaconInterval)
{
	u64 qwNextTBTT;

	qwNextTBTT = vt6655_get_current_tsf(priv); /* Get Local TSF counter */
	/* Round up to the next TBTT boundary for this interval */
	qwNextTBTT = CARDqGetNextTBTT(qwNextTBTT, wBeaconInterval);
	/* set HW beacon interval */
	iowrite16(wBeaconInterval, priv->port_offset + MAC_REG_BI);
	priv->wBeaconInterval = wBeaconInterval;
	/* Set NextTBTT */
	/*
	 * NOTE(review): le64_to_cpu() on a CPU-native u64 - see the same
	 * pattern in CARDbUpdateTSF(); confirm intended byte order.
	 */
	qwNextTBTT = le64_to_cpu(qwNextTBTT);
	/* 64-bit TBTT is written as two 32-bit halves, low first */
	iowrite32((u32)qwNextTBTT, priv->port_offset + MAC_REG_NEXTTBTT);
	iowrite32((u32)(qwNextTBTT >> 32), priv->port_offset + MAC_REG_NEXTTBTT + 4);
	/* Enable hardware TBTT synchronization */
	vt6655_mac_reg_bits_on(priv->port_offset, MAC_REG_TFTCTL, TFTCTL_TBTTSYNCEN);
	return true;
}
/*
* Description: Turn off Radio power
*
* Parameters:
* In:
* priv - The adapter to be turned off
* Out:
* none
*
*/
void CARDbRadioPowerOff(struct vnt_private *priv)
{
	/* Already off: nothing to do */
	if (priv->radio_off)
		return;

	/* RF-specific power-down sequence via the soft power-control bits */
	switch (priv->byRFType) {
	case RF_RFMD2959:
		vt6655_mac_word_reg_bits_off(priv->port_offset, MAC_REG_SOFTPWRCTL,
					     SOFTPWRCTL_TXPEINV);
		vt6655_mac_word_reg_bits_on(priv->port_offset, MAC_REG_SOFTPWRCTL,
					    SOFTPWRCTL_SWPE1);
		break;
	case RF_AIROHA:
	case RF_AL2230S:
		vt6655_mac_word_reg_bits_off(priv->port_offset, MAC_REG_SOFTPWRCTL,
					     SOFTPWRCTL_SWPE2);
		vt6655_mac_word_reg_bits_off(priv->port_offset, MAC_REG_SOFTPWRCTL,
					     SOFTPWRCTL_SWPE3);
		break;
	}
	/* Other RF types: no soft power-control action defined here */

	/* Stop the receiver and put the baseband into deep sleep */
	vt6655_mac_reg_bits_off(priv->port_offset, MAC_REG_HOSTCR, HOSTCR_RXON);
	bb_set_deep_sleep(priv, priv->local_id);
	priv->radio_off = true;
	pr_debug("chester power off\n");
	vt6655_mac_reg_bits_on(priv->port_offset, MAC_REG_GPIOCTL0, LED_ACTSET); /* LED issue */
}
/*
 * Reset both TX descriptor rings: rewind the ring pointers, hand every
 * descriptor back to the host, and re-program the MAC DMA base addresses.
 */
void CARDvSafeResetTx(struct vnt_private *priv)
{
	unsigned int idx;

	/* Rewind both rings to their first descriptor */
	priv->apTailTD[0] = &priv->apTD0Rings[0];
	priv->apCurrTD[0] = &priv->apTD0Rings[0];
	priv->apTailTD[1] = &priv->apTD1Rings[0];
	priv->apCurrTD[1] = &priv->apTD1Rings[0];

	for (idx = 0; idx < TYPE_MAXTD; idx++)
		priv->iTDUsed[idx] = 0;

	/* Mark every descriptor in ring 0 as host-owned */
	for (idx = 0; idx < priv->opts.tx_descs[0]; idx++)
		priv->apTD0Rings[idx].td0.owner = OWNED_BY_HOST;

	/* Mark every descriptor in ring 1 as host-owned */
	for (idx = 0; idx < priv->opts.tx_descs[1]; idx++)
		priv->apTD1Rings[idx].td0.owner = OWNED_BY_HOST;

	/* set MAC TD pointer */
	vt6655_mac_set_curr_tx_desc_addr(TYPE_TXDMA0, priv, priv->td0_pool_dma);
	vt6655_mac_set_curr_tx_desc_addr(TYPE_AC0DMA, priv, priv->td1_pool_dma);

	/* set MAC Beacon TX pointer */
	iowrite32((u32)priv->tx_beacon_dma, priv->port_offset + MAC_REG_BCNDMAPTR);
}
/*
* Description:
* Reset Rx
*
* Parameters:
* In:
* priv - Pointer to the adapter
* Out:
* none
*
* Return Value: none
*/
/*
 * Reset both RX descriptor rings: rewind the ring pointers, give every
 * descriptor back to the NIC with a full-sized buffer, and re-program the
 * MAC DMA state.
 */
void CARDvSafeResetRx(struct vnt_private *priv)
{
	unsigned int idx;
	struct vnt_rx_desc *desc;

	/* Rewind both rings to their first descriptor */
	priv->pCurrRD[0] = &priv->aRD0Ring[0];
	priv->pCurrRD[1] = &priv->aRD1Ring[0];

	/* Ring 0: all descriptors NIC-owned, full buffer size */
	for (idx = 0; idx < priv->opts.rx_descs0; idx++) {
		desc = &priv->aRD0Ring[idx];
		desc->rd0.res_count = cpu_to_le16(priv->rx_buf_sz);
		desc->rd0.owner = OWNED_BY_NIC;
		desc->rd1.req_count = cpu_to_le16(priv->rx_buf_sz);
	}

	/* Ring 1: same treatment */
	for (idx = 0; idx < priv->opts.rx_descs1; idx++) {
		desc = &priv->aRD1Ring[idx];
		desc->rd0.res_count = cpu_to_le16(priv->rx_buf_sz);
		desc->rd0.owner = OWNED_BY_NIC;
		desc->rd1.req_count = cpu_to_le16(priv->rx_buf_sz);
	}

	/* set perPkt mode */
	iowrite32(RX_PERPKT, priv->port_offset + MAC_REG_RXDMACTL0);
	iowrite32(RX_PERPKT, priv->port_offset + MAC_REG_RXDMACTL1);

	/* set MAC RD pointer */
	vt6655_mac_set_curr_rx_0_desc_addr(priv, priv->rd0_pool_dma);
	vt6655_mac_set_curr_rx_1_desc_addr(priv, priv->rd1_pool_dma);
}
/*
* Description: Get response Control frame rate in CCK mode
*
* Parameters:
* In:
* priv - The adapter to be set
* wRateIdx - Receiving data rate
* Out:
* none
*
* Return Value: response Control frame rate
*/
/*
 * Walk downward from the RX rate and return the highest CCK rate index
 * that is part of the basic-rate set; fall back to RATE_1M.
 */
static unsigned short CARDwGetCCKControlRate(struct vnt_private *priv,
					     unsigned short wRateIdx)
{
	unsigned int idx;

	for (idx = wRateIdx; idx > RATE_1M; idx--) {
		if (priv->basic_rates & ((u32)0x1 << idx))
			return (unsigned short)idx;
	}

	return (unsigned short)RATE_1M;
}
/*
* Description: Get response Control frame rate in OFDM mode
*
* Parameters:
* In:
* priv - The adapter to be set
* wRateIdx - Receiving data rate
* Out:
* none
*
* Return Value: response Control frame rate
*/
/*
 * Return the response-control-frame rate for an OFDM RX rate: the highest
 * OFDM basic rate at or below wRateIdx, RATE_24M when none qualifies, or
 * wRateIdx clamped to RATE_24M when no OFDM basic rate is configured.
 */
static unsigned short CARDwGetOFDMControlRate(struct vnt_private *priv,
					      unsigned short wRateIdx)
{
	unsigned int ui = (unsigned int)wRateIdx;

	pr_debug("BASIC RATE: %X\n", priv->basic_rates);

	/* No OFDM basic rate at all: clamp the RX rate to 24M */
	if (!CARDbIsOFDMinBasicRate(priv)) {
		pr_debug("%s:(NO OFDM) %d\n", __func__, wRateIdx);
		if (wRateIdx > RATE_24M)
			wRateIdx = RATE_24M;
		return wRateIdx;
	}
	/* Walk down to the highest OFDM basic rate at or below wRateIdx */
	while (ui > RATE_11M) {
		if (priv->basic_rates & ((u32)0x1 << ui)) {
			pr_debug("%s : %d\n", __func__, ui);
			return (unsigned short)ui;
		}
		ui--;
	}
	/* Fix: this path returns RATE_24M; the old message claimed 6M */
	pr_debug("%s: 24M\n", __func__);
	return (unsigned short)RATE_24M;
}
/*
* Description: Set RSPINF
*
* Parameters:
* In:
* priv - The adapter to be set
* Out:
* none
*
* Return Value: None.
*/
void CARDvSetRSPINF(struct vnt_private *priv, u8 bb_type)
{
	union vnt_phy_field_swap phy;
	unsigned char byTxRate, byRsvTime; /* For OFDM */
	unsigned long flags;

	spin_lock_irqsave(&priv->lock, flags);

	/* RSPINF registers live in MAC register page 1 */
	VT6655_MAC_SELECT_PAGE1(priv->port_offset);

	/* RSPINF_b_1 */
	vnt_get_phy_field(priv, 14,
			  CARDwGetCCKControlRate(priv, RATE_1M),
			  PK_TYPE_11B, &phy.field_read);

	/* swap over to get correct write order */
	swap(phy.swap[0], phy.swap[1]);

	iowrite32(phy.field_write, priv->port_offset + MAC_REG_RSPINF_B_1);

	/* RSPINF_b_2 */
	vnt_get_phy_field(priv, 14,
			  CARDwGetCCKControlRate(priv, RATE_2M),
			  PK_TYPE_11B, &phy.field_read);

	swap(phy.swap[0], phy.swap[1]);

	iowrite32(phy.field_write, priv->port_offset + MAC_REG_RSPINF_B_2);

	/* RSPINF_b_5 */
	vnt_get_phy_field(priv, 14,
			  CARDwGetCCKControlRate(priv, RATE_5M),
			  PK_TYPE_11B, &phy.field_read);

	swap(phy.swap[0], phy.swap[1]);

	iowrite32(phy.field_write, priv->port_offset + MAC_REG_RSPINF_B_5);

	/* RSPINF_b_11 */
	vnt_get_phy_field(priv, 14,
			  CARDwGetCCKControlRate(priv, RATE_11M),
			  PK_TYPE_11B, &phy.field_read);

	swap(phy.swap[0], phy.swap[1]);

	iowrite32(phy.field_write, priv->port_offset + MAC_REG_RSPINF_B_11);

	/*
	 * The 6M..24M OFDM responses always use the nominal rate; the
	 * 36M..72M entries first clamp to the highest OFDM basic rate.
	 */
	/* RSPINF_a_6 */
	s_vCalculateOFDMRParameter(RATE_6M,
				   bb_type,
				   &byTxRate,
				   &byRsvTime);
	iowrite16(MAKEWORD(byTxRate, byRsvTime), priv->port_offset + MAC_REG_RSPINF_A_6);
	/* RSPINF_a_9 */
	s_vCalculateOFDMRParameter(RATE_9M,
				   bb_type,
				   &byTxRate,
				   &byRsvTime);
	iowrite16(MAKEWORD(byTxRate, byRsvTime), priv->port_offset + MAC_REG_RSPINF_A_9);
	/* RSPINF_a_12 */
	s_vCalculateOFDMRParameter(RATE_12M,
				   bb_type,
				   &byTxRate,
				   &byRsvTime);
	iowrite16(MAKEWORD(byTxRate, byRsvTime), priv->port_offset + MAC_REG_RSPINF_A_12);
	/* RSPINF_a_18 */
	s_vCalculateOFDMRParameter(RATE_18M,
				   bb_type,
				   &byTxRate,
				   &byRsvTime);
	iowrite16(MAKEWORD(byTxRate, byRsvTime), priv->port_offset + MAC_REG_RSPINF_A_18);
	/* RSPINF_a_24 */
	s_vCalculateOFDMRParameter(RATE_24M,
				   bb_type,
				   &byTxRate,
				   &byRsvTime);
	iowrite16(MAKEWORD(byTxRate, byRsvTime), priv->port_offset + MAC_REG_RSPINF_A_24);
	/* RSPINF_a_36 (cleanup: dropped the needless (void *) cast of priv) */
	s_vCalculateOFDMRParameter(CARDwGetOFDMControlRate(priv, RATE_36M),
				   bb_type,
				   &byTxRate,
				   &byRsvTime);
	iowrite16(MAKEWORD(byTxRate, byRsvTime), priv->port_offset + MAC_REG_RSPINF_A_36);
	/* RSPINF_a_48 */
	s_vCalculateOFDMRParameter(CARDwGetOFDMControlRate(priv, RATE_48M),
				   bb_type,
				   &byTxRate,
				   &byRsvTime);
	iowrite16(MAKEWORD(byTxRate, byRsvTime), priv->port_offset + MAC_REG_RSPINF_A_48);
	/* RSPINF_a_54 */
	s_vCalculateOFDMRParameter(CARDwGetOFDMControlRate(priv, RATE_54M),
				   bb_type,
				   &byTxRate,
				   &byRsvTime);
	iowrite16(MAKEWORD(byTxRate, byRsvTime), priv->port_offset + MAC_REG_RSPINF_A_54);
	/* RSPINF_a_72: reuses the 54M parameters (no 72M rate index) */
	s_vCalculateOFDMRParameter(CARDwGetOFDMControlRate(priv, RATE_54M),
				   bb_type,
				   &byTxRate,
				   &byRsvTime);
	iowrite16(MAKEWORD(byTxRate, byRsvTime), priv->port_offset + MAC_REG_RSPINF_A_72);
	/* Restore the default register page before releasing the lock */
	VT6655_MAC_SELECT_PAGE0(priv->port_offset);

	spin_unlock_irqrestore(&priv->lock, flags);
}
/*
 * Cache the highest configured OFDM and CCK basic rates in priv
 * (defaults: RATE_24M for OFDM, RATE_1M for CCK).
 */
void CARDvUpdateBasicTopRate(struct vnt_private *priv)
{
	unsigned char rate;

	/* Highest OFDM basic rate */
	priv->byTopOFDMBasicRate = RATE_24M;
	for (rate = RATE_54M; rate >= RATE_6M; rate--) {
		if (priv->basic_rates & ((u32)(1 << rate))) {
			priv->byTopOFDMBasicRate = rate;
			break;
		}
	}

	/* Highest CCK basic rate; RATE_1M terminates the downward scan */
	priv->byTopCCKBasicRate = RATE_1M;
	for (rate = RATE_11M;; rate--) {
		if (priv->basic_rates & ((u32)(1 << rate))) {
			priv->byTopCCKBasicRate = rate;
			break;
		}
		if (rate == RATE_1M)
			break;
	}
}
/* True when any OFDM rate (RATE_6M..RATE_54M) is in the basic-rate set. */
bool CARDbIsOFDMinBasicRate(struct vnt_private *priv)
{
	int rate;

	for (rate = RATE_54M; rate >= RATE_6M; rate--)
		if (priv->basic_rates & ((u32)BIT(rate)))
			return true;

	return false;
}
/*
 * Derive the packet type from the baseband mode: pure 11a/11b map
 * directly; 11g is split into G-only (OFDM basic rates present) and
 * B/G-mixed.  Cleanup: dropped the needless (void *) cast of priv.
 */
unsigned char CARDbyGetPktType(struct vnt_private *priv)
{
	if (priv->byBBType == BB_TYPE_11A || priv->byBBType == BB_TYPE_11B)
		return (unsigned char)priv->byBBType;

	if (CARDbIsOFDMinBasicRate(priv))
		return PK_TYPE_11GA;

	return PK_TYPE_11GB;
}
/*
* Description: Calculate TSF offset of two TSF input
* Get TSF Offset from RxBCN's TSF and local TSF
*
* Parameters:
* In:
* priv - The adapter to be sync.
* qwTSF1 - Rx BCN's TSF
* qwTSF2 - Local TSF
* Out:
* none
*
* Return Value: TSF Offset value
*/
u64 CARDqGetTSFOffset(unsigned char byRxRate, u64 qwTSF1, u64 qwTSF2)
{
	/*
	 * Compensate the local TSF (qwTSF2) for the rate-dependent beacon
	 * reception latency, then return the remaining difference.
	 */
	return qwTSF1 - (qwTSF2 + (u64)cwRXBCNTSFOff[byRxRate % MAX_RATE]);
}
/*
* Description: Read NIC TSF counter
* Get local TSF counter
*
* Parameters:
* In:
* priv - The adapter to be read
* Out:
* none
*
* Return Value: Current TSF counter
*/
u64 vt6655_get_current_tsf(struct vnt_private *priv)
{
	void __iomem *iobase = priv->port_offset;
	unsigned short ww;
	unsigned char data;
	u32 low, high;

	/* Latch the 64-bit TSF counter; hw clears the bit when done */
	vt6655_mac_reg_bits_on(iobase, MAC_REG_TFTCTL, TFTCTL_TSFCNTRRD);
	for (ww = 0; ww < W_MAX_TIMEOUT; ww++) {
		data = ioread8(iobase + MAC_REG_TFTCTL);
		if (!(data & TFTCTL_TSFCNTRRD))
			break;
	}
	/* Latch never completed: report TSF 0 rather than a stale value */
	if (ww == W_MAX_TIMEOUT)
		return 0;
	low = ioread32(iobase + MAC_REG_TSFCNTR);
	high = ioread32(iobase + MAC_REG_TSFCNTR + 4);
	/*
	 * NOTE(review): "low + (high << 32)" only works because the bit
	 * ranges are disjoint (| would be clearer), and le64_to_cpu() on a
	 * value ioread32() already converted to CPU order looks wrong on
	 * big-endian hosts - confirm the intended behavior.
	 */
	return le64_to_cpu(low + ((u64)high << 32));
}
/*
* Description: Read NIC TSF counter
* Get NEXTTBTT from adjusted TSF and Beacon Interval
*
* Parameters:
* In:
* qwTSF - Current TSF counter
* wbeaconInterval - Beacon Interval
* Out:
* qwCurrTSF - Current TSF counter
*
* Return Value: TSF value of next Beacon
*/
u64 CARDqGetNextTBTT(u64 qwTSF, unsigned short wBeaconInterval)
{
	/* Beacon interval is given in TU; one TU is 1024 microseconds. */
	u32 beacon_int = wBeaconInterval * 1024;

	if (!beacon_int)
		return qwTSF;

	/* Round up to the next beacon boundary strictly after qwTSF. */
	do_div(qwTSF, beacon_int);
	return (qwTSF + 1) * beacon_int;
}
/*
* Description: Set NIC TSF counter for first Beacon time
* Get NEXTTBTT from adjusted TSF and Beacon Interval
*
* Parameters:
* In:
* iobase - IO Base
* wBeaconInterval - Beacon Interval
* Out:
* none
*
* Return Value: none
*/
void CARDvSetFirstNextTBTT(struct vnt_private *priv,
			   unsigned short wBeaconInterval)
{
	void __iomem *iobase = priv->port_offset;
	u64 qwNextTBTT;

	qwNextTBTT = vt6655_get_current_tsf(priv); /* Get Local TSF counter */
	/* Round up to the next TBTT boundary for this interval */
	qwNextTBTT = CARDqGetNextTBTT(qwNextTBTT, wBeaconInterval);
	/* Set NextTBTT */
	/*
	 * NOTE(review): le64_to_cpu() on a CPU-native u64 - same suspicious
	 * pattern as in CARDbSetBeaconPeriod(); confirm byte order.
	 */
	qwNextTBTT = le64_to_cpu(qwNextTBTT);
	/* 64-bit TBTT is written as two 32-bit halves, low first */
	iowrite32((u32)qwNextTBTT, iobase + MAC_REG_NEXTTBTT);
	iowrite32((u32)(qwNextTBTT >> 32), iobase + MAC_REG_NEXTTBTT + 4);
	/* Enable hardware TBTT synchronization */
	vt6655_mac_reg_bits_on(iobase, MAC_REG_TFTCTL, TFTCTL_TBTTSYNCEN);
}
/*
* Description: Sync NIC TSF counter for Beacon time
* Get NEXTTBTT and write to HW
*
* Parameters:
* In:
* priv - The adapter to be set
* qwTSF - Current TSF counter
* wBeaconInterval - Beacon Interval
* Out:
* none
*
* Return Value: none
*/
void CARDvUpdateNextTBTT(struct vnt_private *priv, u64 qwTSF,
			 unsigned short wBeaconInterval)
{
	void __iomem *iobase = priv->port_offset;

	/* Advance the supplied TSF to the next TBTT boundary */
	qwTSF = CARDqGetNextTBTT(qwTSF, wBeaconInterval);
	/* Set NextTBTT */
	/* NOTE(review): le64_to_cpu() on a CPU-native u64 - confirm intent */
	qwTSF = le64_to_cpu(qwTSF);
	/* 64-bit TBTT is written as two 32-bit halves, low first */
	iowrite32((u32)qwTSF, iobase + MAC_REG_NEXTTBTT);
	iowrite32((u32)(qwTSF >> 32), iobase + MAC_REG_NEXTTBTT + 4);
	/* Enable hardware TBTT synchronization */
	vt6655_mac_reg_bits_on(iobase, MAC_REG_TFTCTL, TFTCTL_TBTTSYNCEN);
	pr_debug("Card:Update Next TBTT[%8llx]\n", qwTSF);
}
| linux-master | drivers/staging/vt6655/card.c |
// SPDX-License-Identifier: GPL-2.0+
/*
* Copyright (c) 1996, 2003 VIA Networking Technologies, Inc.
* All rights reserved.
*
* Purpose: Implement functions for 802.11i Key management
*
* Author: Jerry Chen
*
* Date: May 29, 2003
*
*/
#include "key.h"
#include "mac.h"
/*
 * Program one hardware key-table entry: pick a table slot, build the
 * key-mode flags from key_type/mode, then write the entry to the MAC.
 * Returns 0 on success or -EINVAL for an unknown key_type.
 */
static int vnt_set_keymode(struct ieee80211_hw *hw, u8 *mac_addr,
			   struct ieee80211_key_conf *key, u32 key_type,
			   u32 mode, bool onfly_latch)
{
	struct vnt_private *priv = hw->priv;
	u8 broadcast[6] = {0xff, 0xff, 0xff, 0xff, 0xff, 0xff};
	u16 key_mode = 0;
	u32 entry = 0;
	u8 *bssid;
	u8 key_inx = key->keyidx;
	u8 i;

	/* Group keys without a station address match the broadcast BSSID */
	if (mac_addr)
		bssid = mac_addr;
	else
		bssid = &broadcast[0];

	/*
	 * Non-default keys claim the first free slot (the last slot is
	 * reserved for the default key).
	 * NOTE(review): if no slot is free, entry stays 0 and slot 0 is
	 * silently overwritten - confirm this is intended.
	 */
	if (key_type != VNT_KEY_DEFAULTKEY) {
		for (i = 0; i < (MAX_KEY_TABLE - 1); i++) {
			if (!test_bit(i, &priv->key_entry_inuse)) {
				set_bit(i, &priv->key_entry_inuse);
				key->hw_key_idx = i;
				entry = key->hw_key_idx;
				break;
			}
		}
	}

	/*
	 * Deliberate cascade: DEFAULTKEY accumulates ALLGROUP flags,
	 * which accumulates GROUP_ADDRESS flags, which accumulates GROUP
	 * flags.  Only PAIRWISE stands alone.
	 */
	switch (key_type) {
	case VNT_KEY_DEFAULTKEY:
		/* default key last entry */
		entry = MAX_KEY_TABLE - 1;
		key->hw_key_idx = entry;
		fallthrough;
	case VNT_KEY_ALLGROUP:
		key_mode |= VNT_KEY_ALLGROUP;
		if (onfly_latch)
			key_mode |= VNT_KEY_ONFLY_ALL;
		fallthrough;
	case VNT_KEY_GROUP_ADDRESS:
		key_mode |= mode;
		fallthrough;
	case VNT_KEY_GROUP:
		key_mode |= (mode << 4);
		key_mode |= VNT_KEY_GROUP;
		break;
	case VNT_KEY_PAIRWISE:
		key_mode |= mode;
		key_inx = 4;	/* pairwise keys use the fixed index 4 */
		break;
	default:
		return -EINVAL;
	}
	if (onfly_latch)
		key_mode |= VNT_KEY_ONFLY;
	/* WEP: bit 7 of key byte 15 encodes the 104- vs 40-bit key length */
	if (mode == KEY_CTL_WEP) {
		if (key->keylen == WLAN_KEY_LEN_WEP40)
			key->key[15] &= 0x7f;
		if (key->keylen == WLAN_KEY_LEN_WEP104)
			key->key[15] |= 0x80;
	}
	MACvSetKeyEntry(priv, key_mode, entry, key_inx,
			bssid, (u32 *)key->key, priv->local_id);
	return 0;
}
/*
 * mac80211 set_key entry point: map the requested cipher onto the
 * hardware key modes and install pairwise and/or group keys.
 */
int vnt_set_keys(struct ieee80211_hw *hw, struct ieee80211_sta *sta,
		 struct ieee80211_vif *vif, struct ieee80211_key_conf *key)
{
	struct ieee80211_bss_conf *conf = &vif->bss_conf;
	struct vnt_private *priv = hw->priv;
	u8 *mac_addr = NULL;
	u8 key_dec_mode = 0;
	int ret = 0;
	u32 u;

	if (sta)
		mac_addr = &sta->addr[0];

	switch (key->cipher) {
	case 0:
		/* cipher 0: clear the whole hardware key table */
		for (u = 0 ; u < MAX_KEY_TABLE; u++)
			MACvDisableKeyEntry(priv, u);
		return ret;

	case WLAN_CIPHER_SUITE_WEP40:
	case WLAN_CIPHER_SUITE_WEP104:
		/* WEP replaces all existing entries with one default key */
		for (u = 0; u < MAX_KEY_TABLE; u++)
			MACvDisableKeyEntry(priv, u);

		vnt_set_keymode(hw, mac_addr,
				key, VNT_KEY_DEFAULTKEY, KEY_CTL_WEP, true);

		key->flags |= IEEE80211_KEY_FLAG_GENERATE_IV;

		return ret;
	case WLAN_CIPHER_SUITE_TKIP:
		key->flags |= IEEE80211_KEY_FLAG_GENERATE_MMIC;
		key->flags |= IEEE80211_KEY_FLAG_GENERATE_IV;

		key_dec_mode = KEY_CTL_TKIP;

		break;
	case WLAN_CIPHER_SUITE_CCMP:
		key_dec_mode = KEY_CTL_CCMP;

		key->flags |= IEEE80211_KEY_FLAG_GENERATE_IV;
	}
	/*
	 * NOTE(review): no default case - an unrecognized cipher falls
	 * through here with key_dec_mode == 0; confirm this is intended.
	 */

	if (key->flags & IEEE80211_KEY_FLAG_PAIRWISE) {
		vnt_set_keymode(hw, mac_addr,
				key, VNT_KEY_PAIRWISE, key_dec_mode, true);
	} else {
		/* Group key: install both the default and group-address entry */
		vnt_set_keymode(hw, mac_addr,
				key, VNT_KEY_DEFAULTKEY, key_dec_mode, true);

		vnt_set_keymode(hw, (u8 *)conf->bssid,
				key, VNT_KEY_GROUP_ADDRESS, key_dec_mode, true);
	}

	return 0;
}
| linux-master | drivers/staging/vt6655/key.c |
// SPDX-License-Identifier: GPL-2.0
/* IEEE 802.11 SoftMAC layer
* Copyright (c) 2005 Andrea Merello <[email protected]>
*
* Mostly extracted from the rtl8180-sa2400 driver for the
* in-kernel generic ieee802.11 stack.
*
* Some pieces of code might be stolen from ipw2100 driver
* copyright of who own it's copyright ;-)
*
* PS wx handler mostly stolen from hostap, copyright who
* own it's copyright ;-)
*/
#include <linux/etherdevice.h>
#include "rtllib.h"
#include "dot11d.h"
/*
 * wx handler: set the operating channel (or frequency).  Frequencies in
 * the 2.4 GHz band are converted to a channel number first; only
 * channels 1..14 that are active in the channel map are accepted.
 */
int rtllib_wx_set_freq(struct rtllib_device *ieee, struct iw_request_info *a,
		       union iwreq_data *wrqu, char *b)
{
	struct iw_freq *freq = &wrqu->freq;
	int err = 0;

	mutex_lock(&ieee->wx_mutex);

	/* In infrastructure mode the AP dictates the channel: no-op. */
	if (ieee->iw_mode == IW_MODE_INFRA)
		goto done;

	/* Frequency given (e == 1): convert 2.4 GHz band values to a channel */
	if (freq->e == 1 &&
	    freq->m >= (int)2.412e8 && freq->m <= (int)2.487e8) {
		freq->m = ieee80211_freq_khz_to_channel(freq->m / 100);
		freq->e = 0;
	}

	if (freq->e > 0 || freq->m > 14 || freq->m < 1) {
		err = -EOPNOTSUPP;
		goto done;
	}

	if (ieee->active_channel_map[freq->m] != 1) {
		err = -EINVAL;
		goto done;
	}

	ieee->current_network.channel = freq->m;
	ieee->set_chan(ieee->dev, ieee->current_network.channel);

	/* A linked ad-hoc network must restart beaconing on the new channel */
	if (ieee->iw_mode == IW_MODE_ADHOC &&
	    ieee->link_state == MAC80211_LINKED) {
		rtllib_stop_send_beacons(ieee);
		rtllib_start_send_beacons(ieee);
	}

done:
	mutex_unlock(&ieee->wx_mutex);
	return err;
}
EXPORT_SYMBOL(rtllib_wx_set_freq);
/* wx handler: report the current channel as a frequency (e == 1). */
int rtllib_wx_get_freq(struct rtllib_device *ieee,
		       struct iw_request_info *a,
		       union iwreq_data *wrqu, char *b)
{
	struct iw_freq *freq = &wrqu->freq;
	int chan = ieee->current_network.channel;

	if (chan == 0)
		return -1;

	freq->m = ieee80211_channel_to_freq_khz(chan, NL80211_BAND_2GHZ) * 100;
	freq->e = 1;

	return 0;
}
EXPORT_SYMBOL(rtllib_wx_get_freq);
int rtllib_wx_get_wap(struct rtllib_device *ieee,
struct iw_request_info *info,
union iwreq_data *wrqu, char *extra)
{
unsigned long flags;
wrqu->ap_addr.sa_family = ARPHRD_ETHER;
if (ieee->iw_mode == IW_MODE_MONITOR)
return -1;
/* We want avoid to give to the user inconsistent infos*/
spin_lock_irqsave(&ieee->lock, flags);
if (ieee->link_state != MAC80211_LINKED &&
ieee->link_state != MAC80211_LINKED_SCANNING &&
ieee->wap_set == 0)
eth_zero_addr(wrqu->ap_addr.sa_data);
else
memcpy(wrqu->ap_addr.sa_data,
ieee->current_network.bssid, ETH_ALEN);
spin_unlock_irqrestore(&ieee->lock, flags);
return 0;
}
EXPORT_SYMBOL(rtllib_wx_get_wap);
/*
 * wx handler: set the desired AP BSSID.  A zero address clears the
 * binding (and returns -1); a valid address restarts the protocol so
 * association is attempted against the new AP.
 */
int rtllib_wx_set_wap(struct rtllib_device *ieee,
		      struct iw_request_info *info,
		      union iwreq_data *awrq,
		      char *extra)
{
	int ret = 0;
	unsigned long flags;

	short ifup = ieee->proto_started;
	struct sockaddr *temp = (struct sockaddr *)awrq;

	rtllib_stop_scan_syncro(ieee);

	mutex_lock(&ieee->wx_mutex);
	/* use ifconfig hw ether */
	if (temp->sa_family != ARPHRD_ETHER) {
		ret = -EINVAL;
		goto out;
	}

	/* All-zero address: clear the binding without restarting */
	if (is_zero_ether_addr(temp->sa_data)) {
		spin_lock_irqsave(&ieee->lock, flags);
		ether_addr_copy(ieee->current_network.bssid, temp->sa_data);
		ieee->wap_set = 0;
		spin_unlock_irqrestore(&ieee->lock, flags);
		ret = -1;
		goto out;
	}

	if (ifup)
		rtllib_stop_protocol(ieee, true);

	/* just to avoid to give inconsistent infos in the
	 * get wx method. not really needed otherwise
	 */
	spin_lock_irqsave(&ieee->lock, flags);

	ieee->cannot_notify = false;
	ether_addr_copy(ieee->current_network.bssid, temp->sa_data);
	ieee->wap_set = !is_zero_ether_addr(temp->sa_data);

	spin_unlock_irqrestore(&ieee->lock, flags);

	/* Restart the protocol so we (re)associate with the new AP */
	if (ifup)
		rtllib_start_protocol(ieee);
out:
	mutex_unlock(&ieee->wx_mutex);
	return ret;
}
EXPORT_SYMBOL(rtllib_wx_set_wap);
/*
 * wx handler: copy the current SSID into the user buffer.  Returns -1
 * when in monitor mode, when no SSID is configured, or when neither
 * linked nor explicitly set.
 */
int rtllib_wx_get_essid(struct rtllib_device *ieee, struct iw_request_info *a,
			union iwreq_data *wrqu, char *b)
{
	int len, ret = 0;
	unsigned long flags;

	if (ieee->iw_mode == IW_MODE_MONITOR)
		return -1;

	/* We want avoid to give to the user inconsistent infos*/
	spin_lock_irqsave(&ieee->lock, flags);

	if (ieee->current_network.ssid[0] == '\0' ||
	    ieee->current_network.ssid_len == 0) {
		ret = -1;
		goto out;
	}

	if (ieee->link_state != MAC80211_LINKED &&
	    ieee->link_state != MAC80211_LINKED_SCANNING &&
	    ieee->ssid_set == 0) {
		ret = -1;
		goto out;
	}
	len = ieee->current_network.ssid_len;
	wrqu->essid.length = len;
	/*
	 * Copies exactly ssid_len bytes; no NUL terminator is written -
	 * the consumer must rely on essid.length.
	 */
	strncpy(b, ieee->current_network.ssid, len);
	wrqu->essid.flags = 1;

out:
	spin_unlock_irqrestore(&ieee->lock, flags);

	return ret;
}
EXPORT_SYMBOL(rtllib_wx_get_essid);
/* wx handler: set the TX rate; wx supplies bit/s, the driver stores 100 kbit/s units. */
int rtllib_wx_set_rate(struct rtllib_device *ieee,
		       struct iw_request_info *info,
		       union iwreq_data *wrqu, char *extra)
{
	u32 bps = wrqu->bitrate.value;

	ieee->rate = bps / 100000;
	return 0;
}
EXPORT_SYMBOL(rtllib_wx_set_rate);
/* wx handler: report the current TX rate, converted to bit/s. */
int rtllib_wx_get_rate(struct rtllib_device *ieee,
		       struct iw_request_info *info,
		       union iwreq_data *wrqu, char *extra)
{
	u32 rate_units;

	rate_units = TxCountToDataRate(ieee,
				       ieee->softmac_stats.CurrentShowTxate);
	/* the converted rate counts in 500 kbit/s units */
	wrqu->bitrate.value = rate_units * 500000;

	return 0;
}
EXPORT_SYMBOL(rtllib_wx_get_rate);
int rtllib_wx_set_rts(struct rtllib_device *ieee,
struct iw_request_info *info,
union iwreq_data *wrqu, char *extra)
{
if (wrqu->rts.disabled || !wrqu->rts.fixed) {
ieee->rts = DEFAULT_RTS_THRESHOLD;
} else {
if (wrqu->rts.value < MIN_RTS_THRESHOLD ||
wrqu->rts.value > MAX_RTS_THRESHOLD)
return -EINVAL;
ieee->rts = wrqu->rts.value;
}
return 0;
}
EXPORT_SYMBOL(rtllib_wx_set_rts);
int rtllib_wx_get_rts(struct rtllib_device *ieee,
struct iw_request_info *info,
union iwreq_data *wrqu, char *extra)
{
wrqu->rts.value = ieee->rts;
wrqu->rts.fixed = 0; /* no auto select */
wrqu->rts.disabled = (wrqu->rts.value == DEFAULT_RTS_THRESHOLD);
return 0;
}
EXPORT_SYMBOL(rtllib_wx_get_rts);
/*
 * wx handler: switch the operating mode (infra/adhoc/monitor).  AUTO is
 * coerced to infrastructure; switching to/from monitor also flips the
 * netdev type and monitor-mode state.
 */
int rtllib_wx_set_mode(struct rtllib_device *ieee, struct iw_request_info *a,
		       union iwreq_data *wrqu, char *b)
{
	int set_mode_status = 0;

	rtllib_stop_scan_syncro(ieee);
	mutex_lock(&ieee->wx_mutex);
	switch (wrqu->mode) {
	case IW_MODE_MONITOR:
	case IW_MODE_ADHOC:
	case IW_MODE_INFRA:
		break;
	case IW_MODE_AUTO:
		/* AUTO is treated as infrastructure */
		wrqu->mode = IW_MODE_INFRA;
		break;
	default:
		set_mode_status = -EINVAL;
		goto out;
	}

	/* No change requested: nothing to do */
	if (wrqu->mode == ieee->iw_mode)
		goto out;

	if (wrqu->mode == IW_MODE_MONITOR) {
		ieee->dev->type = ARPHRD_IEEE80211;
		rtllib_EnableNetMonitorMode(ieee->dev, false);
	} else {
		ieee->dev->type = ARPHRD_ETHER;
		if (ieee->iw_mode == IW_MODE_MONITOR)
			rtllib_DisableNetMonitorMode(ieee->dev, false);
	}

	/* Restart the protocol only if it was already running */
	if (!ieee->proto_started) {
		ieee->iw_mode = wrqu->mode;
	} else {
		rtllib_stop_protocol(ieee, true);
		ieee->iw_mode = wrqu->mode;
		rtllib_start_protocol(ieee);
	}

out:
	mutex_unlock(&ieee->wx_mutex);
	return set_mode_status;
}
EXPORT_SYMBOL(rtllib_wx_set_mode);
/*
 * Deferred-work scan while associated: tell the AP we are in power-save,
 * save RF state, temporarily drop to 20 MHz for the scan, then restore
 * channel/bandwidth and wake the queues.  The ordering of steps below is
 * significant.
 */
void rtllib_wx_sync_scan_wq(void *data)
{
	struct rtllib_device *ieee = container_of(data, struct rtllib_device, wx_sync_scan_wq);
	short chan;
	enum ht_extchnl_offset chan_offset = 0;
	enum ht_channel_width bandwidth = 0;
	int b40M = 0;

	mutex_lock(&ieee->wx_mutex);
	/* Without softmac scan support, just run the synchronous scan */
	if (!(ieee->softmac_features & IEEE_SOFTMAC_SCAN)) {
		rtllib_start_scan_syncro(ieee);
		goto out;
	}

	chan = ieee->current_network.channel;

	ieee->leisure_ps_leave(ieee->dev);
	/* notify AP to be in PS mode */
	rtllib_sta_ps_send_null_frame(ieee, 1);
	rtllib_sta_ps_send_null_frame(ieee, 1);

	rtllib_stop_all_queues(ieee);
	rtllib_stop_send_beacons(ieee);

	ieee->link_state = MAC80211_LINKED_SCANNING;
	ieee->link_change(ieee->dev);

	/* wait for ps packet to be kicked out successfully */
	msleep(50);

	ieee->ScanOperationBackupHandler(ieee->dev, SCAN_OPT_BACKUP);

	/* Scanning is done at 20 MHz; remember the 40 MHz setup to restore */
	if (ieee->ht_info->bCurrentHTSupport && ieee->ht_info->enable_ht &&
	    ieee->ht_info->bCurBW40MHz) {
		b40M = 1;
		chan_offset = ieee->ht_info->CurSTAExtChnlOffset;
		bandwidth = (enum ht_channel_width)ieee->ht_info->bCurBW40MHz;
		ieee->set_bw_mode_handler(ieee->dev, HT_CHANNEL_WIDTH_20,
					  HT_EXTCHNL_OFFSET_NO_EXT);
	}

	rtllib_start_scan_syncro(ieee);

	/* Restore the pre-scan channel (center shifted by 2 for 40 MHz) */
	if (b40M) {
		if (chan_offset == HT_EXTCHNL_OFFSET_UPPER)
			ieee->set_chan(ieee->dev, chan + 2);
		else if (chan_offset == HT_EXTCHNL_OFFSET_LOWER)
			ieee->set_chan(ieee->dev, chan - 2);
		else
			ieee->set_chan(ieee->dev, chan);
		ieee->set_bw_mode_handler(ieee->dev, bandwidth, chan_offset);
	} else {
		ieee->set_chan(ieee->dev, chan);
	}

	ieee->ScanOperationBackupHandler(ieee->dev, SCAN_OPT_RESTORE);

	ieee->link_state = MAC80211_LINKED;
	ieee->link_change(ieee->dev);

	/* Notify AP that I wake up again */
	rtllib_sta_ps_send_null_frame(ieee, 0);

	/* Seed the link-detect counters so the watchdog does not fire */
	if (ieee->link_detect_info.NumRecvBcnInPeriod == 0 ||
	    ieee->link_detect_info.NumRecvDataInPeriod == 0) {
		ieee->link_detect_info.NumRecvBcnInPeriod = 1;
		ieee->link_detect_info.NumRecvDataInPeriod = 1;
	}

	if (ieee->iw_mode == IW_MODE_ADHOC)
		rtllib_start_send_beacons(ieee);

	rtllib_wake_all_queues(ieee);

out:
	mutex_unlock(&ieee->wx_mutex);
}
/*
 * wx handler: trigger a scan.  Refused in monitor mode or before the
 * protocol is started; when associated, the scan is deferred to the
 * sync-scan work item so the link can be preserved.
 */
int rtllib_wx_set_scan(struct rtllib_device *ieee, struct iw_request_info *a,
		       union iwreq_data *wrqu, char *b)
{
	if (ieee->iw_mode == IW_MODE_MONITOR || !ieee->proto_started)
		return -1;

	if (ieee->link_state == MAC80211_LINKED)
		schedule_work(&ieee->wx_sync_scan_wq);
		/* intentionally forget to up sem */

	return 0;
}
EXPORT_SYMBOL(rtllib_wx_set_scan);
/*
 * wx handler: set (or clear) the desired SSID and restart the protocol
 * so association is attempted against it.  Refused in monitor mode.
 */
int rtllib_wx_set_essid(struct rtllib_device *ieee,
			struct iw_request_info *a,
			union iwreq_data *wrqu, char *extra)
{
	int ret = 0, len;
	short proto_started;
	unsigned long flags;

	rtllib_stop_scan_syncro(ieee);
	mutex_lock(&ieee->wx_mutex);

	proto_started = ieee->proto_started;

	/* Clamp the requested length to the wx maximum */
	len = min_t(__u16, wrqu->essid.length, IW_ESSID_MAX_SIZE);

	if (ieee->iw_mode == IW_MODE_MONITOR) {
		ret = -1;
		goto out;
	}

	if (proto_started)
		rtllib_stop_protocol(ieee, true);

	/* this is just to be sure that the GET wx callback
	 * has consistent infos. not needed otherwise
	 */
	spin_lock_irqsave(&ieee->lock, flags);

	if (wrqu->essid.flags && wrqu->essid.length) {
		/* ssid_len carries the length; no NUL terminator is added */
		strncpy(ieee->current_network.ssid, extra, len);
		ieee->current_network.ssid_len = len;
		ieee->cannot_notify = false;
		ieee->ssid_set = 1;
	} else {
		/* Empty request clears the SSID */
		ieee->ssid_set = 0;
		ieee->current_network.ssid[0] = '\0';
		ieee->current_network.ssid_len = 0;
	}
	spin_unlock_irqrestore(&ieee->lock, flags);

	if (proto_started)
		rtllib_start_protocol(ieee);
out:
	mutex_unlock(&ieee->wx_mutex);
	return ret;
}
EXPORT_SYMBOL(rtllib_wx_set_essid);
/* wx handler: report the current operating mode (infra/adhoc/monitor). */
int rtllib_wx_get_mode(struct rtllib_device *ieee, struct iw_request_info *a,
		       union iwreq_data *wrqu, char *b)
{
	wrqu->mode = ieee->iw_mode;
	return 0;
}
EXPORT_SYMBOL(rtllib_wx_get_mode);
/*
 * wx handler: enable/disable raw TX.  In monitor mode the carrier state
 * follows the raw-TX state: on for a 0->1 transition, off for 1->0.
 */
int rtllib_wx_set_rawtx(struct rtllib_device *ieee,
			struct iw_request_info *info,
			union iwreq_data *wrqu, char *extra)
{
	int *parms = (int *)extra;
	int enable = (parms[0] > 0);
	short prev = ieee->raw_tx;

	mutex_lock(&ieee->wx_mutex);

	if (enable)
		ieee->raw_tx = 1;
	else
		ieee->raw_tx = 0;

	netdev_info(ieee->dev, "raw TX is %s\n",
		    ieee->raw_tx ? "enabled" : "disabled");

	if (ieee->iw_mode == IW_MODE_MONITOR) {
		if (prev == 0 && ieee->raw_tx)
			netif_carrier_on(ieee->dev);

		/*
		 * Bug fix: carrier-off must fire on the 1 -> 0 transition;
		 * the previous test (raw_tx == 1) could never describe one
		 * and would drop the carrier while raw TX stayed enabled.
		 */
		if (prev && ieee->raw_tx == 0)
			netif_carrier_off(ieee->dev);
	}

	mutex_unlock(&ieee->wx_mutex);

	return 0;
}
EXPORT_SYMBOL(rtllib_wx_set_rawtx);
/* wx handler: report the protocol name, appending "n" when 2.4 GHz 11n is active. */
int rtllib_wx_get_name(struct rtllib_device *ieee, struct iw_request_info *info,
		       union iwreq_data *wrqu, char *extra)
{
	const char *suffix = (ieee->mode & WIRELESS_MODE_N_24G) ? "n" : "";

	scnprintf(wrqu->name, sizeof(wrqu->name), "802.11bg%s", suffix);
	return 0;
}
EXPORT_SYMBOL(rtllib_wx_get_name);
/* this is mostly stolen from hostap */
/*
 * wx handler: configure power-save mode (mostly stolen from hostap).
 * Requires the driver to have registered the wake/sleep/queue callbacks.
 */
int rtllib_wx_set_power(struct rtllib_device *ieee,
			struct iw_request_info *info,
			union iwreq_data *wrqu, char *extra)
{
	int ret = 0;

	/* PS needs all three driver callbacks to be usable at all */
	if ((!ieee->sta_wake_up) ||
	    (!ieee->enter_sleep_state) ||
	    (!ieee->ps_is_queue_empty)) {
		netdev_warn(ieee->dev,
			    "%s(): PS mode is tried to be use but driver missed a callback\n",
			    __func__);
		return -1;
	}

	mutex_lock(&ieee->wx_mutex);

	if (wrqu->power.disabled) {
		ieee->ps = RTLLIB_PS_DISABLED;
		goto exit;
	}
	/* wx passes values in microseconds; the driver keeps milliseconds */
	if (wrqu->power.flags & IW_POWER_TIMEOUT)
		ieee->ps_timeout = wrqu->power.value / 1000;

	if (wrqu->power.flags & IW_POWER_PERIOD)
		ieee->ps_period = wrqu->power.value / 1000;

	switch (wrqu->power.flags & IW_POWER_MODE) {
	case IW_POWER_UNICAST_R:
		ieee->ps = RTLLIB_PS_UNICAST;
		break;
	case IW_POWER_MULTICAST_R:
		ieee->ps = RTLLIB_PS_MBCAST;
		break;
	case IW_POWER_ALL_R:
		ieee->ps = RTLLIB_PS_UNICAST | RTLLIB_PS_MBCAST;
		break;

	case IW_POWER_ON:
		/* keep the current PS flags; only timeout/period changed */
		break;

	default:
		ret = -EINVAL;
		goto exit;
	}
exit:
	mutex_unlock(&ieee->wx_mutex);
	return ret;
}
EXPORT_SYMBOL(rtllib_wx_set_power);
/* this is stolen from hostap */
/*
 * wx handler: report the power-save configuration (stolen from hostap).
 * Converts the stored millisecond values back to microseconds.
 */
int rtllib_wx_get_power(struct rtllib_device *ieee,
			struct iw_request_info *info,
			union iwreq_data *wrqu, char *extra)
{
	mutex_lock(&ieee->wx_mutex);

	if (ieee->ps == RTLLIB_PS_DISABLED) {
		wrqu->power.disabled = 1;
		goto exit;
	}

	wrqu->power.disabled = 0;

	/* Report whichever of timeout/period the caller asked about */
	if ((wrqu->power.flags & IW_POWER_TYPE) == IW_POWER_TIMEOUT) {
		wrqu->power.flags = IW_POWER_TIMEOUT;
		wrqu->power.value = ieee->ps_timeout * 1000;
	} else {
		wrqu->power.flags = IW_POWER_PERIOD;
		wrqu->power.value = ieee->ps_period * 1000;
	}

	/* Translate the internal PS flags into the wx receive-mode flags */
	if ((ieee->ps & (RTLLIB_PS_MBCAST | RTLLIB_PS_UNICAST)) ==
	    (RTLLIB_PS_MBCAST | RTLLIB_PS_UNICAST))
		wrqu->power.flags |= IW_POWER_ALL_R;
	else if (ieee->ps & RTLLIB_PS_MBCAST)
		wrqu->power.flags |= IW_POWER_MULTICAST_R;
	else
		wrqu->power.flags |= IW_POWER_UNICAST_R;

exit:
	mutex_unlock(&ieee->wx_mutex);
	return 0;
}
EXPORT_SYMBOL(rtllib_wx_get_power);
| linux-master | drivers/staging/rtl8192e/rtllib_softmac_wx.c |
// SPDX-License-Identifier: GPL-2.0
/*
* Copyright(c) 2008 - 2010 Realtek Corporation. All rights reserved.
*
* Contact Information: wlanfae <[email protected]>
*/
#include "rtllib.h"
#include "rtl819x_HT.h"
/* Bitmap of MCS indexes allowed for rate selection: bytes 0-8 plus the
 * low five bits of byte 9 are set, i.e. every MCS bit this table covers.
 */
u8 MCS_FILTER_ALL[16] = {
	0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
	0xff, 0x1f, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00
};

/* Single-spatial-stream subset: MCS 0-7 (byte 0) plus bit 0 of byte 3. */
u8 MCS_FILTER_1SS[16] = {
	0xff, 0x00, 0x00, 0x01, 0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00
};

/* Data-rate lookup indexed as [40MHz?][shortGI?][MCS index].  Values
 * appear to be in 0.5 Mb/s units (e.g. MCS0/20MHz/long-GI = 13 = 6.5
 * Mb/s) — consumed by HTMcsToDataRate()/TxCountToDataRate().
 */
u16 MCS_DATA_RATE[2][2][77] = {
	{{13, 26, 39, 52, 78, 104, 117, 130, 26, 52, 78, 104, 156, 208, 234,
	 260, 39, 78, 117, 234, 312, 351, 390, 52, 104, 156, 208, 312, 416,
	 468, 520, 0, 78, 104, 130, 117, 156, 195, 104, 130, 130, 156, 182,
	 182, 208, 156, 195, 195, 234, 273, 273, 312, 130, 156, 181, 156,
	 181, 208, 234, 208, 234, 260, 260, 286, 195, 234, 273, 234, 273,
	 312, 351, 312, 351, 390, 390, 429},
	 {14, 29, 43, 58, 87, 116, 130, 144, 29, 58, 87, 116, 173, 231, 260, 289,
	 43, 87, 130, 173, 260, 347, 390, 433, 58, 116, 173, 231, 347, 462, 520,
	 578, 0, 87, 116, 144, 130, 173, 217, 116, 144, 144, 173, 202, 202, 231,
	 173, 217, 217, 260, 303, 303, 347, 144, 173, 202, 173, 202, 231, 260,
	 231, 260, 289, 289, 318, 217, 260, 303, 260, 303, 347, 390, 347, 390,
	 433, 433, 477} },
	{{27, 54, 81, 108, 162, 216, 243, 270, 54, 108, 162, 216, 324, 432, 486,
	 540, 81, 162, 243, 324, 486, 648, 729, 810, 108, 216, 324, 432, 648,
	 864, 972, 1080, 12, 162, 216, 270, 243, 324, 405, 216, 270, 270, 324,
	 378, 378, 432, 324, 405, 405, 486, 567, 567, 648, 270, 324, 378, 324,
	 378, 432, 486, 432, 486, 540, 540, 594, 405, 486, 567, 486, 567, 648,
	 729, 648, 729, 810, 810, 891},
	 {30, 60, 90, 120, 180, 240, 270, 300, 60, 120, 180, 240, 360, 480, 540,
	 600, 90, 180, 270, 360, 540, 720, 810, 900, 120, 240, 360, 480, 720,
	 960, 1080, 1200, 13, 180, 240, 300, 270, 360, 450, 240, 300, 300, 360,
	 420, 420, 480, 360, 450, 450, 540, 630, 630, 720, 300, 360, 420, 360,
	 420, 480, 540, 480, 540, 600, 600, 660, 450, 540, 630, 540, 630, 720,
	 810, 720, 810, 900, 900, 990} }
};
/* First three BSSID bytes (vendor OUIs) of access points known to need
 * interoperability workarounds; matched in IsHTHalfNmodeAPs() and
 * HTIOTPeerDetermine().
 */
static u8 UNKNOWN_BORADCOM[3] = {0x00, 0x14, 0xbf};
static u8 LINKSYSWRT330_LINKSYSWRT300_BROADCOM[3] = {0x00, 0x1a, 0x70};
static u8 LINKSYSWRT350_LINKSYSWRT150_BROADCOM[3] = {0x00, 0x1d, 0x7e};
static u8 BELKINF5D8233V1_RALINK[3] = {0x00, 0x17, 0x3f};
static u8 BELKINF5D82334V3_RALINK[3] = {0x00, 0x1c, 0xdf};
static u8 PCI_RALINK[3] = {0x00, 0x90, 0xcc};
static u8 EDIMAX_RALINK[3] = {0x00, 0x0e, 0x2e};
static u8 AIRLINK_RALINK[3] = {0x00, 0x18, 0x02};
static u8 DLINK_ATHEROS_1[3] = {0x00, 0x1c, 0xf0};
static u8 DLINK_ATHEROS_2[3] = {0x00, 0x21, 0x91};
static u8 CISCO_BROADCOM[3] = {0x00, 0x17, 0x94};
static u8 LINKSYS_MARVELL_4400N[3] = {0x00, 0x14, 0xa4};
/* Load the driver's default (regulatory) HT configuration into ht_info;
 * these "Reg" values are later intersected with the peer's capabilities.
 */
void HTUpdateDefaultSetting(struct rtllib_device *ieee)
{
	struct rt_hi_throughput *ht_info = ieee->ht_info;

	/* Short GI enabled for both bandwidths by default. */
	ht_info->bRegShortGI20MHz = 1;
	ht_info->bRegShortGI40MHz = 1;

	ht_info->bRegBW40MHz = 1;
	/* The original if/else on bRegBW40MHz assigned 1 in one branch and
	 * true in the other — identical values, so the branch was dead.
	 */
	ht_info->bRegSuppCCK = 1;

	ht_info->nAMSDU_MaxSize = 7935UL;
	ht_info->bAMSDU_Support = 0;

	ht_info->bAMPDUEnable = 1;
	ht_info->AMPDU_Factor = 2;
	ht_info->MPDU_Density = 0;

	/* MIMO power save mode; the original also remapped value 2 to 3
	 * immediately after assigning the constant 3, which could never
	 * trigger and has been removed.
	 */
	ht_info->self_mimo_ps = 3;

	ieee->tx_dis_rate_fallback = 0;
	ieee->tx_use_drv_assinged_rate = 0;

	ieee->bTxEnableFwCalcDur = 1;

	ht_info->reg_rt2rt_aggregation = 1;

	ht_info->reg_rx_reorder_enable = 1;
	ht_info->rx_reorder_win_size = 64;
	ht_info->rx_reorder_pending_time = 30;
}
/* Convert an MCS index into a data rate using the current bandwidth and
 * short-GI settings (table units match MCS_DATA_RATE).
 */
static u16 HTMcsToDataRate(struct rtllib_device *ieee, u8 nMcsRate)
{
	struct rt_hi_throughput *ht = ieee->ht_info;
	u8 bw40 = ht->bCurBW40MHz ? 1 : 0;
	u8 sgi;

	/* Pick the short-GI flag that matches the active bandwidth. */
	if (bw40)
		sgi = ht->bCurShortGI40MHz ? 1 : 0;
	else
		sgi = ht->bCurShortGI20MHz ? 1 : 0;

	return MCS_DATA_RATE[bw40][sgi][nMcsRate & 0x7f];
}
/* Map a hardware rate code to a data rate: codes 0-11 are legacy
 * CCK/OFDM rates (returned as 802.11 rate values), codes 0x10-0x4f are
 * HT rates whose high nibble selects bandwidth/short-GI.
 */
u16 TxCountToDataRate(struct rtllib_device *ieee, u8 nDataRate)
{
	static const u16 CCKOFDMRate[12] = {0x02, 0x04, 0x0b, 0x16, 0x0c, 0x12,
					    0x18, 0x24, 0x30, 0x48, 0x60, 0x6c};
	u8 bw40 = 0;
	u8 sgi = 0;

	/* Legacy rates map straight through the table. */
	if (nDataRate < 12)
		return CCKOFDMRate[nDataRate];

	switch (nDataRate & 0xf0) {
	case 0x10:		/* 20 MHz, long GI */
		break;
	case 0x20:		/* 40 MHz, long GI */
		bw40 = 1;
		break;
	case 0x30:		/* 20 MHz, short GI */
		sgi = 1;
		break;
	case 0x40:		/* 40 MHz, short GI */
		bw40 = 1;
		sgi = 1;
		break;
	default:		/* anything else: 20 MHz, long GI */
		break;
	}

	return MCS_DATA_RATE[bw40][sgi][nDataRate & 0xf];
}
/* Report whether the current AP is one of the vendor types (Ralink,
 * Broadcom, or an RT2RT aggregation peer) the caller treats specially.
 */
bool IsHTHalfNmodeAPs(struct rtllib_device *ieee)
{
	struct rtllib_network *net = &ieee->current_network;

	/* Known Ralink BSSIDs, or an explicit Ralink capability IE. */
	if (!memcmp(net->bssid, BELKINF5D8233V1_RALINK, 3) ||
	    !memcmp(net->bssid, BELKINF5D82334V3_RALINK, 3) ||
	    !memcmp(net->bssid, PCI_RALINK, 3) ||
	    !memcmp(net->bssid, EDIMAX_RALINK, 3) ||
	    !memcmp(net->bssid, AIRLINK_RALINK, 3) ||
	    net->ralink_cap_exist)
		return true;

	/* Known Broadcom BSSIDs, or an explicit Broadcom capability IE. */
	if (!memcmp(net->bssid, UNKNOWN_BORADCOM, 3) ||
	    !memcmp(net->bssid, LINKSYSWRT330_LINKSYSWRT300_BROADCOM, 3) ||
	    !memcmp(net->bssid, LINKSYSWRT350_LINKSYSWRT150_BROADCOM, 3) ||
	    net->broadcom_cap_exist)
		return true;

	/* Realtek-to-Realtek aggregation peers also qualify. */
	if (net->bssht.bd_rt2rt_aggregation)
		return true;

	return false;
}
/* Classify the current AP's chipset vendor from its RT2RT IE, vendor
 * capability flags, or known BSSID OUI prefixes so later code can apply
 * per-vendor interoperability workarounds.  Result goes in IOTPeer.
 */
static void HTIOTPeerDetermine(struct rtllib_device *ieee)
{
	struct rt_hi_throughput *ht_info = ieee->ht_info;
	struct rtllib_network *net = &ieee->current_network;

	if (net->bssht.bd_rt2rt_aggregation) {
		/* Realtek peer; refine using the RT2RT HT mode bits. */
		ht_info->IOTPeer = HT_IOT_PEER_REALTEK;
		if (net->bssht.rt2rt_ht_mode & RT_HT_CAP_USE_92SE)
			ht_info->IOTPeer = HT_IOT_PEER_REALTEK_92SE;
		if (net->bssht.rt2rt_ht_mode & RT_HT_CAP_USE_SOFTAP)
			ht_info->IOTPeer = HT_IOT_PEER_92U_SOFTAP;
	} else if (net->broadcom_cap_exist) {
		ht_info->IOTPeer = HT_IOT_PEER_BROADCOM;
	} else if (!memcmp(net->bssid, UNKNOWN_BORADCOM, 3) ||
		   !memcmp(net->bssid, LINKSYSWRT330_LINKSYSWRT300_BROADCOM, 3) ||
		   !memcmp(net->bssid, LINKSYSWRT350_LINKSYSWRT150_BROADCOM, 3)) {
		ht_info->IOTPeer = HT_IOT_PEER_BROADCOM;
	} else if ((memcmp(net->bssid, BELKINF5D8233V1_RALINK, 3) == 0) ||
		   (memcmp(net->bssid, BELKINF5D82334V3_RALINK, 3) == 0) ||
		   (memcmp(net->bssid, PCI_RALINK, 3) == 0) ||
		   (memcmp(net->bssid, EDIMAX_RALINK, 3) == 0) ||
		   (memcmp(net->bssid, AIRLINK_RALINK, 3) == 0) ||
		   net->ralink_cap_exist) {
		ht_info->IOTPeer = HT_IOT_PEER_RALINK;
	} else if ((net->atheros_cap_exist) ||
		   (memcmp(net->bssid, DLINK_ATHEROS_1, 3) == 0) ||
		   (memcmp(net->bssid, DLINK_ATHEROS_2, 3) == 0)) {
		ht_info->IOTPeer = HT_IOT_PEER_ATHEROS;
	} else if ((memcmp(net->bssid, CISCO_BROADCOM, 3) == 0) ||
		   net->cisco_cap_exist) {
		ht_info->IOTPeer = HT_IOT_PEER_CISCO;
	} else if ((memcmp(net->bssid, LINKSYS_MARVELL_4400N, 3) == 0) ||
		   net->marvell_cap_exist) {
		ht_info->IOTPeer = HT_IOT_PEER_MARVELL;
	} else if (net->airgo_cap_exist) {
		ht_info->IOTPeer = HT_IOT_PEER_AIRGO;
	} else {
		ht_info->IOTPeer = HT_IOT_PEER_UNKNOWN;
	}

	netdev_dbg(ieee->dev, "IOTPEER: %x\n", ht_info->IOTPeer);
}
/* IOT quirk probe stub: disabling MCS14 is never requested, so this
 * always reports "no action"; kept for the probe-table structure in
 * HTResetSelfAndSavePeerSetting().
 */
static u8 HTIOTActIsDisableMCS14(struct rtllib_device *ieee, u8 *PeerMacAddr)
{
	return 0;
}
/* IOT quirk probe stub: disabling MCS15 is never requested. */
static bool HTIOTActIsDisableMCS15(struct rtllib_device *ieee)
{
	return false;
}
/* IOT quirk probe stub: disabling all 2-spatial-stream MCS rates is
 * never requested.
 */
static bool HTIOTActIsDisableMCSTwoSpatialStream(struct rtllib_device *ieee)
{
	return false;
}
/* IOT quirk probe stub: EDCA turbo is never disabled per-peer here. */
static u8 HTIOTActIsDisableEDCATurbo(struct rtllib_device *ieee,
				     u8 *PeerMacAddr)
{
	return false;
}
/* IOT quirk probe: Broadcom peers get management frames at CCK/6M. */
static u8 HTIOTActIsMgntUseCCK6M(struct rtllib_device *ieee,
				 struct rtllib_network *network)
{
	return (ieee->ht_info->IOTPeer == HT_IOT_PEER_BROADCOM) ? 1 : 0;
}
/* IOT quirk probe: CDD frame-sync workaround applies to Broadcom peers. */
static u8 HTIOTActIsCCDFsync(struct rtllib_device *ieee)
{
	return (ieee->ht_info->IOTPeer == HT_IOT_PEER_BROADCOM) ? 1 : 0;
}
/* Derive the rate-adaptation function flags from the IOT peer type and
 * the current IOT action mask.  bPeerRx2ss tells whether the peer
 * advertised any 2-spatial-stream MCS (MCS[1] != 0).
 */
static void HTIOTActDetermineRaFunc(struct rtllib_device *ieee, bool bPeerRx2ss)
{
	struct rt_hi_throughput *ht_info = ieee->ht_info;

	ht_info->iot_ra_func &= HT_IOT_RAFUNC_DISABLE_ALL;

	/* Single-stream Ralink peers get the 1R workaround. */
	if (ht_info->IOTPeer == HT_IOT_PEER_RALINK && !bPeerRx2ss)
		ht_info->iot_ra_func |= HT_IOT_RAFUNC_PEER_1R;

	if (ht_info->iot_action & HT_IOT_ACT_AMSDU_ENABLE)
		ht_info->iot_ra_func |= HT_IOT_RAFUNC_TX_AMSDU;
}
/* Clear all IOT (interoperability workaround) state back to "unknown
 * peer, no actions".
 */
void HTResetIOTSetting(struct rt_hi_throughput *ht_info)
{
	ht_info->iot_action = 0;
	ht_info->IOTPeer = HT_IOT_PEER_UNKNOWN;
	ht_info->iot_ra_func = 0;
}
/* Build our HT Capabilities element into posHTCap and report its length
 * via *len.  IsEncrypt selects stricter A-MPDU parameters; bAssoc
 * enables the per-vendor IOT rate masks and the EWC (pre-11n) framing.
 */
void HTConstructCapabilityElement(struct rtllib_device *ieee, u8 *posHTCap,
				  u8 *len, u8 IsEncrypt, bool bAssoc)
{
	struct rt_hi_throughput *pHT = ieee->ht_info;
	struct ht_capab_ele *pCapELE = NULL;

	if (!posHTCap || !pHT) {
		netdev_warn(ieee->dev,
			    "%s(): posHTCap and ht_info are null\n", __func__);
		return;
	}
	memset(posHTCap, 0, *len);

	/* EWC peers expect a 4-byte OUI/type prefix before the standard
	 * capability fields.
	 */
	if ((bAssoc) && (pHT->ePeerHTSpecVer == HT_SPEC_VER_EWC)) {
		static const u8 EWC11NHTCap[] = { 0x00, 0x90, 0x4c, 0x33 };

		memcpy(posHTCap, EWC11NHTCap, sizeof(EWC11NHTCap));
		pCapELE = (struct ht_capab_ele *)&posHTCap[4];
		*len = 30 + 2;
	} else {
		pCapELE = (struct ht_capab_ele *)posHTCap;
		*len = 26 + 2;
	}

	pCapELE->AdvCoding = 0;
	/* Advertise 20 MHz only when the AP restricts us to half-N mode. */
	if (ieee->GetHalfNmodeSupportByAPsHandler(ieee->dev))
		pCapELE->ChlWidth = 0;
	else
		pCapELE->ChlWidth = (pHT->bRegBW40MHz ? 1 : 0);

	pCapELE->MimoPwrSave = pHT->self_mimo_ps;
	pCapELE->GreenField = 0;
	pCapELE->ShortGI20Mhz = 1;
	pCapELE->ShortGI40Mhz = 1;
	pCapELE->TxSTBC = 1;
	pCapELE->RxSTBC = 0;
	pCapELE->DelayBA = 0;
	pCapELE->MaxAMSDUSize = (MAX_RECEIVE_BUFFER_SIZE >= 7935) ? 1 : 0;
	pCapELE->DssCCk = ((pHT->bRegBW40MHz) ? (pHT->bRegSuppCCK ? 1 : 0) : 0);
	pCapELE->PSMP = 0;
	pCapELE->LSigTxopProtect = 0;

	netdev_dbg(ieee->dev,
		   "TX HT cap/info ele BW=%d MaxAMSDUSize:%d DssCCk:%d\n",
		   pCapELE->ChlWidth, pCapELE->MaxAMSDUSize, pCapELE->DssCCk);

	/* Stricter MPDU density / smaller A-MPDU factor when encryption is
	 * active — presumably a hardware constraint; TODO confirm.
	 */
	if (IsEncrypt) {
		pCapELE->MPDUDensity = 7;
		pCapELE->MaxRxAMPDUFactor = 2;
	} else {
		pCapELE->MaxRxAMPDUFactor = 3;
		pCapELE->MPDUDensity = 0;
	}

	memcpy(pCapELE->MCS, ieee->reg_dot11ht_oper_rate_set, 16);
	memset(&pCapELE->ExtHTCapInfo, 0, 2);
	memset(pCapELE->TxBFCap, 0, 4);
	pCapELE->ASCap = 0;

	/* During association, mask MCS bits for peers known to misbehave
	 * with them (IOT workarounds), and drop to 1SS/20MHz for half-N.
	 */
	if (bAssoc) {
		if (pHT->iot_action & HT_IOT_ACT_DISABLE_MCS15)
			pCapELE->MCS[1] &= 0x7f;
		if (pHT->iot_action & HT_IOT_ACT_DISABLE_MCS14)
			pCapELE->MCS[1] &= 0xbf;
		if (pHT->iot_action & HT_IOT_ACT_DISABLE_ALL_2SS)
			pCapELE->MCS[1] &= 0x00;
		if (pHT->iot_action & HT_IOT_ACT_DISABLE_RX_40MHZ_SHORT_GI)
			pCapELE->ShortGI40Mhz = 0;
		if (ieee->GetHalfNmodeSupportByAPsHandler(ieee->dev)) {
			pCapELE->ChlWidth = 0;
			pCapELE->MCS[1] = 0;
		}
	}
}
/* Build our HT Information element into posHTInfo, reporting its length
 * via *len.  Only an ad-hoc station originates this element; in any
 * other mode *len is set to 0 and nothing is emitted.
 */
void HTConstructInfoElement(struct rtllib_device *ieee, u8 *posHTInfo,
			    u8 *len, u8 IsEncrypt)
{
	struct rt_hi_throughput *pHT = ieee->ht_info;
	struct ht_info_ele *pHTInfoEle = (struct ht_info_ele *)posHTInfo;

	/* pHTInfoEle aliases posHTInfo, so this effectively only checks
	 * posHTInfo for NULL.
	 */
	if (!posHTInfo || !pHTInfoEle) {
		netdev_warn(ieee->dev,
			    "%s(): posHTInfo and pHTInfoEle are null\n",
			    __func__);
		return;
	}
	memset(posHTInfo, 0, *len);

	if (ieee->iw_mode == IW_MODE_ADHOC) {
		pHTInfoEle->ControlChl = ieee->current_network.channel;
		/* Extension channel: none without 40 MHz; otherwise above
		 * the primary for channels 1-6, below it for the rest.
		 */
		pHTInfoEle->ExtChlOffset = ((!pHT->bRegBW40MHz) ?
					    HT_EXTCHNL_OFFSET_NO_EXT :
					    (ieee->current_network.channel <= 6)
					    ? HT_EXTCHNL_OFFSET_UPPER :
					    HT_EXTCHNL_OFFSET_LOWER);
		pHTInfoEle->RecommemdedTxWidth = pHT->bRegBW40MHz;
		pHTInfoEle->RIFS = 0;
		pHTInfoEle->PSMPAccessOnly = 0;
		pHTInfoEle->SrvIntGranularity = 0;
		pHTInfoEle->OptMode = pHT->current_op_mode;
		pHTInfoEle->NonGFDevPresent = 0;
		pHTInfoEle->DualBeacon = 0;
		pHTInfoEle->SecondaryBeacon = 0;
		pHTInfoEle->LSigTxopProtectFull = 0;
		pHTInfoEle->PcoActive = 0;
		pHTInfoEle->PcoPhase = 0;

		memset(pHTInfoEle->BasicMSC, 0, 16);

		*len = 22 + 2;
	} else {
		*len = 0;
	}
}
/* Build the Realtek proprietary RT2RT aggregation element (Realtek OUI
 * 00:e0:4c plus type/subtype/capability bytes) into posRT2RTAgg.
 */
void HTConstructRT2RTAggElement(struct rtllib_device *ieee, u8 *posRT2RTAgg,
				u8 *len)
{
	if (!posRT2RTAgg) {
		netdev_warn(ieee->dev, "%s(): posRT2RTAgg is null\n", __func__);
		return;
	}
	memset(posRT2RTAgg, 0, *len);

	posRT2RTAgg[0] = 0x00;
	posRT2RTAgg[1] = 0xe0;
	posRT2RTAgg[2] = 0x4c;
	posRT2RTAgg[3] = 0x02;
	posRT2RTAgg[4] = 0x01;
	posRT2RTAgg[5] = 0x30;

	/* Advertise wake-on-WLAN support in the capability byte. */
	if (ieee->bSupportRemoteWakeUp)
		posRT2RTAgg[5] |= RT_HT_CAP_USE_WOW;

	*len = 6 + 2;
}
/* Clamp the candidate MCS set to what the current wireless mode allows.
 * Returns true on success, false only for a NULL pOperateMCS.
 */
static u8 HT_PickMCSRate(struct rtllib_device *ieee, u8 *pOperateMCS)
{
	u8 i;

	if (!pOperateMCS) {
		netdev_warn(ieee->dev, "%s(): pOperateMCS is null\n", __func__);
		return false;
	}

	switch (ieee->mode) {
	case WIRELESS_MODE_B:
	case WIRELESS_MODE_G:
		/* Legacy modes: no HT rates at all. */
		for (i = 0; i <= 15; i++)
			pOperateMCS[i] = 0;
		break;
	case WIRELESS_MODE_N_24G:
		/* 11n: keep only the 1SS/2SS masks and the MCS32 bit. */
		pOperateMCS[0] &= RATE_ADPT_1SS_MASK;
		pOperateMCS[1] &= RATE_ADPT_2SS_MASK;
		pOperateMCS[3] &= RATE_ADPT_MCS32_MASK;
		break;
	default:
		break;
	}
	return true;
}
/* Return the MCS index with the highest data rate among the bits set in
 * both pMCSRateSet and pMCSFilter, with bit 7 set to mark it as an HT
 * rate.  Returns false (0) when the intersection is empty or an
 * argument is NULL.
 */
u8 HTGetHighestMCSRate(struct rtllib_device *ieee, u8 *pMCSRateSet,
		       u8 *pMCSFilter)
{
	u8 byte_idx, bit_idx;
	u8 best = 0;
	u8 found = 0;
	u8 usable[16];

	if (!pMCSRateSet || !pMCSFilter) {
		netdev_warn(ieee->dev,
			    "%s(): pMCSRateSet and pMCSFilter are null\n",
			    __func__);
		return false;
	}

	/* Intersect the advertised rate set with the caller's filter. */
	for (byte_idx = 0; byte_idx < 16; byte_idx++)
		usable[byte_idx] = pMCSRateSet[byte_idx] & pMCSFilter[byte_idx];

	/* Visit every set bit and keep the MCS with the highest rate. */
	for (byte_idx = 0; byte_idx < 16; byte_idx++) {
		u8 bits = usable[byte_idx];

		for (bit_idx = 0; bits != 0; bit_idx++, bits >>= 1) {
			if (!(bits & 1))
				continue;
			found = 1;
			if (HTMcsToDataRate(ieee, 8 * byte_idx + bit_idx) >
			    HTMcsToDataRate(ieee, best))
				best = 8 * byte_idx + bit_idx;
		}
	}

	if (!found)
		return false;

	/* Bit 7 flags the returned value as an MCS (HT) rate. */
	return best | 0x80;
}
/* Intersect the peer-supported MCS set with our regulatory Tx set into
 * pOperateMCS, clamp it by wireless mode, then zero everything past
 * byte 1 (presumably only MCS 0-15 are used by this chip — confirm).
 */
static u8 HTFilterMCSRate(struct rtllib_device *ieee, u8 *pSupportMCS,
			  u8 *pOperateMCS)
{
	u8 i;

	for (i = 0; i <= 15; i++)
		pOperateMCS[i] = ieee->reg_dot11tx_ht_oper_rate_set[i] &
				 pSupportMCS[i];

	HT_PickMCSRate(ieee, pOperateMCS);

	/* Half-N APs only allow single-spatial-stream rates. */
	if (ieee->GetHalfNmodeSupportByAPsHandler(ieee->dev))
		pOperateMCS[1] = 0;

	for (i = 2; i <= 15; i++)
		pOperateMCS[i] = 0;

	return true;
}
void HTSetConnectBwMode(struct rtllib_device *ieee,
enum ht_channel_width bandwidth,
enum ht_extchnl_offset Offset);
/* Digest the AP's HT capability/information elements after association
 * and derive the current operating parameters: bandwidth, short GI,
 * A-MSDU/A-MPDU limits, MPDU density, and the negotiated MCS rate set.
 */
void HTOnAssocRsp(struct rtllib_device *ieee)
{
	struct rt_hi_throughput *ht_info = ieee->ht_info;
	struct ht_capab_ele *pPeerHTCap = NULL;
	struct ht_info_ele *pPeerHTInfo = NULL;
	u16 nMaxAMSDUSize = 0;
	u8 *pMcsFilter = NULL;
	static const u8 EWC11NHTCap[] = { 0x00, 0x90, 0x4c, 0x33 };
	static const u8 EWC11NHTInfo[] = { 0x00, 0x90, 0x4c, 0x34 };

	if (!ht_info->bCurrentHTSupport) {
		netdev_warn(ieee->dev, "%s(): HT_DISABLE\n", __func__);
		return;
	}
	netdev_dbg(ieee->dev, "%s(): HT_ENABLE\n", __func__);

	/* EWC (pre-11n) elements carry a 4-byte OUI/type prefix before
	 * the standard fields; skip it when present.
	 */
	if (!memcmp(ht_info->PeerHTCapBuf, EWC11NHTCap, sizeof(EWC11NHTCap)))
		pPeerHTCap = (struct ht_capab_ele *)(&ht_info->PeerHTCapBuf[4]);
	else
		pPeerHTCap = (struct ht_capab_ele *)(ht_info->PeerHTCapBuf);

	if (!memcmp(ht_info->PeerHTInfoBuf, EWC11NHTInfo, sizeof(EWC11NHTInfo)))
		pPeerHTInfo = (struct ht_info_ele *)
			     (&ht_info->PeerHTInfoBuf[4]);
	else
		pPeerHTInfo = (struct ht_info_ele *)(ht_info->PeerHTInfoBuf);

#ifdef VERBOSE_DEBUG
	/* NOTE(review): print_hex_dump_bytes() takes (prefix_str,
	 * prefix_type, buf, len); passing "%s: " and __func__ separately
	 * looks like one argument too many — confirm this still builds
	 * with VERBOSE_DEBUG defined.
	 */
	print_hex_dump_bytes("%s: ", __func__, DUMP_PREFIX_NONE,
			     pPeerHTCap, sizeof(struct ht_capab_ele));
#endif
	HTSetConnectBwMode(ieee, (enum ht_channel_width)(pPeerHTCap->ChlWidth),
			   (enum ht_extchnl_offset)(pPeerHTInfo->ExtChlOffset));
	ht_info->cur_tx_bw40mhz = ((pPeerHTInfo->RecommemdedTxWidth == 1) ?
				   true : false);

	/* Each "current" flag is our regulatory setting ANDed with the
	 * peer's advertised capability.
	 */
	ht_info->bCurShortGI20MHz = ((ht_info->bRegShortGI20MHz) ?
				    ((pPeerHTCap->ShortGI20Mhz == 1) ?
				    true : false) : false);
	ht_info->bCurShortGI40MHz = ((ht_info->bRegShortGI40MHz) ?
				     ((pPeerHTCap->ShortGI40Mhz == 1) ?
				     true : false) : false);
	ht_info->bCurSuppCCK = ((ht_info->bRegSuppCCK) ?
			       ((pPeerHTCap->DssCCk == 1) ? true :
			       false) : false);

	ht_info->bCurrent_AMSDU_Support = ht_info->bAMSDU_Support;

	/* A-MSDU size: the smaller of our limit and the peer's
	 * advertised 3839/7935 maximum.
	 */
	nMaxAMSDUSize = (pPeerHTCap->MaxAMSDUSize == 0) ? 3839 : 7935;

	if (ht_info->nAMSDU_MaxSize > nMaxAMSDUSize)
		ht_info->nCurrent_AMSDU_MaxSize = nMaxAMSDUSize;
	else
		ht_info->nCurrent_AMSDU_MaxSize = ht_info->nAMSDU_MaxSize;

	ht_info->bCurrentAMPDUEnable = ht_info->bAMPDUEnable;
	/* With WEP/TKIP, disable A-MPDU toward Atheros/unknown peers. */
	if (ieee->rtllib_ap_sec_type &&
	    (ieee->rtllib_ap_sec_type(ieee) & (SEC_ALG_WEP | SEC_ALG_TKIP))) {
		if ((ht_info->IOTPeer == HT_IOT_PEER_ATHEROS) ||
		    (ht_info->IOTPeer == HT_IOT_PEER_UNKNOWN))
			ht_info->bCurrentAMPDUEnable = false;
	}

	/* A-MPDU factor: honor the peer's limit, except RT2RT peers with
	 * no pairwise key, which may use the full 64K aggregate.
	 */
	if (!ht_info->reg_rt2rt_aggregation) {
		if (ht_info->AMPDU_Factor > pPeerHTCap->MaxRxAMPDUFactor)
			ht_info->CurrentAMPDUFactor =
				pPeerHTCap->MaxRxAMPDUFactor;
		else
			ht_info->CurrentAMPDUFactor = ht_info->AMPDU_Factor;
	} else {
		if (ieee->current_network.bssht.bd_rt2rt_aggregation) {
			if (ieee->pairwise_key_type != KEY_TYPE_NA)
				ht_info->CurrentAMPDUFactor =
					pPeerHTCap->MaxRxAMPDUFactor;
			else
				ht_info->CurrentAMPDUFactor = HT_AGG_SIZE_64K;
		} else {
			ht_info->CurrentAMPDUFactor = min_t(u32, pPeerHTCap->MaxRxAMPDUFactor,
							    HT_AGG_SIZE_32K);
		}
	}

	/* MPDU density: take the stricter (larger) of the two. */
	ht_info->current_mpdu_density = max_t(u8, ht_info->MPDU_Density,
					      pPeerHTCap->MPDUDensity);
	if (ht_info->iot_action & HT_IOT_ACT_TX_USE_AMSDU_8K) {
		ht_info->bCurrentAMPDUEnable = false;
		ht_info->ForcedAMSDUMode = HT_AGG_FORCE_ENABLE;
	}
	ht_info->cur_rx_reorder_enable = ht_info->reg_rx_reorder_enable;

	/* Guard against APs that advertise an empty MCS set. */
	if (pPeerHTCap->MCS[0] == 0)
		pPeerHTCap->MCS[0] = 0xff;

	HTIOTActDetermineRaFunc(ieee, ((pPeerHTCap->MCS[1]) != 0));

	HTFilterMCSRate(ieee, pPeerHTCap->MCS, ieee->dot11ht_oper_rate_set);

	/* A static-MIMO-power-save peer can only receive 1SS rates. */
	ht_info->peer_mimo_ps = pPeerHTCap->MimoPwrSave;
	if (ht_info->peer_mimo_ps == MIMO_PS_STATIC)
		pMcsFilter = MCS_FILTER_1SS;
	else
		pMcsFilter = MCS_FILTER_ALL;
	ieee->HTHighestOperaRate = HTGetHighestMCSRate(ieee,
						       ieee->dot11ht_oper_rate_set,
						       pMcsFilter);
	ieee->HTCurrentOperaRate = ieee->HTHighestOperaRate;

	ht_info->current_op_mode = pPeerHTInfo->OptMode;
}
/* Reset all negotiated ("current") HT state to pre-association
 * defaults; called before (re)connecting.
 */
void HTInitializeHTInfo(struct rtllib_device *ieee)
{
	struct rt_hi_throughput *ht_info = ieee->ht_info;

	ht_info->bCurrentHTSupport = false;

	/* Start at 20 MHz / long GI until the peer says otherwise. */
	ht_info->bCurBW40MHz = false;
	ht_info->cur_tx_bw40mhz = false;

	ht_info->bCurShortGI20MHz = false;
	ht_info->bCurShortGI40MHz = false;
	ht_info->forced_short_gi = false;

	ht_info->bCurSuppCCK = true;

	ht_info->bCurrent_AMSDU_Support = false;
	ht_info->nCurrent_AMSDU_MaxSize = ht_info->nAMSDU_MaxSize;
	ht_info->current_mpdu_density = ht_info->MPDU_Density;
	ht_info->CurrentAMPDUFactor = ht_info->AMPDU_Factor;

	/* Clear cached self/peer HT elements. */
	memset((void *)(&ht_info->SelfHTCap), 0,
	       sizeof(ht_info->SelfHTCap));
	memset((void *)(&ht_info->SelfHTInfo), 0,
	       sizeof(ht_info->SelfHTInfo));
	memset((void *)(&ht_info->PeerHTCapBuf), 0,
	       sizeof(ht_info->PeerHTCapBuf));
	memset((void *)(&ht_info->PeerHTInfoBuf), 0,
	       sizeof(ht_info->PeerHTInfoBuf));

	ht_info->sw_bw_in_progress = false;

	ht_info->ePeerHTSpecVer = HT_SPEC_VER_IEEE;

	/* Forget any Realtek-to-Realtek peer state. */
	ht_info->current_rt2rt_aggregation = false;
	ht_info->current_rt2rt_long_slot_time = false;
	ht_info->RT2RT_HT_Mode = (enum rt_ht_capability)0;

	ht_info->IOTPeer = 0;
	ht_info->iot_action = 0;
	ht_info->iot_ra_func = 0;

	{
		u8 *RegHTSuppRateSets = &ieee->reg_ht_supp_rate_set[0];

		/* Enable MCS 0-15 plus bit 0 of byte 4. */
		RegHTSuppRateSets[0] = 0xFF;
		RegHTSuppRateSets[1] = 0xFF;
		RegHTSuppRateSets[4] = 0x01;
	}
}
/* Reset a per-BSS HT descriptor to "no HT information seen yet". */
void HTInitializeBssDesc(struct bss_ht *pBssHT)
{
	pBssHT->bd_support_ht = false;
	memset(pBssHT->bd_ht_cap_buf, 0, sizeof(pBssHT->bd_ht_cap_buf));
	pBssHT->bd_ht_cap_len = 0;
	memset(pBssHT->bd_ht_info_buf, 0, sizeof(pBssHT->bd_ht_info_buf));
	pBssHT->bd_ht_info_len = 0;

	pBssHT->bd_ht_spec_ver = HT_SPEC_VER_IEEE;

	/* No Realtek-to-Realtek proprietary features by default. */
	pBssHT->bd_rt2rt_aggregation = false;
	pBssHT->bd_rt2rt_long_slot_time = false;
	pBssHT->rt2rt_ht_mode = (enum rt_ht_capability)0;
}
/* Cache the target network's raw HT elements and derive the IOT peer
 * type plus the per-peer workaround action mask before association.
 */
void HTResetSelfAndSavePeerSetting(struct rtllib_device *ieee,
				   struct rtllib_network *pNetwork)
{
	struct rt_hi_throughput *ht_info = ieee->ht_info;
	u8 bIOTAction = 0;

	/* unmark enable_ht flag here is the same reason why unmarked in
	 * function rtllib_softmac_new_net. WB 2008.09.10
	 */
	if (pNetwork->bssht.bd_support_ht) {
		ht_info->bCurrentHTSupport = true;
		ht_info->ePeerHTSpecVer = pNetwork->bssht.bd_ht_spec_ver;

		/* Copy the peer's raw HT cap/info elements, bounds-checked
		 * against our fixed-size buffers.
		 */
		if (pNetwork->bssht.bd_ht_cap_len > 0 &&
		    pNetwork->bssht.bd_ht_cap_len <= sizeof(ht_info->PeerHTCapBuf))
			memcpy(ht_info->PeerHTCapBuf,
			       pNetwork->bssht.bd_ht_cap_buf,
			       pNetwork->bssht.bd_ht_cap_len);

		if (pNetwork->bssht.bd_ht_info_len > 0 &&
		    pNetwork->bssht.bd_ht_info_len <=
		    sizeof(ht_info->PeerHTInfoBuf))
			memcpy(ht_info->PeerHTInfoBuf,
			       pNetwork->bssht.bd_ht_info_buf,
			       pNetwork->bssht.bd_ht_info_len);

		/* Adopt RT2RT state only when we allow RT2RT aggregation. */
		if (ht_info->reg_rt2rt_aggregation) {
			ht_info->current_rt2rt_aggregation =
				 pNetwork->bssht.bd_rt2rt_aggregation;
			ht_info->current_rt2rt_long_slot_time =
				 pNetwork->bssht.bd_rt2rt_long_slot_time;
			ht_info->RT2RT_HT_Mode = pNetwork->bssht.rt2rt_ht_mode;
		} else {
			ht_info->current_rt2rt_aggregation = false;
			ht_info->current_rt2rt_long_slot_time = false;
			ht_info->RT2RT_HT_Mode = (enum rt_ht_capability)0;
		}

		HTIOTPeerDetermine(ieee);

		/* Rebuild the IOT action mask from the per-quirk probes
		 * (several are stubs that always report "no action").
		 */
		ht_info->iot_action = 0;
		bIOTAction = HTIOTActIsDisableMCS14(ieee, pNetwork->bssid);
		if (bIOTAction)
			ht_info->iot_action |= HT_IOT_ACT_DISABLE_MCS14;

		bIOTAction = HTIOTActIsDisableMCS15(ieee);
		if (bIOTAction)
			ht_info->iot_action |= HT_IOT_ACT_DISABLE_MCS15;

		bIOTAction = HTIOTActIsDisableMCSTwoSpatialStream(ieee);
		if (bIOTAction)
			ht_info->iot_action |= HT_IOT_ACT_DISABLE_ALL_2SS;

		bIOTAction = HTIOTActIsDisableEDCATurbo(ieee, pNetwork->bssid);
		if (bIOTAction)
			ht_info->iot_action |= HT_IOT_ACT_DISABLE_EDCA_TURBO;

		bIOTAction = HTIOTActIsMgntUseCCK6M(ieee, pNetwork);
		if (bIOTAction)
			ht_info->iot_action |= HT_IOT_ACT_MGNT_USE_CCK_6M;
		bIOTAction = HTIOTActIsCCDFsync(ieee);
		if (bIOTAction)
			ht_info->iot_action |= HT_IOT_ACT_CDD_FSYNC;
	} else {
		/* Non-HT network: clear all HT and IOT state. */
		ht_info->bCurrentHTSupport = false;
		ht_info->current_rt2rt_aggregation = false;
		ht_info->current_rt2rt_long_slot_time = false;
		ht_info->RT2RT_HT_Mode = (enum rt_ht_capability)0;

		ht_info->iot_action = 0;
		ht_info->iot_ra_func = 0;
	}
}
/* Track the peer's HT protection (operation) mode from a freshly parsed
 * beacon/probe response while an HT association is active.
 */
void HT_update_self_and_peer_setting(struct rtllib_device *ieee,
				     struct rtllib_network *pNetwork)
{
	struct rt_hi_throughput *ht_info = ieee->ht_info;
	struct ht_info_ele *peer_info =
		(struct ht_info_ele *)pNetwork->bssht.bd_ht_info_buf;

	/* Only meaningful when HT is up and the network actually carried
	 * an HT Information element.
	 */
	if (ht_info->bCurrentHTSupport && pNetwork->bssht.bd_ht_info_len != 0)
		ht_info->current_op_mode = peer_info->OptMode;
}
EXPORT_SYMBOL(HT_update_self_and_peer_setting);
/* Adopt the regulatory defaults as the current HT configuration when
 * there is no peer negotiation (e.g. ad-hoc / default operation).
 */
void HTUseDefaultSetting(struct rtllib_device *ieee)
{
	struct rt_hi_throughput *ht_info = ieee->ht_info;

	if (ht_info->enable_ht) {
		ht_info->bCurrentHTSupport = true;
		ht_info->bCurSuppCCK = ht_info->bRegSuppCCK;

		ht_info->bCurBW40MHz = ht_info->bRegBW40MHz;
		ht_info->bCurShortGI20MHz = ht_info->bRegShortGI20MHz;
		ht_info->bCurShortGI40MHz = ht_info->bRegShortGI40MHz;

		if (ieee->iw_mode == IW_MODE_ADHOC)
			ieee->current_network.qos_data.active =
				 ieee->current_network.qos_data.supported;
		ht_info->bCurrent_AMSDU_Support = ht_info->bAMSDU_Support;
		ht_info->nCurrent_AMSDU_MaxSize = ht_info->nAMSDU_MaxSize;

		ht_info->bCurrentAMPDUEnable = ht_info->bAMPDUEnable;
		ht_info->CurrentAMPDUFactor = ht_info->AMPDU_Factor;

		/* Was "current_mpdu_density = current_mpdu_density", a
		 * no-op self-assignment.  Every sibling line copies a
		 * default into its "current" counterpart, and
		 * HTInitializeHTInfo() uses MPDU_Density for this field,
		 * so copy MPDU_Density here as well.
		 */
		ht_info->current_mpdu_density = ht_info->MPDU_Density;

		HTFilterMCSRate(ieee, ieee->reg_dot11tx_ht_oper_rate_set,
				ieee->dot11ht_oper_rate_set);
		ieee->HTHighestOperaRate = HTGetHighestMCSRate(ieee,
							       ieee->dot11ht_oper_rate_set,
							       MCS_FILTER_ALL);
		ieee->HTCurrentOperaRate = ieee->HTHighestOperaRate;
	} else {
		ht_info->bCurrentHTSupport = false;
	}
}
/* Report whether a frame carries an HT Control field: only possible on
 * QoS data frames with the Order bit set, and only while HT is active.
 */
u8 HTCCheck(struct rtllib_device *ieee, u8 *pFrame)
{
	if (!ieee->ht_info->bCurrentHTSupport)
		return false;

	if (IsQoSDataFrame(pFrame) && Frame_Order(pFrame)) {
		netdev_dbg(ieee->dev, "HT CONTROL FILED EXIST!!\n");
		return true;
	}
	return false;
}
/* Apply the bandwidth decision made by HTSetConnectBwMode(): retune to
 * the (possibly shifted) channel and program the hardware bandwidth,
 * then clear the in-progress flag.
 */
static void HTSetConnectBwModeCallback(struct rtllib_device *ieee)
{
	struct rt_hi_throughput *ht_info = ieee->ht_info;

	if (ht_info->bCurBW40MHz) {
		/* In 40 MHz the tuned frequency sits two channels above
		 * or below the primary, toward the extension channel.
		 */
		if (ht_info->CurSTAExtChnlOffset == HT_EXTCHNL_OFFSET_UPPER)
			ieee->set_chan(ieee->dev,
				       ieee->current_network.channel + 2);
		else if (ht_info->CurSTAExtChnlOffset ==
			 HT_EXTCHNL_OFFSET_LOWER)
			ieee->set_chan(ieee->dev,
				       ieee->current_network.channel - 2);
		else
			ieee->set_chan(ieee->dev,
				       ieee->current_network.channel);

		ieee->set_bw_mode_handler(ieee->dev, HT_CHANNEL_WIDTH_20_40,
					  ht_info->CurSTAExtChnlOffset);
	} else {
		ieee->set_chan(ieee->dev, ieee->current_network.channel);
		ieee->set_bw_mode_handler(ieee->dev, HT_CHANNEL_WIDTH_20,
					  HT_EXTCHNL_OFFSET_NO_EXT);
	}

	ht_info->sw_bw_in_progress = false;
}
/* Decide the operating bandwidth (20 vs 20/40 MHz) and extension
 * channel offset for the association, then apply it via the callback.
 */
void HTSetConnectBwMode(struct rtllib_device *ieee,
			enum ht_channel_width bandwidth,
			enum ht_extchnl_offset Offset)
{
	struct rt_hi_throughput *ht_info = ieee->ht_info;

	if (!ht_info->bRegBW40MHz)
		return;

	/* Half-N APs are limited to 20 MHz regardless of the request. */
	if (ieee->GetHalfNmodeSupportByAPsHandler(ieee->dev))
		bandwidth = HT_CHANNEL_WIDTH_20;

	if (ht_info->sw_bw_in_progress) {
		pr_info("%s: sw_bw_in_progress!!\n", __func__);
		return;
	}
	if (bandwidth == HT_CHANNEL_WIDTH_20_40) {
		/* Channel 1 has no room below it for a lower extension. */
		if (ieee->current_network.channel < 2 &&
		    Offset == HT_EXTCHNL_OFFSET_LOWER)
			Offset = HT_EXTCHNL_OFFSET_NO_EXT;
		if (Offset == HT_EXTCHNL_OFFSET_UPPER ||
		    Offset == HT_EXTCHNL_OFFSET_LOWER) {
			ht_info->bCurBW40MHz = true;
			ht_info->CurSTAExtChnlOffset = Offset;
		} else {
			/* No valid extension channel: fall back to 20 MHz. */
			ht_info->bCurBW40MHz = false;
			ht_info->CurSTAExtChnlOffset = HT_EXTCHNL_OFFSET_NO_EXT;
		}
	} else {
		ht_info->bCurBW40MHz = false;
		ht_info->CurSTAExtChnlOffset = HT_EXTCHNL_OFFSET_NO_EXT;
	}

	netdev_dbg(ieee->dev, "%s():ht_info->bCurBW40MHz:%x\n", __func__,
		   ht_info->bCurBW40MHz);

	ht_info->sw_bw_in_progress = true;

	HTSetConnectBwModeCallback(ieee);
}
| linux-master | drivers/staging/rtl8192e/rtl819x_HTProc.c |
// SPDX-License-Identifier: GPL-2.0
/*
* Copyright(c) 2008 - 2010 Realtek Corporation. All rights reserved.
*
* Contact Information: wlanfae <[email protected]>
*/
#include "rtllib.h"
#include <linux/etherdevice.h>
#include "rtl819x_TS.h"
/* Reorder-timeout handler for one Rx traffic stream: flush buffered
 * packets whose sequence numbers have become deliverable, hand them up
 * in one batch, and re-arm the timer if out-of-order packets remain.
 * All list/sequence state is protected by reorder_spinlock.
 */
static void RxPktPendingTimeout(struct timer_list *t)
{
	struct rx_ts_record *pRxTs = from_timer(pRxTs, t,
						rx_pkt_pending_timer);
	struct rtllib_device *ieee = container_of(pRxTs, struct rtllib_device,
						  RxTsRecord[pRxTs->num]);

	struct rx_reorder_entry *pReorderEntry = NULL;

	unsigned long flags = 0;
	u8 index = 0;
	bool bPktInBuf = false;

	spin_lock_irqsave(&(ieee->reorder_spinlock), flags);
	if (pRxTs->rx_timeout_indicate_seq != 0xffff) {
		/* Walk the pending list from its tail, collecting every
		 * packet at or before the current indicate sequence.
		 */
		while (!list_empty(&pRxTs->rx_pending_pkt_list)) {
			pReorderEntry = (struct rx_reorder_entry *)
					list_entry(pRxTs->rx_pending_pkt_list.prev,
					struct rx_reorder_entry, List);
			/* First entry taken defines the new indicate seq. */
			if (index == 0)
				pRxTs->rx_indicate_seq = pReorderEntry->SeqNum;

			if (SN_LESS(pReorderEntry->SeqNum,
				    pRxTs->rx_indicate_seq) ||
			    SN_EQUAL(pReorderEntry->SeqNum,
				     pRxTs->rx_indicate_seq)) {
				list_del_init(&pReorderEntry->List);

				/* Sequence numbers wrap modulo 4096. */
				if (SN_EQUAL(pReorderEntry->SeqNum,
					     pRxTs->rx_indicate_seq))
					pRxTs->rx_indicate_seq =
					      (pRxTs->rx_indicate_seq + 1) % 4096;

				netdev_dbg(ieee->dev,
					   "%s(): Indicate SeqNum: %d\n",
					   __func__, pReorderEntry->SeqNum);
				ieee->stats_IndicateArray[index] =
							 pReorderEntry->prxb;
				index++;

				list_add_tail(&pReorderEntry->List,
					      &ieee->RxReorder_Unused_List);
			} else {
				/* Still a gap before this packet. */
				bPktInBuf = true;
				break;
			}
		}
	}

	if (index > 0) {
		/* Stop tracking the timeout seq while indicating. */
		pRxTs->rx_timeout_indicate_seq = 0xffff;

		if (index > REORDER_WIN_SIZE) {
			netdev_warn(ieee->dev,
				    "%s(): Rx Reorder struct buffer full\n",
				    __func__);
			spin_unlock_irqrestore(&(ieee->reorder_spinlock),
					       flags);
			return;
		}
		rtllib_indicate_packets(ieee, ieee->stats_IndicateArray, index);
		bPktInBuf = false;
	}

	/* Out-of-order packets remain: re-arm the pending timer. */
	if (bPktInBuf && (pRxTs->rx_timeout_indicate_seq == 0xffff)) {
		pRxTs->rx_timeout_indicate_seq = pRxTs->rx_indicate_seq;
		mod_timer(&pRxTs->rx_pkt_pending_timer, jiffies +
			  msecs_to_jiffies(ieee->ht_info->rx_reorder_pending_time)
			  );
	}
	spin_unlock_irqrestore(&(ieee->reorder_spinlock), flags);
}
/* Timer handler: (re)issue an immediate-policy ADDBA request for the
 * Tx traffic stream that owns this timer.
 */
static void TsAddBaProcess(struct timer_list *t)
{
	struct tx_ts_record *pTxTs = from_timer(pTxTs, t, TsAddBaTimer);
	u8 num = pTxTs->num;
	struct rtllib_device *ieee = container_of(pTxTs, struct rtllib_device,
				     TxTsRecord[num]);

	rtllib_ts_init_add_ba(ieee, pTxTs, BA_POLICY_IMMEDIATE, false);
	netdev_dbg(ieee->dev, "%s(): ADDBA Req is started\n", __func__);
}
/* Clear the address, TSPEC and TCLAS data shared by Tx/Rx TS records. */
static void ResetTsCommonInfo(struct ts_common_info *pTsCommonInfo)
{
	eth_zero_addr(pTsCommonInfo->Addr);
	memset(&pTsCommonInfo->TSpec, 0, sizeof(union tspec_body));
	memset(&pTsCommonInfo->TClass, 0, sizeof(union qos_tclas) * TCLAS_NUM);
	pTsCommonInfo->TClasProc = 0;
	pTsCommonInfo->TClasNum = 0;
}
/* Return a Tx TS record to its freshly-initialized state, including
 * its pending and admitted block-ack records.
 */
static void ResetTxTsEntry(struct tx_ts_record *pTS)
{
	ResetTsCommonInfo(&pTS->TsCommonInfo);
	pTS->TxCurSeq = 0;
	pTS->bAddBaReqInProgress = false;
	pTS->bAddBaReqDelayed = false;
	pTS->bUsingBa = false;
	pTS->bDisable_AddBa = false;
	rtllib_reset_ba_entry(&pTS->TxAdmittedBARecord);
	rtllib_reset_ba_entry(&pTS->TxPendingBARecord);
}
/* Return an Rx TS record to its initial state; 0xffff marks the
 * indicate/timeout sequence numbers as "not yet established".
 */
static void ResetRxTsEntry(struct rx_ts_record *pTS)
{
	ResetTsCommonInfo(&pTS->ts_common_info);
	pTS->rx_indicate_seq = 0xffff;
	pTS->rx_timeout_indicate_seq = 0xffff;
	rtllib_reset_ba_entry(&pTS->rx_admitted_ba_record);
}
/* One-time setup of all traffic-stream bookkeeping: initialize the
 * Tx/Rx TS record pools (timers + reset), and park every TS record and
 * Rx reorder entry on its respective unused list.
 */
void TSInitialize(struct rtllib_device *ieee)
{
	struct tx_ts_record *pTxTS = ieee->TxTsRecord;
	struct rx_ts_record *pRxTS = ieee->RxTsRecord;
	u8 count = 0;

	/* Tx traffic-stream records. */
	INIT_LIST_HEAD(&ieee->Tx_TS_Admit_List);
	INIT_LIST_HEAD(&ieee->Tx_TS_Pending_List);
	INIT_LIST_HEAD(&ieee->Tx_TS_Unused_List);
	for (count = 0; count < TOTAL_TS_NUM; count++) {
		pTxTS->num = count;
		timer_setup(&pTxTS->TsAddBaTimer, TsAddBaProcess, 0);

		timer_setup(&pTxTS->TxPendingBARecord.timer, rtllib_ba_setup_timeout,
			    0);
		timer_setup(&pTxTS->TxAdmittedBARecord.timer,
			    rtllib_tx_ba_inact_timeout, 0);

		ResetTxTsEntry(pTxTS);
		list_add_tail(&pTxTS->TsCommonInfo.List,
			      &ieee->Tx_TS_Unused_List);
		pTxTS++;
	}

	/* Rx traffic-stream records, same treatment. */
	INIT_LIST_HEAD(&ieee->Rx_TS_Admit_List);
	INIT_LIST_HEAD(&ieee->Rx_TS_Pending_List);
	INIT_LIST_HEAD(&ieee->Rx_TS_Unused_List);
	for (count = 0; count < TOTAL_TS_NUM; count++) {
		pRxTS->num = count;
		INIT_LIST_HEAD(&pRxTS->rx_pending_pkt_list);
		timer_setup(&pRxTS->rx_admitted_ba_record.timer,
			    rtllib_rx_ba_inact_timeout, 0);

		timer_setup(&pRxTS->rx_pkt_pending_timer, RxPktPendingTimeout, 0);

		ResetRxTsEntry(pRxTS);
		list_add_tail(&pRxTS->ts_common_info.List,
			      &ieee->Rx_TS_Unused_List);
		pRxTS++;
	}

	/* Rx reorder entries all start on the unused list.  The original
	 * loop advanced a pointer and broke out of the final iteration;
	 * plain indexing does the same thing more clearly.
	 */
	INIT_LIST_HEAD(&ieee->RxReorder_Unused_List);
	for (count = 0; count < REORDER_ENTRY_NUM; count++)
		list_add_tail(&ieee->RxReorderEntry[count].List,
			      &ieee->RxReorder_Unused_List);
}
/* Look up an admitted traffic stream matching the peer address, TID,
 * and an acceptable direction for the given Tx/Rx side.  Returns NULL
 * when no match exists.
 */
static struct ts_common_info *SearchAdmitTRStream(struct rtllib_device *ieee,
						  u8 *Addr, u8 TID,
						  enum tr_select TxRxSelect)
{
	u8 dir;
	bool search_dir[4] = {0};
	struct list_head *psearch_list;
	struct ts_common_info *pRet = NULL;

	/* Ad-hoc streams are strictly up (Tx) or down (Rx); otherwise
	 * bidirectional and direct-link streams also match.
	 */
	if (ieee->iw_mode == IW_MODE_ADHOC) {
		if (TxRxSelect == TX_DIR)
			search_dir[DIR_UP] = true;
		else
			search_dir[DIR_DOWN] = true;
	} else {
		if (TxRxSelect == TX_DIR) {
			search_dir[DIR_UP] = true;
			search_dir[DIR_BI_DIR] = true;
			search_dir[DIR_DIRECT] = true;
		} else {
			search_dir[DIR_DOWN] = true;
			search_dir[DIR_BI_DIR] = true;
			search_dir[DIR_DIRECT] = true;
		}
	}

	if (TxRxSelect == TX_DIR)
		psearch_list = &ieee->Tx_TS_Admit_List;
	else
		psearch_list = &ieee->Rx_TS_Admit_List;

	for (dir = 0; dir <= DIR_BI_DIR; dir++) {
		if (!search_dir[dir])
			continue;
		/* When nothing matches, list_for_each_entry leaves pRet
		 * computed from the list head itself (not a real entry);
		 * both "&pRet->List != psearch_list" checks detect that.
		 */
		list_for_each_entry(pRet, psearch_list, List) {
			if (memcmp(pRet->Addr, Addr, 6) == 0 &&
			    pRet->TSpec.f.TSInfo.field.ucTSID == TID &&
			    pRet->TSpec.f.TSInfo.field.ucDirection == dir)
				break;
		}
		if (&pRet->List != psearch_list)
			break;
	}

	if (pRet && &pRet->List != psearch_list)
		return pRet;
	return NULL;
}
/* Fill a TS record with the peer address, TSPEC and TCLAS data. */
static void MakeTSEntry(struct ts_common_info *pTsCommonInfo, u8 *Addr,
			union tspec_body *pTSPEC, union qos_tclas *pTCLAS,
			u8 TCLAS_Num, u8 TCLAS_Proc)
{
	u8 count;

	if (!pTsCommonInfo)
		return;

	memcpy(pTsCommonInfo->Addr, Addr, 6);

	if (pTSPEC)
		memcpy((u8 *)(&(pTsCommonInfo->TSpec)), (u8 *)pTSPEC,
			sizeof(union tspec_body));

	/* NOTE(review): every iteration copies pTCLAS[0]; for
	 * TCLAS_Num > 1 this looks like it should index pTCLAS[count].
	 * The only caller in this file passes NULL/0 so the loop never
	 * runs — confirm before relying on multi-TCLAS support.
	 */
	for (count = 0; count < TCLAS_Num; count++)
		memcpy((u8 *)(&(pTsCommonInfo->TClass[count])),
		       (u8 *)pTCLAS, sizeof(union qos_tclas));

	pTsCommonInfo->TClasProc = TCLAS_Proc;
	pTsCommonInfo->TClasNum = TCLAS_Num;
}
/* Find — or, when bAddNewTs is set, create and admit — the traffic
 * stream for a peer address and TID.  On success *ppTS points at the
 * stream and true is returned; multicast addresses, invalid TIDs, a
 * missing stream with bAddNewTs clear, or an exhausted record pool all
 * return false.
 */
bool GetTs(struct rtllib_device *ieee, struct ts_common_info **ppTS,
	   u8 *Addr, u8 TID, enum tr_select TxRxSelect, bool bAddNewTs)
{
	u8 UP = 0;
	union tspec_body TSpec;
	union qos_tsinfo *pTSInfo = &TSpec.f.TSInfo;
	struct list_head *pUnusedList;
	struct list_head *pAddmitList;
	enum direction_value Dir;

	if (is_multicast_ether_addr(Addr)) {
		netdev_warn(ieee->dev, "Get TS for Broadcast or Multicast\n");
		return false;
	}
	/* Map the 802.11 TID to a user priority; without QoS support
	 * everything collapses to UP 0.
	 */
	if (ieee->current_network.qos_data.supported == 0) {
		UP = 0;
	} else {
		switch (TID) {
		case 0:
		case 3:
			UP = 0;
			break;
		case 1:
		case 2:
			UP = 2;
			break;
		case 4:
		case 5:
			UP = 5;
			break;
		case 6:
		case 7:
			UP = 7;
			break;
		default:
			netdev_warn(ieee->dev, "%s(): TID(%d) is not valid\n",
				    __func__, TID);
			return false;
		}
	}

	*ppTS = SearchAdmitTRStream(ieee, Addr, UP, TxRxSelect);
	if (*ppTS)
		return true;

	if (!bAddNewTs) {
		netdev_dbg(ieee->dev, "add new TS failed(tid:%d)\n", UP);
		return false;
	}

	pUnusedList = (TxRxSelect == TX_DIR) ?
				(&ieee->Tx_TS_Unused_List) :
				(&ieee->Rx_TS_Unused_List);

	pAddmitList = (TxRxSelect == TX_DIR) ?
				(&ieee->Tx_TS_Admit_List) :
				(&ieee->Rx_TS_Admit_List);

	Dir = ((TxRxSelect == TX_DIR) ? DIR_UP : DIR_DOWN);

	if (!list_empty(pUnusedList)) {
		/* Take a free record and reset it for the right side. */
		(*ppTS) = list_entry(pUnusedList->next,
				     struct ts_common_info, List);
		list_del_init(&(*ppTS)->List);
		if (TxRxSelect == TX_DIR) {
			struct tx_ts_record *tmp =
				container_of(*ppTS,
					     struct tx_ts_record,
					     TsCommonInfo);
			ResetTxTsEntry(tmp);
		} else {
			struct rx_ts_record *tmp =
				container_of(*ppTS,
					     struct rx_ts_record,
					     ts_common_info);
			ResetRxTsEntry(tmp);
		}

		netdev_dbg(ieee->dev,
			   "to init current TS, UP:%d, Dir:%d, addr: %pM ppTs=%p\n",
			   UP, Dir, Addr, *ppTS);

		/* Build a minimal TSPEC and admit the record. */
		pTSInfo->field.ucTrafficType = 0;
		pTSInfo->field.ucTSID = UP;
		pTSInfo->field.ucDirection = Dir;
		pTSInfo->field.ucAccessPolicy = 1;
		pTSInfo->field.ucAggregation = 0;
		pTSInfo->field.ucPSB = 0;
		pTSInfo->field.ucUP = UP;
		pTSInfo->field.ucTSInfoAckPolicy = 0;
		pTSInfo->field.ucSchedule = 0;

		MakeTSEntry(*ppTS, Addr, &TSpec, NULL, 0, 0);
		list_add_tail(&((*ppTS)->List), pAddmitList);

		return true;
	}

	netdev_warn(ieee->dev,
		    "There is not enough dir=%d(0=up down=1) TS record to be used!",
		    Dir);
	return false;
}
/*
 * RemoveTsEntry - tear a TS down before its record is recycled.
 * Initiates DELBA for the stream; for RX streams it additionally stops
 * the pending-packet timer and frees everything still queued in the
 * reorder list, for TX streams it only stops the ADDBA timer.
 */
static void RemoveTsEntry(struct rtllib_device *ieee,
			  struct ts_common_info *pTs, enum tr_select TxRxSelect)
{
	rtllib_ts_init_del_ba(ieee, pTs, TxRxSelect);
	if (TxRxSelect == RX_DIR) {
		struct rx_reorder_entry *pRxReorderEntry;
		/* Cast assumes ts_common_info is the first member of
		 * struct rx_ts_record — TODO confirm against rtllib.h.
		 */
		struct rx_ts_record *pRxTS = (struct rx_ts_record *)pTs;

		if (timer_pending(&pRxTS->rx_pkt_pending_timer))
			del_timer_sync(&pRxTS->rx_pkt_pending_timer);

		/* Drop every buffered packet and return the reorder
		 * entries to the unused pool.
		 */
		while (!list_empty(&pRxTS->rx_pending_pkt_list)) {
			pRxReorderEntry = (struct rx_reorder_entry *)
					list_entry(pRxTS->rx_pending_pkt_list.prev,
						   struct rx_reorder_entry, List);
			netdev_dbg(ieee->dev, "%s(): Delete SeqNum %d!\n",
				   __func__, pRxReorderEntry->SeqNum);
			list_del_init(&pRxReorderEntry->List);
			{
				int i = 0;
				struct rtllib_rxb *prxb = pRxReorderEntry->prxb;

				/* NOTE(review): returning here leaks the
				 * reorder entry — it never reaches the
				 * unused list. Confirm prxb cannot be
				 * NULL on a queued entry.
				 */
				if (unlikely(!prxb))
					return;
				for (i = 0; i < prxb->nr_subframes; i++)
					dev_kfree_skb(prxb->subframes[i]);
				kfree(prxb);
				prxb = NULL;
			}
			list_add_tail(&pRxReorderEntry->List,
				      &ieee->RxReorder_Unused_List);
		}
	} else {
		struct tx_ts_record *pTxTS = (struct tx_ts_record *)pTs;

		del_timer_sync(&pTxTS->TsAddBaTimer);
	}
}
/*
 * remove_peer_ts_from_list - recycle every TS for @Addr found on @list.
 * Matching records are torn down (DELBA sent, buffered packets freed via
 * RemoveTsEntry()) and moved onto @unused.
 */
static void remove_peer_ts_from_list(struct rtllib_device *ieee,
				     struct list_head *list,
				     struct list_head *unused,
				     enum tr_select dir, u8 *Addr)
{
	struct ts_common_info *pTS, *pTmpTS;

	list_for_each_entry_safe(pTS, pTmpTS, list, List) {
		if (memcmp(pTS->Addr, Addr, 6) == 0) {
			RemoveTsEntry(ieee, pTS, dir);
			list_del_init(&pTS->List);
			list_add_tail(&pTS->List, unused);
		}
	}
}

/*
 * RemovePeerTS - drop all TX and RX traffic streams established with the
 * peer at @Addr, returning their records to the unused pools.  The four
 * previously duplicated loops are folded into one helper; the redundant
 * per-entry "remove Tx_TS_admin_list" info message was dropped.
 */
void RemovePeerTS(struct rtllib_device *ieee, u8 *Addr)
{
	netdev_info(ieee->dev, "===========>%s, %pM\n", __func__, Addr);

	remove_peer_ts_from_list(ieee, &ieee->Tx_TS_Pending_List,
				 &ieee->Tx_TS_Unused_List, TX_DIR, Addr);
	remove_peer_ts_from_list(ieee, &ieee->Tx_TS_Admit_List,
				 &ieee->Tx_TS_Unused_List, TX_DIR, Addr);
	remove_peer_ts_from_list(ieee, &ieee->Rx_TS_Pending_List,
				 &ieee->Rx_TS_Unused_List, RX_DIR, Addr);
	remove_peer_ts_from_list(ieee, &ieee->Rx_TS_Admit_List,
				 &ieee->Rx_TS_Unused_List, RX_DIR, Addr);
}
EXPORT_SYMBOL(RemovePeerTS);
void RemoveAllTS(struct rtllib_device *ieee)
{
struct ts_common_info *pTS, *pTmpTS;
list_for_each_entry_safe(pTS, pTmpTS, &ieee->Tx_TS_Pending_List, List) {
RemoveTsEntry(ieee, pTS, TX_DIR);
list_del_init(&pTS->List);
list_add_tail(&pTS->List, &ieee->Tx_TS_Unused_List);
}
list_for_each_entry_safe(pTS, pTmpTS, &ieee->Tx_TS_Admit_List, List) {
RemoveTsEntry(ieee, pTS, TX_DIR);
list_del_init(&pTS->List);
list_add_tail(&pTS->List, &ieee->Tx_TS_Unused_List);
}
list_for_each_entry_safe(pTS, pTmpTS, &ieee->Rx_TS_Pending_List, List) {
RemoveTsEntry(ieee, pTS, RX_DIR);
list_del_init(&pTS->List);
list_add_tail(&pTS->List, &ieee->Rx_TS_Unused_List);
}
list_for_each_entry_safe(pTS, pTmpTS, &ieee->Rx_TS_Admit_List, List) {
RemoveTsEntry(ieee, pTS, RX_DIR);
list_del_init(&pTS->List);
list_add_tail(&pTS->List, &ieee->Rx_TS_Unused_List);
}
}
/*
 * TsStartAddBaProcess - kick off ADDBA negotiation for a TX stream.
 * Arms the ADDBA timer (delayed by TS_ADDBA_DELAY ms when a previous
 * attempt asked for a delay, otherwise almost immediately); does nothing
 * if a request is already in flight.
 */
void TsStartAddBaProcess(struct rtllib_device *ieee, struct tx_ts_record *pTxTS)
{
	unsigned long expires;

	if (pTxTS->bAddBaReqInProgress) {
		netdev_dbg(ieee->dev, "BA timer is already added\n");
		return;
	}

	pTxTS->bAddBaReqInProgress = true;

	if (pTxTS->bAddBaReqDelayed) {
		netdev_dbg(ieee->dev, "Start ADDBA after 60 sec!!\n");
		expires = jiffies + msecs_to_jiffies(TS_ADDBA_DELAY);
	} else {
		netdev_dbg(ieee->dev, "Immediately Start ADDBA\n");
		expires = jiffies + 10;
	}
	mod_timer(&pTxTS->TsAddBaTimer, expires);
}
| linux-master | drivers/staging/rtl8192e/rtl819x_TSProc.c |
// SPDX-License-Identifier: GPL-2.0
/*
* Original code based Host AP (software wireless LAN access point) driver
* for Intersil Prism2/2.5/3 - hostap.o module, common routines
*
* Copyright (c) 2001-2002, SSH Communications Security Corp and Jouni Malinen
* <[email protected]>
* Copyright (c) 2002-2003, Jouni Malinen <[email protected]>
* Copyright (c) 2004, Intel Corporation
*
* Few modifications for Realtek's Wi-Fi drivers by
* Andrea Merello <[email protected]>
*
* A special thanks goes to Realtek for their support !
*/
#include <linux/compiler.h>
#include <linux/errno.h>
#include <linux/if_arp.h>
#include <linux/in6.h>
#include <linux/in.h>
#include <linux/ip.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/netdevice.h>
#include <linux/pci.h>
#include <linux/proc_fs.h>
#include <linux/skbuff.h>
#include <linux/slab.h>
#include <linux/tcp.h>
#include <linux/types.h>
#include <linux/wireless.h>
#include <linux/etherdevice.h>
#include <linux/uaccess.h>
#include <linux/ctype.h>
#include "rtllib.h"
#include "dot11d.h"
static void rtllib_rx_mgt(struct rtllib_device *ieee, struct sk_buff *skb,
struct rtllib_rx_stats *stats);
/* Hand a raw 802.11 frame to the network stack as ETH_P_80211_RAW.
 * The mac header is recorded before the hdr_length-byte 802.11 header is
 * stripped; the skb's control block is cleared and the skb is consumed
 * by netif_rx().
 */
static inline void rtllib_monitor_rx(struct rtllib_device *ieee,
				     struct sk_buff *skb,
				     struct rtllib_rx_stats *rx_status,
				     size_t hdr_length)
{
	skb->dev = ieee->dev;
	skb_reset_mac_header(skb);
	skb_pull(skb, hdr_length);
	skb->pkt_type = PACKET_OTHERHOST;
	skb->protocol = htons(ETH_P_80211_RAW);
	memset(skb->cb, 0, sizeof(skb->cb));
	netif_rx(skb);
}
/* Called only as a tasklet (software IRQ) */
/* Find the fragment-cache entry for a partially reassembled frame.
 * Entries older than 2 seconds are expired while scanning.  A match
 * requires the same sequence number, source and destination address and
 * either the next expected fragment number or frag == -1, which acts as
 * a wildcard (frag is unsigned, so -1 compares as UINT_MAX — used by
 * rtllib_frag_cache_invalidate()).  Returns NULL when nothing matches.
 */
static struct rtllib_frag_entry *
rtllib_frag_cache_find(struct rtllib_device *ieee, unsigned int seq,
		       unsigned int frag, u8 tid, u8 *src, u8 *dst)
{
	struct rtllib_frag_entry *entry;
	int i;

	for (i = 0; i < RTLLIB_FRAG_CACHE_LEN; i++) {
		entry = &ieee->frag_cache[tid][i];
		/* Expire stale reassembly state before matching. */
		if (entry->skb != NULL &&
		    time_after(jiffies, entry->first_frag_time + 2 * HZ)) {
			netdev_dbg(ieee->dev,
				   "expiring fragment cache entry seq=%u last_frag=%u\n",
				   entry->seq, entry->last_frag);
			dev_kfree_skb_any(entry->skb);
			entry->skb = NULL;
		}
		if (entry->skb != NULL && entry->seq == seq &&
		    (entry->last_frag + 1 == frag || frag == -1) &&
		    memcmp(entry->src_addr, src, ETH_ALEN) == 0 &&
		    memcmp(entry->dst_addr, dst, ETH_ALEN) == 0)
			return entry;
	}
	return NULL;
}
/* Called only as a tasklet (software IRQ) */
/* Return the reassembly skb for this frame's fragment.
 * For the first fragment (frag == 0) a fresh, maximally sized skb is
 * allocated and installed in the per-TID cache (evicting the slot's old
 * occupant, round-robin via frag_next_idx); for later fragments the
 * matching cache entry is looked up.  Returns NULL on allocation failure
 * or when no matching entry exists.  QoS frames use cache rows
 * UP2AC(tid) + 1; non-QoS traffic shares row 0.
 */
static struct sk_buff *
rtllib_frag_cache_get(struct rtllib_device *ieee,
		      struct rtllib_hdr_4addr *hdr)
{
	struct sk_buff *skb = NULL;
	u16 fc = le16_to_cpu(hdr->frame_ctl);
	u16 sc = le16_to_cpu(hdr->seq_ctl);
	unsigned int frag = WLAN_GET_SEQ_FRAG(sc);
	unsigned int seq = WLAN_GET_SEQ_SEQ(sc);
	struct rtllib_frag_entry *entry;
	struct rtllib_hdr_3addrqos *hdr_3addrqos;
	struct rtllib_hdr_4addrqos *hdr_4addrqos;
	u8 tid;

	/* Derive the cache row from the QoS TID (if any). */
	if (((fc & RTLLIB_FCTL_DSTODS) == RTLLIB_FCTL_DSTODS) &&
	    RTLLIB_QOS_HAS_SEQ(fc)) {
		hdr_4addrqos = (struct rtllib_hdr_4addrqos *)hdr;
		tid = le16_to_cpu(hdr_4addrqos->qos_ctl) & RTLLIB_QCTL_TID;
		tid = UP2AC(tid);
		tid++;
	} else if (RTLLIB_QOS_HAS_SEQ(fc)) {
		hdr_3addrqos = (struct rtllib_hdr_3addrqos *)hdr;
		tid = le16_to_cpu(hdr_3addrqos->qos_ctl) & RTLLIB_QCTL_TID;
		tid = UP2AC(tid);
		tid++;
	} else {
		tid = 0;
	}

	if (frag == 0) {
		/* Reserve enough space to fit maximum frame length */
		skb = dev_alloc_skb(ieee->dev->mtu +
				    sizeof(struct rtllib_hdr_4addr) +
				    8 /* LLC */ +
				    2 /* alignment */ +
				    8 /* WEP */ +
				    ETH_ALEN /* WDS */ +
				    /* QOS Control */
				    (RTLLIB_QOS_HAS_SEQ(fc) ? 2 : 0));
		if (!skb)
			return NULL;

		entry = &ieee->frag_cache[tid][ieee->frag_next_idx[tid]];
		ieee->frag_next_idx[tid]++;
		if (ieee->frag_next_idx[tid] >= RTLLIB_FRAG_CACHE_LEN)
			ieee->frag_next_idx[tid] = 0;

		/* Evict whatever previously occupied this slot. */
		if (entry->skb != NULL)
			dev_kfree_skb_any(entry->skb);

		entry->first_frag_time = jiffies;
		entry->seq = seq;
		entry->last_frag = frag;
		entry->skb = skb;
		ether_addr_copy(entry->src_addr, hdr->addr2);
		ether_addr_copy(entry->dst_addr, hdr->addr1);
	} else {
		/* received a fragment of a frame for which the head fragment
		 * should have already been received
		 */
		entry = rtllib_frag_cache_find(ieee, seq, frag, tid, hdr->addr2,
					       hdr->addr1);
		if (entry != NULL) {
			entry->last_frag = frag;
			skb = entry->skb;
		}
	}

	return skb;
}
/* Called only as a tasklet (software IRQ) */
/* Release the fragment-cache slot for a fully reassembled frame.
 * Looks the entry up with the frag wildcard (-1) and clears its skb
 * pointer; the skb itself is owned by the caller at this point and is
 * not freed here.  Returns 0 on success, -1 when no entry was found.
 */
static int rtllib_frag_cache_invalidate(struct rtllib_device *ieee,
					struct rtllib_hdr_4addr *hdr)
{
	u16 fc = le16_to_cpu(hdr->frame_ctl);
	u16 sc = le16_to_cpu(hdr->seq_ctl);
	unsigned int seq = WLAN_GET_SEQ_SEQ(sc);
	struct rtllib_frag_entry *entry;
	struct rtllib_hdr_3addrqos *hdr_3addrqos;
	struct rtllib_hdr_4addrqos *hdr_4addrqos;
	u8 tid;

	/* Same TID -> cache-row mapping as rtllib_frag_cache_get(). */
	if (((fc & RTLLIB_FCTL_DSTODS) == RTLLIB_FCTL_DSTODS) &&
	    RTLLIB_QOS_HAS_SEQ(fc)) {
		hdr_4addrqos = (struct rtllib_hdr_4addrqos *)hdr;
		tid = le16_to_cpu(hdr_4addrqos->qos_ctl) & RTLLIB_QCTL_TID;
		tid = UP2AC(tid);
		tid++;
	} else if (RTLLIB_QOS_HAS_SEQ(fc)) {
		hdr_3addrqos = (struct rtllib_hdr_3addrqos *)hdr;
		tid = le16_to_cpu(hdr_3addrqos->qos_ctl) & RTLLIB_QCTL_TID;
		tid = UP2AC(tid);
		tid++;
	} else {
		tid = 0;
	}

	entry = rtllib_frag_cache_find(ieee, seq, -1, tid, hdr->addr2,
				       hdr->addr1);

	if (entry == NULL) {
		netdev_dbg(ieee->dev,
			   "Couldn't invalidate fragment cache entry (seq=%u)\n",
			   seq);
		return -1;
	}

	entry->skb = NULL;
	return 0;
}
/* rtllib_rx_frame_mgmt
 *
 * Responsible for handling management control frames
 *
 * Called by rtllib_rx.  Always consumes the skb and returns 0: the frame
 * is first fed to the management parser, then — only if it is addressed
 * to this interface — to the softmac handler.
 */
static inline int
rtllib_rx_frame_mgmt(struct rtllib_device *ieee, struct sk_buff *skb,
		     struct rtllib_rx_stats *rx_stats, u16 type,
		     u16 stype)
{
	/* On the struct stats definition there is written that
	 * this is not mandatory.... but seems that the probe
	 * response parser uses it
	 */
	struct rtllib_hdr_3addr *hdr = (struct rtllib_hdr_3addr *)skb->data;

	rx_stats->len = skb->len;
	rtllib_rx_mgt(ieee, skb, rx_stats);
	/* Frames not addressed to us are dropped after the mgmt parse. */
	if ((memcmp(hdr->addr1, ieee->dev->dev_addr, ETH_ALEN))) {
		dev_kfree_skb_any(skb);
		return 0;
	}
	rtllib_rx_frame_softmac(ieee, skb, rx_stats, type, stype);

	dev_kfree_skb_any(skb);

	return 0;
}
/* No encapsulation header if EtherType < 0x600 (=length) */
/* Called by rtllib_rx_frame_decrypt */
/*
 * rtllib_is_eapol_frame - detect an EAPOL (802.1X) frame addressed to us.
 * @hdrlen: length of the 802.11 header preceding the LLC/SNAP payload
 *
 * Returns 1 when the (already decrypted) frame is a unicast data frame
 * destined for this interface and carries ethertype ETH_P_PAE, else 0.
 */
static int rtllib_is_eapol_frame(struct rtllib_device *ieee,
				 struct sk_buff *skb, size_t hdrlen)
{
	struct net_device *dev = ieee->dev;
	u16 fc, ethertype;
	struct rtllib_hdr_4addr *hdr;
	u8 *pos;

	if (skb->len < 24)
		return 0;

	hdr = (struct rtllib_hdr_4addr *)skb->data;
	fc = le16_to_cpu(hdr->frame_ctl);

	/* check that the frame is unicast frame to us */
	if ((fc & (RTLLIB_FCTL_TODS | RTLLIB_FCTL_FROMDS)) ==
	    RTLLIB_FCTL_TODS &&
	    memcmp(hdr->addr1, dev->dev_addr, ETH_ALEN) == 0 &&
	    memcmp(hdr->addr3, dev->dev_addr, ETH_ALEN) == 0) {
		/* ToDS frame with own addr BSSID and DA */
	} else if ((fc & (RTLLIB_FCTL_TODS | RTLLIB_FCTL_FROMDS)) ==
		   RTLLIB_FCTL_FROMDS &&
		   memcmp(hdr->addr1, dev->dev_addr, ETH_ALEN) == 0) {
		/* FromDS frame with own addr as DA */
	} else {
		return 0;
	}

	/* The ethertype is read at offsets hdrlen + 6/7, so bound the
	 * check on hdrlen rather than the fixed "24 + 8" of the original
	 * code, which under-checked QoS frames (hdrlen 26) and allowed a
	 * small out-of-bounds read.
	 */
	if (skb->len < hdrlen + 8)
		return 0;

	/* check for port access entity Ethernet type */
	pos = skb->data + hdrlen;
	ethertype = (pos[6] << 8) | pos[7];
	if (ethertype == ETH_P_PAE)
		return 1;

	return 0;
}
/* Called only as a tasklet (software IRQ), by rtllib_rx */
/* Run per-MPDU decryption on a received frame.
 * Returns 0 when no crypto is configured, the decrypt_mpdu result
 * (>= 0, typically the key index) on success, or -1 on failure.  When
 * hardware security is active the cb descriptor is flagged so the crypto
 * op can skip software decryption (unless need_sw_enc forces it).
 */
static inline int
rtllib_rx_frame_decrypt(struct rtllib_device *ieee, struct sk_buff *skb,
			struct lib80211_crypt_data *crypt)
{
	struct rtllib_hdr_4addr *hdr;
	int res, hdrlen;

	if (crypt == NULL || crypt->ops->decrypt_mpdu == NULL)
		return 0;

	if (ieee->hwsec_active) {
		struct cb_desc *tcb_desc = (struct cb_desc *)
						(skb->cb + MAX_DEV_ADDR_SIZE);

		tcb_desc->bHwSec = 1;

		if (ieee->need_sw_enc)
			tcb_desc->bHwSec = 0;
	}

	hdr = (struct rtllib_hdr_4addr *)skb->data;
	hdrlen = rtllib_get_hdrlen(le16_to_cpu(hdr->frame_ctl));

	/* Pin the crypto context across the callback. */
	atomic_inc(&crypt->refcnt);
	res = crypt->ops->decrypt_mpdu(skb, hdrlen, crypt->priv);
	atomic_dec(&crypt->refcnt);
	if (res < 0) {
		netdev_dbg(ieee->dev, "decryption failed (SA= %pM) res=%d\n",
			   hdr->addr2, res);
		if (res == -2)
			netdev_dbg(ieee->dev,
				   "Decryption failed ICV mismatch (key %d)\n",
				   skb->data[hdrlen + 3] >> 6);
		return -1;
	}

	return res;
}
/* Called only as a tasklet (software IRQ), by rtllib_rx */
/* Run per-MSDU decryption/MIC verification (e.g. TKIP Michael) on a
 * reassembled frame.  Returns 0 on success or when no MSDU-level crypto
 * is configured, -1 on verification failure.  Mirrors the hw-security
 * flagging done in rtllib_rx_frame_decrypt().
 */
static inline int
rtllib_rx_frame_decrypt_msdu(struct rtllib_device *ieee, struct sk_buff *skb,
			     int keyidx, struct lib80211_crypt_data *crypt)
{
	struct rtllib_hdr_4addr *hdr;
	int res, hdrlen;

	if (crypt == NULL || crypt->ops->decrypt_msdu == NULL)
		return 0;

	if (ieee->hwsec_active) {
		struct cb_desc *tcb_desc = (struct cb_desc *)
						(skb->cb + MAX_DEV_ADDR_SIZE);

		tcb_desc->bHwSec = 1;

		if (ieee->need_sw_enc)
			tcb_desc->bHwSec = 0;
	}

	hdr = (struct rtllib_hdr_4addr *)skb->data;
	hdrlen = rtllib_get_hdrlen(le16_to_cpu(hdr->frame_ctl));

	/* Pin the crypto context across the callback. */
	atomic_inc(&crypt->refcnt);
	res = crypt->ops->decrypt_msdu(skb, keyidx, hdrlen, crypt->priv);
	atomic_dec(&crypt->refcnt);
	if (res < 0) {
		netdev_dbg(ieee->dev,
			   "MSDU decryption/MIC verification failed (SA= %pM keyidx=%d)\n",
			   hdr->addr2, keyidx);
		return -1;
	}

	return 0;
}
/* this function is stolen from ipw2200 driver*/
#define IEEE_PACKET_RETRY_TIME (5 * HZ)
/* Detect a retransmitted duplicate of a recently seen frame.
 * Tracks the last (seq, frag) per TID — per peer in ad-hoc mode (hash
 * table keyed by the low byte of the source MAC), globally per TID in
 * infrastructure mode.  Returns 1 when the frame repeats the previous
 * (seq, frag) or arrives as an out-of-order fragment within
 * IEEE_PACKET_RETRY_TIME, 0 otherwise.  Entries kmalloc'd for the IBSS
 * hash are presumably freed elsewhere (on network teardown) — not here.
 */
static int is_duplicate_packet(struct rtllib_device *ieee,
			       struct rtllib_hdr_4addr *header)
{
	u16 fc = le16_to_cpu(header->frame_ctl);
	u16 sc = le16_to_cpu(header->seq_ctl);
	u16 seq = WLAN_GET_SEQ_SEQ(sc);
	u16 frag = WLAN_GET_SEQ_FRAG(sc);
	u16 *last_seq, *last_frag;
	unsigned long *last_time;
	struct rtllib_hdr_3addrqos *hdr_3addrqos;
	struct rtllib_hdr_4addrqos *hdr_4addrqos;
	u8 tid;

	/* QoS frames get per-AC state in rows 1..4; non-QoS uses row 0. */
	if (((fc & RTLLIB_FCTL_DSTODS) == RTLLIB_FCTL_DSTODS) &&
	    RTLLIB_QOS_HAS_SEQ(fc)) {
		hdr_4addrqos = (struct rtllib_hdr_4addrqos *)header;
		tid = le16_to_cpu(hdr_4addrqos->qos_ctl) & RTLLIB_QCTL_TID;
		tid = UP2AC(tid);
		tid++;
	} else if (RTLLIB_QOS_HAS_SEQ(fc)) {
		hdr_3addrqos = (struct rtllib_hdr_3addrqos *)header;
		tid = le16_to_cpu(hdr_3addrqos->qos_ctl) & RTLLIB_QCTL_TID;
		tid = UP2AC(tid);
		tid++;
	} else {
		tid = 0;
	}

	switch (ieee->iw_mode) {
	case IW_MODE_ADHOC:
	{
		struct list_head *p;
		struct ieee_ibss_seq *entry = NULL;
		u8 *mac = header->addr2;
		int index = mac[5] % IEEE_IBSS_MAC_HASH_SIZE;

		/* Find (or create) per-peer state in the IBSS hash. */
		list_for_each(p, &ieee->ibss_mac_hash[index]) {
			entry = list_entry(p, struct ieee_ibss_seq, list);
			if (!memcmp(entry->mac, mac, ETH_ALEN))
				break;
		}
		if (p == &ieee->ibss_mac_hash[index]) {
			entry = kmalloc(sizeof(struct ieee_ibss_seq),
					GFP_ATOMIC);
			if (!entry)
				return 0;

			ether_addr_copy(entry->mac, mac);
			entry->seq_num[tid] = seq;
			entry->frag_num[tid] = frag;
			entry->packet_time[tid] = jiffies;
			list_add(&entry->list, &ieee->ibss_mac_hash[index]);
			return 0;
		}
		last_seq = &entry->seq_num[tid];
		last_frag = &entry->frag_num[tid];
		last_time = &entry->packet_time[tid];
		break;
	}

	case IW_MODE_INFRA:
		last_seq = &ieee->last_rxseq_num[tid];
		last_frag = &ieee->last_rxfrag_num[tid];
		last_time = &ieee->last_packet_time[tid];
		break;
	default:
		return 0;
	}

	/* Same sequence number seen recently: either an exact retransmit
	 * of the same fragment or an out-of-order fragment — drop both.
	 */
	if ((*last_seq == seq) &&
	    time_after(*last_time + IEEE_PACKET_RETRY_TIME, jiffies)) {
		if (*last_frag == frag)
			goto drop;
		if (*last_frag + 1 != frag)
			/* out-of-order fragment */
			goto drop;
	} else {
		*last_seq = seq;
	}

	*last_frag = frag;
	*last_time = jiffies;
	return 0;

drop:

	return 1;
}
/*
 * AddReorderEntry - queue a reorder entry on the TS pending list, kept
 * sorted by sequence number (newest/largest SN near the head; consumers
 * pop from the tail via ->prev).
 *
 * Returns false without inserting when an entry with the same SN is
 * already queued (duplicate MPDU), true on success.
 */
static bool AddReorderEntry(struct rx_ts_record *pTS,
			    struct rx_reorder_entry *pReorderEntry)
{
	struct list_head *pList = &pTS->rx_pending_pkt_list;

	/* Advance the cursor past every queued entry with a larger SN. */
	while (pList->next != &pTS->rx_pending_pkt_list) {
		struct rx_reorder_entry *next_entry =
			list_entry(pList->next, struct rx_reorder_entry,
				   List);

		if (SN_LESS(pReorderEntry->SeqNum, next_entry->SeqNum))
			pList = pList->next;
		else if (SN_EQUAL(pReorderEntry->SeqNum, next_entry->SeqNum))
			return false;
		else
			break;
	}

	/* The original open-coded exactly this insertion; use the list
	 * API instead of raw pointer surgery.
	 */
	list_add(&pReorderEntry->List, pList);

	return true;
}
/* Deliver @index receive buffers (and all their A-MSDU subframes) to the
 * network stack.  Each subframe is converted from 802.11/LLC framing to
 * an Ethernet frame — RFC1042/Bridge-Tunnel SNAP headers are replaced by
 * src/dst MACs, anything else gets a raw length + MAC header — then
 * pushed via netif_rx().  The rtllib_rxb containers are freed; callers
 * must not touch them afterwards.
 */
void rtllib_indicate_packets(struct rtllib_device *ieee,
			     struct rtllib_rxb **prxbIndicateArray, u8 index)
{
	struct net_device_stats *stats = &ieee->stats;
	u8 i = 0, j = 0;
	u16 ethertype;

	for (j = 0; j < index; j++) {
		struct rtllib_rxb *prxb = prxbIndicateArray[j];

		for (i = 0; i < prxb->nr_subframes; i++) {
			struct sk_buff *sub_skb = prxb->subframes[i];

			/* convert hdr + possible LLC headers into Ethernet header */
			ethertype = (sub_skb->data[6] << 8) | sub_skb->data[7];
			if (sub_skb->len >= 8 &&
			    ((memcmp(sub_skb->data, rfc1042_header,
				     SNAP_SIZE) == 0 &&
			      ethertype != ETH_P_AARP &&
			      ethertype != ETH_P_IPX) ||
			     memcmp(sub_skb->data, bridge_tunnel_header,
				    SNAP_SIZE) == 0)) {
				/* remove RFC1042 or Bridge-Tunnel encapsulation
				 * and replace EtherType
				 */
				skb_pull(sub_skb, SNAP_SIZE);
				memcpy(skb_push(sub_skb, ETH_ALEN), prxb->src, ETH_ALEN);
				memcpy(skb_push(sub_skb, ETH_ALEN), prxb->dst, ETH_ALEN);
			} else {
				u16 len;

				/* Leave Ethernet header part of hdr and full payload
				 * NOTE(review): len is copied in host byte
				 * order, not network order — confirm
				 * consumers expect this.
				 */
				len = sub_skb->len;
				memcpy(skb_push(sub_skb, 2), &len, 2);
				memcpy(skb_push(sub_skb, ETH_ALEN), prxb->src, ETH_ALEN);
				memcpy(skb_push(sub_skb, ETH_ALEN), prxb->dst, ETH_ALEN);
			}

			/* Indicate the packets to upper layer */
			if (sub_skb) {
				/* Counters are bumped both in ieee->stats
				 * and in the netdev stats.
				 */
				stats->rx_packets++;
				stats->rx_bytes += sub_skb->len;

				memset(sub_skb->cb, 0, sizeof(sub_skb->cb));
				sub_skb->protocol = eth_type_trans(sub_skb,
								   ieee->dev);
				sub_skb->dev = ieee->dev;
				sub_skb->dev->stats.rx_packets++;
				sub_skb->dev->stats.rx_bytes += sub_skb->len;
				/* 802.11 crc not sufficient */
				sub_skb->ip_summed = CHECKSUM_NONE;
				ieee->last_rx_ps_time = jiffies;
				netif_rx(sub_skb);
			}
		}
		kfree(prxb);
		prxb = NULL;
	}
}
/* Flush every packet buffered on the TS reorder queue up to the stack.
 * Stops the pending timer, collects up to REORDER_WIN_SIZE buffers from
 * the queue tail (oldest first), recycles the reorder entries and hands
 * the buffers to rtllib_indicate_packets(); finally resets the indicate
 * sequence to the 0xffff "uninitialized" sentinel.
 */
void rtllib_FlushRxTsPendingPkts(struct rtllib_device *ieee,
				 struct rx_ts_record *pTS)
{
	struct rx_reorder_entry *pRxReorderEntry;
	u8 RfdCnt = 0;

	del_timer_sync(&pTS->rx_pkt_pending_timer);
	while (!list_empty(&pTS->rx_pending_pkt_list)) {
		/* RfdArray can hold at most REORDER_WIN_SIZE buffers. */
		if (RfdCnt >= REORDER_WIN_SIZE) {
			netdev_info(ieee->dev,
				    "-------------->%s() error! RfdCnt >= REORDER_WIN_SIZE\n",
				    __func__);
			break;
		}

		pRxReorderEntry = (struct rx_reorder_entry *)
				list_entry(pTS->rx_pending_pkt_list.prev,
					   struct rx_reorder_entry, List);
		netdev_dbg(ieee->dev, "%s(): Indicate SeqNum %d!\n", __func__,
			   pRxReorderEntry->SeqNum);
		list_del_init(&pRxReorderEntry->List);

		ieee->RfdArray[RfdCnt] = pRxReorderEntry->prxb;

		RfdCnt = RfdCnt + 1;
		list_add_tail(&pRxReorderEntry->List,
			      &ieee->RxReorder_Unused_List);
	}
	rtllib_indicate_packets(ieee, ieee->RfdArray, RfdCnt);

	pTS->rx_indicate_seq = 0xffff;
}
/* Core 802.11n RX reorder machine for one received buffer.
 * Under the reorder spinlock: drops buffers below the window start,
 * slides the window on a window-start hit or a beyond-window-end
 * sequence number, buffers out-of-order frames on the TS pending list,
 * then delivers every in-order buffer to the stack.  A pending timer is
 * armed when frames remain buffered so they cannot be held forever.
 * Takes ownership of @prxb in all paths (delivered or freed).
 */
static void RxReorderIndicatePacket(struct rtllib_device *ieee,
				    struct rtllib_rxb *prxb,
				    struct rx_ts_record *pTS, u16 SeqNum)
{
	struct rt_hi_throughput *ht_info = ieee->ht_info;
	struct rx_reorder_entry *pReorderEntry = NULL;
	u8 WinSize = ht_info->rx_reorder_win_size;
	u16 WinEnd = 0;
	u8 index = 0;
	bool bMatchWinStart = false, bPktInBuf = false;
	unsigned long flags;

	netdev_dbg(ieee->dev,
		   "%s(): Seq is %d, pTS->rx_indicate_seq is %d, WinSize is %d\n",
		   __func__, SeqNum, pTS->rx_indicate_seq, WinSize);

	spin_lock_irqsave(&(ieee->reorder_spinlock), flags);

	/* Sequence numbers are mod 4096 (12-bit SN arithmetic). */
	WinEnd = (pTS->rx_indicate_seq + WinSize - 1) % 4096;
	/* Rx Reorder initialize condition.*/
	if (pTS->rx_indicate_seq == 0xffff)
		pTS->rx_indicate_seq = SeqNum;

	/* Drop out the packet which SeqNum is smaller than WinStart */
	if (SN_LESS(SeqNum, pTS->rx_indicate_seq)) {
		netdev_dbg(ieee->dev,
			   "Packet Drop! IndicateSeq: %d, NewSeq: %d\n",
			   pTS->rx_indicate_seq, SeqNum);
		ht_info->rx_reorder_drop_counter++;
		{
			int i;

			for (i = 0; i < prxb->nr_subframes; i++)
				dev_kfree_skb(prxb->subframes[i]);
			kfree(prxb);
			prxb = NULL;
		}
		spin_unlock_irqrestore(&(ieee->reorder_spinlock), flags);
		return;
	}

	/* Sliding window manipulation. Conditions includes:
	 * 1. Incoming SeqNum is equal to WinStart =>Window shift 1
	 * 2. Incoming SeqNum is larger than the WinEnd => Window shift N
	 */
	if (SN_EQUAL(SeqNum, pTS->rx_indicate_seq)) {
		pTS->rx_indicate_seq = (pTS->rx_indicate_seq + 1) % 4096;
		bMatchWinStart = true;
	} else if (SN_LESS(WinEnd, SeqNum)) {
		/* Shift WinStart so SeqNum becomes the new window end,
		 * handling the mod-4096 wrap in the else branch.
		 */
		if (SeqNum >= (WinSize - 1))
			pTS->rx_indicate_seq = SeqNum + 1 - WinSize;
		else
			pTS->rx_indicate_seq = 4095 -
					       (WinSize - (SeqNum + 1)) + 1;
		netdev_dbg(ieee->dev,
			   "Window Shift! IndicateSeq: %d, NewSeq: %d\n",
			   pTS->rx_indicate_seq, SeqNum);
	}

	/* Indication process.
	 * After Packet dropping and Sliding Window shifting as above, we can
	 * now just indicate the packets with the SeqNum smaller than latest
	 * WinStart and struct buffer other packets.
	 *
	 * For Rx Reorder condition:
	 * 1. All packets with SeqNum smaller than WinStart => Indicate
	 * 2. All packets with SeqNum larger than or equal to
	 *	 WinStart => Buffer it.
	 */
	if (bMatchWinStart) {
		/* Current packet is going to be indicated.*/
		netdev_dbg(ieee->dev,
			   "Packets indication! IndicateSeq: %d, NewSeq: %d\n",
			   pTS->rx_indicate_seq, SeqNum);
		ieee->prxbIndicateArray[0] = prxb;
		index = 1;
	} else {
		/* Current packet is going to be inserted into pending list.*/
		if (!list_empty(&ieee->RxReorder_Unused_List)) {
			pReorderEntry = (struct rx_reorder_entry *)
					list_entry(ieee->RxReorder_Unused_List.next,
					struct rx_reorder_entry, List);
			list_del_init(&pReorderEntry->List);

			/* Make a reorder entry and insert
			 * into a the packet list.
			 */
			pReorderEntry->SeqNum = SeqNum;
			pReorderEntry->prxb = prxb;

			if (!AddReorderEntry(pTS, pReorderEntry)) {
				/* Duplicate SN already queued: recycle the
				 * entry and free the buffer.
				 */
				int i;

				netdev_dbg(ieee->dev,
					   "%s(): Duplicate packet is dropped. IndicateSeq: %d, NewSeq: %d\n",
					   __func__, pTS->rx_indicate_seq,
					   SeqNum);
				list_add_tail(&pReorderEntry->List,
					      &ieee->RxReorder_Unused_List);

				for (i = 0; i < prxb->nr_subframes; i++)
					dev_kfree_skb(prxb->subframes[i]);
				kfree(prxb);
				prxb = NULL;
			} else {
				netdev_dbg(ieee->dev,
					   "Pkt insert into struct buffer. IndicateSeq: %d, NewSeq: %d\n",
					   pTS->rx_indicate_seq, SeqNum);
			}
		} else {
			/* Packets are dropped if there are not enough reorder
			 * entries. This part should be modified!! We can just
			 * indicate all the packets in struct buffer and get
			 * reorder entries.
			 */
			netdev_err(ieee->dev,
				   "%s(): There is no reorder entry! Packet is dropped!\n",
				   __func__);
			{
				int i;

				for (i = 0; i < prxb->nr_subframes; i++)
					dev_kfree_skb(prxb->subframes[i]);
				kfree(prxb);
				prxb = NULL;
			}
		}
	}

	/* Check if there is any packet need indicate.*/
	while (!list_empty(&pTS->rx_pending_pkt_list)) {
		netdev_dbg(ieee->dev, "%s(): start RREORDER indicate\n",
			   __func__);

		pReorderEntry = (struct rx_reorder_entry *)
					list_entry(pTS->rx_pending_pkt_list.prev,
						   struct rx_reorder_entry,
						   List);
		if (SN_LESS(pReorderEntry->SeqNum, pTS->rx_indicate_seq) ||
		    SN_EQUAL(pReorderEntry->SeqNum, pTS->rx_indicate_seq)) {
			/* This protect struct buffer from overflow. */
			if (index >= REORDER_WIN_SIZE) {
				netdev_err(ieee->dev,
					   "%s(): Buffer overflow!\n",
					   __func__);
				bPktInBuf = true;
				break;
			}

			list_del_init(&pReorderEntry->List);

			if (SN_EQUAL(pReorderEntry->SeqNum, pTS->rx_indicate_seq))
				pTS->rx_indicate_seq = (pTS->rx_indicate_seq + 1) %
						       4096;

			ieee->prxbIndicateArray[index] = pReorderEntry->prxb;
			netdev_dbg(ieee->dev, "%s(): Indicate SeqNum %d!\n",
				   __func__, pReorderEntry->SeqNum);
			index++;

			list_add_tail(&pReorderEntry->List,
				      &ieee->RxReorder_Unused_List);
		} else {
			bPktInBuf = true;
			break;
		}
	}

	/* Handling pending timer. Set this timer to prevent from long time
	 * Rx buffering.
	 */
	if (index > 0) {
		if (timer_pending(&pTS->rx_pkt_pending_timer))
			del_timer_sync(&pTS->rx_pkt_pending_timer);
		pTS->rx_timeout_indicate_seq = 0xffff;

		if (index > REORDER_WIN_SIZE) {
			netdev_err(ieee->dev,
				   "%s(): Rx Reorder struct buffer full!\n",
				   __func__);
			spin_unlock_irqrestore(&(ieee->reorder_spinlock),
					       flags);
			return;
		}
		rtllib_indicate_packets(ieee, ieee->prxbIndicateArray, index);
		bPktInBuf = false;
	}

	if (bPktInBuf && pTS->rx_timeout_indicate_seq == 0xffff) {
		netdev_dbg(ieee->dev, "%s(): SET rx timeout timer\n", __func__);
		pTS->rx_timeout_indicate_seq = pTS->rx_indicate_seq;
		mod_timer(&pTS->rx_pkt_pending_timer, jiffies +
			  msecs_to_jiffies(ht_info->rx_reorder_pending_time));
	}
	spin_unlock_irqrestore(&(ieee->reorder_spinlock), flags);
}
/* Split a received (possibly A-MSDU aggregated) data frame into one or
 * more per-MSDU skbs stored in @rxb.  For a plain frame the payload is
 * copied into a single fresh skb; for an A-MSDU each subframe (14-byte
 * pseudo-Ethernet header with a big-endian length, padded to 4 bytes)
 * is copied out in turn.  Returns the number of subframes produced,
 * 0 on a parse error or allocation failure.  @skb itself is consumed
 * by the caller, not here.
 */
static u8 parse_subframe(struct rtllib_device *ieee, struct sk_buff *skb,
			 struct rtllib_rx_stats *rx_stats,
			 struct rtllib_rxb *rxb, u8 *src, u8 *dst)
{
	struct rtllib_hdr_3addr  *hdr = (struct rtllib_hdr_3addr *)skb->data;
	u16 fc = le16_to_cpu(hdr->frame_ctl);

	u16 LLCOffset = sizeof(struct rtllib_hdr_3addr);
	u16 ChkLength;
	bool bIsAggregateFrame = false;
	u16 nSubframe_Length;
	u8 nPadding_Length = 0;
	u16 SeqNum = 0;
	struct sk_buff *sub_skb;
	/* just for debug purpose */
	SeqNum = WLAN_GET_SEQ_SEQ(le16_to_cpu(hdr->seq_ctl));
	/* A-MSDU presence is signalled in the QoS control field (the bit
	 * this driver models as "reserved").
	 */
	if ((RTLLIB_QOS_HAS_SEQ(fc)) &&
	    (((union frameqos *)(skb->data + RTLLIB_3ADDR_LEN))->field.reserved))
		bIsAggregateFrame = true;

	if (RTLLIB_QOS_HAS_SEQ(fc))
		LLCOffset += 2;
	if (rx_stats->bContainHTC)
		LLCOffset += sHTCLng;

	ChkLength = LLCOffset;

	if (skb->len <= ChkLength)
		return 0;

	skb_pull(skb, LLCOffset);
	ieee->bIsAggregateFrame = bIsAggregateFrame;
	if (!bIsAggregateFrame) {
		rxb->nr_subframes = 1;

		/* altered by clark 3/30/2010
		 * The struct buffer size of the skb indicated to upper layer
		 * must be less than 5000, or the defraged IP datagram
		 * in the IP layer will exceed "ipfrag_high_tresh" and be
		 * discarded. so there must not use the function
		 * "skb_copy" and "skb_clone" for "skb".
		 */

		/* Allocate new skb for releasing to upper layer */
		sub_skb = dev_alloc_skb(RTLLIB_SKBBUFFER_SIZE);
		if (!sub_skb)
			return 0;
		skb_reserve(sub_skb, 12);
		skb_put_data(sub_skb, skb->data, skb->len);
		sub_skb->dev = ieee->dev;

		rxb->subframes[0] = sub_skb;

		memcpy(rxb->src, src, ETH_ALEN);
		memcpy(rxb->dst, dst, ETH_ALEN);
		rxb->subframes[0]->dev = ieee->dev;
		return 1;
	}

	rxb->nr_subframes = 0;
	memcpy(rxb->src, src, ETH_ALEN);
	memcpy(rxb->dst, dst, ETH_ALEN);
	while (skb->len > ETHERNET_HEADER_SIZE) {
		/* Offset 12 denote 2 mac address */
		nSubframe_Length = *((u16 *)(skb->data + 12));
		/* Byte-swap: the subframe length is big-endian on air. */
		nSubframe_Length = (nSubframe_Length >> 8) +
				   (nSubframe_Length << 8);

		if (skb->len < (ETHERNET_HEADER_SIZE + nSubframe_Length)) {
			netdev_info(ieee->dev,
				    "%s: A-MSDU parse error!! pRfd->nTotalSubframe : %d\n",
				    __func__, rxb->nr_subframes);
			netdev_info(ieee->dev,
				    "%s: A-MSDU parse error!! Subframe Length: %d\n",
				    __func__, nSubframe_Length);
			netdev_info(ieee->dev,
				    "nRemain_Length is %d and nSubframe_Length is : %d\n",
				    skb->len, nSubframe_Length);
			netdev_info(ieee->dev,
				    "The Packet SeqNum is %d\n",
				    SeqNum);
			return 0;
		}

		/* move the data point to data content */
		skb_pull(skb, ETHERNET_HEADER_SIZE);

		/* altered by clark 3/30/2010
		 * The struct buffer size of the skb indicated to upper layer
		 * must be less than 5000, or the defraged IP datagram
		 * in the IP layer will exceed "ipfrag_high_tresh" and be
		 * discarded. so there must not use the function
		 * "skb_copy" and "skb_clone" for "skb".
		 */

		/* Allocate new skb for releasing to upper layer */
		sub_skb = dev_alloc_skb(nSubframe_Length + 12);
		if (!sub_skb)
			return 0;
		skb_reserve(sub_skb, 12);
		skb_put_data(sub_skb, skb->data, nSubframe_Length);

		sub_skb->dev = ieee->dev;
		rxb->subframes[rxb->nr_subframes++] = sub_skb;
		if (rxb->nr_subframes >= MAX_SUBFRAME_COUNT) {
			netdev_dbg(ieee->dev,
				   "ParseSubframe(): Too many Subframes! Packets dropped!\n");
			break;
		}
		skb_pull(skb, nSubframe_Length);

		if (skb->len != 0) {
			/* Subframes (except the last) are padded to a
			 * 4-byte boundary.
			 */
			nPadding_Length = 4 - ((nSubframe_Length +
					  ETHERNET_HEADER_SIZE) % 4);
			if (nPadding_Length == 4)
				nPadding_Length = 0;

			if (skb->len < nPadding_Length)
				return 0;

			skb_pull(skb, nPadding_Length);
		}
	}

	return rxb->nr_subframes;
}
/*
 * rtllib_rx_get_hdrlen - compute the 802.11 header length of a frame.
 * Adds 4 bytes when an HT Control field is present and records the
 * HTC/QoS properties in @rx_stats as a side effect.
 */
static size_t rtllib_rx_get_hdrlen(struct rtllib_device *ieee,
				   struct sk_buff *skb,
				   struct rtllib_rx_stats *rx_stats)
{
	struct rtllib_hdr_4addr *frame = (struct rtllib_hdr_4addr *)skb->data;
	u16 frame_ctl = le16_to_cpu(frame->frame_ctl);
	size_t len = rtllib_get_hdrlen(frame_ctl);

	if (HTCCheck(ieee, skb->data)) {
		if (net_ratelimit())
			netdev_info(ieee->dev, "%s: find HTCControl!\n",
				    __func__);
		/* HT Control field adds 4 bytes to the header. */
		len += 4;
		rx_stats->bContainHTC = true;
	}

	if (RTLLIB_QOS_HAS_SEQ(frame_ctl))
		rx_stats->bIsQosData = true;

	return len;
}
/* Decide whether a received frame is a duplicate that must be dropped.
 * When RX reordering is off (or the frame is legacy/non-data) the
 * classic per-TID/per-peer check in is_duplicate_packet() is used,
 * skipping beacons.  When reordering is active the per-TS last seq/frag
 * pair is compared instead — only frames with the retry bit set count
 * as duplicates there.  Returns 0 to keep the frame, -1 to drop it
 * (including when no TS record can be obtained).
 */
static int rtllib_rx_check_duplicate(struct rtllib_device *ieee,
				     struct sk_buff *skb, u8 multicast)
{
	struct rtllib_hdr_4addr *hdr = (struct rtllib_hdr_4addr *)skb->data;
	u16 fc, sc;
	u8 frag, type, stype;

	fc = le16_to_cpu(hdr->frame_ctl);
	type = WLAN_FC_GET_TYPE(fc);
	stype = WLAN_FC_GET_STYPE(fc);
	sc = le16_to_cpu(hdr->seq_ctl);
	frag = WLAN_GET_SEQ_FRAG(sc);

	if (!ieee->ht_info->cur_rx_reorder_enable ||
	    !ieee->current_network.qos_data.active ||
	    !IsDataFrame(skb->data) ||
	    IsLegacyDataFrame(skb->data)) {
		if (!((type == RTLLIB_FTYPE_MGMT) &&
		      (stype == RTLLIB_STYPE_BEACON))) {
			if (is_duplicate_packet(ieee, hdr))
				return -1;
		}
	} else {
		struct rx_ts_record *pRxTS = NULL;

		if (GetTs(ieee, (struct ts_common_info **)&pRxTS, hdr->addr2,
			  (u8)Frame_QoSTID((u8 *)(skb->data)), RX_DIR, true)) {
			/* Bit 11 of frame control is the retry flag. */
			if ((fc & (1 << 11)) && (frag == pRxTS->rx_last_frag_num) &&
			    (WLAN_GET_SEQ_SEQ(sc) == pRxTS->rx_last_seq_num))
				return -1;
			pRxTS->rx_last_frag_num = frag;
			pRxTS->rx_last_seq_num = WLAN_GET_SEQ_SEQ(sc);
		} else {
			netdev_warn(ieee->dev, "%s(): No TS! Skip the check!\n",
				    __func__);
			return -1;
		}
	}

	return 0;
}
/*
 * rtllib_rx_extract_addr - derive DA/SA/BSSID from the 802.11 header.
 * The address fields' meanings depend on the ToDS/FromDS bits, per the
 * standard 802.11 address-field table.
 */
static void rtllib_rx_extract_addr(struct rtllib_device *ieee,
				   struct rtllib_hdr_4addr *hdr, u8 *dst,
				   u8 *src, u8 *bssid)
{
	u16 fc = le16_to_cpu(hdr->frame_ctl);
	u16 ds_bits = fc & (RTLLIB_FCTL_FROMDS | RTLLIB_FCTL_TODS);

	if (ds_bits == RTLLIB_FCTL_FROMDS) {
		/* AP -> STA: addr1 = DA, addr3 = SA, addr2 = BSSID */
		ether_addr_copy(dst, hdr->addr1);
		ether_addr_copy(src, hdr->addr3);
		ether_addr_copy(bssid, hdr->addr2);
	} else if (ds_bits == RTLLIB_FCTL_TODS) {
		/* STA -> AP: addr3 = DA, addr2 = SA, addr1 = BSSID */
		ether_addr_copy(dst, hdr->addr3);
		ether_addr_copy(src, hdr->addr2);
		ether_addr_copy(bssid, hdr->addr1);
	} else if (ds_bits == (RTLLIB_FCTL_FROMDS | RTLLIB_FCTL_TODS)) {
		/* WDS 4-address frame; BSSID taken from the current network. */
		ether_addr_copy(dst, hdr->addr3);
		ether_addr_copy(src, hdr->addr4);
		ether_addr_copy(bssid, ieee->current_network.bssid);
	} else {
		/* IBSS / direct frame. */
		ether_addr_copy(dst, hdr->addr1);
		ether_addr_copy(src, hdr->addr2);
		ether_addr_copy(bssid, hdr->addr3);
	}
}
/* Filter out data frames this station should not process.
 * Drops: frames from a foreign BSS, STA-originated frames the AP will
 * forward back to us (in promiscuous mode with source filtering on),
 * dataless subtypes (unless promiscuous), our own echoed frames, and
 * multicast not belonging to our BSS.  Returns 0 to keep, -1 to drop.
 */
static int rtllib_rx_data_filter(struct rtllib_device *ieee, u16 fc,
				 u8 *dst, u8 *src, u8 *bssid, u8 *addr2)
{
	u8 type, stype;

	type = WLAN_FC_GET_TYPE(fc);
	stype = WLAN_FC_GET_STYPE(fc);

	/* Filter frames from different BSS */
	if (((fc & RTLLIB_FCTL_DSTODS) != RTLLIB_FCTL_DSTODS) &&
	    !ether_addr_equal(ieee->current_network.bssid, bssid) &&
	    !is_zero_ether_addr(ieee->current_network.bssid)) {
		return -1;
	}

	/* Filter packets sent by an STA that will be forwarded by AP */
	if (ieee->intel_promiscuous_md_info.promiscuous_on &&
	    ieee->intel_promiscuous_md_info.fltr_src_sta_frame) {
		if ((fc & RTLLIB_FCTL_TODS) && !(fc & RTLLIB_FCTL_FROMDS) &&
		    !ether_addr_equal(dst, ieee->current_network.bssid) &&
		    ether_addr_equal(bssid, ieee->current_network.bssid)) {
			return -1;
		}
	}

	/* Nullfunc frames may have PS-bit set, so they must be passed to
	 * hostap_handle_sta_rx() before being dropped here.
	 */
	if (!ieee->intel_promiscuous_md_info.promiscuous_on) {
		if (stype != RTLLIB_STYPE_DATA &&
		    stype != RTLLIB_STYPE_DATA_CFACK &&
		    stype != RTLLIB_STYPE_DATA_CFPOLL &&
		    stype != RTLLIB_STYPE_DATA_CFACKPOLL &&
		    stype != RTLLIB_STYPE_QOS_DATA) {
			if (stype != RTLLIB_STYPE_NULLFUNC)
				netdev_dbg(ieee->dev,
					   "RX: dropped data frame with no data (type=0x%02x, subtype=0x%02x)\n",
					   type, stype);
			return -1;
		}
	}

	/* packets from our adapter are dropped (echo) */
	if (!memcmp(src, ieee->dev->dev_addr, ETH_ALEN))
		return -1;

	/* {broad,multi}cast packets to our BSS go through */
	if (is_multicast_ether_addr(dst)) {
		if (memcmp(bssid, ieee->current_network.bssid,
			   ETH_ALEN))
			return -1;
	}
	return 0;
}
/*
 * rtllib_rx_get_crypt - select the crypto context for a received frame.
 * The key index comes from bits 6-7 of the byte at offset hdrlen + 3
 * (the fourth IV byte) when the frame is long enough to contain it.
 *
 * Returns 0 on success (*crypt may still be NULL for an unprotected
 * frame), -1 when the frame is WEP-flagged but no usable key exists.
 */
static int rtllib_rx_get_crypt(struct rtllib_device *ieee, struct sk_buff *skb,
			       struct lib80211_crypt_data **crypt, size_t hdrlen)
{
	struct rtllib_hdr_4addr *hdr = (struct rtllib_hdr_4addr *)skb->data;
	u16 fc = le16_to_cpu(hdr->frame_ctl);
	int idx = 0;

	/* Reading skb->data[hdrlen + 3] needs hdrlen + 4 bytes; the old
	 * ">= hdrlen + 3" bound allowed a one-byte out-of-bounds read on
	 * a frame of exactly hdrlen + 3 bytes.
	 */
	if (skb->len >= hdrlen + 4)
		idx = skb->data[hdrlen + 3] >> 6;

	*crypt = ieee->crypt_info.crypt[idx];
	/* allow NULL decrypt to indicate an station specific override
	 * for default encryption
	 */
	if (*crypt && ((*crypt)->ops == NULL ||
		       (*crypt)->ops->decrypt_mpdu == NULL))
		*crypt = NULL;

	if (!*crypt && (fc & RTLLIB_FCTL_WEP)) {
		/* This seems to be triggered by some (multicast?)
		 * frames from other than current BSS, so just drop the
		 * frames silently instead of filling system log with
		 * these reports.
		 */
		netdev_dbg(ieee->dev,
			   "Decryption failed (not set) (SA= %pM)\n",
			   hdr->addr2);
		return -1;
	}

	return 0;
}
/* Decrypt a received data frame and run it through 802.11 fragment
 * reassembly.
 *
 * Returns 0 when skb holds a complete (plaintext) MSDU, -1 when the
 * frame must be dropped, and -2 when this was a non-final fragment that
 * is now held in the fragment cache (nothing to indicate yet).
 */
static int rtllib_rx_decrypt(struct rtllib_device *ieee, struct sk_buff *skb,
			     struct rtllib_rx_stats *rx_stats,
			     struct lib80211_crypt_data *crypt, size_t hdrlen)
{
	struct rtllib_hdr_4addr *hdr;
	int keyidx = 0;
	u16 fc, sc;
	u8 frag;

	hdr = (struct rtllib_hdr_4addr *)skb->data;
	fc = le16_to_cpu(hdr->frame_ctl);
	sc = le16_to_cpu(hdr->seq_ctl);
	frag = WLAN_GET_SEQ_FRAG(sc);

	/* Fall back to software decryption when hardware did not decrypt. */
	if ((!rx_stats->Decrypted))
		ieee->need_sw_enc = 1;
	else
		ieee->need_sw_enc = 0;

	keyidx = rtllib_rx_frame_decrypt(ieee, skb, crypt);
	if ((fc & RTLLIB_FCTL_WEP) && (keyidx < 0)) {
		netdev_info(ieee->dev, "%s: decrypt frame error\n", __func__);
		return -1;
	}

	/* Decryption may have modified the buffer; reload the header. */
	hdr = (struct rtllib_hdr_4addr *)skb->data;
	if ((frag != 0 || (fc & RTLLIB_FCTL_MOREFRAGS))) {
		int flen;
		struct sk_buff *frag_skb = rtllib_frag_cache_get(ieee, hdr);

		netdev_dbg(ieee->dev, "Rx Fragment received (%u)\n", frag);
		if (!frag_skb) {
			netdev_dbg(ieee->dev,
				   "Rx cannot get skb from fragment cache (morefrag=%d seq=%u frag=%u)\n",
				   (fc & RTLLIB_FCTL_MOREFRAGS) != 0,
				   WLAN_GET_SEQ_SEQ(sc), frag);
			return -1;
		}
		/* Only the first fragment keeps its 802.11 header. */
		flen = skb->len;
		if (frag != 0)
			flen -= hdrlen;

		if (frag_skb->tail + flen > frag_skb->end) {
			netdev_warn(ieee->dev,
				    "%s: host decrypted and reassembled frame did not fit skb\n",
				    __func__);
			rtllib_frag_cache_invalidate(ieee, hdr);
			return -1;
		}

		if (frag == 0) {
			/* copy first fragment (including full headers) into
			 * beginning of the fragment cache skb
			 */
			skb_put_data(frag_skb, skb->data, flen);
		} else {
			/* append frame payload to the end of the fragment
			 * cache skb
			 */
			skb_put_data(frag_skb, skb->data + hdrlen, flen);
		}
		dev_kfree_skb_any(skb);
		skb = NULL;

		if (fc & RTLLIB_FCTL_MOREFRAGS) {
			/* more fragments expected - leave the skb in fragment
			 * cache for now; it will be delivered to upper layers
			 * after all fragments have been received
			 */
			return -2;
		}

		/* this was the last fragment and the frame will be
		 * delivered, so remove skb from fragment cache
		 */
		/* NOTE(review): 'skb' is a by-value parameter, so this
		 * reassignment is not visible to the caller -- TODO confirm
		 * callers do not dereference their (freed) skb afterwards.
		 */
		skb = frag_skb;
		hdr = (struct rtllib_hdr_4addr *)skb->data;
		rtllib_frag_cache_invalidate(ieee, hdr);
	}

	/* skb: hdr + (possible reassembled) full MSDU payload; possibly still
	 * encrypted/authenticated
	 */
	if ((fc & RTLLIB_FCTL_WEP) &&
	    rtllib_rx_frame_decrypt_msdu(ieee, skb, keyidx, crypt)) {
		netdev_info(ieee->dev, "%s: ==>decrypt msdu error\n", __func__);
		return -1;
	}

	hdr = (struct rtllib_hdr_4addr *)skb->data;
	/* Encryption configured but frame arrived in the clear: allow only
	 * EAPOL (802.1X handshake) frames through.
	 */
	if (crypt && !(fc & RTLLIB_FCTL_WEP) && !ieee->open_wep) {
		if (/*ieee->ieee802_1x &&*/
		    rtllib_is_eapol_frame(ieee, skb, hdrlen)) {
			/* pass unencrypted EAPOL frames even if encryption is
			 * configured
			 */
			struct eapol *eap = (struct eapol *)(skb->data +
				24);
			netdev_dbg(ieee->dev,
				   "RX: IEEE 802.1X EAPOL frame: %s\n",
				   eap_get_type(eap->type));
		} else {
			netdev_dbg(ieee->dev,
				   "encryption configured, but RX frame not encrypted (SA= %pM)\n",
				   hdr->addr2);
			return -1;
		}
	}
	/* Debug-log unencrypted EAPOL frames that made it this far. */
	if (crypt && !(fc & RTLLIB_FCTL_WEP) &&
	    rtllib_is_eapol_frame(ieee, skb, hdrlen)) {
		struct eapol *eap = (struct eapol *)(skb->data + 24);

		netdev_dbg(ieee->dev, "RX: IEEE 802.1X EAPOL frame: %s\n",
			   eap_get_type(eap->type));
	}
	/* Drop any remaining unencrypted non-EAPOL data frame. */
	if (crypt && !(fc & RTLLIB_FCTL_WEP) && !ieee->open_wep &&
	    !rtllib_is_eapol_frame(ieee, skb, hdrlen)) {
		netdev_dbg(ieee->dev,
			   "dropped unencrypted RX data frame from %pM (drop_unencrypted=1)\n",
			   hdr->addr2);
		return -1;
	}

	return 0;
}
/* Leave leisure power-save mode when enough unicast traffic is flowing
 * while associated; always refresh the last-RX timestamp.
 */
static void rtllib_rx_check_leave_lps(struct rtllib_device *ieee, u8 unicast,
				      u8 nr_subframes)
{
	if (unicast && ieee->link_state == MAC80211_LINKED &&
	    (((ieee->link_detect_info.NumRxUnicastOkInPeriod +
	       ieee->link_detect_info.NumTxOkInPeriod) > 8) ||
	     (ieee->link_detect_info.NumRxUnicastOkInPeriod > 2)))
		ieee->leisure_ps_leave(ieee->dev);

	ieee->last_rx_ps_time = jiffies;
}
/* Hand the parsed subframes of one received (A-)MSDU up the stack.
 *
 * Each subframe may start with an RFC1042 or bridge-tunnel LLC/SNAP
 * header; it is converted into an Ethernet-II or length-prefixed 802.3
 * style header before being passed to netif_rx().  @rxb and all of its
 * subframes are consumed.
 *
 * Fix: the original read ethertype from data[6..7] before checking
 * sub_skb->len >= 8, so runt subframes caused a read past the valid
 * payload length; the read is now done only after the length check.
 */
static void rtllib_rx_indicate_pkt_legacy(struct rtllib_device *ieee,
					  struct rtllib_rx_stats *rx_stats,
					  struct rtllib_rxb *rxb,
					  u8 *dst,
					  u8 *src)
{
	struct net_device *dev = ieee->dev;
	u16 ethertype;
	int i = 0;

	if (rxb == NULL) {
		netdev_info(dev, "%s: rxb is NULL!!\n", __func__);
		return;
	}

	for (i = 0; i < rxb->nr_subframes; i++) {
		struct sk_buff *sub_skb = rxb->subframes[i];
		bool is_snap = false;

		if (!sub_skb)
			continue;

		/* convert hdr + possible LLC headers into Ethernet header;
		 * inspect the SNAP bytes only when the subframe is long
		 * enough to contain them.
		 */
		if (sub_skb->len >= 8) {
			ethertype = (sub_skb->data[6] << 8) | sub_skb->data[7];
			is_snap = (memcmp(sub_skb->data, rfc1042_header,
					  SNAP_SIZE) == 0 &&
				   ethertype != ETH_P_AARP &&
				   ethertype != ETH_P_IPX) ||
				  memcmp(sub_skb->data, bridge_tunnel_header,
					 SNAP_SIZE) == 0;
		}
		if (is_snap) {
			/* remove RFC1042 or Bridge-Tunnel encapsulation
			 * and replace EtherType
			 */
			skb_pull(sub_skb, SNAP_SIZE);
			ether_addr_copy(skb_push(sub_skb, ETH_ALEN), src);
			ether_addr_copy(skb_push(sub_skb, ETH_ALEN), dst);
		} else {
			u16 len;

			/* Leave Ethernet header part of hdr
			 * and full payload
			 */
			len = sub_skb->len;
			/* NOTE(review): length prefix stored in host byte
			 * order, as in the original code -- confirm intended.
			 */
			memcpy(skb_push(sub_skb, 2), &len, 2);
			ether_addr_copy(skb_push(sub_skb, ETH_ALEN), src);
			ether_addr_copy(skb_push(sub_skb, ETH_ALEN), dst);
		}

		ieee->stats.rx_packets++;
		ieee->stats.rx_bytes += sub_skb->len;
		if (is_multicast_ether_addr(dst))
			ieee->stats.multicast++;

		/* Indicate the packets to upper layer */
		memset(sub_skb->cb, 0, sizeof(sub_skb->cb));
		sub_skb->protocol = eth_type_trans(sub_skb, dev);
		sub_skb->dev = dev;
		sub_skb->dev->stats.rx_packets++;
		sub_skb->dev->stats.rx_bytes += sub_skb->len;
		/* 802.11 crc not sufficient */
		sub_skb->ip_summed = CHECKSUM_NONE;
		netif_rx(sub_skb);
	}
	kfree(rxb);
}
/* Full RX path for infrastructure/ad-hoc mode: address filtering,
 * duplicate detection, management-frame dispatch, decryption and
 * defragmentation, (A-)MSDU parsing, and delivery to the upper layer
 * either directly or through the RX-reorder queue.
 *
 * Returns 1 when the skb has been consumed (delivered or freed) and 0
 * when the caller still owns it (dropped); see comment at rx_dropped.
 */
static int rtllib_rx_InfraAdhoc(struct rtllib_device *ieee, struct sk_buff *skb,
				struct rtllib_rx_stats *rx_stats)
{
	struct net_device *dev = ieee->dev;
	struct rtllib_hdr_4addr *hdr = (struct rtllib_hdr_4addr *)skb->data;
	struct lib80211_crypt_data *crypt = NULL;
	struct rtllib_rxb *rxb = NULL;
	struct rx_ts_record *pTS = NULL;
	u16 fc, sc, SeqNum = 0;
	u8 type, stype, multicast = 0, unicast = 0, nr_subframes = 0, TID = 0;
	u8 dst[ETH_ALEN];
	u8 src[ETH_ALEN];
	u8 bssid[ETH_ALEN] = {0};
	size_t hdrlen = 0;
	/* true when sniffing a unicast frame addressed to another STA
	 * (promiscuous mode); such frames skip local statistics/PS logic.
	 */
	bool bToOtherSTA = false;
	int ret = 0, i = 0;

	fc = le16_to_cpu(hdr->frame_ctl);
	type = WLAN_FC_GET_TYPE(fc);
	stype = WLAN_FC_GET_STYPE(fc);
	sc = le16_to_cpu(hdr->seq_ctl);

	/*Filter pkt not to me*/
	multicast = is_multicast_ether_addr(hdr->addr1);
	unicast = !multicast;
	if (unicast && !ether_addr_equal(dev->dev_addr, hdr->addr1)) {
		if (ieee->net_promiscuous_md)
			bToOtherSTA = true;
		else
			goto rx_dropped;
	}

	/*Filter pkt has too small length */
	hdrlen = rtllib_rx_get_hdrlen(ieee, skb, rx_stats);
	if (skb->len < hdrlen) {
		netdev_info(dev,
			    "%s():ERR!!! skb->len is smaller than hdrlen\n",
			    __func__);
		goto rx_dropped;
	}

	/* Filter Duplicate pkt */
	ret = rtllib_rx_check_duplicate(ieee, skb, multicast);
	if (ret < 0)
		goto rx_dropped;

	/* Filter CTRL Frame */
	if (type == RTLLIB_FTYPE_CTL)
		goto rx_dropped;

	/* Filter MGNT Frame */
	if (type == RTLLIB_FTYPE_MGMT) {
		if (bToOtherSTA)
			goto rx_dropped;
		if (rtllib_rx_frame_mgmt(ieee, skb, rx_stats, type, stype))
			goto rx_dropped;
		else
			goto rx_exit;
	}

	/* Filter WAPI DATA Frame */

	/* Update statstics for AP roaming */
	if (!bToOtherSTA) {
		ieee->link_detect_info.NumRecvDataInPeriod++;
		ieee->link_detect_info.NumRxOkInPeriod++;
	}

	/* Data frame - extract src/dst addresses */
	rtllib_rx_extract_addr(ieee, hdr, dst, src, bssid);

	/* Filter Data frames */
	ret = rtllib_rx_data_filter(ieee, fc, dst, src, bssid, hdr->addr2);
	if (ret < 0)
		goto rx_dropped;

	/* Header-only data frames carry no payload to deliver. */
	if (skb->len == hdrlen)
		goto rx_dropped;

	/* Send pspoll based on moredata */
	if ((ieee->iw_mode == IW_MODE_INFRA) &&
	    (ieee->sta_sleep == LPS_IS_SLEEP) &&
	    (ieee->polling) && (!bToOtherSTA)) {
		if (WLAN_FC_MORE_DATA(fc)) {
			/* more data bit is set, let's request a new frame
			 * from the AP
			 */
			rtllib_sta_ps_send_pspoll_frame(ieee);
		} else {
			ieee->polling = false;
		}
	}

	/* Get crypt if encrypted */
	ret = rtllib_rx_get_crypt(ieee, skb, &crypt, hdrlen);
	if (ret == -1)
		goto rx_dropped;

	/* Decrypt data frame (including reassemble) */
	ret = rtllib_rx_decrypt(ieee, skb, rx_stats, crypt, hdrlen);
	if (ret == -1)
		goto rx_dropped;
	else if (ret == -2)
		/* intermediate fragment, queued in the fragment cache */
		goto rx_exit;

	/* Get TS for Rx Reorder */
	hdr = (struct rtllib_hdr_4addr *)skb->data;
	if (ieee->current_network.qos_data.active && IsQoSDataFrame(skb->data)
	    && !is_multicast_ether_addr(hdr->addr1)
	    && (!bToOtherSTA)) {
		TID = Frame_QoSTID(skb->data);
		SeqNum = WLAN_GET_SEQ_SEQ(sc);
		GetTs(ieee, (struct ts_common_info **)&pTS, hdr->addr2, TID,
		      RX_DIR, true);
		if (TID != 0 && TID != 3)
			ieee->bis_any_nonbepkts = true;
	}

	/* Parse rx data frame (For AMSDU) */
	/* skb: hdr + (possible reassembled) full plaintext payload */
	rxb = kmalloc(sizeof(struct rtllib_rxb), GFP_ATOMIC);
	if (!rxb)
		goto rx_dropped;

	/* to parse amsdu packets */
	/* qos data packets & reserved bit is 1 */
	if (parse_subframe(ieee, skb, rx_stats, rxb, src, dst) == 0) {
		/* only to free rxb, and not submit the packets
		 * to upper layer
		 */
		for (i = 0; i < rxb->nr_subframes; i++)
			dev_kfree_skb(rxb->subframes[i]);
		kfree(rxb);
		rxb = NULL;
		goto rx_dropped;
	}

	/* Update WAPI PN */

	/* Check if leave LPS */
	if (!bToOtherSTA) {
		if (ieee->bIsAggregateFrame)
			nr_subframes = rxb->nr_subframes;
		else
			nr_subframes = 1;
		if (unicast)
			ieee->link_detect_info.NumRxUnicastOkInPeriod += nr_subframes;
		rtllib_rx_check_leave_lps(ieee, unicast, nr_subframes);
	}

	/* Indicate packets to upper layer or Rx Reorder */
	if (!ieee->ht_info->cur_rx_reorder_enable || pTS == NULL || bToOtherSTA)
		rtllib_rx_indicate_pkt_legacy(ieee, rx_stats, rxb, dst, src);
	else
		RxReorderIndicatePacket(ieee, rxb, pTS, SeqNum);

	dev_kfree_skb(skb);

rx_exit:
	return 1;

rx_dropped:
	ieee->stats.rx_dropped++;

	/* Returning 0 indicates to caller that we have not handled the SKB--
	 * so it is still allocated and can be used again by underlying
	 * hardware as a DMA target
	 */
	return 0;
}
/* RX path for monitor mode: validate the header length, account for an
 * HT Control field, then pass the raw frame to rtllib_monitor_rx().
 * Returns 1 when the frame was handed over, 0 when it was too short.
 */
static int rtllib_rx_Monitor(struct rtllib_device *ieee, struct sk_buff *skb,
			     struct rtllib_rx_stats *rx_stats)
{
	struct rtllib_hdr_4addr *frame_hdr = (struct rtllib_hdr_4addr *)skb->data;
	u16 frame_ctl = le16_to_cpu(frame_hdr->frame_ctl);
	size_t header_size = rtllib_get_hdrlen(frame_ctl);

	if (skb->len < header_size) {
		netdev_info(ieee->dev,
			    "%s():ERR!!! skb->len is smaller than hdrlen\n",
			    __func__);
		return 0;
	}

	/* An HT Control field extends the 802.11 header by four bytes. */
	if (HTCCheck(ieee, skb->data)) {
		if (net_ratelimit())
			netdev_info(ieee->dev, "%s: Find HTCControl!\n",
				    __func__);
		header_size += 4;
	}

	ieee->stats.rx_packets++;
	ieee->stats.rx_bytes += skb->len;
	rtllib_monitor_rx(ieee, skb, rx_stats, header_size);

	return 1;
}
/* All received frames are sent to this function. @skb contains the frame in
* IEEE 802.11 format, i.e., in the format it was sent over air.
* This function is called only as a tasklet (software IRQ).
*/
int rtllib_rx(struct rtllib_device *ieee, struct sk_buff *skb,
	      struct rtllib_rx_stats *rx_stats)
{
	/* Guard against bogus input before dereferencing anything. */
	if (!ieee || !skb || !rx_stats) {
		pr_info("%s: Input parameters NULL!\n", __func__);
		if (ieee)
			ieee->stats.rx_dropped++;
		return 0;
	}

	/* Frames shorter than the smallest 802.11 header are garbage. */
	if (skb->len < 10) {
		netdev_info(ieee->dev, "%s: SKB length < 10\n", __func__);
		ieee->stats.rx_dropped++;
		return 0;
	}

	/* Dispatch on the configured wireless mode. */
	if (ieee->iw_mode == IW_MODE_ADHOC || ieee->iw_mode == IW_MODE_INFRA)
		return rtllib_rx_InfraAdhoc(ieee, skb, rx_stats);
	if (ieee->iw_mode == IW_MODE_MONITOR)
		return rtllib_rx_Monitor(ieee, skb, rx_stats);

	netdev_info(ieee->dev, "%s: ERR iw mode!!!\n", __func__);
	return 0;
}
EXPORT_SYMBOL(rtllib_rx);
/* Microsoft/WMM OUI (00:50:F2) expected in QoS information elements. */
static u8 qos_oui[QOS_OUI_LEN] = { 0x00, 0x50, 0xF2 };
/* Make sure the structure we read from the beacon packet has the right values */
/* Validate a WMM/QoS information element: element ID, OUI, OUI type,
 * subtype and version must all match.  Returns 0 when valid, -1 otherwise.
 */
static int rtllib_verify_qos_info(struct rtllib_qos_information_element
				  *info_element, int sub_type)
{
	int valid = info_element->elementID == QOS_ELEMENT_ID &&
		    info_element->qui_subtype == sub_type &&
		    memcmp(info_element->qui, qos_oui, QOS_OUI_LEN) == 0 &&
		    info_element->qui_type == QOS_OUI_TYPE &&
		    info_element->version == QOS_VERSION_1;

	return valid ? 0 : -1;
}
/* Parse a QoS parameter element */
/* Parse a QoS parameter element */
static int rtllib_read_qos_param_element(
		struct rtllib_qos_parameter_info *element_param,
		struct rtllib_info_element *info_element)
{
	size_t want = sizeof(*element_param);

	if (!element_param || !info_element)
		return -1;
	/* IE length excludes the two-byte id/len header. */
	if (info_element->len != want - 2)
		return -1;

	memcpy(element_param, info_element, want);
	return rtllib_verify_qos_info(&element_param->info_element,
				      QOS_OUI_PARAM_SUB_TYPE);
}
/* Parse a QoS information element */
/* Parse a QoS information element */
static int rtllib_read_qos_info_element(
		struct rtllib_qos_information_element *element_info,
		struct rtllib_info_element *info_element)
{
	size_t want = sizeof(*element_info);

	if (!element_info || !info_element)
		return -1;
	/* IE length excludes the two-byte id/len header. */
	if (info_element->len != want - 2)
		return -1;

	memcpy(element_info, info_element, want);
	return rtllib_verify_qos_info(element_info, QOS_OUI_INFO_SUB_TYPE);
}
/* Write QoS parameters from the ac parameters. */
/* Write QoS parameters from the ac parameters.
 *
 * For each access category record: derive the ACI/ACM bits, fold the
 * admission-control flag into the UP-bitmap wmm_acm, and copy AIFS,
 * contention-window and TXOP values into qos_data->parameters.
 */
static int rtllib_qos_convert_ac_to_parameters(struct rtllib_qos_parameter_info *param_elm,
					       struct rtllib_qos_data *qos_data)
{
	struct rtllib_qos_parameters *qos_param = &qos_data->parameters;
	int i;

	qos_data->wmm_acm = 0;
	for (i = 0; i < QOS_QUEUE_NUM; i++) {
		struct rtllib_qos_ac_parameter *ac =
			&param_elm->ac_params_record[i];
		u8 aci = (ac->aci_aifsn & 0x60) >> 5;
		u8 acm = (ac->aci_aifsn & 0x10) >> 4;

		if (aci >= QOS_QUEUE_NUM)
			continue;

		/* Map each ACI to its pair of user-priority bits. */
		if (acm) {
			if (aci == 1)		/* BIT(0) | BIT(3) */
				qos_data->wmm_acm |= (0x01 << 0) | (0x01 << 3);
			else if (aci == 2)	/* BIT(4) | BIT(5) */
				qos_data->wmm_acm |= (0x01 << 4) | (0x01 << 5);
			else if (aci == 3)	/* BIT(6) | BIT(7) */
				qos_data->wmm_acm |= (0x01 << 6) | (0x01 << 7);
			else			/* BIT(1) | BIT(2) */
				qos_data->wmm_acm |= (0x01 << 1) | (0x01 << 2);
		}

		qos_param->aifs[aci] = ac->aci_aifsn & 0x0f;
		/* WMM spec P.11: The minimum value for AIFSN shall be 2 */
		qos_param->aifs[aci] = max_t(u8, qos_param->aifs[aci], 2);
		qos_param->cw_min[aci] = cpu_to_le16(ac->ecw_min_max & 0x0F);
		qos_param->cw_max[aci] =
			cpu_to_le16((ac->ecw_min_max & 0xF0) >> 4);
		qos_param->flag[aci] = acm ? 0x01 : 0x00;
		qos_param->tx_op_limit[aci] = ac->tx_op_limit;
	}
	return 0;
}
/* we have a generic data element which it may contain QoS information or
* parameters element. check the information element length to decide
* which type to read
*/
/* we have a generic data element which it may contain QoS information or
 * parameters element. check the information element length to decide
 * which type to read
 */
static int rtllib_parse_qos_info_param_IE(struct rtllib_device *ieee,
					  struct rtllib_info_element
					  *info_element,
					  struct rtllib_network *network)
{
	struct rtllib_qos_information_element qos_info_element;
	struct rtllib_qos_parameter_info param_element;
	int rc;

	/* Try the shorter QoS information element first. */
	rc = rtllib_read_qos_info_element(&qos_info_element, info_element);
	if (rc == 0) {
		network->qos_data.param_count = qos_info_element.ac_info & 0x0F;
		network->flags |= NETWORK_HAS_QOS_INFORMATION;
	} else {
		/* Otherwise it may be a full QoS parameter element. */
		rc = rtllib_read_qos_param_element(&param_element,
						   info_element);
		if (rc == 0) {
			rtllib_qos_convert_ac_to_parameters(&param_element,
							    &network->qos_data);
			network->flags |= NETWORK_HAS_QOS_PARAMETERS;
			network->qos_data.param_count =
				param_element.info_element.ac_info & 0x0F;
		}
	}

	if (rc == 0) {
		netdev_dbg(ieee->dev, "QoS is supported\n");
		network->qos_data.supported = 1;
	}
	return rc;
}
/* Map an information-element id to a human-readable name for debugging. */
static const char *get_info_element_string(u16 id)
{
	static const struct {
		u16 id;
		const char *name;
	} ie_names[] = {
		{ MFIE_TYPE_SSID,		"SSID" },
		{ MFIE_TYPE_RATES,		"RATES" },
		{ MFIE_TYPE_FH_SET,		"FH_SET" },
		{ MFIE_TYPE_DS_SET,		"DS_SET" },
		{ MFIE_TYPE_CF_SET,		"CF_SET" },
		{ MFIE_TYPE_TIM,		"TIM" },
		{ MFIE_TYPE_IBSS_SET,		"IBSS_SET" },
		{ MFIE_TYPE_COUNTRY,		"COUNTRY" },
		{ MFIE_TYPE_HOP_PARAMS,		"HOP_PARAMS" },
		{ MFIE_TYPE_HOP_TABLE,		"HOP_TABLE" },
		{ MFIE_TYPE_REQUEST,		"REQUEST" },
		{ MFIE_TYPE_CHALLENGE,		"CHALLENGE" },
		{ MFIE_TYPE_POWER_CONSTRAINT,	"POWER_CONSTRAINT" },
		{ MFIE_TYPE_POWER_CAPABILITY,	"POWER_CAPABILITY" },
		{ MFIE_TYPE_TPC_REQUEST,	"TPC_REQUEST" },
		{ MFIE_TYPE_TPC_REPORT,		"TPC_REPORT" },
		{ MFIE_TYPE_SUPP_CHANNELS,	"SUPP_CHANNELS" },
		{ MFIE_TYPE_CSA,		"CSA" },
		{ MFIE_TYPE_MEASURE_REQUEST,	"MEASURE_REQUEST" },
		{ MFIE_TYPE_MEASURE_REPORT,	"MEASURE_REPORT" },
		{ MFIE_TYPE_QUIET,		"QUIET" },
		{ MFIE_TYPE_IBSS_DFS,		"IBSS_DFS" },
		{ MFIE_TYPE_RSN,		"RSN" },
		{ MFIE_TYPE_RATES_EX,		"RATES_EX" },
		{ MFIE_TYPE_GENERIC,		"GENERIC" },
		{ MFIE_TYPE_QOS_PARAMETER,	"QOS_PARAMETER" },
	};
	size_t i;

	for (i = 0; i < ARRAY_SIZE(ie_names); i++)
		if (ie_names[i].id == id)
			return ie_names[i].name;

	return "UNKNOWN";
}
/* Stash a received country IE and feed it to the 802.11d machinery when
 * no valid country IE has been accepted yet; refresh the country-IE
 * watchdog when the frame comes from the tracked source.
 */
static inline void rtllib_extract_country_ie(
	struct rtllib_device *ieee,
	struct rtllib_info_element *info_element,
	struct rtllib_network *network,
	u8 *addr2)
{
	if (!IS_DOT11D_ENABLE(ieee))
		return;

	if (info_element->len != 0) {
		memcpy(network->CountryIeBuf, info_element->data,
		       info_element->len);
		network->CountryIeLen = info_element->len;

		if (!IS_COUNTRY_IE_VALID(ieee)) {
			if (rtllib_act_scanning(ieee, false) &&
			    ieee->FirstIe_InScan)
				netdev_info(ieee->dev,
					    "Received beacon CountryIE, SSID: <%s>\n",
					    network->ssid);
			dot11d_update_country(ieee, addr2,
					      info_element->len,
					      info_element->data);
		}
	}

	if (IS_EQUAL_CIE_SRC(ieee, addr2))
		UPDATE_CIE_WATCHDOG(ieee);
}
/* Parse a vendor-specific ("generic", id 221) information element.
 *
 * This handles, in order: WMM/QoS elements, the WPA IE (OUI 00:50:F2
 * type 1), a Realtek Turbo-mode marker, pre-802.11n EWC HT capability
 * and HT information elements (OUI 00:90:4c), Realtek aggregation
 * capabilities, various vendor-detection OUIs (used for interop
 * workarounds), CCX radio-measurement/version elements, and the WZC IE.
 */
static void rtllib_parse_mife_generic(struct rtllib_device *ieee,
				      struct rtllib_info_element *info_element,
				      struct rtllib_network *network,
				      u16 *tmp_htcap_len,
				      u16 *tmp_htinfo_len)
{
	u16 ht_realtek_agg_len = 0;
	u8 ht_realtek_agg_buf[MAX_IE_LEN];

	/* Returns 0 when this was a WMM/QoS element; nothing else to do. */
	if (!rtllib_parse_qos_info_param_IE(ieee, info_element, network))
		return;

	/* WPA IE (OUI 00:50:F2, type 1): keep a copy including the
	 * two-byte id/len header.
	 */
	if (info_element->len >= 4 &&
	    info_element->data[0] == 0x00 &&
	    info_element->data[1] == 0x50 &&
	    info_element->data[2] == 0xf2 &&
	    info_element->data[3] == 0x01) {
		network->wpa_ie_len = min(info_element->len + 2,
					  MAX_WPA_IE_LEN);
		memcpy(network->wpa_ie, info_element, network->wpa_ie_len);
		return;
	}

	/* Realtek (00:e0:4c) Turbo-mode marker. */
	if (info_element->len == 7 &&
	    info_element->data[0] == 0x00 &&
	    info_element->data[1] == 0xe0 &&
	    info_element->data[2] == 0x4c &&
	    info_element->data[3] == 0x01 &&
	    info_element->data[4] == 0x02)
		network->Turbo_Enable = 1;

	/* EWC (pre-N) HT capability element: OUI 00:90:4c, type 0x33. */
	if (*tmp_htcap_len == 0) {
		if (info_element->len >= 4 &&
		    info_element->data[0] == 0x00 &&
		    info_element->data[1] == 0x90 &&
		    info_element->data[2] == 0x4c &&
		    info_element->data[3] == 0x033) {
			*tmp_htcap_len = min_t(u8, info_element->len,
					       MAX_IE_LEN);
			if (*tmp_htcap_len != 0) {
				network->bssht.bd_ht_spec_ver = HT_SPEC_VER_EWC;
				network->bssht.bd_ht_cap_len = min_t(u16, *tmp_htcap_len,
								     sizeof(network->bssht.bd_ht_cap_buf));
				memcpy(network->bssht.bd_ht_cap_buf,
				       info_element->data,
				       network->bssht.bd_ht_cap_len);
			}
		}
		if (*tmp_htcap_len != 0) {
			network->bssht.bd_support_ht = true;
			/* Single-RX-chain peer when the second MCS byte is 0 */
			network->bssht.bd_ht_1r = ((((struct ht_capab_ele *)(network->bssht.bd_ht_cap_buf))->MCS[1]) == 0);
		} else {
			network->bssht.bd_support_ht = false;
			network->bssht.bd_ht_1r = false;
		}
	}

	/* EWC (pre-N) HT information element: OUI 00:90:4c, type 0x34. */
	if (*tmp_htinfo_len == 0) {
		if (info_element->len >= 4 &&
		    info_element->data[0] == 0x00 &&
		    info_element->data[1] == 0x90 &&
		    info_element->data[2] == 0x4c &&
		    info_element->data[3] == 0x034) {
			*tmp_htinfo_len = min_t(u8, info_element->len,
						MAX_IE_LEN);
			if (*tmp_htinfo_len != 0) {
				network->bssht.bd_ht_spec_ver = HT_SPEC_VER_EWC;
				network->bssht.bd_ht_info_len = min_t(u16, *tmp_htinfo_len,
								      sizeof(network->bssht.bd_ht_info_buf));
				memcpy(network->bssht.bd_ht_info_buf,
				       info_element->data,
				       network->bssht.bd_ht_info_len);
			}
		}
	}

	/* Realtek RT2RT aggregation capability (OUI 00:e0:4c, type 2). */
	if (network->bssht.bd_support_ht) {
		if (info_element->len >= 4 &&
		    info_element->data[0] == 0x00 &&
		    info_element->data[1] == 0xe0 &&
		    info_element->data[2] == 0x4c &&
		    info_element->data[3] == 0x02) {
			ht_realtek_agg_len = min_t(u8, info_element->len,
						   MAX_IE_LEN);
			/* NOTE(review): memcpy uses the unclamped IE length
			 * rather than ht_realtek_agg_len -- confirm
			 * info_element->len can never exceed MAX_IE_LEN.
			 */
			memcpy(ht_realtek_agg_buf, info_element->data,
			       info_element->len);
		}
		/* NOTE(review): reading index [5] below only needs len >= 5
		 * here, i.e. 6 bytes may not all have been copied -- TODO
		 * confirm whether the check should be >= 6.
		 */
		if (ht_realtek_agg_len >= 5) {
			network->realtek_cap_exit = true;
			network->bssht.bd_rt2rt_aggregation = true;

			if ((ht_realtek_agg_buf[4] == 1) &&
			    (ht_realtek_agg_buf[5] & 0x02))
				network->bssht.bd_rt2rt_long_slot_time = true;

			if ((ht_realtek_agg_buf[4] == 1) &&
			    (ht_realtek_agg_buf[5] & RT_HT_CAP_USE_92SE))
				network->bssht.rt2rt_ht_mode |= RT_HT_CAP_USE_92SE;
		}
	}
	if (ht_realtek_agg_len >= 5) {
		if ((ht_realtek_agg_buf[5] & RT_HT_CAP_USE_SOFTAP))
			network->bssht.rt2rt_ht_mode |= RT_HT_CAP_USE_SOFTAP;
	}

	/* Vendor-detection OUIs used for interop workarounds; grounded by
	 * the capability flags they set below.
	 */
	if ((info_element->len >= 3 &&
	     info_element->data[0] == 0x00 &&
	     info_element->data[1] == 0x05 &&
	     info_element->data[2] == 0xb5) ||
	    (info_element->len >= 3 &&
	     info_element->data[0] == 0x00 &&
	     info_element->data[1] == 0x0a &&
	     info_element->data[2] == 0xf7) ||
	    (info_element->len >= 3 &&
	     info_element->data[0] == 0x00 &&
	     info_element->data[1] == 0x10 &&
	     info_element->data[2] == 0x18)) {
		network->broadcom_cap_exist = true;
	}
	if (info_element->len >= 3 &&
	    info_element->data[0] == 0x00 &&
	    info_element->data[1] == 0x0c &&
	    info_element->data[2] == 0x43)
		network->ralink_cap_exist = true;
	if ((info_element->len >= 3 &&
	     info_element->data[0] == 0x00 &&
	     info_element->data[1] == 0x03 &&
	     info_element->data[2] == 0x7f) ||
	    (info_element->len >= 3 &&
	     info_element->data[0] == 0x00 &&
	     info_element->data[1] == 0x13 &&
	     info_element->data[2] == 0x74))
		network->atheros_cap_exist = true;
	if ((info_element->len >= 3 &&
	     info_element->data[0] == 0x00 &&
	     info_element->data[1] == 0x50 &&
	     info_element->data[2] == 0x43))
		network->marvell_cap_exist = true;
	if (info_element->len >= 3 &&
	    info_element->data[0] == 0x00 &&
	    info_element->data[1] == 0x40 &&
	    info_element->data[2] == 0x96)
		network->cisco_cap_exist = true;
	if (info_element->len >= 3 &&
	    info_element->data[0] == 0x00 &&
	    info_element->data[1] == 0x0a &&
	    info_element->data[2] == 0xf5)
		network->airgo_cap_exist = true;

	/* CCX (Cisco, OUI 00:40:96) type 1: radio-measurement state and
	 * multi-BSSID mask.
	 */
	if (info_element->len > 4 &&
	    info_element->data[0] == 0x00 &&
	    info_element->data[1] == 0x40 &&
	    info_element->data[2] == 0x96 &&
	    info_element->data[3] == 0x01) {
		if (info_element->len == 6) {
			memcpy(network->CcxRmState, &info_element->data[4], 2);
			if (network->CcxRmState[0] != 0)
				network->bCcxRmEnable = true;
			else
				network->bCcxRmEnable = false;
			network->MBssidMask = network->CcxRmState[1] & 0x07;
			if (network->MBssidMask != 0) {
				network->bMBssidValid = true;
				network->MBssidMask = 0xff <<
						      (network->MBssidMask);
				ether_addr_copy(network->MBssid,
						network->bssid);
				network->MBssid[5] &= network->MBssidMask;
			} else {
				network->bMBssidValid = false;
			}
		} else {
			network->bCcxRmEnable = false;
		}
	}
	/* CCX type 3: version number. */
	if (info_element->len > 4 &&
	    info_element->data[0] == 0x00 &&
	    info_element->data[1] == 0x40 &&
	    info_element->data[2] == 0x96 &&
	    info_element->data[3] == 0x03) {
		if (info_element->len == 5) {
			network->bWithCcxVerNum = true;
			network->BssCcxVerNumber = info_element->data[4];
		} else {
			network->bWithCcxVerNum = false;
			network->BssCcxVerNumber = 0;
		}
	}
	/* WZC IE (OUI 00:50:F2, type 4): keep a copy with its header. */
	if (info_element->len > 4 &&
	    info_element->data[0] == 0x00 &&
	    info_element->data[1] == 0x50 &&
	    info_element->data[2] == 0xf2 &&
	    info_element->data[3] == 0x04) {
		netdev_dbg(ieee->dev, "MFIE_TYPE_WZC: %d bytes\n",
			   info_element->len);
		network->wzc_ie_len = min(info_element->len + 2, MAX_WZC_IE_LEN);
		memcpy(network->wzc_ie, info_element, network->wzc_ie_len);
	}
}
/* Capture an HT-capability information element into the network's BSS-HT
 * descriptor and derive the single-RX-chain and bandwidth fields; reset
 * the HT state when the element is empty.
 */
static void rtllib_parse_mfie_ht_cap(struct rtllib_info_element *info_element,
				     struct rtllib_network *network,
				     u16 *tmp_htcap_len)
{
	struct bss_ht *bss_ht = &network->bssht;
	const struct ht_capab_ele *cap;

	*tmp_htcap_len = min_t(u8, info_element->len, MAX_IE_LEN);

	if (*tmp_htcap_len == 0) {
		bss_ht->bd_support_ht = false;
		bss_ht->bd_ht_1r = false;
		bss_ht->bd_bandwidth = HT_CHANNEL_WIDTH_20;
		return;
	}

	bss_ht->bd_ht_spec_ver = HT_SPEC_VER_EWC;
	bss_ht->bd_ht_cap_len = min_t(u16, *tmp_htcap_len,
				      sizeof(bss_ht->bd_ht_cap_buf));
	memcpy(bss_ht->bd_ht_cap_buf, info_element->data,
	       bss_ht->bd_ht_cap_len);

	cap = (struct ht_capab_ele *)bss_ht->bd_ht_cap_buf;
	bss_ht->bd_support_ht = true;
	/* Single receive chain when the second MCS byte is zero. */
	bss_ht->bd_ht_1r = (cap->MCS[1] == 0);
	bss_ht->bd_bandwidth = (enum ht_channel_width)(cap->ChlWidth);
}
/* Walk the information elements of a beacon/probe response and fill in
 * @network.  @info_element points at the first IE and @length is the
 * number of bytes remaining in the frame.
 *
 * Always returns 0; truncated IEs end the walk without error because
 * some misbehaving APs emit them (see comment below).
 *
 * NOTE(review): several cases (DS_SET, ERP, IBSS_SET) read data[0]
 * without checking info_element->len > 0 -- TODO confirm zero-length IEs
 * cannot reach them with harmful effect.
 */
int rtllib_parse_info_param(struct rtllib_device *ieee,
			    struct rtllib_info_element *info_element,
			    u16 length,
			    struct rtllib_network *network,
			    struct rtllib_rx_stats *stats)
{
	u8 i;
	short offset;
	/* Used to remember EWC (vendor) HT elements across iterations. */
	u16 tmp_htcap_len = 0;
	u16 tmp_htinfo_len = 0;
	char rates_str[64];
	char *p;

	while (length >= sizeof(*info_element)) {
		/* Bail out if the declared IE length overruns the frame. */
		if (sizeof(*info_element) + info_element->len > length) {
			netdev_dbg(ieee->dev,
				   "Info elem: parse failed: info_element->len + 2 > left : info_element->len+2=%zd left=%d, id=%d.\n",
				   info_element->len + sizeof(*info_element),
				   length, info_element->id);
			/* We stop processing but don't return an error here
			 * because some misbehaviour APs break this rule. ie.
			 * Orinoco AP1000.
			 */
			break;
		}

		switch (info_element->id) {
		case MFIE_TYPE_SSID:
			if (rtllib_is_empty_essid(info_element->data,
						  info_element->len)) {
				network->flags |= NETWORK_EMPTY_ESSID;
				break;
			}

			network->ssid_len = min(info_element->len,
						(u8)IW_ESSID_MAX_SIZE);
			memcpy(network->ssid, info_element->data,
			       network->ssid_len);
			/* Zero-pad the remainder of the SSID buffer. */
			if (network->ssid_len < IW_ESSID_MAX_SIZE)
				memset(network->ssid + network->ssid_len, 0,
				       IW_ESSID_MAX_SIZE - network->ssid_len);

			netdev_dbg(ieee->dev, "MFIE_TYPE_SSID: '%s' len=%d.\n",
				   network->ssid, network->ssid_len);
			break;

		case MFIE_TYPE_RATES:
			p = rates_str;
			network->rates_len = min(info_element->len,
						 MAX_RATES_LENGTH);
			for (i = 0; i < network->rates_len; i++) {
				network->rates[i] = info_element->data[i];
				p += scnprintf(p, sizeof(rates_str) -
					       (p - rates_str), "%02X ",
					       network->rates[i]);
				if (rtllib_is_ofdm_rate
				    (info_element->data[i])) {
					network->flags |= NETWORK_HAS_OFDM;
					/* A basic OFDM rate means the BSS is
					 * not CCK-only.
					 */
					if (info_element->data[i] &
					    RTLLIB_BASIC_RATE_MASK)
						network->flags &=
							~NETWORK_HAS_CCK;
				}

				if (rtllib_is_cck_rate
				    (info_element->data[i])) {
					network->flags |= NETWORK_HAS_CCK;
				}
			}

			netdev_dbg(ieee->dev, "MFIE_TYPE_RATES: '%s' (%d)\n",
				   rates_str, network->rates_len);
			break;

		case MFIE_TYPE_RATES_EX:
			p = rates_str;
			network->rates_ex_len = min(info_element->len,
						    MAX_RATES_EX_LENGTH);
			for (i = 0; i < network->rates_ex_len; i++) {
				network->rates_ex[i] = info_element->data[i];
				p += scnprintf(p, sizeof(rates_str) -
					       (p - rates_str), "%02X ",
					       network->rates_ex[i]);
				if (rtllib_is_ofdm_rate
				    (info_element->data[i])) {
					network->flags |= NETWORK_HAS_OFDM;
					if (info_element->data[i] &
					    RTLLIB_BASIC_RATE_MASK)
						network->flags &=
							~NETWORK_HAS_CCK;
				}
			}

			netdev_dbg(ieee->dev, "MFIE_TYPE_RATES_EX: '%s' (%d)\n",
				   rates_str, network->rates_ex_len);
			break;

		case MFIE_TYPE_DS_SET:
			netdev_dbg(ieee->dev, "MFIE_TYPE_DS_SET: %d\n",
				   info_element->data[0]);
			network->channel = info_element->data[0];
			break;

		case MFIE_TYPE_FH_SET:
			netdev_dbg(ieee->dev, "MFIE_TYPE_FH_SET: ignored\n");
			break;

		case MFIE_TYPE_CF_SET:
			netdev_dbg(ieee->dev, "MFIE_TYPE_CF_SET: ignored\n");
			break;

		case MFIE_TYPE_TIM:
			if (info_element->len < 4)
				break;

			network->tim.tim_count = info_element->data[0];
			network->tim.tim_period = info_element->data[1];
			network->dtim_period = info_element->data[1];
			if (ieee->link_state != MAC80211_LINKED)
				break;
			network->last_dtim_sta_time = jiffies;
			network->dtim_data = RTLLIB_DTIM_VALID;

			/* Bit 0 of the bitmap-control byte: group-addressed
			 * traffic buffered at the AP.
			 */
			if (info_element->data[2] & 1)
				network->dtim_data |= RTLLIB_DTIM_MBCAST;

			/* Check whether our AID's bit is set in the partial
			 * virtual bitmap; offset is in octets.
			 */
			offset = (info_element->data[2] >> 1) * 2;

			if (ieee->assoc_id < 8 * offset ||
			    ieee->assoc_id > 8 * (offset + info_element->len - 3))
				break;

			offset = (ieee->assoc_id / 8) - offset;
			if (info_element->data[3 + offset] &
			   (1 << (ieee->assoc_id % 8)))
				network->dtim_data |= RTLLIB_DTIM_UCAST;

			network->listen_interval = network->dtim_period;
			break;

		case MFIE_TYPE_ERP:
			network->erp_value = info_element->data[0];
			network->flags |= NETWORK_HAS_ERP_VALUE;
			netdev_dbg(ieee->dev, "MFIE_TYPE_ERP_SET: %d\n",
				   network->erp_value);
			break;
		case MFIE_TYPE_IBSS_SET:
			network->atim_window = info_element->data[0];
			netdev_dbg(ieee->dev, "MFIE_TYPE_IBSS_SET: %d\n",
				   network->atim_window);
			break;

		case MFIE_TYPE_CHALLENGE:
			netdev_dbg(ieee->dev, "MFIE_TYPE_CHALLENGE: ignored\n");
			break;

		case MFIE_TYPE_GENERIC:
			/* Vendor-specific IEs: WMM, WPA, EWC HT, CCX, ... */
			netdev_dbg(ieee->dev, "MFIE_TYPE_GENERIC: %d bytes\n",
				   info_element->len);

			rtllib_parse_mife_generic(ieee, info_element, network,
						  &tmp_htcap_len,
						  &tmp_htinfo_len);
			break;

		case MFIE_TYPE_RSN:
			netdev_dbg(ieee->dev, "MFIE_TYPE_RSN: %d bytes\n",
				   info_element->len);
			/* Copy includes the two-byte id/len header. */
			network->rsn_ie_len = min(info_element->len + 2,
						  MAX_WPA_IE_LEN);
			memcpy(network->rsn_ie, info_element,
			       network->rsn_ie_len);
			break;

		case MFIE_TYPE_HT_CAP:
			netdev_dbg(ieee->dev, "MFIE_TYPE_HT_CAP: %d bytes\n",
				   info_element->len);

			rtllib_parse_mfie_ht_cap(info_element, network,
						 &tmp_htcap_len);
			break;

		case MFIE_TYPE_HT_INFO:
			netdev_dbg(ieee->dev, "MFIE_TYPE_HT_INFO: %d bytes\n",
				   info_element->len);
			tmp_htinfo_len = min_t(u8, info_element->len,
					       MAX_IE_LEN);
			if (tmp_htinfo_len) {
				network->bssht.bd_ht_spec_ver = HT_SPEC_VER_IEEE;
				network->bssht.bd_ht_info_len = tmp_htinfo_len >
					sizeof(network->bssht.bd_ht_info_buf) ?
					sizeof(network->bssht.bd_ht_info_buf) :
					tmp_htinfo_len;
				memcpy(network->bssht.bd_ht_info_buf,
				       info_element->data,
				       network->bssht.bd_ht_info_len);
			}
			break;

		case MFIE_TYPE_AIRONET:
			netdev_dbg(ieee->dev, "MFIE_TYPE_AIRONET: %d bytes\n",
				   info_element->len);
			if (info_element->len > IE_CISCO_FLAG_POSITION) {
				network->bWithAironetIE = true;

				/* CKIP supported when either MIC or PK bit
				 * of the Aironet flag byte is set.
				 */
				if ((info_element->data[IE_CISCO_FLAG_POSITION]
				     & SUPPORT_CKIP_MIC) ||
				    (info_element->data[IE_CISCO_FLAG_POSITION]
				     & SUPPORT_CKIP_PK))
					network->bCkipSupported = true;
				else
					network->bCkipSupported = false;
			} else {
				network->bWithAironetIE = false;
				network->bCkipSupported = false;
			}
			break;
		case MFIE_TYPE_QOS_PARAMETER:
			netdev_err(ieee->dev,
				   "QoS Error need to parse QOS_PARAMETER IE\n");
			break;

		case MFIE_TYPE_COUNTRY:
			netdev_dbg(ieee->dev, "MFIE_TYPE_COUNTRY: %d bytes\n",
				   info_element->len);
			rtllib_extract_country_ie(ieee, info_element, network,
						  network->bssid);
			break;
/* TODO */
		default:
			netdev_dbg(ieee->dev,
				   "Unsupported info element: %s (%d)\n",
				   get_info_element_string(info_element->id),
				   info_element->id);
			break;
		}

		/* Advance to the next IE. */
		length -= sizeof(*info_element) + info_element->len;
		info_element =
			(struct rtllib_info_element *)&info_element->data[info_element->len];
	}

	/* No recognized vendor IE at all: remember it for interop logic. */
	if (!network->atheros_cap_exist && !network->broadcom_cap_exist &&
	    !network->cisco_cap_exist && !network->ralink_cap_exist &&
	    !network->bssht.bd_rt2rt_aggregation)
		network->unknown_cap_exist = true;
	else
		network->unknown_cap_exist = false;
	return 0;
}
/* Convert a 0..100 signal-strength index to an approximate dBm value:
 * halve the index (rounding up) and offset by -95.
 */
static long rtllib_translate_todbm(u8 signal_strength_index)
{
	return (long)((signal_strength_index + 1) >> 1) - 95;
}
/* Initialize a struct rtllib_network from a received beacon or probe
 * response: copy the fixed fields, reset all derived state, then parse
 * the trailing information elements.
 *
 * Returns 0 on success, 1 when the network should be filtered out
 * (IE parsing failed or no usable rate mode was advertised).
 */
static inline int rtllib_network_init(
	struct rtllib_device *ieee,
	struct rtllib_probe_response *beacon,
	struct rtllib_network *network,
	struct rtllib_rx_stats *stats)
{
	memset(&network->qos_data, 0, sizeof(struct rtllib_qos_data));

	/* Pull out fixed field data */
	ether_addr_copy(network->bssid, beacon->header.addr3);
	network->capability = le16_to_cpu(beacon->capability);
	network->last_scanned = jiffies;
	network->time_stamp[0] = beacon->time_stamp[0];
	network->time_stamp[1] = beacon->time_stamp[1];
	network->beacon_interval = le16_to_cpu(beacon->beacon_interval);
	/* Where to pull this? beacon->listen_interval;*/
	network->listen_interval = 0x0A;
	network->rates_len = network->rates_ex_len = 0;
	network->ssid_len = 0;
	network->hidden_ssid_len = 0;
	memset(network->hidden_ssid, 0, sizeof(network->hidden_ssid));
	network->flags = 0;
	network->atim_window = 0;
	network->erp_value = (network->capability & WLAN_CAPABILITY_IBSS) ?
		0x3 : 0x0;
	network->berp_info_valid = false;
	network->broadcom_cap_exist = false;
	network->ralink_cap_exist = false;
	network->atheros_cap_exist = false;
	network->cisco_cap_exist = false;
	network->unknown_cap_exist = false;
	network->realtek_cap_exit = false;
	network->marvell_cap_exist = false;
	network->airgo_cap_exist = false;
	network->Turbo_Enable = 0;
	network->SignalStrength = stats->SignalStrength;
	network->RSSI = stats->SignalStrength;
	network->CountryIeLen = 0;
	memset(network->CountryIeBuf, 0, MAX_IE_LEN);
	HTInitializeBssDesc(&network->bssht);
	/* Assume CCK support until the rates IEs prove otherwise. */
	network->flags |= NETWORK_HAS_CCK;

	network->wpa_ie_len = 0;
	network->rsn_ie_len = 0;
	network->wzc_ie_len = 0;

	if (rtllib_parse_info_param(ieee,
				    beacon->info_element,
				    (stats->len - sizeof(*beacon)),
				    network,
				    stats))
		return 1;

	/* Derive the wireless mode from the advertised rate families. */
	network->mode = 0;
	if (network->flags & NETWORK_HAS_OFDM)
		network->mode |= WIRELESS_MODE_G;
	if (network->flags & NETWORK_HAS_CCK)
		network->mode |= WIRELESS_MODE_B;

	if (network->mode == 0) {
		netdev_dbg(ieee->dev, "Filtered out '%s (%pM)' network.\n",
			   escape_essid(network->ssid, network->ssid_len),
			   network->bssid);
		return 1;
	}

	if (network->bssht.bd_support_ht) {
		if (network->mode & (WIRELESS_MODE_G | WIRELESS_MODE_B))
			network->mode = WIRELESS_MODE_N_24G;
	}
	if (rtllib_is_empty_essid(network->ssid, network->ssid_len))
		network->flags |= NETWORK_EMPTY_ESSID;
	/* Derive iwstats-style signal/noise figures from signal strength. */
	stats->signal = 30 + (stats->SignalStrength * 70) / 100;
	stats->noise = rtllib_translate_todbm((u8)(100 - stats->signal)) - 25;

	memcpy(&network->stats, stats, sizeof(network->stats));

	return 0;
}
static inline int is_same_network(struct rtllib_network *src,
				  struct rtllib_network *dst, u8 ssidbroad)
{
	/* A network is only a duplicate if the channel, BSSID, ESSID
	 * and the capability field (in particular IBSS and BSS) all match.
	 * We treat all <hidden> with the same BSSID and channel
	 * as one network
	 */
	if (src->channel != dst->channel)
		return 0;
	if (memcmp(src->bssid, dst->bssid, ETH_ALEN) != 0)
		return 0;
	if ((src->capability & WLAN_CAPABILITY_IBSS) !=
	    (dst->capability & WLAN_CAPABILITY_IBSS))
		return 0;
	if ((src->capability & WLAN_CAPABILITY_ESS) !=
	    (dst->capability & WLAN_CAPABILITY_ESS))
		return 0;

	/* Compare the ESSID only when it was actually broadcast. */
	if (ssidbroad) {
		if (src->ssid_len != dst->ssid_len)
			return 0;
		if (memcmp(src->ssid, dst->ssid, src->ssid_len) != 0)
			return 0;
	}

	return 1;
}
/* Refresh an existing scan-list entry (dst) with the data just parsed from
 * a beacon/probe response (src).  Caller holds ieee->lock.
 */
static inline void update_network(struct rtllib_device *ieee,
				  struct rtllib_network *dst,
				  struct rtllib_network *src)
{
	int qos_active;
	u8 old_param;
	memcpy(&dst->stats, &src->stats, sizeof(struct rtllib_rx_stats));
	dst->capability = src->capability;
	memcpy(dst->rates, src->rates, src->rates_len);
	dst->rates_len = src->rates_len;
	memcpy(dst->rates_ex, src->rates_ex, src->rates_ex_len);
	dst->rates_ex_len = src->rates_ex_len;
	if (src->ssid_len > 0) {
		/* If dst has no SSID yet, the AP apparently hides its SSID
		 * in beacons: keep the SSID learned elsewhere in the
		 * separate hidden_ssid field instead of ssid.
		 */
		if (dst->ssid_len == 0) {
			memset(dst->hidden_ssid, 0, sizeof(dst->hidden_ssid));
			dst->hidden_ssid_len = src->ssid_len;
			memcpy(dst->hidden_ssid, src->ssid, src->ssid_len);
		} else {
			memset(dst->ssid, 0, dst->ssid_len);
			dst->ssid_len = src->ssid_len;
			memcpy(dst->ssid, src->ssid, src->ssid_len);
		}
	}
	dst->mode = src->mode;
	dst->flags = src->flags;
	dst->time_stamp[0] = src->time_stamp[0];
	dst->time_stamp[1] = src->time_stamp[1];
	/* Only take the ERP value when the frame actually carried one. */
	if (src->flags & NETWORK_HAS_ERP_VALUE) {
		dst->erp_value = src->erp_value;
		dst->berp_info_valid = src->berp_info_valid = true;
	}
	dst->beacon_interval = src->beacon_interval;
	dst->listen_interval = src->listen_interval;
	dst->atim_window = src->atim_window;
	dst->dtim_period = src->dtim_period;
	dst->dtim_data = src->dtim_data;
	dst->last_dtim_sta_time = src->last_dtim_sta_time;
	memcpy(&dst->tim, &src->tim, sizeof(struct rtllib_tim_parameters));
	/* HT (802.11n) capability/information elements and flags. */
	dst->bssht.bd_support_ht = src->bssht.bd_support_ht;
	dst->bssht.bd_rt2rt_aggregation = src->bssht.bd_rt2rt_aggregation;
	dst->bssht.bd_ht_cap_len = src->bssht.bd_ht_cap_len;
	memcpy(dst->bssht.bd_ht_cap_buf, src->bssht.bd_ht_cap_buf,
	       src->bssht.bd_ht_cap_len);
	dst->bssht.bd_ht_info_len = src->bssht.bd_ht_info_len;
	memcpy(dst->bssht.bd_ht_info_buf, src->bssht.bd_ht_info_buf,
	       src->bssht.bd_ht_info_len);
	dst->bssht.bd_ht_spec_ver = src->bssht.bd_ht_spec_ver;
	dst->bssht.bd_rt2rt_long_slot_time = src->bssht.bd_rt2rt_long_slot_time;
	/* Vendor-specific capability indications parsed from the IEs. */
	dst->broadcom_cap_exist = src->broadcom_cap_exist;
	dst->ralink_cap_exist = src->ralink_cap_exist;
	dst->atheros_cap_exist = src->atheros_cap_exist;
	dst->realtek_cap_exit = src->realtek_cap_exit;
	dst->marvell_cap_exist = src->marvell_cap_exist;
	dst->cisco_cap_exist = src->cisco_cap_exist;
	dst->airgo_cap_exist = src->airgo_cap_exist;
	dst->unknown_cap_exist = src->unknown_cap_exist;
	memcpy(dst->wpa_ie, src->wpa_ie, src->wpa_ie_len);
	dst->wpa_ie_len = src->wpa_ie_len;
	memcpy(dst->rsn_ie, src->rsn_ie, src->rsn_ie_len);
	dst->rsn_ie_len = src->rsn_ie_len;
	memcpy(dst->wzc_ie, src->wzc_ie, src->wzc_ie_len);
	dst->wzc_ie_len = src->wzc_ie_len;
	dst->last_scanned = jiffies;
	/* qos related parameters: save active/param_count before the bulk
	 * copy below so the local QoS state survives the update.
	 */
	qos_active = dst->qos_data.active;
	old_param = dst->qos_data.param_count;
	dst->qos_data.supported = src->qos_data.supported;
	if (dst->flags & NETWORK_HAS_QOS_PARAMETERS)
		memcpy(&dst->qos_data, &src->qos_data,
		       sizeof(struct rtllib_qos_data));
	if (dst->qos_data.supported == 1) {
		if (dst->ssid_len)
			netdev_dbg(ieee->dev,
				   "QoS the network %s is QoS supported\n",
				   dst->ssid);
		else
			netdev_dbg(ieee->dev,
				   "QoS the network is QoS supported\n");
	}
	/* Restore the locally tracked QoS state saved above. */
	dst->qos_data.active = qos_active;
	dst->qos_data.old_param_count = old_param;
	dst->wmm_info = src->wmm_info;
	/* Only overwrite the WMM parameter set if src carries a non-empty
	 * one (any AC has a non-zero ACI/ACM/AIFSN byte).
	 */
	if (src->wmm_param[0].ac_aci_acm_aifsn ||
	    src->wmm_param[1].ac_aci_acm_aifsn ||
	    src->wmm_param[2].ac_aci_acm_aifsn ||
	    src->wmm_param[3].ac_aci_acm_aifsn)
		memcpy(dst->wmm_param, src->wmm_param, WME_AC_PRAM_LEN);
	dst->SignalStrength = src->SignalStrength;
	dst->RSSI = src->RSSI;
	dst->Turbo_Enable = src->Turbo_Enable;
	dst->CountryIeLen = src->CountryIeLen;
	memcpy(dst->CountryIeBuf, src->CountryIeBuf, src->CountryIeLen);
	/* Cisco CCX related state. */
	dst->bWithAironetIE = src->bWithAironetIE;
	dst->bCkipSupported = src->bCkipSupported;
	memcpy(dst->CcxRmState, src->CcxRmState, 2);
	dst->bCcxRmEnable = src->bCcxRmEnable;
	dst->MBssidMask = src->MBssidMask;
	dst->bMBssidValid = src->bMBssidValid;
	memcpy(dst->MBssid, src->MBssid, 6);
	dst->bWithCcxVerNum = src->bWithCcxVerNum;
	dst->BssCcxVerNumber = src->BssCcxVerNumber;
}
static inline int is_beacon(u16 fc)
{
	/* True when the frame control field carries the beacon subtype. */
	if (WLAN_FC_GET_STYPE(fc) == RTLLIB_STYPE_BEACON)
		return 1;
	return 0;
}
static int IsPassiveChannel(struct rtllib_device *rtllib, u8 channel)
{
	/* A channel-map value of 2 marks a passive-scan-only channel;
	 * out-of-range channel numbers are rejected with a log note.
	 */
	if (channel > MAX_CHANNEL_NUMBER) {
		netdev_info(rtllib->dev, "%s(): Invalid Channel\n", __func__);
		return 0;
	}
	return rtllib->active_channel_map[channel] == 2 ? 1 : 0;
}
int rtllib_legal_channel(struct rtllib_device *rtllib, u8 channel)
{
	/* A channel is legal when its map entry is non-zero (active or
	 * passive); out-of-range channel numbers are rejected with a note.
	 */
	if (channel > MAX_CHANNEL_NUMBER) {
		netdev_info(rtllib->dev, "%s(): Invalid Channel\n", __func__);
		return 0;
	}
	return rtllib->active_channel_map[channel] > 0 ? 1 : 0;
}
EXPORT_SYMBOL(rtllib_legal_channel);
/* Parse a received beacon or probe response, then merge the resulting
 * network description into ieee->current_network and the scan list
 * (ieee->network_list), expiring the oldest entry if the list is full.
 */
static inline void rtllib_process_probe_response(
	struct rtllib_device *ieee,
	struct rtllib_probe_response *beacon,
	struct rtllib_rx_stats *stats)
{
	struct rtllib_network *target;
	struct rtllib_network *oldest = NULL;
	struct rtllib_info_element *info_element = &beacon->info_element[0];
	unsigned long flags;
	short renew;
	/* Scratch network descriptor; GFP_ATOMIC because we may be in
	 * softirq context.  Freed at free_network below on every path.
	 */
	struct rtllib_network *network = kzalloc(sizeof(struct rtllib_network),
						 GFP_ATOMIC);
	u16 frame_ctl = le16_to_cpu(beacon->header.frame_ctl);
	if (!network)
		return;
	/* Dump the 16 capability bits, MSB first, for debugging. */
	netdev_dbg(ieee->dev,
		   "'%s' ( %pM ): %c%c%c%c %c%c%c%c-%c%c%c%c %c%c%c%c\n",
		   escape_essid(info_element->data, info_element->len),
		   beacon->header.addr3,
		   (le16_to_cpu(beacon->capability) & (1 << 0xf)) ? '1' : '0',
		   (le16_to_cpu(beacon->capability) & (1 << 0xe)) ? '1' : '0',
		   (le16_to_cpu(beacon->capability) & (1 << 0xd)) ? '1' : '0',
		   (le16_to_cpu(beacon->capability) & (1 << 0xc)) ? '1' : '0',
		   (le16_to_cpu(beacon->capability) & (1 << 0xb)) ? '1' : '0',
		   (le16_to_cpu(beacon->capability) & (1 << 0xa)) ? '1' : '0',
		   (le16_to_cpu(beacon->capability) & (1 << 0x9)) ? '1' : '0',
		   (le16_to_cpu(beacon->capability) & (1 << 0x8)) ? '1' : '0',
		   (le16_to_cpu(beacon->capability) & (1 << 0x7)) ? '1' : '0',
		   (le16_to_cpu(beacon->capability) & (1 << 0x6)) ? '1' : '0',
		   (le16_to_cpu(beacon->capability) & (1 << 0x5)) ? '1' : '0',
		   (le16_to_cpu(beacon->capability) & (1 << 0x4)) ? '1' : '0',
		   (le16_to_cpu(beacon->capability) & (1 << 0x3)) ? '1' : '0',
		   (le16_to_cpu(beacon->capability) & (1 << 0x2)) ? '1' : '0',
		   (le16_to_cpu(beacon->capability) & (1 << 0x1)) ? '1' : '0',
		   (le16_to_cpu(beacon->capability) & (1 << 0x0)) ? '1' : '0');
	/* Fill *network from the frame; non-zero means the frame was
	 * unusable and is dropped.
	 */
	if (rtllib_network_init(ieee, beacon, network, stats)) {
		netdev_dbg(ieee->dev, "Dropped '%s' ( %pM) via %s.\n",
			   escape_essid(info_element->data, info_element->len),
			   beacon->header.addr3,
			   is_beacon(frame_ctl) ? "BEACON" : "PROBE RESPONSE");
		goto free_network;
	}
	if (!rtllib_legal_channel(ieee, network->channel))
		goto free_network;
	/* Probe responses are not accepted on passive-scan channels. */
	if (WLAN_FC_GET_STYPE(frame_ctl) == RTLLIB_STYPE_PROBE_RESP) {
		if (IsPassiveChannel(ieee, network->channel)) {
			netdev_info(ieee->dev,
				    "GetScanInfo(): For Global Domain, filter probe response at channel(%d).\n",
				    network->channel);
			goto free_network;
		}
	}
	/* The network parsed correctly -- so now we scan our known networks
	 * to see if we can find it in our list.
	 *
	 * NOTE: This search is definitely not optimized.  Once its doing
	 *       the "right thing" we'll optimize it for efficiency if
	 *       necessary
	 */
	/* Search for this entry in the list and update it if it is
	 * already there.
	 */
	spin_lock_irqsave(&ieee->lock, flags);
	/* If this frame is from the network we are associated with, refresh
	 * current_network and its ERP/beacon bookkeeping first.
	 */
	if (is_same_network(&ieee->current_network, network,
	    (network->ssid_len ? 1 : 0))) {
		update_network(ieee, &ieee->current_network, network);
		if ((ieee->current_network.mode == WIRELESS_MODE_N_24G ||
		     ieee->current_network.mode == WIRELESS_MODE_G) &&
		    ieee->current_network.berp_info_valid) {
			if (ieee->current_network.erp_value & ERP_UseProtection)
				ieee->current_network.buseprotection = true;
			else
				ieee->current_network.buseprotection = false;
		}
		if (is_beacon(frame_ctl)) {
			/* Count received beacons for link monitoring. */
			if (ieee->link_state >= MAC80211_LINKED)
				ieee->link_detect_info.NumRecvBcnInPeriod++;
		}
	}
	/* Look for an existing scan-list entry, remembering the oldest
	 * entry seen in case we need to evict one.
	 */
	list_for_each_entry(target, &ieee->network_list, list) {
		if (is_same_network(target, network,
		    (target->ssid_len ? 1 : 0)))
			break;
		if ((oldest == NULL) ||
		    (target->last_scanned < oldest->last_scanned))
			oldest = target;
	}
	/* If we didn't find a match, then get a new network slot to initialize
	 * with this beacon's information
	 */
	if (&target->list == &ieee->network_list) {
		if (list_empty(&ieee->network_free_list)) {
			/* If there are no more slots, expire the oldest */
			list_del(&oldest->list);
			target = oldest;
			netdev_dbg(ieee->dev,
				   "Expired '%s' ( %pM) from network list.\n",
				   escape_essid(target->ssid, target->ssid_len),
				   target->bssid);
		} else {
			/* Otherwise just pull from the free list */
			target = list_entry(ieee->network_free_list.next,
					    struct rtllib_network, list);
			list_del(ieee->network_free_list.next);
		}
		netdev_dbg(ieee->dev, "Adding '%s' ( %pM) via %s.\n",
			   escape_essid(network->ssid, network->ssid_len),
			   network->bssid,
			   is_beacon(frame_ctl) ? "BEACON" : "PROBE RESPONSE");
		memcpy(target, network, sizeof(*target));
		list_add_tail(&target->list, &ieee->network_list);
		if (ieee->softmac_features & IEEE_SOFTMAC_ASSOCIATE)
			rtllib_softmac_new_net(ieee, network);
	} else {
		netdev_dbg(ieee->dev, "Updating '%s' ( %pM) via %s.\n",
			   escape_essid(target->ssid, target->ssid_len),
			   target->bssid,
			   is_beacon(frame_ctl) ? "BEACON" : "PROBE RESPONSE");
		/* we have an entry and we are going to update it. But this
		 * entry may be already expired. In this case we do the same
		 * as we found a new net and call the new_net handler
		 */
		renew = !time_after(target->last_scanned + ieee->scan_age,
				    jiffies);
		/* Also treat a previously hidden SSID that just became
		 * known (or matches the network we are trying to join)
		 * as new.
		 */
		if ((!target->ssid_len) &&
		    (((network->ssid_len > 0) && (target->hidden_ssid_len == 0))
		    || ((ieee->current_network.ssid_len == network->ssid_len) &&
		    (strncmp(ieee->current_network.ssid, network->ssid,
		     network->ssid_len) == 0) &&
		    (ieee->link_state == MAC80211_NOLINK))))
			renew = 1;
		update_network(ieee, target, network);
		if (renew && (ieee->softmac_features & IEEE_SOFTMAC_ASSOCIATE))
			rtllib_softmac_new_net(ieee, network);
	}
	spin_unlock_irqrestore(&ieee->lock, flags);
	/* Forward beacons of the associated network to the driver. */
	if (is_beacon(frame_ctl) &&
	    is_same_network(&ieee->current_network, network,
	    (network->ssid_len ? 1 : 0)) &&
	    (ieee->link_state == MAC80211_LINKED)) {
		ieee->handle_beacon(ieee->dev, beacon, &ieee->current_network);
	}
free_network:
	kfree(network);
}
static void rtllib_rx_mgt(struct rtllib_device *ieee,
struct sk_buff *skb,
struct rtllib_rx_stats *stats)
{
struct rtllib_hdr_4addr *header = (struct rtllib_hdr_4addr *)skb->data;
if ((WLAN_FC_GET_STYPE(le16_to_cpu(header->frame_ctl)) !=
RTLLIB_STYPE_PROBE_RESP) &&
(WLAN_FC_GET_STYPE(le16_to_cpu(header->frame_ctl)) !=
RTLLIB_STYPE_BEACON))
ieee->last_rx_ps_time = jiffies;
switch (WLAN_FC_GET_STYPE(le16_to_cpu(header->frame_ctl))) {
case RTLLIB_STYPE_BEACON:
netdev_dbg(ieee->dev, "received BEACON (%d)\n",
WLAN_FC_GET_STYPE(le16_to_cpu(header->frame_ctl)));
rtllib_process_probe_response(
ieee, (struct rtllib_probe_response *)header,
stats);
if (ieee->sta_sleep || (ieee->ps != RTLLIB_PS_DISABLED &&
ieee->iw_mode == IW_MODE_INFRA &&
ieee->link_state == MAC80211_LINKED))
schedule_work(&ieee->ps_task);
break;
case RTLLIB_STYPE_PROBE_RESP:
netdev_dbg(ieee->dev, "received PROBE RESPONSE (%d)\n",
WLAN_FC_GET_STYPE(le16_to_cpu(header->frame_ctl)));
rtllib_process_probe_response(ieee,
(struct rtllib_probe_response *)header, stats);
break;
case RTLLIB_STYPE_PROBE_REQ:
netdev_dbg(ieee->dev, "received PROBE REQUEST (%d)\n",
WLAN_FC_GET_STYPE(le16_to_cpu(header->frame_ctl)));
if ((ieee->softmac_features & IEEE_SOFTMAC_PROBERS) &&
(ieee->iw_mode == IW_MODE_ADHOC &&
ieee->link_state == MAC80211_LINKED))
rtllib_rx_probe_rq(ieee, skb);
break;
}
}
| linux-master | drivers/staging/rtl8192e/rtllib_rx.c |
// SPDX-License-Identifier: GPL-2.0
/*
* Copyright(c) 2004 Intel Corporation. All rights reserved.
*
* Portions of this file are based on the WEP enablement code provided by the
* Host AP project hostap-drivers v0.1.3
* Copyright (c) 2001-2002, SSH Communications Security Corp and Jouni Malinen
* <[email protected]>
* Copyright (c) 2002-2003, Jouni Malinen <[email protected]>
*
* Contact Information:
* James P. Ketrenos <[email protected]>
* Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
*/
#include <linux/wireless.h>
#include <linux/kmod.h>
#include <linux/module.h>
#include <linux/etherdevice.h>
#include "rtllib.h"
/* Wireless-mode bit index -> protocol suffix, used to build the
 * "IEEE802.11<modes>" name reported via SIOCGIWNAME.
 */
static const char * const rtllib_modes[] = {
	"a", "b", "g", "?", "N-24G"
};
#define MAX_CUSTOM_LEN 64
static inline char *rtl819x_translate_scan(struct rtllib_device *ieee,
char *start, char *stop,
struct rtllib_network *network,
struct iw_request_info *info)
{
char custom[MAX_CUSTOM_LEN];
char proto_name[IFNAMSIZ];
char *pname = proto_name;
char *p;
struct iw_event iwe;
int i, j;
u16 max_rate, rate;
static u8 EWC11NHTCap[] = {0x00, 0x90, 0x4c, 0x33};
/* First entry *MUST* be the AP MAC address */
iwe.cmd = SIOCGIWAP;
iwe.u.ap_addr.sa_family = ARPHRD_ETHER;
ether_addr_copy(iwe.u.ap_addr.sa_data, network->bssid);
start = iwe_stream_add_event(info, start, stop, &iwe, IW_EV_ADDR_LEN);
/* Remaining entries will be displayed in the order we provide them */
/* Add the ESSID */
iwe.cmd = SIOCGIWESSID;
iwe.u.data.flags = 1;
if (network->ssid_len > 0) {
iwe.u.data.length = min_t(u8, network->ssid_len, 32);
start = iwe_stream_add_point(info, start, stop, &iwe, network->ssid);
} else if (network->hidden_ssid_len == 0) {
iwe.u.data.length = sizeof("<hidden>");
start = iwe_stream_add_point(info, start, stop, &iwe, "<hidden>");
} else {
iwe.u.data.length = min_t(u8, network->hidden_ssid_len, 32);
start = iwe_stream_add_point(info, start, stop, &iwe, network->hidden_ssid);
}
/* Add the protocol name */
iwe.cmd = SIOCGIWNAME;
for (i = 0; i < ARRAY_SIZE(rtllib_modes); i++) {
if (network->mode & BIT(i)) {
strcpy(pname, rtllib_modes[i]);
pname += strlen(rtllib_modes[i]);
}
}
*pname = '\0';
snprintf(iwe.u.name, IFNAMSIZ, "IEEE802.11%s", proto_name);
start = iwe_stream_add_event(info, start, stop, &iwe, IW_EV_CHAR_LEN);
/* Add mode */
iwe.cmd = SIOCGIWMODE;
if (network->capability &
(WLAN_CAPABILITY_ESS | WLAN_CAPABILITY_IBSS)) {
if (network->capability & WLAN_CAPABILITY_ESS)
iwe.u.mode = IW_MODE_MASTER;
else
iwe.u.mode = IW_MODE_ADHOC;
start = iwe_stream_add_event(info, start, stop, &iwe, IW_EV_UINT_LEN);
}
/* Add frequency/channel */
iwe.cmd = SIOCGIWFREQ;
iwe.u.freq.m = network->channel;
iwe.u.freq.e = 0;
iwe.u.freq.i = 0;
start = iwe_stream_add_event(info, start, stop, &iwe, IW_EV_FREQ_LEN);
/* Add encryption capability */
iwe.cmd = SIOCGIWENCODE;
if (network->capability & WLAN_CAPABILITY_PRIVACY)
iwe.u.data.flags = IW_ENCODE_ENABLED | IW_ENCODE_NOKEY;
else
iwe.u.data.flags = IW_ENCODE_DISABLED;
iwe.u.data.length = 0;
start = iwe_stream_add_point(info, start, stop, &iwe, network->ssid);
/* Add basic and extended rates */
max_rate = 0;
p = custom;
p += scnprintf(p, MAX_CUSTOM_LEN - (p - custom), " Rates (Mb/s): ");
for (i = 0, j = 0; i < network->rates_len;) {
if (j < network->rates_ex_len &&
((network->rates_ex[j] & 0x7F) <
(network->rates[i] & 0x7F)))
rate = network->rates_ex[j++] & 0x7F;
else
rate = network->rates[i++] & 0x7F;
if (rate > max_rate)
max_rate = rate;
p += scnprintf(p, MAX_CUSTOM_LEN - (p - custom),
"%d%s ", rate >> 1, (rate & 1) ? ".5" : "");
}
for (; j < network->rates_ex_len; j++) {
rate = network->rates_ex[j] & 0x7F;
p += scnprintf(p, MAX_CUSTOM_LEN - (p - custom),
"%d%s ", rate >> 1, (rate & 1) ? ".5" : "");
if (rate > max_rate)
max_rate = rate;
}
if (network->mode >= WIRELESS_MODE_N_24G) {
struct ht_capab_ele *ht_cap = NULL;
bool is40M = false, isShortGI = false;
u8 max_mcs = 0;
if (!memcmp(network->bssht.bd_ht_cap_buf, EWC11NHTCap, 4))
ht_cap = (struct ht_capab_ele *)
&network->bssht.bd_ht_cap_buf[4];
else
ht_cap = (struct ht_capab_ele *)
&network->bssht.bd_ht_cap_buf[0];
is40M = (ht_cap->ChlWidth) ? 1 : 0;
isShortGI = (ht_cap->ChlWidth) ?
((ht_cap->ShortGI40Mhz) ? 1 : 0) :
((ht_cap->ShortGI20Mhz) ? 1 : 0);
max_mcs = HTGetHighestMCSRate(ieee, ht_cap->MCS,
MCS_FILTER_ALL);
rate = MCS_DATA_RATE[is40M][isShortGI][max_mcs & 0x7f];
if (rate > max_rate)
max_rate = rate;
}
iwe.cmd = SIOCGIWRATE;
iwe.u.bitrate.disabled = 0;
iwe.u.bitrate.fixed = 0;
iwe.u.bitrate.value = max_rate * 500000;
start = iwe_stream_add_event(info, start, stop, &iwe, IW_EV_PARAM_LEN);
iwe.cmd = IWEVCUSTOM;
iwe.u.data.length = p - custom;
if (iwe.u.data.length)
start = iwe_stream_add_point(info, start, stop, &iwe, custom);
/* Add quality statistics */
/* TODO: Fix these values... */
iwe.cmd = IWEVQUAL;
iwe.u.qual.qual = network->stats.signal;
iwe.u.qual.level = network->stats.rssi;
iwe.u.qual.noise = network->stats.noise;
iwe.u.qual.updated = network->stats.mask & RTLLIB_STATMASK_WEMASK;
if (!(network->stats.mask & RTLLIB_STATMASK_RSSI))
iwe.u.qual.updated |= IW_QUAL_LEVEL_INVALID;
if (!(network->stats.mask & RTLLIB_STATMASK_NOISE))
iwe.u.qual.updated |= IW_QUAL_NOISE_INVALID;
if (!(network->stats.mask & RTLLIB_STATMASK_SIGNAL))
iwe.u.qual.updated |= IW_QUAL_QUAL_INVALID;
iwe.u.qual.updated = 7;
start = iwe_stream_add_event(info, start, stop, &iwe, IW_EV_QUAL_LEN);
iwe.cmd = IWEVCUSTOM;
p = custom;
iwe.u.data.length = p - custom;
if (iwe.u.data.length)
start = iwe_stream_add_point(info, start, stop, &iwe, custom);
memset(&iwe, 0, sizeof(iwe));
if (network->wpa_ie_len) {
char buf[MAX_WPA_IE_LEN];
memcpy(buf, network->wpa_ie, network->wpa_ie_len);
iwe.cmd = IWEVGENIE;
iwe.u.data.length = network->wpa_ie_len;
start = iwe_stream_add_point(info, start, stop, &iwe, buf);
}
memset(&iwe, 0, sizeof(iwe));
if (network->rsn_ie_len) {
char buf[MAX_WPA_IE_LEN];
memcpy(buf, network->rsn_ie, network->rsn_ie_len);
iwe.cmd = IWEVGENIE;
iwe.u.data.length = network->rsn_ie_len;
start = iwe_stream_add_point(info, start, stop, &iwe, buf);
}
/* add info for WZC */
memset(&iwe, 0, sizeof(iwe));
if (network->wzc_ie_len) {
char buf[MAX_WZC_IE_LEN];
memcpy(buf, network->wzc_ie, network->wzc_ie_len);
iwe.cmd = IWEVGENIE;
iwe.u.data.length = network->wzc_ie_len;
start = iwe_stream_add_point(info, start, stop, &iwe, buf);
}
/* Add EXTRA: Age to display seconds since last beacon/probe response
* for given network.
*/
iwe.cmd = IWEVCUSTOM;
p = custom;
p += scnprintf(p, MAX_CUSTOM_LEN - (p - custom),
" Last beacon: %lums ago",
(100 * (jiffies - network->last_scanned)) / HZ);
iwe.u.data.length = p - custom;
if (iwe.u.data.length)
start = iwe_stream_add_point(info, start, stop, &iwe, custom);
return start;
}
/* SIOCGIWSCAN handler: serialize the scan list into the user buffer.
 * Returns 0 on success or -E2BIG when the buffer is too small (user
 * space is expected to retry with a larger one).
 */
int rtllib_wx_get_scan(struct rtllib_device *ieee,
		       struct iw_request_info *info,
		       union iwreq_data *wrqu, char *extra)
{
	struct rtllib_network *network;
	unsigned long flags;
	char *ev = extra;
	char *stop = ev + wrqu->data.length;
	int i = 0;
	int err = 0;
	netdev_dbg(ieee->dev, "Getting scan\n");
	mutex_lock(&ieee->wx_mutex);
	spin_lock_irqsave(&ieee->lock, flags);
	list_for_each_entry(network, &ieee->network_list, list) {
		i++;
		/* Bail out if less than ~200 bytes of event space remain. */
		if ((stop - ev) < 200) {
			err = -E2BIG;
			break;
		}
		/* Only report entries younger than scan_age (0 = no limit). */
		if (ieee->scan_age == 0 ||
		    time_after(network->last_scanned + ieee->scan_age, jiffies))
			ev = rtl819x_translate_scan(ieee, ev, stop, network,
						    info);
		else
			netdev_dbg(ieee->dev,
				   "Network '%s ( %pM)' hidden due to age (%lums).\n",
				   escape_essid(network->ssid,
						network->ssid_len),
				   network->bssid,
				   (100 * (jiffies - network->last_scanned)) /
				   HZ);
	}
	spin_unlock_irqrestore(&ieee->lock, flags);
	mutex_unlock(&ieee->wx_mutex);
	wrqu->data.length = ev - extra;
	wrqu->data.flags = 0;
	netdev_dbg(ieee->dev, "%s(): %d networks returned.\n", __func__, i);
	return err;
}
EXPORT_SYMBOL(rtllib_wx_get_scan);
/* SIOCSIWENCODE handler: configure or disable a WEP key.  The crypt
 * backend ("R-WEP") is instantiated on demand; the key index, key
 * material and open/shared auth mode are taken from @wrqu/@keybuf.
 */
int rtllib_wx_set_encode(struct rtllib_device *ieee,
			 struct iw_request_info *info,
			 union iwreq_data *wrqu, char *keybuf)
{
	struct iw_point *erq = &wrqu->encoding;
	struct net_device *dev = ieee->dev;
	struct rtllib_security sec = {
		.flags = 0
	};
	int i, key, key_provided, len;
	struct lib80211_crypt_data **crypt;
	/* Key index is 1-based in the request; 0 means "current TX key". */
	key = erq->flags & IW_ENCODE_INDEX;
	if (key) {
		if (key > NUM_WEP_KEYS)
			return -EINVAL;
		key--;
		key_provided = 1;
	} else {
		key_provided = 0;
		key = ieee->crypt_info.tx_keyidx;
	}
	netdev_dbg(ieee->dev, "Key: %d [%s]\n", key, key_provided ?
			   "provided" : "default");
	crypt = &ieee->crypt_info.crypt[key];
	if (erq->flags & IW_ENCODE_DISABLED) {
		if (key_provided && *crypt) {
			netdev_dbg(ieee->dev,
				   "Disabling encryption on key %d.\n", key);
			lib80211_crypt_delayed_deinit(&ieee->crypt_info, crypt);
		} else {
			netdev_dbg(ieee->dev, "Disabling encryption.\n");
		}
		/* Check all the keys to see if any are still configured,
		 * and if no key index was provided, de-init them all
		 */
		for (i = 0; i < NUM_WEP_KEYS; i++) {
			if (ieee->crypt_info.crypt[i]) {
				if (key_provided)
					break;
				lib80211_crypt_delayed_deinit(&ieee->crypt_info,
							      &ieee->crypt_info.crypt[i]);
			}
		}
		if (i == NUM_WEP_KEYS) {
			sec.enabled = 0;
			sec.level = SEC_LEVEL_0;
			sec.flags |= SEC_ENABLED | SEC_LEVEL;
		}
		goto done;
	}
	sec.enabled = 1;
	sec.flags |= SEC_ENABLED;
	if (*crypt && (*crypt)->ops &&
	    strcmp((*crypt)->ops->name, "R-WEP") != 0) {
		/* changing to use WEP; deinit previously used algorithm
		 * on this key
		 */
		lib80211_crypt_delayed_deinit(&ieee->crypt_info, crypt);
	}
	if (!*crypt) {
		struct lib80211_crypt_data *new_crypt;
		/* take WEP into use */
		new_crypt = kzalloc(sizeof(*new_crypt), GFP_KERNEL);
		if (!new_crypt)
			return -ENOMEM;
		/* Load the WEP crypto module on demand if not registered. */
		new_crypt->ops = lib80211_get_crypto_ops("R-WEP");
		if (!new_crypt->ops) {
			request_module("rtllib_crypt_wep");
			new_crypt->ops = lib80211_get_crypto_ops("R-WEP");
		}
		if (new_crypt->ops)
			new_crypt->priv = new_crypt->ops->init(key);
		if (!new_crypt->ops || !new_crypt->priv) {
			kfree(new_crypt);
			new_crypt = NULL;
			netdev_warn(dev,
				    "%s: could not initialize WEP: load module rtllib_crypt_wep\n",
				    dev->name);
			return -EOPNOTSUPP;
		}
		*crypt = new_crypt;
	}
	/* If a new key was provided, set it up */
	if (erq->length > 0) {
		/* 40-bit (5 byte) or 104-bit (13 byte) WEP, zero-padded. */
		len = erq->length <= 5 ? 5 : 13;
		memcpy(sec.keys[key], keybuf, erq->length);
		if (len > erq->length)
			memset(sec.keys[key] + erq->length, 0,
			       len - erq->length);
		netdev_dbg(ieee->dev, "Setting key %d to '%s' (%d:%d bytes)\n",
			   key, escape_essid(sec.keys[key], len), erq->length,
			   len);
		sec.key_sizes[key] = len;
		(*crypt)->ops->set_key(sec.keys[key], len, NULL,
				       (*crypt)->priv);
		sec.flags |= (1 << key);
		/* This ensures a key will be activated if no key is
		 * explicitly set
		 */
		if (key == sec.active_key)
			sec.flags |= SEC_ACTIVE_KEY;
		ieee->crypt_info.tx_keyidx = key;
	} else {
		/* No key material given: keep the existing key, or install
		 * an all-zero default if none is configured yet.
		 */
		len = (*crypt)->ops->get_key(sec.keys[key], WEP_KEY_LEN,
					     NULL, (*crypt)->priv);
		if (len == 0) {
			/* Set a default key of all 0 */
			netdev_info(ieee->dev, "Setting key %d to all zero.\n", key);
			memset(sec.keys[key], 0, 13);
			(*crypt)->ops->set_key(sec.keys[key], 13, NULL,
					       (*crypt)->priv);
			sec.key_sizes[key] = 13;
			sec.flags |= (1 << key);
		}
		/* No key data - just set the default TX key index */
		if (key_provided) {
			netdev_dbg(ieee->dev,
				   "Setting key %d as default Tx key.\n", key);
			ieee->crypt_info.tx_keyidx = key;
			sec.active_key = key;
			sec.flags |= SEC_ACTIVE_KEY;
		}
	}
done:
	/* Derive open vs. shared-key auth from the RESTRICTED flag. */
	ieee->open_wep = !(erq->flags & IW_ENCODE_RESTRICTED);
	ieee->auth_mode = ieee->open_wep ? WLAN_AUTH_OPEN :
			  WLAN_AUTH_SHARED_KEY;
	sec.auth_mode = ieee->open_wep ? WLAN_AUTH_OPEN : WLAN_AUTH_SHARED_KEY;
	sec.flags |= SEC_AUTH_MODE;
	netdev_dbg(ieee->dev, "Auth: %s\n", sec.auth_mode == WLAN_AUTH_OPEN ?
		   "OPEN" : "SHARED KEY");
	/* For now we just support WEP, so only set that security level...
	 * TODO: When WPA is added this is one place that needs to change
	 */
	sec.flags |= SEC_LEVEL;
	sec.level = SEC_LEVEL_1; /* 40 and 104 bit WEP */
	return 0;
}
EXPORT_SYMBOL(rtllib_wx_set_encode);
/* SIOCGIWENCODE handler: report key material, index and auth policy for
 * the requested (or current TX) WEP key.
 */
int rtllib_wx_get_encode(struct rtllib_device *ieee,
			 struct iw_request_info *info,
			 union iwreq_data *wrqu, char *keybuf)
{
	struct iw_point *erq = &wrqu->encoding;
	struct lib80211_crypt_data *crypt;
	int idx;
	int keylen;

	if (ieee->iw_mode == IW_MODE_MONITOR)
		return -1;

	/* Key index is 1-based in the request; 0 selects the TX key. */
	idx = erq->flags & IW_ENCODE_INDEX;
	if (!idx) {
		idx = ieee->crypt_info.tx_keyidx;
	} else {
		if (idx > NUM_WEP_KEYS)
			return -EINVAL;
		idx--;
	}

	crypt = ieee->crypt_info.crypt[idx];
	erq->flags = idx + 1;

	if (!crypt || !crypt->ops) {
		erq->length = 0;
		erq->flags |= IW_ENCODE_DISABLED;
		return 0;
	}

	keylen = crypt->ops->get_key(keybuf, SCM_KEY_LEN, NULL, crypt->priv);
	erq->length = max(keylen, 0);

	erq->flags |= IW_ENCODE_ENABLED;
	erq->flags |= ieee->open_wep ? IW_ENCODE_OPEN : IW_ENCODE_RESTRICTED;

	return 0;
}
EXPORT_SYMBOL(rtllib_wx_get_encode);
/* SIOCSIWENCODEEXT handler: configure WEP/TKIP/CCMP keys via the extended
 * encode interface, loading the matching rtllib crypto module on demand.
 */
int rtllib_wx_set_encode_ext(struct rtllib_device *ieee,
			     struct iw_request_info *info,
			     union iwreq_data *wrqu, char *extra)
{
	int ret = 0;
	struct net_device *dev = ieee->dev;
	struct iw_point *encoding = &wrqu->encoding;
	struct iw_encode_ext *ext = (struct iw_encode_ext *)extra;
	int i, idx;
	int group_key = 0;
	const char *alg, *module;
	struct lib80211_crypto_ops *ops;
	struct lib80211_crypt_data **crypt;
	struct rtllib_security sec = {
		.flags = 0,
	};

	/* Key index is 1..NUM_WEP_KEYS in the request; 0 = current TX key. */
	idx = encoding->flags & IW_ENCODE_INDEX;
	if (idx) {
		if (idx < 1 || idx > NUM_WEP_KEYS)
			return -EINVAL;
		idx--;
	} else {
		idx = ieee->crypt_info.tx_keyidx;
	}
	if (ext->ext_flags & IW_ENCODE_EXT_GROUP_KEY) {
		crypt = &ieee->crypt_info.crypt[idx];
		group_key = 1;
	} else {
		/* some Cisco APs use idx>0 for unicast in dynamic WEP */
		if (idx != 0 && ext->alg != IW_ENCODE_ALG_WEP)
			return -EINVAL;
		if (ieee->iw_mode == IW_MODE_INFRA)
			crypt = &ieee->crypt_info.crypt[idx];
		else
			return -EINVAL;
	}

	sec.flags |= SEC_ENABLED;
	/* Disable: tear down this key and, if no keys remain configured,
	 * drop the security level to none.
	 */
	if ((encoding->flags & IW_ENCODE_DISABLED) ||
	    ext->alg == IW_ENCODE_ALG_NONE) {
		if (*crypt)
			lib80211_crypt_delayed_deinit(&ieee->crypt_info, crypt);

		for (i = 0; i < NUM_WEP_KEYS; i++) {
			if (ieee->crypt_info.crypt[i])
				break;
		}
		if (i == NUM_WEP_KEYS) {
			sec.enabled = 0;
			sec.level = SEC_LEVEL_0;
			sec.flags |= SEC_LEVEL;
		}
		goto done;
	}

	sec.enabled = 1;
	switch (ext->alg) {
	case IW_ENCODE_ALG_WEP:
		alg = "R-WEP";
		module = "rtllib_crypt_wep";
		break;
	case IW_ENCODE_ALG_TKIP:
		alg = "R-TKIP";
		module = "rtllib_crypt_tkip";
		break;
	case IW_ENCODE_ALG_CCMP:
		alg = "R-CCMP";
		module = "rtllib_crypt_ccmp";
		break;
	default:
		netdev_dbg(ieee->dev, "Unknown crypto alg %d\n", ext->alg);
		ret = -EINVAL;
		goto done;
	}
	netdev_dbg(dev, "alg name:%s\n", alg);

	/* Look the algorithm up; on a miss, try to load the module that
	 * provides it and retry.  (A former copy of "module" into a stack
	 * buffer via sprintf before request_module() was pointless and has
	 * been dropped.)
	 */
	ops = lib80211_get_crypto_ops(alg);
	if (!ops) {
		request_module("%s", module);
		ops = lib80211_get_crypto_ops(alg);
	}
	if (!ops) {
		netdev_info(dev, "========>unknown crypto alg %d\n", ext->alg);
		ret = -EINVAL;
		goto done;
	}

	/* Replace the crypt instance when none exists yet or when the
	 * algorithm changed for this key slot.
	 */
	if (!*crypt || (*crypt)->ops != ops) {
		struct lib80211_crypt_data *new_crypt;

		lib80211_crypt_delayed_deinit(&ieee->crypt_info, crypt);
		new_crypt = kzalloc(sizeof(*new_crypt), GFP_KERNEL);
		if (!new_crypt) {
			ret = -ENOMEM;
			goto done;
		}
		new_crypt->ops = ops;
		if (new_crypt->ops && try_module_get(new_crypt->ops->owner))
			new_crypt->priv = new_crypt->ops->init(idx);
		if (!new_crypt->priv) {
			kfree(new_crypt);
			ret = -EINVAL;
			goto done;
		}
		*crypt = new_crypt;
	}

	if (ext->key_len > 0 && (*crypt)->ops->set_key &&
	    (*crypt)->ops->set_key(ext->key, ext->key_len, ext->rx_seq,
				   (*crypt)->priv) < 0) {
		netdev_info(dev, "key setting failed\n");
		ret = -EINVAL;
		goto done;
	}

	if (ext->ext_flags & IW_ENCODE_EXT_SET_TX_KEY) {
		ieee->crypt_info.tx_keyidx = idx;
		sec.active_key = idx;
		sec.flags |= SEC_ACTIVE_KEY;
	}
	if (ext->alg != IW_ENCODE_ALG_NONE) {
		sec.key_sizes[idx] = ext->key_len;
		sec.flags |= (1 << idx);
		if (ext->alg == IW_ENCODE_ALG_WEP) {
			sec.flags |= SEC_LEVEL;
			sec.level = SEC_LEVEL_1;
		} else if (ext->alg == IW_ENCODE_ALG_TKIP) {
			sec.flags |= SEC_LEVEL;
			sec.level = SEC_LEVEL_2;
		} else if (ext->alg == IW_ENCODE_ALG_CCMP) {
			sec.flags |= SEC_LEVEL;
			sec.level = SEC_LEVEL_3;
		}
		/* Don't set sec level for group keys. */
		if (group_key)
			sec.flags &= ~SEC_LEVEL;
	}
done:
	return ret;
}
EXPORT_SYMBOL(rtllib_wx_set_encode_ext);
/* SIOCSIWMLME handler: process a deauthenticate/disassociate request from
 * user space by tearing down the current association.
 */
int rtllib_wx_set_mlme(struct rtllib_device *ieee,
		       struct iw_request_info *info,
		       union iwreq_data *wrqu, char *extra)
{
	bool deauth = false;
	struct iw_mlme *mlme = (struct iw_mlme *)extra;

	if (ieee->link_state != MAC80211_LINKED)
		return -ENOLINK;

	mutex_lock(&ieee->wx_mutex);

	switch (mlme->cmd) {
	case IW_MLME_DEAUTH:
		deauth = true;
		fallthrough;
	case IW_MLME_DISASSOC:
		if (deauth)
			netdev_info(ieee->dev, "deauth packet!\n");
		else
			netdev_info(ieee->dev, "disassociate packet!\n");
		ieee->cannot_notify = true;
		SendDisassociation(ieee, deauth, mlme->reason_code);
		rtllib_disassociate(ieee);
		ieee->wap_set = 0;
		/* Poison the BSSID (all 0x55) so the old AP no longer
		 * matches current_network.
		 */
		memset(ieee->current_network.bssid, 0x55, 6);
		ieee->ssid_set = 0;
		ieee->current_network.ssid[0] = '\0';
		ieee->current_network.ssid_len = 0;
		break;
	default:
		mutex_unlock(&ieee->wx_mutex);
		return -EOPNOTSUPP;
	}

	mutex_unlock(&ieee->wx_mutex);
	return 0;
}
EXPORT_SYMBOL(rtllib_wx_set_mlme);
/* SIOCSIWAUTH handler: apply a single authentication parameter.  Most of
 * the WPA parameters are handled inside wpa_supplicant and are simply
 * accepted here without action.
 */
int rtllib_wx_set_auth(struct rtllib_device *ieee,
		       struct iw_request_info *info,
		       struct iw_param *data, char *extra)
{
	switch (data->flags & IW_AUTH_INDEX) {
	case IW_AUTH_WPA_VERSION:
		break;
	case IW_AUTH_CIPHER_PAIRWISE:
	case IW_AUTH_CIPHER_GROUP:
	case IW_AUTH_KEY_MGMT:
		/* Host AP driver does not use these parameters and allows
		 * wpa_supplicant to control them internally.
		 */
		break;
	case IW_AUTH_TKIP_COUNTERMEASURES:
		ieee->tkip_countermeasures = data->value;
		break;
	case IW_AUTH_DROP_UNENCRYPTED:
		ieee->drop_unencrypted = data->value;
		break;
	case IW_AUTH_80211_AUTH_ALG:
		/* Map the requested algorithm to open_wep/auth_mode:
		 * shared key -> mode 1, open system -> mode 0, LEAP -> 2.
		 */
		if (data->value & IW_AUTH_ALG_SHARED_KEY) {
			ieee->open_wep = 0;
			ieee->auth_mode = 1;
		} else if (data->value & IW_AUTH_ALG_OPEN_SYSTEM) {
			ieee->open_wep = 1;
			ieee->auth_mode = 0;
		} else if (data->value & IW_AUTH_ALG_LEAP) {
			ieee->open_wep = 1;
			ieee->auth_mode = 2;
		} else {
			return -EINVAL;
		}
		break;
	case IW_AUTH_WPA_ENABLED:
		ieee->wpa_enabled = !!data->value;
		break;
	case IW_AUTH_RX_UNENCRYPTED_EAPOL:
		ieee->ieee802_1x = data->value;
		break;
	case IW_AUTH_PRIVACY_INVOKED:
		ieee->privacy_invoked = data->value;
		break;
	default:
		return -EOPNOTSUPP;
	}
	return 0;
}
EXPORT_SYMBOL(rtllib_wx_set_auth);
/* Store a generic IE supplied by user space.  A vendor-specific IE
 * carrying the Microsoft WPS OUI is kept as the WPS IE; anything else is
 * stored as the WPA IE (and a zero length clears both).
 */
int rtllib_wx_set_gen_ie(struct rtllib_device *ieee, u8 *ie, size_t len)
{
	u8 *buf;
	u8 eid, wps_oui[4] = {0x0, 0x50, 0xf2, 0x04};

	if (len > MAX_WPA_IE_LEN || (len && !ie))
		return -EINVAL;

	/* The WPS OUI test reads ie[2..5], so require at least a full IE
	 * header plus the 4-byte OUI before touching it (previously this
	 * could read past the end of a short buffer).
	 */
	if (len >= 6) {
		eid = ie[0];
		if ((eid == MFIE_TYPE_GENERIC) && (!memcmp(&ie[2], wps_oui, 4))) {
			ieee->wps_ie_len = min_t(size_t, len, MAX_WZC_IE_LEN);
			buf = kmemdup(ie, ieee->wps_ie_len, GFP_KERNEL);
			if (!buf)
				return -ENOMEM;
			/* Free any previously stored WPS IE; the old code
			 * overwrote the pointer and leaked it.
			 */
			kfree(ieee->wps_ie);
			ieee->wps_ie = buf;
			return 0;
		}
	}
	ieee->wps_ie_len = 0;
	kfree(ieee->wps_ie);
	ieee->wps_ie = NULL;

	if (len) {
		/* The buffer must contain exactly one IE: ie[1] is the
		 * payload length, plus the two header bytes.
		 */
		if (len != ie[1] + 2)
			return -EINVAL;
		buf = kmemdup(ie, len, GFP_KERNEL);
		if (!buf)
			return -ENOMEM;
		kfree(ieee->wpa_ie);
		ieee->wpa_ie = buf;
		ieee->wpa_ie_len = len;
	} else {
		kfree(ieee->wpa_ie);
		ieee->wpa_ie = NULL;
		ieee->wpa_ie_len = 0;
	}
	return 0;
}
EXPORT_SYMBOL(rtllib_wx_set_gen_ie);
| linux-master | drivers/staging/rtl8192e/rtllib_wx.c |
// SPDX-License-Identifier: GPL-2.0
/*
* Copyright(c) 2004 Intel Corporation. All rights reserved.
*
* Portions of this file are based on the WEP enablement code provided by the
* Host AP project hostap-drivers v0.1.3
* Copyright (c) 2001-2002, SSH Communications Security Corp and Jouni Malinen
* <[email protected]>
* Copyright (c) 2002-2003, Jouni Malinen <[email protected]>
*
* Contact Information:
* James P. Ketrenos <[email protected]>
* Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
*/
#include <linux/compiler.h>
#include <linux/errno.h>
#include <linux/if_arp.h>
#include <linux/in6.h>
#include <linux/in.h>
#include <linux/ip.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/netdevice.h>
#include <linux/pci.h>
#include <linux/proc_fs.h>
#include <linux/skbuff.h>
#include <linux/slab.h>
#include <linux/tcp.h>
#include <linux/types.h>
#include <linux/wireless.h>
#include <linux/etherdevice.h>
#include <linux/uaccess.h>
#include <net/arp.h>
#include "rtllib.h"
/* Global debug component bitmask for the rtllib stack; starts with only
 * COMP_ERR enabled.  Exported so modules built on rtllib can use it.
 */
u32 rt_global_debug_component = COMP_ERR;
EXPORT_SYMBOL(rt_global_debug_component);
/* Allocate the BSS descriptor array used for scan results; a no-op
 * when the array already exists.  Returns 0 on success or -ENOMEM.
 */
static inline int rtllib_networks_allocate(struct rtllib_device *ieee)
{
	struct rtllib_network *nets;

	if (ieee->networks)
		return 0;

	nets = kcalloc(MAX_NETWORK_COUNT, sizeof(*nets), GFP_KERNEL);
	if (!nets)
		return -ENOMEM;

	ieee->networks = nets;
	return 0;
}
/* Release the scan-result storage and clear the pointer.  kfree(NULL)
 * is a no-op, so the former explicit NULL check was redundant.
 */
static inline void rtllib_networks_free(struct rtllib_device *ieee)
{
	kfree(ieee->networks);
	ieee->networks = NULL;
}
/* Reset the in-use list and place every preallocated network
 * descriptor on the free list.
 */
static inline void rtllib_networks_initialize(struct rtllib_device *ieee)
{
	int idx;

	INIT_LIST_HEAD(&ieee->network_free_list);
	INIT_LIST_HEAD(&ieee->network_list);

	for (idx = 0; idx < MAX_NETWORK_COUNT; idx++)
		list_add_tail(&ieee->networks[idx].list,
			      &ieee->network_free_list);
}
/* Allocate and initialise an rtllib net_device with @sizeof_priv bytes
 * of driver-private data appended after the rtllib state.  Returns the
 * new device or NULL on any failure; on success the caller owns the
 * device and must release it with free_rtllib().
 */
struct net_device *alloc_rtllib(int sizeof_priv)
{
	struct rtllib_device *ieee = NULL;
	struct net_device *dev;
	int i, err;
	pr_debug("rtllib: Initializing...\n");
	/* rtllib_device lives in the netdev private area, immediately
	 * followed by the driver's own private block.
	 */
	dev = alloc_etherdev(sizeof(struct rtllib_device) + sizeof_priv);
	if (!dev) {
		pr_err("Unable to allocate net_device.\n");
		return NULL;
	}
	ieee = (struct rtllib_device *)netdev_priv_rsl(dev);
	ieee->dev = dev;
	err = rtllib_networks_allocate(ieee);
	if (err) {
		pr_err("Unable to allocate beacon storage: %d\n", err);
		goto free_netdev;
	}
	rtllib_networks_initialize(ieee);
	/* Default fragmentation threshold is maximum payload size */
	ieee->fts = DEFAULT_FTS;
	ieee->scan_age = DEFAULT_MAX_SCAN_AGE;
	ieee->open_wep = 1;
	ieee->ieee802_1x = 1; /* Default to supporting 802.1x */
	ieee->rtllib_ap_sec_type = rtllib_ap_sec_type;
	spin_lock_init(&ieee->lock);
	spin_lock_init(&ieee->wpax_suitlist_lock);
	spin_lock_init(&ieee->reorder_spinlock);
	atomic_set(&ieee->atm_swbw, 0);
	/* SAM FIXME */
	lib80211_crypt_info_init(&ieee->crypt_info, "RTLLIB", &ieee->lock);
	/* Security state starts fully disabled; the driver/WX handlers
	 * enable pieces later.
	 */
	ieee->wpa_enabled = 0;
	ieee->tkip_countermeasures = 0;
	ieee->drop_unencrypted = 0;
	ieee->privacy_invoked = 0;
	ieee->ieee802_1x = 1;
	ieee->raw_tx = 0;
	ieee->hwsec_active = 0;
	memset(ieee->swcamtable, 0, sizeof(struct sw_cam_table) * 32);
	err = rtllib_softmac_init(ieee);
	if (err)
		goto free_crypt_info;
	ieee->ht_info = kzalloc(sizeof(struct rt_hi_throughput), GFP_KERNEL);
	if (!ieee->ht_info)
		goto free_softmac;
	HTUpdateDefaultSetting(ieee);
	HTInitializeHTInfo(ieee);
	TSInitialize(ieee);
	for (i = 0; i < IEEE_IBSS_MAC_HASH_SIZE; i++)
		INIT_LIST_HEAD(&ieee->ibss_mac_hash[i]);
	/* 17 duplicate-detection slots. NOTE(review): presumably one per
	 * TID (0..15) plus one for non-QoS traffic — confirm against the
	 * rx path before relying on this.
	 */
	for (i = 0; i < 17; i++) {
		ieee->last_rxseq_num[i] = -1;
		ieee->last_rxfrag_num[i] = -1;
		ieee->last_packet_time[i] = 0;
	}
	return dev;
	/* Error unwinding: each label releases everything acquired
	 * before the corresponding failure point, in reverse order.
	 */
free_softmac:
	rtllib_softmac_free(ieee);
free_crypt_info:
	lib80211_crypt_info_free(&ieee->crypt_info);
	rtllib_networks_free(ieee);
free_netdev:
	free_netdev(dev);
	return NULL;
}
EXPORT_SYMBOL(alloc_rtllib);
/* Tear down an rtllib device created by alloc_rtllib() and release the
 * underlying net_device.  The order mirrors the reverse of the
 * allocation sequence.
 */
void free_rtllib(struct net_device *dev)
{
	struct rtllib_device *ieee;

	ieee = (struct rtllib_device *)netdev_priv_rsl(dev);
	kfree(ieee->ht_info);
	rtllib_softmac_free(ieee);
	lib80211_crypt_info_free(&ieee->crypt_info);
	rtllib_networks_free(ieee);
	free_netdev(dev);
}
EXPORT_SYMBOL(free_rtllib);
/* Module init/exit are intentionally empty: all functionality is
 * exposed through the exported symbols above and driven by the
 * hardware drivers that link against this module.
 */
static int __init rtllib_init(void)
{
	return 0;
}
static void __exit rtllib_exit(void)
{
}
module_init(rtllib_init);
module_exit(rtllib_exit);
MODULE_LICENSE("GPL");
| linux-master | drivers/staging/rtl8192e/rtllib_module.c |
// SPDX-License-Identifier: GPL-2.0
/*
* Copyright(c) 2003 - 2004 Intel Corporation. All rights reserved.
*
* Contact Information:
* James P. Ketrenos <[email protected]>
* Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
*
* Few modifications for Realtek's Wi-Fi drivers by
* Andrea Merello <[email protected]>
*
* A special thanks goes to Realtek for their support !
*/
#include <linux/compiler.h>
#include <linux/errno.h>
#include <linux/if_arp.h>
#include <linux/in6.h>
#include <linux/in.h>
#include <linux/ip.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/netdevice.h>
#include <linux/pci.h>
#include <linux/proc_fs.h>
#include <linux/skbuff.h>
#include <linux/slab.h>
#include <linux/tcp.h>
#include <linux/types.h>
#include <linux/wireless.h>
#include <linux/etherdevice.h>
#include <linux/uaccess.h>
#include <linux/if_vlan.h>
#include "rtllib.h"
/* 802.11 Data Frame
*
*
* 802.11 frame_control for data frames - 2 bytes
* ,--------------------------------------------------------------------.
* bits | 0 | 1 | 2 | 3 | 4 | 5 | 6 | 7 | 8 | 9 | a | b | c | d | e |
* |---|---|---|---|---|---|---|---|---|----|----|-----|-----|-----|----|
* val | 0 | 0 | 0 | 1 | x | 0 | 0 | 0 | 1 | 0 | x | x | x | x | x |
* |---|---|---|---|---|---|---|---|---|----|----|-----|-----|-----|----|
* desc | ver | type | ^-subtype-^ |to |from|more|retry| pwr |more |wep |
* | | | x=0 data |DS | DS |frag| | mgm |data | |
* | | | x=1 data+ack | | | | | | | |
* '--------------------------------------------------------------------'
* /\
* |
* 802.11 Data Frame |
* ,--------- 'ctrl' expands to >---'
* |
* ,--'---,-------------------------------------------------------------.
* Bytes | 2 | 2 | 6 | 6 | 6 | 2 | 0..2312 | 4 |
* |------|------|---------|---------|---------|------|---------|------|
* Desc. | ctrl | dura | DA/RA | TA | SA | Sequ | Frame | fcs |
* | | tion | (BSSID) | | | ence | data | |
* `--------------------------------------------------| |------'
* Total: 28 non-data bytes `----.----'
* |
* .- 'Frame data' expands to <---------------------------'
* |
* V
* ,---------------------------------------------------.
* Bytes | 1 | 1 | 1 | 3 | 2 | 0-2304 |
* |------|------|---------|----------|------|---------|
* Desc. | SNAP | SNAP | Control |Eth Tunnel| Type | IP |
* | DSAP | SSAP | | | | Packet |
* | 0xAA | 0xAA |0x03 (UI)|0x00-00-F8| | |
* `-----------------------------------------| |
* Total: 8 non-data bytes `----.----'
* |
* .- 'IP Packet' expands, if WEP enabled, to <--'
* |
* V
* ,-----------------------.
* Bytes | 4 | 0-2296 | 4 |
* |-----|-----------|-----|
* Desc. | IV | Encrypted | ICV |
* | | IP Packet | |
* `-----------------------'
* Total: 8 non-data bytes
*
*
* 802.3 Ethernet Data Frame
*
* ,-----------------------------------------.
* Bytes | 6 | 6 | 2 | Variable | 4 |
* |-------|-------|------|-----------|------|
* Desc. | Dest. | Source| Type | IP Packet | fcs |
* | MAC | MAC | | | |
* `-----------------------------------------'
* Total: 18 non-data bytes
*
* In the event that fragmentation is required, the incoming payload is split
* into N parts of size ieee->fts. The first fragment contains the SNAP header
* and the remaining packets are just data.
*
* If encryption is enabled, each fragment payload size is reduced by enough
* space to add the prefix and postfix (IV and ICV totalling 8 bytes in
* the case of WEP) So if you have 1500 bytes of payload with ieee->fts set to
* 500 without encryption it will take 3 frames. With WEP it will take 4 frames
* as the payload of each frame is reduced to 492 bytes.
*
* SKB visualization
*
* ,- skb->data
* |
* | ETHERNET HEADER ,-<-- PAYLOAD
* | | 14 bytes from skb->data
* | 2 bytes for Type --> ,T. | (sizeof ethhdr)
* | | | |
* |,-Dest.--. ,--Src.---. | | |
* | 6 bytes| | 6 bytes | | | |
* v | | | | | |
* 0 | v 1 | v | v 2
* 0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5
* ^ | ^ | ^ |
* | | | | | |
* | | | | `T' <---- 2 bytes for Type
* | | | |
* | | '---SNAP--' <-------- 6 bytes for SNAP
* | |
* `-IV--' <-------------------- 4 bytes for IV (WEP)
*
* SNAP HEADER
*
*/
static u8 P802_1H_OUI[P80211_OUI_LEN] = { 0x00, 0x00, 0xf8 };
static u8 RFC1042_OUI[P80211_OUI_LEN] = { 0x00, 0x00, 0x00 };
/* Write an 802.2 SNAP header for protocol @h_proto at @data and return
 * the number of bytes written (SNAP header plus 2-byte protocol id).
 * IPX (0x8137) and AppleTalk AARP (0x80f3) use the 802.1H bridge
 * tunnel OUI; everything else uses RFC 1042 encapsulation.
 */
static int rtllib_put_snap(u8 *data, u16 h_proto)
{
	struct rtllib_snap_hdr *hdr = (struct rtllib_snap_hdr *)data;
	const u8 *oui;

	hdr->dsap = 0xaa;
	hdr->ssap = 0xaa;
	hdr->ctrl = 0x03;

	oui = (h_proto == 0x8137 || h_proto == 0x80f3) ? P802_1H_OUI
						       : RFC1042_OUI;
	hdr->oui[0] = oui[0];
	hdr->oui[1] = oui[1];
	hdr->oui[2] = oui[2];

	*(__be16 *)(data + SNAP_SIZE) = htons(h_proto);

	return SNAP_SIZE + sizeof(u16);
}
/* Encrypt one TX fragment in place using the currently selected TX key.
 * @frag:    the fragment skb (802.11 header already present)
 * @hdr_len: length of the 802.11 header preceding the payload
 * Returns 0 on success, -1 when no crypto backend is configured or the
 * backend reports a failure.
 */
int rtllib_encrypt_fragment(struct rtllib_device *ieee, struct sk_buff *frag,
			    int hdr_len)
{
	struct lib80211_crypt_data *crypt = NULL;
	int res;

	crypt = ieee->crypt_info.crypt[ieee->crypt_info.tx_keyidx];

	if (!(crypt && crypt->ops)) {
		netdev_info(ieee->dev, "=========>%s(), crypt is null\n",
			    __func__);
		return -1;
	}
	/* To encrypt, frame format is:
	 * IV (4 bytes), clear payload (including SNAP), ICV (4 bytes)
	 */

	/* Host-based IEEE 802.11 fragmentation for TX is not yet supported, so
	 * call both MSDU and MPDU encryption functions from here.
	 */
	/* Pin the crypt backend for the duration of the ops calls. */
	atomic_inc(&crypt->refcnt);
	res = 0;
	if (crypt->ops->encrypt_msdu)
		res = crypt->ops->encrypt_msdu(frag, hdr_len, crypt->priv);
	if (res == 0 && crypt->ops->encrypt_mpdu)
		res = crypt->ops->encrypt_mpdu(frag, hdr_len, crypt->priv);

	atomic_dec(&crypt->refcnt);
	if (res < 0) {
		netdev_info(ieee->dev, "%s: Encryption failed: len=%d.\n",
			    ieee->dev->name, frag->len);
		return -1;
	}

	return 0;
}
/* Release a TX buffer descriptor.
 * NOTE(review): the fragment SKBs are not freed here — presumably they
 * are consumed elsewhere before this is called; confirm at the callers.
 */
void rtllib_txb_free(struct rtllib_txb *txb)
{
	kfree(txb);	/* kfree(NULL) is a no-op */
}
/* Allocate a TX buffer descriptor holding @nr_frags fragment SKBs of
 * @txb_size bytes each.  On any failure everything already allocated
 * is unwound and NULL is returned.
 */
static struct rtllib_txb *rtllib_alloc_txb(int nr_frags, int txb_size,
					   gfp_t gfp_mask)
{
	struct rtllib_txb *txb;
	int n;

	txb = kzalloc(struct_size(txb, fragments, nr_frags), gfp_mask);
	if (!txb)
		return NULL;

	txb->nr_frags = nr_frags;
	txb->frag_size = cpu_to_le16(txb_size);

	for (n = 0; n < nr_frags; n++) {
		struct sk_buff *frag = dev_alloc_skb(txb_size);

		if (unlikely(!frag))
			goto err_free;
		/* Start each fragment with a clean control buffer. */
		memset(frag->cb, 0, sizeof(frag->cb));
		txb->fragments[n] = frag;
	}
	return txb;

err_free:
	while (--n >= 0)
		dev_kfree_skb_any(txb->fragments[n]);
	kfree(txb);
	return NULL;
}
/* Map the IPv4 TOS/DSCP field of @skb to an 802.1d priority (0-7).
 * Non-IP frames and unrecognised code points get best-effort (0).
 * @bIsAmsdu is currently unused.
 */
static int rtllib_classify(struct sk_buff *skb, u8 bIsAmsdu)
{
	struct ethhdr *eth;
	struct iphdr *ip;

	eth = (struct ethhdr *)skb->data;
	if (eth->h_proto != htons(ETH_P_IP))
		return 0;

#ifdef VERBOSE_DEBUG
	/* print_hex_dump_bytes() takes (prefix_str, prefix_type, buf, len);
	 * the previous call passed a stray "%s: " format plus five
	 * arguments and would not compile with VERBOSE_DEBUG defined.
	 */
	print_hex_dump_bytes(__func__, DUMP_PREFIX_NONE, skb->data,
			     skb->len);
#endif
	ip = ip_hdr(skb);
	switch (ip->tos & 0xfc) {
	case 0x20:
		return 2;
	case 0x40:
		return 1;
	case 0x60:
		return 3;
	case 0x80:
		return 4;
	case 0xa0:
		return 5;
	case 0xc0:
		return 6;
	case 0xe0:
		return 7;
	default:
		return 0;
	}
}
/* Decide whether this QoS data frame may be sent as part of an A-MPDU
 * aggregate and fill the aggregation fields of @tcb_desc accordingly.
 * May also kick off an ADDBA handshake for the traffic stream.  Bails
 * out early for scanning, non-HT, non-QoS, multicast, DHCP and other
 * cases where aggregation is undesirable.
 */
static void rtllib_tx_query_agg_cap(struct rtllib_device *ieee,
				    struct sk_buff *skb,
				    struct cb_desc *tcb_desc)
{
	struct rt_hi_throughput *ht_info = ieee->ht_info;
	struct tx_ts_record *pTxTs = NULL;
	struct rtllib_hdr_1addr *hdr = (struct rtllib_hdr_1addr *)skb->data;

	if (rtllib_act_scanning(ieee, false))
		return;

	if (!ht_info->bCurrentHTSupport || !ht_info->enable_ht)
		return;
	if (!IsQoSDataFrame(skb->data))
		return;
	if (is_multicast_ether_addr(hdr->addr1))
		return;

	/* Keep latency-sensitive DHCP and just-associated traffic out of
	 * aggregates.
	 */
	if (tcb_desc->bdhcp || ieee->CntAfterLink < 2)
		return;

	if (ht_info->iot_action & HT_IOT_ACT_TX_NO_AGGREGATION)
		return;

	if (!ieee->GetNmodeSupportBySecCfg(ieee->dev))
		return;
	if (ht_info->bCurrentAMPDUEnable) {
		if (!GetTs(ieee, (struct ts_common_info **)(&pTxTs), hdr->addr1,
			   skb->priority, TX_DIR, true)) {
			netdev_info(ieee->dev, "%s: can't get TS\n", __func__);
			return;
		}
		if (!pTxTs->TxAdmittedBARecord.b_valid) {
			/* No admitted BA session yet: possibly start the
			 * ADDBA handshake, but don't aggregate this frame.
			 */
			if (ieee->wpa_ie_len && (ieee->pairwise_key_type ==
						 KEY_TYPE_NA)) {
				;
			} else if (tcb_desc->bdhcp == 1) {
				;
			} else if (!pTxTs->bDisable_AddBa) {
				TsStartAddBaProcess(ieee, pTxTs);
			}
			goto FORCED_AGG_SETTING;
		} else if (!pTxTs->bUsingBa) {
			/* BA admitted but not yet in use: switch it on once
			 * the sequence numbers line up.
			 */
			if (SN_LESS(pTxTs->TxAdmittedBARecord.ba_start_seq_ctrl.field.seq_num,
				    (pTxTs->TxCurSeq + 1) % 4096))
				pTxTs->bUsingBa = true;
			else
				goto FORCED_AGG_SETTING;
		}
		if (ieee->iw_mode == IW_MODE_INFRA) {
			tcb_desc->bAMPDUEnable = true;
			tcb_desc->ampdu_factor = ht_info->CurrentAMPDUFactor;
			tcb_desc->ampdu_density = ht_info->current_mpdu_density;
		}
	}
FORCED_AGG_SETTING:
	/* A forced mode (e.g. from debugfs/module param) overrides the
	 * negotiated settings above.
	 */
	switch (ht_info->ForcedAMPDUMode) {
	case HT_AGG_AUTO:
		break;
	case HT_AGG_FORCE_ENABLE:
		tcb_desc->bAMPDUEnable = true;
		tcb_desc->ampdu_density = ht_info->forced_mpdu_density;
		tcb_desc->ampdu_factor = ht_info->forced_ampdu_factor;
		break;
	case HT_AGG_FORCE_DISABLE:
		tcb_desc->bAMPDUEnable = false;
		tcb_desc->ampdu_density = 0;
		tcb_desc->ampdu_factor = 0;
		break;
	}
}
/* Enable the short PLCP preamble for this frame when the BSS
 * advertises the capability; 1M CCK (data_rate == 2) always uses the
 * long preamble.
 */
static void rtllib_query_ShortPreambleMode(struct rtllib_device *ieee,
					   struct cb_desc *tcb_desc)
{
	tcb_desc->bUseShortPreamble = false;

	if (tcb_desc->data_rate == 2)
		return;

	if (ieee->current_network.capability & WLAN_CAPABILITY_SHORT_PREAMBLE)
		tcb_desc->bUseShortPreamble = true;
}
/* Decide whether this frame may use the HT short guard interval. */
static void rtllib_query_HTCapShortGI(struct rtllib_device *ieee,
				      struct cb_desc *tcb_desc)
{
	struct rt_hi_throughput *ht = ieee->ht_info;

	tcb_desc->bUseShortGI = false;

	/* Short GI is an HT-only feature. */
	if (!ht->bCurrentHTSupport || !ht->enable_ht)
		return;

	if (ht->forced_short_gi) {
		tcb_desc->bUseShortGI = true;
		return;
	}

	/* Use the short-GI capability matching the current bandwidth. */
	if (ht->bCurBW40MHz ? ht->bCurShortGI40MHz : ht->bCurShortGI20MHz)
		tcb_desc->bUseShortGI = true;
}
/* Mark the frame for 40MHz transmission when channel configuration,
 * rate type and the auto-switch state all allow it.
 */
static void rtllib_query_BandwidthMode(struct rtllib_device *ieee,
				       struct cb_desc *tcb_desc)
{
	struct rt_hi_throughput *ht = ieee->ht_info;

	tcb_desc->bPacketBW = false;

	if (!ht->bCurrentHTSupport || !ht->enable_ht)
		return;

	/* Multicast/broadcast frames stay at 20MHz. */
	if (tcb_desc->bMulticast || tcb_desc->bBroadcast)
		return;

	/* Legacy (non-MCS) rates don't use 40MHz either. */
	if ((tcb_desc->data_rate & 0x80) == 0)
		return;

	if (ht->bCurBW40MHz && ht->cur_tx_bw40mhz &&
	    !ieee->bandwidth_auto_switch.bforced_tx20Mhz)
		tcb_desc->bPacketBW = true;
}
/* Choose the RTS/CTS protection scheme for this frame.  Legacy modes
 * use the plain RTS threshold; in HT mode a priority-ordered chain of
 * IOT workarounds, ERP protection, HT operation mode, RTS threshold
 * and A-MPDU state is walked (the while(true)/break structure encodes
 * that priority), falling through to "no protection" when nothing
 * applies.
 */
static void rtllib_query_protectionmode(struct rtllib_device *ieee,
					struct cb_desc *tcb_desc,
					struct sk_buff *skb)
{
	struct rt_hi_throughput *ht_info;

	tcb_desc->bRTSSTBC = false;
	tcb_desc->bRTSUseShortGI = false;
	tcb_desc->bCTSEnable = false;
	tcb_desc->RTSSC = 0;
	tcb_desc->bRTSBW = false;

	/* Protection never applies to group-addressed frames. */
	if (tcb_desc->bBroadcast || tcb_desc->bMulticast)
		return;

	if (is_broadcast_ether_addr(skb->data + 16))
		return;

	if (ieee->mode < WIRELESS_MODE_N_24G) {
		/* Legacy 802.11a/b/g: classic RTS threshold or ERP
		 * self-CTS protection.
		 */
		if (skb->len > ieee->rts) {
			tcb_desc->bRTSEnable = true;
			tcb_desc->rts_rate = MGN_24M;
		} else if (ieee->current_network.buseprotection) {
			tcb_desc->bRTSEnable = true;
			tcb_desc->bCTSEnable = true;
			tcb_desc->rts_rate = MGN_24M;
		}
		return;
	}

	ht_info = ieee->ht_info;

	/* Single-pass loop: each break means "decision made, apply the
	 * short-preamble tweak below"; the final goto means "no
	 * protection at all".
	 */
	while (true) {
		if (ht_info->iot_action & HT_IOT_ACT_FORCED_CTS2SELF) {
			tcb_desc->bCTSEnable = true;
			tcb_desc->rts_rate = MGN_24M;
			tcb_desc->bRTSEnable = true;
			break;
		} else if (ht_info->iot_action & (HT_IOT_ACT_FORCED_RTS |
			   HT_IOT_ACT_PURE_N_MODE)) {
			tcb_desc->bRTSEnable = true;
			tcb_desc->rts_rate = MGN_24M;
			break;
		}
		if (ieee->current_network.buseprotection) {
			tcb_desc->bRTSEnable = true;
			tcb_desc->bCTSEnable = true;
			tcb_desc->rts_rate = MGN_24M;
			break;
		}
		if (ht_info->bCurrentHTSupport && ht_info->enable_ht) {
			u8 HTOpMode = ht_info->current_op_mode;

			if ((ht_info->bCurBW40MHz && (HTOpMode == 2 ||
						      HTOpMode == 3)) ||
			    (!ht_info->bCurBW40MHz && HTOpMode == 3)) {
				tcb_desc->rts_rate = MGN_24M;
				tcb_desc->bRTSEnable = true;
				break;
			}
		}
		if (skb->len > ieee->rts) {
			tcb_desc->rts_rate = MGN_24M;
			tcb_desc->bRTSEnable = true;
			break;
		}
		if (tcb_desc->bAMPDUEnable) {
			tcb_desc->rts_rate = MGN_24M;
			tcb_desc->bRTSEnable = false;
			break;
		}
		goto NO_PROTECTION;
	}
	if (ieee->current_network.capability & WLAN_CAPABILITY_SHORT_PREAMBLE)
		tcb_desc->bUseShortPreamble = true;
	return;
NO_PROTECTION:
	tcb_desc->bRTSEnable = false;
	tcb_desc->bCTSEnable = false;
	tcb_desc->rts_rate = 0;
	tcb_desc->RTSSC = 0;
	tcb_desc->bRTSBW = false;
}
/* Copy the device-wide rate-control overrides into this frame's
 * descriptor and select the default RATR table for STA/ad-hoc use.
 */
static void rtllib_txrate_selectmode(struct rtllib_device *ieee,
				     struct cb_desc *tcb_desc)
{
	if (ieee->tx_dis_rate_fallback)
		tcb_desc->tx_dis_rate_fallback = true;

	if (ieee->tx_use_drv_assinged_rate)
		tcb_desc->tx_use_drv_assinged_rate = true;
	/* NOTE(review): with '||' this branch is taken unless BOTH
	 * overrides are set; '&&' ("neither override set") may have been
	 * intended — confirm against rate-control behaviour before
	 * changing.
	 */
	if (!tcb_desc->tx_dis_rate_fallback ||
	    !tcb_desc->tx_use_drv_assinged_rate) {
		if (ieee->iw_mode == IW_MODE_INFRA ||
		    ieee->iw_mode == IW_MODE_ADHOC)
			tcb_desc->ratr_index = 0;
	}
}
/* Return (and post-increment, modulo 4096) the TX sequence number of
 * the traffic stream matching @dst and the skb priority.  Returns 0
 * for multicast destinations, non-QoS frames, or when no TS can be
 * obtained.
 */
static u16 rtllib_query_seqnum(struct rtllib_device *ieee, struct sk_buff *skb,
			       u8 *dst)
{
	struct tx_ts_record *ts = NULL;
	u16 seq;

	if (is_multicast_ether_addr(dst))
		return 0;
	if (!IsQoSDataFrame(skb->data))
		return 0;
	if (!GetTs(ieee, (struct ts_common_info **)(&ts), dst,
		   skb->priority, TX_DIR, true))
		return 0;

	seq = ts->TxCurSeq;
	ts->TxCurSeq = (ts->TxCurSeq + 1) % 4096;
	return seq;
}
static int wme_downgrade_ac(struct sk_buff *skb)
{
switch (skb->priority) {
case 6:
case 7:
skb->priority = 5; /* VO -> VI */
return 0;
case 4:
case 5:
skb->priority = 3; /* VI -> BE */
return 0;
case 0:
case 3:
skb->priority = 1; /* BE -> BK */
return 0;
default:
return -1;
}
}
/* Rate for ordinary data frames: the configured legacy rate in legacy
 * modes, otherwise the current HT operational rate (falling back to
 * the legacy rate with the top flag bit masked off).
 */
static u8 rtllib_current_rate(struct rtllib_device *ieee)
{
	if (ieee->mode & IEEE_MODE_MASK)
		return ieee->rate;

	return ieee->HTCurrentOperaRate ? ieee->HTCurrentOperaRate
					: (ieee->rate & 0x7F);
}
/* Core transmit path: convert an Ethernet frame from the network stack
 * into one or more 802.11 data frames held in a struct rtllib_txb,
 * applying QoS classification, fragmentation, SNAP encapsulation,
 * encryption and per-frame rate/aggregation/protection setup, then
 * hand the txb to the softmac queue.
 * Returns 0 on success (the skb is consumed) or 1 on allocation
 * failure (queue stopped, skb NOT freed so the stack can retry).
 */
static int rtllib_xmit_inter(struct sk_buff *skb, struct net_device *dev)
{
	struct rtllib_device *ieee = (struct rtllib_device *)
				     netdev_priv_rsl(dev);
	struct rtllib_txb *txb = NULL;
	struct rtllib_hdr_3addrqos *frag_hdr;
	int i, bytes_per_frag, nr_frags, bytes_last_frag, frag_size;
	unsigned long flags;
	struct net_device_stats *stats = &ieee->stats;
	int ether_type = 0, encrypt;
	int bytes, fc, qos_ctl = 0, hdr_len;
	struct sk_buff *skb_frag;
	struct rtllib_hdr_3addrqos header = { /* Ensure zero initialized */
		.duration_id = 0,
		.seq_ctl = 0,
		.qos_ctl = 0
	};
	int qos_activated = ieee->current_network.qos_data.active;
	u8 dest[ETH_ALEN];
	u8 src[ETH_ALEN];
	struct lib80211_crypt_data *crypt = NULL;
	struct cb_desc *tcb_desc;
	u8 bIsMulticast = false;
	u8 IsAmsdu = false;
	bool bdhcp = false;

	spin_lock_irqsave(&ieee->lock, flags);

	/* If there is no driver handler to take the TXB, don't bother
	 * creating it...
	 */
	if (!(ieee->softmac_features & IEEE_SOFTMAC_TX_QUEUE) ||
	    ((!ieee->softmac_data_hard_start_xmit &&
	    (ieee->softmac_features & IEEE_SOFTMAC_TX_QUEUE)))) {
		netdev_warn(ieee->dev, "No xmit handler.\n");
		goto success;
	}

	if (likely(ieee->raw_tx == 0)) {
		if (unlikely(skb->len < SNAP_SIZE + sizeof(u16))) {
			netdev_warn(ieee->dev, "skb too small (%d).\n",
				    skb->len);
			goto success;
		}
		/* Save source and destination addresses */
		ether_addr_copy(dest, skb->data);
		ether_addr_copy(src, skb->data + ETH_ALEN);

		memset(skb->cb, 0, sizeof(skb->cb));
		ether_type = ntohs(((struct ethhdr *)skb->data)->h_proto);

		if (ieee->iw_mode == IW_MODE_MONITOR) {
			/* Monitor mode: pass the raw frame through in a
			 * single-fragment txb.
			 */
			txb = rtllib_alloc_txb(1, skb->len, GFP_ATOMIC);
			if (unlikely(!txb)) {
				netdev_warn(ieee->dev,
					    "Could not allocate TXB\n");
				goto failed;
			}

			txb->encrypted = 0;
			txb->payload_size = cpu_to_le16(skb->len);
			skb_put_data(txb->fragments[0], skb->data, skb->len);

			goto success;
		}

		/* Sniff DHCP (UDP ports 67/68) and ARP frames so power
		 * save can be delayed while an address is being acquired.
		 * NOTE(review): the 282-byte floor presumably guarantees
		 * the headers inspected below are present — confirm.
		 */
		if (skb->len > 282) {
			if (ether_type == ETH_P_IP) {
				const struct iphdr *ip = (struct iphdr *)
					((u8 *)skb->data + 14);
				if (ip->protocol == IPPROTO_UDP) {
					struct udphdr *udp;

					udp = (struct udphdr *)((u8 *)ip +
					      (ip->ihl << 2));
					/* Byte offsets 1/3 are the low bytes
					 * of the big-endian source/dest
					 * ports: 68<->67 means DHCP.
					 */
					if (((((u8 *)udp)[1] == 68) &&
					     (((u8 *)udp)[3] == 67)) ||
					   ((((u8 *)udp)[1] == 67) &&
					   (((u8 *)udp)[3] == 68))) {
						bdhcp = true;
						ieee->LPSDelayCnt = 200;
					}
				}
			} else if (ether_type == ETH_P_ARP) {
				netdev_info(ieee->dev,
					    "=================>DHCP Protocol start tx ARP pkt!!\n");
				bdhcp = true;
				ieee->LPSDelayCnt =
					 ieee->current_network.tim.tim_count;
			}
		}

		skb->priority = rtllib_classify(skb, IsAmsdu);
		crypt = ieee->crypt_info.crypt[ieee->crypt_info.tx_keyidx];
		/* EAPOL frames bypass encryption when 802.1x is active. */
		encrypt = !(ether_type == ETH_P_PAE && ieee->ieee802_1x) && crypt && crypt->ops;
		if (!encrypt && ieee->ieee802_1x &&
		    ieee->drop_unencrypted && ether_type != ETH_P_PAE) {
			stats->tx_dropped++;
			goto success;
		}
		if (crypt && !encrypt && ether_type == ETH_P_PAE) {
			struct eapol *eap = (struct eapol *)(skb->data +
				sizeof(struct ethhdr) - SNAP_SIZE -
				sizeof(u16));
			netdev_dbg(ieee->dev,
				   "TX: IEEE 802.11 EAPOL frame: %s\n",
				   eap_get_type(eap->type));
		}

		/* Advance the SKB to the start of the payload */
		skb_pull(skb, sizeof(struct ethhdr));

		/* Determine total amount of storage required for TXB packets */
		bytes = skb->len + SNAP_SIZE + sizeof(u16);

		if (encrypt)
			fc = RTLLIB_FTYPE_DATA | RTLLIB_FCTL_WEP;
		else
			fc = RTLLIB_FTYPE_DATA;

		if (qos_activated)
			fc |= RTLLIB_STYPE_QOS_DATA;
		else
			fc |= RTLLIB_STYPE_DATA;

		if (ieee->iw_mode == IW_MODE_INFRA) {
			fc |= RTLLIB_FCTL_TODS;
			/* To DS: Addr1 = BSSID, Addr2 = SA,
			 * Addr3 = DA
			 */
			ether_addr_copy(header.addr1,
					ieee->current_network.bssid);
			ether_addr_copy(header.addr2, src);
			if (IsAmsdu)
				ether_addr_copy(header.addr3,
						ieee->current_network.bssid);
			else
				ether_addr_copy(header.addr3, dest);
		} else if (ieee->iw_mode == IW_MODE_ADHOC) {
			/* not From/To DS: Addr1 = DA, Addr2 = SA,
			 * Addr3 = BSSID
			 */
			ether_addr_copy(header.addr1, dest);
			ether_addr_copy(header.addr2, src);
			ether_addr_copy(header.addr3,
					ieee->current_network.bssid);
		}

		bIsMulticast = is_multicast_ether_addr(header.addr1);

		header.frame_ctl = cpu_to_le16(fc);

		/* Determine fragmentation size based on destination (multicast
		 * and broadcast are not fragmented)
		 */
		if (bIsMulticast) {
			frag_size = MAX_FRAG_THRESHOLD;
			qos_ctl |= QOS_CTL_NOTCONTAIN_ACK;
		} else {
			frag_size = ieee->fts;
			qos_ctl = 0;
		}

		if (qos_activated) {
			hdr_len = RTLLIB_3ADDR_LEN + 2;

			/* in case we are a client verify acm is not set for this ac */
			while (unlikely(ieee->wmm_acm & (0x01 << skb->priority))) {
				netdev_info(ieee->dev, "skb->priority = %x\n",
					    skb->priority);
				if (wme_downgrade_ac(skb))
					break;
				netdev_info(ieee->dev, "converted skb->priority = %x\n",
					    skb->priority);
			}

			qos_ctl |= skb->priority;
			header.qos_ctl = cpu_to_le16(qos_ctl & RTLLIB_QOS_TID);

		} else {
			hdr_len = RTLLIB_3ADDR_LEN;
		}
		/* Determine amount of payload per fragment. Regardless of if
		 * this stack is providing the full 802.11 header, one will
		 * eventually be affixed to this fragment -- so we must account
		 * for it when determining the amount of payload space.
		 */
		bytes_per_frag = frag_size - hdr_len;
		if (ieee->config &
		   (CFG_RTLLIB_COMPUTE_FCS | CFG_RTLLIB_RESERVE_FCS))
			bytes_per_frag -= RTLLIB_FCS_LEN;

		/* Each fragment may need to have room for encrypting
		 * pre/postfix
		 */
		if (encrypt) {
			bytes_per_frag -= crypt->ops->extra_mpdu_prefix_len +
				crypt->ops->extra_mpdu_postfix_len +
				crypt->ops->extra_msdu_prefix_len +
				crypt->ops->extra_msdu_postfix_len;
		}
		/* Number of fragments is the total bytes_per_frag /
		 * payload_per_fragment
		 */
		nr_frags = bytes / bytes_per_frag;
		bytes_last_frag = bytes % bytes_per_frag;
		if (bytes_last_frag)
			nr_frags++;
		else
			bytes_last_frag = bytes_per_frag;

		/* When we allocate the TXB we allocate enough space for the
		 * reserve and full fragment bytes (bytes_per_frag doesn't
		 * include prefix, postfix, header, FCS, etc.)
		 */
		txb = rtllib_alloc_txb(nr_frags, frag_size +
				       ieee->tx_headroom, GFP_ATOMIC);
		if (unlikely(!txb)) {
			netdev_warn(ieee->dev, "Could not allocate TXB\n");
			goto failed;
		}
		txb->encrypted = encrypt;
		txb->payload_size = cpu_to_le16(bytes);

		if (qos_activated)
			txb->queue_index = UP2AC(skb->priority);
		else
			txb->queue_index = WME_AC_BE;

		/* Fill each fragment: 802.11 header, SNAP (first fragment
		 * only), payload slice, then optional encryption.
		 */
		for (i = 0; i < nr_frags; i++) {
			skb_frag = txb->fragments[i];
			tcb_desc = (struct cb_desc *)(skb_frag->cb +
				    MAX_DEV_ADDR_SIZE);
			if (qos_activated) {
				skb_frag->priority = skb->priority;
				tcb_desc->queue_index = UP2AC(skb->priority);
			} else {
				skb_frag->priority = WME_AC_BE;
				tcb_desc->queue_index = WME_AC_BE;
			}
			skb_reserve(skb_frag, ieee->tx_headroom);

			if (encrypt) {
				if (ieee->hwsec_active)
					tcb_desc->bHwSec = 1;
				else
					tcb_desc->bHwSec = 0;
				skb_reserve(skb_frag,
					    crypt->ops->extra_mpdu_prefix_len +
					    crypt->ops->extra_msdu_prefix_len);
			} else {
				tcb_desc->bHwSec = 0;
			}
			frag_hdr = skb_put_data(skb_frag, &header, hdr_len);

			/* If this is not the last fragment, then add the
			 * MOREFRAGS bit to the frame control
			 */
			if (i != nr_frags - 1) {
				frag_hdr->frame_ctl = cpu_to_le16(fc |
								  RTLLIB_FCTL_MOREFRAGS);
				bytes = bytes_per_frag;

			} else {
				/* The last fragment has the remaining length */
				bytes = bytes_last_frag;
			}
			if ((qos_activated) && (!bIsMulticast)) {
				frag_hdr->seq_ctl =
					 cpu_to_le16(rtllib_query_seqnum(ieee, skb_frag,
									 header.addr1));
				frag_hdr->seq_ctl =
					 cpu_to_le16(le16_to_cpu(frag_hdr->seq_ctl) << 4 | i);
			} else {
				frag_hdr->seq_ctl =
					 cpu_to_le16(ieee->seq_ctrl[0] << 4 | i);
			}
			/* Put a SNAP header on the first fragment */
			if (i == 0) {
				rtllib_put_snap(skb_put(skb_frag,
							SNAP_SIZE +
							sizeof(u16)), ether_type);
				bytes -= SNAP_SIZE + sizeof(u16);
			}

			skb_put_data(skb_frag, skb->data, bytes);

			/* Advance the SKB... */
			skb_pull(skb, bytes);

			/* Encryption routine will move the header forward in
			 * order to insert the IV between the header and the
			 * payload
			 */
			if (encrypt)
				rtllib_encrypt_fragment(ieee, skb_frag,
							hdr_len);
			if (ieee->config &
			   (CFG_RTLLIB_COMPUTE_FCS | CFG_RTLLIB_RESERVE_FCS))
				skb_put(skb_frag, 4);
		}

		/* Advance the per-AC (or non-QoS) sequence counter. */
		if ((qos_activated) && (!bIsMulticast)) {
			if (ieee->seq_ctrl[UP2AC(skb->priority) + 1] == 0xFFF)
				ieee->seq_ctrl[UP2AC(skb->priority) + 1] = 0;
			else
				ieee->seq_ctrl[UP2AC(skb->priority) + 1]++;
		} else {
			if (ieee->seq_ctrl[0] == 0xFFF)
				ieee->seq_ctrl[0] = 0;
			else
				ieee->seq_ctrl[0]++;
		}
	} else {
		/* raw_tx: the skb already contains an 802.11 frame. */
		if (unlikely(skb->len < sizeof(struct rtllib_hdr_3addr))) {
			netdev_warn(ieee->dev, "skb too small (%d).\n",
				    skb->len);
			goto success;
		}

		txb = rtllib_alloc_txb(1, skb->len, GFP_ATOMIC);
		if (!txb) {
			netdev_warn(ieee->dev, "Could not allocate TXB\n");
			goto failed;
		}

		txb->encrypted = 0;
		txb->payload_size = cpu_to_le16(skb->len);
		skb_put_data(txb->fragments[0], skb->data, skb->len);
	}

	/* Also reached with txb == NULL for frames dropped above. */
success:
	if (txb) {
		/* Per-frame TX descriptor setup on the first fragment:
		 * rate selection, aggregation, preamble/GI/bandwidth and
		 * protection.
		 */
		tcb_desc = (struct cb_desc *)
				(txb->fragments[0]->cb + MAX_DEV_ADDR_SIZE);
		tcb_desc->bTxEnableFwCalcDur = 1;
		tcb_desc->priority = skb->priority;

		if (ether_type == ETH_P_PAE) {
			if (ieee->ht_info->iot_action &
			    HT_IOT_ACT_WA_IOT_Broadcom) {
				tcb_desc->data_rate =
					 MgntQuery_TxRateExcludeCCKRates(ieee);
				tcb_desc->tx_dis_rate_fallback = false;
			} else {
				tcb_desc->data_rate = ieee->basic_rate;
				tcb_desc->tx_dis_rate_fallback = 1;
			}

			tcb_desc->ratr_index = 7;
			tcb_desc->tx_use_drv_assinged_rate = 1;
		} else {
			if (is_multicast_ether_addr(header.addr1))
				tcb_desc->bMulticast = 1;
			if (is_broadcast_ether_addr(header.addr1))
				tcb_desc->bBroadcast = 1;
			rtllib_txrate_selectmode(ieee, tcb_desc);
			if (tcb_desc->bMulticast ||  tcb_desc->bBroadcast)
				tcb_desc->data_rate = ieee->basic_rate;
			else
				tcb_desc->data_rate = rtllib_current_rate(ieee);

			if (bdhcp) {
				if (ieee->ht_info->iot_action &
				    HT_IOT_ACT_WA_IOT_Broadcom) {
					tcb_desc->data_rate =
					   MgntQuery_TxRateExcludeCCKRates(ieee);
					tcb_desc->tx_dis_rate_fallback = false;
				} else {
					tcb_desc->data_rate = MGN_1M;
					tcb_desc->tx_dis_rate_fallback = 1;
				}

				tcb_desc->ratr_index = 7;
				tcb_desc->tx_use_drv_assinged_rate = 1;
				tcb_desc->bdhcp = 1;
			}

			rtllib_query_ShortPreambleMode(ieee, tcb_desc);
			rtllib_tx_query_agg_cap(ieee, txb->fragments[0],
						tcb_desc);
			rtllib_query_HTCapShortGI(ieee, tcb_desc);
			rtllib_query_BandwidthMode(ieee, tcb_desc);
			rtllib_query_protectionmode(ieee, tcb_desc,
						    txb->fragments[0]);
		}
	}
	spin_unlock_irqrestore(&ieee->lock, flags);
	dev_kfree_skb_any(skb);
	if (txb) {
		if (ieee->softmac_features & IEEE_SOFTMAC_TX_QUEUE) {
			dev->stats.tx_packets++;
			dev->stats.tx_bytes += le16_to_cpu(txb->payload_size);
			rtllib_softmac_xmit(txb, ieee);
		} else {
			rtllib_txb_free(txb);
		}
	}

	return 0;

failed:
	spin_unlock_irqrestore(&ieee->lock, flags);
	netif_stop_queue(dev);
	stats->tx_errors++;
	return 1;
}
/* net_device hard-start-xmit hook: clear the skb control buffer and
 * hand the frame to the internal transmit path, translating its result
 * into the netdev_tx_t the stack expects.
 */
netdev_tx_t rtllib_xmit(struct sk_buff *skb, struct net_device *dev)
{
	memset(skb->cb, 0, sizeof(skb->cb));
	if (rtllib_xmit_inter(skb, dev))
		return NETDEV_TX_BUSY;
	return NETDEV_TX_OK;
}
EXPORT_SYMBOL(rtllib_xmit);
| linux-master | drivers/staging/rtl8192e/rtllib_tx.c |
// SPDX-License-Identifier: GPL-2.0
/*
* Copyright(c) 2008 - 2010 Realtek Corporation. All rights reserved.
*
* Contact Information: wlanfae <[email protected]>
*/
#include <asm/byteorder.h>
#include <asm/unaligned.h>
#include <linux/etherdevice.h>
#include "rtllib.h"
#include "rtl819x_BA.h"
/* Mark @pBA valid and, when @Time is non-zero, (re)arm its inactivity
 * timer to fire @Time milliseconds from now.
 */
static void activate_ba_entry(struct ba_record *pBA, u16 Time)
{
	pBA->b_valid = true;
	if (Time)
		mod_timer(&pBA->timer, jiffies + msecs_to_jiffies(Time));
}
/* Invalidate @pBA and make sure its timer is no longer running.
 * @ieee is unused here but kept for signature symmetry with callers.
 */
static void deactivate_ba_entry(struct rtllib_device *ieee, struct ba_record *pBA)
{
	pBA->b_valid = false;

	/* Synchronous cancel: returns only after any running timer
	 * handler has finished.
	 */
	del_timer_sync(&pBA->timer);
}
/* Tear down both the pending and the admitted TX-side BA sessions of
 * @pTxTs.  Returns true when at least one session was active, i.e. a
 * DELBA frame should be sent to the peer.
 */
static u8 tx_ts_delete_ba(struct rtllib_device *ieee, struct tx_ts_record *pTxTs)
{
	struct ba_record *records[2] = {
		&pTxTs->TxPendingBARecord,
		&pTxTs->TxAdmittedBARecord,
	};
	u8 need_delba = false;
	int i;

	for (i = 0; i < 2; i++) {
		if (records[i]->b_valid) {
			deactivate_ba_entry(ieee, records[i]);
			need_delba = true;
		}
	}

	return need_delba;
}
/* Tear down the RX-side admitted BA session of @pRxTs.  Returns true
 * when a session was active and a DELBA should be sent.
 */
static u8 rx_ts_delete_ba(struct rtllib_device *ieee, struct rx_ts_record *pRxTs)
{
	struct ba_record *ba = &pRxTs->rx_admitted_ba_record;

	if (!ba->b_valid)
		return false;

	deactivate_ba_entry(ieee, ba);
	return true;
}
/* Reset every field of a BA record to its inactive state. */
void rtllib_reset_ba_entry(struct ba_record *pBA)
{
	pBA->b_valid = false;
	pBA->dialog_token = 0;
	pBA->ba_timeout_value = 0;
	pBA->ba_param_set.short_data = 0;
	pBA->ba_start_seq_ctrl.short_data = 0;
}
/* Build an ADDBA request or response management frame for @Dst.
 * @StatusCode is only carried in responses (@type == ACT_ADDBARSP);
 * the starting sequence control is only carried in requests.
 * Returns the skb, or NULL when @pBA is missing or allocation fails.
 */
static struct sk_buff *rtllib_ADDBA(struct rtllib_device *ieee, u8 *Dst,
				    struct ba_record *pBA,
				    u16 StatusCode, u8 type)
{
	struct sk_buff *skb = NULL;
	struct rtllib_hdr_3addr *BAReq = NULL;
	u8 *tag = NULL;
	u16 len = ieee->tx_headroom + 9;

	netdev_dbg(ieee->dev, "%s(): frame(%d) sentd to: %pM, ieee->dev:%p\n",
		   __func__, type, Dst, ieee->dev);

	if (!pBA) {
		netdev_warn(ieee->dev, "pBA is NULL\n");
		return NULL;
	}
	skb = dev_alloc_skb(len + sizeof(struct rtllib_hdr_3addr));
	if (!skb)
		return NULL;

	memset(skb->data, 0, sizeof(struct rtllib_hdr_3addr));

	skb_reserve(skb, ieee->tx_headroom);

	BAReq = skb_put(skb, sizeof(struct rtllib_hdr_3addr));

	ether_addr_copy(BAReq->addr1, Dst);
	ether_addr_copy(BAReq->addr2, ieee->dev->dev_addr);

	ether_addr_copy(BAReq->addr3, ieee->current_network.bssid);
	BAReq->frame_ctl = cpu_to_le16(RTLLIB_STYPE_MANAGE_ACT);

	/* Up to 9 bytes of action payload: category, action, dialog
	 * token, (status in responses,) BA parameter set, BA timeout,
	 * (start sequence control in requests).
	 */
	tag = skb_put(skb, 9);
	*tag++ = ACT_CAT_BA;
	*tag++ = type;
	*tag++ = pBA->dialog_token;

	if (type == ACT_ADDBARSP) {
		put_unaligned_le16(StatusCode, tag);
		tag += 2;
	}

	put_unaligned_le16(pBA->ba_param_set.short_data, tag);
	tag += 2;

	put_unaligned_le16(pBA->ba_timeout_value, tag);
	tag += 2;

	if (type == ACT_ADDBAREQ) {
		memcpy(tag, (u8 *)&pBA->ba_start_seq_ctrl, 2);
		tag += 2;
	}
#ifdef VERBOSE_DEBUG
	/* print_hex_dump_bytes() takes (prefix_str, prefix_type, buf,
	 * len); the previous call passed five mis-ordered arguments and
	 * broke builds with VERBOSE_DEBUG defined.
	 */
	print_hex_dump_bytes(__func__, DUMP_PREFIX_NONE, skb->data, skb->len);
#endif
	return skb;
}
/* Build a DELBA management frame tearing down the BA session described
 * by @pBA.  @TxRxSelect records which side (TX or RX) is the initiator
 * and @ReasonCode is the 802.11 reason carried in the frame.
 * Returns the skb or NULL on allocation failure.
 */
static struct sk_buff *rtllib_DELBA(struct rtllib_device *ieee, u8 *dst,
				    struct ba_record *pBA,
				    enum tr_select TxRxSelect, u16 ReasonCode)
{
	union delba_param_set DelbaParamSet;
	struct sk_buff *skb = NULL;
	struct rtllib_hdr_3addr *Delba = NULL;
	u8 *tag = NULL;
	u16 len = 6 + ieee->tx_headroom;

	if (net_ratelimit())
		netdev_dbg(ieee->dev, "%s(): ReasonCode(%d) sentd to: %pM\n",
			   __func__, ReasonCode, dst);

	memset(&DelbaParamSet, 0, 2);

	DelbaParamSet.field.initiator = (TxRxSelect == TX_DIR) ? 1 : 0;
	DelbaParamSet.field.tid = pBA->ba_param_set.field.tid;

	skb = dev_alloc_skb(len + sizeof(struct rtllib_hdr_3addr));
	if (!skb)
		return NULL;

	skb_reserve(skb, ieee->tx_headroom);

	Delba = skb_put(skb, sizeof(struct rtllib_hdr_3addr));

	ether_addr_copy(Delba->addr1, dst);
	ether_addr_copy(Delba->addr2, ieee->dev->dev_addr);
	ether_addr_copy(Delba->addr3, ieee->current_network.bssid);
	Delba->frame_ctl = cpu_to_le16(RTLLIB_STYPE_MANAGE_ACT);

	/* 6 bytes of action payload: category, action, DELBA parameter
	 * set and reason code.
	 */
	tag = skb_put(skb, 6);

	*tag++ = ACT_CAT_BA;
	*tag++ = ACT_DELBA;

	put_unaligned_le16(DelbaParamSet.short_data, tag);
	tag += 2;

	put_unaligned_le16(ReasonCode, tag);
	tag += 2;

#ifdef VERBOSE_DEBUG
	/* print_hex_dump_bytes() takes (prefix_str, prefix_type, buf,
	 * len); the previous call passed five mis-ordered arguments and
	 * broke builds with VERBOSE_DEBUG defined.
	 */
	print_hex_dump_bytes(__func__, DUMP_PREFIX_NONE, skb->data, skb->len);
#endif
	return skb;
}
/* Build and transmit an ADDBA request for @pBA to @dst. */
static void rtllib_send_ADDBAReq(struct rtllib_device *ieee, u8 *dst,
				 struct ba_record *pBA)
{
	struct sk_buff *skb = rtllib_ADDBA(ieee, dst, pBA, 0, ACT_ADDBAREQ);

	if (!skb) {
		netdev_dbg(ieee->dev, "Failed to generate ADDBAReq packet.\n");
		return;
	}
	softmac_mgmt_xmit(skb, ieee);
}
/* Build and transmit an ADDBA response carrying @StatusCode to @dst. */
static void rtllib_send_ADDBARsp(struct rtllib_device *ieee, u8 *dst,
				 struct ba_record *pBA, u16 StatusCode)
{
	struct sk_buff *skb = rtllib_ADDBA(ieee, dst, pBA, StatusCode,
					   ACT_ADDBARSP);

	if (!skb) {
		netdev_dbg(ieee->dev, "Failed to generate ADDBARsp packet.\n");
		return;
	}
	softmac_mgmt_xmit(skb, ieee);
}
/* Build and transmit a DELBA frame for @pBA to @dst. */
static void rtllib_send_DELBA(struct rtllib_device *ieee, u8 *dst,
			      struct ba_record *pBA, enum tr_select TxRxSelect,
			      u16 ReasonCode)
{
	struct sk_buff *skb = rtllib_DELBA(ieee, dst, pBA, TxRxSelect,
					   ReasonCode);

	if (!skb) {
		netdev_dbg(ieee->dev, "Failed to generate DELBA packet.\n");
		return;
	}
	softmac_mgmt_xmit(skb, ieee);
}
/* Handle a received ADDBA request action frame.
 *
 * Parses the request, validates local capabilities and the BA policy,
 * programs the RX admitted BA record and replies with an ADDBA response
 * (success or the relevant failure status code).
 *
 * Returns 0 in all cases except a malformed frame (-1); protocol-level
 * rejections are reported to the peer, not to the caller.
 */
int rtllib_rx_ADDBAReq(struct rtllib_device *ieee, struct sk_buff *skb)
{
	struct rtllib_hdr_3addr *req = NULL;
	u16 rc = 0;
	u8 *dst = NULL, *pDialogToken = NULL, *tag = NULL;
	struct ba_record *pBA = NULL;
	union ba_param_set *pBaParamSet = NULL;
	u16 *pBaTimeoutVal = NULL;
	union sequence_control *pBaStartSeqCtrl = NULL;
	struct rx_ts_record *pTS = NULL;

	/* Header plus the 9-octet ADDBA request body: category(1),
	 * action(1), dialog token(1), BA param set(2), BA timeout(2),
	 * BA starting sequence control(2).
	 */
	if (skb->len < sizeof(struct rtllib_hdr_3addr) + 9) {
		netdev_warn(ieee->dev, "Invalid skb len in BAREQ(%d / %d)\n",
			    (int)skb->len,
			    (int)(sizeof(struct rtllib_hdr_3addr) + 9));
		return -1;
	}

#ifdef VERBOSE_DEBUG
	/* print_hex_dump_bytes(prefix_str, prefix_type, buf, len): the
	 * previous call passed __func__ where the buffer belongs and
	 * would not compile with VERBOSE_DEBUG enabled.
	 */
	print_hex_dump_bytes("rtllib_rx_ADDBAReq: ", DUMP_PREFIX_NONE,
			     skb->data, skb->len);
#endif
	req = (struct rtllib_hdr_3addr *)skb->data;
	tag = (u8 *)req;
	dst = (u8 *)(&req->addr2[0]);
	tag += sizeof(struct rtllib_hdr_3addr);
	pDialogToken = tag + 2;
	pBaParamSet = (union ba_param_set *)(tag + 3);
	pBaTimeoutVal = (u16 *)(tag + 5);
	/* The starting sequence control sits 7 octets into the action
	 * payload.  The old "(req + 7)" advanced by seven whole 3-addr
	 * headers (struct-pointer arithmetic) and read out of bounds.
	 */
	pBaStartSeqCtrl = (union sequence_control *)(tag + 7);

	if (!ieee->current_network.qos_data.active ||
	    !ieee->ht_info->bCurrentHTSupport ||
	    (ieee->ht_info->iot_action & HT_IOT_ACT_REJECT_ADDBA_REQ)) {
		rc = ADDBA_STATUS_REFUSED;
		netdev_warn(ieee->dev,
			    "Failed to reply on ADDBA_REQ as some capability is not ready(%d, %d)\n",
			    ieee->current_network.qos_data.active,
			    ieee->ht_info->bCurrentHTSupport);
		goto OnADDBAReq_Fail;
	}
	if (!GetTs(ieee, (struct ts_common_info **)&pTS, dst,
		   (u8)(pBaParamSet->field.tid), RX_DIR, true)) {
		rc = ADDBA_STATUS_REFUSED;
		netdev_warn(ieee->dev, "%s(): can't get TS\n", __func__);
		goto OnADDBAReq_Fail;
	}
	pBA = &pTS->rx_admitted_ba_record;

	/* Only immediate Block Ack is supported. */
	if (pBaParamSet->field.ba_policy == BA_POLICY_DELAYED) {
		rc = ADDBA_STATUS_INVALID_PARAM;
		netdev_warn(ieee->dev, "%s(): BA Policy is not correct\n",
			    __func__);
		goto OnADDBAReq_Fail;
	}

	/* Flush pending reorder-queue packets before rearming the BA. */
	rtllib_FlushRxTsPendingPkts(ieee, pTS);

	deactivate_ba_entry(ieee, pBA);
	pBA->dialog_token = *pDialogToken;
	pBA->ba_param_set = *pBaParamSet;
	pBA->ba_timeout_value = *pBaTimeoutVal;
	pBA->ba_start_seq_ctrl = *pBaStartSeqCtrl;

	/* Clamp the advertised reorder buffer for half-N-mode APs or
	 * IOT peers that only tolerate one aggregated packet.
	 */
	if (ieee->GetHalfNmodeSupportByAPsHandler(ieee->dev) ||
	    (ieee->ht_info->iot_action & HT_IOT_ACT_ALLOW_PEER_AGG_ONE_PKT))
		pBA->ba_param_set.field.buffer_size = 1;
	else
		pBA->ba_param_set.field.buffer_size = 32;

	activate_ba_entry(pBA, 0);
	rtllib_send_ADDBARsp(ieee, dst, pBA, ADDBA_STATUS_SUCCESS);

	return 0;

OnADDBAReq_Fail:
	{
		/* Echo back the peer's parameters with our refusal code. */
		struct ba_record BA;

		BA.ba_param_set = *pBaParamSet;
		BA.ba_timeout_value = *pBaTimeoutVal;
		BA.dialog_token = *pDialogToken;
		BA.ba_param_set.field.ba_policy = BA_POLICY_IMMEDIATE;
		rtllib_send_ADDBARsp(ieee, dst, &BA, rc);
		return 0;
	}
}
/* Handle a received ADDBA response action frame.
 *
 * Matches the response against the pending TX BA record; on success the
 * admitted BA record is programmed and activated, otherwise a DELBA is
 * sent back with an appropriate reason code.
 *
 * Returns 0 on normal processing (including protocol rejections) and -1
 * for malformed frames or an already-admitted session.
 */
int rtllib_rx_ADDBARsp(struct rtllib_device *ieee, struct sk_buff *skb)
{
	struct rtllib_hdr_3addr *rsp = NULL;
	struct ba_record *pPendingBA, *pAdmittedBA;
	struct tx_ts_record *pTS = NULL;
	u8 *dst = NULL, *pDialogToken = NULL, *tag = NULL;
	u16 *pStatusCode = NULL, *pBaTimeoutVal = NULL;
	union ba_param_set *pBaParamSet = NULL;
	u16 ReasonCode;

	/* Header plus the 9-octet ADDBA response body: category(1),
	 * action(1), dialog token(1), status(2), param set(2), timeout(2).
	 */
	if (skb->len < sizeof(struct rtllib_hdr_3addr) + 9) {
		netdev_warn(ieee->dev, "Invalid skb len in BARSP(%d / %d)\n",
			    (int)skb->len,
			    (int)(sizeof(struct rtllib_hdr_3addr) + 9));
		return -1;
	}
	rsp = (struct rtllib_hdr_3addr *)skb->data;
	tag = (u8 *)rsp;
	dst = (u8 *)(&rsp->addr2[0]);
	tag += sizeof(struct rtllib_hdr_3addr);
	pDialogToken = tag + 2;
	pStatusCode = (u16 *)(tag + 3);
	pBaParamSet = (union ba_param_set *)(tag + 5);
	pBaTimeoutVal = (u16 *)(tag + 7);

	/* All of QoS, HT and A-MPDU must be up before accepting. */
	if (!ieee->current_network.qos_data.active ||
	    !ieee->ht_info->bCurrentHTSupport ||
	    !ieee->ht_info->bCurrentAMPDUEnable) {
		netdev_warn(ieee->dev,
			    "reject to ADDBA_RSP as some capability is not ready(%d, %d, %d)\n",
			    ieee->current_network.qos_data.active,
			    ieee->ht_info->bCurrentHTSupport,
			    ieee->ht_info->bCurrentAMPDUEnable);
		ReasonCode = DELBA_REASON_UNKNOWN_BA;
		goto OnADDBARsp_Reject;
	}

	if (!GetTs(ieee, (struct ts_common_info **)&pTS, dst,
		   (u8)(pBaParamSet->field.tid), TX_DIR, false)) {
		netdev_warn(ieee->dev, "%s(): can't get TS\n", __func__);
		ReasonCode = DELBA_REASON_UNKNOWN_BA;
		goto OnADDBARsp_Reject;
	}

	pTS->bAddBaReqInProgress = false;
	pPendingBA = &pTS->TxPendingBARecord;
	pAdmittedBA = &pTS->TxAdmittedBARecord;

	if (pAdmittedBA->b_valid) {
		/* Duplicate response for an already-admitted session. */
		netdev_dbg(ieee->dev, "%s(): ADDBA response already admitted\n",
			   __func__);
		return -1;
	} else if (!pPendingBA->b_valid ||
		   (*pDialogToken != pPendingBA->dialog_token)) {
		/* No matching outstanding request: tear the session down. */
		netdev_warn(ieee->dev,
			    "%s(): ADDBA Rsp. BA invalid, DELBA!\n",
			    __func__);
		ReasonCode = DELBA_REASON_UNKNOWN_BA;
		goto OnADDBARsp_Reject;
	} else {
		netdev_dbg(ieee->dev,
			   "%s(): Recv ADDBA Rsp. BA is admitted! Status code:%X\n",
			   __func__, *pStatusCode);
		deactivate_ba_entry(ieee, pPendingBA);
	}

	if (*pStatusCode == ADDBA_STATUS_SUCCESS) {
		/* Delayed BA is not supported; refuse and retry later. */
		if (pBaParamSet->field.ba_policy == BA_POLICY_DELAYED) {
			pTS->bAddBaReqDelayed = true;
			deactivate_ba_entry(ieee, pAdmittedBA);
			ReasonCode = DELBA_REASON_END_BA;
			goto OnADDBARsp_Reject;
		}

		/* Adopt the peer's parameters and (re)start the BA timer. */
		pAdmittedBA->dialog_token = *pDialogToken;
		pAdmittedBA->ba_timeout_value = *pBaTimeoutVal;
		pAdmittedBA->ba_start_seq_ctrl = pPendingBA->ba_start_seq_ctrl;
		pAdmittedBA->ba_param_set = *pBaParamSet;
		deactivate_ba_entry(ieee, pAdmittedBA);
		activate_ba_entry(pAdmittedBA, *pBaTimeoutVal);
	} else {
		/* Peer refused: stop requesting BA on this TS. */
		pTS->bAddBaReqDelayed = true;
		pTS->bDisable_AddBa = true;
		ReasonCode = DELBA_REASON_END_BA;
		goto OnADDBARsp_Reject;
	}

	return 0;

OnADDBARsp_Reject:
	{
		struct ba_record BA;

		BA.ba_param_set = *pBaParamSet;
		rtllib_send_DELBA(ieee, dst, &BA, TX_DIR, ReasonCode);
		return 0;
	}
}
/* Handle a received DELBA action frame.
 *
 * Tears down the RX- or TX-side Block Ack session for the TID named in
 * the DELBA parameter set, depending on which side initiated teardown.
 *
 * Returns 0 on success, -1 on malformed frame, missing capability or
 * unknown traffic stream.
 */
int rtllib_rx_DELBA(struct rtllib_device *ieee, struct sk_buff *skb)
{
	struct rtllib_hdr_3addr *delba = NULL;
	union delba_param_set *pDelBaParamSet = NULL;
	u8 *dst = NULL;

	/* Header plus category(1), action(1), DELBA param set(2), reason(2). */
	if (skb->len < sizeof(struct rtllib_hdr_3addr) + 6) {
		netdev_warn(ieee->dev, "Invalid skb len in DELBA(%d / %d)\n",
			    (int)skb->len,
			    (int)(sizeof(struct rtllib_hdr_3addr) + 6));
		return -1;
	}

	if (!ieee->current_network.qos_data.active ||
	    !ieee->ht_info->bCurrentHTSupport) {
		netdev_warn(ieee->dev,
			    "received DELBA while QOS or HT is not supported(%d, %d)\n",
			    ieee->current_network.qos_data.active,
			    ieee->ht_info->bCurrentHTSupport);
		return -1;
	}

#ifdef VERBOSE_DEBUG
	/* print_hex_dump_bytes(prefix_str, prefix_type, buf, len): the old
	 * call passed five arguments and broke the build with VERBOSE_DEBUG.
	 */
	print_hex_dump_bytes("rtllib_rx_DELBA: ", DUMP_PREFIX_NONE, skb->data,
			     skb->len);
#endif
	delba = (struct rtllib_hdr_3addr *)skb->data;
	dst = (u8 *)(&delba->addr2[0]);
	pDelBaParamSet = (union delba_param_set *)&delba->payload[2];

	if (pDelBaParamSet->field.initiator == 1) {
		/* Peer originated the data: drop our RX-side BA. */
		struct rx_ts_record *pRxTs;

		if (!GetTs(ieee, (struct ts_common_info **)&pRxTs, dst,
			   (u8)pDelBaParamSet->field.tid, RX_DIR, false)) {
			netdev_warn(ieee->dev,
				    "%s(): can't get TS for RXTS. dst:%pM TID:%d\n",
				    __func__, dst,
				    (u8)pDelBaParamSet->field.tid);
			return -1;
		}
		rx_ts_delete_ba(ieee, pRxTs);
	} else {
		/* We originated the data: drop our TX-side BA state. */
		struct tx_ts_record *pTxTs;

		if (!GetTs(ieee, (struct ts_common_info **)&pTxTs, dst,
			   (u8)pDelBaParamSet->field.tid, TX_DIR, false)) {
			netdev_warn(ieee->dev, "%s(): can't get TS for TXTS\n",
				    __func__);
			return -1;
		}

		pTxTs->bUsingBa = false;
		pTxTs->bAddBaReqInProgress = false;
		pTxTs->bAddBaReqDelayed = false;
		del_timer_sync(&pTxTs->TsAddBaTimer);
		tx_ts_delete_ba(ieee, pTxTs);
	}
	return 0;
}
/* Start ADDBA negotiation for the TX traffic stream @pTS.
 *
 * Fills the pending BA record with our proposed parameters (immediate
 * or delayed @Policy, 32-entry buffer, no A-MSDU), arms the setup
 * timeout and transmits the ADDBA request.  An already-valid pending
 * record is left alone unless @bOverwritePending is set.
 */
void rtllib_ts_init_add_ba(struct rtllib_device *ieee, struct tx_ts_record *pTS,
			   u8 Policy, u8 bOverwritePending)
{
	struct ba_record *pBA = &pTS->TxPendingBARecord;

	if (pBA->b_valid && !bOverwritePending)
		return;

	/* Clear any stale entry before re-proposing. */
	deactivate_ba_entry(ieee, pBA);

	pBA->dialog_token++;
	pBA->ba_param_set.field.amsdu_support = 0;
	pBA->ba_param_set.field.ba_policy = Policy;
	pBA->ba_param_set.field.tid = pTS->TsCommonInfo.TSpec.f.TSInfo.field.ucTSID;
	pBA->ba_param_set.field.buffer_size = 32;
	pBA->ba_timeout_value = 0;
	/* Start 3 ahead of the current sequence number, modulo 12 bits. */
	pBA->ba_start_seq_ctrl.field.seq_num = (pTS->TxCurSeq + 3) % 4096;

	activate_ba_entry(pBA, BA_SETUP_TIMEOUT);

	rtllib_send_ADDBAReq(ieee, pTS->TsCommonInfo.Addr, pBA);
}
/* Tear down the Block Ack session of a traffic stream in the given
 * direction, notifying the peer with a DELBA if a session was active.
 * For TX the admitted record is preferred over the pending one when
 * choosing the parameters echoed in the DELBA frame.
 */
void rtllib_ts_init_del_ba(struct rtllib_device *ieee,
			   struct ts_common_info *pTsCommonInfo,
			   enum tr_select TxRxSelect)
{
	if (TxRxSelect == TX_DIR) {
		struct tx_ts_record *pTxTs =
			(struct tx_ts_record *)pTsCommonInfo;

		if (tx_ts_delete_ba(ieee, pTxTs))
			rtllib_send_DELBA(ieee, pTsCommonInfo->Addr,
					  (pTxTs->TxAdmittedBARecord.b_valid) ?
					  (&pTxTs->TxAdmittedBARecord) :
					  (&pTxTs->TxPendingBARecord),
					  TxRxSelect, DELBA_REASON_END_BA);
	} else if (TxRxSelect == RX_DIR) {
		struct rx_ts_record *pRxTs =
			(struct rx_ts_record *)pTsCommonInfo;

		if (rx_ts_delete_ba(ieee, pRxTs))
			rtllib_send_DELBA(ieee, pTsCommonInfo->Addr,
					  &pRxTs->rx_admitted_ba_record,
					  TxRxSelect, DELBA_REASON_END_BA);
	}
}
/* Timer callback: the peer never answered our ADDBA request within
 * BA_SETUP_TIMEOUT, so invalidate the pending record and mark the
 * stream so the next attempt is delayed.
 */
void rtllib_ba_setup_timeout(struct timer_list *t)
{
	struct tx_ts_record *ts = from_timer(ts, t, TxPendingBARecord.timer);

	ts->TxPendingBARecord.b_valid = false;
	ts->bAddBaReqInProgress = false;
	ts->bAddBaReqDelayed = true;
}
/* Timer callback: the TX-side admitted BA went inactive; delete the
 * local session state and send the peer a DELBA(timeout).  The device
 * pointer is recovered from the TS record's position in its array.
 */
void rtllib_tx_ba_inact_timeout(struct timer_list *t)
{
	struct tx_ts_record *pTxTs = from_timer(pTxTs, t,
						TxAdmittedBARecord.timer);
	struct rtllib_device *ieee = container_of(pTxTs, struct rtllib_device,
						  TxTsRecord[pTxTs->num]);
	tx_ts_delete_ba(ieee, pTxTs);
	rtllib_send_DELBA(ieee, pTxTs->TsCommonInfo.Addr,
			  &pTxTs->TxAdmittedBARecord, TX_DIR,
			  DELBA_REASON_TIMEOUT);
}
/* Timer callback: the RX-side admitted BA went inactive; delete the
 * local session state and send the peer a DELBA(timeout).
 */
void rtllib_rx_ba_inact_timeout(struct timer_list *t)
{
	struct rx_ts_record *pRxTs = from_timer(pRxTs, t,
						rx_admitted_ba_record.timer);
	struct rtllib_device *ieee = container_of(pRxTs, struct rtllib_device,
						  RxTsRecord[pRxTs->num]);
	rx_ts_delete_ba(ieee, pRxTs);
	rtllib_send_DELBA(ieee, pRxTs->ts_common_info.Addr,
			  &pRxTs->rx_admitted_ba_record, RX_DIR,
			  DELBA_REASON_TIMEOUT);
}
| linux-master | drivers/staging/rtl8192e/rtl819x_BAProc.c |
// SPDX-License-Identifier: GPL-2.0
/******************************************************************************
* Copyright(c) 2008 - 2010 Realtek Corporation. All rights reserved.
*
* Contact Information:
* wlanfae <[email protected]>
******************************************************************************/
#include "dot11d.h"
/* One regulatory channel plan: up to 32 channel numbers, of which the
 * first @len entries are valid.
 */
struct channel_list {
	u8 channel[32];
	u8 len;
};
/* Channel plans indexed by the channel_plan value passed to
 * dot11d_channel_map() (presumably the COUNTRY_CODE_* enum — confirm
 * against its definition).  Entries above channel 14 are 5 GHz channels;
 * dot11d_channel_map() only maps channels 1..14 from these tables.
 */
static struct channel_list channel_array[] = {
	{{1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 36, 40, 44, 48, 52, 56, 60, 64,
	  149, 153, 157, 161, 165}, 24},
	{{1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11}, 11},
	{{1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 36, 40, 44, 48, 52, 56,
	  60, 64}, 21},
	{{1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13}, 13},
	{{1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13}, 13},
	{{1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 36, 40, 44, 48, 52,
	  56, 60, 64}, 22},
	{{1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 36, 40, 44, 48, 52,
	  56, 60, 64}, 22},
	{{1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13}, 13},
	{{1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 36, 40, 44, 48, 52,
	  56, 60, 64}, 22},
	{{1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 36, 40, 44, 48, 52,
	  56, 60, 64}, 22},
	{{1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14}, 14},
	{{1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13}, 13},
	{{1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 36, 40, 44, 48, 52,
	  56, 60, 64}, 21}
};
/* Reset 802.11d state for @ieee: disable it, clear the learned country
 * information, the channel map and TX power limits, and rearm the
 * country-IE watchdog.
 */
void dot11d_init(struct rtllib_device *ieee)
{
	struct rt_dot11d_info *dot11d_info = GET_DOT11D_INFO(ieee);

	dot11d_info->enabled = false;

	dot11d_info->state = DOT11D_STATE_NONE;
	dot11d_info->country_len = 0;
	memset(dot11d_info->channel_map, 0, MAX_CHANNEL_NUMBER + 1);
	/* 0xFF == no power limit learned for the channel. */
	memset(dot11d_info->max_tx_power_list, 0xFF, MAX_CHANNEL_NUMBER + 1);
	RESET_CIE_WATCHDOG(ieee);
}
EXPORT_SYMBOL(dot11d_init);
/* Program the channel map from the static plan table for @channel_plan.
 *
 * Map values: 0 = forbidden, 1 = allowed, 2 = passive-scan only.
 * Only channels within 1..14 are copied; the loop deliberately stops at
 * the first out-of-range entry, so 5 GHz channels in the tables are
 * never mapped here.  Global/world-wide plans additionally restrict
 * channels 12+ to passive scanning and pin the IBSS start/join range.
 */
void dot11d_channel_map(u8 channel_plan, struct rtllib_device *ieee)
{
	int i, max_chan = 14, min_chan = 1;

	ieee->global_domain = false;

	if (channel_array[channel_plan].len != 0) {
		memset(GET_DOT11D_INFO(ieee)->channel_map, 0,
		       sizeof(GET_DOT11D_INFO(ieee)->channel_map));
		for (i = 0; i < channel_array[channel_plan].len; i++) {
			if (channel_array[channel_plan].channel[i] < min_chan ||
			    channel_array[channel_plan].channel[i] > max_chan)
				break;
			GET_DOT11D_INFO(ieee)->channel_map[channel_array
					[channel_plan].channel[i]] = 1;
		}
	}

	switch (channel_plan) {
	case COUNTRY_CODE_GLOBAL_DOMAIN:
		ieee->global_domain = true;
		/* Channels 12-14 passive scan only. */
		for (i = 12; i <= 14; i++)
			GET_DOT11D_INFO(ieee)->channel_map[i] = 2;
		ieee->bss_start_channel = 10;
		ieee->ibss_maxjoin_chal = 11;
		break;

	case COUNTRY_CODE_WORLD_WIDE_13:
		/* Channels 12-13 passive scan only. */
		for (i = 12; i <= 13; i++)
			GET_DOT11D_INFO(ieee)->channel_map[i] = 2;
		ieee->bss_start_channel = 10;
		ieee->ibss_maxjoin_chal = 11;
		break;

	default:
		ieee->bss_start_channel = 1;
		ieee->ibss_maxjoin_chal = 14;
		break;
	}
}
EXPORT_SYMBOL(dot11d_channel_map);
/* Forget learned 802.11d information and fall back to the default map:
 * channels 1-11 active, 12-14 passive-scan only, no TX power limits.
 */
void dot11d_reset(struct rtllib_device *ieee)
{
	struct rt_dot11d_info *info = GET_DOT11D_INFO(ieee);
	u32 chan;

	memset(info->channel_map, 0, MAX_CHANNEL_NUMBER + 1);
	memset(info->max_tx_power_list, 0xFF, MAX_CHANNEL_NUMBER + 1);
	for (chan = 1; chan <= 14; chan++)
		info->channel_map[chan] = (chan <= 11) ? 1 : 2;

	info->state = DOT11D_STATE_NONE;
	info->country_len = 0;
	RESET_CIE_WATCHDOG(ieee);
}
/* Learn the regulatory domain from a received country information
 * element.
 *
 * @address:     BSSID the IE came from (cached for later comparison).
 * @country_len: total IE length; the first 3 octets are the country
 *               string, the rest are (first_channel, num_channels,
 *               max_tx_power) triples.
 *
 * Triples must be strictly ascending and stay within
 * MAX_CHANNEL_NUMBER, otherwise the whole IE is discarded.
 */
void dot11d_update_country(struct rtllib_device *dev, u8 *address,
			   u16 country_len, u8 *country)
{
	struct rt_dot11d_info *dot11d_info = GET_DOT11D_INFO(dev);
	u8 i, j, number_of_triples, max_channel_number;
	struct chnl_txpow_triple *triple;

	memset(dot11d_info->channel_map, 0, MAX_CHANNEL_NUMBER + 1);
	memset(dot11d_info->max_tx_power_list, 0xFF, MAX_CHANNEL_NUMBER + 1);
	max_channel_number = 0;
	number_of_triples = (country_len - 3) / 3;
	triple = (struct chnl_txpow_triple *)(country + 3);
	for (i = 0; i < number_of_triples; i++) {
		/* Triples must not overlap or go backwards. */
		if (max_channel_number >= triple->first_channel) {
			netdev_info(dev->dev,
				    "%s: Invalid country IE, skip it......1\n",
				    __func__);
			return;
		}
		if (MAX_CHANNEL_NUMBER < (triple->first_channel +
					  triple->num_channels)) {
			netdev_info(dev->dev,
				    "%s: Invalid country IE, skip it......2\n",
				    __func__);
			return;
		}

		for (j = 0; j < triple->num_channels; j++) {
			dot11d_info->channel_map[triple->first_channel + j] = 1;
			dot11d_info->max_tx_power_list[triple->first_channel + j] =
						 triple->max_tx_power;
			max_channel_number = triple->first_channel + j;
		}

		/* Advance by the 3-octet wire size, not sizeof(*triple). */
		triple = (struct chnl_txpow_triple *)((u8 *)triple + 3);
	}

	UPDATE_CIE_SRC(dev, address);

	dot11d_info->country_len = country_len;
	memcpy(dot11d_info->country_buffer, country, country_len);
	dot11d_info->state = DOT11D_STATE_LEARNED;
}
/* Advance the 802.11d state machine after a scan: a freshly learned
 * country IE becomes final (DONE); if it was already final, the learned
 * data is aged out and reset.
 */
void dot11d_scan_complete(struct rtllib_device *dev)
{
	struct rt_dot11d_info *info = GET_DOT11D_INFO(dev);

	if (info->state == DOT11D_STATE_LEARNED)
		info->state = DOT11D_STATE_DONE;
	else if (info->state == DOT11D_STATE_DONE)
		dot11d_reset(dev);
}
| linux-master | drivers/staging/rtl8192e/dot11d.c |
// SPDX-License-Identifier: GPL-2.0
/*
* Host AP crypt: host-based WEP encryption implementation for Host AP driver
*
* Copyright (c) 2002-2004, Jouni Malinen <[email protected]>
*/
#include <crypto/arc4.h>
#include <linux/fips.h>
#include <linux/module.h>
#include <linux/init.h>
#include <linux/slab.h>
#include <linux/random.h>
#include <linux/skbuff.h>
#include <linux/string.h>
#include "rtllib.h"
#include <linux/crc32.h>
/* Per-key WEP state: current IV, the secret key (up to 104-bit WEP),
 * its length and index, and separate RC4 contexts for each direction.
 */
struct prism2_wep_data {
	u32 iv;		/* 24-bit WEP IV kept in the low bits, incremented per frame */
#define WEP_KEY_LEN 13
	u8 key[WEP_KEY_LEN + 1];
	u8 key_len;
	u8 key_idx;
	struct arc4_ctx rx_ctx_arc4;
	struct arc4_ctx tx_ctx_arc4;
};
/* Allocate per-key WEP state for key slot @keyidx.  Refused entirely in
 * FIPS mode since WEP/RC4 is not an approved algorithm.  Returns the
 * opaque private context, or NULL on failure.
 */
static void *prism2_wep_init(int keyidx)
{
	struct prism2_wep_data *wep;

	if (fips_enabled)
		return NULL;

	wep = kzalloc(sizeof(*wep), GFP_ATOMIC);
	if (!wep)
		return NULL;

	wep->key_idx = keyidx;
	/* start WEP IV from a random value */
	get_random_bytes(&wep->iv, 4);
	return wep;
}
/* Free the WEP context, scrubbing the key material from memory. */
static void prism2_wep_deinit(void *priv)
{
	kfree_sensitive(priv);
}
/* Perform WEP encryption on given skb that has at least 4 bytes of headroom
* for IV and 4 bytes of tailroom for ICV. Both IV and ICV will be transmitted,
* so the payload length increases with 8 bytes.
*
* WEP frame payload: IV + TX key idx, RC4(data), ICV = RC4(CRC32(data))
*/
/* Encrypt @skb in place with WEP: prepend IV+keyidx, append the RC4-
 * encrypted CRC32 ICV.  Needs 4 bytes headroom and 4 bytes tailroom.
 * When hardware security is active (bHwSec) only the IV header is
 * inserted; ICV/RC4 are left to the hardware.  Returns 0 or -1.
 */
static int prism2_wep_encrypt(struct sk_buff *skb, int hdr_len, void *priv)
{
	struct prism2_wep_data *wep = priv;
	u32 klen, len;
	u8 key[WEP_KEY_LEN + 3];
	u8 *pos;
	struct cb_desc *tcb_desc = (struct cb_desc *)(skb->cb +
				    MAX_DEV_ADDR_SIZE);
	u32 crc;
	u8 *icv;

	if (skb_headroom(skb) < 4 || skb_tailroom(skb) < 4 ||
	    skb->len < hdr_len){
		pr_err("Error!!! headroom=%d tailroom=%d skblen=%d hdr_len=%d\n",
		       skb_headroom(skb), skb_tailroom(skb), skb->len, hdr_len);
		return -1;
	}
	len = skb->len - hdr_len;
	/* Open a 4-byte gap after the 802.11 header for IV + key index. */
	pos = skb_push(skb, 4);
	memmove(pos, pos + 4, hdr_len);
	pos += hdr_len;

	klen = 3 + wep->key_len;

	wep->iv++;

	/* Fluhrer, Mantin, and Shamir have reported weaknesses in the key
	 * scheduling algorithm of RC4. At least IVs (KeyByte + 3, 0xff, N)
	 * can be used to speedup attacks, so avoid using them.
	 */
	if ((wep->iv & 0xff00) == 0xff00) {
		u8 B = (wep->iv >> 16) & 0xff;

		if (B >= 3 && B < klen)
			wep->iv += 0x0100;
	}

	/* Prepend 24-bit IV to RC4 key and TX frame */
	*pos++ = key[0] = (wep->iv >> 16) & 0xff;
	*pos++ = key[1] = (wep->iv >> 8) & 0xff;
	*pos++ = key[2] = wep->iv & 0xff;
	*pos++ = wep->key_idx << 6;

	/* Copy rest of the WEP key (the secret part) */
	memcpy(key + 3, wep->key, wep->key_len);

	if (!tcb_desc->bHwSec) {
		/* Append little-endian CRC32 and encrypt it to produce ICV */
		crc = ~crc32_le(~0, pos, len);
		icv = skb_put(skb, 4);
		icv[0] = crc;
		icv[1] = crc >> 8;
		icv[2] = crc >> 16;
		icv[3] = crc >> 24;

		/* Encrypt payload and ICV together with IV||key as seed. */
		arc4_setkey(&wep->tx_ctx_arc4, key, klen);
		arc4_crypt(&wep->tx_ctx_arc4, pos, pos, len + 4);
	}

	return 0;
}
/* Perform WEP decryption on given struct buffer. Buffer includes whole WEP
* part of the frame: IV (4 bytes), encrypted payload (including SNAP header),
* ICV (4 bytes). len includes both IV and ICV.
*
* Returns 0 if frame was decrypted successfully and ICV was correct and -1 on
* failure. If frame is OK, IV and ICV will be removed.
*/
/* Decrypt a WEP frame in place and verify its ICV, then strip the
 * 4-byte IV and 4-byte ICV.  When hardware security handled the
 * decryption (bHwSec), only the stripping is performed.
 * Returns 0 on success, -1 on short/foreign-key frames, -2 on ICV
 * mismatch.
 */
static int prism2_wep_decrypt(struct sk_buff *skb, int hdr_len, void *priv)
{
	struct prism2_wep_data *wep = priv;
	u32 klen, plen;
	u8 key[WEP_KEY_LEN + 3];
	u8 keyidx, *pos;
	struct cb_desc *tcb_desc = (struct cb_desc *)(skb->cb +
				    MAX_DEV_ADDR_SIZE);
	u32 crc;
	u8 icv[4];

	/* Need at least the header, 4-byte IV and 4-byte ICV. */
	if (skb->len < hdr_len + 8)
		return -1;

	pos = skb->data + hdr_len;
	key[0] = *pos++;
	key[1] = *pos++;
	key[2] = *pos++;
	keyidx = *pos++ >> 6;	/* key index lives in the top 2 bits */
	if (keyidx != wep->key_idx)
		return -1;

	klen = 3 + wep->key_len;

	/* Copy rest of the WEP key (the secret part) */
	memcpy(key + 3, wep->key, wep->key_len);

	/* Apply RC4 to data and compute CRC32 over decrypted data */
	plen = skb->len - hdr_len - 8;

	if (!tcb_desc->bHwSec) {
		arc4_setkey(&wep->rx_ctx_arc4, key, klen);
		arc4_crypt(&wep->rx_ctx_arc4, pos, pos, plen + 4);

		crc = ~crc32_le(~0, pos, plen);
		icv[0] = crc;
		icv[1] = crc >> 8;
		icv[2] = crc >> 16;
		icv[3] = crc >> 24;
		if (memcmp(icv, pos + plen, 4) != 0) {
			/* ICV mismatch - drop frame */
			return -2;
		}
	}
	/* Remove IV and ICV */
	memmove(skb->data + 4, skb->data, hdr_len);
	skb_pull(skb, 4);
	skb_trim(skb, skb->len - 4);

	return 0;
}
/* Install a WEP key of @len bytes (at most WEP_KEY_LEN).  @seq is
 * unused for WEP.  Returns 0 on success, -1 on bad length.
 */
static int prism2_wep_set_key(void *key, int len, u8 *seq, void *priv)
{
	struct prism2_wep_data *wep = priv;

	if (len < 0 || len > WEP_KEY_LEN)
		return -1;

	wep->key_len = len;
	memcpy(wep->key, key, len);

	return 0;
}
/* Copy the installed WEP key into @key (buffer of @len bytes).
 * Returns the key length, or -1 if the buffer is too small.
 */
static int prism2_wep_get_key(void *key, int len, u8 *seq, void *priv)
{
	struct prism2_wep_data *wep = priv;
	int klen = wep->key_len;

	if (len < klen)
		return -1;

	memcpy(key, wep->key, klen);
	return klen;
}
/* Emit key index and length for this WEP context into a seq_file. */
static void prism2_wep_print_stats(struct seq_file *m, void *priv)
{
	struct prism2_wep_data *wep = priv;

	seq_printf(m, "key[%d] alg=WEP len=%d\n", wep->key_idx, wep->key_len);
}
/* lib80211 crypto ops for software WEP ("R-WEP"); WEP has no MSDU-level
 * (MIC) operations, only per-MPDU encrypt/decrypt.
 */
static struct lib80211_crypto_ops rtllib_crypt_wep = {
	.name			= "R-WEP",
	.init			= prism2_wep_init,
	.deinit			= prism2_wep_deinit,
	.encrypt_mpdu		= prism2_wep_encrypt,
	.decrypt_mpdu		= prism2_wep_decrypt,
	.encrypt_msdu		= NULL,
	.decrypt_msdu		= NULL,
	.set_key		= prism2_wep_set_key,
	.get_key		= prism2_wep_get_key,
	.print_stats		= prism2_wep_print_stats,
	.extra_mpdu_prefix_len	= 4, /* IV */
	.extra_mpdu_postfix_len	= 4, /* ICV */
	.owner			= THIS_MODULE,
};
/* Register the WEP crypto ops with lib80211 on module load. */
static int __init rtllib_crypto_wep_init(void)
{
	return lib80211_register_crypto_ops(&rtllib_crypt_wep);
}

/* Unregister the WEP crypto ops on module unload. */
static void __exit rtllib_crypto_wep_exit(void)
{
	lib80211_unregister_crypto_ops(&rtllib_crypt_wep);
}

module_init(rtllib_crypto_wep_init);
module_exit(rtllib_crypto_wep_exit);

MODULE_LICENSE("GPL");
| linux-master | drivers/staging/rtl8192e/rtllib_crypt_wep.c |
// SPDX-License-Identifier: GPL-2.0
/*
* Host AP crypt: host-based TKIP encryption implementation for Host AP driver
*
* Copyright (c) 2003-2004, Jouni Malinen <[email protected]>
*/
#include <crypto/arc4.h>
#include <crypto/hash.h>
#include <linux/fips.h>
#include <linux/module.h>
#include <linux/init.h>
#include <linux/slab.h>
#include <linux/random.h>
#include <linux/skbuff.h>
#include <linux/netdevice.h>
#include <linux/if_ether.h>
#include <linux/if_arp.h>
#include <linux/string.h>
#include <linux/crc32.h>
#include <linux/etherdevice.h>
#include "rtllib.h"
/* Per-key TKIP state: temporal key, per-direction TSC counters and
 * cached phase-1 mixing output, replay/ICV/MIC failure counters, RC4
 * contexts and Michael MIC transforms.
 */
struct rtllib_tkip_data {
#define TKIP_KEY_LEN 32
	u8 key[TKIP_KEY_LEN];	/* TK(16) + TX MIC key(8) + RX MIC key(8) */
	int key_set;

	/* TX sequence counter (IV32:IV16) and cached phase-1 TTAK. */
	u32 tx_iv32;
	u16 tx_iv16;
	u16 tx_ttak[5];
	int tx_phase1_done;

	/* RX replay-check counters and cached phase-1 TTAK. */
	u32 rx_iv32;
	u16 rx_iv16;
	bool initialized;	/* false until the first frame seeds rx_iv32/16 */
	u16 rx_ttak[5];
	int rx_phase1_done;
	/* Candidate RX counters, committed only after MIC verification. */
	u32 rx_iv32_new;
	u16 rx_iv16_new;

	u32 dot11RSNAStatsTKIPReplays;
	u32 dot11RSNAStatsTKIPICVErrors;
	u32 dot11RSNAStatsTKIPLocalMICFailures;

	int key_idx;

	struct arc4_ctx rx_ctx_arc4;
	struct arc4_ctx tx_ctx_arc4;
	struct crypto_shash *rx_tfm_michael;
	struct crypto_shash *tx_tfm_michael;

	/* scratch buffers for virt_to_page() (crypto API) */
	u8 rx_hdr[16];
	u8 tx_hdr[16];
};
/* Allocate TKIP state for @key_idx and the two Michael MIC transforms.
 * Refused in FIPS mode (TKIP/RC4 is not approved).  Returns the opaque
 * context, or NULL on failure (all partial allocations are released).
 */
static void *rtllib_tkip_init(int key_idx)
{
	struct rtllib_tkip_data *priv;

	if (fips_enabled)
		return NULL;

	priv = kzalloc(sizeof(*priv), GFP_ATOMIC);
	if (!priv)
		goto fail;
	priv->key_idx = key_idx;

	priv->tx_tfm_michael = crypto_alloc_shash("michael_mic", 0, 0);
	if (IS_ERR(priv->tx_tfm_michael)) {
		pr_debug("Could not allocate crypto API michael_mic\n");
		priv->tx_tfm_michael = NULL;
		goto fail;
	}

	priv->rx_tfm_michael = crypto_alloc_shash("michael_mic", 0, 0);
	if (IS_ERR(priv->rx_tfm_michael)) {
		pr_debug("Could not allocate crypto API michael_mic\n");
		priv->rx_tfm_michael = NULL;
		goto fail;
	}
	return priv;

fail:
	/* crypto_free_shash(NULL) is safe, so a single cleanup path works. */
	if (priv) {
		crypto_free_shash(priv->tx_tfm_michael);
		crypto_free_shash(priv->rx_tfm_michael);
		kfree(priv);
	}

	return NULL;
}
/* Release the Michael MIC transforms and free the TKIP context,
 * scrubbing the key material from memory.
 */
static void rtllib_tkip_deinit(void *priv)
{
	struct rtllib_tkip_data *tkey = priv;

	if (tkey) {
		crypto_free_shash(tkey->tx_tfm_michael);
		crypto_free_shash(tkey->rx_tfm_michael);
	}
	kfree_sensitive(priv);
}
/* 16-bit rotate right by one. */
static inline u16 RotR1(u16 val)
{
	return (val >> 1) | (val << 15);
}

/* Low byte of a 16-bit value. */
static inline u8 Lo8(u16 val)
{
	return val & 0xff;
}

/* High byte of a 16-bit value. */
static inline u8 Hi8(u16 val)
{
	return val >> 8;
}

/* Low 16 bits of a 32-bit value. */
static inline u16 Lo16(u32 val)
{
	return val & 0xffff;
}

/* High 16 bits of a 32-bit value. */
static inline u16 Hi16(u32 val)
{
	return val >> 16;
}

/* Combine two bytes into a 16-bit value, @hi in the top byte. */
static inline u16 Mk16(u8 hi, u8 lo)
{
	return lo | (hi << 8);
}

/* Read a 16-bit value through the pointer as-is; callers pass pointers
 * into the little-endian key material (big-endian fixup is done in
 * tkip_mixing_phase2's #ifdef __BIG_ENDIAN block).
 */
static inline u16 Mk16_le(u16 *v)
{
	return *v;
}
/* TKIP key-mixing S-box table, indexed by one byte; _S_() combines two
 * lookups (low byte direct, high byte with its result byte-swapped).
 */
static const u16 Sbox[256] = {
	0xC6A5, 0xF884, 0xEE99, 0xF68D, 0xFF0D, 0xD6BD, 0xDEB1, 0x9154,
	0x6050, 0x0203, 0xCEA9, 0x567D, 0xE719, 0xB562, 0x4DE6, 0xEC9A,
	0x8F45, 0x1F9D, 0x8940, 0xFA87, 0xEF15, 0xB2EB, 0x8EC9, 0xFB0B,
	0x41EC, 0xB367, 0x5FFD, 0x45EA, 0x23BF, 0x53F7, 0xE496, 0x9B5B,
	0x75C2, 0xE11C, 0x3DAE, 0x4C6A, 0x6C5A, 0x7E41, 0xF502, 0x834F,
	0x685C, 0x51F4, 0xD134, 0xF908, 0xE293, 0xAB73, 0x6253, 0x2A3F,
	0x080C, 0x9552, 0x4665, 0x9D5E, 0x3028, 0x37A1, 0x0A0F, 0x2FB5,
	0x0E09, 0x2436, 0x1B9B, 0xDF3D, 0xCD26, 0x4E69, 0x7FCD, 0xEA9F,
	0x121B, 0x1D9E, 0x5874, 0x342E, 0x362D, 0xDCB2, 0xB4EE, 0x5BFB,
	0xA4F6, 0x764D, 0xB761, 0x7DCE, 0x527B, 0xDD3E, 0x5E71, 0x1397,
	0xA6F5, 0xB968, 0x0000, 0xC12C, 0x4060, 0xE31F, 0x79C8, 0xB6ED,
	0xD4BE, 0x8D46, 0x67D9, 0x724B, 0x94DE, 0x98D4, 0xB0E8, 0x854A,
	0xBB6B, 0xC52A, 0x4FE5, 0xED16, 0x86C5, 0x9AD7, 0x6655, 0x1194,
	0x8ACF, 0xE910, 0x0406, 0xFE81, 0xA0F0, 0x7844, 0x25BA, 0x4BE3,
	0xA2F3, 0x5DFE, 0x80C0, 0x058A, 0x3FAD, 0x21BC, 0x7048, 0xF104,
	0x63DF, 0x77C1, 0xAF75, 0x4263, 0x2030, 0xE51A, 0xFD0E, 0xBF6D,
	0x814C, 0x1814, 0x2635, 0xC32F, 0xBEE1, 0x35A2, 0x88CC, 0x2E39,
	0x9357, 0x55F2, 0xFC82, 0x7A47, 0xC8AC, 0xBAE7, 0x322B, 0xE695,
	0xC0A0, 0x1998, 0x9ED1, 0xA37F, 0x4466, 0x547E, 0x3BAB, 0x0B83,
	0x8CCA, 0xC729, 0x6BD3, 0x283C, 0xA779, 0xBCE2, 0x161D, 0xAD76,
	0xDB3B, 0x6456, 0x744E, 0x141E, 0x92DB, 0x0C0A, 0x486C, 0xB8E4,
	0x9F5D, 0xBD6E, 0x43EF, 0xC4A6, 0x39A8, 0x31A4, 0xD337, 0xF28B,
	0xD532, 0x8B43, 0x6E59, 0xDAB7, 0x018C, 0xB164, 0x9CD2, 0x49E0,
	0xD8B4, 0xACFA, 0xF307, 0xCF25, 0xCAAF, 0xF48E, 0x47E9, 0x1018,
	0x6FD5, 0xF088, 0x4A6F, 0x5C72, 0x3824, 0x57F1, 0x73C7, 0x9751,
	0xCB23, 0xA17C, 0xE89C, 0x3E21, 0x96DD, 0x61DC, 0x0D86, 0x0F85,
	0xE090, 0x7C42, 0x71C4, 0xCCAA, 0x90D8, 0x0605, 0xF701, 0x1C12,
	0xC2A3, 0x6A5F, 0xAEF9, 0x69D0, 0x1791, 0x9958, 0x3A27, 0x27B9,
	0xD938, 0xEB13, 0x2BB3, 0x2233, 0xD2BB, 0xA970, 0x0789, 0x33A7,
	0x2DB6, 0x3C22, 0x1592, 0xC920, 0x8749, 0xAAFF, 0x5078, 0xA57A,
	0x038F, 0x59F8, 0x0980, 0x1A17, 0x65DA, 0xD731, 0x84C6, 0xD0B8,
	0x82C3, 0x29B0, 0x5A77, 0x1E11, 0x7BCB, 0xA8FC, 0x6DD6, 0x2C3A,
};
/* TKIP S-box function: low-byte lookup XORed with the byte-swapped
 * high-byte lookup.
 */
static inline u16 _S_(u16 v)
{
	u16 t = Sbox[Hi8(v)];
	return Sbox[Lo8(v)] ^ ((t << 8) | (t >> 8));
}
#define PHASE1_LOOP_COUNT 8

/* TKIP key mixing, phase 1: derive the 80-bit TTAK from the temporal
 * key @TK, transmitter address @TA and the upper 32 bits of the TSC
 * (@IV32).  The result is cached by callers and only recomputed when
 * IV32 changes.
 */
static void tkip_mixing_phase1(u16 *TTAK, const u8 *TK, const u8 *TA, u32 IV32)
{
	int i, j;

	/* Initialize the 80-bit TTAK from TSC (IV32) and TA[0..5] */
	TTAK[0] = Lo16(IV32);
	TTAK[1] = Hi16(IV32);
	TTAK[2] = Mk16(TA[1], TA[0]);
	TTAK[3] = Mk16(TA[3], TA[2]);
	TTAK[4] = Mk16(TA[5], TA[4]);

	for (i = 0; i < PHASE1_LOOP_COUNT; i++) {
		/* Alternate between the two halves of the key each round. */
		j = 2 * (i & 1);
		TTAK[0] += _S_(TTAK[4] ^ Mk16(TK[1 + j], TK[0 + j]));
		TTAK[1] += _S_(TTAK[0] ^ Mk16(TK[5 + j], TK[4 + j]));
		TTAK[2] += _S_(TTAK[1] ^ Mk16(TK[9 + j], TK[8 + j]));
		TTAK[3] += _S_(TTAK[2] ^ Mk16(TK[13 + j], TK[12 + j]));
		TTAK[4] += _S_(TTAK[3] ^ Mk16(TK[1 + j], TK[0 + j])) + i;
	}
}
/* TKIP key mixing, phase 2: produce the 16-byte per-packet RC4 seed
 * @WEPSeed from the cached TTAK, temporal key @TK and the lower 16 TSC
 * bits (@IV16).  WEPSeed[0..2] doubles as the transmitted WEP IV.
 */
static void tkip_mixing_phase2(u8 *WEPSeed, const u8 *TK, const u16 *TTAK,
			       u16 IV16)
{
	/* Make temporary area overlap WEP seed so that the final copy can be
	 * avoided on little endian hosts.
	 */
	u16 *PPK = (u16 *)&WEPSeed[4];

	/* Step 1 - make copy of TTAK and bring in TSC */
	PPK[0] = TTAK[0];
	PPK[1] = TTAK[1];
	PPK[2] = TTAK[2];
	PPK[3] = TTAK[3];
	PPK[4] = TTAK[4];
	PPK[5] = TTAK[4] + IV16;

	/* Step 2 - 96-bit bijective mixing using S-box */
	PPK[0] += _S_(PPK[5] ^ Mk16_le((u16 *)&TK[0]));
	PPK[1] += _S_(PPK[0] ^ Mk16_le((u16 *)&TK[2]));
	PPK[2] += _S_(PPK[1] ^ Mk16_le((u16 *)&TK[4]));
	PPK[3] += _S_(PPK[2] ^ Mk16_le((u16 *)&TK[6]));
	PPK[4] += _S_(PPK[3] ^ Mk16_le((u16 *)&TK[8]));
	PPK[5] += _S_(PPK[4] ^ Mk16_le((u16 *)&TK[10]));

	PPK[0] += RotR1(PPK[5] ^ Mk16_le((u16 *)&TK[12]));
	PPK[1] += RotR1(PPK[0] ^ Mk16_le((u16 *)&TK[14]));
	PPK[2] += RotR1(PPK[1]);
	PPK[3] += RotR1(PPK[2]);
	PPK[4] += RotR1(PPK[3]);
	PPK[5] += RotR1(PPK[4]);

	/* Step 3 - bring in last of TK bits, assign 24-bit WEP IV value
	 * WEPSeed[0..2] is transmitted as WEP IV
	 */
	WEPSeed[0] = Hi8(IV16);
	/* Dummy byte avoids the known weak-IV classes. */
	WEPSeed[1] = (Hi8(IV16) | 0x20) & 0x7F;
	WEPSeed[2] = Lo8(IV16);
	WEPSeed[3] = Lo8((PPK[5] ^ Mk16_le((u16 *)&TK[0])) >> 1);

#ifdef __BIG_ENDIAN
	{
		/* PPK was filled via u16 stores; byte-swap for the wire. */
		int i;

		for (i = 0; i < 6; i++)
			PPK[i] = (PPK[i] << 8) | (PPK[i] >> 8);
	}
#endif
}
/* Encrypt @skb in place with TKIP: insert the 8-byte IV/ExtIV header,
 * append the RC4-encrypted CRC32 ICV and advance the TX TSC.  Needs 8
 * bytes headroom and 4 bytes tailroom.  With hardware security only
 * the IV header is written.  Returns 0 on success, -1 on bad skb.
 *
 * Cleanup: the old code kept a `ret` local that was never set to
 * anything but 0 and ended with an `if (!bHwSec) return ret; return 0;`
 * pair — both paths returned 0, so the dead variable and branch are
 * gone.
 */
static int rtllib_tkip_encrypt(struct sk_buff *skb, int hdr_len, void *priv)
{
	struct rtllib_tkip_data *tkey = priv;
	int len;
	u8 *pos;
	struct rtllib_hdr_4addr *hdr;
	struct cb_desc *tcb_desc = (struct cb_desc *)(skb->cb +
				    MAX_DEV_ADDR_SIZE);
	u8 rc4key[16], *icv;
	u32 crc;

	if (skb_headroom(skb) < 8 || skb_tailroom(skb) < 4 ||
	    skb->len < hdr_len)
		return -1;

	hdr = (struct rtllib_hdr_4addr *)skb->data;

	if (!tcb_desc->bHwSec) {
		/* Phase 1 only depends on IV32/TA, so it is cached. */
		if (!tkey->tx_phase1_done) {
			tkip_mixing_phase1(tkey->tx_ttak, tkey->key, hdr->addr2,
					   tkey->tx_iv32);
			tkey->tx_phase1_done = 1;
		}
		tkip_mixing_phase2(rc4key, tkey->key, tkey->tx_ttak,
				   tkey->tx_iv16);
	} else {
		tkey->tx_phase1_done = 1;
	}

	len = skb->len - hdr_len;
	/* Open an 8-byte gap after the 802.11 header for IV + ExtIV. */
	pos = skb_push(skb, 8);
	memmove(pos, pos + 8, hdr_len);
	pos += hdr_len;

	if (tcb_desc->bHwSec) {
		*pos++ = Hi8(tkey->tx_iv16);
		*pos++ = (Hi8(tkey->tx_iv16) | 0x20) & 0x7F;
		*pos++ = Lo8(tkey->tx_iv16);
	} else {
		*pos++ = rc4key[0];
		*pos++ = rc4key[1];
		*pos++ = rc4key[2];
	}

	*pos++ = (tkey->key_idx << 6) | (1 << 5) /* Ext IV included */;
	*pos++ = tkey->tx_iv32 & 0xff;
	*pos++ = (tkey->tx_iv32 >> 8) & 0xff;
	*pos++ = (tkey->tx_iv32 >> 16) & 0xff;
	*pos++ = (tkey->tx_iv32 >> 24) & 0xff;

	if (!tcb_desc->bHwSec) {
		icv = skb_put(skb, 4);
		crc = ~crc32_le(~0, pos, len);
		icv[0] = crc;
		icv[1] = crc >> 8;
		icv[2] = crc >> 16;
		icv[3] = crc >> 24;

		arc4_setkey(&tkey->tx_ctx_arc4, rc4key, 16);
		arc4_crypt(&tkey->tx_ctx_arc4, pos, pos, len + 4);
	}

	/* Advance the 48-bit TSC; IV16 rollover invalidates phase 1. */
	tkey->tx_iv16++;
	if (tkey->tx_iv16 == 0) {
		tkey->tx_phase1_done = 0;
		tkey->tx_iv32++;
	}

	return 0;
}
/* Decrypt a TKIP frame in place: parse the IV/ExtIV header, enforce the
 * replay window, run phase1/phase2 key mixing, RC4-decrypt and verify
 * the ICV, then strip the 8-byte IV and 4-byte ICV.  The RX TSC is only
 * staged in rx_iv32_new/rx_iv16_new here; it is committed after the
 * Michael MIC check in rtllib_michael_mic_verify().
 *
 * Returns the key index (>= 0) on success, negative codes for: short
 * frame (-1), no ExtIV (-2), unset key (-3), replay (-4), ICV error
 * (-5), wrong key index (-6).
 */
static int rtllib_tkip_decrypt(struct sk_buff *skb, int hdr_len, void *priv)
{
	struct rtllib_tkip_data *tkey = priv;
	u8 keyidx, *pos;
	u32 iv32;
	u16 iv16;
	struct rtllib_hdr_4addr *hdr;
	struct cb_desc *tcb_desc = (struct cb_desc *)(skb->cb +
				    MAX_DEV_ADDR_SIZE);
	u8 rc4key[16];
	u8 icv[4];
	u32 crc;
	int plen;

	/* Need the header, the 8-byte IV/ExtIV and the 4-byte ICV. */
	if (skb->len < hdr_len + 8 + 4)
		return -1;

	hdr = (struct rtllib_hdr_4addr *)skb->data;
	pos = skb->data + hdr_len;
	keyidx = pos[3];
	if (!(keyidx & (1 << 5))) {
		if (net_ratelimit()) {
			netdev_dbg(skb->dev,
				   "Received packet without ExtIV flag from %pM\n",
				   hdr->addr2);
		}
		return -2;
	}
	keyidx >>= 6;
	if (tkey->key_idx != keyidx) {
		netdev_dbg(skb->dev,
			   "RX tkey->key_idx=%d frame keyidx=%d priv=%p\n",
			   tkey->key_idx, keyidx, priv);
		return -6;
	}
	if (!tkey->key_set) {
		if (net_ratelimit()) {
			netdev_dbg(skb->dev,
				   "Received packet from %pM with keyid=%d that does not have a configured key\n",
				   hdr->addr2, keyidx);
		}
		return -3;
	}
	/* pos[1] is the dummy WEPSeed byte; IV16 is pos[0]:pos[2]. */
	iv16 = (pos[0] << 8) | pos[2];
	iv32 = pos[4] | (pos[5] << 8) | (pos[6] << 16) | (pos[7] << 24);
	pos += 8;

	if (!tcb_desc->bHwSec || (skb->cb[0] == 1)) {
		/* Replay check against the last committed TSC; skipped
		 * for the very first frame after key installation.
		 */
		if ((iv32 < tkey->rx_iv32 ||
		    (iv32 == tkey->rx_iv32 && iv16 <= tkey->rx_iv16)) &&
		    tkey->initialized) {
			if (net_ratelimit()) {
				netdev_dbg(skb->dev,
					   "Replay detected: STA= %pM previous TSC %08x%04x received TSC %08x%04x\n",
					   hdr->addr2, tkey->rx_iv32,
					   tkey->rx_iv16, iv32, iv16);
			}
			tkey->dot11RSNAStatsTKIPReplays++;
			return -4;
		}
		tkey->initialized = true;

		if (iv32 != tkey->rx_iv32 || !tkey->rx_phase1_done) {
			tkip_mixing_phase1(tkey->rx_ttak, tkey->key,
					   hdr->addr2, iv32);
			tkey->rx_phase1_done = 1;
		}
		tkip_mixing_phase2(rc4key, tkey->key, tkey->rx_ttak, iv16);

		/* 12 = 8-byte IV + 4-byte ICV. */
		plen = skb->len - hdr_len - 12;

		arc4_setkey(&tkey->rx_ctx_arc4, rc4key, 16);
		arc4_crypt(&tkey->rx_ctx_arc4, pos, pos, plen + 4);

		crc = ~crc32_le(~0, pos, plen);
		icv[0] = crc;
		icv[1] = crc >> 8;
		icv[2] = crc >> 16;
		icv[3] = crc >> 24;

		if (memcmp(icv, pos + plen, 4) != 0) {
			if (iv32 != tkey->rx_iv32) {
				/* Previously cached Phase1 result was already
				 * lost, so it needs to be recalculated for the
				 * next packet.
				 */
				tkey->rx_phase1_done = 0;
			}
			if (net_ratelimit()) {
				netdev_dbg(skb->dev,
					   "ICV error detected: STA= %pM\n",
					   hdr->addr2);
			}
			tkey->dot11RSNAStatsTKIPICVErrors++;
			return -5;
		}

	}

	/* Update real counters only after Michael MIC verification has
	 * completed
	 */
	tkey->rx_iv32_new = iv32;
	tkey->rx_iv16_new = iv16;

	/* Remove IV and ICV */
	memmove(skb->data + 8, skb->data, hdr_len);
	skb_pull(skb, 8);
	skb_trim(skb, skb->len - 4);

	return keyidx;
}
/* Compute the Michael MIC over the 16-byte pseudo-header @hdr and
 * @data_len bytes of @data with the 8-byte @key, writing 8 bytes to
 * @mic.  Returns 0 on success, -1 on setkey failure or a negative
 * crypto-API error.
 */
static int michael_mic(struct crypto_shash *tfm_michael, u8 *key, u8 *hdr,
		       u8 *data, size_t data_len, u8 *mic)
{
	SHASH_DESC_ON_STACK(desc, tfm_michael);
	int err;

	desc->tfm = tfm_michael;

	if (crypto_shash_setkey(tfm_michael, key, 8))
		return -1;

	err = crypto_shash_init(desc);
	if (err)
		goto out;
	err = crypto_shash_update(desc, hdr, 16);
	if (err)
		goto out;
	err = crypto_shash_update(desc, data, data_len);
	if (err)
		goto out;
	err = crypto_shash_final(desc, mic);

out:
	/* Wipe key schedule state from the stack. */
	shash_desc_zero(desc);
	return err;
}
/* Build the 16-byte Michael MIC pseudo-header (DA, SA, priority,
 * 3 reserved zero bytes) from the 802.11 header in @skb, selecting the
 * address fields by the ToDS/FromDS bits.  The priority byte is filled
 * in later by the callers for QoS frames.
 */
static void michael_mic_hdr(struct sk_buff *skb, u8 *hdr)
{
	struct rtllib_hdr_4addr *hdr11;

	hdr11 = (struct rtllib_hdr_4addr *)skb->data;
	switch (le16_to_cpu(hdr11->frame_ctl) &
		(RTLLIB_FCTL_FROMDS | RTLLIB_FCTL_TODS)) {
	case RTLLIB_FCTL_TODS:
		ether_addr_copy(hdr, hdr11->addr3); /* DA */
		ether_addr_copy(hdr + ETH_ALEN, hdr11->addr2); /* SA */
		break;
	case RTLLIB_FCTL_FROMDS:
		ether_addr_copy(hdr, hdr11->addr1); /* DA */
		ether_addr_copy(hdr + ETH_ALEN, hdr11->addr3); /* SA */
		break;
	case RTLLIB_FCTL_FROMDS | RTLLIB_FCTL_TODS:
		ether_addr_copy(hdr, hdr11->addr3); /* DA */
		ether_addr_copy(hdr + ETH_ALEN, hdr11->addr4); /* SA */
		break;
	case 0:
		ether_addr_copy(hdr, hdr11->addr1); /* DA */
		ether_addr_copy(hdr + ETH_ALEN, hdr11->addr2); /* SA */
		break;
	}

	/* priority */
	hdr[12] = 0;

	/* reserved */
	hdr[13] = 0;
	hdr[14] = 0;
	hdr[15] = 0;
}
/* Append the 8-byte Michael MIC (TX key at key[16..23]) to @skb.
 * For QoS frames the TID from the QoS control field becomes the
 * pseudo-header priority.  Returns 0 on success, -1 on bad skb or MIC
 * computation failure.
 */
static int rtllib_michael_mic_add(struct sk_buff *skb, int hdr_len, void *priv)
{
	struct rtllib_tkip_data *tkey = priv;
	u8 *pos;
	struct rtllib_hdr_4addr *hdr;

	hdr = (struct rtllib_hdr_4addr *)skb->data;

	if (skb_tailroom(skb) < 8 || skb->len < hdr_len) {
		netdev_dbg(skb->dev,
			   "Invalid packet for Michael MIC add (tailroom=%d hdr_len=%d skb->len=%d)\n",
			   skb_tailroom(skb), hdr_len, skb->len);
		return -1;
	}

	michael_mic_hdr(skb, tkey->tx_hdr);

	if (RTLLIB_QOS_HAS_SEQ(le16_to_cpu(hdr->frame_ctl)))
		tkey->tx_hdr[12] = *(skb->data + hdr_len - 2) & 0x07;

	pos = skb_put(skb, 8);
	if (michael_mic(tkey->tx_tfm_michael, &tkey->key[16], tkey->tx_hdr,
			skb->data + hdr_len, skb->len - 8 - hdr_len, pos))
		return -1;

	return 0;
}
/* Report a Michael MIC failure to user space via a wireless event so the
 * supplicant can run TKIP countermeasures.
 */
static void rtllib_michael_mic_failure(struct net_device *dev,
				       struct rtllib_hdr_4addr *hdr,
				       int keyidx)
{
	struct iw_michaelmicfailure ev;
	union iwreq_data wrqu;

	/* TODO: needed parameters: count, keyid, key type, TSC */
	memset(&ev, 0, sizeof(ev));
	ev.flags = keyidx & IW_MICFAILURE_KEY_ID;
	/* group bit of addr1 distinguishes group vs pairwise key failure */
	ev.flags |= (hdr->addr1[0] & 0x01) ? IW_MICFAILURE_GROUP
					   : IW_MICFAILURE_PAIRWISE;
	ev.src_addr.sa_family = ARPHRD_ETHER;
	ether_addr_copy(ev.src_addr.sa_data, hdr->addr2);

	memset(&wrqu, 0, sizeof(wrqu));
	wrqu.data.length = sizeof(ev);
	wireless_send_event(dev, IWEVMICHAELMICFAILURE, &wrqu, (char *)&ev);
}
/* Verify and strip the Michael MIC of a received MSDU.
 *
 * Returns 0 on success (MIC removed, RX TSC committed), -1 on any failure
 * (no key, MIC computation error, or MIC mismatch).
 *
 * Fixes: the mismatch branch redeclared 'hdr', shadowing the identical
 * outer pointer (-Wshadow), and emitted a second netdev_dbg that printed
 * the constant result of the memcmp already known to be non-zero.  Both
 * removed; behavior of the verification itself is unchanged.
 */
static int rtllib_michael_mic_verify(struct sk_buff *skb, int keyidx,
				     int hdr_len, void *priv)
{
	struct rtllib_tkip_data *tkey = priv;
	u8 mic[8];
	struct rtllib_hdr_4addr *hdr;

	hdr = (struct rtllib_hdr_4addr *)skb->data;

	if (!tkey->key_set)
		return -1;

	michael_mic_hdr(skb, tkey->rx_hdr);
	/* QoS frames carry the TID in the priority byte of the pseudo-header */
	if (RTLLIB_QOS_HAS_SEQ(le16_to_cpu(hdr->frame_ctl)))
		tkey->rx_hdr[12] = *(skb->data + hdr_len - 2) & 0x07;

	if (michael_mic(tkey->rx_tfm_michael, &tkey->key[24], tkey->rx_hdr,
			skb->data + hdr_len, skb->len - 8 - hdr_len, mic))
		return -1;

	if (memcmp(mic, skb->data + skb->len - 8, 8) != 0) {
		netdev_dbg(skb->dev,
			   "Michael MIC verification failed for MSDU from %pM keyidx=%d\n",
			   hdr->addr2, keyidx);
		if (skb->dev) {
			pr_info("skb->dev != NULL\n");
			rtllib_michael_mic_failure(skb->dev, hdr, keyidx);
		}
		tkey->dot11RSNAStatsTKIPLocalMICFailures++;
		return -1;
	}

	/* Update TSC counters for RX now that the packet verification has
	 * completed.
	 */
	tkey->rx_iv32 = tkey->rx_iv32_new;
	tkey->rx_iv16 = tkey->rx_iv16_new;

	skb_trim(skb, skb->len - 8);
	return 0;
}
/* Install (len == TKIP_KEY_LEN), clear (len == 0) or reject a TKIP key.
 *
 * The state is fully reset except for the key index and the pre-allocated
 * Michael transforms, which must survive the memset.  @seq, when given,
 * seeds the RX TSC (bytes are little-endian on the wire).
 * Returns 0 on success, -1 on invalid length.
 */
static int rtllib_tkip_set_key(void *key, int len, u8 *seq, void *priv)
{
	struct rtllib_tkip_data *tkey = priv;
	int keyidx;
	/* preserve the crypto transforms across the memset below */
	struct crypto_shash *tfm = tkey->tx_tfm_michael;
	struct crypto_shash *tfm3 = tkey->rx_tfm_michael;

	keyidx = tkey->key_idx;
	memset(tkey, 0, sizeof(*tkey));
	tkey->key_idx = keyidx;
	tkey->tx_tfm_michael = tfm;
	tkey->rx_tfm_michael = tfm3;

	if (len == TKIP_KEY_LEN) {
		memcpy(tkey->key, key, TKIP_KEY_LEN);
		tkey->key_set = 1;
		tkey->tx_iv16 = 1; /* TSC is initialized to 1 */
		if (seq) {
			tkey->rx_iv32 = (seq[5] << 24) | (seq[4] << 16) |
					(seq[3] << 8) | seq[2];
			tkey->rx_iv16 = (seq[1] << 8) | seq[0];
		}
	} else if (len == 0) {
		tkey->key_set = 0;
	} else {
		return -1;
	}

	return 0;
}
/* Copy out the TKIP key and, optionally, the TSC of the last transmitted
 * frame in @seq (little-endian byte order).
 *
 * Returns TKIP_KEY_LEN on success, 0 if no key is set, -1 if @len is too
 * small.
 *
 * Fixes: tx_iv16/tx_iv32 hold the *next* TSC to be used, so the locals
 * are stepped back by one (with borrow) — but the raw counters were then
 * written to @seq and the decremented locals were dead stores
 * (-Wunused-but-set-variable).  Report the decremented values, matching
 * the "last transmitted frame" contract stated in the comment.
 */
static int rtllib_tkip_get_key(void *key, int len, u8 *seq, void *priv)
{
	struct rtllib_tkip_data *tkey = priv;

	if (len < TKIP_KEY_LEN)
		return -1;

	if (!tkey->key_set)
		return 0;
	memcpy(key, tkey->key, TKIP_KEY_LEN);

	if (seq) {
		/* Return the sequence number of the last transmitted frame. */
		u16 iv16 = tkey->tx_iv16;
		u32 iv32 = tkey->tx_iv32;

		if (iv16 == 0)
			iv32--;
		iv16--;
		seq[0] = iv16;
		seq[1] = iv16 >> 8;
		seq[2] = iv32;
		seq[3] = iv32 >> 8;
		seq[4] = iv32 >> 16;
		seq[5] = iv32 >> 24;
	}

	return TKIP_KEY_LEN;
}
/* Dump per-key TKIP state and counters to a seq_file (procfs/debugfs).
 * TSCs are printed most-significant byte first.
 */
static void rtllib_tkip_print_stats(struct seq_file *m, void *priv)
{
	struct rtllib_tkip_data *tkip = priv;

	seq_printf(m,
		   "key[%d] alg=TKIP key_set=%d tx_pn=%02x%02x%02x%02x%02x%02x rx_pn=%02x%02x%02x%02x%02x%02x replays=%d icv_errors=%d local_mic_failures=%d\n",
		   tkip->key_idx, tkip->key_set,
		   (tkip->tx_iv32 >> 24) & 0xff,
		   (tkip->tx_iv32 >> 16) & 0xff,
		   (tkip->tx_iv32 >> 8) & 0xff,
		   tkip->tx_iv32 & 0xff,
		   (tkip->tx_iv16 >> 8) & 0xff,
		   tkip->tx_iv16 & 0xff,
		   (tkip->rx_iv32 >> 24) & 0xff,
		   (tkip->rx_iv32 >> 16) & 0xff,
		   (tkip->rx_iv32 >> 8) & 0xff,
		   tkip->rx_iv32 & 0xff,
		   (tkip->rx_iv16 >> 8) & 0xff,
		   tkip->rx_iv16 & 0xff,
		   tkip->dot11RSNAStatsTKIPReplays,
		   tkip->dot11RSNAStatsTKIPICVErrors,
		   tkip->dot11RSNAStatsTKIPLocalMICFailures);
}
/* lib80211 crypto ops for TKIP: MPDU encrypt/decrypt plus Michael MIC
 * add/verify at the MSDU level.
 */
static struct lib80211_crypto_ops rtllib_crypt_tkip = {
	.name = "R-TKIP",
	.init = rtllib_tkip_init,
	.deinit = rtllib_tkip_deinit,
	.encrypt_mpdu = rtllib_tkip_encrypt,
	.decrypt_mpdu = rtllib_tkip_decrypt,
	.encrypt_msdu = rtllib_michael_mic_add,
	.decrypt_msdu = rtllib_michael_mic_verify,
	.set_key = rtllib_tkip_set_key,
	.get_key = rtllib_tkip_get_key,
	.print_stats = rtllib_tkip_print_stats,
	.extra_mpdu_prefix_len = 4 + 4,	/* IV + ExtIV */
	.extra_mpdu_postfix_len = 4,	/* ICV */
	.extra_msdu_postfix_len = 8,	/* MIC */
	.owner = THIS_MODULE,
};
/* Register the R-TKIP crypto ops with lib80211 on module load. */
static int __init rtllib_crypto_tkip_init(void)
{
	return lib80211_register_crypto_ops(&rtllib_crypt_tkip);
}

/* Unregister on module unload. */
static void __exit rtllib_crypto_tkip_exit(void)
{
	lib80211_unregister_crypto_ops(&rtllib_crypt_tkip);
}

module_init(rtllib_crypto_tkip_init);
module_exit(rtllib_crypto_tkip_exit);

MODULE_LICENSE("GPL");
| linux-master | drivers/staging/rtl8192e/rtllib_crypt_tkip.c |
// SPDX-License-Identifier: GPL-2.0
/* Host AP crypt: host-based CCMP encryption implementation for Host AP driver
*
* Copyright (c) 2003-2004, Jouni Malinen <[email protected]>
*/
#include <linux/module.h>
#include <linux/init.h>
#include <linux/slab.h>
#include <linux/random.h>
#include <linux/skbuff.h>
#include <linux/netdevice.h>
#include <linux/if_ether.h>
#include <linux/if_arp.h>
#include <linux/string.h>
#include <linux/wireless.h>
#include "rtllib.h"
#include <linux/crypto.h>
#include <crypto/aead.h>
#include <linux/scatterlist.h>
#define AES_BLOCK_LEN 16
#define CCMP_HDR_LEN 8		/* PN0 PN1 rsvd keyid PN2 PN3 PN4 PN5 */
#define CCMP_MIC_LEN 8
#define CCMP_TK_LEN 16		/* temporal key length */
#define CCMP_PN_LEN 6		/* packet number (replay counter) */

/* Per-key CCMP state: key material, TX/RX packet numbers (stored most
 * significant byte first), dot11 counters and the AEAD transform.
 */
struct rtllib_ccmp_data {
	u8 key[CCMP_TK_LEN];
	int key_set;		/* non-zero once a key has been installed */

	u8 tx_pn[CCMP_PN_LEN];
	u8 rx_pn[CCMP_PN_LEN];

	u32 dot11rsna_stats_ccmp_format_errors;
	u32 dot11rsna_stats_ccmp_replays;
	u32 dot11rsna_stats_ccmp_decrypt_errors;

	int key_idx;

	struct crypto_aead *tfm;

	/* scratch buffers for virt_to_page() (crypto API) */
	u8 tx_aad[2 * AES_BLOCK_LEN];
	u8 rx_aad[2 * AES_BLOCK_LEN];
};
/* Allocate per-key CCMP state and the "ccm(aes)" AEAD transform.
 * Returns the opaque private pointer, or NULL on failure.
 */
static void *rtllib_ccmp_init(int key_idx)
{
	struct rtllib_ccmp_data *priv;

	priv = kzalloc(sizeof(*priv), GFP_ATOMIC);
	if (!priv)
		return NULL;

	priv->key_idx = key_idx;

	priv->tfm = crypto_alloc_aead("ccm(aes)", 0, CRYPTO_ALG_ASYNC);
	if (IS_ERR(priv->tfm)) {
		pr_debug("Could not allocate crypto API aes\n");
		kfree(priv);
		return NULL;
	}

	return priv;
}
/* Free the AEAD transform (if any) and the per-key state. */
static void rtllib_ccmp_deinit(void *priv)
{
	struct rtllib_ccmp_data *data = priv;

	if (data && data->tfm)
		crypto_free_aead(data->tfm);
	kfree(data);
}
/* Build the CCM nonce/IV and the Additional Authenticated Data block for
 * one MPDU, per IEEE 802.11 CCMP rules.
 *
 * @hdr: 802.11 header of the frame
 * @pn: 6-byte packet number, most significant byte first
 * @iv: output, AES_BLOCK_LEN bytes (flags byte + nonce)
 * @aad: output, up to 2*AES_BLOCK_LEN bytes
 * Returns the AAD length actually used (22, 24, 28 or 30 bytes).
 */
static int ccmp_init_iv_and_aad(struct rtllib_hdr_4addr *hdr,
				u8 *pn, u8 *iv, u8 *aad)
{
	u8 *pos, qc = 0;
	size_t aad_len;
	u16 fc;
	int a4_included, qc_included;

	fc = le16_to_cpu(hdr->frame_ctl);
	/* a 4th address is present only when both ToDS and FromDS are set */
	a4_included = ((fc & (RTLLIB_FCTL_TODS | RTLLIB_FCTL_FROMDS)) ==
		       (RTLLIB_FCTL_TODS | RTLLIB_FCTL_FROMDS));

	/* QoS data subtypes (bit 7 of the subtype field) carry a QC field */
	qc_included = ((WLAN_FC_GET_TYPE(fc) == RTLLIB_FTYPE_DATA) &&
		       (WLAN_FC_GET_STYPE(fc) & 0x80));
	aad_len = 22;
	if (a4_included)
		aad_len += 6;
	if (qc_included) {
		/* QC sits right after addr4 (or where addr4 would be) */
		pos = (u8 *)&hdr->addr4;
		if (a4_included)
			pos += 6;
		qc = *pos & 0x0f;	/* only the TID is authenticated */
		aad_len += 2;
	}

	/* In CCM, the initial vectors (IV) used for CTR mode encryption and CBC
	 * mode authentication are not allowed to collide, yet both are derived
	 * from the same vector. We only set L := 1 here to indicate that the
	 * data size can be represented in (L+1) bytes. The CCM layer will take
	 * care of storing the data length in the top (L+1) bytes and setting
	 * and clearing the other bits as is required to derive the two IVs.
	 */
	iv[0] = 0x1;

	/* Nonce: QC | A2 | PN */
	iv[1] = qc;
	memcpy(iv + 2, hdr->addr2, ETH_ALEN);
	memcpy(iv + 8, pn, CCMP_PN_LEN);

	/* AAD:
	 * FC with bits 4..6 and 11..13 masked to zero; 14 is always one
	 * A1 | A2 | A3
	 * SC with bits 4..15 (seq#) masked to zero
	 * A4 (if present)
	 * QC (if present)
	 */
	pos = (u8 *)hdr;
	aad[0] = pos[0] & 0x8f;
	aad[1] = pos[1] & 0xc7;
	memcpy(&aad[2], &hdr->addr1, ETH_ALEN);
	memcpy(&aad[8], &hdr->addr2, ETH_ALEN);
	memcpy(&aad[14], &hdr->addr3, ETH_ALEN);
	pos = (u8 *)&hdr->seq_ctl;
	aad[20] = pos[0] & 0x0f;	/* fragment number only */
	aad[21] = 0; /* all bits masked */
	memset(aad + 22, 0, 8);
	if (a4_included)
		memcpy(aad + 22, hdr->addr4, ETH_ALEN);
	if (qc_included) {
		aad[a4_included ? 28 : 22] = qc;
		/* rest of QC masked */
	}
	return aad_len;
}
/* CCMP-encrypt an MPDU in place: insert the 8-byte CCMP header (with the
 * incremented PN), then, unless hardware security is handling the frame,
 * run AES-CCM over payload + MIC via the AEAD API.
 *
 * Returns 0 on success, -1 on insufficient head/tailroom, -ENOMEM if the
 * AEAD request cannot be allocated, or the crypto-layer result.
 */
static int rtllib_ccmp_encrypt(struct sk_buff *skb, int hdr_len, void *priv)
{
	struct rtllib_ccmp_data *key = priv;
	int i;
	u8 *pos;
	struct rtllib_hdr_4addr *hdr;
	struct cb_desc *tcb_desc = (struct cb_desc *)(skb->cb +
				   MAX_DEV_ADDR_SIZE);

	if (skb_headroom(skb) < CCMP_HDR_LEN ||
	    skb_tailroom(skb) < CCMP_MIC_LEN ||
	    skb->len < hdr_len)
		return -1;

	/* open a CCMP_HDR_LEN gap between the 802.11 header and payload */
	pos = skb_push(skb, CCMP_HDR_LEN);
	memmove(pos, pos + CCMP_HDR_LEN, hdr_len);
	pos += hdr_len;

	/* increment the 48-bit PN (stored MSB-first) with carry */
	i = CCMP_PN_LEN - 1;
	while (i >= 0) {
		key->tx_pn[i]++;
		if (key->tx_pn[i] != 0)
			break;
		i--;
	}

	/* CCMP header: PN0 PN1 rsvd keyid|ExtIV PN2 PN3 PN4 PN5 */
	*pos++ = key->tx_pn[5];
	*pos++ = key->tx_pn[4];
	*pos++ = 0;
	*pos++ = (key->key_idx << 6) | (1 << 5) /* Ext IV included */;
	*pos++ = key->tx_pn[3];
	*pos++ = key->tx_pn[2];
	*pos++ = key->tx_pn[1];
	*pos++ = key->tx_pn[0];

	hdr = (struct rtllib_hdr_4addr *)skb->data;
	if (!tcb_desc->bHwSec) {
		struct aead_request *req;
		struct scatterlist sg[2];
		u8 *aad = key->tx_aad;
		u8 iv[AES_BLOCK_LEN];
		int aad_len, ret;
		int data_len = skb->len - hdr_len - CCMP_HDR_LEN;

		req = aead_request_alloc(key->tfm, GFP_ATOMIC);
		if (!req)
			return -ENOMEM;

		aad_len = ccmp_init_iv_and_aad(hdr, key->tx_pn, iv, aad);

		/* reserve room for the MIC; CCM writes it after the data */
		skb_put(skb, CCMP_MIC_LEN);

		sg_init_table(sg, 2);
		sg_set_buf(&sg[0], aad, aad_len);
		sg_set_buf(&sg[1], skb->data + hdr_len + CCMP_HDR_LEN,
			   data_len + CCMP_MIC_LEN);

		aead_request_set_callback(req, 0, NULL, NULL);
		aead_request_set_ad(req, aad_len);
		aead_request_set_crypt(req, sg, sg, data_len, iv);

		ret = crypto_aead_encrypt(req);
		aead_request_free(req);

		return ret;
	}
	return 0;
}
/* CCMP-decrypt an MPDU in place: validate the CCMP header (ExtIV flag,
 * key index, key present), check the PN against replay, run AES-CCM
 * decryption/MIC check unless done in hardware, then strip the CCMP
 * header and MIC.
 *
 * Returns the key index (>= 0) on success; negative codes identify the
 * failure: -1 short frame, -2 no ExtIV, -3 key not set, -4 replay,
 * -5 decrypt/MIC failure, -6 key index mismatch, -ENOMEM allocation.
 */
static int rtllib_ccmp_decrypt(struct sk_buff *skb, int hdr_len, void *priv)
{
	struct rtllib_ccmp_data *key = priv;
	u8 keyidx, *pos;
	struct rtllib_hdr_4addr *hdr;
	struct cb_desc *tcb_desc = (struct cb_desc *)(skb->cb +
				   MAX_DEV_ADDR_SIZE);
	u8 pn[6];

	if (skb->len < hdr_len + CCMP_HDR_LEN + CCMP_MIC_LEN) {
		key->dot11rsna_stats_ccmp_format_errors++;
		return -1;
	}

	hdr = (struct rtllib_hdr_4addr *)skb->data;
	pos = skb->data + hdr_len;
	keyidx = pos[3];
	if (!(keyidx & (1 << 5))) {
		if (net_ratelimit()) {
			pr_debug("CCMP: received packet without ExtIV flag from %pM\n",
				 hdr->addr2);
		}
		key->dot11rsna_stats_ccmp_format_errors++;
		return -2;
	}
	keyidx >>= 6;
	if (key->key_idx != keyidx) {
		pr_debug("CCMP: RX tkey->key_idx=%d frame keyidx=%d priv=%p\n",
			 key->key_idx, keyidx, priv);
		return -6;
	}
	if (!key->key_set) {
		if (net_ratelimit()) {
			pr_debug("CCMP: received packet from %pM with keyid=%d that does not have a configured key\n",
				 hdr->addr2, keyidx);
		}
		return -3;
	}

	/* reassemble the PN (MSB first) from the CCMP header byte order */
	pn[0] = pos[7];
	pn[1] = pos[6];
	pn[2] = pos[5];
	pn[3] = pos[4];
	pn[4] = pos[1];
	pn[5] = pos[0];
	pos += 8;

	/* MSB-first layout lets memcmp perform the 48-bit ordering check */
	if (memcmp(pn, key->rx_pn, CCMP_PN_LEN) <= 0) {
		key->dot11rsna_stats_ccmp_replays++;
		return -4;
	}
	if (!tcb_desc->bHwSec) {
		size_t data_len = skb->len - hdr_len - CCMP_HDR_LEN;
		struct aead_request *req;
		struct scatterlist sg[2];
		u8 *aad = key->rx_aad;
		u8 iv[AES_BLOCK_LEN];
		int aad_len, ret;

		req = aead_request_alloc(key->tfm, GFP_ATOMIC);
		if (!req)
			return -ENOMEM;

		aad_len = ccmp_init_iv_and_aad(hdr, pn, iv, aad);

		sg_init_table(sg, 2);
		sg_set_buf(&sg[0], aad, aad_len);
		sg_set_buf(&sg[1], pos, data_len);

		aead_request_set_callback(req, 0, NULL, NULL);
		aead_request_set_ad(req, aad_len);
		aead_request_set_crypt(req, sg, sg, data_len, iv);

		ret = crypto_aead_decrypt(req);
		aead_request_free(req);

		if (ret) {
			if (net_ratelimit()) {
				pr_debug("CCMP: decrypt failed: STA= %pM\n",
					 hdr->addr2);
			}
			key->dot11rsna_stats_ccmp_decrypt_errors++;
			return -5;
		}

		/* commit the PN only after successful verification */
		memcpy(key->rx_pn, pn, CCMP_PN_LEN);
	}
	/* Remove hdr and MIC */
	memmove(skb->data + CCMP_HDR_LEN, skb->data, hdr_len);
	skb_pull(skb, CCMP_HDR_LEN);
	skb_trim(skb, skb->len - CCMP_MIC_LEN);

	return keyidx;
}
/* Install (len == CCMP_TK_LEN), clear (len == 0) or reject a CCMP key.
 *
 * State is reset except for the key index and the AEAD transform; the
 * optional @seq seeds the RX PN (wire order is reversed into MSB-first
 * storage).  Returns 0 on success, -1 on bad length or setkey failure.
 */
static int rtllib_ccmp_set_key(void *key, int len, u8 *seq, void *priv)
{
	struct rtllib_ccmp_data *data = priv;
	int keyidx;
	/* preserve the AEAD transform across the memset below */
	struct crypto_aead *tfm = data->tfm;

	keyidx = data->key_idx;
	memset(data, 0, sizeof(*data));
	data->key_idx = keyidx;
	data->tfm = tfm;
	if (len == CCMP_TK_LEN) {
		memcpy(data->key, key, CCMP_TK_LEN);
		data->key_set = 1;
		if (seq) {
			data->rx_pn[0] = seq[5];
			data->rx_pn[1] = seq[4];
			data->rx_pn[2] = seq[3];
			data->rx_pn[3] = seq[2];
			data->rx_pn[4] = seq[1];
			data->rx_pn[5] = seq[0];
		}
		if (crypto_aead_setauthsize(data->tfm, CCMP_MIC_LEN) ||
		    crypto_aead_setkey(data->tfm, data->key, CCMP_TK_LEN))
			return -1;
	} else if (len == 0) {
		data->key_set = 0;
	} else {
		return -1;
	}

	return 0;
}
/* Copy out the CCMP key and, optionally, the TX PN in wire byte order.
 * Returns CCMP_TK_LEN on success, 0 when no key is set, -1 if @len is
 * too small.
 */
static int rtllib_ccmp_get_key(void *key, int len, u8 *seq, void *priv)
{
	struct rtllib_ccmp_data *data = priv;
	int i;

	if (len < CCMP_TK_LEN)
		return -1;

	if (!data->key_set)
		return 0;

	memcpy(key, data->key, CCMP_TK_LEN);

	if (seq) {
		/* tx_pn is stored MSB first; report it byte-reversed */
		for (i = 0; i < CCMP_PN_LEN; i++)
			seq[i] = data->tx_pn[CCMP_PN_LEN - 1 - i];
	}

	return CCMP_TK_LEN;
}
/* Dump per-key CCMP state and counters to a seq_file.
 * NOTE(review): %pM formats the 6-byte PNs in MAC-address style
 * (aa:bb:cc:dd:ee:ff) — they are packet numbers, not addresses.
 */
static void rtllib_ccmp_print_stats(struct seq_file *m, void *priv)
{
	struct rtllib_ccmp_data *ccmp = priv;

	seq_printf(m,
		   "key[%d] alg=CCMP key_set=%d tx_pn=%pM rx_pn=%pM format_errors=%d replays=%d decrypt_errors=%d\n",
		   ccmp->key_idx, ccmp->key_set,
		   ccmp->tx_pn, ccmp->rx_pn,
		   ccmp->dot11rsna_stats_ccmp_format_errors,
		   ccmp->dot11rsna_stats_ccmp_replays,
		   ccmp->dot11rsna_stats_ccmp_decrypt_errors);
}
/* lib80211 crypto ops for CCMP; no MSDU-level hooks — the MIC is part of
 * the MPDU operation.
 */
static struct lib80211_crypto_ops rtllib_crypt_ccmp = {
	.name = "R-CCMP",
	.init = rtllib_ccmp_init,
	.deinit = rtllib_ccmp_deinit,
	.encrypt_mpdu = rtllib_ccmp_encrypt,
	.decrypt_mpdu = rtllib_ccmp_decrypt,
	.encrypt_msdu = NULL,
	.decrypt_msdu = NULL,
	.set_key = rtllib_ccmp_set_key,
	.get_key = rtllib_ccmp_get_key,
	.print_stats = rtllib_ccmp_print_stats,
	.extra_mpdu_prefix_len = CCMP_HDR_LEN,
	.extra_mpdu_postfix_len = CCMP_MIC_LEN,
	.owner = THIS_MODULE,
};
/* Register the R-CCMP crypto ops with lib80211 on module load. */
static int __init rtllib_crypto_ccmp_init(void)
{
	return lib80211_register_crypto_ops(&rtllib_crypt_ccmp);
}

/* Unregister on module unload. */
static void __exit rtllib_crypto_ccmp_exit(void)
{
	lib80211_unregister_crypto_ops(&rtllib_crypt_ccmp);
}

module_init(rtllib_crypto_ccmp_init);
module_exit(rtllib_crypto_ccmp_exit);

MODULE_LICENSE("GPL");
| linux-master | drivers/staging/rtl8192e/rtllib_crypt_ccmp.c |
// SPDX-License-Identifier: GPL-2.0
/* IEEE 802.11 SoftMAC layer
* Copyright (c) 2005 Andrea Merello <[email protected]>
*
* Mostly extracted from the rtl8180-sa2400 driver for the
* in-kernel generic ieee802.11 stack.
*
* Few lines might be stolen from other part of the rtllib
* stack. Copyright who own it's copyright
*
* WPA code stolen from the ipw2200 driver.
* Copyright who own it's copyright.
*/
#include "rtllib.h"
#include <linux/random.h>
#include <linux/delay.h>
#include <linux/uaccess.h>
#include <linux/etherdevice.h>
#include <linux/ieee80211.h>
#include "dot11d.h"
static void rtllib_sta_wakeup(struct rtllib_device *ieee, short nl);
/* A network is treated as 802.11g-capable when it advertises extended
 * rates or more than the four basic CCK rates.
 */
static short rtllib_is_54g(struct rtllib_network *net)
{
	if (net->rates_ex_len > 0)
		return 1;
	return net->rates_len > 4;
}
/* returns the total length needed for placing the RATE MFIE
* tag and the EXTENDED RATE MFIE tag if needed.
* It encludes two bytes per tag for the tag itself and its len
*/
static unsigned int rtllib_MFIE_rate_len(struct rtllib_device *ieee)
{
	/* each IE costs two extra bytes for the tag id and length fields */
	return (RTLLIB_CCK_RATE_LEN + 2) + (RTLLIB_OFDM_RATE_LEN + 2);
}
/* place the MFIE rate, tag to the memory (double) pointed.
* Then it updates the pointer so that
* it points after the new MFIE tag added.
*/
static void rtllib_MFIE_Brate(struct rtllib_device *ieee, u8 **tag_p)
{
	/* the four mandatory CCK rates, all flagged as basic */
	static const u8 basic_cck_rates[] = {
		RTLLIB_BASIC_RATE_MASK | RTLLIB_CCK_RATE_1MB,
		RTLLIB_BASIC_RATE_MASK | RTLLIB_CCK_RATE_2MB,
		RTLLIB_BASIC_RATE_MASK | RTLLIB_CCK_RATE_5MB,
		RTLLIB_BASIC_RATE_MASK | RTLLIB_CCK_RATE_11MB,
	};
	u8 *tag = *tag_p;

	*tag++ = MFIE_TYPE_RATES;
	*tag++ = sizeof(basic_cck_rates);
	memcpy(tag, basic_cck_rates, sizeof(basic_cck_rates));

	/* We may add an option for custom rates that specific HW
	 * might support
	 */
	*tag_p = tag + sizeof(basic_cck_rates);
}
static void rtllib_MFIE_Grate(struct rtllib_device *ieee, u8 **tag_p)
{
	/* the eight OFDM (802.11g) rates, all flagged as basic */
	static const u8 ofdm_rates[] = {
		RTLLIB_BASIC_RATE_MASK | RTLLIB_OFDM_RATE_6MB,
		RTLLIB_BASIC_RATE_MASK | RTLLIB_OFDM_RATE_9MB,
		RTLLIB_BASIC_RATE_MASK | RTLLIB_OFDM_RATE_12MB,
		RTLLIB_BASIC_RATE_MASK | RTLLIB_OFDM_RATE_18MB,
		RTLLIB_BASIC_RATE_MASK | RTLLIB_OFDM_RATE_24MB,
		RTLLIB_BASIC_RATE_MASK | RTLLIB_OFDM_RATE_36MB,
		RTLLIB_BASIC_RATE_MASK | RTLLIB_OFDM_RATE_48MB,
		RTLLIB_BASIC_RATE_MASK | RTLLIB_OFDM_RATE_54MB,
	};
	u8 *tag = *tag_p;

	*tag++ = MFIE_TYPE_RATES_EX;
	*tag++ = sizeof(ofdm_rates);
	memcpy(tag, ofdm_rates, sizeof(ofdm_rates));

	/* We may add an option for custom rates that specific HW might
	 * support
	 */
	*tag_p = tag + sizeof(ofdm_rates);
}
/* Append a WMM Information Element (Microsoft OUI 00:50:f2, type 2) and
 * advance *tag_p past it.  The last byte carries the max service period
 * length / U-APSD flags.
 */
static void rtllib_WMM_Info(struct rtllib_device *ieee, u8 **tag_p)
{
	u8 *tag = *tag_p;

	*tag++ = MFIE_TYPE_GENERIC;
	*tag++ = 7;
	*tag++ = 0x00;	/* Microsoft OUI */
	*tag++ = 0x50;
	*tag++ = 0xf2;
	*tag++ = 0x02;	/* WMM */
	*tag++ = 0x00;	/* WMM information element */
	*tag++ = 0x01;	/* version 1 */
	*tag++ = MAX_SP_Len;

	*tag_p = tag;
}
/* Append a Realtek (OUI 00:e0:4c) vendor-specific turbo-mode IE and
 * advance *tag_p past it.
 * NOTE(review): netdev_alert is a very high log level for a routine IE
 * insertion — consider netdev_dbg.
 */
static void rtllib_TURBO_Info(struct rtllib_device *ieee, u8 **tag_p)
{
	u8 *tag = *tag_p;

	*tag++ = MFIE_TYPE_GENERIC;
	*tag++ = 7;
	*tag++ = 0x00;	/* Realtek OUI */
	*tag++ = 0xe0;
	*tag++ = 0x4c;
	*tag++ = 0x01;
	*tag++ = 0x02;
	*tag++ = 0x11;
	*tag++ = 0x00;

	*tag_p = tag;
	netdev_alert(ieee->dev, "This is enable turbo mode IE process\n");
}
/* Push a management frame onto the fixed-size ring buffer used while the
 * TX queue is stopped.  Deliberately lossy: a full ring overwrites the
 * oldest entry (see the disabled check below).
 */
static void enqueue_mgmt(struct rtllib_device *ieee, struct sk_buff *skb)
{
	int nh;

	nh = (ieee->mgmt_queue_head + 1) % MGMT_QUEUE_NUM;

/* if the queue is full but we have newer frames then
 * just overwrites the oldest.
 *
 * if (nh == ieee->mgmt_queue_tail)
 *		return -1;
 */
	ieee->mgmt_queue_head = nh;
	ieee->mgmt_queue_ring[nh] = skb;
}
/* Reset the management-frame ring to empty (head == tail). */
static void init_mgmt_queue(struct rtllib_device *ieee)
{
	ieee->mgmt_queue_head = 0;
	ieee->mgmt_queue_tail = 0;
}
/* Return the lowest advertised non-CCK basic rate of the current network,
 * or 12 (6 Mb/s) when none is found.
 */
u8 MgntQuery_TxRateExcludeCCKRates(struct rtllib_device *ieee)
{
	u8 lowest = 0;
	u16 i;

	for (i = 0; i < ieee->current_network.rates_len; i++) {
		u8 rate = ieee->current_network.rates[i] & 0x7F;

		if (rtllib_is_cck_rate(rate))
			continue;
		if (lowest == 0 || rate < lowest)
			lowest = rate;
	}

	if (lowest == 0) {
		lowest = 12;	/* fall back to 6 Mb/s */
		netdev_info(ieee->dev, "No BasicRate found!!\n");
	}
	return lowest;
}
/* Pick the TX rate for management frames: 6M (0x0c) for IOT peers that
 * reject CCK management frames, otherwise the configured basic rate;
 * fall back to 6M (11n without CCK support) or 1M (0x02).
 */
static u8 MgntQuery_MgntFrameTxRate(struct rtllib_device *ieee)
{
	struct rt_hi_throughput *ht_info = ieee->ht_info;
	u8 rate;

	rate = (ht_info->iot_action & HT_IOT_ACT_MGNT_USE_CCK_6M) ?
		0x0c : (ieee->basic_rate & 0x7f);

	if (rate == 0) {
		if (ieee->mode == WIRELESS_MODE_N_24G && !ht_info->bCurSuppCCK)
			rate = 0x0c;	/* 6 Mb/s */
		else
			rate = 0x02;	/* 1 Mb/s */
	}

	return rate;
}
/* Transmit a management frame: wake the STA if dozing, select the HW
 * queue, stamp rate/sequence number and hand the skb to the driver (or
 * park it in a wait queue when the NIC/TX path is congested or stopped).
 *
 * NOTE(review): the beacon test compares the whole frame_ctl word against
 * RTLLIB_STYPE_BEACON without masking out flag bits — it only matches a
 * beacon with no flags set.  Confirm against how beacons are built here.
 */
inline void softmac_mgmt_xmit(struct sk_buff *skb, struct rtllib_device *ieee)
{
	unsigned long flags;
	short single = ieee->softmac_features & IEEE_SOFTMAC_SINGLE_QUEUE;
	struct rtllib_hdr_3addr  *header =
		(struct rtllib_hdr_3addr  *)skb->data;
	struct cb_desc *tcb_desc = (struct cb_desc *)(skb->cb + 8);

	spin_lock_irqsave(&ieee->lock, flags);

	/* called with 2nd param 0, no mgmt lock required */
	rtllib_sta_wakeup(ieee, 0);

	if (le16_to_cpu(header->frame_ctl) == RTLLIB_STYPE_BEACON)
		tcb_desc->queue_index = BEACON_QUEUE;
	else
		tcb_desc->queue_index = MGNT_QUEUE;

	if (ieee->disable_mgnt_queue)
		tcb_desc->queue_index = HIGH_QUEUE;

	tcb_desc->data_rate = MgntQuery_MgntFrameTxRate(ieee);
	tcb_desc->ratr_index = 7;
	tcb_desc->tx_dis_rate_fallback = 1;
	tcb_desc->tx_use_drv_assinged_rate = 1;
	if (single) {
		if (ieee->queue_stop) {
			/* TX stopped: ring-buffer the frame for later */
			enqueue_mgmt(ieee, skb);
		} else {
			/* sequence number lives in the upper 12 bits */
			header->seq_ctl = cpu_to_le16(ieee->seq_ctrl[0] << 4);

			if (ieee->seq_ctrl[0] == 0xFFF)
				ieee->seq_ctrl[0] = 0;
			else
				ieee->seq_ctrl[0]++;

			/* avoid watchdog triggers */
			ieee->softmac_data_hard_start_xmit(skb, ieee->dev,
							   ieee->basic_rate);
		}

		spin_unlock_irqrestore(&ieee->lock, flags);
	} else {
		/* multi-queue path: sequence numbering is protected by the
		 * dedicated management TX lock instead
		 */
		spin_unlock_irqrestore(&ieee->lock, flags);
		spin_lock_irqsave(&ieee->mgmt_tx_lock, flags);

		header->seq_ctl = cpu_to_le16(ieee->seq_ctrl[0] << 4);

		if (ieee->seq_ctrl[0] == 0xFFF)
			ieee->seq_ctrl[0] = 0;
		else
			ieee->seq_ctrl[0]++;

		/* check whether the managed packet queued greater than 5 */
		if (!ieee->check_nic_enough_desc(ieee->dev,
						 tcb_desc->queue_index) ||
		    skb_queue_len(&ieee->skb_waitQ[tcb_desc->queue_index]) ||
		    ieee->queue_stop) {
			/* insert the skb packet to the management queue
			 *
			 * as for the completion function, it does not need
			 * to check it any more.
			 */
			netdev_info(ieee->dev,
				    "%s():insert to waitqueue, queue_index:%d!\n",
				    __func__, tcb_desc->queue_index);
			skb_queue_tail(&ieee->skb_waitQ[tcb_desc->queue_index],
				       skb);
		} else {
			ieee->softmac_hard_start_xmit(skb, ieee->dev);
		}
		spin_unlock_irqrestore(&ieee->mgmt_tx_lock, flags);
	}
}
/* Transmit a power-save related management/control frame (e.g. PS-Poll,
 * null frames) directly to the driver, bypassing the wait queues.
 * Control frames (RTLLIB_FTYPE_CTL) carry no sequence number.
 */
static inline void
softmac_ps_mgmt_xmit(struct sk_buff *skb,
		     struct rtllib_device *ieee)
{
	short single = ieee->softmac_features & IEEE_SOFTMAC_SINGLE_QUEUE;
	struct rtllib_hdr_3addr  *header =
		(struct rtllib_hdr_3addr  *)skb->data;
	u16 fc, type, stype;
	struct cb_desc *tcb_desc = (struct cb_desc *)(skb->cb + 8);

	fc = le16_to_cpu(header->frame_ctl);
	type = WLAN_FC_GET_TYPE(fc);
	stype = WLAN_FC_GET_STYPE(fc);

	if (stype != RTLLIB_STYPE_PSPOLL)
		tcb_desc->queue_index = MGNT_QUEUE;
	else
		tcb_desc->queue_index = HIGH_QUEUE;

	if (ieee->disable_mgnt_queue)
		tcb_desc->queue_index = HIGH_QUEUE;

	tcb_desc->data_rate = MgntQuery_MgntFrameTxRate(ieee);
	tcb_desc->ratr_index = 7;
	tcb_desc->tx_dis_rate_fallback = 1;
	tcb_desc->tx_use_drv_assinged_rate = 1;
	if (single) {
		if (type != RTLLIB_FTYPE_CTL) {
			header->seq_ctl = cpu_to_le16(ieee->seq_ctrl[0] << 4);

			if (ieee->seq_ctrl[0] == 0xFFF)
				ieee->seq_ctrl[0] = 0;
			else
				ieee->seq_ctrl[0]++;
		}
		/* avoid watchdog triggers */
		ieee->softmac_data_hard_start_xmit(skb, ieee->dev,
						   ieee->basic_rate);

	} else {
		if (type != RTLLIB_FTYPE_CTL) {
			header->seq_ctl = cpu_to_le16(ieee->seq_ctrl[0] << 4);

			if (ieee->seq_ctrl[0] == 0xFFF)
				ieee->seq_ctrl[0] = 0;
			else
				ieee->seq_ctrl[0]++;
		}
		ieee->softmac_hard_start_xmit(skb, ieee->dev);
	}
}
/* Build a broadcast probe request carrying the current SSID plus the
 * basic CCK and extended OFDM rate IEs.  Returns NULL on allocation
 * failure; the caller owns the returned skb.
 */
static inline struct sk_buff *rtllib_probe_req(struct rtllib_device *ieee)
{
	unsigned int len, rate_len;
	u8 *tag;
	struct sk_buff *skb;
	struct rtllib_probe_request *req;

	len = ieee->current_network.ssid_len;

	rate_len = rtllib_MFIE_rate_len(ieee);

	/* header + SSID IE (2 + len) + rate IEs + driver headroom */
	skb = dev_alloc_skb(sizeof(struct rtllib_probe_request) +
			    2 + len + rate_len + ieee->tx_headroom);

	if (!skb)
		return NULL;

	skb_reserve(skb, ieee->tx_headroom);

	req = skb_put(skb, sizeof(struct rtllib_probe_request));
	req->header.frame_ctl = cpu_to_le16(RTLLIB_STYPE_PROBE_REQ);
	req->header.duration_id = 0;

	eth_broadcast_addr(req->header.addr1);
	ether_addr_copy(req->header.addr2, ieee->dev->dev_addr);
	eth_broadcast_addr(req->header.addr3);

	tag = skb_put(skb, len + 2 + rate_len);

	*tag++ = MFIE_TYPE_SSID;
	*tag++ = len;
	memcpy(tag, ieee->current_network.ssid, len);
	tag += len;

	rtllib_MFIE_Brate(ieee, &tag);
	rtllib_MFIE_Grate(ieee, &tag);

	return skb;
}
static struct sk_buff *rtllib_get_beacon_(struct rtllib_device *ieee);
/* Transmit one beacon and re-arm the beacon timer for the next interval
 * (5 ms early).  No-op while the interface is down.
 */
static void rtllib_send_beacon(struct rtllib_device *ieee)
{
	struct sk_buff *skb;

	if (!ieee->ieee_up)
		return;
	skb = rtllib_get_beacon_(ieee);

	if (skb) {
		softmac_mgmt_xmit(skb, ieee);
		ieee->softmac_stats.tx_beacons++;
	}

	if (ieee->beacon_txing && ieee->ieee_up)
		mod_timer(&ieee->beacon_timer, jiffies +
			  (msecs_to_jiffies(ieee->current_network.beacon_interval - 5)));
}
/* Beacon timer callback: send the next beacon under the beacon lock. */
static void rtllib_send_beacon_cb(struct timer_list *t)
{
	struct rtllib_device *ieee =
		from_timer(ieee, t, beacon_timer);
	unsigned long flags;

	spin_lock_irqsave(&ieee->beacon_lock, flags);
	rtllib_send_beacon(ieee);
	spin_unlock_irqrestore(&ieee->beacon_lock, flags);
}
/* Enables network monitor mode, all rx packets will be received. */
void rtllib_EnableNetMonitorMode(struct net_device *dev,
				 bool bInitState)
{
	struct rtllib_device *ieee = netdev_priv_rsl(dev);

	netdev_info(dev, "========>Enter Monitor Mode\n");

	/* accept frames for any destination address */
	ieee->AllowAllDestAddrHandler(dev, true, !bInitState);
}
/* Disables network monitor mode. Only packets destinated to
* us will be received.
*/
void rtllib_DisableNetMonitorMode(struct net_device *dev,
				  bool bInitState)
{
	struct rtllib_device *ieee = netdev_priv_rsl(dev);

	netdev_info(dev, "========>Exit Monitor Mode\n");

	/* restore destination-address filtering */
	ieee->AllowAllDestAddrHandler(dev, false, !bInitState);
}
/* Enables the specialized promiscuous mode required by Intel.
* In this mode, Intel intends to hear traffics from/to other STAs in the
* same BSS. Therefore we don't have to disable checking BSSID and we only need
* to allow all dest. BUT: if we enable checking BSSID then we can't recv
* packets from other STA.
*/
void rtllib_EnableIntelPromiscuousMode(struct net_device *dev,
				       bool bInitState)
{
	/* keep BSSID filtering off so frames of other STAs are heard */
	bool bFilterOutNonAssociatedBSSID = false;

	struct rtllib_device *ieee = netdev_priv_rsl(dev);

	netdev_info(dev, "========>Enter Intel Promiscuous Mode\n");

	ieee->AllowAllDestAddrHandler(dev, true, !bInitState);
	ieee->SetHwRegHandler(dev, HW_VAR_CECHK_BSSID,
			      (u8 *)&bFilterOutNonAssociatedBSSID);

	ieee->net_promiscuous_md = true;
}
EXPORT_SYMBOL(rtllib_EnableIntelPromiscuousMode);
/* Disables the specialized promiscuous mode required by Intel.
* See MgntEnableIntelPromiscuousMode for detail.
*/
void rtllib_DisableIntelPromiscuousMode(struct net_device *dev,
					bool bInitState)
{
	/* re-enable BSSID filtering */
	bool bFilterOutNonAssociatedBSSID = true;

	struct rtllib_device *ieee = netdev_priv_rsl(dev);

	netdev_info(dev, "========>Exit Intel Promiscuous Mode\n");

	ieee->AllowAllDestAddrHandler(dev, false, !bInitState);
	ieee->SetHwRegHandler(dev, HW_VAR_CECHK_BSSID,
			      (u8 *)&bFilterOutNonAssociatedBSSID);

	ieee->net_promiscuous_md = false;
}
EXPORT_SYMBOL(rtllib_DisableIntelPromiscuousMode);
/* Build and transmit one probe request; silently skips on allocation
 * failure.
 */
static void rtllib_send_probe(struct rtllib_device *ieee)
{
	struct sk_buff *skb;

	skb = rtllib_probe_req(ieee);
	if (skb) {
		softmac_mgmt_xmit(skb, ieee);
		ieee->softmac_stats.tx_probe_rq++;
	}
}
/* During active scans send two probe requests back-to-back (common
 * practice to compensate for frame loss).
 */
static void rtllib_send_probe_requests(struct rtllib_device *ieee)
{
	if (ieee->active_scan && (ieee->softmac_features &
	    IEEE_SOFTMAC_PROBERQ)) {
		rtllib_send_probe(ieee);
		rtllib_send_probe(ieee);
	}
}
/* Refresh the local active-channel map from the 802.11d channel map. */
static void rtllib_update_active_chan_map(struct rtllib_device *ieee)
{
	memcpy(ieee->active_channel_map, GET_DOT11D_INFO(ieee)->channel_map,
	       MAX_CHANNEL_NUMBER + 1);
}
/* this performs syncro scan blocking the caller until all channels
* in the allowed channel map has been checked.
*/
/* Synchronous scan: blocks the caller, stepping through every allowed
 * channel, probing on each (when value == 1) and sleeping between hops.
 * Aborts early when we become linked or sync_scan_hurryup is set.
 * Sends SIOCGIWSCAN to user space when done.
 */
static void rtllib_softmac_scan_syncro(struct rtllib_device *ieee)
{
	union iwreq_data wrqu;
	short ch = 0;

	rtllib_update_active_chan_map(ieee);

	ieee->be_scan_inprogress = true;

	mutex_lock(&ieee->scan_mutex);

	while (1) {
		do {
			ch++;
			if (ch > MAX_CHANNEL_NUMBER)
				goto out; /* scan completed */
		} while (!ieee->active_channel_map[ch]);

		/* this function can be called in two situations
		 * 1- We have switched to ad-hoc mode and we are
		 *    performing a complete syncro scan before conclude
		 *    there are no interesting cell and to create a
		 *    new one. In this case the link state is
		 *    MAC80211_NOLINK until we found an interesting cell.
		 *    If so the ieee8021_new_net, called by the RX path
		 *    will set the state to MAC80211_LINKED, so we stop
		 *    scanning
		 * 2- We are linked and the root uses run iwlist scan.
		 *    So we switch to MAC80211_LINKED_SCANNING to remember
		 *    that we are still logically linked (not interested in
		 *    new network events, despite for updating the net list,
		 *    but we are temporarly 'unlinked' as the driver shall
		 *    not filter RX frames and the channel is changing.
		 * So the only situation in which are interested is to check
		 * if the state become LINKED because of the #1 situation
		 */
		if (ieee->link_state == MAC80211_LINKED)
			goto out;
		if (ieee->sync_scan_hurryup) {
			netdev_info(ieee->dev,
				    "============>sync_scan_hurryup out\n");
			goto out;
		}

		ieee->set_chan(ieee->dev, ch);
		if (ieee->active_channel_map[ch] == 1)
			rtllib_send_probe_requests(ieee);

		/* this prevent excessive time wait when we
		 * need to wait for a syncro scan to end..
		 */
		msleep_interruptible_rsl(RTLLIB_SOFTMAC_SCAN_TIME);
	}
out:
	ieee->actscanning = false;
	ieee->sync_scan_hurryup = 0;

	if (ieee->link_state >= MAC80211_LINKED) {
		if (IS_DOT11D_ENABLE(ieee))
			dot11d_scan_complete(ieee);
	}
	mutex_unlock(&ieee->scan_mutex);

	ieee->be_scan_inprogress = false;

	/* notify user space that scan results are available */
	memset(&wrqu, 0, sizeof(wrqu));
	wireless_send_event(ieee->dev, SIOCGIWSCAN, &wrqu, NULL);
}
/* Asynchronous scan work item: hop to the next allowed channel, probe,
 * and re-queue itself until scanning_continue is cleared or all channels
 * have been visited (scan_watch_dog).  Restores the original channel on
 * completion.
 */
static void rtllib_softmac_scan_wq(void *data)
{
	struct rtllib_device *ieee = container_of_dwork_rsl(data,
				     struct rtllib_device, softmac_scan_wq);
	u8 last_channel = ieee->current_network.channel;

	rtllib_update_active_chan_map(ieee);

	if (!ieee->ieee_up)
		return;
	if (rtllib_act_scanning(ieee, true))
		return;

	mutex_lock(&ieee->scan_mutex);

	if (ieee->rf_power_state == rf_off) {
		netdev_info(ieee->dev,
			    "======>%s():rf state is rf_off, return\n",
			    __func__);
		goto out1;
	}

	do {
		ieee->current_network.channel =
			(ieee->current_network.channel + 1) %
			MAX_CHANNEL_NUMBER;
		if (ieee->scan_watch_dog++ > MAX_CHANNEL_NUMBER) {
			if (!ieee->active_channel_map[ieee->current_network.channel])
				ieee->current_network.channel = 6;
			goto out; /* no good chans */
		}
	} while (!ieee->active_channel_map[ieee->current_network.channel]);

	if (ieee->scanning_continue == 0)
		goto out;

	ieee->set_chan(ieee->dev, ieee->current_network.channel);

	if (ieee->active_channel_map[ieee->current_network.channel] == 1)
		rtllib_send_probe_requests(ieee);

	/* re-arm ourselves for the next hop */
	schedule_delayed_work(&ieee->softmac_scan_wq,
			      msecs_to_jiffies(RTLLIB_SOFTMAC_SCAN_TIME));

	mutex_unlock(&ieee->scan_mutex);
	return;

out:
	if (IS_DOT11D_ENABLE(ieee))
		dot11d_scan_complete(ieee);
	ieee->current_network.channel = last_channel;

out1:
	ieee->actscanning = false;
	ieee->scan_watch_dog = 0;
	ieee->scanning_continue = 0;
	mutex_unlock(&ieee->scan_mutex);
}
/* Mark beaconing active and send the first beacon (which re-arms the
 * beacon timer), under the beacon lock.
 */
static void rtllib_beacons_start(struct rtllib_device *ieee)
{
	unsigned long flags;

	spin_lock_irqsave(&ieee->beacon_lock, flags);

	ieee->beacon_txing = 1;
	rtllib_send_beacon(ieee);

	spin_unlock_irqrestore(&ieee->beacon_lock, flags);
}
/* Stop beaconing: clear the flag under the lock, then wait for a possibly
 * running timer callback to finish (del_timer_sync outside the lock to
 * avoid deadlocking against rtllib_send_beacon_cb).
 */
static void rtllib_beacons_stop(struct rtllib_device *ieee)
{
	unsigned long flags;

	spin_lock_irqsave(&ieee->beacon_lock, flags);

	ieee->beacon_txing = 0;

	spin_unlock_irqrestore(&ieee->beacon_lock, flags);
	del_timer_sync(&ieee->beacon_timer);
}
/* Stop hardware beaconing, and software beaconing too when the softmac
 * owns beacon generation.
 */
void rtllib_stop_send_beacons(struct rtllib_device *ieee)
{
	ieee->stop_send_beacons(ieee->dev);
	if (ieee->softmac_features & IEEE_SOFTMAC_BEACONS)
		rtllib_beacons_stop(ieee);
}
EXPORT_SYMBOL(rtllib_stop_send_beacons);
/* Start hardware beaconing, and software beaconing too when the softmac
 * owns beacon generation.
 */
void rtllib_start_send_beacons(struct rtllib_device *ieee)
{
	ieee->start_send_beacons(ieee->dev);
	if (ieee->softmac_features & IEEE_SOFTMAC_BEACONS)
		rtllib_beacons_start(ieee);
}
EXPORT_SYMBOL(rtllib_start_send_beacons);
/* Cancel an in-progress asynchronous scan.  The mutex is dropped before
 * cancel_delayed_work_sync because the work item itself takes scan_mutex.
 */
static void rtllib_softmac_stop_scan(struct rtllib_device *ieee)
{
	mutex_lock(&ieee->scan_mutex);
	ieee->scan_watch_dog = 0;
	if (ieee->scanning_continue == 1) {
		ieee->scanning_continue = 0;
		ieee->actscanning = false;
		mutex_unlock(&ieee->scan_mutex);
		cancel_delayed_work_sync(&ieee->softmac_scan_wq);
	} else {
		mutex_unlock(&ieee->scan_mutex);
	}
}
/* Public wrapper: stop scanning when the softmac drives the scan. */
void rtllib_stop_scan(struct rtllib_device *ieee)
{
	if (ieee->softmac_features & IEEE_SOFTMAC_SCAN)
		rtllib_softmac_stop_scan(ieee);
}
EXPORT_SYMBOL(rtllib_stop_scan);
/* Ask a running synchronous scan to bail out at its next channel hop. */
void rtllib_stop_scan_syncro(struct rtllib_device *ieee)
{
	if (ieee->softmac_features & IEEE_SOFTMAC_SCAN)
		ieee->sync_scan_hurryup = 1;
}
EXPORT_SYMBOL(rtllib_stop_scan_syncro);
/* Return whether a scan is in progress.  With softmac-driven scanning,
 * @sync_scan restricts the check to the synchronous scanner; otherwise
 * the generic STATUS_SCANNING bit is consulted.
 */
bool rtllib_act_scanning(struct rtllib_device *ieee, bool sync_scan)
{
	if (!(ieee->softmac_features & IEEE_SOFTMAC_SCAN))
		return test_bit(STATUS_SCANNING, &ieee->status);

	if (sync_scan)
		return ieee->be_scan_inprogress;

	return ieee->actscanning || ieee->be_scan_inprogress;
}
EXPORT_SYMBOL(rtllib_act_scanning);
/* called with ieee->lock held */
/* Kick off an asynchronous scan (leaves IPS first, resets the 802.11d
 * country-IE watchdog) unless one is already running.
 */
static void rtllib_start_scan(struct rtllib_device *ieee)
{
	ieee->rtllib_ips_leave_wq(ieee->dev);

	if (IS_DOT11D_ENABLE(ieee)) {
		if (IS_COUNTRY_IE_VALID(ieee))
			RESET_CIE_WATCHDOG(ieee);
	}
	if (ieee->softmac_features & IEEE_SOFTMAC_SCAN) {
		if (ieee->scanning_continue == 0) {
			ieee->actscanning = true;
			ieee->scanning_continue = 1;
			schedule_delayed_work(&ieee->softmac_scan_wq, 0);
		}
	}
}
/* called with wx_mutex held */
/* Run a blocking scan over all allowed channels (see
 * rtllib_softmac_scan_syncro); resets the 802.11d watchdog first.
 */
void rtllib_start_scan_syncro(struct rtllib_device *ieee)
{
	if (IS_DOT11D_ENABLE(ieee)) {
		if (IS_COUNTRY_IE_VALID(ieee))
			RESET_CIE_WATCHDOG(ieee);
	}
	ieee->sync_scan_hurryup = 0;
	if (ieee->softmac_features & IEEE_SOFTMAC_SCAN)
		rtllib_softmac_scan_syncro(ieee);
}
EXPORT_SYMBOL(rtllib_start_scan_syncro);
/* Build an authentication request frame towards @beacon's BSSID.
 * @challengelen reserves room for a shared-key challenge (and sets the
 * WEP bit in the frame control).  Returns NULL on allocation failure.
 *
 * Fixes: the little-endian 'algorithm' field was assigned the host-order
 * constant for the open-system cases (missing cpu_to_le16, a sparse
 * warning — harmless only because WLAN_AUTH_OPEN == 0) and was left
 * *uninitialized* for any auth_mode other than 0/1/2.  All paths now
 * store a properly annotated value, defaulting to open system.
 */
static inline struct sk_buff *
rtllib_authentication_req(struct rtllib_network *beacon,
			  struct rtllib_device *ieee,
			  int challengelen, u8 *daddr)
{
	struct sk_buff *skb;
	struct rtllib_authentication *auth;
	int len;

	len = sizeof(struct rtllib_authentication) + challengelen +
	      ieee->tx_headroom + 4;
	skb = dev_alloc_skb(len);

	if (!skb)
		return NULL;

	skb_reserve(skb, ieee->tx_headroom);

	auth = skb_put(skb, sizeof(struct rtllib_authentication));

	auth->header.frame_ctl = cpu_to_le16(RTLLIB_STYPE_AUTH);
	if (challengelen)
		auth->header.frame_ctl |= cpu_to_le16(RTLLIB_FCTL_WEP);

	auth->header.duration_id = cpu_to_le16(0x013a);
	ether_addr_copy(auth->header.addr1, beacon->bssid);
	ether_addr_copy(auth->header.addr2, ieee->dev->dev_addr);
	ether_addr_copy(auth->header.addr3, beacon->bssid);

	if (ieee->auth_mode == 1)
		auth->algorithm = cpu_to_le16(WLAN_AUTH_SHARED_KEY);
	else
		auth->algorithm = cpu_to_le16(WLAN_AUTH_OPEN);

	auth->transaction = cpu_to_le16(ieee->associate_seq);
	ieee->associate_seq++;

	auth->status = cpu_to_le16(WLAN_STATUS_SUCCESS);

	return skb;
}
/* Build a probe response (beacon-format frame) describing the current
 * network, addressed to @dest.  The frame carries SSID, rates, DS
 * parameter set and, as applicable, IBSS ATIM, ERP, extended rates and
 * the WPA IE.  HT/RT2RT element buffers are prepared here but appended
 * elsewhere.  Returns NULL on allocation failure; caller transmits and
 * owns the skb.
 */
static struct sk_buff *rtllib_probe_resp(struct rtllib_device *ieee,
					 const u8 *dest)
{
	u8 *tag;
	int beacon_size;
	struct rtllib_probe_response *beacon_buf;
	struct sk_buff *skb = NULL;
	int encrypt;
	int atim_len, erp_len;
	struct lib80211_crypt_data *crypt;
	char *ssid = ieee->current_network.ssid;
	int ssid_len = ieee->current_network.ssid_len;
	int rate_len = ieee->current_network.rates_len + 2;
	int rate_ex_len = ieee->current_network.rates_ex_len;
	int wpa_ie_len = ieee->wpa_ie_len;
	u8 erpinfo_content = 0;
	u8 *tmp_ht_cap_buf = NULL;
	u8 tmp_ht_cap_len = 0;
	u8 *tmp_ht_info_buf = NULL;
	u8 tmp_ht_info_len = 0;
	struct rt_hi_throughput *ht_info = ieee->ht_info;
	u8 *tmp_generic_ie_buf = NULL;
	u8 tmp_generic_ie_len = 0;

	if (rate_ex_len > 0)
		rate_ex_len += 2;	/* element ID + length octets */

	/* IBSS networks carry a 4-byte ATIM window element. */
	if (ieee->current_network.capability & WLAN_CAPABILITY_IBSS)
		atim_len = 4;
	else
		atim_len = 0;

	/* ERP info is present in G mode, or in N(2.4) with CCK enabled. */
	if ((ieee->current_network.mode == WIRELESS_MODE_G) ||
	    (ieee->current_network.mode == WIRELESS_MODE_N_24G &&
	    ieee->ht_info->bCurSuppCCK)) {
		erp_len = 3;
		erpinfo_content = 0;
		if (ieee->current_network.buseprotection)
			erpinfo_content |= ERP_UseProtection;
	} else {
		erp_len = 0;
	}

	/* Privacy bit: WEP in use, or any WPA IE configured. */
	crypt = ieee->crypt_info.crypt[ieee->crypt_info.tx_keyidx];
	encrypt = crypt && crypt->ops &&
		  ((strcmp(crypt->ops->name, "R-WEP") == 0 || wpa_ie_len));

	if (ieee->ht_info->bCurrentHTSupport) {
		tmp_ht_cap_buf = (u8 *)&(ieee->ht_info->SelfHTCap);
		tmp_ht_cap_len = sizeof(ieee->ht_info->SelfHTCap);
		tmp_ht_info_buf = (u8 *)&(ieee->ht_info->SelfHTInfo);
		tmp_ht_info_len = sizeof(ieee->ht_info->SelfHTInfo);
		HTConstructCapabilityElement(ieee, tmp_ht_cap_buf,
					     &tmp_ht_cap_len, encrypt, false);
		HTConstructInfoElement(ieee, tmp_ht_info_buf, &tmp_ht_info_len,
				       encrypt);

		if (ht_info->reg_rt2rt_aggregation) {
			tmp_generic_ie_buf = ieee->ht_info->sz_rt2rt_agg_buf;
			tmp_generic_ie_len =
				 sizeof(ieee->ht_info->sz_rt2rt_agg_buf);
			HTConstructRT2RTAggElement(ieee, tmp_generic_ie_buf,
						   &tmp_generic_ie_len);
		}
	}

	/* 2 = SSID element header, 3 = DS parameter set element. */
	beacon_size = sizeof(struct rtllib_probe_response) + 2 +
		ssid_len + 3 + rate_len + rate_ex_len + atim_len + erp_len
		+ wpa_ie_len + ieee->tx_headroom;
	skb = dev_alloc_skb(beacon_size);
	if (!skb)
		return NULL;

	skb_reserve(skb, ieee->tx_headroom);

	beacon_buf = skb_put(skb, (beacon_size - ieee->tx_headroom));
	ether_addr_copy(beacon_buf->header.addr1, dest);
	ether_addr_copy(beacon_buf->header.addr2, ieee->dev->dev_addr);
	ether_addr_copy(beacon_buf->header.addr3, ieee->current_network.bssid);

	beacon_buf->header.duration_id = 0;
	beacon_buf->beacon_interval =
		cpu_to_le16(ieee->current_network.beacon_interval);
	beacon_buf->capability =
		cpu_to_le16(ieee->current_network.capability &
		WLAN_CAPABILITY_IBSS);
	beacon_buf->capability |=
		cpu_to_le16(ieee->current_network.capability &
		WLAN_CAPABILITY_SHORT_PREAMBLE);

	if (ieee->current_network.capability & WLAN_CAPABILITY_SHORT_SLOT_TIME)
		beacon_buf->capability |=
			cpu_to_le16(WLAN_CAPABILITY_SHORT_SLOT_TIME);

	/* Fix: removed a redundant second read of
	 * ieee->crypt_info.crypt[tx_keyidx] here - crypt is unchanged
	 * since the computation of 'encrypt' above, and was not used
	 * again anyway.
	 */
	if (encrypt)
		beacon_buf->capability |= cpu_to_le16(WLAN_CAPABILITY_PRIVACY);

	beacon_buf->header.frame_ctl = cpu_to_le16(RTLLIB_STYPE_PROBE_RESP);
	beacon_buf->info_element[0].id = MFIE_TYPE_SSID;
	beacon_buf->info_element[0].len = ssid_len;

	tag = (u8 *)beacon_buf->info_element[0].data;

	memcpy(tag, ssid, ssid_len);

	tag += ssid_len;

	*(tag++) = MFIE_TYPE_RATES;
	*(tag++) = rate_len - 2;
	memcpy(tag, ieee->current_network.rates, rate_len - 2);
	tag += rate_len - 2;

	*(tag++) = MFIE_TYPE_DS_SET;
	*(tag++) = 1;
	*(tag++) = ieee->current_network.channel;

	if (atim_len) {
		u16 val16;
		*(tag++) = MFIE_TYPE_IBSS_SET;
		*(tag++) = 2;
		val16 = ieee->current_network.atim_window;
		memcpy((u8 *)tag, (u8 *)&val16, 2);
		tag += 2;
	}

	if (erp_len) {
		*(tag++) = MFIE_TYPE_ERP;
		*(tag++) = 1;
		*(tag++) = erpinfo_content;
	}

	if (rate_ex_len) {
		*(tag++) = MFIE_TYPE_RATES_EX;
		*(tag++) = rate_ex_len - 2;
		memcpy(tag, ieee->current_network.rates_ex, rate_ex_len - 2);
		tag += rate_ex_len - 2;
	}

	if (wpa_ie_len) {
		if (ieee->iw_mode == IW_MODE_ADHOC)
			memcpy(&ieee->wpa_ie[14], &ieee->wpa_ie[8], 4);
		memcpy(tag, ieee->wpa_ie, ieee->wpa_ie_len);
		tag += ieee->wpa_ie_len;
	}
	return skb;
}
static struct sk_buff *rtllib_null_func(struct rtllib_device *ieee, short pwr)
{
struct sk_buff *skb;
struct rtllib_hdr_3addr *hdr;
skb = dev_alloc_skb(sizeof(struct rtllib_hdr_3addr) + ieee->tx_headroom);
if (!skb)
return NULL;
skb_reserve(skb, ieee->tx_headroom);
hdr = skb_put(skb, sizeof(struct rtllib_hdr_3addr));
ether_addr_copy(hdr->addr1, ieee->current_network.bssid);
ether_addr_copy(hdr->addr2, ieee->dev->dev_addr);
ether_addr_copy(hdr->addr3, ieee->current_network.bssid);
hdr->frame_ctl = cpu_to_le16(RTLLIB_FTYPE_DATA |
RTLLIB_STYPE_NULLFUNC | RTLLIB_FCTL_TODS |
(pwr ? RTLLIB_FCTL_PM : 0));
return skb;
}
/* Allocate a PS-Poll control frame carrying our association ID toward
 * the AP.  Returns NULL when skb allocation fails.
 */
static struct sk_buff *rtllib_pspoll_func(struct rtllib_device *ieee)
{
	struct rtllib_pspoll_hdr *hdr;
	struct sk_buff *skb;

	skb = dev_alloc_skb(sizeof(struct rtllib_pspoll_hdr) + ieee->tx_headroom);
	if (!skb)
		return NULL;

	skb_reserve(skb, ieee->tx_headroom);

	hdr = skb_put(skb, sizeof(struct rtllib_pspoll_hdr));

	ether_addr_copy(hdr->bssid, ieee->current_network.bssid);
	ether_addr_copy(hdr->ta, ieee->dev->dev_addr);

	/* The top two bits of the AID field are always set in PS-Poll. */
	hdr->aid = cpu_to_le16(ieee->assoc_id | 0xc000);
	hdr->frame_ctl = cpu_to_le16(RTLLIB_FTYPE_CTL | RTLLIB_STYPE_PSPOLL |
			 RTLLIB_FCTL_PM);

	return skb;
}
/* Build and send a probe response to @dest; silently skipped on OOM. */
static void rtllib_resp_to_probe(struct rtllib_device *ieee, u8 *dest)
{
	struct sk_buff *skb = rtllib_probe_resp(ieee, dest);

	if (!skb)
		return;

	softmac_mgmt_xmit(skb, ieee);
}
/* Look up @bssid in the PMKID cache.
 * Returns the cache index on a hit, or -1 when the BSSID is not cached.
 */
static inline int SecIsInPMKIDList(struct rtllib_device *ieee, u8 *bssid)
{
	int idx;

	for (idx = 0; idx < NUM_PMKID_CACHE; idx++) {
		if (ieee->PMKIDList[idx].bUsed &&
		    memcmp(ieee->PMKIDList[idx].Bssid, bssid, ETH_ALEN) == 0)
			return idx;
	}

	return -1;
}
/* Build an (re)association request frame for the network @beacon.
 *
 * Assembles the fixed header plus, in order: SSID, supported rates,
 * extended rates, Cisco CKIP/CCX IEs (when the AP advertises them),
 * the HT capability IE (pre-EWC position), WPA IE (optionally extended
 * with a cached PMKID), WMM info, WPS IE, Turbo info, and the EWC-style
 * HT and Realtek aggregation IEs.  A copy of all IEs is stashed in
 * ieee->assocreq_ies for wireless-extensions reporting.
 *
 * Returns the skb ready for transmission, or NULL on allocation
 * failure.
 */
static inline struct sk_buff *
rtllib_association_req(struct rtllib_network *beacon,
		       struct rtllib_device *ieee)
{
	struct sk_buff *skb;
	struct rtllib_assoc_request_frame *hdr;
	u8 *tag, *ies;
	int i;
	u8 *ht_cap_buf = NULL;
	u8 ht_cap_len = 0;
	u8 *realtek_ie_buf = NULL;
	u8 realtek_ie_len = 0;
	int wpa_ie_len = ieee->wpa_ie_len;
	int wps_ie_len = ieee->wps_ie_len;
	unsigned int ckip_ie_len = 0;
	unsigned int ccxrm_ie_len = 0;
	unsigned int cxvernum_ie_len = 0;
	struct lib80211_crypt_data *crypt;
	int encrypt;
	int PMKCacheIdx;

	/* Each present rates element costs 2 header octets. */
	unsigned int rate_len = (beacon->rates_len ?
				(beacon->rates_len + 2) : 0) +
				(beacon->rates_ex_len ? (beacon->rates_ex_len) +
				2 : 0);

	unsigned int wmm_info_len = beacon->qos_data.supported ? 9 : 0;
	unsigned int turbo_info_len = beacon->Turbo_Enable ? 9 : 0;

	int len = 0;

	/* Privacy is on if WEP is configured or a WPA IE is present. */
	crypt = ieee->crypt_info.crypt[ieee->crypt_info.tx_keyidx];
	if (crypt != NULL)
		encrypt = crypt && crypt->ops &&
			  ((strcmp(crypt->ops->name, "R-WEP") == 0 ||
			  wpa_ie_len));
	else
		encrypt = 0;

	/* TKIP-only APs (and a forced-BG override) disable HT. */
	if ((ieee->rtllib_ap_sec_type &&
	     (ieee->rtllib_ap_sec_type(ieee) & SEC_ALG_TKIP)) ||
	    ieee->bForcedBgMode) {
		ieee->ht_info->enable_ht = 0;
		ieee->mode = WIRELESS_MODE_G;
	}

	if (ieee->ht_info->bCurrentHTSupport && ieee->ht_info->enable_ht) {
		ht_cap_buf = (u8 *)&(ieee->ht_info->SelfHTCap);
		ht_cap_len = sizeof(ieee->ht_info->SelfHTCap);
		HTConstructCapabilityElement(ieee, ht_cap_buf, &ht_cap_len,
					     encrypt, true);
		if (ieee->ht_info->current_rt2rt_aggregation) {
			realtek_ie_buf = ieee->ht_info->sz_rt2rt_agg_buf;
			realtek_ie_len =
				 sizeof(ieee->ht_info->sz_rt2rt_agg_buf);
			HTConstructRT2RTAggElement(ieee, realtek_ie_buf,
						   &realtek_ie_len);
		}
	}

	if (beacon->bCkipSupported)
		ckip_ie_len = 30 + 2;
	if (beacon->bCcxRmEnable)
		ccxrm_ie_len = 6 + 2;
	if (beacon->BssCcxVerNumber >= 2)
		cxvernum_ie_len = 5 + 2;

	/* A cached PMKID extends the RSN IE by 18 bytes
	 * (2-byte PMKID count + one 16-byte PMKID).
	 */
	PMKCacheIdx = SecIsInPMKIDList(ieee, ieee->current_network.bssid);
	if (PMKCacheIdx >= 0) {
		wpa_ie_len += 18;
		netdev_info(ieee->dev, "[PMK cache]: WPA2 IE length: %x\n",
			    wpa_ie_len);
	}
	len = sizeof(struct rtllib_assoc_request_frame) + 2
	      + beacon->ssid_len
	      + rate_len
	      + wpa_ie_len
	      + wps_ie_len
	      + wmm_info_len
	      + turbo_info_len
	      + ht_cap_len
	      + realtek_ie_len
	      + ckip_ie_len
	      + ccxrm_ie_len
	      + cxvernum_ie_len
	      + ieee->tx_headroom;

	skb = dev_alloc_skb(len);

	if (!skb)
		return NULL;

	skb_reserve(skb, ieee->tx_headroom);

	hdr = skb_put(skb, sizeof(struct rtllib_assoc_request_frame) + 2);

	hdr->header.frame_ctl = cpu_to_le16(RTLLIB_STYPE_ASSOC_REQ);
	hdr->header.duration_id = cpu_to_le16(37);
	ether_addr_copy(hdr->header.addr1, beacon->bssid);
	ether_addr_copy(hdr->header.addr2, ieee->dev->dev_addr);
	ether_addr_copy(hdr->header.addr3, beacon->bssid);

	/* Remember which AP we are associating with. */
	ether_addr_copy(ieee->ap_mac_addr, beacon->bssid);

	hdr->capability = cpu_to_le16(WLAN_CAPABILITY_ESS);
	if (beacon->capability & WLAN_CAPABILITY_PRIVACY)
		hdr->capability |= cpu_to_le16(WLAN_CAPABILITY_PRIVACY);

	if (beacon->capability & WLAN_CAPABILITY_SHORT_PREAMBLE)
		hdr->capability |= cpu_to_le16(WLAN_CAPABILITY_SHORT_PREAMBLE);

	if (beacon->capability & WLAN_CAPABILITY_SHORT_SLOT_TIME)
		hdr->capability |= cpu_to_le16(WLAN_CAPABILITY_SHORT_SLOT_TIME);

	hdr->listen_interval = cpu_to_le16(beacon->listen_interval);

	hdr->info_element[0].id = MFIE_TYPE_SSID;

	hdr->info_element[0].len = beacon->ssid_len;
	skb_put_data(skb, beacon->ssid, beacon->ssid_len);

	tag = skb_put(skb, rate_len);

	if (beacon->rates_len) {
		*tag++ = MFIE_TYPE_RATES;
		*tag++ = beacon->rates_len;
		for (i = 0; i < beacon->rates_len; i++)
			*tag++ = beacon->rates[i];
	}

	if (beacon->rates_ex_len) {
		*tag++ = MFIE_TYPE_RATES_EX;
		*tag++ = beacon->rates_ex_len;
		for (i = 0; i < beacon->rates_ex_len; i++)
			*tag++ = beacon->rates_ex[i];
	}

	/* Cisco CKIP (Aironet) IE. */
	if (beacon->bCkipSupported) {
		static const u8 AironetIeOui[] = {0x00, 0x01, 0x66};
		u8 CcxAironetBuf[30];
		struct octet_string osCcxAironetIE;

		memset(CcxAironetBuf, 0, 30);
		osCcxAironetIE.Octet = CcxAironetBuf;
		osCcxAironetIE.Length = sizeof(CcxAironetBuf);
		memcpy(osCcxAironetIE.Octet, AironetIeOui,
		       sizeof(AironetIeOui));

		osCcxAironetIE.Octet[IE_CISCO_FLAG_POSITION] |=
					 (SUPPORT_CKIP_PK | SUPPORT_CKIP_MIC);
		tag = skb_put(skb, ckip_ie_len);
		*tag++ = MFIE_TYPE_AIRONET;
		*tag++ = osCcxAironetIE.Length;
		memcpy(tag, osCcxAironetIE.Octet, osCcxAironetIE.Length);
		tag += osCcxAironetIE.Length;
	}

	/* CCX radio-measurement capability IE. */
	if (beacon->bCcxRmEnable) {
		static const u8 CcxRmCapBuf[] = {0x00, 0x40, 0x96, 0x01, 0x01,
			0x00};
		struct octet_string osCcxRmCap;

		osCcxRmCap.Octet = (u8 *)CcxRmCapBuf;
		osCcxRmCap.Length = sizeof(CcxRmCapBuf);
		tag = skb_put(skb, ccxrm_ie_len);
		*tag++ = MFIE_TYPE_GENERIC;
		*tag++ = osCcxRmCap.Length;
		memcpy(tag, osCcxRmCap.Octet, osCcxRmCap.Length);
		tag += osCcxRmCap.Length;
	}

	/* CCX version-number IE (version carried in the last octet). */
	if (beacon->BssCcxVerNumber >= 2) {
		u8 CcxVerNumBuf[] = {0x00, 0x40, 0x96, 0x03, 0x00};
		struct octet_string osCcxVerNum;

		CcxVerNumBuf[4] = beacon->BssCcxVerNumber;
		osCcxVerNum.Octet = CcxVerNumBuf;
		osCcxVerNum.Length = sizeof(CcxVerNumBuf);
		tag = skb_put(skb, cxvernum_ie_len);
		*tag++ = MFIE_TYPE_GENERIC;
		*tag++ = osCcxVerNum.Length;
		memcpy(tag, osCcxVerNum.Octet, osCcxVerNum.Length);
		tag += osCcxVerNum.Length;
	}
	/* Standard HT capability position for non-EWC peers. */
	if (ieee->ht_info->bCurrentHTSupport && ieee->ht_info->enable_ht) {
		if (ieee->ht_info->ePeerHTSpecVer != HT_SPEC_VER_EWC) {
			tag = skb_put(skb, ht_cap_len);
			*tag++ = MFIE_TYPE_HT_CAP;
			*tag++ = ht_cap_len - 2;
			memcpy(tag, ht_cap_buf, ht_cap_len - 2);
			tag += ht_cap_len - 2;
		}
	}

	if (wpa_ie_len) {
		skb_put_data(skb, ieee->wpa_ie, ieee->wpa_ie_len);

		/* Append PMKID count (1) + cached PMKID to the RSN IE. */
		if (PMKCacheIdx >= 0) {
			tag = skb_put(skb, 18);
			*tag = 1;
			*(tag + 1) = 0;
			memcpy((tag + 2), &ieee->PMKIDList[PMKCacheIdx].PMKID,
			       16);
		}
	}
	if (wmm_info_len) {
		tag = skb_put(skb, wmm_info_len);
		rtllib_WMM_Info(ieee, &tag);
	}

	if (wps_ie_len && ieee->wps_ie)
		skb_put_data(skb, ieee->wps_ie, wps_ie_len);

	if (turbo_info_len) {
		tag = skb_put(skb, turbo_info_len);
		rtllib_TURBO_Info(ieee, &tag);
	}

	/* EWC peers expect the HT capability as a vendor-specific IE,
	 * optionally followed by the Realtek-to-Realtek aggregation IE.
	 */
	if (ieee->ht_info->bCurrentHTSupport && ieee->ht_info->enable_ht) {
		if (ieee->ht_info->ePeerHTSpecVer == HT_SPEC_VER_EWC) {
			tag = skb_put(skb, ht_cap_len);
			*tag++ = MFIE_TYPE_GENERIC;
			*tag++ = ht_cap_len - 2;
			memcpy(tag, ht_cap_buf, ht_cap_len - 2);
			tag += ht_cap_len - 2;
		}

		if (ieee->ht_info->current_rt2rt_aggregation) {
			tag = skb_put(skb, realtek_ie_len);
			*tag++ = MFIE_TYPE_GENERIC;
			*tag++ = realtek_ie_len - 2;
			memcpy(tag, realtek_ie_buf, realtek_ie_len - 2);
		}
	}

	/* Snapshot everything from the SSID element onward for wext. */
	kfree(ieee->assocreq_ies);
	ieee->assocreq_ies = NULL;
	ies = &(hdr->info_element[0].id);
	ieee->assocreq_ies_len = (skb->data + skb->len) - ies;
	ieee->assocreq_ies = kmemdup(ies, ieee->assocreq_ies_len, GFP_ATOMIC);
	if (!ieee->assocreq_ies)
		ieee->assocreq_ies_len = 0;

	return skb;
}
/* Abort the current authentication/association attempt and schedule a
 * retry.  Bumps associate_seq so that late AUTH/ASSOC responses for the
 * aborted exchange are ignored, then parks the state machine in
 * RTLLIB_ASSOCIATING_RETRY until the retry work runs.
 */
static void rtllib_associate_abort(struct rtllib_device *ieee)
{
	unsigned long flags;

	spin_lock_irqsave(&ieee->lock, flags);

	ieee->associate_seq++;

	/* don't scan, and avoid to have the RX path possibily
	 * try again to associate. Even do not react to AUTH or
	 * ASSOC response. Just wait for the retry wq to be scheduled.
	 * Here we will check if there are good nets to associate
	 * with, so we retry or just get back to NO_LINK and scanning
	 */
	if (ieee->link_state == RTLLIB_ASSOCIATING_AUTHENTICATING) {
		netdev_dbg(ieee->dev, "Authentication failed\n");
		ieee->softmac_stats.no_auth_rs++;
	} else {
		netdev_dbg(ieee->dev, "Association failed\n");
		ieee->softmac_stats.no_ass_rs++;
	}

	ieee->link_state = RTLLIB_ASSOCIATING_RETRY;

	schedule_delayed_work(&ieee->associate_retry_wq,
			      RTLLIB_SOFTMAC_ASSOC_RETRY_TIME);

	spin_unlock_irqrestore(&ieee->lock, flags);
}
/* Timer callback: the association exchange timed out - abort it. */
static void rtllib_associate_abort_cb(struct timer_list *t)
{
	struct rtllib_device *ieee = from_timer(ieee, t, associate_timer);

	rtllib_associate_abort(ieee);
}
/* Association step 1: send the initial authentication request to
 * @daddr and arm the association timer to catch a missing response.
 */
static void rtllib_associate_step1(struct rtllib_device *ieee, u8 *daddr)
{
	struct rtllib_network *beacon = &ieee->current_network;
	struct sk_buff *skb;

	netdev_dbg(ieee->dev, "Stopping scan\n");

	ieee->softmac_stats.tx_auth_rq++;

	skb = rtllib_authentication_req(beacon, ieee, 0, daddr);
	if (!skb) {
		rtllib_associate_abort(ieee);
		return;
	}

	ieee->link_state = RTLLIB_ASSOCIATING_AUTHENTICATING;
	netdev_dbg(ieee->dev, "Sending authentication request\n");
	softmac_mgmt_xmit(skb, ieee);
	/* Only arm the timeout when no earlier timer is still pending. */
	if (!timer_pending(&ieee->associate_timer)) {
		ieee->associate_timer.expires = jiffies + (HZ / 2);
		add_timer(&ieee->associate_timer);
	}
}
/* Answer a shared-key authentication challenge (sequence 3): append
 * the challenge IE to a fresh auth frame, WEP-encrypt the frame body
 * and transmit it.  Always consumes (kfree()s) @challenge.
 */
static void rtllib_auth_challenge(struct rtllib_device *ieee, u8 *challenge,
				  int chlen)
{
	u8 *c;
	struct sk_buff *skb;
	struct rtllib_network *beacon = &ieee->current_network;

	ieee->associate_seq++;
	ieee->softmac_stats.tx_auth_rq++;

	skb = rtllib_authentication_req(beacon, ieee, chlen + 2, beacon->bssid);
	if (!skb) {
		rtllib_associate_abort(ieee);
	} else {
		/* Challenge IE: type, length, then the challenge text. */
		c = skb_put(skb, chlen + 2);
		*(c++) = MFIE_TYPE_CHALLENGE;
		*(c++) = chlen;
		memcpy(c, challenge, chlen);

		netdev_dbg(ieee->dev,
			   "Sending authentication challenge response\n");

		/* Sequence 3 of shared-key auth goes out WEP-encrypted. */
		rtllib_encrypt_fragment(ieee, skb,
					sizeof(struct rtllib_hdr_3addr));

		softmac_mgmt_xmit(skb, ieee);
		mod_timer(&ieee->associate_timer, jiffies + (HZ / 2));
	}
	kfree(challenge);
}
/* Association step 2: authentication succeeded, now transmit the
 * association request and re-arm the response timer.
 */
static void rtllib_associate_step2(struct rtllib_device *ieee)
{
	struct rtllib_network *beacon = &ieee->current_network;
	struct sk_buff *skb;

	del_timer_sync(&ieee->associate_timer);

	netdev_dbg(ieee->dev, "Sending association request\n");

	ieee->softmac_stats.tx_ass_rq++;
	skb = rtllib_association_req(beacon, ieee);
	if (!skb) {
		rtllib_associate_abort(ieee);
		return;
	}

	softmac_mgmt_xmit(skb, ieee);
	mod_timer(&ieee->associate_timer, jiffies + (HZ / 2));
}
/* Workqueue half of association completion: report the new link to
 * userspace, bring the carrier up, pick legacy rates (54g vs B),
 * apply HT settings, prime the link-detect counters and call the
 * driver's link_change hook.
 */
static void rtllib_associate_complete_wq(void *data)
{
	struct rtllib_device *ieee = (struct rtllib_device *)
				     container_of(data,
				     struct rtllib_device,
				     associate_complete_wq);
	struct rt_pwr_save_ctrl *psc = &ieee->pwr_save_ctrl;

	netdev_info(ieee->dev, "Associated successfully with %pM\n",
		    ieee->current_network.bssid);
	/* During a silent reset no wireless event is sent to userspace. */
	if (!ieee->is_silent_reset) {
		netdev_info(ieee->dev, "normal associate\n");
		notify_wx_assoc_event(ieee);
	}

	netif_carrier_on(ieee->dev);
	ieee->is_roaming = false;
	if (rtllib_is_54g(&ieee->current_network)) {
		ieee->rate = 108;
		netdev_info(ieee->dev, "Using G rates:%d\n", ieee->rate);
	} else {
		ieee->rate = 22;
		ieee->set_wireless_mode(ieee->dev, WIRELESS_MODE_B);
		netdev_info(ieee->dev, "Using B rates:%d\n", ieee->rate);
	}
	if (ieee->ht_info->bCurrentHTSupport && ieee->ht_info->enable_ht) {
		netdev_info(ieee->dev, "Successfully associated, ht enabled\n");
		HTOnAssocRsp(ieee);
	} else {
		netdev_info(ieee->dev,
			    "Successfully associated, ht not enabled(%d, %d)\n",
			    ieee->ht_info->bCurrentHTSupport,
			    ieee->ht_info->enable_ht);
		memset(ieee->dot11ht_oper_rate_set, 0, 16);
	}
	/* Link-detect window scales with the beacon interval. */
	ieee->link_detect_info.SlotNum = 2 * (1 +
				       ieee->current_network.beacon_interval /
				       500);
	if (ieee->link_detect_info.NumRecvBcnInPeriod == 0 ||
	    ieee->link_detect_info.NumRecvDataInPeriod == 0) {
		ieee->link_detect_info.NumRecvBcnInPeriod = 1;
		ieee->link_detect_info.NumRecvDataInPeriod = 1;
	}
	psc->LpsIdleCount = 0;
	ieee->link_change(ieee->dev);

	if (ieee->is_silent_reset) {
		netdev_info(ieee->dev, "silent reset associate\n");
		ieee->is_silent_reset = false;
	}
}
/* Intentionally empty: no association-info event is generated here.
 * NOTE(review): appears to be a leftover hook kept for symmetry with
 * the completion path - confirm before removing.
 */
static void rtllib_sta_send_associnfo(struct rtllib_device *ieee)
{
}
/* Finish the association exchange: cancel the response timeout, mark
 * the link as up and defer the heavyweight completion work (userspace
 * notification, rate/HT setup) to the associate_complete workqueue.
 */
static void rtllib_associate_complete(struct rtllib_device *ieee)
{
	del_timer_sync(&ieee->associate_timer);

	ieee->link_state = MAC80211_LINKED;
	rtllib_sta_send_associnfo(ieee);

	schedule_work(&ieee->associate_complete_wq);
}
/* Workqueue entry for (re)starting the association procedure: stop any
 * scan, leave inactive power save, fall back to 20 MHz bandwidth, and
 * fire authentication step 1.  Bails out early (re-queueing the IPS
 * leave work) while the RF is still powered off.
 */
static void rtllib_associate_procedure_wq(void *data)
{
	struct rtllib_device *ieee = container_of_dwork_rsl(data,
				     struct rtllib_device,
				     associate_procedure_wq);

	rtllib_stop_scan_syncro(ieee);
	ieee->rtllib_ips_leave(ieee->dev);
	mutex_lock(&ieee->wx_mutex);

	rtllib_stop_scan(ieee);
	HTSetConnectBwMode(ieee, HT_CHANNEL_WIDTH_20, HT_EXTCHNL_OFFSET_NO_EXT);
	if (ieee->rf_power_state == rf_off) {
		ieee->rtllib_ips_leave_wq(ieee->dev);
		mutex_unlock(&ieee->wx_mutex);
		return;
	}
	ieee->associate_seq = 1;

	rtllib_associate_step1(ieee, ieee->current_network.bssid);

	mutex_unlock(&ieee->wx_mutex);
}
/* Evaluate a freshly scanned network @net: when it satisfies the
 * user-configured AP/ESSID constraints, adopt it as current_network
 * and either start the association procedure (infrastructure) or join
 * it directly (ad-hoc).  Runs with ieee->lock held (called from
 * rtllib_softmac_check_all_nets()).
 */
inline void rtllib_softmac_new_net(struct rtllib_device *ieee,
				   struct rtllib_network *net)
{
	u8 tmp_ssid[IW_ESSID_MAX_SIZE + 1];
	int tmp_ssid_len = 0;

	short apset, ssidset, ssidbroad, apmatch, ssidmatch;

	/* we are interested in new only if we are not associated
	 * and we are not associating / authenticating
	 */
	if (ieee->link_state != MAC80211_NOLINK)
		return;

	if ((ieee->iw_mode == IW_MODE_INFRA) && !(net->capability &
	    WLAN_CAPABILITY_ESS))
		return;

	if ((ieee->iw_mode == IW_MODE_ADHOC) && !(net->capability &
	    WLAN_CAPABILITY_IBSS))
		return;

	if ((ieee->iw_mode == IW_MODE_ADHOC) &&
	    (net->channel > ieee->ibss_maxjoin_chal))
		return;
	if (ieee->iw_mode == IW_MODE_INFRA || ieee->iw_mode == IW_MODE_ADHOC) {
		/* if the user specified the AP MAC, we need also the essid
		 * This could be obtained by beacons or, if the network does not
		 * broadcast it, it can be put manually.
		 */
		apset = ieee->wap_set;
		ssidset = ieee->ssid_set;
		ssidbroad = !(net->ssid_len == 0 || net->ssid[0] == '\0');
		apmatch = (memcmp(ieee->current_network.bssid, net->bssid,
				  ETH_ALEN) == 0);
		if (!ssidbroad) {
			/* Hidden SSID: match against the probe-derived
			 * hidden_ssid and, when available, promote it to
			 * the visible ssid field.
			 * NOTE(review): the strncpy below copies exactly
			 * hidden_ssid_len bytes and relies on net->ssid
			 * already being NUL-padded - confirm.
			 */
			ssidmatch = (ieee->current_network.ssid_len ==
				    net->hidden_ssid_len) &&
				    (!strncmp(ieee->current_network.ssid,
				    net->hidden_ssid, net->hidden_ssid_len));
			if (net->hidden_ssid_len > 0) {
				strncpy(net->ssid, net->hidden_ssid,
					net->hidden_ssid_len);
				net->ssid_len = net->hidden_ssid_len;
				ssidbroad = 1;
			}
		} else {
			ssidmatch =
			   (ieee->current_network.ssid_len == net->ssid_len) &&
			   (!strncmp(ieee->current_network.ssid, net->ssid,
			   net->ssid_len));
		}

		/* if the user set the AP check if match.
		 * if the network does not broadcast essid we check the
		 *	 user supplied ANY essid
		 * if the network does broadcast and the user does not set
		 *	 essid it is OK
		 * if the network does broadcast and the user did set essid
		 *	 check if essid match
		 * if the ap is not set, check that the user set the bssid
		 * and the network does broadcast and that those two bssid match
		 */
		if ((apset && apmatch &&
		    ((ssidset && ssidbroad && ssidmatch) ||
		    (ssidbroad && !ssidset) || (!ssidbroad && ssidset))) ||
		    (!apset && ssidset && ssidbroad && ssidmatch) ||
		    (ieee->is_roaming && ssidset && ssidbroad && ssidmatch)) {
			/* Save the essid so that if it is hidden, it is
			 * replaced with the essid provided by the user.
			 */
			if (!ssidbroad) {
				memcpy(tmp_ssid, ieee->current_network.ssid,
				       ieee->current_network.ssid_len);
				tmp_ssid_len = ieee->current_network.ssid_len;
			}
			memcpy(&ieee->current_network, net,
			       sizeof(ieee->current_network));
			if (!ssidbroad) {
				memcpy(ieee->current_network.ssid, tmp_ssid,
				       tmp_ssid_len);
				ieee->current_network.ssid_len = tmp_ssid_len;
			}
			netdev_info(ieee->dev,
				    "Linking with %s,channel:%d, qos:%d, myHT:%d, networkHT:%d, mode:%x cur_net.flags:0x%x\n",
				    ieee->current_network.ssid,
				    ieee->current_network.channel,
				    ieee->current_network.qos_data.supported,
				    ieee->ht_info->enable_ht,
				    ieee->current_network.bssht.bd_support_ht,
				    ieee->current_network.mode,
				    ieee->current_network.flags);

			/* Non-softmac scans must be stopped synchronously
			 * before we start associating.
			 */
			if ((rtllib_act_scanning(ieee, false)) &&
			    !(ieee->softmac_features & IEEE_SOFTMAC_SCAN))
				rtllib_stop_scan_syncro(ieee);

			HTResetIOTSetting(ieee->ht_info);
			ieee->wmm_acm = 0;
			if (ieee->iw_mode == IW_MODE_INFRA) {
				/* Join the network for the first time */
				ieee->AsocRetryCount = 0;
				/* HT only with QoS support on an HT BSS. */
				if ((ieee->current_network.qos_data.supported == 1) &&
				    ieee->current_network.bssht.bd_support_ht)
					HTResetSelfAndSavePeerSetting(ieee,
						 &(ieee->current_network));
				else
					ieee->ht_info->bCurrentHTSupport =
								 false;

				ieee->link_state = RTLLIB_ASSOCIATING;
				schedule_delayed_work(
					   &ieee->associate_procedure_wq, 0);
			} else {
				/* Ad-hoc: join directly, picking legacy
				 * G or B rates.
				 */
				if (rtllib_is_54g(&ieee->current_network)) {
					ieee->rate = 108;
					ieee->set_wireless_mode(ieee->dev, WIRELESS_MODE_G);
					netdev_info(ieee->dev,
						    "Using G rates\n");
				} else {
					ieee->rate = 22;
					ieee->set_wireless_mode(ieee->dev, WIRELESS_MODE_B);
					netdev_info(ieee->dev,
						    "Using B rates\n");
				}
				memset(ieee->dot11ht_oper_rate_set, 0, 16);
				ieee->link_state = MAC80211_LINKED;
			}
		}
	}
}
/* Walk the scanned-network list under ieee->lock and offer each
 * sufficiently fresh entry to rtllib_softmac_new_net(); stop as soon
 * as one of them moves the device out of MAC80211_NOLINK.
 */
static void rtllib_softmac_check_all_nets(struct rtllib_device *ieee)
{
	unsigned long flags;
	struct rtllib_network *target;

	spin_lock_irqsave(&ieee->lock, flags);

	list_for_each_entry(target, &ieee->network_list, list) {
		/* if the state become different that NOLINK means
		 * we had found what we are searching for
		 */
		if (ieee->link_state != MAC80211_NOLINK)
			break;

		/* scan_age == 0 disables aging; otherwise skip entries
		 * scanned longer than scan_age jiffies ago.
		 */
		if (ieee->scan_age == 0 || time_after(target->last_scanned +
		    ieee->scan_age, jiffies))
			rtllib_softmac_new_net(ieee, target);
	}
	spin_unlock_irqrestore(&ieee->lock, flags);
}
/* Parse an authentication response.  On a shared-key challenge,
 * *challenge is set to a kmemdup()'d copy (caller frees) and *chlen to
 * its length; otherwise *challenge is NULL.
 * Returns 0 on success, -EINVAL on a short/failed/malformed frame,
 * -ENOMEM when the challenge copy cannot be allocated.
 */
static inline int auth_parse(struct net_device *dev, struct sk_buff *skb,
			     u8 **challenge, int *chlen)
{
	struct rtllib_authentication *a;
	u8 *t;

	if (skb->len < (sizeof(struct rtllib_authentication) -
			sizeof(struct rtllib_info_element))) {
		netdev_dbg(dev, "invalid len in auth resp: %d\n", skb->len);
		return -EINVAL;
	}

	*challenge = NULL;
	a = (struct rtllib_authentication *)skb->data;
	if (skb->len > (sizeof(struct rtllib_authentication) + 3)) {
		t = skb->data + sizeof(struct rtllib_authentication);

		if (*(t++) == MFIE_TYPE_CHALLENGE) {
			*chlen = *(t++);
			/* Fix: bound the advertised challenge length by
			 * the bytes actually remaining in the skb so
			 * kmemdup() cannot read past the end of the frame.
			 */
			if (*chlen > skb->data + skb->len - t)
				return -EINVAL;
			*challenge = kmemdup(t, *chlen, GFP_ATOMIC);
			if (!*challenge)
				return -ENOMEM;
		}
	}

	if (a->status) {
		netdev_dbg(dev, "auth_parse() failed\n");
		return -EINVAL;
	}

	return 0;
}
/* Parse a probe request; copies the transmitter address into @src.
 * Returns:
 *  -1 - corrupted frame, foreign BSSID, or SSID overruns the frame;
 *   1 - wildcard (empty) SSID or no SSID element found;
 *   otherwise non-zero iff the requested SSID matches ours.
 */
static short probe_rq_parse(struct rtllib_device *ieee, struct sk_buff *skb,
			    u8 *src)
{
	u8 *tag;
	u8 *skbend;
	u8 *ssid = NULL;
	u8 ssidlen = 0;
	struct rtllib_hdr_3addr *header =
		(struct rtllib_hdr_3addr *)skb->data;
	bool bssid_match;

	if (skb->len < sizeof(struct rtllib_hdr_3addr))
		return -1; /* corrupted */

	/* Only respond to our own or the broadcast BSSID. */
	bssid_match =
		(!ether_addr_equal(header->addr3, ieee->current_network.bssid)) &&
		(!is_broadcast_ether_addr(header->addr3));
	if (bssid_match)
		return -1;

	ether_addr_copy(src, header->addr2);

	skbend = (u8 *)skb->data + skb->len;

	tag = skb->data + sizeof(struct rtllib_hdr_3addr);

	/* Scan the tagged parameters for the SSID element (id 0). */
	while (tag + 1 < skbend) {
		if (*tag == 0) {
			ssid = tag + 2;
			ssidlen = *(tag + 1);
			break;
		}
		tag++; /* point to the len field */
		tag = tag + *(tag); /* point to the last data byte of the tag */
		tag++; /* point to the next tag */
	}

	if (ssidlen == 0)
		return 1;

	if (!ssid)
		return 1; /* ssid not found in tagged param */

	/* Fix: reject an SSID element whose advertised length runs past
	 * the end of the frame, so strncmp() below cannot over-read.
	 */
	if (ssid + ssidlen > skbend)
		return -1;

	return !strncmp(ssid, ieee->current_network.ssid, ssidlen);
}
/* Parse an association response: extracts the AID into *aid and
 * returns the 802.11 status code (0 = success), or 0xcafe for a
 * too-short frame.  Also drives the pure-N IOT fallback: after a
 * rates/caps rejection from an N-mode BSS while we are in G mode, set
 * HT_IOT_ACT_PURE_N_MODE for a limited number of retries.
 */
static inline u16 assoc_parse(struct rtllib_device *ieee, struct sk_buff *skb,
			      int *aid)
{
	struct rtllib_assoc_response_frame *response_head;
	u16 status_code;

	if (skb->len < sizeof(struct rtllib_assoc_response_frame)) {
		netdev_dbg(ieee->dev, "Invalid len in auth resp: %d\n",
			   skb->len);
		return 0xcafe;
	}

	response_head = (struct rtllib_assoc_response_frame *)skb->data;
	/* AID is in the low 14 bits; the top two bits are always set. */
	*aid = le16_to_cpu(response_head->aid) & 0x3fff;

	status_code = le16_to_cpu(response_head->status);
	if ((status_code == WLAN_STATUS_ASSOC_DENIED_RATES ||
	     status_code == WLAN_STATUS_CAPS_UNSUPPORTED) &&
	    ((ieee->mode == WIRELESS_MODE_G) &&
	     (ieee->current_network.mode == WIRELESS_MODE_N_24G) &&
	     (ieee->AsocRetryCount++ < (RT_ASOC_RETRY_LIMIT - 1)))) {
		ieee->ht_info->iot_action |= HT_IOT_ACT_PURE_N_MODE;
	} else {
		ieee->AsocRetryCount = 0;
	}

	/* Fix: reuse the already-converted status_code rather than
	 * re-reading and re-converting response_head->status.
	 */
	return status_code;
}
/* RX hook for probe requests: answer with a probe response when the
 * request targets our BSS and SSID.
 */
void rtllib_rx_probe_rq(struct rtllib_device *ieee, struct sk_buff *skb)
{
	u8 dest[ETH_ALEN];

	ieee->softmac_stats.rx_probe_rq++;

	if (probe_rq_parse(ieee, skb, dest) <= 0)
		return;

	ieee->softmac_stats.tx_probe_rs++;
	rtllib_resp_to_probe(ieee, dest);
}
/* Send a null-data frame to the AP; @pwr sets the PS bit.  A failed
 * allocation is silently dropped.
 */
void rtllib_sta_ps_send_null_frame(struct rtllib_device *ieee, short pwr)
{
	struct sk_buff *skb = rtllib_null_func(ieee, pwr);

	if (!skb)
		return;

	softmac_ps_mgmt_xmit(skb, ieee);
}
EXPORT_SYMBOL(rtllib_sta_ps_send_null_frame);
/* Send a PS-Poll frame to the AP; a failed allocation is dropped. */
void rtllib_sta_ps_send_pspoll_frame(struct rtllib_device *ieee)
{
	struct sk_buff *skb = rtllib_pspoll_func(ieee);

	if (!skb)
		return;

	softmac_ps_mgmt_xmit(skb, ieee);
}
/* Decide the next power-save action for the station.
 * Return values (see the caller's legend): 0 = do nothing,
 * 1 = enter sleep (*time is set to the wake-up deadline),
 * 2 = wake up (the DTIM indicates unicast data buffered for us).
 */
static short rtllib_sta_ps_sleep(struct rtllib_device *ieee, u64 *time)
{
	int timeout;
	u8 dtim;
	struct rt_pwr_save_ctrl *psc = &ieee->pwr_save_ctrl;

	/* Grace counter: stay awake for a few invocations after wake. */
	if (ieee->LPSDelayCnt) {
		ieee->LPSDelayCnt--;
		return 0;
	}

	dtim = ieee->current_network.dtim_data;
	if (!(dtim & RTLLIB_DTIM_VALID))
		return 0;
	timeout = ieee->current_network.beacon_interval;
	/* Consume this DTIM so it is only acted on once. */
	ieee->current_network.dtim_data = RTLLIB_DTIM_INVALID;
	/* there's no need to nofity AP that I find you buffered
	 * with broadcast packet
	 */
	if (dtim & (RTLLIB_DTIM_UCAST & ieee->ps))
		return 2;

	/* Hold off sleeping while TX or RX happened within the last
	 * beacon interval, or while management frames are still queued.
	 */
	if (!time_after(jiffies,
			dev_trans_start(ieee->dev) + msecs_to_jiffies(timeout)))
		return 0;
	if (!time_after(jiffies,
			ieee->last_rx_ps_time + msecs_to_jiffies(timeout)))
		return 0;
	if ((ieee->softmac_features & IEEE_SOFTMAC_SINGLE_QUEUE) &&
	    (ieee->mgmt_queue_tail != ieee->mgmt_queue_head))
		return 0;

	if (time) {
		if (ieee->bAwakePktSent) {
			psc->LPSAwakeIntvl = 1;
		} else {
			u8 MaxPeriod = 1;

			if (psc->LPSAwakeIntvl == 0)
				psc->LPSAwakeIntvl = 1;
			/* reg_max_lps_awake_intvl: 0 = wake every beacon,
			 * 0xFF = wake once per DTIM period, otherwise an
			 * explicit maximum beacon count.
			 */
			if (psc->reg_max_lps_awake_intvl == 0)
				MaxPeriod = 1;
			else if (psc->reg_max_lps_awake_intvl == 0xFF)
				MaxPeriod = ieee->current_network.dtim_period;
			else
				MaxPeriod = psc->reg_max_lps_awake_intvl;
			/* Back off gradually, capped at MaxPeriod. */
			psc->LPSAwakeIntvl = (psc->LPSAwakeIntvl >=
					       MaxPeriod) ? MaxPeriod :
					       (psc->LPSAwakeIntvl + 1);
		}
		{
			/* Round the awake interval down to a beacon count
			 * aligned with the DTIM schedule so the station
			 * wakes for a DTIM beacon.
			 */
			u8 LPSAwakeIntvl_tmp = 0;
			u8 period = ieee->current_network.dtim_period;
			u8 count = ieee->current_network.tim.tim_count;

			if (count == 0) {
				if (psc->LPSAwakeIntvl > period)
					LPSAwakeIntvl_tmp = period +
						 (psc->LPSAwakeIntvl -
						 period) -
						 ((psc->LPSAwakeIntvl - period) %
						 period);
				else
					LPSAwakeIntvl_tmp = psc->LPSAwakeIntvl;

			} else {
				if (psc->LPSAwakeIntvl >
				    ieee->current_network.tim.tim_count)
					LPSAwakeIntvl_tmp = count +
					    (psc->LPSAwakeIntvl - count) -
					    ((psc->LPSAwakeIntvl - count) % period);
				else
					LPSAwakeIntvl_tmp = psc->LPSAwakeIntvl;
			}

			*time = ieee->current_network.last_dtim_sta_time
				+ msecs_to_jiffies(ieee->current_network.beacon_interval *
				LPSAwakeIntvl_tmp);
		}
	}

	return 1;
}
/* Power-save state machine, run from the ps_task work item.  Under
 * ieee->lock, decides whether to wake the NIC, put it to sleep until
 * the next DTIM, or do nothing (see rtllib_sta_ps_sleep()).
 */
static inline void rtllib_sta_ps(struct work_struct *work)
{
	struct rtllib_device *ieee;
	u64 time;
	short sleep;
	unsigned long flags, flags2;

	ieee = container_of(work, struct rtllib_device, ps_task);

	spin_lock_irqsave(&ieee->lock, flags);
	if ((ieee->ps == RTLLIB_PS_DISABLED ||
	     ieee->iw_mode != IW_MODE_INFRA ||
	     ieee->link_state != MAC80211_LINKED)) {
		spin_lock_irqsave(&ieee->mgmt_tx_lock, flags2);
		rtllib_sta_wakeup(ieee, 1);
		spin_unlock_irqrestore(&ieee->mgmt_tx_lock, flags2);
		/* Fix: when PS is disabled or we are not an associated
		 * infra station we must bail out here; previously the
		 * code fell through to the sleep decision below and
		 * could put the NIC to sleep with power saving off.
		 */
		goto out;
	}
	sleep = rtllib_sta_ps_sleep(ieee, &time);
	/* 2 wake, 1 sleep, 0 do nothing */
	if (sleep == 0)
		goto out;
	if (sleep == 1) {
		if (ieee->sta_sleep == LPS_IS_SLEEP) {
			ieee->enter_sleep_state(ieee->dev, time);
		} else if (ieee->sta_sleep == LPS_IS_WAKE) {
			spin_lock_irqsave(&ieee->mgmt_tx_lock, flags2);

			/* Tell the AP we are going to sleep first; the
			 * actual sleep happens from the TX-ack path.
			 */
			if (ieee->ps_is_queue_empty(ieee->dev)) {
				ieee->sta_sleep = LPS_WAIT_NULL_DATA_SEND;
				ieee->ack_tx_to_ieee = 1;
				rtllib_sta_ps_send_null_frame(ieee, 1);
				ieee->ps_time = time;
			}
			spin_unlock_irqrestore(&ieee->mgmt_tx_lock, flags2);
		}

		ieee->bAwakePktSent = false;

	} else if (sleep == 2) {
		spin_lock_irqsave(&ieee->mgmt_tx_lock, flags2);

		rtllib_sta_wakeup(ieee, 1);

		spin_unlock_irqrestore(&ieee->mgmt_tx_lock, flags2);
	}

out:
	spin_unlock_irqrestore(&ieee->lock, flags);
}
/* Bring the station out of power save.  When @nl is non-zero the AP is
 * also notified - via a null-data frame, or a PS-Poll when the IOT
 * peer mishandles null-data power saving.
 */
static void rtllib_sta_wakeup(struct rtllib_device *ieee, short nl)
{
	/* Already awake: at most send the AP notification. */
	if (ieee->sta_sleep == LPS_IS_WAKE) {
		if (nl) {
			if (ieee->ht_info->iot_action &
			    HT_IOT_ACT_NULL_DATA_POWER_SAVING) {
				ieee->ack_tx_to_ieee = 1;
				rtllib_sta_ps_send_null_frame(ieee, 0);
			} else {
				ieee->ack_tx_to_ieee = 1;
				rtllib_sta_ps_send_pspoll_frame(ieee);
			}
		}
		return;
	}

	if (ieee->sta_sleep == LPS_IS_SLEEP)
		ieee->sta_wake_up(ieee->dev);
	if (nl) {
		if (ieee->ht_info->iot_action &
		    HT_IOT_ACT_NULL_DATA_POWER_SAVING) {
			ieee->ack_tx_to_ieee = 1;
			rtllib_sta_ps_send_null_frame(ieee, 0);
		} else {
			ieee->ack_tx_to_ieee = 1;
			/* NOTE(review): unlike the already-awake branch
			 * above, this path sets the polling flag before
			 * the PS-Poll - looks intentional, confirm before
			 * unifying the two paths.
			 */
			ieee->polling = true;
			rtllib_sta_ps_send_pspoll_frame(ieee);
		}
	} else {
		ieee->sta_sleep = LPS_IS_WAKE;
		ieee->polling = false;
	}
}
/* TX-completion hook for power-save notification frames.  After a
 * successfully acknowledged PS null-data frame the NIC may finally be
 * put to sleep; a lost notification while awake is retransmitted
 * (without the PS bit).
 */
void rtllib_ps_tx_ack(struct rtllib_device *ieee, short success)
{
	unsigned long flags, flags2;

	spin_lock_irqsave(&ieee->lock, flags);

	if (ieee->sta_sleep == LPS_WAIT_NULL_DATA_SEND) {
		/* Null frame with PS bit set */
		if (success) {
			ieee->sta_sleep = LPS_IS_SLEEP;
			ieee->enter_sleep_state(ieee->dev, ieee->ps_time);
		}
		/* if the card report not success we can't be sure the AP
		 * has not RXed so we can't assume the AP believe us awake
		 */
	} else {/* 21112005 - tx again null without PS bit if lost */

		if ((ieee->sta_sleep == LPS_IS_WAKE) && !success) {
			spin_lock_irqsave(&ieee->mgmt_tx_lock, flags2);
			if (ieee->ht_info->iot_action &
			    HT_IOT_ACT_NULL_DATA_POWER_SAVING)
				rtllib_sta_ps_send_null_frame(ieee, 0);
			else
				rtllib_sta_ps_send_pspoll_frame(ieee);
			spin_unlock_irqrestore(&ieee->mgmt_tx_lock, flags2);
		}
	}
	spin_unlock_irqrestore(&ieee->lock, flags);
}
EXPORT_SYMBOL(rtllib_ps_tx_ack);
/* Dispatch a received action frame.  Only the block-ack category
 * (ADDBA request/response, DELBA) is handled; everything else is
 * silently ignored.
 */
static void rtllib_process_action(struct rtllib_device *ieee,
				  struct sk_buff *skb)
{
	struct rtllib_hdr_3addr *header = (struct rtllib_hdr_3addr *)skb->data;
	u8 *act = rtllib_get_payload((struct rtllib_hdr *)header);

	if (!act) {
		netdev_warn(ieee->dev,
			    "Error getting payload of action frame\n");
		return;
	}

	/* act[0] is the category, act[1] the action code. */
	if (act[0] != ACT_CAT_BA)
		return;

	switch (act[1]) {
	case ACT_ADDBAREQ:
		rtllib_rx_ADDBAReq(ieee, skb);
		break;
	case ACT_ADDBARSP:
		rtllib_rx_ADDBARsp(ieee, skb);
		break;
	case ACT_DELBA:
		rtllib_rx_DELBA(ieee, skb);
		break;
	}
}
/* Handle an incoming (re)association response while the state machine
 * is in RTLLIB_ASSOCIATING_AUTHENTICATED (infra mode, softmac assoc).
 * On success: records the AID, parses the response IEs for QoS/HT
 * parameters, saves a copy of the IEs for wext and completes the
 * association.  On an error status: retries the association procedure
 * or aborts once the retry limit is reached.
 * Returns 0 normally, 1 on an allocation/parse failure.
 */
static inline int
rtllib_rx_assoc_resp(struct rtllib_device *ieee, struct sk_buff *skb,
		     struct rtllib_rx_stats *rx_stats)
{
	u16 errcode;
	int aid;
	u8 *ies;
	struct rtllib_assoc_response_frame *assoc_resp;
	struct rtllib_hdr_3addr *header = (struct rtllib_hdr_3addr *)skb->data;
	u16 frame_ctl = le16_to_cpu(header->frame_ctl);

	netdev_dbg(ieee->dev, "received [RE]ASSOCIATION RESPONSE (%d)\n",
		   WLAN_FC_GET_STYPE(frame_ctl));

	if ((ieee->softmac_features & IEEE_SOFTMAC_ASSOCIATE) &&
	    ieee->link_state == RTLLIB_ASSOCIATING_AUTHENTICATED &&
	    (ieee->iw_mode == IW_MODE_INFRA)) {
		errcode = assoc_parse(ieee, skb, &aid);
		if (!errcode) {
			/* Scratch network used only to parse the IEs. */
			struct rtllib_network *network =
				 kzalloc(sizeof(struct rtllib_network),
				 GFP_ATOMIC);

			if (!network)
				return 1;
			ieee->link_state = MAC80211_LINKED;
			ieee->assoc_id = aid;
			ieee->softmac_stats.rx_ass_ok++;
			/* station support qos */
			/* Let the register setting default with Legacy station */
			assoc_resp = (struct rtllib_assoc_response_frame *)skb->data;
			if (ieee->current_network.qos_data.supported == 1) {
				if (rtllib_parse_info_param(ieee, assoc_resp->info_element,
							    rx_stats->len - sizeof(*assoc_resp),
							    network, rx_stats)) {
					kfree(network);
					return 1;
				}
				/* NOTE(review): the copies below assume the
				 * parsed peer HT cap/info lengths fit in
				 * PeerHTCapBuf/PeerHTInfoBuf - confirm the
				 * parser bounds bd_ht_cap_len/bd_ht_info_len.
				 */
				memcpy(ieee->ht_info->PeerHTCapBuf,
				       network->bssht.bd_ht_cap_buf,
				       network->bssht.bd_ht_cap_len);
				memcpy(ieee->ht_info->PeerHTInfoBuf,
				       network->bssht.bd_ht_info_buf,
				       network->bssht.bd_ht_info_len);
				ieee->handle_assoc_response(ieee->dev,
					 (struct rtllib_assoc_response_frame *)header, network);
			}
			kfree(network);

			/* Snapshot the response IEs for wext reporting. */
			kfree(ieee->assocresp_ies);
			ieee->assocresp_ies = NULL;
			ies = &(assoc_resp->info_element[0].id);
			ieee->assocresp_ies_len = (skb->data + skb->len) - ies;
			ieee->assocresp_ies = kmemdup(ies,
						      ieee->assocresp_ies_len,
						      GFP_ATOMIC);
			if (!ieee->assocresp_ies)
				ieee->assocresp_ies_len = 0;

			rtllib_associate_complete(ieee);
		} else {
			/* aid could not been allocated */
			ieee->softmac_stats.rx_ass_err++;
			netdev_info(ieee->dev,
				    "Association response status code 0x%x\n",
				    errcode);
			if (ieee->AsocRetryCount < RT_ASOC_RETRY_LIMIT)
				schedule_delayed_work(
					 &ieee->associate_procedure_wq, 0);
			else
				rtllib_associate_abort(ieee);
		}
	}
	return 0;
}
/* Process an authentication response: on success (open auth or no
 * challenge pending), decide the wireless mode to use with this AP
 * and move on to association; on a shared-key challenge, answer it.
 */
static void rtllib_rx_auth_resp(struct rtllib_device *ieee, struct sk_buff *skb)
{
	int errcode;
	u8 *challenge;
	int chlen = 0;
	bool bSupportNmode = true, bHalfSupportNmode = false;

	errcode = auth_parse(ieee->dev, skb, &challenge, &chlen);
	if (errcode) {
		ieee->softmac_stats.rx_auth_rs_err++;
		netdev_info(ieee->dev,
			    "Authentication response status code %d", errcode);
		rtllib_associate_abort(ieee);
		return;
	}

	if (ieee->open_wep || !challenge) {
		ieee->link_state = RTLLIB_ASSOCIATING_AUTHENTICATED;
		ieee->softmac_stats.rx_auth_rs_ok++;
		/* Unless the IOT workaround forces pure-N, check whether the
		 * security configuration allows N mode at all, and whether the
		 * AP only supports "half N" operation.
		 */
		if (!(ieee->ht_info->iot_action & HT_IOT_ACT_PURE_N_MODE)) {
			if (!ieee->GetNmodeSupportBySecCfg(ieee->dev)) {
				if (IsHTHalfNmodeAPs(ieee)) {
					bSupportNmode = true;
					bHalfSupportNmode = true;
				} else {
					bSupportNmode = false;
					bHalfSupportNmode = false;
				}
			}
		}
		/* Dummy wireless mode setting to avoid encryption issue */
		if (bSupportNmode) {
			ieee->set_wireless_mode(ieee->dev,
						ieee->current_network.mode);
		} else {
			/*TODO*/
			ieee->set_wireless_mode(ieee->dev, WIRELESS_MODE_G);
		}

		if ((ieee->current_network.mode == WIRELESS_MODE_N_24G) &&
		    bHalfSupportNmode) {
			netdev_info(ieee->dev, "======>enter half N mode\n");
			ieee->bHalfWirelessN24GMode = true;
		} else {
			ieee->bHalfWirelessN24GMode = false;
		}
		rtllib_associate_step2(ieee);
	} else {
		/* Shared-key authentication: send the challenge response. */
		rtllib_auth_challenge(ieee, challenge, chlen);
	}
}
static inline int
rtllib_rx_auth(struct rtllib_device *ieee, struct sk_buff *skb,
struct rtllib_rx_stats *rx_stats)
{
if (ieee->softmac_features & IEEE_SOFTMAC_ASSOCIATE) {
if (ieee->link_state == RTLLIB_ASSOCIATING_AUTHENTICATING &&
(ieee->iw_mode == IW_MODE_INFRA)) {
netdev_dbg(ieee->dev,
"Received authentication response");
rtllib_rx_auth_resp(ieee, skb);
}
}
return 0;
}
/* Handle a disassociation/deauthentication frame from our current AP:
 * tear down the link and (unless the AP uses CCMP/TKIP) schedule a
 * delayed re-association attempt.
 */
static inline int
rtllib_rx_deauth(struct rtllib_device *ieee, struct sk_buff *skb)
{
	struct rtllib_hdr_3addr *header = (struct rtllib_hdr_3addr *)skb->data;
	u16 frame_ctl;

	/* Ignore frames that are not from the BSS we are associated with. */
	if (memcmp(header->addr3, ieee->current_network.bssid, ETH_ALEN) != 0)
		return 0;

	/* FIXME for now repeat all the association procedure
	 * both for disassociation and deauthentication
	 */
	if ((ieee->softmac_features & IEEE_SOFTMAC_ASSOCIATE) &&
	    ieee->link_state == MAC80211_LINKED &&
	    (ieee->iw_mode == IW_MODE_INFRA)) {
		frame_ctl = le16_to_cpu(header->frame_ctl);
		netdev_info(ieee->dev,
			    "==========>received disassoc/deauth(%x) frame, reason code:%x\n",
			    WLAN_FC_GET_STYPE(frame_ctl),
			    ((struct rtllib_disassoc *)skb->data)->reason);
		ieee->link_state = RTLLIB_ASSOCIATING;
		ieee->softmac_stats.reassoc++;
		ieee->is_roaming = true;
		ieee->link_detect_info.bBusyTraffic = false;
		rtllib_disassociate(ieee);
		RemovePeerTS(ieee, header->addr2);
		/* For open/WEP networks retry association shortly; for
		 * CCMP/TKIP, re-keying is required first, so do not.
		 */
		if (!(ieee->rtllib_ap_sec_type(ieee) &
		    (SEC_ALG_CCMP | SEC_ALG_TKIP)))
			schedule_delayed_work(
				&ieee->associate_procedure_wq, 5);
	}
	return 0;
}
/* Softmac dispatcher for received management frames. Returns 1 when
 * the frame handler failed (caller drops), -1 for unhandled subtypes,
 * and 0 otherwise. Nothing is processed before the protocol starts.
 */
inline int rtllib_rx_frame_softmac(struct rtllib_device *ieee,
				   struct sk_buff *skb,
				   struct rtllib_rx_stats *rx_stats, u16 type,
				   u16 stype)
{
	struct rtllib_hdr_3addr *hdr = (struct rtllib_hdr_3addr *)skb->data;
	u16 fc;

	if (!ieee->proto_started)
		return 0;

	fc = le16_to_cpu(hdr->frame_ctl);
	switch (WLAN_FC_GET_STYPE(fc)) {
	case RTLLIB_STYPE_ASSOC_RESP:
	case RTLLIB_STYPE_REASSOC_RESP:
		if (rtllib_rx_assoc_resp(ieee, skb, rx_stats) == 1)
			return 1;
		return 0;
	case RTLLIB_STYPE_ASSOC_REQ:
	case RTLLIB_STYPE_REASSOC_REQ:
		/* Station mode: association requests are ignored. */
		return 0;
	case RTLLIB_STYPE_AUTH:
		rtllib_rx_auth(ieee, skb, rx_stats);
		return 0;
	case RTLLIB_STYPE_DISASSOC:
	case RTLLIB_STYPE_DEAUTH:
		rtllib_rx_deauth(ieee, skb);
		return 0;
	case RTLLIB_STYPE_MANAGE_ACT:
		rtllib_process_action(ieee, skb);
		return 0;
	default:
		return -1;
	}
}
/* The following are for simpler TX queue management.
 * Instead of using netif_[stop/wake]_queue, the driver
 * will use these two functions (plus a reset one) that
 * internally use the kernel netif_* calls and take
 * care of the ieee802.11 fragmentation.
 * The driver receives one fragment at a time and may
 * call the stop function when it does not have enough
 * room to TX an entire packet.
 * This might be useful if each fragment needs its own
 * descriptor; in that case just keeping the total free memory
 * above the max fragmentation threshold is not enough. If the
 * ieee802.11 stack passed a TXB struct then you would need
 * to keep N free descriptors where
 * N = MAX_PACKET_SIZE / MIN_FRAG_TRESHOLD
 * This way you need just one, and the 802.11 stack
 * will take care of buffering fragments and passing them to
 * the driver later, when it wakes the queue.
 */
/* Transmit a TXB (set of 802.11 fragments): wake the station from
 * power save, then either hand each fragment straight to the NIC or,
 * when the hardware queue is busy/stopped, park it on the wait queue.
 */
void rtllib_softmac_xmit(struct rtllib_txb *txb, struct rtllib_device *ieee)
{
	unsigned int queue_index = txb->queue_index;
	unsigned long flags;
	int i;
	struct cb_desc *tcb_desc = NULL;
	unsigned long queue_len = 0;

	spin_lock_irqsave(&ieee->lock, flags);

	/* called with 2nd parm 0, no tx mgmt lock required */
	rtllib_sta_wakeup(ieee, 0);

	/* update the tx status */
	tcb_desc = (struct cb_desc *)(txb->fragments[0]->cb +
		   MAX_DEV_ADDR_SIZE);
	if (tcb_desc->bMulticast)
		ieee->stats.multicast++;

	/* if xmit available, just xmit it immediately, else just insert it to
	 * the wait queue
	 */
	for (i = 0; i < txb->nr_frags; i++) {
		queue_len = skb_queue_len(&ieee->skb_waitQ[queue_index]);
		if ((queue_len != 0) ||
		    (!ieee->check_nic_enough_desc(ieee->dev, queue_index)) ||
		    (ieee->queue_stop)) {
			/* insert the skb packet to the wait queue
			 * as for the completion function, it does not need
			 * to check it any more.
			 */
			/* 200 is an arbitrary cap; beyond it fragments are
			 * dropped to bound memory use.
			 */
			if (queue_len < 200)
				skb_queue_tail(&ieee->skb_waitQ[queue_index],
					       txb->fragments[i]);
			else
				kfree_skb(txb->fragments[i]);
		} else {
			ieee->softmac_data_hard_start_xmit(
				txb->fragments[i],
				ieee->dev, ieee->rate);
		}
	}

	/* Fragments have been consumed (queued or dropped); free the TXB
	 * container itself.
	 */
	rtllib_txb_free(txb);

	spin_unlock_irqrestore(&ieee->lock, flags);
}
/* Drop all pending TX state (management queue and any half-sent TXB)
 * and re-enable the soft TX queue.
 */
void rtllib_reset_queue(struct rtllib_device *ieee)
{
	unsigned long irq_flags;

	spin_lock_irqsave(&ieee->lock, irq_flags);

	init_mgmt_queue(ieee);
	if (ieee->tx_pending.txb != NULL) {
		rtllib_txb_free(ieee->tx_pending.txb);
		ieee->tx_pending.txb = NULL;
	}
	ieee->queue_stop = 0;

	spin_unlock_irqrestore(&ieee->lock, irq_flags);
}
EXPORT_SYMBOL(rtllib_reset_queue);
/* Refresh trans_start on every netdev TX queue (so the watchdog does
 * not fire while we are stopped), then stop them all.
 */
void rtllib_stop_all_queues(struct rtllib_device *ieee)
{
	unsigned int q;

	for (q = 0; q < ieee->dev->num_tx_queues; q++)
		txq_trans_cond_update(netdev_get_tx_queue(ieee->dev, q));

	netif_tx_stop_all_queues(ieee->dev);
}
/* Re-enable transmission on every netdev TX queue. */
void rtllib_wake_all_queues(struct rtllib_device *ieee)
{
	netif_tx_wake_all_queues(ieee->dev);
}
/* Enter monitor mode: when raw TX is enabled, mark the carrier up so
 * the stack can inject frames.
 */
static void rtllib_start_monitor_mode(struct rtllib_device *ieee)
{
	/* reset hardware status */
	if (ieee->raw_tx)
		netif_carrier_on(ieee->dev);
}
/* Delayed-work handler that brings up ad-hoc (IBSS) mode: look for an
 * existing cell, scan if none was found, and finally create a new cell
 * with default rates when the network is still not present.
 */
static void rtllib_start_ibss_wq(void *data)
{
	struct rtllib_device *ieee = container_of_dwork_rsl(data,
				     struct rtllib_device, start_ibss_wq);
	/* iwconfig mode ad-hoc will schedule this and return
	 * on the other hand this will block further iwconfig SET
	 * operations because of the wx_mutex hold.
	 * Anyway some most set operations set a flag to speed-up
	 * (abort) this wq (when syncro scanning) before sleeping
	 * on the mutex
	 */
	if (!ieee->proto_started) {
		netdev_info(ieee->dev, "==========oh driver down return\n");
		return;
	}
	mutex_lock(&ieee->wx_mutex);

	/* Without a user-supplied SSID, fall back to the default ESSID. */
	if (ieee->current_network.ssid_len == 0) {
		strscpy(ieee->current_network.ssid, RTLLIB_DEFAULT_TX_ESSID,
			sizeof(ieee->current_network.ssid));
		ieee->current_network.ssid_len = strlen(RTLLIB_DEFAULT_TX_ESSID);
		ieee->ssid_set = 1;
	}

	ieee->link_state = MAC80211_NOLINK;
	ieee->mode = WIRELESS_MODE_G;
	/* check if we have this cell in our network list */
	rtllib_softmac_check_all_nets(ieee);

	/* if not then the state is not linked. Maybe the user switched to
	 * ad-hoc mode just after being in monitor mode, or just after
	 * being very few time in managed mode (so the card have had no
	 * time to scan all the chans..) or we have just run up the iface
	 * after setting ad-hoc mode. So we have to give another try..
	 * Here, in ibss mode, should be safe to do this without extra care
	 * (in bss mode we had to make sure no-one tried to associate when
	 * we had just checked the ieee->link_state and we was going to start the
	 * scan) because in ibss mode the rtllib_new_net function, when
	 * finds a good net, just set the ieee->link_state to MAC80211_LINKED,
	 * so, at worst, we waste a bit of time to initiate an unneeded syncro
	 * scan, that will stop at the first round because it sees the state
	 * associated.
	 */
	if (ieee->link_state == MAC80211_NOLINK)
		rtllib_start_scan_syncro(ieee);

	/* the network definitively is not here.. create a new cell */
	if (ieee->link_state == MAC80211_NOLINK) {
		netdev_info(ieee->dev, "creating new IBSS cell\n");
		ieee->current_network.channel = ieee->bss_start_channel;
		if (!ieee->wap_set)
			eth_random_addr(ieee->current_network.bssid);

		/* Advertise the four basic CCK rates... */
		ieee->current_network.rates_len = 4;
		ieee->current_network.rates[0] =
			RTLLIB_BASIC_RATE_MASK | RTLLIB_CCK_RATE_1MB;
		ieee->current_network.rates[1] =
			RTLLIB_BASIC_RATE_MASK | RTLLIB_CCK_RATE_2MB;
		ieee->current_network.rates[2] =
			RTLLIB_BASIC_RATE_MASK | RTLLIB_CCK_RATE_5MB;
		ieee->current_network.rates[3] =
			RTLLIB_BASIC_RATE_MASK | RTLLIB_CCK_RATE_11MB;

		/* ...plus the eight extended OFDM rates. */
		ieee->current_network.rates_ex_len = 8;
		ieee->current_network.rates_ex[0] =
			RTLLIB_OFDM_RATE_6MB;
		ieee->current_network.rates_ex[1] =
			RTLLIB_OFDM_RATE_9MB;
		ieee->current_network.rates_ex[2] =
			RTLLIB_OFDM_RATE_12MB;
		ieee->current_network.rates_ex[3] =
			RTLLIB_OFDM_RATE_18MB;
		ieee->current_network.rates_ex[4] =
			RTLLIB_OFDM_RATE_24MB;
		ieee->current_network.rates_ex[5] =
			RTLLIB_OFDM_RATE_36MB;
		ieee->current_network.rates_ex[6] =
			RTLLIB_OFDM_RATE_48MB;
		ieee->current_network.rates_ex[7] =
			RTLLIB_OFDM_RATE_54MB;
		/* 108 = 54 Mbps in units of 0.5 Mbps */
		ieee->rate = 108;

		ieee->current_network.qos_data.supported = 0;
		ieee->set_wireless_mode(ieee->dev, WIRELESS_MODE_G);
		ieee->current_network.mode = ieee->mode;
		ieee->current_network.atim_window = 0;
		ieee->current_network.capability = WLAN_CAPABILITY_IBSS;
	}

	netdev_info(ieee->dev, "%s(): ieee->mode = %d\n", __func__, ieee->mode);

	if (ieee->mode == WIRELESS_MODE_N_24G)
		HTUseDefaultSetting(ieee);
	else
		ieee->ht_info->bCurrentHTSupport = false;

	/* Note: media status is written while link_state is still NOLINK,
	 * then the state is flipped to LINKED below.
	 */
	ieee->SetHwRegHandler(ieee->dev, HW_VAR_MEDIA_STATUS,
			      (u8 *)(&ieee->link_state));

	ieee->link_state = MAC80211_LINKED;
	ieee->link_change(ieee->dev);

	HTSetConnectBwMode(ieee, HT_CHANNEL_WIDTH_20, HT_EXTCHNL_OFFSET_NO_EXT);

	rtllib_start_send_beacons(ieee);

	notify_wx_assoc_event(ieee);
	netif_carrier_on(ieee->dev);

	mutex_unlock(&ieee->wx_mutex);
}
/* Kick off IBSS bring-up asynchronously (150 ms delay gives pending
 * configuration calls a chance to finish first).
 */
inline void rtllib_start_ibss(struct rtllib_device *ieee)
{
	schedule_delayed_work(&ieee->start_ibss_wq, msecs_to_jiffies(150));
}
/* this is called only in user context, with wx_mutex held */
/* Start infrastructure (BSS) operation: check the known network list
 * and, if still unlinked, start a background scan.
 * Called only in user context with wx_mutex held (see comment above).
 */
static void rtllib_start_bss(struct rtllib_device *ieee)
{
	unsigned long flags;

	/* With 802.11d enabled, refuse to start before a valid country
	 * IE or a global regulatory domain is available.
	 */
	if (IS_DOT11D_ENABLE(ieee) && !IS_COUNTRY_IE_VALID(ieee)) {
		if (!ieee->global_domain)
			return;
	}
	/* check if we have already found the net we
	 * are interested in (if any).
	 * if not (we are disassociated and we are not
	 * in associating / authenticating phase) start the background scanning.
	 */
	rtllib_softmac_check_all_nets(ieee);

	/* ensure no-one start an associating process (thus setting
	 * the ieee->link_state to rtllib_ASSOCIATING) while we
	 * have just checked it and we are going to enable scan.
	 * The rtllib_new_net function is always called with
	 * lock held (from both rtllib_softmac_check_all_nets and
	 * the rx path), so we cannot be in the middle of such function
	 */
	spin_lock_irqsave(&ieee->lock, flags);

	if (ieee->link_state == MAC80211_NOLINK)
		rtllib_start_scan(ieee);
	spin_unlock_irqrestore(&ieee->lock, flags);
}
/* Delayed-work shim that invokes the driver's link_change callback in
 * process context.
 */
static void rtllib_link_change_wq(void *data)
{
	struct rtllib_device *ieee = container_of_dwork_rsl(data,
				     struct rtllib_device, link_change_wq);
	ieee->link_change(ieee->dev);
}
/* called only in userspace context */
void rtllib_disassociate(struct rtllib_device *ieee)
{
netif_carrier_off(ieee->dev);
if (ieee->softmac_features & IEEE_SOFTMAC_TX_QUEUE)
rtllib_reset_queue(ieee);
if (IS_DOT11D_ENABLE(ieee))
dot11d_reset(ieee);
ieee->link_state = MAC80211_NOLINK;
ieee->is_set_key = false;
ieee->wap_set = 0;
schedule_delayed_work(&ieee->link_change_wq, 0);
notify_wx_assoc_event(ieee);
}
/* Delayed-work handler that retries association after a failure:
 * drop back to NOLINK, re-check known networks, and scan if still
 * disconnected.
 */
static void rtllib_associate_retry_wq(void *data)
{
	struct rtllib_device *ieee = container_of_dwork_rsl(data,
				     struct rtllib_device, associate_retry_wq);
	unsigned long flags;

	mutex_lock(&ieee->wx_mutex);
	if (!ieee->proto_started)
		goto exit;

	/* Bail out if something else already moved the state machine on. */
	if (ieee->link_state != RTLLIB_ASSOCIATING_RETRY)
		goto exit;

	/* until we do not set the state to MAC80211_NOLINK
	 * there are no possibility to have someone else trying
	 * to start an association procedure (we get here with
	 * ieee->link_state = RTLLIB_ASSOCIATING).
	 * When we set the state to MAC80211_NOLINK it is possible
	 * that the RX path run an attempt to associate, but
	 * both rtllib_softmac_check_all_nets and the
	 * RX path works with ieee->lock held so there are no
	 * problems. If we are still disassociated then start a scan.
	 * the lock here is necessary to ensure no one try to start
	 * an association procedure when we have just checked the
	 * state and we are going to start the scan.
	 */
	ieee->beinretry = true;
	ieee->link_state = MAC80211_NOLINK;

	rtllib_softmac_check_all_nets(ieee);

	spin_lock_irqsave(&ieee->lock, flags);

	if (ieee->link_state == MAC80211_NOLINK)
		rtllib_start_scan(ieee);
	spin_unlock_irqrestore(&ieee->lock, flags);

	ieee->beinretry = false;
exit:
	mutex_unlock(&ieee->wx_mutex);
}
/* Build a beacon frame by reusing the probe-response builder (the two
 * frame bodies share the same layout) and rewriting the frame subtype.
 * Returns NULL on allocation failure.
 */
static struct sk_buff *rtllib_get_beacon_(struct rtllib_device *ieee)
{
	static const u8 broadcast_addr[] = {
		0xff, 0xff, 0xff, 0xff, 0xff, 0xff
	};
	struct sk_buff *skb;
	struct rtllib_probe_response *b;

	skb = rtllib_probe_resp(ieee, broadcast_addr);
	if (!skb)
		return NULL;

	b = (struct rtllib_probe_response *)skb->data;
	b->header.frame_ctl = cpu_to_le16(RTLLIB_STYPE_BEACON);

	return skb;
}
/* Build a beacon frame and stamp it with the next sequence number
 * from management-sequence counter 0.
 */
struct sk_buff *rtllib_get_beacon(struct rtllib_device *ieee)
{
	struct rtllib_probe_response *beacon;
	struct sk_buff *skb = rtllib_get_beacon_(ieee);

	if (!skb)
		return NULL;

	beacon = (struct rtllib_probe_response *)skb->data;
	beacon->header.seq_ctl = cpu_to_le16(ieee->seq_ctrl[0] << 4);

	/* The 12-bit sequence number wraps after 0xFFF. */
	if (ieee->seq_ctrl[0] != 0xFFF)
		ieee->seq_ctrl[0]++;
	else
		ieee->seq_ctrl[0] = 0;

	return skb;
}
EXPORT_SYMBOL(rtllib_get_beacon);
/* Public wrapper around rtllib_stop_protocol(): stop any synchronous
 * scan first, then stop the protocol under wx_mutex.
 */
void rtllib_softmac_stop_protocol(struct rtllib_device *ieee, u8 mesh_flag,
				  u8 shutdown)
{
	rtllib_stop_scan_syncro(ieee);
	mutex_lock(&ieee->wx_mutex);
	rtllib_stop_protocol(ieee, shutdown);
	mutex_unlock(&ieee->wx_mutex);
}
EXPORT_SYMBOL(rtllib_softmac_stop_protocol);
/* Stop the softmac protocol: cancel timers/works, stop scanning and
 * beaconing, disassociate if linked, and free cached association IEs.
 * @shutdown: non-zero for a full shutdown (also enters IPS and removes
 * all traffic streams); zero for a lighter stop.
 */
void rtllib_stop_protocol(struct rtllib_device *ieee, u8 shutdown)
{
	if (!ieee->proto_started)
		return;

	if (shutdown) {
		ieee->proto_started = 0;
		ieee->proto_stoppping = 1;
		ieee->rtllib_ips_leave(ieee->dev);
	}

	rtllib_stop_send_beacons(ieee);
	del_timer_sync(&ieee->associate_timer);
	cancel_delayed_work_sync(&ieee->associate_retry_wq);
	cancel_delayed_work_sync(&ieee->start_ibss_wq);
	cancel_delayed_work_sync(&ieee->link_change_wq);
	rtllib_stop_scan(ieee);

	/* Abort an in-progress association attempt. */
	if (ieee->link_state <= RTLLIB_ASSOCIATING_AUTHENTICATED)
		ieee->link_state = MAC80211_NOLINK;

	if (ieee->link_state == MAC80211_LINKED) {
		if (ieee->iw_mode == IW_MODE_INFRA)
			SendDisassociation(ieee, 1, WLAN_REASON_DEAUTH_LEAVING);
		rtllib_disassociate(ieee);
	}

	if (shutdown) {
		RemoveAllTS(ieee);
		ieee->proto_stoppping = 0;
	}
	/* Drop cached association request/response IEs. */
	kfree(ieee->assocreq_ies);
	ieee->assocreq_ies = NULL;
	ieee->assocreq_ies_len = 0;
	kfree(ieee->assocresp_ies);
	ieee->assocresp_ies = NULL;
	ieee->assocresp_ies_len = 0;
}
/* Public wrapper around rtllib_start_protocol() taken under wx_mutex. */
void rtllib_softmac_start_protocol(struct rtllib_device *ieee, u8 mesh_flag)
{
	mutex_lock(&ieee->wx_mutex);
	rtllib_start_protocol(ieee);
	mutex_unlock(&ieee->wx_mutex);
}
EXPORT_SYMBOL(rtllib_softmac_start_protocol);
/* Start the softmac protocol: pick a default channel and beacon
 * interval if none are set, reset per-TID RX dedup state, then kick
 * off the mode-specific start routine.
 */
void rtllib_start_protocol(struct rtllib_device *ieee)
{
	short ch = 0;
	int i = 0;

	rtllib_update_active_chan_map(ieee);

	if (ieee->proto_started)
		return;

	ieee->proto_started = 1;

	/* No channel configured: use the first active one. */
	if (ieee->current_network.channel == 0) {
		do {
			ch++;
			if (ch > MAX_CHANNEL_NUMBER)
				return; /* no channel found */
		} while (!ieee->active_channel_map[ch]);
		ieee->current_network.channel = ch;
	}

	if (ieee->current_network.beacon_interval == 0)
		ieee->current_network.beacon_interval = 100;

	/* Reset RX duplicate-detection state; 17 slots — presumably 16
	 * TIDs plus one extra — TODO confirm against the RX path.
	 */
	for (i = 0; i < 17; i++) {
		ieee->last_rxseq_num[i] = -1;
		ieee->last_rxfrag_num[i] = -1;
		ieee->last_packet_time[i] = 0;
	}

	ieee->wmm_acm = 0;
	/* if the user set the MAC of the ad-hoc cell and then
	 * switch to managed mode, shall we make sure that association
	 * attempts does not fail just because the user provide the essid
	 * and the nic is still checking for the AP MAC ??
	 */
	switch (ieee->iw_mode) {
	case IW_MODE_INFRA:
		rtllib_start_bss(ieee);
		break;
	case IW_MODE_ADHOC:
		rtllib_start_ibss(ieee);
		break;
	case IW_MODE_MONITOR:
		rtllib_start_monitor_mode(ieee);
		break;
	}
}
/* Initialize all softmac state: defaults, timers, work items, mutexes
 * and locks. Returns 0 on success, -ENOMEM if 802.11d state cannot be
 * allocated.
 */
int rtllib_softmac_init(struct rtllib_device *ieee)
{
	int i;

	memset(&ieee->current_network, 0, sizeof(struct rtllib_network));

	ieee->link_state = MAC80211_NOLINK;
	for (i = 0; i < 5; i++)
		ieee->seq_ctrl[i] = 0;

	ieee->dot11d_info = kzalloc(sizeof(struct rt_dot11d_info), GFP_ATOMIC);
	if (!ieee->dot11d_info)
		return -ENOMEM;

	/* Link-detection bookkeeping starts from a clean slate. */
	ieee->link_detect_info.SlotIndex = 0;
	ieee->link_detect_info.SlotNum = 2;
	ieee->link_detect_info.NumRecvBcnInPeriod = 0;
	ieee->link_detect_info.NumRecvDataInPeriod = 0;
	ieee->link_detect_info.NumTxOkInPeriod = 0;
	ieee->link_detect_info.NumRxOkInPeriod = 0;
	ieee->link_detect_info.NumRxUnicastOkInPeriod = 0;
	ieee->bIsAggregateFrame = false;
	ieee->assoc_id = 0;
	ieee->queue_stop = 0;
	ieee->scanning_continue = 0;
	ieee->softmac_features = 0;
	ieee->wap_set = 0;
	ieee->ssid_set = 0;
	ieee->proto_started = 0;
	ieee->proto_stoppping = 0;
	ieee->basic_rate = RTLLIB_DEFAULT_BASIC_RATE;
	/* 22 = 11 Mbps in units of 0.5 Mbps */
	ieee->rate = 22;
	ieee->ps = RTLLIB_PS_DISABLED;
	ieee->sta_sleep = LPS_IS_WAKE;

	/* Default HT operational rate sets (RX and TX). */
	ieee->reg_dot11ht_oper_rate_set[0] = 0xff;
	ieee->reg_dot11ht_oper_rate_set[1] = 0xff;
	ieee->reg_dot11ht_oper_rate_set[4] = 0x01;

	ieee->reg_dot11tx_ht_oper_rate_set[0] = 0xff;
	ieee->reg_dot11tx_ht_oper_rate_set[1] = 0xff;
	ieee->reg_dot11tx_ht_oper_rate_set[4] = 0x01;

	ieee->FirstIe_InScan = false;
	ieee->actscanning = false;
	ieee->beinretry = false;
	ieee->is_set_key = false;
	init_mgmt_queue(ieee);

	ieee->tx_pending.txb = NULL;
	timer_setup(&ieee->associate_timer, rtllib_associate_abort_cb, 0);
	timer_setup(&ieee->beacon_timer, rtllib_send_beacon_cb, 0);

	INIT_DELAYED_WORK(&ieee->link_change_wq, (void *)rtllib_link_change_wq);
	INIT_DELAYED_WORK(&ieee->start_ibss_wq, (void *)rtllib_start_ibss_wq);
	INIT_WORK(&ieee->associate_complete_wq, (void *)rtllib_associate_complete_wq);
	INIT_DELAYED_WORK(&ieee->associate_procedure_wq, (void *)rtllib_associate_procedure_wq);
	INIT_DELAYED_WORK(&ieee->softmac_scan_wq, (void *)rtllib_softmac_scan_wq);
	INIT_DELAYED_WORK(&ieee->associate_retry_wq, (void *)rtllib_associate_retry_wq);
	INIT_WORK(&ieee->wx_sync_scan_wq, (void *)rtllib_wx_sync_scan_wq);

	mutex_init(&ieee->wx_mutex);
	mutex_init(&ieee->scan_mutex);
	mutex_init(&ieee->ips_mutex);

	spin_lock_init(&ieee->mgmt_tx_lock);
	spin_lock_init(&ieee->beacon_lock);

	INIT_WORK(&ieee->ps_task, rtllib_sta_ps);

	return 0;
}
/* Tear down softmac state allocated by rtllib_softmac_init(): free the
 * 802.11d info and synchronously cancel every timer and work item.
 */
void rtllib_softmac_free(struct rtllib_device *ieee)
{
	/* NOTE(review): works are cancelled while wx_mutex is held;
	 * several work handlers also take wx_mutex — presumably they
	 * cannot be pending at this point, verify against callers.
	 */
	mutex_lock(&ieee->wx_mutex);
	kfree(ieee->dot11d_info);
	ieee->dot11d_info = NULL;
	del_timer_sync(&ieee->associate_timer);

	cancel_delayed_work_sync(&ieee->associate_retry_wq);
	cancel_delayed_work_sync(&ieee->associate_procedure_wq);
	cancel_delayed_work_sync(&ieee->softmac_scan_wq);
	cancel_delayed_work_sync(&ieee->start_ibss_wq);
	cancel_delayed_work_sync(&ieee->hw_wakeup_wq);
	cancel_delayed_work_sync(&ieee->hw_sleep_wq);
	cancel_delayed_work_sync(&ieee->link_change_wq);
	cancel_work_sync(&ieee->associate_complete_wq);
	cancel_work_sync(&ieee->ips_leave_wq);
	cancel_work_sync(&ieee->wx_sync_scan_wq);
	cancel_work_sync(&ieee->ps_task);
	mutex_unlock(&ieee->wx_mutex);
}
/* Build a deauthentication frame addressed to the given BSS with the
 * supplied reason code. Returns NULL on allocation failure.
 */
static inline struct sk_buff *
rtllib_disauth_skb(struct rtllib_network *beacon,
		   struct rtllib_device *ieee, u16 asRsn)
{
	struct rtllib_disauth *frame;
	struct sk_buff *skb;
	int len = sizeof(struct rtllib_disauth) + ieee->tx_headroom;

	skb = dev_alloc_skb(len);
	if (skb == NULL)
		return NULL;

	/* Leave room for the driver's TX header. */
	skb_reserve(skb, ieee->tx_headroom);

	frame = skb_put(skb, sizeof(struct rtllib_disauth));
	frame->header.frame_ctl = cpu_to_le16(RTLLIB_STYPE_DEAUTH);
	frame->header.duration_id = 0;

	ether_addr_copy(frame->header.addr1, beacon->bssid);
	ether_addr_copy(frame->header.addr2, ieee->dev->dev_addr);
	ether_addr_copy(frame->header.addr3, beacon->bssid);

	frame->reason = cpu_to_le16(asRsn);
	return skb;
}
/* Build a disassociation frame addressed to the given BSS with the
 * supplied reason code. Returns NULL on allocation failure.
 */
static inline struct sk_buff *
rtllib_disassociate_skb(struct rtllib_network *beacon,
			struct rtllib_device *ieee, u16 asRsn)
{
	struct rtllib_disassoc *frame;
	struct sk_buff *skb;
	int len = sizeof(struct rtllib_disassoc) + ieee->tx_headroom;

	skb = dev_alloc_skb(len);
	if (skb == NULL)
		return NULL;

	/* Leave room for the driver's TX header. */
	skb_reserve(skb, ieee->tx_headroom);

	frame = skb_put(skb, sizeof(struct rtllib_disassoc));
	frame->header.frame_ctl = cpu_to_le16(RTLLIB_STYPE_DISASSOC);
	frame->header.duration_id = 0;

	ether_addr_copy(frame->header.addr1, beacon->bssid);
	ether_addr_copy(frame->header.addr2, ieee->dev->dev_addr);
	ether_addr_copy(frame->header.addr3, beacon->bssid);

	frame->reason = cpu_to_le16(asRsn);
	return skb;
}
/* Send either a deauthentication (@deauth true) or a disassociation
 * frame to the currently associated network with reason @asRsn.
 */
void SendDisassociation(struct rtllib_device *ieee, bool deauth, u16 asRsn)
{
	struct rtllib_network *net = &ieee->current_network;
	struct sk_buff *skb;

	skb = deauth ? rtllib_disauth_skb(net, ieee, asRsn)
		     : rtllib_disassociate_skb(net, ieee, asRsn);

	if (skb)
		softmac_mgmt_xmit(skb, ieee);
}
/* Classify the current AP's security setup from its capability bits,
 * our crypto state and the cached WPA/RSN IE:
 * SEC_ALG_WEP when privacy is on with no WPA/RSN IE, SEC_ALG_CCMP or
 * SEC_ALG_TKIP from the IE's cipher suite, SEC_ALG_NONE otherwise.
 */
u8 rtllib_ap_sec_type(struct rtllib_device *ieee)
{
	/* OUI+type suffixes identifying AES-CCMP in vendor WPA and RSN
	 * IEs. Fix: declared const — these tables are only ever read
	 * (via memcmp); non-const statics live in writable data and
	 * invite accidental modification.
	 */
	static const u8 ccmp_ie[4] = {0x00, 0x50, 0xf2, 0x04};
	static const u8 ccmp_rsn_ie[4] = {0x00, 0x0f, 0xac, 0x04};
	int wpa_ie_len = ieee->wpa_ie_len;
	struct lib80211_crypt_data *crypt;
	int encrypt;

	crypt = ieee->crypt_info.crypt[ieee->crypt_info.tx_keyidx];
	encrypt = (ieee->current_network.capability & WLAN_CAPABILITY_PRIVACY)
		|| (crypt && crypt->ops && (strcmp(crypt->ops->name, "R-WEP") == 0));

	/* simply judge */
	if (encrypt && (wpa_ie_len == 0)) {
		return SEC_ALG_WEP;
	} else if ((wpa_ie_len != 0)) {
		/* 0xdd = vendor-specific WPA IE (cipher at offset 14),
		 * 0x30 = RSN IE (cipher suite at offset 10).
		 */
		if (((ieee->wpa_ie[0] == 0xdd) &&
		    (!memcmp(&(ieee->wpa_ie[14]), ccmp_ie, 4))) ||
		    ((ieee->wpa_ie[0] == 0x30) &&
		    (!memcmp(&ieee->wpa_ie[10], ccmp_rsn_ie, 4))))
			return SEC_ALG_CCMP;
		else
			return SEC_ALG_TKIP;
	} else {
		return SEC_ALG_NONE;
	}
}
/* Leave the current IBSS cell: drop the link, invalidate the BSSID
 * (0x55 filler pattern), put the hardware in no-link mode, stop
 * beaconing, disable BSSID filtering and notify userspace.
 */
static void rtllib_MgntDisconnectIBSS(struct rtllib_device *rtllib)
{
	u8 OpMode;
	u8 i;
	bool bFilterOutNonAssociatedBSSID = false;

	rtllib->link_state = MAC80211_NOLINK;

	/* Overwrite the BSSID with a recognizable dummy pattern. */
	for (i = 0; i < 6; i++)
		rtllib->current_network.bssid[i] = 0x55;

	rtllib->OpMode = RT_OP_MODE_NO_LINK;
	rtllib->SetHwRegHandler(rtllib->dev, HW_VAR_BSSID,
				rtllib->current_network.bssid);
	OpMode = RT_OP_MODE_NO_LINK;
	rtllib->SetHwRegHandler(rtllib->dev, HW_VAR_MEDIA_STATUS, &OpMode);
	rtllib_stop_send_beacons(rtllib);

	bFilterOutNonAssociatedBSSID = false;
	rtllib->SetHwRegHandler(rtllib->dev, HW_VAR_CECHK_BSSID,
				(u8 *)(&bFilterOutNonAssociatedBSSID));
	notify_wx_assoc_event(rtllib);
}
/* MLME disassociation request: remove the peer's traffic streams and,
 * if @asSta is our current AP, fully tear down the link and reset the
 * hardware BSSID/media state.
 */
static void rtllib_MlmeDisassociateRequest(struct rtllib_device *rtllib,
					   u8 *asSta, u8 asRsn)
{
	u8 i;
	u8 OpMode;

	RemovePeerTS(rtllib, asSta);

	if (memcmp(rtllib->current_network.bssid, asSta, 6) == 0) {
		rtllib->link_state = MAC80211_NOLINK;

		/* Overwrite the BSSID with a recognizable dummy pattern. */
		for (i = 0; i < 6; i++)
			rtllib->current_network.bssid[i] = 0x22;
		OpMode = RT_OP_MODE_NO_LINK;
		rtllib->OpMode = RT_OP_MODE_NO_LINK;
		rtllib->SetHwRegHandler(rtllib->dev, HW_VAR_MEDIA_STATUS,
					(u8 *)(&OpMode));
		rtllib_disassociate(rtllib);

		rtllib->SetHwRegHandler(rtllib->dev, HW_VAR_BSSID,
					rtllib->current_network.bssid);
	}
}
/* Disconnect from the current AP: disable BSSID filtering in hardware,
 * issue the MLME disassociation request with reason @asRsn, and mark
 * the link down.
 */
static void
rtllib_MgntDisconnectAP(
	struct rtllib_device *rtllib,
	u8 asRsn
)
{
	/* Fix: the variable was initialized to false and then immediately
	 * reassigned false — the redundant dead store is removed.
	 */
	bool bFilterOutNonAssociatedBSSID = false;

	rtllib->SetHwRegHandler(rtllib->dev, HW_VAR_CECHK_BSSID,
				(u8 *)(&bFilterOutNonAssociatedBSSID));
	rtllib_MlmeDisassociateRequest(rtllib, rtllib->current_network.bssid,
				       asRsn);

	rtllib->link_state = MAC80211_NOLINK;
}
/* Disconnect from the current cell: wake the NIC first if power save
 * is active, then tear the link down according to the operating mode.
 * Always reports success.
 */
bool rtllib_MgntDisconnect(struct rtllib_device *rtllib, u8 asRsn)
{
	if (rtllib->ps != RTLLIB_PS_DISABLED)
		rtllib->sta_wake_up(rtllib->dev);

	if (rtllib->link_state == MAC80211_LINKED) {
		switch (rtllib->iw_mode) {
		case IW_MODE_ADHOC:
			rtllib_MgntDisconnectIBSS(rtllib);
			break;
		case IW_MODE_INFRA:
			rtllib_MgntDisconnectAP(rtllib, asRsn);
			break;
		}
	}

	return true;
}
EXPORT_SYMBOL(rtllib_MgntDisconnect);
/* Send a SIOCGIWAP wireless event to userspace: the AP's BSSID while
 * linked, or the all-zero address to signal disconnection.
 */
void notify_wx_assoc_event(struct rtllib_device *ieee)
{
	union iwreq_data wrqu;

	if (ieee->cannot_notify)
		return;

	wrqu.ap_addr.sa_family = ARPHRD_ETHER;
	if (ieee->link_state == MAC80211_LINKED) {
		memcpy(wrqu.ap_addr.sa_data, ieee->current_network.bssid,
		       ETH_ALEN);
	} else {
		netdev_info(ieee->dev, "%s(): Tell user space disconnected\n",
			    __func__);
		eth_zero_addr(wrqu.ap_addr.sa_data);
	}
	wireless_send_event(ieee->dev, SIOCGIWAP, &wrqu, NULL);
}
EXPORT_SYMBOL(notify_wx_assoc_event);
| linux-master | drivers/staging/rtl8192e/rtllib_softmac.c |
// SPDX-License-Identifier: GPL-2.0
/*
* Copyright(c) 2008 - 2010 Realtek Corporation. All rights reserved.
*
* Contact Information: wlanfae <[email protected]>
*/
#include "rtl_core.h"
#include "r8192E_hw.h"
#include "r8190P_rtl8256.h"
#include "rtl_pm.h"
/* PM suspend callback: stop GPIO polling, shut the interface down, and
 * either power the RF off (no WOL) or program the wake-on-WLAN CRC
 * registers when remote wakeup is supported. Always returns 0.
 */
int rtl92e_suspend(struct device *dev_d)
{
	struct net_device *dev = dev_get_drvdata(dev_d);
	struct r8192_priv *priv = rtllib_priv(dev);
	u32 ulRegRead;

	netdev_info(dev, "============> r8192E suspend call.\n");
	del_timer_sync(&priv->gpio_polling_timer);
	cancel_delayed_work_sync(&priv->gpio_change_rf_wq);
	priv->polling_timer_on = 0;

	if (!netif_running(dev)) {
		netdev_info(dev,
			    "RTL819XE:UI is open out of suspend function\n");
		goto out_pci_suspend;
	}

	if (dev->netdev_ops->ndo_stop)
		dev->netdev_ops->ndo_stop(dev);
	netif_device_detach(dev);

	if (!priv->rtllib->bSupportRemoteWakeUp) {
		/* No WOL: turn RF off and reset the on-chip CPU. */
		rtl92e_set_rf_state(dev, rf_off, RF_CHANGE_BY_INIT);
		ulRegRead = rtl92e_readl(dev, CPU_GEN);
		ulRegRead |= CPU_GEN_SYSTEM_RESET;
		rtl92e_writel(dev, CPU_GEN, ulRegRead);
	} else {
		/* WOL path: arm the wake-frame CRC filters and power mode. */
		rtl92e_writel(dev, WFCRC0, 0xffffffff);
		rtl92e_writel(dev, WFCRC1, 0xffffffff);
		rtl92e_writel(dev, WFCRC2, 0xffffffff);
		rtl92e_writeb(dev, PMR, 0x5);
		rtl92e_writeb(dev, MAC_BLK_CTRL, 0xa);
	}
out_pci_suspend:
	netdev_info(dev, "WOL is %s\n", priv->rtllib->bSupportRemoteWakeUp ?
		    "Supported" : "Not supported");
	device_set_wakeup_enable(dev_d, priv->rtllib->bSupportRemoteWakeUp);

	mdelay(20);

	return 0;
}
/* PM resume callback: clear a stale PCI config quirk, restart GPIO
 * polling, reattach and reopen the interface, and turn the RF back on
 * unless WOL kept it armed. Always returns 0.
 */
int rtl92e_resume(struct device *dev_d)
{
	struct pci_dev *pdev = to_pci_dev(dev_d);
	struct net_device *dev = dev_get_drvdata(dev_d);
	struct r8192_priv *priv = rtllib_priv(dev);
	u32 val;

	netdev_info(dev, "================>r8192E resume call.\n");

	/* Clear bits 8-15 of PCI config offset 0x40 if set — presumably
	 * a chip-specific retry/timeout quirk; TODO confirm against the
	 * datasheet.
	 */
	pci_read_config_dword(pdev, 0x40, &val);
	if ((val & 0x0000ff00) != 0)
		pci_write_config_dword(pdev, 0x40, val & 0xffff00ff);

	device_wakeup_disable(dev_d);

	if (priv->polling_timer_on == 0)
		rtl92e_check_rfctrl_gpio_timer(&priv->gpio_polling_timer);

	if (!netif_running(dev)) {
		netdev_info(dev,
			    "RTL819XE:UI is open out of resume function\n");
		goto out;
	}

	netif_device_attach(dev);
	if (dev->netdev_ops->ndo_open)
		dev->netdev_ops->ndo_open(dev);

	if (!priv->rtllib->bSupportRemoteWakeUp)
		rtl92e_set_rf_state(dev, rf_on, RF_CHANGE_BY_INIT);

out:
	return 0;
}
| linux-master | drivers/staging/rtl8192e/rtl8192e/rtl_pm.c |
// SPDX-License-Identifier: GPL-2.0
/*
* Copyright(c) 2008 - 2010 Realtek Corporation. All rights reserved.
*
* Contact Information: wlanfae <[email protected]>
*/
#include "rtl_core.h"
#include "r8192E_hw.h"
#include "table.h"
#include "r8192E_firmware.h"
#include "r8192E_cmdpkt.h"
#include <linux/firmware.h>
/* Poll the CPU_GEN register until any bit in @mask is set or @timeout
 * milliseconds elapse. Returns true on success, false on timeout.
 */
static bool _rtl92e_wait_for_fw(struct net_device *dev, u32 mask, u32 timeout)
{
	unsigned long expires = jiffies + msecs_to_jiffies(timeout);

	while (time_before(jiffies, expires)) {
		u32 reg = rtl92e_readl(dev, CPU_GEN);

		if (reg & mask)
			return true;
		mdelay(2);
	}

	return false;
}
/* Boot the on-chip CPU after code download: wait for the "put code ok"
 * flag, enable the CPU, then wait for "boot ready". Returns true on
 * success, false on either timeout.
 */
static bool _rtl92e_fw_boot_cpu(struct net_device *dev)
{
	u32 CPU_status = 0;

	if (!_rtl92e_wait_for_fw(dev, CPU_GEN_PUT_CODE_OK, 200)) {
		netdev_err(dev, "Firmware download failed.\n");
		return false;
	}
	netdev_dbg(dev, "Download Firmware: Put code ok!\n");

	/* Power up the CPU; only the low byte of CPU_GEN is written. */
	CPU_status = rtl92e_readl(dev, CPU_GEN);
	rtl92e_writeb(dev, CPU_GEN, (CPU_status | CPU_GEN_PWR_STB_CPU) & 0xff);
	mdelay(1);

	if (!_rtl92e_wait_for_fw(dev, CPU_GEN_BOOT_RDY, 200)) {
		netdev_err(dev, "Firmware boot failed.\n");
		return false;
	}

	netdev_dbg(dev, "Download Firmware: Boot ready!\n");

	return true;
}
/* Advance the firmware state machine after each download step and
 * verify the hardware reached the expected state. Returns true on
 * success, false on failure or unknown step.
 */
static bool _rtl92e_fw_check_ready(struct net_device *dev,
				   u8 load_fw_status)
{
	struct r8192_priv *priv = rtllib_priv(dev);
	struct rt_firmware *pfirmware = priv->fw_info;
	bool rt_status = true;

	switch (load_fw_status) {
	case FW_INIT_STEP0_BOOT:
		pfirmware->status = FW_STATUS_1_MOVE_BOOT_CODE;
		break;

	case FW_INIT_STEP1_MAIN:
		pfirmware->status = FW_STATUS_2_MOVE_MAIN_CODE;

		/* Main code downloaded: actually boot the CPU now. */
		rt_status = _rtl92e_fw_boot_cpu(dev);
		if (rt_status)
			pfirmware->status = FW_STATUS_3_TURNON_CPU;
		else
			netdev_dbg(dev, "_rtl92e_fw_boot_cpu fail!\n");

		break;

	case FW_INIT_STEP2_DATA:
		pfirmware->status = FW_STATUS_4_MOVE_DATA_CODE;
		mdelay(1);

		rt_status = _rtl92e_wait_for_fw(dev, CPU_GEN_FIRM_RDY, 20);
		if (rt_status)
			pfirmware->status = FW_STATUS_5_READY;

		break;
	default:
		rt_status = false;
		netdev_dbg(dev, "Unknown firmware status");
		break;
	}

	return rt_status;
}
/* Load firmware image @name into @blob: prepend @padding zero bytes,
 * pad the image to a 4-byte multiple, and byte-swap each 32-bit word.
 * Returns true on success, false if the image is missing or too big.
 */
static bool _rtl92e_fw_prepare(struct net_device *dev, struct rt_fw_blob *blob,
			       const char *name, u8 padding)
{
	const struct firmware *fw;
	int rc, i;
	bool ret = true;

	rc = request_firmware(&fw, name, &dev->dev);
	if (rc < 0)
		return false;

	if (round_up(fw->size, 4) > MAX_FW_SIZE - padding) {
		netdev_err(dev, "Firmware image %s too big for the device.\n",
			   name);
		ret = false;
		goto out;
	}

	if (padding)
		memset(blob->data, 0, padding);
	/* Zero the tail word so the final partial word is padded. */
	if (fw->size % 4)
		memset(blob->data + padding + fw->size, 0, 4);
	memcpy(blob->data + padding, fw->data, fw->size);
	blob->size = round_up(fw->size, 4) + padding;

	/* Swap endianness - the firmware image is packaged in the
	 * opposite byte order.
	 */
	for (i = padding; i < blob->size; i += 4) {
		u32 *data = (u32 *)(blob->data + i);
		*data = swab32p(data);
	}
out:
	release_firmware(fw);
	return ret;
}
/* Download and start the three-part firmware (boot/main/data). On a
 * firmware-only reset, only the data image is re-sent. Returns true on
 * success, false on any download or readiness failure.
 */
bool rtl92e_init_fw(struct net_device *dev)
{
	struct r8192_priv *priv = rtllib_priv(dev);
	bool rt_status = true;

	u32 file_length = 0;
	u8 *mapped_file = NULL;
	u8 i = 0;
	enum opt_rst_type rst_opt = OPT_SYSTEM_RESET;
	enum firmware_init_step starting_state = FW_INIT_STEP0_BOOT;

	struct rt_firmware *pfirmware = priv->fw_info;

	netdev_dbg(dev, " PlatformInitFirmware()==>\n");

	/* First-time init downloads everything; after a previous
	 * successful load only the data step is repeated.
	 */
	if (pfirmware->status == FW_STATUS_0_INIT) {
		rst_opt = OPT_SYSTEM_RESET;
		starting_state = FW_INIT_STEP0_BOOT;

	} else if (pfirmware->status == FW_STATUS_5_READY) {
		rst_opt = OPT_FIRMWARE_RESET;
		starting_state = FW_INIT_STEP2_DATA;
	}

	for (i = starting_state; i <= FW_INIT_STEP2_DATA; i++) {
		if (rst_opt == OPT_SYSTEM_RESET) {
			if (pfirmware->blobs[i].size == 0) {
				const char *fw_name[3] = {
					RTL8192E_BOOT_IMG_FW,
					RTL8192E_MAIN_IMG_FW,
					RTL8192E_DATA_IMG_FW
				};
				int pad = 0;

				/* The main image needs 128 bytes of
				 * leading zero padding.
				 */
				if (i == FW_INIT_STEP1_MAIN)
					pad = 128;

				if (!_rtl92e_fw_prepare(dev,
							&pfirmware->blobs[i],
							fw_name[i],
							pad))
					goto download_firmware_fail;
			}
		}

		mapped_file = pfirmware->blobs[i].data;
		file_length = pfirmware->blobs[i].size;

		rt_status = rtl92e_send_cmd_pkt(dev, DESC_PACKET_TYPE_INIT,
						mapped_file, file_length);
		if (!rt_status)
			goto download_firmware_fail;

		if (!_rtl92e_fw_check_ready(dev, i))
			goto download_firmware_fail;
	}

	netdev_dbg(dev, "Firmware Download Success\n");
	return rt_status;

download_firmware_fail:
	netdev_err(dev, "%s: Failed to initialize firmware.\n", __func__);
	return false;
}
| linux-master | drivers/staging/rtl8192e/rtl8192e/r8192E_firmware.c |
// SPDX-License-Identifier: GPL-2.0
/*
* Copyright(c) 2008 - 2010 Realtek Corporation. All rights reserved.
*
* Contact Information: wlanfae <[email protected]>
*/
#include "table.h"
/*
 * Baseband PHY register init table for the 1T2R configuration:
 * flat pairs of {register offset, 32-bit value}, applied in order
 * during PHY initialization.
 */
u32 RTL8192E_PHY_REG_1T2R_ARR[RTL8192E_PHY_REG_1T2R_ARR_LEN] = {
	0x800, 0x00000000,
	0x804, 0x00000001,
	0x808, 0x0000fc00,
	0x80c, 0x0000001c,
	0x810, 0x801010aa,
	0x814, 0x008514d0,
	0x818, 0x00000040,
	0x81c, 0x00000000,
	0x820, 0x00000004,
	0x824, 0x00690000,
	0x828, 0x00000004,
	0x82c, 0x00e90000,
	0x830, 0x00000004,
	0x834, 0x00690000,
	0x838, 0x00000004,
	0x83c, 0x00e90000,
	0x840, 0x00000000,
	0x844, 0x00000000,
	0x848, 0x00000000,
	0x84c, 0x00000000,
	0x850, 0x00000000,
	0x854, 0x00000000,
	0x858, 0x65a965a9,
	0x85c, 0x65a965a9,
	0x860, 0x001f0010,
	0x864, 0x007f0010,
	0x868, 0x001f0010,
	0x86c, 0x007f0010,
	0x870, 0x0f100f70,
	0x874, 0x0f100f70,
	0x878, 0x00000000,
	0x87c, 0x00000000,
	0x880, 0x6870e36c,
	0x884, 0xe3573600,
	0x888, 0x4260c340,
	0x88c, 0x0000ff00,
	0x890, 0x00000000,
	0x894, 0xfffffffe,
	0x898, 0x4c42382f,
	0x89c, 0x00656056,
	0x8b0, 0x00000000,
	0x8e0, 0x00000000,
	0x8e4, 0x00000000,
	0x900, 0x00000000,
	0x904, 0x00000023,
	0x908, 0x00000000,
	0x90c, 0x31121311,
	0xa00, 0x00d0c7d8,
	0xa04, 0x811f0008,
	0xa08, 0x80cd8300,
	0xa0c, 0x2e62740f,
	0xa10, 0x95009b78,
	0xa14, 0x11145008,
	0xa18, 0x00881117,
	0xa1c, 0x89140fa0,
	0xa20, 0x1a1b0000,
	0xa24, 0x090e1317,
	0xa28, 0x00000204,
	0xa2c, 0x00000000,
	0xc00, 0x00000040,
	0xc04, 0x00005433,
	0xc08, 0x000000e4,
	0xc0c, 0x6c6c6c6c,
	0xc10, 0x08800000,
	0xc14, 0x40000100,
	0xc18, 0x08000000,
	0xc1c, 0x40000100,
	0xc20, 0x08000000,
	0xc24, 0x40000100,
	0xc28, 0x08000000,
	0xc2c, 0x40000100,
	0xc30, 0x6de9ac44,
	0xc34, 0x465c52cd,
	0xc38, 0x497f5994,
	0xc3c, 0x0a969764,
	0xc40, 0x1f7c403f,
	0xc44, 0x000100b7,
	0xc48, 0xec020000,
	0xc4c, 0x00000300,
	0xc50, 0x69543420,
	0xc54, 0x433c0094,
	0xc58, 0x69543420,
	0xc5c, 0x433c0094,
	0xc60, 0x69543420,
	0xc64, 0x433c0094,
	0xc68, 0x69543420,
	0xc6c, 0x433c0094,
	0xc70, 0x2c7f000d,
	0xc74, 0x0186175b,
	0xc78, 0x0000001f,
	0xc7c, 0x00b91612,
	0xc80, 0x40000100,
	0xc84, 0x20000000,
	0xc88, 0x40000100,
	0xc8c, 0x20200000,
	0xc90, 0x40000100,
	0xc94, 0x00000000,
	0xc98, 0x40000100,
	0xc9c, 0x00000000,
	0xca0, 0x00492492,
	0xca4, 0x00000000,
	0xca8, 0x00000000,
	0xcac, 0x00000000,
	0xcb0, 0x00000000,
	0xcb4, 0x00000000,
	0xcb8, 0x00000000,
	0xcbc, 0x00492492,
	0xcc0, 0x00000000,
	0xcc4, 0x00000000,
	0xcc8, 0x00000000,
	0xccc, 0x00000000,
	0xcd0, 0x00000000,
	0xcd4, 0x00000000,
	0xcd8, 0x64b22427,
	0xcdc, 0x00766932,
	0xce0, 0x00222222,
	0xd00, 0x00000750,
	0xd04, 0x00000403,
	0xd08, 0x0000907f,
	0xd0c, 0x00000001,
	0xd10, 0xa0633333,
	0xd14, 0x33333c63,
	0xd18, 0x6a8f5b6b,
	0xd1c, 0x00000000,
	0xd20, 0x00000000,
	0xd24, 0x00000000,
	0xd28, 0x00000000,
	0xd2c, 0xcc979975,
	0xd30, 0x00000000,
	0xd34, 0x00000000,
	0xd38, 0x00000000,
	0xd3c, 0x00027293,
	0xd40, 0x00000000,
	0xd44, 0x00000000,
	0xd48, 0x00000000,
	0xd4c, 0x00000000,
	0xd50, 0x6437140a,
	0xd54, 0x024dbd02,
	0xd58, 0x00000000,
	0xd5c, 0x04032064,
	0xe00, 0x161a1a1a,
	0xe04, 0x12121416,
	0xe08, 0x00001800,
	0xe0c, 0x00000000,
	0xe10, 0x161a1a1a,
	0xe14, 0x12121416,
	0xe18, 0x161a1a1a,
	0xe1c, 0x12121416,
};
/*
 * RF chain A init table: flat pairs of {RF register offset, value},
 * written to the radio in order during RF initialization.
 */
u32 RTL8192E_RADIO_A_ARR[RTL8192E_RADIO_A_ARR_LEN] = {
	0x019, 0x00000003,
	0x000, 0x000000bf,
	0x001, 0x00000ee0,
	0x002, 0x0000004c,
	0x003, 0x000007f1,
	0x004, 0x00000975,
	0x005, 0x00000c58,
	0x006, 0x00000ae6,
	0x007, 0x000000ca,
	0x008, 0x00000e1c,
	0x009, 0x000007f0,
	0x00a, 0x000009d0,
	0x00b, 0x000001ba,
	0x00c, 0x00000240,
	0x00e, 0x00000020,
	0x00f, 0x00000990,
	0x012, 0x00000806,
	0x014, 0x000005ab,
	0x015, 0x00000f80,
	0x016, 0x00000020,
	0x017, 0x00000597,
	0x018, 0x0000050a,
	0x01a, 0x00000f80,
	0x01b, 0x00000f5e,
	0x01c, 0x00000008,
	0x01d, 0x00000607,
	0x01e, 0x000006cc,
	/* Repeated 0x01f/0x020 writes: 0x01f selects an index, 0x020 the
	 * value programmed at that index.
	 */
	0x01f, 0x00000000,
	0x020, 0x000001a5,
	0x01f, 0x00000001,
	0x020, 0x00000165,
	0x01f, 0x00000002,
	0x020, 0x000000c6,
	0x01f, 0x00000003,
	0x020, 0x00000086,
	0x01f, 0x00000004,
	0x020, 0x00000046,
	0x01f, 0x00000005,
	0x020, 0x000001e6,
	0x01f, 0x00000006,
	0x020, 0x000001a6,
	0x01f, 0x00000007,
	0x020, 0x00000166,
	0x01f, 0x00000008,
	0x020, 0x000000c7,
	0x01f, 0x00000009,
	0x020, 0x00000087,
	0x01f, 0x0000000a,
	0x020, 0x000000f7,
	0x01f, 0x0000000b,
	0x020, 0x000000d7,
	0x01f, 0x0000000c,
	0x020, 0x000000b7,
	0x01f, 0x0000000d,
	0x020, 0x00000097,
	0x01f, 0x0000000e,
	0x020, 0x00000077,
	0x01f, 0x0000000f,
	0x020, 0x00000057,
	0x01f, 0x00000010,
	0x020, 0x00000037,
	0x01f, 0x00000011,
	0x020, 0x000000fb,
	0x01f, 0x00000012,
	0x020, 0x000000db,
	0x01f, 0x00000013,
	0x020, 0x000000bb,
	0x01f, 0x00000014,
	0x020, 0x000000ff,
	0x01f, 0x00000015,
	0x020, 0x000000e3,
	0x01f, 0x00000016,
	0x020, 0x000000c3,
	0x01f, 0x00000017,
	0x020, 0x000000a3,
	0x01f, 0x00000018,
	0x020, 0x00000083,
	0x01f, 0x00000019,
	0x020, 0x00000063,
	0x01f, 0x0000001a,
	0x020, 0x00000043,
	0x01f, 0x0000001b,
	0x020, 0x00000023,
	0x01f, 0x0000001c,
	0x020, 0x00000003,
	0x01f, 0x0000001d,
	0x020, 0x000001e3,
	0x01f, 0x0000001e,
	0x020, 0x000001c3,
	0x01f, 0x0000001f,
	0x020, 0x000001a3,
	0x01f, 0x00000020,
	0x020, 0x00000183,
	0x01f, 0x00000021,
	0x020, 0x00000163,
	0x01f, 0x00000022,
	0x020, 0x00000143,
	0x01f, 0x00000023,
	0x020, 0x00000123,
	0x01f, 0x00000024,
	0x020, 0x00000103,
	0x023, 0x00000203,
	0x024, 0x00000100,
	0x00b, 0x000001ba,
	0x02c, 0x000003d7,
	0x02d, 0x00000ff0,
	0x000, 0x00000037,
	0x004, 0x00000160,
	0x007, 0x00000080,
	0x002, 0x0000088d,
	0x0fe, 0x00000000,
	0x0fe, 0x00000000,
	0x016, 0x00000200,
	0x016, 0x00000380,
	0x016, 0x00000020,
	0x016, 0x000001a0,
	0x000, 0x000000bf,
	0x00d, 0x0000001f,
	0x00d, 0x00000c9f,
	0x002, 0x0000004d,
	0x000, 0x00000cbf,
	0x004, 0x00000975,
	0x007, 0x00000700,
};
/*
 * RF chain B init table: flat pairs of {RF register offset, value},
 * written to the radio in order during RF initialization.
 */
u32 RTL8192E_RADIO_B_ARR[RTL8192E_RADIO_B_ARR_LEN] = {
	0x019, 0x00000003,
	0x000, 0x000000bf,
	0x001, 0x000006e0,
	0x002, 0x0000004c,
	0x003, 0x000007f1,
	0x004, 0x00000975,
	0x005, 0x00000c58,
	0x006, 0x00000ae6,
	0x007, 0x000000ca,
	0x008, 0x00000e1c,
	0x000, 0x000000b7,
	0x00a, 0x00000850,
	0x000, 0x000000bf,
	0x00b, 0x000001ba,
	0x00c, 0x00000240,
	0x00e, 0x00000020,
	0x015, 0x00000f80,
	0x016, 0x00000020,
	0x017, 0x00000597,
	0x018, 0x0000050a,
	0x01a, 0x00000e00,
	0x01b, 0x00000f5e,
	0x01d, 0x00000607,
	0x01e, 0x000006cc,
	0x00b, 0x000001ba,
	0x023, 0x00000203,
	0x024, 0x00000100,
	0x000, 0x00000037,
	0x004, 0x00000160,
	0x016, 0x00000200,
	0x016, 0x00000380,
	0x016, 0x00000020,
	0x016, 0x000001a0,
	0x00d, 0x00000ccc,
	0x000, 0x000000bf,
	0x002, 0x0000004d,
	0x000, 0x00000cbf,
	0x004, 0x00000975,
	0x007, 0x00000700,
};
/*
 * MAC/PHY init table: triplets of {register offset, bit mask, value};
 * only the masked bits of each register are updated.
 */
u32 RTL8192E_MACPHY_ARR[] = {
	0x03c, 0xffff0000, 0x00000f0f,
	0x340, 0xffffffff, 0x161a1a1a,
	0x344, 0xffffffff, 0x12121416,
	0x348, 0x0000ffff, 0x00001818,
	0x12c, 0xffffffff, 0x04000802,
	0x318, 0x00000fff, 0x00000100,
};
/*
 * MAC/PHY init table for the "PG" (power-by-rate from EEPROM) variant:
 * triplets of {register offset, bit mask, value}.
 */
u32 RTL8192E_MACPHY_ARR_PG[] = {
	0x03c, 0xffff0000, 0x00000f0f,
	0xe00, 0xffffffff, 0x06090909,
	0xe04, 0xffffffff, 0x00030306,
	0xe08, 0x0000ff00, 0x00000000,
	0xe10, 0xffffffff, 0x0a0c0d0f,
	0xe14, 0xffffffff, 0x06070809,
	0xe18, 0xffffffff, 0x0a0c0d0f,
	0xe1c, 0xffffffff, 0x06070809,
	0x12c, 0xffffffff, 0x04000802,
	0x318, 0x00000fff, 0x00000800,
};
/*
 * AGC (automatic gain control) table: pairs of {register, value}; every
 * entry writes baseband register 0xc78 with a different gain-table word.
 */
u32 RTL8192E_AGCTAB_ARR[RTL8192E_AGCTAB_ARR_LEN] = {
	0xc78, 0x7d000001,
	0xc78, 0x7d010001,
	0xc78, 0x7d020001,
	0xc78, 0x7d030001,
	0xc78, 0x7d040001,
	0xc78, 0x7d050001,
	0xc78, 0x7c060001,
	0xc78, 0x7b070001,
	0xc78, 0x7a080001,
	0xc78, 0x79090001,
	0xc78, 0x780a0001,
	0xc78, 0x770b0001,
	0xc78, 0x760c0001,
	0xc78, 0x750d0001,
	0xc78, 0x740e0001,
	0xc78, 0x730f0001,
	0xc78, 0x72100001,
	0xc78, 0x71110001,
	0xc78, 0x70120001,
	0xc78, 0x6f130001,
	0xc78, 0x6e140001,
	0xc78, 0x6d150001,
	0xc78, 0x6c160001,
	0xc78, 0x6b170001,
	0xc78, 0x6a180001,
	0xc78, 0x69190001,
	0xc78, 0x681a0001,
	0xc78, 0x671b0001,
	0xc78, 0x661c0001,
	0xc78, 0x651d0001,
	0xc78, 0x641e0001,
	0xc78, 0x491f0001,
	0xc78, 0x48200001,
	0xc78, 0x47210001,
	0xc78, 0x46220001,
	0xc78, 0x45230001,
	0xc78, 0x44240001,
	0xc78, 0x43250001,
	0xc78, 0x28260001,
	0xc78, 0x27270001,
	0xc78, 0x26280001,
	0xc78, 0x25290001,
	0xc78, 0x242a0001,
	0xc78, 0x232b0001,
	0xc78, 0x222c0001,
	0xc78, 0x212d0001,
	0xc78, 0x202e0001,
	0xc78, 0x0a2f0001,
	0xc78, 0x08300001,
	0xc78, 0x06310001,
	0xc78, 0x05320001,
	0xc78, 0x04330001,
	0xc78, 0x03340001,
	0xc78, 0x02350001,
	0xc78, 0x01360001,
	0xc78, 0x00370001,
	0xc78, 0x00380001,
	0xc78, 0x00390001,
	0xc78, 0x003a0001,
	0xc78, 0x003b0001,
	0xc78, 0x003c0001,
	0xc78, 0x003d0001,
	0xc78, 0x003e0001,
	0xc78, 0x003f0001,
	0xc78, 0x7d400001,
	0xc78, 0x7d410001,
	0xc78, 0x7d420001,
	0xc78, 0x7d430001,
	0xc78, 0x7d440001,
	0xc78, 0x7d450001,
	0xc78, 0x7c460001,
	0xc78, 0x7b470001,
	0xc78, 0x7a480001,
	0xc78, 0x79490001,
	0xc78, 0x784a0001,
	0xc78, 0x774b0001,
	0xc78, 0x764c0001,
	0xc78, 0x754d0001,
	0xc78, 0x744e0001,
	0xc78, 0x734f0001,
	0xc78, 0x72500001,
	0xc78, 0x71510001,
	0xc78, 0x70520001,
	0xc78, 0x6f530001,
	0xc78, 0x6e540001,
	0xc78, 0x6d550001,
	0xc78, 0x6c560001,
	0xc78, 0x6b570001,
	0xc78, 0x6a580001,
	0xc78, 0x69590001,
	0xc78, 0x685a0001,
	0xc78, 0x675b0001,
	0xc78, 0x665c0001,
	0xc78, 0x655d0001,
	0xc78, 0x645e0001,
	0xc78, 0x495f0001,
	0xc78, 0x48600001,
	0xc78, 0x47610001,
	0xc78, 0x46620001,
	0xc78, 0x45630001,
	0xc78, 0x44640001,
	0xc78, 0x43650001,
	0xc78, 0x28660001,
	0xc78, 0x27670001,
	0xc78, 0x26680001,
	0xc78, 0x25690001,
	0xc78, 0x246a0001,
	0xc78, 0x236b0001,
	0xc78, 0x226c0001,
	0xc78, 0x216d0001,
	0xc78, 0x206e0001,
	0xc78, 0x0a6f0001,
	0xc78, 0x08700001,
	0xc78, 0x06710001,
	0xc78, 0x05720001,
	0xc78, 0x04730001,
	0xc78, 0x03740001,
	0xc78, 0x02750001,
	0xc78, 0x01760001,
	0xc78, 0x00770001,
	0xc78, 0x00780001,
	0xc78, 0x00790001,
	0xc78, 0x007a0001,
	0xc78, 0x007b0001,
	0xc78, 0x007c0001,
	0xc78, 0x007d0001,
	0xc78, 0x007e0001,
	0xc78, 0x007f0001,
	0xc78, 0x2e00001e,
	0xc78, 0x2e01001e,
	0xc78, 0x2e02001e,
	0xc78, 0x2e03001e,
	0xc78, 0x2e04001e,
	0xc78, 0x2e05001e,
	0xc78, 0x3006001e,
	0xc78, 0x3407001e,
	0xc78, 0x3908001e,
	0xc78, 0x3c09001e,
	0xc78, 0x3f0a001e,
	0xc78, 0x420b001e,
	0xc78, 0x440c001e,
	0xc78, 0x450d001e,
	0xc78, 0x460e001e,
	0xc78, 0x460f001e,
	0xc78, 0x4710001e,
	0xc78, 0x4811001e,
	0xc78, 0x4912001e,
	0xc78, 0x4a13001e,
	0xc78, 0x4b14001e,
	0xc78, 0x4b15001e,
	0xc78, 0x4c16001e,
	0xc78, 0x4d17001e,
	0xc78, 0x4e18001e,
	0xc78, 0x4f19001e,
	0xc78, 0x4f1a001e,
	0xc78, 0x501b001e,
	0xc78, 0x511c001e,
	0xc78, 0x521d001e,
	0xc78, 0x521e001e,
	0xc78, 0x531f001e,
	0xc78, 0x5320001e,
	0xc78, 0x5421001e,
	0xc78, 0x5522001e,
	0xc78, 0x5523001e,
	0xc78, 0x5624001e,
	0xc78, 0x5725001e,
	0xc78, 0x5726001e,
	0xc78, 0x5827001e,
	0xc78, 0x5828001e,
	0xc78, 0x5929001e,
	0xc78, 0x592a001e,
	0xc78, 0x5a2b001e,
	0xc78, 0x5b2c001e,
	0xc78, 0x5c2d001e,
	0xc78, 0x5c2e001e,
	0xc78, 0x5d2f001e,
	0xc78, 0x5e30001e,
	0xc78, 0x5f31001e,
	0xc78, 0x6032001e,
	0xc78, 0x6033001e,
	0xc78, 0x6134001e,
	0xc78, 0x6235001e,
	0xc78, 0x6336001e,
	0xc78, 0x6437001e,
	0xc78, 0x6438001e,
	0xc78, 0x6539001e,
	0xc78, 0x663a001e,
	0xc78, 0x673b001e,
	0xc78, 0x673c001e,
	0xc78, 0x683d001e,
	0xc78, 0x693e001e,
	0xc78, 0x6a3f001e,
};
| linux-master | drivers/staging/rtl8192e/rtl8192e/table.c |
// SPDX-License-Identifier: GPL-2.0
/*
* Copyright(c) 2008 - 2010 Realtek Corporation. All rights reserved.
*
* Based on the r8180 driver, which is:
* Copyright 2004-2005 Andrea Merello <[email protected]>, et al.
*
* Contact Information: wlanfae <[email protected]>
*/
#include "rtl_pci.h"
#include "rtl_core.h"
/*
 * _rtl92e_parse_pci_configuration - apply vendor-specific PCI config tweaks
 * @pdev: the PCI device being configured
 * @dev:  the associated network device
 *
 * Reads the PCIe link-control register and pokes two vendor-specific
 * configuration-space registers needed by the chip.
 */
static void _rtl92e_parse_pci_configuration(struct pci_dev *pdev,
					    struct net_device *dev)
{
	struct r8192_priv *priv = (struct r8192_priv *)rtllib_priv(dev);
	u8 tmp;
	u16 link_ctrl_reg;

	/* NOTE(review): link_ctrl_reg is read but never used; presumably
	 * kept as a dummy config-space read — confirm before removing.
	 */
	pcie_capability_read_word(priv->pdev, PCI_EXP_LNKCTL, &link_ctrl_reg);

	/* Set bit 4 of vendor-specific config register 0x98. */
	pci_read_config_byte(pdev, 0x98, &tmp);
	tmp |= BIT4;
	pci_write_config_byte(pdev, 0x98, tmp);

	/* Write 0x17 to extended config offset 0x70f. */
	tmp = 0x17;
	pci_write_config_byte(pdev, 0x70f, tmp);
}
/*
 * rtl92e_check_adapter - verify that the probed PCI device is an RTL8192E
 * @pdev: the PCI device found during probe
 * @dev:  the network device bound to it
 *
 * Reads the PCI device and revision IDs, records the detected NIC type
 * in the driver private data and rejects anything other than an 8192E.
 *
 * Return: true when an RTL8192E was detected, false otherwise.
 */
bool rtl92e_check_adapter(struct pci_dev *pdev, struct net_device *dev)
{
	struct r8192_priv *priv = (struct r8192_priv *)rtllib_priv(dev);
	u16 device_id = pdev->device;
	u8 revision_id = pdev->revision;
	u16 irq_line;

	pci_read_config_word(pdev, 0x3C, &irq_line);

	/* Assume the expected chip until proven otherwise. */
	priv->card_8192 = NIC_8192E;

	if (device_id == 0x8192) {
		switch (revision_id) {
		case HAL_HW_PCI_REVISION_ID_8192PCIE:
			dev_info(&pdev->dev,
				 "Adapter(8192 PCI-E) is found - DeviceID=%x\n",
				 device_id);
			priv->card_8192 = NIC_8192E;
			break;
		case HAL_HW_PCI_REVISION_ID_8192SE:
			dev_info(&pdev->dev,
				 "Adapter(8192SE) is found - DeviceID=%x\n",
				 device_id);
			priv->card_8192 = NIC_8192SE;
			break;
		default:
			dev_info(&pdev->dev,
				 "UNKNOWN nic type(%4x:%4x)\n",
				 pdev->vendor, pdev->device);
			priv->card_8192 = NIC_UNKNOWN;
			return false;
		}
	}

	/* Anything that is not a plain 8192E is handled by other drivers. */
	if (priv->card_8192 != NIC_8192E) {
		dev_info(&pdev->dev,
			 "Detect info(%x) and hardware info(%x) not match!\n",
			 NIC_8192E, priv->card_8192);
		dev_info(&pdev->dev,
			 "Please select proper driver before install!!!!\n");
		return false;
	}

	_rtl92e_parse_pci_configuration(pdev, dev);

	return true;
}
| linux-master | drivers/staging/rtl8192e/rtl8192e/rtl_pci.c |
// SPDX-License-Identifier: GPL-2.0
/*
* Copyright(c) 2008 - 2010 Realtek Corporation. All rights reserved.
*
* Based on the r8180 driver, which is:
* Copyright 2004-2005 Andrea Merello <[email protected]>, et al.
*
* Contact Information: wlanfae <[email protected]>
*/
#include <linux/uaccess.h>
#include <linux/pci.h>
#include <linux/vmalloc.h>
#include <linux/ieee80211.h>
#include "rtl_core.h"
#include "r8192E_phy.h"
#include "r8192E_phyreg.h"
#include "r8190P_rtl8256.h"
#include "r8192E_cmdpkt.h"
#include "rtl_wx.h"
#include "rtl_dm.h"
#include "rtl_pm.h"
/* Driver-wide settings: hardware WEP enable flag and the netdev name
 * template used when registering the interface. */
int hwwep = 1;
static char *ifname = "wlan%d";

/* PCI IDs this driver binds to: the Realtek 8192E plus two OEM variants. */
static struct pci_device_id rtl8192_pci_id_tbl[] = {
	{PCI_DEVICE(0x10ec, 0x8192)},
	{PCI_DEVICE(0x07aa, 0x0044)},
	{PCI_DEVICE(0x07aa, 0x0047)},
	{}
};

MODULE_DEVICE_TABLE(pci, rtl8192_pci_id_tbl);

static int _rtl92e_pci_probe(struct pci_dev *pdev,
			     const struct pci_device_id *id);
static void _rtl92e_pci_disconnect(struct pci_dev *pdev);
static irqreturn_t _rtl92e_irq(int irq, void *netdev);

/* Suspend/resume hooks wired into the PCI driver below. */
static SIMPLE_DEV_PM_OPS(rtl92e_pm_ops, rtl92e_suspend, rtl92e_resume);

static struct pci_driver rtl8192_pci_driver = {
	.name = DRV_NAME, /* Driver name */
	.id_table = rtl8192_pci_id_tbl, /* PCI_ID table */
	.probe = _rtl92e_pci_probe, /* probe fn */
	.remove = _rtl92e_pci_disconnect, /* remove fn */
	.driver.pm = &rtl92e_pm_ops,
};

/* Forward declarations of the static helpers defined further down. */
static short _rtl92e_is_tx_queue_empty(struct net_device *dev);
static void _rtl92e_watchdog_wq_cb(void *data);
static void _rtl92e_watchdog_timer_cb(struct timer_list *t);
static void _rtl92e_hard_data_xmit(struct sk_buff *skb, struct net_device *dev,
				   int rate);
static int _rtl92e_hard_start_xmit(struct sk_buff *skb, struct net_device *dev);
static void _rtl92e_tx_cmd(struct net_device *dev, struct sk_buff *skb);
static short _rtl92e_tx(struct net_device *dev, struct sk_buff *skb);
static short _rtl92e_pci_initdescring(struct net_device *dev);
static void _rtl92e_irq_tx_tasklet(struct tasklet_struct *t);
static void _rtl92e_irq_rx_tasklet(struct tasklet_struct *t);
static void _rtl92e_cancel_deferred_work(struct r8192_priv *priv);
static int _rtl92e_up(struct net_device *dev, bool is_silent_reset);
static int _rtl92e_try_up(struct net_device *dev);
static int _rtl92e_down(struct net_device *dev, bool shutdownrf);
static void _rtl92e_restart(void *data);
/****************************************************************************
 * -----------------------------IO STUFF-------------------------
 ****************************************************************************/

/* MMIO register accessors.  dev->mem_start holds the ioremapped register
 * base; each write is followed by a 20us delay before returning.
 */
u8 rtl92e_readb(struct net_device *dev, int x)
{
	return 0xff & readb((u8 __iomem *)dev->mem_start + x);
}

u32 rtl92e_readl(struct net_device *dev, int x)
{
	return readl((u8 __iomem *)dev->mem_start + x);
}

u16 rtl92e_readw(struct net_device *dev, int x)
{
	return readw((u8 __iomem *)dev->mem_start + x);
}

void rtl92e_writeb(struct net_device *dev, int x, u8 y)
{
	writeb(y, (u8 __iomem *)dev->mem_start + x);

	udelay(20);
}

void rtl92e_writel(struct net_device *dev, int x, u32 y)
{
	writel(y, (u8 __iomem *)dev->mem_start + x);

	udelay(20);
}

void rtl92e_writew(struct net_device *dev, int x, u16 y)
{
	writew(y, (u8 __iomem *)dev->mem_start + x);

	udelay(20);
}
/****************************************************************************
 * -----------------------------GENERAL FUNCTION-------------------------
 ****************************************************************************/
/*
 * rtl92e_set_rf_state - request an RF power-state change (on/off/sleep)
 * @dev:           net device
 * @state_to_set:  target state (rf_on, rf_off, rf_sleep)
 * @change_source: which subsystem (HW switch, IPS, ...) requests it
 *
 * Serializes against concurrent RF changes via rf_change_in_progress
 * (spinning up to ~100ms), then records the off-reason bookkeeping and,
 * when allowed, programs the new state via rtl92e_set_rf_power_state().
 *
 * Return: true when the state change was carried out, false on timeout.
 */
bool rtl92e_set_rf_state(struct net_device *dev,
			 enum rt_rf_power_state state_to_set,
			 RT_RF_CHANGE_SOURCE change_source)
{
	struct r8192_priv *priv = rtllib_priv(dev);
	struct rtllib_device *ieee = priv->rtllib;
	bool action_allowed = false;
	bool connect_by_ssid = false;
	enum rt_rf_power_state rt_state;
	u16 rf_wait_counter = 0;
	unsigned long flag;

	/* Wait until no other RF change is in flight; we become the owner
	 * by setting rf_change_in_progress under the lock.
	 */
	while (true) {
		spin_lock_irqsave(&priv->rf_ps_lock, flag);
		if (priv->rf_change_in_progress) {
			spin_unlock_irqrestore(&priv->rf_ps_lock, flag);

			while (priv->rf_change_in_progress) {
				rf_wait_counter++;
				mdelay(1);

				/* Give up after ~100ms of busy-waiting. */
				if (rf_wait_counter > 100) {
					netdev_warn(dev,
						    "%s(): Timeout waiting for RF change.\n",
						    __func__);
					return false;
				}
			}
		} else {
			priv->rf_change_in_progress = true;
			spin_unlock_irqrestore(&priv->rf_ps_lock, flag);
			break;
		}
	}

	rt_state = priv->rtllib->rf_power_state;

	switch (state_to_set) {
	case rf_on:
		/* Clear this requester's off-reason bit; the radio only
		 * comes back when no reason remains.
		 */
		priv->rtllib->rf_off_reason &= (~change_source);

		if ((change_source == RF_CHANGE_BY_HW) && priv->hw_radio_off)
			priv->hw_radio_off = false;

		if (!priv->rtllib->rf_off_reason) {
			priv->rtllib->rf_off_reason = 0;
			action_allowed = true;

			/* Coming out of a HW-forced off state: reconnect. */
			if (rt_state == rf_off &&
			    change_source >= RF_CHANGE_BY_HW)
				connect_by_ssid = true;
		}
		break;

	case rf_off:
		if ((priv->rtllib->iw_mode == IW_MODE_INFRA) ||
		    (priv->rtllib->iw_mode == IW_MODE_ADHOC)) {
			if ((priv->rtllib->rf_off_reason > RF_CHANGE_BY_IPS) ||
			    (change_source > RF_CHANGE_BY_IPS)) {
				/* Remember LED state so it can be restored
				 * after the disconnect.
				 */
				if (ieee->link_state == MAC80211_LINKED)
					priv->blinked_ingpio = true;
				else
					priv->blinked_ingpio = false;
				rtllib_MgntDisconnect(priv->rtllib,
						      WLAN_REASON_DISASSOC_STA_HAS_LEFT);
			}
		}
		if ((change_source == RF_CHANGE_BY_HW) && !priv->hw_radio_off)
			priv->hw_radio_off = true;
		priv->rtllib->rf_off_reason |= change_source;
		action_allowed = true;
		break;

	case rf_sleep:
		priv->rtllib->rf_off_reason |= change_source;
		action_allowed = true;
		break;

	default:
		break;
	}

	if (action_allowed) {
		rtl92e_set_rf_power_state(dev, state_to_set);
		if (state_to_set == rf_on) {
			/* Re-kick association after the radio returned. */
			if (connect_by_ssid && priv->blinked_ingpio) {
				schedule_delayed_work(
					 &ieee->associate_procedure_wq, 0);
				priv->blinked_ingpio = false;
			}
		}
	}

	/* Release ownership of the RF change. */
	spin_lock_irqsave(&priv->rf_ps_lock, flag);
	priv->rf_change_in_progress = false;
	spin_unlock_irqrestore(&priv->rf_ps_lock, flag);

	return action_allowed;
}
/* Return 1 when the TX ring for queue @prio still has room for at least
 * two more descriptors, 0 otherwise.
 */
static short _rtl92e_check_nic_enough_desc(struct net_device *dev, int prio)
{
	struct r8192_priv *priv = rtllib_priv(dev);
	struct rtl8192_tx_ring *ring = &priv->tx_ring[prio];
	int free_slots = ring->entries - skb_queue_len(&ring->queue);

	return free_slots >= 2 ? 1 : 0;
}
/*
 * _rtl92e_tx_timeout - netdev watchdog callback for a stalled TX queue
 * @dev:     device whose transmit queue timed out
 * @txqueue: index of the stalled queue (unused; a full reset is scheduled)
 *
 * Schedules the driver's reset worker to recover the hardware.
 */
static void _rtl92e_tx_timeout(struct net_device *dev, unsigned int txqueue)
{
	struct r8192_priv *priv = rtllib_priv(dev);

	schedule_work(&priv->reset_wq);
	/* Fix: terminate the message with '\n' so the log line is not
	 * merged with the next printk.
	 */
	netdev_info(dev, "TXTIMEOUT\n");
}
/* Mark interrupts as enabled in software, then unmask them in hardware.
 * The flag is set first so the ISR sees a consistent state.
 */
void rtl92e_irq_enable(struct net_device *dev)
{
	struct r8192_priv *priv = rtllib_priv(dev);

	priv->irq_enabled = 1;

	rtl92e_enable_irq(dev);
}
/* Mask interrupts in hardware first, then clear the software flag —
 * the mirror image of rtl92e_irq_enable().
 */
void rtl92e_irq_disable(struct net_device *dev)
{
	struct r8192_priv *priv = rtllib_priv(dev);

	rtl92e_disable_irq(dev);

	priv->irq_enabled = 0;
}
/* rtllib callback: remember the requested channel and program it via the
 * RF-specific rf_set_chan hook when one is installed.
 */
static void _rtl92e_set_chan(struct net_device *dev, short ch)
{
	struct r8192_priv *priv = rtllib_priv(dev);

	priv->chan = ch;

	if (priv->rf_set_chan)
		priv->rf_set_chan(dev, priv->chan);
}
/*
 * _rtl92e_update_cap - sync preamble and slot-time settings with the AP
 * @dev: net device
 * @cap: capability field of the current network
 *
 * Programs the ACK preamble mode, and for G/N networks the slot time,
 * whenever the capability bits differ from the cached driver state.
 */
static void _rtl92e_update_cap(struct net_device *dev, u16 cap)
{
	struct r8192_priv *priv = rtllib_priv(dev);
	struct rtllib_network *net = &priv->rtllib->current_network;
	bool short_preamble;
	u8 slot_time_val;
	bool want_short_slot;

	if (cap & WLAN_CAPABILITY_SHORT_PREAMBLE) {
		/* Only touch the hardware on an actual mode change. */
		if (priv->dot11_current_preamble_mode != PREAMBLE_SHORT) {
			short_preamble = true;
			priv->dot11_current_preamble_mode = PREAMBLE_SHORT;
			priv->rtllib->SetHwRegHandler(dev, HW_VAR_ACK_PREAMBLE,
					(unsigned char *)&short_preamble);
		}
	} else if (priv->dot11_current_preamble_mode != PREAMBLE_LONG) {
		short_preamble = false;
		priv->dot11_current_preamble_mode = PREAMBLE_LONG;
		priv->rtllib->SetHwRegHandler(dev, HW_VAR_ACK_PREAMBLE,
				(unsigned char *)&short_preamble);
	}

	/* Slot time is only adjustable for G/N networks. */
	if (!(net->mode & (WIRELESS_MODE_G | WIRELESS_MODE_N_24G)))
		return;

	want_short_slot = (cap & WLAN_CAPABILITY_SHORT_SLOT_TIME) &&
		!priv->rtllib->ht_info->current_rt2rt_long_slot_time;

	if (want_short_slot) {
		if (priv->slot_time != SHORT_SLOT_TIME) {
			slot_time_val = SHORT_SLOT_TIME;
			priv->rtllib->SetHwRegHandler(dev,
					HW_VAR_SLOT_TIME, &slot_time_val);
		}
	} else if (priv->slot_time != NON_SHORT_SLOT_TIME) {
		slot_time_val = NON_SHORT_SLOT_TIME;
		priv->rtllib->SetHwRegHandler(dev,
				HW_VAR_SLOT_TIME, &slot_time_val);
	}
}
/* Default EDCA parameter set used when the network advertises no QoS
 * information: per-AC cw_min, cw_max, aifs, flags and tx-op limits.
 */
static const struct rtllib_qos_parameters def_qos_parameters = {
	{cpu_to_le16(3), cpu_to_le16(3), cpu_to_le16(3), cpu_to_le16(3)},
	{cpu_to_le16(7), cpu_to_le16(7), cpu_to_le16(7), cpu_to_le16(7)},
	{2, 2, 2, 2},
	{0, 0, 0, 0},
	{0, 0, 0, 0}
};
/* Deferred worker: refresh HT and capability-derived settings from the
 * most recently received beacon of the current network.
 */
static void _rtl92e_update_beacon(void *data)
{
	struct r8192_priv *priv = container_of(data, struct r8192_priv, update_beacon_wq.work);
	struct net_device *dev = priv->rtllib->dev;
	struct rtllib_device *ieee = priv->rtllib;
	struct rtllib_network *net = &ieee->current_network;

	if (ieee->ht_info->bCurrentHTSupport)
		HT_update_self_and_peer_setting(ieee, net);
	ieee->ht_info->current_rt2rt_long_slot_time = net->bssht.bd_rt2rt_long_slot_time;
	ieee->ht_info->RT2RT_HT_Mode = net->bssht.rt2rt_ht_mode;
	_rtl92e_update_cap(dev, net->capability);
}
/* Worker: push the current EDCA/QoS parameters for every AC queue to the
 * hardware; does nothing unless we are currently linked.
 */
static void _rtl92e_qos_activate(void *data)
{
	struct r8192_priv *priv = container_of(data, struct r8192_priv, qos_activate);
	struct net_device *dev = priv->rtllib->dev;
	int ac;

	mutex_lock(&priv->mutex);
	if (priv->rtllib->link_state == MAC80211_LINKED) {
		for (ac = 0; ac < QOS_QUEUE_NUM; ac++)
			priv->rtllib->SetHwRegHandler(dev, HW_VAR_AC_PARAM,
						      (u8 *)(&ac));
	}
	mutex_unlock(&priv->mutex);
}
/*
 * _rtl92e_qos_handle_probe_response - track QoS info from beacons/probes
 * @priv:           driver private data
 * @active_network: nonzero when @network is the network we are joined to
 * @network:        the network whose QoS flags were just parsed
 *
 * When the network advertises QoS and its parameter count changed,
 * schedules qos_activate to reprogram the EDCA registers; otherwise
 * falls back to the default parameter set.  Only acts while linked in
 * infrastructure mode.
 *
 * Return: always 0.
 */
static int _rtl92e_qos_handle_probe_response(struct r8192_priv *priv,
					     int active_network,
					     struct rtllib_network *network)
{
	int ret = 0;
	u32 size = sizeof(struct rtllib_qos_parameters);

	if (priv->rtllib->link_state != MAC80211_LINKED)
		return ret;

	if (priv->rtllib->iw_mode != IW_MODE_INFRA)
		return ret;

	if (network->flags & NETWORK_HAS_QOS_MASK) {
		if (active_network &&
		    (network->flags & NETWORK_HAS_QOS_PARAMETERS))
			network->qos_data.active = network->qos_data.supported;

		/* Re-activate only when the AP bumped its parameter-set
		 * count, i.e. the EDCA parameters actually changed.
		 */
		if ((network->qos_data.active == 1) && (active_network == 1) &&
		    (network->flags & NETWORK_HAS_QOS_PARAMETERS) &&
		    (network->qos_data.old_param_count !=
		     network->qos_data.param_count)) {
			network->qos_data.old_param_count =
				network->qos_data.param_count;
			priv->rtllib->wmm_acm = network->qos_data.wmm_acm;
			schedule_work(&priv->qos_activate);
		}
	} else {
		/* No QoS advertised: fall back to the defaults. */
		memcpy(&priv->rtllib->current_network.qos_data.parameters,
		       &def_qos_parameters, size);

		if ((network->qos_data.active == 1) && (active_network == 1))
			schedule_work(&priv->qos_activate);

		network->qos_data.active = 0;
		network->qos_data.supported = 0;
	}

	return 0;
}
/* rtllib callback invoked for every received beacon: updates the QoS
 * bookkeeping and kicks the deferred beacon-update worker.
 * Always returns 0.
 */
static int _rtl92e_handle_beacon(struct net_device *dev,
				 struct rtllib_beacon *beacon,
				 struct rtllib_network *network)
{
	struct r8192_priv *priv = rtllib_priv(dev);

	_rtl92e_qos_handle_probe_response(priv, 1, network);

	schedule_delayed_work(&priv->update_beacon_wq, 0);
	return 0;
}
/*
 * _rtl92e_qos_assoc_resp - adopt the QoS parameters of the network we
 * just associated with (or the defaults when it has none)
 * @priv:    driver private data
 * @network: the associated network
 *
 * Copies the parameter set under the rtllib lock, then re-initializes
 * EDCA turbo and schedules qos_activate to program the hardware.
 *
 * Return: always 0.
 */
static int _rtl92e_qos_assoc_resp(struct r8192_priv *priv,
				  struct rtllib_network *network)
{
	unsigned long flags;
	u32 size = sizeof(struct rtllib_qos_parameters);
	int set_qos_param = 0;

	if (!priv || !network)
		return 0;

	if (priv->rtllib->link_state != MAC80211_LINKED)
		return 0;

	if (priv->rtllib->iw_mode != IW_MODE_INFRA)
		return 0;

	spin_lock_irqsave(&priv->rtllib->lock, flags);
	if (network->flags & NETWORK_HAS_QOS_PARAMETERS) {
		memcpy(&priv->rtllib->current_network.qos_data.parameters,
		       &network->qos_data.parameters,
		       sizeof(struct rtllib_qos_parameters));
		priv->rtllib->current_network.qos_data.active = 1;
		priv->rtllib->wmm_acm = network->qos_data.wmm_acm;
		set_qos_param = 1;
		/* Remember the parameter-set count so later beacons can
		 * detect changes.
		 */
		priv->rtllib->current_network.qos_data.old_param_count =
			priv->rtllib->current_network.qos_data.param_count;
		priv->rtllib->current_network.qos_data.param_count =
			network->qos_data.param_count;
	} else {
		/* AP advertises no QoS: install the default EDCA set. */
		memcpy(&priv->rtllib->current_network.qos_data.parameters,
		       &def_qos_parameters, size);
		priv->rtllib->current_network.qos_data.active = 0;
		priv->rtllib->current_network.qos_data.supported = 0;
		set_qos_param = 1;
	}

	spin_unlock_irqrestore(&priv->rtllib->lock, flags);

	if (set_qos_param == 1) {
		rtl92e_dm_init_edca_turbo(priv->rtllib->dev);
		schedule_work(&priv->qos_activate);
	}
	return 0;
}
/* rtllib callback invoked after an association response: forwards the
 * network's QoS information to _rtl92e_qos_assoc_resp().  Always 0.
 */
static int _rtl92e_handle_assoc_response(struct net_device *dev,
					 struct rtllib_assoc_response_frame *resp,
					 struct rtllib_network *network)
{
	struct r8192_priv *priv = rtllib_priv(dev);

	_rtl92e_qos_assoc_resp(priv, network);
	return 0;
}
/*
 * _rtl92e_prepare_beacon - tasklet that refills the beacon TX descriptor
 *
 * Drops the previously queued beacon, builds a fresh one from the
 * softmac, fills descriptor 0 of the beacon ring and finally hands it
 * to the hardware by setting OWN (must be last, after the rest of the
 * descriptor is written).
 */
static void _rtl92e_prepare_beacon(struct tasklet_struct *t)
{
	struct r8192_priv *priv = from_tasklet(priv, t,
					       irq_prepare_beacon_tasklet);
	struct net_device *dev = priv->rtllib->dev;
	struct sk_buff *pskb = NULL, *pnewskb = NULL;
	struct cb_desc *tcb_desc = NULL;
	struct rtl8192_tx_ring *ring = NULL;
	struct tx_desc *pdesc = NULL;

	ring = &priv->tx_ring[BEACON_QUEUE];
	/* Discard the old beacon (kfree_skb(NULL) is a no-op). */
	pskb = __skb_dequeue(&ring->queue);
	kfree_skb(pskb);

	pnewskb = rtllib_get_beacon(priv->rtllib);
	if (!pnewskb)
		return;

	/* Per-packet control info lives in skb->cb, offset by 8. */
	tcb_desc = (struct cb_desc *)(pnewskb->cb + 8);
	tcb_desc->queue_index = BEACON_QUEUE;
	tcb_desc->data_rate = 2;
	tcb_desc->ratr_index = 7;
	tcb_desc->tx_dis_rate_fallback = 1;
	tcb_desc->tx_use_drv_assinged_rate = 1;
	skb_push(pnewskb, priv->rtllib->tx_headroom);

	pdesc = &ring->desc[0];
	rtl92e_fill_tx_desc(dev, pdesc, tcb_desc, pnewskb);
	__skb_queue_tail(&ring->queue, pnewskb);
	/* Transfer descriptor ownership to the NIC last. */
	pdesc->OWN = 1;
}
/* rtllib stop_send_beacons hook: intentionally empty — beacons stop when
 * the ring is no longer refilled by _rtl92e_prepare_beacon().
 */
static void _rtl92e_stop_beacon(struct net_device *dev)
{
}
void rtl92e_config_rate(struct net_device *dev, u16 *rate_config)
{
struct r8192_priv *priv = rtllib_priv(dev);
struct rtllib_network *net;
u8 i = 0, basic_rate = 0;
net = &priv->rtllib->current_network;
for (i = 0; i < net->rates_len; i++) {
basic_rate = net->rates[i] & 0x7f;
switch (basic_rate) {
case MGN_1M:
*rate_config |= RRSR_1M;
break;
case MGN_2M:
*rate_config |= RRSR_2M;
break;
case MGN_5_5M:
*rate_config |= RRSR_5_5M;
break;
case MGN_11M:
*rate_config |= RRSR_11M;
break;
case MGN_6M:
*rate_config |= RRSR_6M;
break;
case MGN_9M:
*rate_config |= RRSR_9M;
break;
case MGN_12M:
*rate_config |= RRSR_12M;
break;
case MGN_18M:
*rate_config |= RRSR_18M;
break;
case MGN_24M:
*rate_config |= RRSR_24M;
break;
case MGN_36M:
*rate_config |= RRSR_36M;
break;
case MGN_48M:
*rate_config |= RRSR_48M;
break;
case MGN_54M:
*rate_config |= RRSR_54M;
break;
}
}
for (i = 0; i < net->rates_ex_len; i++) {
basic_rate = net->rates_ex[i] & 0x7f;
switch (basic_rate) {
case MGN_1M:
*rate_config |= RRSR_1M;
break;
case MGN_2M:
*rate_config |= RRSR_2M;
break;
case MGN_5_5M:
*rate_config |= RRSR_5_5M;
break;
case MGN_11M:
*rate_config |= RRSR_11M;
break;
case MGN_6M:
*rate_config |= RRSR_6M;
break;
case MGN_9M:
*rate_config |= RRSR_9M;
break;
case MGN_12M:
*rate_config |= RRSR_12M;
break;
case MGN_18M:
*rate_config |= RRSR_18M;
break;
case MGN_24M:
*rate_config |= RRSR_24M;
break;
case MGN_36M:
*rate_config |= RRSR_36M;
break;
case MGN_48M:
*rate_config |= RRSR_48M;
break;
case MGN_54M:
*rate_config |= RRSR_54M;
break;
}
}
}
/* Refresh the HT operational rate sets: copy the supported MCS set into
 * the operational sets when running 11n, otherwise clear them.
 */
static void _rtl92e_refresh_support_rate(struct r8192_priv *priv)
{
	struct rtllib_device *ieee = priv->rtllib;

	if (ieee->mode == WIRELESS_MODE_N_24G) {
		memcpy(ieee->reg_dot11ht_oper_rate_set,
		       ieee->reg_ht_supp_rate_set, 16);
		memcpy(ieee->reg_dot11tx_ht_oper_rate_set,
		       ieee->reg_ht_supp_rate_set, 16);
	} else {
		memset(ieee->reg_dot11ht_oper_rate_set, 0, 16);
	}
}
/*
 * rtl92e_set_wireless_mode - select the active wireless mode (B/G/N)
 * @dev:           net device
 * @wireless_mode: requested WIRELESS_MODE_* bitmask
 *
 * Normalizes the requested mode (auto/unsupported -> 11n, B+G -> G),
 * stores it, toggles HT support accordingly and refreshes the rate sets.
 */
void rtl92e_set_wireless_mode(struct net_device *dev, u8 wireless_mode)
{
	struct r8192_priv *priv = rtllib_priv(dev);
	u8 support_mode = (WIRELESS_MODE_N_24G | WIRELESS_MODE_G | WIRELESS_MODE_B);

	/* Auto or anything we do not support falls back to 11n. */
	if (wireless_mode == WIRELESS_MODE_AUTO ||
	    !(wireless_mode & support_mode))
		wireless_mode = WIRELESS_MODE_N_24G;

	/* Simultaneous B+G collapses to G. */
	if ((wireless_mode & (WIRELESS_MODE_B | WIRELESS_MODE_G)) ==
	    (WIRELESS_MODE_G | WIRELESS_MODE_B))
		wireless_mode = WIRELESS_MODE_G;

	priv->rtllib->mode = wireless_mode;

	priv->rtllib->ht_info->enable_ht =
		(wireless_mode == WIRELESS_MODE_N_24G) ? 1 : 0;

	_rtl92e_refresh_support_rate(priv);
}
/*
 * _rtl92e_sta_up - bring the interface up (full init path)
 * @dev:             net device
 * @is_silent_reset: true when called from the silent-reset recovery path
 *
 * Initializes the adapter hardware, restarts the softmac protocol and
 * watchdog, and (re)enables the netdev TX queue.
 *
 * Return: 0 on success, -1 when hardware initialization fails.
 */
static int _rtl92e_sta_up(struct net_device *dev, bool is_silent_reset)
{
	struct r8192_priv *priv = rtllib_priv(dev);
	struct rt_pwr_save_ctrl *psc = (struct rt_pwr_save_ctrl *)
					(&priv->rtllib->pwr_save_ctrl);
	bool init_status;

	priv->up = 1;
	priv->rtllib->ieee_up = 1;

	priv->up_first_time = 0;
	init_status = rtl92e_start_adapter(dev);
	if (!init_status) {
		netdev_err(dev, "%s(): Initialization failed!\n", __func__);
		return -1;
	}

	RT_CLEAR_PS_LEVEL(psc, RT_RF_OFF_LEVL_HALT_NIC);

	if (priv->polling_timer_on == 0)
		rtl92e_check_rfctrl_gpio_timer(&priv->gpio_polling_timer);

	if (priv->rtllib->link_state != MAC80211_LINKED)
		rtllib_softmac_start_protocol(priv->rtllib, 0);
	rtllib_reset_queue(priv->rtllib);
	/* Arm the watchdog immediately. */
	_rtl92e_watchdog_timer_cb(&priv->watch_dog_timer);

	if (!netif_queue_stopped(dev))
		netif_start_queue(dev);
	else
		netif_wake_queue(dev);

	priv->bfirst_after_down = false;
	return 0;
}
/*
 * _rtl92e_sta_down - bring the interface down
 * @dev:        net device
 * @shutdownrf: whether RF should also be shut down
 *
 * Leaves power-save states, stops the queue/timers/workers, waits (up
 * to ~100ms) for any RF change in flight, halts the adapter and clears
 * the cached network state.
 *
 * Return: 0 on success, -1 when the interface was already down.
 */
static int _rtl92e_sta_down(struct net_device *dev, bool shutdownrf)
{
	struct r8192_priv *priv = rtllib_priv(dev);
	unsigned long flags = 0;
	u8 rf_in_progress_timeout = 0;

	if (priv->up == 0)
		return -1;

	priv->rtllib->rtllib_ips_leave(dev);

	if (priv->rtllib->link_state == MAC80211_LINKED)
		rtl92e_leisure_ps_leave(dev);

	priv->up = 0;
	priv->rtllib->ieee_up = 0;
	priv->bfirst_after_down = true;
	if (!netif_queue_stopped(dev))
		netif_stop_queue(dev);

	priv->rtllib->wpa_ie_len = 0;
	kfree(priv->rtllib->wpa_ie);
	priv->rtllib->wpa_ie = NULL;
	rtl92e_cam_reset(dev);
	memset(priv->rtllib->swcamtable, 0, sizeof(struct sw_cam_table) * 32);
	rtl92e_irq_disable(dev);

	del_timer_sync(&priv->watch_dog_timer);
	_rtl92e_cancel_deferred_work(priv);
	cancel_delayed_work(&priv->rtllib->hw_wakeup_wq);

	rtllib_softmac_stop_protocol(priv->rtllib, 0, true);
	/* Become the owner of the RF-change flag before halting the
	 * adapter; give up waiting after ~100ms.
	 */
	spin_lock_irqsave(&priv->rf_ps_lock, flags);
	while (priv->rf_change_in_progress) {
		spin_unlock_irqrestore(&priv->rf_ps_lock, flags);
		if (rf_in_progress_timeout > 100) {
			spin_lock_irqsave(&priv->rf_ps_lock, flags);
			break;
		}
		mdelay(1);
		rf_in_progress_timeout++;
		spin_lock_irqsave(&priv->rf_ps_lock, flags);
	}
	priv->rf_change_in_progress = true;
	spin_unlock_irqrestore(&priv->rf_ps_lock, flags);
	rtl92e_stop_adapter(dev, false);
	spin_lock_irqsave(&priv->rf_ps_lock, flags);
	priv->rf_change_in_progress = false;
	spin_unlock_irqrestore(&priv->rf_ps_lock, flags);
	udelay(100);
	/* Forget the current network but keep its list linkage intact. */
	memset(&priv->rtllib->current_network, 0,
	       offsetof(struct rtllib_network, list));

	return 0;
}
/* Wire the rtllib/softmac callback table to this driver's hardware
 * implementations.  Pure assignments, no hardware access.
 */
static void _rtl92e_init_priv_handler(struct net_device *dev)
{
	struct r8192_priv *priv = rtllib_priv(dev);

	priv->rtllib->softmac_hard_start_xmit = _rtl92e_hard_start_xmit;
	priv->rtllib->set_chan = _rtl92e_set_chan;
	priv->rtllib->link_change = rtl92e_link_change;
	priv->rtllib->softmac_data_hard_start_xmit = _rtl92e_hard_data_xmit;
	priv->rtllib->check_nic_enough_desc = _rtl92e_check_nic_enough_desc;
	priv->rtllib->handle_assoc_response = _rtl92e_handle_assoc_response;
	priv->rtllib->handle_beacon = _rtl92e_handle_beacon;
	priv->rtllib->set_wireless_mode = rtl92e_set_wireless_mode;
	priv->rtllib->leisure_ps_leave = rtl92e_leisure_ps_leave;
	priv->rtllib->set_bw_mode_handler = rtl92e_set_bw_mode;
	priv->rf_set_chan = rtl92e_set_channel;

	priv->rtllib->start_send_beacons = rtl92e_start_beacon;
	priv->rtllib->stop_send_beacons = _rtl92e_stop_beacon;

	priv->rtllib->sta_wake_up = rtl92e_hw_wakeup;
	priv->rtllib->enter_sleep_state = rtl92e_enter_sleep;
	priv->rtllib->ps_is_queue_empty = _rtl92e_is_tx_queue_empty;

	priv->rtllib->GetNmodeSupportBySecCfg = rtl92e_get_nmode_support_by_sec;
	priv->rtllib->GetHalfNmodeSupportByAPsHandler =
					 rtl92e_is_halfn_supported_by_ap;

	priv->rtllib->SetHwRegHandler = rtl92e_set_reg;
	priv->rtllib->AllowAllDestAddrHandler = rtl92e_set_monitor_mode;
	priv->rtllib->init_gain_handler = rtl92e_init_gain;
	priv->rtllib->rtllib_ips_leave_wq = rtl92e_rtllib_ips_leave_wq;
	priv->rtllib->rtllib_ips_leave = rtl92e_rtllib_ips_leave;
	priv->rtllib->ScanOperationBackupHandler = rtl92e_scan_op_backup;
}
/* Initialize the driver constants that never change at runtime:
 * currently only the maximum leisure-PS awake interval.
 */
static void _rtl92e_init_priv_constant(struct net_device *dev)
{
	struct r8192_priv *priv = rtllib_priv(dev);
	struct rt_pwr_save_ctrl *psc = (struct rt_pwr_save_ctrl *)
					&priv->rtllib->pwr_save_ctrl;

	psc->reg_max_lps_awake_intvl = 5;
}
/*
 * Initialize the mutable driver/rtllib state to its power-on defaults.
 * Called once from _rtl92e_init() before the hardware is touched.
 *
 * Fix: the original assigned priv->rtllib->iw_mode = IW_MODE_INFRA twice
 * (nothing in between changed it); the redundant second assignment has
 * been dropped.
 */
static void _rtl92e_init_priv_variable(struct net_device *dev)
{
	struct r8192_priv *priv = rtllib_priv(dev);
	u8 i;

	priv->dot11_current_preamble_mode = PREAMBLE_AUTO;
	priv->rtllib->status = 0;
	priv->polling_timer_on = 0;
	priv->up_first_time = 1;
	priv->blinked_ingpio = false;
	priv->being_init_adapter = false;
	priv->txringcount = 64;
	priv->rxbuffersize = 9100;
	priv->rxringcount = MAX_RX_COUNT;
	priv->irq_enabled = 0;
	priv->chan = 1;
	priv->rtllib->mode = WIRELESS_MODE_AUTO;
	priv->rtllib->iw_mode = IW_MODE_INFRA;
	priv->rtllib->net_promiscuous_md = false;
	priv->rtllib->intel_promiscuous_md_info.promiscuous_on = false;
	priv->rtllib->intel_promiscuous_md_info.fltr_src_sta_frame = false;
	priv->rtllib->ieee_up = 0;
	priv->retry_rts = DEFAULT_RETRY_RTS;
	priv->retry_data = DEFAULT_RETRY_DATA;
	priv->rtllib->rts = DEFAULT_RTS_THRESHOLD;
	priv->rtllib->rate = 110;
	priv->promisc = (dev->flags & IFF_PROMISC) ? 1 : 0;
	priv->bcck_in_ch14 = false;
	priv->cck_present_attn = 0;
	priv->rfa_txpowertrackingindex = 0;
	priv->rfc_txpowertrackingindex = 0;
	priv->cck_pwr_enl = 6;
	priv->rst_progress = RESET_TYPE_NORESET;
	priv->force_reset = false;
	memset(priv->rtllib->swcamtable, 0, sizeof(struct sw_cam_table) * 32);
	priv->rx_ctr = 0;
	priv->rtllib->wx_set_enc = 0;
	priv->hw_radio_off = false;
	priv->rtllib->rf_off_reason = 0;
	priv->rf_change_in_progress = false;
	priv->hw_rf_off_action = 0;
	priv->set_rf_pwr_state_in_progress = false;
	priv->rtllib->pwr_save_ctrl.bLeisurePs = true;
	priv->rtllib->LPSDelayCnt = 0;
	priv->rtllib->sta_sleep = LPS_IS_WAKE;
	priv->rtllib->rf_power_state = rf_on;
	priv->rtllib->current_network.beacon_interval = DEFAULT_BEACONINTERVAL;
	priv->rtllib->active_scan = 1;
	priv->rtllib->be_scan_inprogress = false;
	priv->rtllib->fts = DEFAULT_FRAG_THRESHOLD;

	/* NOTE(review): on allocation failure this only logs and keeps
	 * priv->fw_info == NULL; callers must tolerate that (the function
	 * is void, so there is no way to propagate the error from here).
	 */
	priv->fw_info = vzalloc(sizeof(struct rt_firmware));
	if (!priv->fw_info)
		netdev_err(dev,
			   "rtl8192e: Unable to allocate space for firmware\n");

	skb_queue_head_init(&priv->skb_queue);

	for (i = 0; i < MAX_QUEUE_SIZE; i++)
		skb_queue_head_init(&priv->rtllib->skb_waitQ[i]);
}
/* Initialize every spinlock and mutex used by the driver. */
static void _rtl92e_init_priv_lock(struct r8192_priv *priv)
{
	spin_lock_init(&priv->tx_lock);
	spin_lock_init(&priv->irq_th_lock);
	spin_lock_init(&priv->rf_ps_lock);
	spin_lock_init(&priv->ps_lock);
	mutex_init(&priv->wx_mutex);
	mutex_init(&priv->rf_mutex);
	mutex_init(&priv->mutex);
}
/*
 * Register all deferred-work items and tasklets.  The (void *) casts on
 * the work callbacks follow the file's existing pattern of adapting the
 * handlers to the work_func_t prototype.
 */
static void _rtl92e_init_priv_task(struct net_device *dev)
{
	struct r8192_priv *priv = rtllib_priv(dev);

	INIT_WORK(&priv->reset_wq, (void *)_rtl92e_restart);
	INIT_WORK(&priv->rtllib->ips_leave_wq, (void *)rtl92e_ips_leave_wq);
	INIT_DELAYED_WORK(&priv->watch_dog_wq, (void *)_rtl92e_watchdog_wq_cb);
	INIT_DELAYED_WORK(&priv->txpower_tracking_wq, (void *)rtl92e_dm_txpower_tracking_wq);
	INIT_DELAYED_WORK(&priv->rfpath_check_wq, (void *)rtl92e_dm_rf_pathcheck_wq);
	INIT_DELAYED_WORK(&priv->update_beacon_wq, (void *)_rtl92e_update_beacon);
	INIT_WORK(&priv->qos_activate, (void *)_rtl92e_qos_activate);
	INIT_DELAYED_WORK(&priv->rtllib->hw_wakeup_wq, (void *)rtl92e_hw_wakeup_wq);
	INIT_DELAYED_WORK(&priv->rtllib->hw_sleep_wq, (void *)rtl92e_hw_sleep_wq);

	/* softirq-context handlers for RX, TX completion and beacon prep */
	tasklet_setup(&priv->irq_rx_tasklet, _rtl92e_irq_rx_tasklet);
	tasklet_setup(&priv->irq_tx_tasklet, _rtl92e_irq_tx_tasklet);
	tasklet_setup(&priv->irq_prepare_beacon_tasklet,
		      _rtl92e_prepare_beacon);
}
/*
 * Validate the EEPROM channel plan and build the active channel map.
 * Channels 1-11 are always marked 1 (usable); 12 and 13 are marked 2
 * (NOTE(review): the semantics of value 2 in active_channel_map are not
 * visible here -- presumably passive/restricted; confirm in dot11d code).
 * Always returns 0.
 */
static short _rtl92e_get_channel_map(struct net_device *dev)
{
	int i;
	struct r8192_priv *priv = rtllib_priv(dev);

	/* fall back to FCC if the EEPROM plan is out of range */
	if (priv->chnl_plan >= COUNTRY_CODE_MAX) {
		netdev_info(dev,
			    "rtl819x_init:Error channel plan! Set to default.\n");
		priv->chnl_plan = COUNTRY_CODE_FCC;
	}
	dot11d_init(priv->rtllib);
	dot11d_channel_map(priv->chnl_plan, priv->rtllib);
	for (i = 1; i <= 11; i++)
		(priv->rtllib->active_channel_map)[i] = 1;
	(priv->rtllib->active_channel_map)[12] = 2;
	(priv->rtllib->active_channel_map)[13] = 2;
	return 0;
}
/*
 * One-shot driver initialization: software state, locks, work items,
 * EEPROM, channel map, DM, timers, IRQ and DMA descriptor rings.
 * Returns 0 on success, -1 on IRQ or descriptor-ring failure.
 */
static short _rtl92e_init(struct net_device *dev)
{
	struct r8192_priv *priv = rtllib_priv(dev);

	memset(&priv->stats, 0, sizeof(struct rt_stats));

	_rtl92e_init_priv_handler(dev);
	_rtl92e_init_priv_constant(dev);
	_rtl92e_init_priv_variable(dev);
	_rtl92e_init_priv_lock(priv);
	_rtl92e_init_priv_task(dev);
	rtl92e_get_eeprom_size(dev);
	rtl92e_init_variables(dev);
	_rtl92e_get_channel_map(dev);
	rtl92e_dm_init(dev);

	timer_setup(&priv->watch_dog_timer, _rtl92e_watchdog_timer_cb, 0);
	timer_setup(&priv->gpio_polling_timer, rtl92e_check_rfctrl_gpio_timer,
		    0);

	/* keep interrupts masked until the rings exist */
	rtl92e_irq_disable(dev);
	if (request_irq(dev->irq, _rtl92e_irq, IRQF_SHARED, dev->name, dev)) {
		netdev_err(dev, "Error allocating IRQ %d", dev->irq);
		return -1;
	}

	priv->irq = dev->irq;

	if (_rtl92e_pci_initdescring(dev) != 0) {
		netdev_err(dev, "Endopoints initialization failed");
		free_irq(dev->irq, dev);
		return -1;
	}

	return 0;
}
/***************************************************************************
* -------------------------------WATCHDOG STUFF---------------------------
**************************************************************************/
static short _rtl92e_is_tx_queue_empty(struct net_device *dev)
{
int i = 0;
struct r8192_priv *priv = rtllib_priv(dev);
for (i = 0; i <= MGNT_QUEUE; i++) {
if ((i == TXCMD_QUEUE) || (i == HCCA_QUEUE))
continue;
if (skb_queue_len(&(&priv->tx_ring[i])->queue) > 0) {
netdev_info(dev, "===>tx queue is not empty:%d, %d\n",
i, skb_queue_len(&(&priv->tx_ring[i])->queue));
return 0;
}
}
return 1;
}
/*
 * Detect a stuck transmitter: walk every data TX ring (command and
 * beacon queues excluded) and bump the stuck counter of the skb at each
 * ring head.  If any ring had pending frames, ask the firmware-side
 * check (rtl92e_is_tx_stuck) whether a silent reset is needed.
 *
 * Fix: removed a switch on priv->rtllib->ps in which every arm
 * (including default) was an empty break -- pure dead code.
 */
static enum reset_type _rtl92e_tx_check_stuck(struct net_device *dev)
{
	struct r8192_priv *priv = rtllib_priv(dev);
	u8 QueueID;
	bool bCheckFwTxCnt = false;
	struct rtl8192_tx_ring *ring = NULL;
	struct sk_buff *skb = NULL;
	struct cb_desc *tcb_desc = NULL;
	unsigned long flags = 0;

	spin_lock_irqsave(&priv->irq_th_lock, flags);
	for (QueueID = 0; QueueID < MAX_TX_QUEUE; QueueID++) {
		if (QueueID == TXCMD_QUEUE)
			continue;
		if (QueueID == BEACON_QUEUE)
			continue;
		ring = &priv->tx_ring[QueueID];

		if (skb_queue_len(&ring->queue) == 0) {
			continue;
		} else {
			/* head-of-line frame: count how many watchdog
			 * periods it has been waiting
			 */
			skb = __skb_peek(&ring->queue);
			tcb_desc = (struct cb_desc *)(skb->cb +
				   MAX_DEV_ADDR_SIZE);
			tcb_desc->nStuckCount++;
			bCheckFwTxCnt = true;
			if (tcb_desc->nStuckCount > 1)
				netdev_info(dev,
					    "%s: QueueID=%d tcb_desc->nStuckCount=%d\n",
					    __func__, QueueID,
					    tcb_desc->nStuckCount);
		}
	}
	spin_unlock_irqrestore(&priv->irq_th_lock, flags);

	if (bCheckFwTxCnt) {
		if (rtl92e_is_tx_stuck(dev))
			return RESET_TYPE_SILENT;
	}

	return RESET_TYPE_NORESET;
}
/* Map the hardware RX-stuck check onto a reset-type verdict. */
static enum reset_type _rtl92e_rx_check_stuck(struct net_device *dev)
{
	return rtl92e_is_rx_stuck(dev) ? RESET_TYPE_SILENT
				       : RESET_TYPE_NORESET;
}
static enum reset_type _rtl92e_if_check_reset(struct net_device *dev)
{
struct r8192_priv *priv = rtllib_priv(dev);
enum reset_type TxResetType = RESET_TYPE_NORESET;
enum reset_type RxResetType = RESET_TYPE_NORESET;
enum rt_rf_power_state rfState;
rfState = priv->rtllib->rf_power_state;
if (rfState == rf_on)
TxResetType = _rtl92e_tx_check_stuck(dev);
if (rfState == rf_on &&
(priv->rtllib->iw_mode == IW_MODE_INFRA) &&
(priv->rtllib->link_state == MAC80211_LINKED))
RxResetType = _rtl92e_rx_check_stuck(dev);
if (TxResetType == RESET_TYPE_NORMAL ||
RxResetType == RESET_TYPE_NORMAL) {
netdev_info(dev, "%s(): TxResetType is %d, RxResetType is %d\n",
__func__, TxResetType, RxResetType);
return RESET_TYPE_NORMAL;
} else if (TxResetType == RESET_TYPE_SILENT ||
RxResetType == RESET_TYPE_SILENT) {
netdev_info(dev, "%s(): TxResetType is %d, RxResetType is %d\n",
__func__, TxResetType, RxResetType);
return RESET_TYPE_SILENT;
} else {
return RESET_TYPE_NORESET;
}
}
/*
 * Perform a "silent" recovery reset: tear the interface down without
 * notifying userspace, bring it back up (retrying up to 3 times), then
 * restore link state, CAM entries and dynamic-mechanism state.
 * Guarded against re-entry via rst_progress/rf_change_in_progress.
 */
static void _rtl92e_if_silent_reset(struct net_device *dev)
{
	struct r8192_priv *priv = rtllib_priv(dev);
	u8 reset_times = 0;
	int reset_status = 0;
	struct rtllib_device *ieee = priv->rtllib;
	unsigned long flag;

	if (priv->rst_progress == RESET_TYPE_NORESET) {
		priv->rst_progress = RESET_TYPE_SILENT;

		/* bail out if an RF state change is already running */
		spin_lock_irqsave(&priv->rf_ps_lock, flag);
		if (priv->rf_change_in_progress) {
			spin_unlock_irqrestore(&priv->rf_ps_lock, flag);
			goto END;
		}
		priv->rf_change_in_progress = true;
		priv->reset_in_progress = true;
		spin_unlock_irqrestore(&priv->rf_ps_lock, flag);

RESET_START:
		mutex_lock(&priv->wx_mutex);

		if (priv->rtllib->link_state == MAC80211_LINKED)
			rtl92e_leisure_ps_leave(dev);

		/* NOTE(review): the condition tests priv->up (driver IS
		 * up) yet the message says "not up" and the function
		 * returns without clearing the in-progress flags -- the
		 * sense of this guard looks inverted; confirm against
		 * upstream history before changing.
		 */
		if (priv->up) {
			netdev_info(dev, "%s():the driver is not up.\n",
				    __func__);
			mutex_unlock(&priv->wx_mutex);
			return;
		}
		priv->up = 0;

		mdelay(1000);

		/* quiesce TX, IRQs, timers, deferred work and scanning */
		if (!netif_queue_stopped(dev))
			netif_stop_queue(dev);

		rtl92e_irq_disable(dev);
		del_timer_sync(&priv->watch_dog_timer);
		_rtl92e_cancel_deferred_work(priv);
		rtl92e_dm_deinit(dev);
		rtllib_stop_scan_syncro(ieee);

		if (ieee->link_state == MAC80211_LINKED) {
			mutex_lock(&ieee->wx_mutex);
			netdev_info(dev, "ieee->link_state is MAC80211_LINKED\n");
			rtllib_stop_send_beacons(priv->rtllib);
			del_timer_sync(&ieee->associate_timer);
			cancel_delayed_work(&ieee->associate_retry_wq);
			rtllib_stop_scan(ieee);
			netif_carrier_off(dev);
			mutex_unlock(&ieee->wx_mutex);
		} else {
			netdev_info(dev, "ieee->link_state is NOT LINKED\n");
			rtllib_softmac_stop_protocol(priv->rtllib, 0, true);
		}

		rtl92e_dm_backup_state(dev);

		mutex_unlock(&priv->wx_mutex);

		/* bring the adapter back up; retry a few times on failure */
		reset_status = _rtl92e_up(dev, true);

		if (reset_status == -1) {
			if (reset_times < 3) {
				reset_times++;
				goto RESET_START;
			} else {
				netdev_warn(dev, "%s(): Reset Failed\n",
					    __func__);
			}
		}

		ieee->is_silent_reset = 1;

		spin_lock_irqsave(&priv->rf_ps_lock, flag);
		priv->rf_change_in_progress = false;
		spin_unlock_irqrestore(&priv->rf_ps_lock, flag);

		rtl92e_enable_hw_security_config(dev);

		/* re-join the network we were on before the reset */
		if (ieee->link_state == MAC80211_LINKED && ieee->iw_mode ==
		    IW_MODE_INFRA) {
			ieee->set_chan(ieee->dev,
				       ieee->current_network.channel);

			schedule_work(&ieee->associate_complete_wq);

		} else if (ieee->link_state == MAC80211_LINKED && ieee->iw_mode ==
			   IW_MODE_ADHOC) {
			ieee->set_chan(ieee->dev,
				       ieee->current_network.channel);
			ieee->link_change(ieee->dev);

			notify_wx_assoc_event(ieee);

			rtllib_start_send_beacons(ieee);

			netif_carrier_on(ieee->dev);
		}

		rtl92e_cam_restore(dev);
		rtl92e_dm_restore_state(dev);
END:
		priv->rst_progress = RESET_TYPE_NORESET;
		priv->reset_count++;
		priv->reset_in_progress = false;

		/* tell the firmware the host is done with the reset */
		rtl92e_writeb(dev, UFWP, 1);
	}
}
/*
 * Record this period's beacon/data RX counters into the circular
 * link-detect history and return the totals over the whole window.
 */
static void _rtl92e_update_rxcounts(struct r8192_priv *priv, u32 *TotalRxBcnNum,
				    u32 *TotalRxDataNum)
{
	u16 slot;
	u8 idx;

	*TotalRxBcnNum = 0;
	*TotalRxDataNum = 0;

	/* advance the circular history and store the current period */
	slot = (priv->rtllib->link_detect_info.SlotIndex++) %
	       (priv->rtllib->link_detect_info.SlotNum);
	priv->rtllib->link_detect_info.RxBcnNum[slot] =
		priv->rtllib->link_detect_info.NumRecvBcnInPeriod;
	priv->rtllib->link_detect_info.RxDataNum[slot] =
		priv->rtllib->link_detect_info.NumRecvDataInPeriod;

	/* sum the whole window */
	for (idx = 0; idx < priv->rtllib->link_detect_info.SlotNum; idx++) {
		*TotalRxBcnNum += priv->rtllib->link_detect_info.RxBcnNum[idx];
		*TotalRxDataNum += priv->rtllib->link_detect_info.RxDataNum[idx];
	}
}
/*
 * Periodic watchdog (scheduled every RTLLIB_WATCH_DOG_TIME ms by
 * _rtl92e_watchdog_timer_cb).  Responsibilities:
 *   - run the dynamic-mechanism watchdog,
 *   - enter IPS when idle and unassociated,
 *   - enter/leave leisure power save based on traffic counters,
 *   - detect AP loss (no beacons/data) and trigger re-association,
 *   - periodically check for TX/RX stuck and kick a silent reset.
 */
static void _rtl92e_watchdog_wq_cb(void *data)
{
	struct r8192_priv *priv = container_of_dwork_rsl(data,
							 struct r8192_priv, watch_dog_wq);
	struct net_device *dev = priv->rtllib->dev;
	struct rtllib_device *ieee = priv->rtllib;
	enum reset_type ResetType = RESET_TYPE_NORESET;
	static u8 check_reset_cnt;
	unsigned long flags;
	struct rt_pwr_save_ctrl *psc = (struct rt_pwr_save_ctrl *)
					(&priv->rtllib->pwr_save_ctrl);
	bool bBusyTraffic = false;
	bool bHigherBusyTraffic = false;
	bool bHigherBusyRxTraffic = false;
	bool bEnterPS = false;

	if (!priv->up || priv->hw_radio_off)
		return;

	/* count watchdog periods since association (saturates at 2) */
	if (priv->rtllib->link_state >= MAC80211_LINKED) {
		if (priv->rtllib->CntAfterLink < 2)
			priv->rtllib->CntAfterLink++;
	} else {
		priv->rtllib->CntAfterLink = 0;
	}

	rtl92e_dm_watchdog(dev);

	/* idle + unassociated + nothing pending -> inactive power save */
	if (!rtllib_act_scanning(priv->rtllib, false)) {
		if ((ieee->iw_mode == IW_MODE_INFRA) && (ieee->link_state ==
		     MAC80211_NOLINK) &&
		     (ieee->rf_power_state == rf_on) && !ieee->is_set_key &&
		     (!ieee->proto_stoppping) && !ieee->wx_set_enc) {
			if ((ieee->pwr_save_ctrl.ReturnPoint ==
			     IPS_CALLBACK_NONE) &&
			     (!ieee->net_promiscuous_md)) {
				rtl92e_ips_enter(dev);
			}
		}
	}

	/* classify traffic load and decide on leisure power save;
	 * thresholds are frames per watchdog period
	 */
	if ((ieee->link_state == MAC80211_LINKED) && (ieee->iw_mode ==
	     IW_MODE_INFRA) && (!ieee->net_promiscuous_md)) {
		if (ieee->link_detect_info.NumRxOkInPeriod > 100 ||
		    ieee->link_detect_info.NumTxOkInPeriod > 100)
			bBusyTraffic = true;

		if (ieee->link_detect_info.NumRxOkInPeriod > 4000 ||
		    ieee->link_detect_info.NumTxOkInPeriod > 4000) {
			bHigherBusyTraffic = true;
			if (ieee->link_detect_info.NumRxOkInPeriod > 5000)
				bHigherBusyRxTraffic = true;
			else
				bHigherBusyRxTraffic = false;
		}

		if (((ieee->link_detect_info.NumRxUnicastOkInPeriod +
		      ieee->link_detect_info.NumTxOkInPeriod) > 8) ||
		     (ieee->link_detect_info.NumRxUnicastOkInPeriod > 2))
			bEnterPS = false;
		else
			bEnterPS = true;

		/* LPS is not worthwhile with very short beacon intervals */
		if (ieee->current_network.beacon_interval < 95)
			bEnterPS = false;

		if (bEnterPS)
			rtl92e_leisure_ps_enter(dev);
		else
			rtl92e_leisure_ps_leave(dev);

	} else {
		rtl92e_leisure_ps_leave(dev);
	}

	/* reset per-period counters and publish the traffic flags */
	ieee->link_detect_info.NumRxOkInPeriod = 0;
	ieee->link_detect_info.NumTxOkInPeriod = 0;
	ieee->link_detect_info.NumRxUnicastOkInPeriod = 0;
	ieee->link_detect_info.bBusyTraffic = bBusyTraffic;

	ieee->link_detect_info.bHigherBusyTraffic = bHigherBusyTraffic;
	ieee->link_detect_info.bHigherBusyRxTraffic = bHigherBusyRxTraffic;

	/* AP-loss detection: nothing received over the whole window
	 * means the AP is gone -> roam
	 */
	if (ieee->link_state == MAC80211_LINKED && ieee->iw_mode == IW_MODE_INFRA) {
		u32 TotalRxBcnNum = 0;
		u32 TotalRxDataNum = 0;

		_rtl92e_update_rxcounts(priv, &TotalRxBcnNum, &TotalRxDataNum);

		if ((TotalRxBcnNum + TotalRxDataNum) == 0)
			priv->check_roaming_cnt++;
		else
			priv->check_roaming_cnt = 0;

		if (priv->check_roaming_cnt > 0) {
			if (ieee->rf_power_state == rf_off)
				netdev_info(dev, "%s(): RF is off\n", __func__);

			netdev_info(dev,
				    "===>%s(): AP is power off, chan:%d, connect another one\n",
				    __func__, priv->chan);

			ieee->link_state = RTLLIB_ASSOCIATING;

			RemovePeerTS(priv->rtllib,
				     priv->rtllib->current_network.bssid);
			ieee->is_roaming = true;
			ieee->is_set_key = false;
			ieee->link_change(dev);
			notify_wx_assoc_event(ieee);

			/* open/WEP networks can retry association directly */
			if (!(ieee->rtllib_ap_sec_type(ieee) &
			     (SEC_ALG_CCMP | SEC_ALG_TKIP)))
				schedule_delayed_work(
					&ieee->associate_procedure_wq, 0);

			priv->check_roaming_cnt = 0;
		}
		ieee->link_detect_info.NumRecvBcnInPeriod = 0;
		ieee->link_detect_info.NumRecvDataInPeriod = 0;
	}

	/* every few periods, probe for a stuck TX/RX path */
	spin_lock_irqsave(&priv->tx_lock, flags);
	if ((check_reset_cnt++ >= 3) && (!ieee->is_roaming) &&
	    (!priv->rf_change_in_progress) && (!psc->bSwRfProcessing)) {
		ResetType = _rtl92e_if_check_reset(dev);
		check_reset_cnt = 3;
	}
	spin_unlock_irqrestore(&priv->tx_lock, flags);

	if (ResetType == RESET_TYPE_NORMAL) {
		priv->rst_progress = RESET_TYPE_NORMAL;
		return;
	}

	if ((priv->force_reset || ResetType == RESET_TYPE_SILENT))
		_rtl92e_if_silent_reset(dev);
	priv->force_reset = false;
	priv->reset_in_progress = false;
}
/*
 * Timer tick: queue the watchdog work item and re-arm the timer for the
 * next RTLLIB_WATCH_DOG_TIME-millisecond period.
 */
static void _rtl92e_watchdog_timer_cb(struct timer_list *t)
{
	struct r8192_priv *priv = from_timer(priv, t, watch_dog_timer);

	schedule_delayed_work(&priv->watch_dog_wq, 0);
	mod_timer(&priv->watch_dog_timer, jiffies +
		  msecs_to_jiffies(RTLLIB_WATCH_DOG_TIME));
}
/****************************************************************************
* ---------------------------- NIC TX/RX STUFF---------------------------
****************************************************************************/
/* Thin wrapper: enable hardware reception. */
void rtl92e_rx_enable(struct net_device *dev)
{
	rtl92e_enable_rx(dev);
}
/* Enable hardware transmission and reset the software MAC TX queues. */
void rtl92e_tx_enable(struct net_device *dev)
{
	struct r8192_priv *priv = rtllib_priv(dev);

	rtl92e_enable_tx(dev);

	rtllib_reset_queue(priv->rtllib);
}
/*
 * Free every RX ring: unmap and free each receive skb (the DMA handle
 * is stashed in skb->cb by the alloc path), then release the coherent
 * descriptor array.  NULL skb slots (partial allocation) are skipped.
 */
static void _rtl92e_free_rx_ring(struct net_device *dev)
{
	struct r8192_priv *priv = rtllib_priv(dev);
	int i, rx_queue_idx;

	for (rx_queue_idx = 0; rx_queue_idx < MAX_RX_QUEUE;
	     rx_queue_idx++) {
		for (i = 0; i < priv->rxringcount; i++) {
			struct sk_buff *skb = priv->rx_buf[rx_queue_idx][i];

			if (!skb)
				continue;

			dma_unmap_single(&priv->pdev->dev,
					 *((dma_addr_t *)skb->cb),
					 priv->rxbuffersize, DMA_FROM_DEVICE);
			kfree_skb(skb);
		}

		dma_free_coherent(&priv->pdev->dev,
				  sizeof(*priv->rx_ring[rx_queue_idx]) * priv->rxringcount,
				  priv->rx_ring[rx_queue_idx],
				  priv->rx_ring_dma[rx_queue_idx]);
		priv->rx_ring[rx_queue_idx] = NULL;
	}
}
/*
 * Free one TX ring: drain and unmap every queued skb in descriptor
 * order, then release the coherent descriptor array.
 */
static void _rtl92e_free_tx_ring(struct net_device *dev, unsigned int prio)
{
	struct r8192_priv *priv = rtllib_priv(dev);
	struct rtl8192_tx_ring *ring = &priv->tx_ring[prio];

	while (skb_queue_len(&ring->queue)) {
		struct tx_desc *entry = &ring->desc[ring->idx];
		struct sk_buff *skb = __skb_dequeue(&ring->queue);

		dma_unmap_single(&priv->pdev->dev, entry->TxBuffAddr,
				 skb->len, DMA_TO_DEVICE);
		kfree_skb(skb);
		/* keep idx in step with the dequeued descriptor */
		ring->idx = (ring->idx + 1) % ring->entries;
	}

	dma_free_coherent(&priv->pdev->dev,
			  sizeof(*ring->desc) * ring->entries, ring->desc,
			  ring->dma);
	ring->desc = NULL;
}
/*
 * rtllib data-frame transmit hook.  Drops the frame when the RF is off,
 * the interface is down or a reset is in progress; otherwise prepends
 * the TX headroom and hands the skb to _rtl92e_tx().  Consumes the skb
 * on every path (frees it on failure).
 */
static void _rtl92e_hard_data_xmit(struct sk_buff *skb, struct net_device *dev,
				   int rate)
{
	struct r8192_priv *priv = rtllib_priv(dev);
	int ret;
	struct cb_desc *tcb_desc = (struct cb_desc *)(skb->cb +
				    MAX_DEV_ADDR_SIZE);
	u8 queue_index = tcb_desc->queue_index;

	if ((priv->rtllib->rf_power_state == rf_off) || !priv->up ||
	     priv->reset_in_progress) {
		kfree_skb(skb);
		return;
	}

	/* command frames should go through _rtl92e_hard_start_xmit */
	if (queue_index == TXCMD_QUEUE)
		netdev_warn(dev, "%s(): queue index == TXCMD_QUEUE\n",
			    __func__);

	/* stash the netdev pointer in the cb for later stages */
	memcpy((unsigned char *)(skb->cb), &dev, sizeof(dev));
	skb_push(skb, priv->rtllib->tx_headroom);
	ret = _rtl92e_tx(dev, skb);

	if (queue_index != MGNT_QUEUE) {
		priv->rtllib->stats.tx_bytes += (skb->len -
						 priv->rtllib->tx_headroom);
		priv->rtllib->stats.tx_packets++;
	}

	if (ret != 0)
		kfree_skb(skb);
}
/*
 * rtllib management/command transmit hook.  Command-queue frames are
 * routed to _rtl92e_tx_cmd(); everything else gets fixed driver-chosen
 * rate settings (ratr_index 7, no rate fallback) and goes through
 * _rtl92e_tx().  Returns 0 or the _rtl92e_tx() result; frees the skb
 * when it cannot be sent.
 */
static int _rtl92e_hard_start_xmit(struct sk_buff *skb, struct net_device *dev)
{
	struct r8192_priv *priv = rtllib_priv(dev);
	int ret;
	struct cb_desc *tcb_desc = (struct cb_desc *)(skb->cb +
				    MAX_DEV_ADDR_SIZE);
	u8 queue_index = tcb_desc->queue_index;

	/* command frames are allowed even while down/resetting */
	if (queue_index != TXCMD_QUEUE) {
		if ((priv->rtllib->rf_power_state == rf_off) ||
		     !priv->up || priv->reset_in_progress) {
			kfree_skb(skb);
			return 0;
		}
	}

	memcpy((unsigned char *)(skb->cb), &dev, sizeof(dev));
	if (queue_index == TXCMD_QUEUE) {
		_rtl92e_tx_cmd(dev, skb);
		return 0;
	}

	/* force driver-assigned rate for management frames */
	tcb_desc->ratr_index = 7;
	tcb_desc->tx_dis_rate_fallback = 1;
	tcb_desc->tx_use_drv_assinged_rate = 1;
	tcb_desc->bTxEnableFwCalcDur = 1;
	skb_push(skb, priv->rtllib->tx_headroom);
	ret = _rtl92e_tx(dev, skb);

	if (ret != 0)
		kfree_skb(skb);
	return ret;
}
/*
 * TX-done handling for one ring: reclaim completed descriptors (stop at
 * the first still hardware-owned one), unmap and free their skbs, then
 * kick the TX tasklet to refill from the wait queues.  The beacon queue
 * ignores the OWN bit and does not advance idx (single-slot ring).
 */
static void _rtl92e_tx_isr(struct net_device *dev, int prio)
{
	struct r8192_priv *priv = rtllib_priv(dev);

	struct rtl8192_tx_ring *ring = &priv->tx_ring[prio];

	while (skb_queue_len(&ring->queue)) {
		struct tx_desc *entry = &ring->desc[ring->idx];
		struct sk_buff *skb;

		if (prio != BEACON_QUEUE) {
			/* OWN still set: hardware hasn't sent it yet */
			if (entry->OWN)
				return;
			ring->idx = (ring->idx + 1) % ring->entries;
		}

		skb = __skb_dequeue(&ring->queue);
		dma_unmap_single(&priv->pdev->dev, entry->TxBuffAddr,
				 skb->len, DMA_TO_DEVICE);

		kfree_skb(skb);
	}
	if (prio != BEACON_QUEUE)
		tasklet_schedule(&priv->irq_tx_tasklet);
}
/*
 * Queue a firmware command frame on the TXCMD ring.  Fills the next
 * free command descriptor under the irq_th_lock and appends the skb to
 * the ring's software queue; ownership of the skb passes to the ring.
 */
static void _rtl92e_tx_cmd(struct net_device *dev, struct sk_buff *skb)
{
	struct r8192_priv *priv = rtllib_priv(dev);
	struct rtl8192_tx_ring *ring;
	struct tx_desc_cmd *entry;
	unsigned int idx;
	struct cb_desc *tcb_desc;
	unsigned long flags;

	spin_lock_irqsave(&priv->irq_th_lock, flags);
	ring = &priv->tx_ring[TXCMD_QUEUE];

	/* next free slot = consumer index + queued count */
	idx = (ring->idx + skb_queue_len(&ring->queue)) % ring->entries;
	entry = (struct tx_desc_cmd *)&ring->desc[idx];

	tcb_desc = (struct cb_desc *)(skb->cb + MAX_DEV_ADDR_SIZE);

	rtl92e_fill_tx_cmd_desc(dev, entry, tcb_desc, skb);

	__skb_queue_tail(&ring->queue, skb);
	spin_unlock_irqrestore(&priv->irq_th_lock, flags);
}
/*
 * Queue a frame on its TX ring and poll the hardware to send it.
 * The skb is expected to start with a tx_fwinfo_8190pci header followed
 * by the 802.11 frame.  Returns 0 on success; returns skb->len (non-
 * zero, caller then frees the skb) when the ring has no free
 * descriptor.
 */
static short _rtl92e_tx(struct net_device *dev, struct sk_buff *skb)
{
	struct r8192_priv *priv = rtllib_priv(dev);
	struct rtl8192_tx_ring *ring;
	unsigned long flags;
	struct cb_desc *tcb_desc = (struct cb_desc *)(skb->cb +
				    MAX_DEV_ADDR_SIZE);
	struct tx_desc *pdesc = NULL;
	struct rtllib_hdr_1addr *header = NULL;
	u8 *pda_addr = NULL;
	int idx;
	u32 fwinfo_size = 0;

	priv->rtllib->bAwakePktSent = true;

	fwinfo_size = sizeof(struct tx_fwinfo_8190pci);

	/* destination address lives right after the fwinfo header */
	header = (struct rtllib_hdr_1addr *)(((u8 *)skb->data) + fwinfo_size);
	pda_addr = header->addr1;

	/* account unicast payload bytes (excluding fwinfo) */
	if (!is_broadcast_ether_addr(pda_addr) && !is_multicast_ether_addr(pda_addr))
		priv->stats.txbytesunicast += skb->len - fwinfo_size;

	spin_lock_irqsave(&priv->irq_th_lock, flags);
	ring = &priv->tx_ring[tcb_desc->queue_index];
	if (tcb_desc->queue_index != BEACON_QUEUE)
		idx = (ring->idx + skb_queue_len(&ring->queue)) % ring->entries;
	else
		idx = 0;	/* beacon ring is single-slot */

	pdesc = &ring->desc[idx];
	if ((pdesc->OWN == 1) && (tcb_desc->queue_index != BEACON_QUEUE)) {
		/* descriptor still owned by hardware: ring is full */
		netdev_warn(dev,
			    "No more TX desc@%d, ring->idx = %d, idx = %d, skblen = 0x%x queuelen=%d",
			    tcb_desc->queue_index, ring->idx, idx, skb->len,
			    skb_queue_len(&ring->queue));
		spin_unlock_irqrestore(&priv->irq_th_lock, flags);
		return skb->len;
	}
	rtl92e_fill_tx_desc(dev, pdesc, tcb_desc, skb);
	__skb_queue_tail(&ring->queue, skb);

	/* hand the descriptor to hardware only after it is fully filled */
	pdesc->OWN = 1;
	spin_unlock_irqrestore(&priv->irq_th_lock, flags);
	netif_trans_update(dev);

	rtl92e_writew(dev, TP_POLL, 0x01 << tcb_desc->queue_index);
	return 0;
}
/*
 * Allocate the RX descriptor rings and their receive buffers.
 *
 * For each of the MAX_RX_QUEUE rings: allocate a coherent descriptor
 * array (must be 256-byte aligned, hence the & 0xFF check), then one
 * DMA-mapped skb per descriptor; the last descriptor gets the EOR bit.
 * The DMA handle of each skb is stored in skb->cb for the free path.
 *
 * Returns 0 on success, -ENOMEM on any failure.
 *
 * Fixes: the skb-allocation failure path previously returned 0
 * (success) with a half-initialized ring; it now returns -ENOMEM, and
 * the dma-mapping failure path returns -ENOMEM instead of a bare -1 for
 * consistency.  NOTE(review): callers still do not free partially
 * allocated rings on error -- pre-existing behavior, unchanged here.
 */
static short _rtl92e_alloc_rx_ring(struct net_device *dev)
{
	struct r8192_priv *priv = rtllib_priv(dev);
	struct rx_desc *entry = NULL;
	int i, rx_queue_idx;

	for (rx_queue_idx = 0; rx_queue_idx < MAX_RX_QUEUE; rx_queue_idx++) {
		priv->rx_ring[rx_queue_idx] = dma_alloc_coherent(&priv->pdev->dev,
								 sizeof(*priv->rx_ring[rx_queue_idx]) * priv->rxringcount,
								 &priv->rx_ring_dma[rx_queue_idx],
								 GFP_ATOMIC);
		if (!priv->rx_ring[rx_queue_idx] ||
		    (unsigned long)priv->rx_ring[rx_queue_idx] & 0xFF) {
			netdev_warn(dev, "Cannot allocate RX ring\n");
			return -ENOMEM;
		}

		priv->rx_idx[rx_queue_idx] = 0;

		for (i = 0; i < priv->rxringcount; i++) {
			struct sk_buff *skb = dev_alloc_skb(priv->rxbuffersize);
			dma_addr_t *mapping;

			entry = &priv->rx_ring[rx_queue_idx][i];
			if (!skb)
				return -ENOMEM;	/* was "return 0": masked the failure */
			skb->dev = dev;
			priv->rx_buf[rx_queue_idx][i] = skb;
			mapping = (dma_addr_t *)skb->cb;
			*mapping = dma_map_single(&priv->pdev->dev,
						  skb_tail_pointer(skb),
						  priv->rxbuffersize, DMA_FROM_DEVICE);
			if (dma_mapping_error(&priv->pdev->dev, *mapping)) {
				dev_kfree_skb_any(skb);
				return -ENOMEM;
			}
			entry->BufferAddress = *mapping;
			entry->Length = priv->rxbuffersize;
			entry->OWN = 1;	/* descriptor belongs to hardware */
		}

		/* close the ring: mark the last descriptor End-Of-Ring */
		if (entry)
			entry->EOR = 1;
	}
	return 0;
}
/*
 * Allocate one TX descriptor ring (coherent DMA, must be 256-byte
 * aligned) and link its descriptors into a circular list via
 * NextDescAddress.  Returns 0 on success, -ENOMEM on failure.
 */
static int _rtl92e_alloc_tx_ring(struct net_device *dev, unsigned int prio,
				 unsigned int entries)
{
	struct r8192_priv *priv = rtllib_priv(dev);
	struct tx_desc *ring;
	dma_addr_t dma;
	int i;

	ring = dma_alloc_coherent(&priv->pdev->dev, sizeof(*ring) * entries,
				  &dma, GFP_ATOMIC);
	if (!ring || (unsigned long)ring & 0xFF) {
		netdev_warn(dev, "Cannot allocate TX ring (prio = %d)\n", prio);
		return -ENOMEM;
	}

	priv->tx_ring[prio].desc = ring;
	priv->tx_ring[prio].dma = dma;
	priv->tx_ring[prio].idx = 0;
	priv->tx_ring[prio].entries = entries;
	skb_queue_head_init(&priv->tx_ring[prio].queue);

	/* chain descriptors circularly: last one points back to first */
	for (i = 0; i < entries; i++)
		ring[i].NextDescAddress =
			(u32)dma + ((i + 1) % entries) *
			sizeof(*ring);

	return 0;
}
/*
 * Allocate the RX ring(s) and every TX ring.  On TX failure all rings
 * allocated so far are torn down and 1 is returned; on RX failure the
 * helper's (negative) code is returned.
 *
 * Fix: ret was declared u32 while the alloc helpers return negative
 * errnos; use int so error codes are not stored in an unsigned type.
 */
static short _rtl92e_pci_initdescring(struct net_device *dev)
{
	int ret;
	int i;
	struct r8192_priv *priv = rtllib_priv(dev);

	ret = _rtl92e_alloc_rx_ring(dev);
	if (ret)
		return ret;

	for (i = 0; i < MAX_TX_QUEUE_COUNT; i++) {
		ret = _rtl92e_alloc_tx_ring(dev, i, priv->txringcount);
		if (ret)
			goto err_free_rings;
	}

	return 0;

err_free_rings:
	_rtl92e_free_rx_ring(dev);
	for (i = 0; i < MAX_TX_QUEUE_COUNT; i++)
		if (priv->tx_ring[i].desc)
			_rtl92e_free_tx_ring(dev, i);
	return 1;
}
/*
 * Return all descriptor rings to their initial state without freeing
 * them: give every RX descriptor back to hardware (OWN=1), and drain
 * every queued TX skb under the irq_th_lock.
 */
void rtl92e_reset_desc_ring(struct net_device *dev)
{
	struct r8192_priv *priv = rtllib_priv(dev);
	int i, rx_queue_idx;
	unsigned long flags = 0;

	for (rx_queue_idx = 0; rx_queue_idx < MAX_RX_QUEUE; rx_queue_idx++) {
		if (priv->rx_ring[rx_queue_idx]) {
			struct rx_desc *entry = NULL;

			for (i = 0; i < priv->rxringcount; i++) {
				entry = &priv->rx_ring[rx_queue_idx][i];
				entry->OWN = 1;
			}
			priv->rx_idx[rx_queue_idx] = 0;
		}
	}

	spin_lock_irqsave(&priv->irq_th_lock, flags);
	for (i = 0; i < MAX_TX_QUEUE_COUNT; i++) {
		if (priv->tx_ring[i].desc) {
			struct rtl8192_tx_ring *ring = &priv->tx_ring[i];

			while (skb_queue_len(&ring->queue)) {
				struct tx_desc *entry = &ring->desc[ring->idx];
				struct sk_buff *skb =
						 __skb_dequeue(&ring->queue);

				dma_unmap_single(&priv->pdev->dev,
						 entry->TxBuffAddr, skb->len,
						 DMA_TO_DEVICE);
				kfree_skb(skb);
				ring->idx = (ring->idx + 1) % ring->entries;
			}
			ring->idx = 0;
		}
	}
	spin_unlock_irqrestore(&priv->irq_th_lock, flags);
}
void rtl92e_update_rx_pkt_timestamp(struct net_device *dev,
struct rtllib_rx_stats *stats)
{
struct r8192_priv *priv = rtllib_priv(dev);
if (stats->bIsAMPDU && !stats->bFirstMPDU)
stats->mac_time = priv->last_rx_desc_tsf;
else
priv->last_rx_desc_tsf = stats->mac_time;
}
/*
 * Convert a hardware signal-strength index to a dBm value: halve the
 * index (rounding up) and offset by -95.  The priv argument is unused
 * but kept for interface compatibility.
 */
long rtl92e_translate_to_dbm(struct r8192_priv *priv, u8 signal_strength_index)
{
	return (long)((signal_strength_index + 1) >> 1) - 95;
}
/*
 * Fold a new RecvSignalPower sample into the running average
 * (5/6 old + 1/6 new) with a +/-5 bias toward the direction of change.
 * A zero (uninitialized) average is seeded from the sample first.
 */
void rtl92e_update_rx_statistics(struct r8192_priv *priv,
				 struct rtllib_rx_stats *pprevious_stats)
{
	long sample = pprevious_stats->RecvSignalPower;
	int bias = 0;

	if (priv->stats.recv_signal_power == 0)
		priv->stats.recv_signal_power = sample;

	if (sample > priv->stats.recv_signal_power)
		bias = 5;
	else if (sample < priv->stats.recv_signal_power)
		bias = -5;

	priv->stats.recv_signal_power =
		(priv->stats.recv_signal_power * 5 + sample + bias) / 6;
}
/*
 * QueryRxPwrPercentage: map antenna power in dB to a 0-100 percentage.
 * Out-of-range readings (<= -100 or >= 20) yield 0, non-negative power
 * saturates at 100, and values in (-100, 0) map linearly.
 */
u8 rtl92e_rx_db_to_percent(s8 antpower)
{
	if (antpower <= -100 || antpower >= 20)
		return 0;
	if (antpower >= 0)
		return 100;
	return 100 + antpower;
}
/*
 * Convert an EVM reading (negative dB) to a 0-100 quality percentage:
 * negate, clamp to [0, 33], scale by 3, and round the 99 top score up
 * to a full 100.
 */
u8 rtl92e_evm_db_to_percent(s8 value)
{
	s8 quality = -value;

	if (quality < 0)
		quality = 0;
	else if (quality > 33)
		quality = 33;

	quality *= 3;
	if (quality == 99)
		quality = 100;

	return quality;
}
/* Carry the A-MPDU aggregation flags from one RX stats record to another. */
void rtl92e_copy_mpdu_stats(struct rtllib_rx_stats *psrc_stats,
			    struct rtllib_rx_stats *ptarget_stats)
{
	ptarget_stats->bIsAMPDU = psrc_stats->bIsAMPDU;
	ptarget_stats->bFirstMPDU = psrc_stats->bFirstMPDU;
}
/*
 * RX tasklet worker: walk the MPDU ring, handing completed frames to
 * rtllib and recycling each descriptor with a freshly allocated skb.
 * If the replacement skb cannot be allocated, the old buffer is reused
 * and the frame is dropped ("done" path).  Processes at most one full
 * ring's worth of descriptors per invocation.
 */
static void _rtl92e_rx_normal(struct net_device *dev)
{
	struct r8192_priv *priv = rtllib_priv(dev);
	struct rtllib_hdr_1addr *rtllib_hdr = NULL;
	bool unicast_packet = false;
	u32 skb_len = 0;
	int rx_queue_idx = RX_MPDU_QUEUE;

	struct rtllib_rx_stats stats = {
		.signal = 0,
		.noise = (u8)-98,
		.rate = 0,
	};
	unsigned int count = priv->rxringcount;

	stats.nic_type = NIC_8192E;

	while (count--) {
		struct rx_desc *pdesc = &priv->rx_ring[rx_queue_idx]
					[priv->rx_idx[rx_queue_idx]];
		struct sk_buff *skb = priv->rx_buf[rx_queue_idx]
				      [priv->rx_idx[rx_queue_idx]];
		struct sk_buff *new_skb;

		/* OWN set: hardware hasn't filled this descriptor yet */
		if (pdesc->OWN)
			return;
		if (!rtl92e_get_rx_stats(dev, &stats, pdesc, skb))
			goto done;
		new_skb = dev_alloc_skb(priv->rxbuffersize);
		/* if allocation of new skb failed - drop current packet
		 * and reuse skb
		 */
		if (unlikely(!new_skb))
			goto done;

		dma_unmap_single(&priv->pdev->dev, *((dma_addr_t *)skb->cb),
				 priv->rxbuffersize, DMA_FROM_DEVICE);

		/* trim to the payload: strip driver info, shift and CRC */
		skb_put(skb, pdesc->Length);
		skb_reserve(skb, stats.RxDrvInfoSize +
			    stats.RxBufShift);
		skb_trim(skb, skb->len - S_CRC_LEN);
		rtllib_hdr = (struct rtllib_hdr_1addr *)skb->data;
		if (!is_multicast_ether_addr(rtllib_hdr->addr1)) {
			/* unicast packet */
			unicast_packet = true;
		}
		skb_len = skb->len;

		/* rtllib_rx() takes ownership of the skb when it accepts it */
		if (!rtllib_rx(priv->rtllib, skb, &stats)) {
			dev_kfree_skb_any(skb);
		} else {
			if (unicast_packet)
				priv->stats.rxbytesunicast += skb_len;
		}

		/* recycle the descriptor with the replacement buffer */
		skb = new_skb;
		skb->dev = dev;

		priv->rx_buf[rx_queue_idx][priv->rx_idx[rx_queue_idx]] =
			skb;
		*((dma_addr_t *)skb->cb) = dma_map_single(&priv->pdev->dev,
							  skb_tail_pointer(skb),
							  priv->rxbuffersize, DMA_FROM_DEVICE);
		if (dma_mapping_error(&priv->pdev->dev, *((dma_addr_t *)skb->cb))) {
			/* NOTE(review): this frees the skb already stored in
			 * rx_buf above, leaving a dangling entry -- looks like
			 * a pre-existing bug; confirm before relying on it.
			 */
			dev_kfree_skb_any(skb);
			return;
		}
done:
		pdesc->BufferAddress = *((dma_addr_t *)skb->cb);
		pdesc->OWN = 1;
		pdesc->Length = priv->rxbuffersize;
		if (priv->rx_idx[rx_queue_idx] == priv->rxringcount - 1)
			pdesc->EOR = 1;
		priv->rx_idx[rx_queue_idx] = (priv->rx_idx[rx_queue_idx] + 1) %
					     priv->rxringcount;
	}
}
/*
 * Refill the hardware TX rings from the software wait queues: for every
 * queue from BK upward, push waiting skbs while the ring reports free
 * descriptors.
 */
static void _rtl92e_tx_resume(struct net_device *dev)
{
	struct r8192_priv *priv = rtllib_priv(dev);
	struct rtllib_device *ieee = priv->rtllib;
	struct sk_buff *skb;
	int queue_index;

	for (queue_index = BK_QUEUE;
	     queue_index < MAX_QUEUE_SIZE; queue_index++) {
		while ((!skb_queue_empty(&ieee->skb_waitQ[queue_index])) &&
		       (priv->rtllib->check_nic_enough_desc(dev, queue_index) > 0)) {
			skb = skb_dequeue(&ieee->skb_waitQ[queue_index]);
			ieee->softmac_data_hard_start_xmit(skb, dev, 0);
		}
	}
}
/* TX tasklet: resume transmission from the software wait queues. */
static void _rtl92e_irq_tx_tasklet(struct tasklet_struct *t)
{
	struct r8192_priv *priv = from_tasklet(priv, t, irq_tx_tasklet);

	_rtl92e_tx_resume(priv->rtllib->dev);
}
/*
 * RX tasklet: drain the RX ring, then re-enable the RX-descriptor-
 * unavailable interrupt that the hard IRQ handler masked on IMR_RDU.
 */
static void _rtl92e_irq_rx_tasklet(struct tasklet_struct *t)
{
	struct r8192_priv *priv = from_tasklet(priv, t, irq_rx_tasklet);

	_rtl92e_rx_normal(priv->rtllib->dev);

	rtl92e_writel(priv->rtllib->dev, INTA_MASK,
		      rtl92e_readl(priv->rtllib->dev, INTA_MASK) | IMR_RDU);
}
/****************************************************************************
* ---------------------------- NIC START/CLOSE STUFF---------------------------
****************************************************************************/
/*
 * Cancel the driver's deferred work before teardown/reset.  hw_sleep_wq
 * is cancelled without _sync (NOTE(review): presumably to avoid a
 * deadlock with the PS path -- confirm before changing).
 */
static void _rtl92e_cancel_deferred_work(struct r8192_priv *priv)
{
	cancel_delayed_work_sync(&priv->watch_dog_wq);
	cancel_delayed_work_sync(&priv->update_beacon_wq);
	cancel_delayed_work(&priv->rtllib->hw_sleep_wq);
	cancel_work_sync(&priv->reset_wq);
	cancel_work_sync(&priv->qos_activate);
}
/* Bring the station up; 0 on success, -1 on failure (from _rtl92e_sta_up). */
static int _rtl92e_up(struct net_device *dev, bool is_silent_reset)
{
	return (_rtl92e_sta_up(dev, is_silent_reset) == -1) ? -1 : 0;
}
/* ndo_open: bring the interface up under the wireless-extensions mutex. */
static int _rtl92e_open(struct net_device *dev)
{
	struct r8192_priv *priv = rtllib_priv(dev);
	int ret;

	mutex_lock(&priv->wx_mutex);
	ret = _rtl92e_try_up(dev);
	mutex_unlock(&priv->wx_mutex);
	return ret;
}
/* Bring the interface up unless it already is (-1 when already up). */
static int _rtl92e_try_up(struct net_device *dev)
{
	struct r8192_priv *priv = rtllib_priv(dev);

	return (priv->up == 1) ? -1 : _rtl92e_up(dev, false);
}
/*
 * ndo_stop: abort any scan the hardware is doing itself (i.e. when the
 * softmac isn't driving it), then take the interface down with RF
 * shutdown under the wx mutex.
 */
static int _rtl92e_close(struct net_device *dev)
{
	struct r8192_priv *priv = rtllib_priv(dev);
	int ret;

	if ((rtllib_act_scanning(priv->rtllib, false)) &&
	    !(priv->rtllib->softmac_features & IEEE_SOFTMAC_SCAN)) {
		rtllib_stop_scan(priv->rtllib);
	}

	mutex_lock(&priv->wx_mutex);

	ret = _rtl92e_down(dev, true);

	mutex_unlock(&priv->wx_mutex);

	return ret;
}
/* Take the station down; 0 on success, -1 on failure (from _rtl92e_sta_down). */
static int _rtl92e_down(struct net_device *dev, bool shutdownrf)
{
	return (_rtl92e_sta_down(dev, shutdownrf) == -1) ? -1 : 0;
}
/*
 * Apply configuration changes by restarting the interface: stop the
 * softmac protocol, mask IRQs, halt the adapter and bring it back up.
 * No-op when the interface is down.
 */
void rtl92e_commit(struct net_device *dev)
{
	struct r8192_priv *priv = rtllib_priv(dev);

	if (priv->up == 0)
		return;
	rtllib_softmac_stop_protocol(priv->rtllib, 0, true);
	rtl92e_irq_disable(dev);
	rtl92e_stop_adapter(dev, true);
	_rtl92e_up(dev, false);
}
/* reset_wq handler: restart the interface under the wx mutex. */
static void _rtl92e_restart(void *data)
{
	struct r8192_priv *priv = container_of(data, struct r8192_priv, reset_wq);
	struct net_device *dev = priv->rtllib->dev;

	mutex_lock(&priv->wx_mutex);

	rtl92e_commit(dev);

	mutex_unlock(&priv->wx_mutex);
}
/* ndo_set_rx_mode: mirror the interface's promiscuous flag into driver state. */
static void _rtl92e_set_multicast(struct net_device *dev)
{
	struct r8192_priv *priv = rtllib_priv(dev);

	priv->promisc = (dev->flags & IFF_PROMISC) ? 1 : 0;
}
/*
 * ndo_set_mac_address: store the new address and schedule a full
 * restart so the hardware picks it up.  Always returns 0.
 */
static int _rtl92e_set_mac_adr(struct net_device *dev, void *mac)
{
	struct r8192_priv *priv = rtllib_priv(dev);
	struct sockaddr *addr = mac;

	mutex_lock(&priv->wx_mutex);

	eth_hw_addr_set(dev, addr->sa_data);

	schedule_work(&priv->reset_wq);
	mutex_unlock(&priv->wx_mutex);

	return 0;
}
/*
 * Shared hard interrupt handler.  Acknowledges the interrupt status,
 * filters spurious values (0 and 0xffff), then dispatches each cause:
 * TX-done bits reclaim their rings, RX bits schedule the RX tasklet
 * (masking IMR_RDU until the tasklet refills the ring), and the beacon
 * interrupt schedules beacon preparation.  Always returns IRQ_HANDLED.
 */
static irqreturn_t _rtl92e_irq(int irq, void *netdev)
{
	struct net_device *dev = netdev;
	struct r8192_priv *priv = rtllib_priv(dev);
	unsigned long flags;
	u32 inta;

	if (priv->irq_enabled == 0)
		goto done;

	spin_lock_irqsave(&priv->irq_th_lock, flags);

	rtl92e_ack_irq(dev, &inta);

	if (!inta) {
		spin_unlock_irqrestore(&priv->irq_th_lock, flags);
		goto done;
	}

	/* 0xffff: hardware gone (e.g. surprise removal) */
	if (inta == 0xffff) {
		spin_unlock_irqrestore(&priv->irq_th_lock, flags);
		goto done;
	}

	if (!netif_running(dev)) {
		spin_unlock_irqrestore(&priv->irq_th_lock, flags);
		goto done;
	}

	if (inta & IMR_MGNTDOK) {
		_rtl92e_tx_isr(dev, MGNT_QUEUE);
		/* drop the lock: the PS TX-ack path below may sleep-level
		 * work that must not run under irq_th_lock
		 */
		spin_unlock_irqrestore(&priv->irq_th_lock, flags);
		if (priv->rtllib->ack_tx_to_ieee) {
			if (_rtl92e_is_tx_queue_empty(dev)) {
				priv->rtllib->ack_tx_to_ieee = 0;
				rtllib_ps_tx_ack(priv->rtllib, 1);
			}
		}
		spin_lock_irqsave(&priv->irq_th_lock, flags);
	}

	if (inta & IMR_COMDOK)
		_rtl92e_tx_isr(dev, TXCMD_QUEUE);

	if (inta & IMR_HIGHDOK)
		_rtl92e_tx_isr(dev, HIGH_QUEUE);

	if (inta & IMR_ROK)
		tasklet_schedule(&priv->irq_rx_tasklet);

	if (inta & IMR_BcnInt)
		tasklet_schedule(&priv->irq_prepare_beacon_tasklet);

	if (inta & IMR_RDU) {
		/* no free RX descriptors: mask RDU until the tasklet
		 * has recycled some (it re-enables the bit)
		 */
		rtl92e_writel(dev, INTA_MASK,
			      rtl92e_readl(dev, INTA_MASK) & ~IMR_RDU);
		tasklet_schedule(&priv->irq_rx_tasklet);
	}

	if (inta & IMR_RXFOVW)
		tasklet_schedule(&priv->irq_rx_tasklet);

	if (inta & IMR_BKDOK) {
		priv->rtllib->link_detect_info.NumTxOkInPeriod++;
		_rtl92e_tx_isr(dev, BK_QUEUE);
	}

	if (inta & IMR_BEDOK) {
		priv->rtllib->link_detect_info.NumTxOkInPeriod++;
		_rtl92e_tx_isr(dev, BE_QUEUE);
	}

	if (inta & IMR_VIDOK) {
		priv->rtllib->link_detect_info.NumTxOkInPeriod++;
		_rtl92e_tx_isr(dev, VI_QUEUE);
	}

	if (inta & IMR_VODOK) {
		priv->rtllib->link_detect_info.NumTxOkInPeriod++;
		_rtl92e_tx_isr(dev, VO_QUEUE);
	}

	spin_unlock_irqrestore(&priv->irq_th_lock, flags);

done:
	return IRQ_HANDLED;
}
/****************************************************************************
* ---------------------------- PCI_STUFF---------------------------
****************************************************************************/
/* net_device callbacks; packet transmission is handed to the rtllib layer. */
static const struct net_device_ops rtl8192_netdev_ops = {
	.ndo_open = _rtl92e_open,
	.ndo_stop = _rtl92e_close,
	.ndo_tx_timeout = _rtl92e_tx_timeout,
	.ndo_set_rx_mode = _rtl92e_set_multicast,
	.ndo_set_mac_address = _rtl92e_set_mac_adr,
	.ndo_validate_addr = eth_validate_addr,
	.ndo_start_xmit = rtllib_xmit,
};
/*
 * PCI probe: enable the device, set up 32-bit DMA, map the BAR 1 register
 * window, allocate and initialize the net_device/rtllib state, and register
 * the network interface.  Returns 0 on success or a negative errno; every
 * acquired resource is released on the corresponding error path.
 */
static int _rtl92e_pci_probe(struct pci_dev *pdev,
			     const struct pci_device_id *id)
{
	unsigned long ioaddr = 0;
	struct net_device *dev = NULL;
	struct r8192_priv *priv = NULL;
	unsigned long pmem_start, pmem_len, pmem_flags;
	int err = -ENOMEM;

	if (pci_enable_device(pdev)) {
		dev_err(&pdev->dev, "Failed to enable PCI device");
		return -EIO;
	}
	pci_set_master(pdev);
	/* The chip only does 32-bit DMA.  Fail the probe if either the
	 * streaming or the coherent mask cannot be set (the original code
	 * silently continued when setting the streaming mask failed).
	 */
	if (dma_set_mask(&pdev->dev, DMA_BIT_MASK(32)) ||
	    dma_set_coherent_mask(&pdev->dev, DMA_BIT_MASK(32))) {
		dev_info(&pdev->dev,
			 "Unable to obtain 32bit DMA for consistent allocations\n");
		goto err_pci_disable;
	}
	dev = alloc_rtllib(sizeof(struct r8192_priv));
	if (!dev)
		goto err_pci_disable;
	err = -ENODEV;
	pci_set_drvdata(pdev, dev);
	SET_NETDEV_DEV(dev, &pdev->dev);
	priv = rtllib_priv(dev);
	priv->rtllib = (struct rtllib_device *)netdev_priv_rsl(dev);
	priv->pdev = pdev;
	priv->rtllib->pdev = pdev;
	/* Only the D-Link 0x3304 subsystem variant supports remote wakeup. */
	if ((pdev->subsystem_vendor == PCI_VENDOR_ID_DLINK) &&
	    (pdev->subsystem_device == 0x3304))
		priv->rtllib->bSupportRemoteWakeUp = 1;
	else
		priv->rtllib->bSupportRemoteWakeUp = 0;
	/* BAR 1 holds the memory-mapped register window. */
	pmem_start = pci_resource_start(pdev, 1);
	pmem_len = pci_resource_len(pdev, 1);
	pmem_flags = pci_resource_flags(pdev, 1);
	if (!(pmem_flags & IORESOURCE_MEM)) {
		netdev_err(dev, "region #1 not a MMIO resource, aborting");
		goto err_rel_rtllib;
	}
	dev_info(&pdev->dev, "Memory mapped space start: 0x%08lx\n",
		 pmem_start);
	if (!request_mem_region(pmem_start, pmem_len, DRV_NAME)) {
		netdev_err(dev, "request_mem_region failed!");
		goto err_rel_rtllib;
	}
	ioaddr = (unsigned long)ioremap(pmem_start, pmem_len);
	if (ioaddr == (unsigned long)NULL) {
		netdev_err(dev, "ioremap failed!");
		goto err_rel_mem;
	}
	dev->mem_start = ioaddr;
	/* Use the length of BAR 1, the region actually mapped above
	 * (the original code mistakenly used pci_resource_len(pdev, 0)).
	 */
	dev->mem_end = ioaddr + pmem_len;
	if (!rtl92e_check_adapter(pdev, dev))
		goto err_unmap;
	dev->irq = pdev->irq;
	priv->irq = 0;
	dev->netdev_ops = &rtl8192_netdev_ops;
	dev->wireless_handlers = &r8192_wx_handlers_def;
	dev->ethtool_ops = &rtl819x_ethtool_ops;
	dev->type = ARPHRD_ETHER;
	dev->watchdog_timeo = HZ * 3;
	/* Retrying dev_alloc_name() with the same template cannot succeed
	 * either, so bail out instead of calling it a second time as the
	 * original code did.
	 */
	if (dev_alloc_name(dev, ifname) < 0)
		goto err_unmap;
	if (_rtl92e_init(dev) != 0) {
		netdev_warn(dev, "Initialization failed");
		goto err_free_irq;
	}
	netif_carrier_off(dev);
	netif_stop_queue(dev);
	if (register_netdev(dev))
		goto err_free_irq;
	if (priv->polling_timer_on == 0)
		rtl92e_check_rfctrl_gpio_timer(&priv->gpio_polling_timer);
	return 0;
err_free_irq:
	free_irq(dev->irq, dev);
	priv->irq = 0;
err_unmap:
	iounmap((void __iomem *)ioaddr);
err_rel_mem:
	release_mem_region(pmem_start, pmem_len);
err_rel_rtllib:
	free_rtllib(dev);
err_pci_disable:
	pci_disable_device(pdev);
	return err;
}
/*
 * PCI remove: mirror of _rtl92e_pci_probe.  Unregisters the netdev first so
 * no new I/O can start, then stops timers/work, tears down the rings, frees
 * the IRQ and unmaps the register window.  The teardown order matters -
 * keep it in sync with the acquisition order in probe.
 */
static void _rtl92e_pci_disconnect(struct pci_dev *pdev)
{
	struct net_device *dev = pci_get_drvdata(pdev);
	struct r8192_priv *priv;
	u32 i;

	if (dev) {
		unregister_netdev(dev);
		priv = rtllib_priv(dev);
		/* Stop the GPIO polling timer and its deferred work before
		 * the hardware goes away.
		 */
		del_timer_sync(&priv->gpio_polling_timer);
		cancel_delayed_work_sync(&priv->gpio_change_rf_wq);
		priv->polling_timer_on = 0;
		_rtl92e_down(dev, true);
		rtl92e_dm_deinit(dev);
		vfree(priv->fw_info);
		priv->fw_info = NULL;
		_rtl92e_free_rx_ring(dev);
		for (i = 0; i < MAX_TX_QUEUE_COUNT; i++)
			_rtl92e_free_tx_ring(dev, i);
		/* priv->irq is only non-zero if request_irq() succeeded. */
		if (priv->irq) {
			dev_info(&pdev->dev, "Freeing irq %d\n", dev->irq);
			free_irq(dev->irq, dev);
			priv->irq = 0;
		}
		/* mem_start is only set after a successful ioremap(). */
		if (dev->mem_start != 0) {
			iounmap((void __iomem *)dev->mem_start);
			release_mem_region(pci_resource_start(pdev, 1),
					   pci_resource_len(pdev, 1));
		}
		free_rtllib(dev);
	}
	pci_disable_device(pdev);
}
/*
 * Bring the NIC back up after a power-save halt: re-run the adapter start
 * sequence, clear the HALT_NIC power-save level and re-enable interrupts.
 * Returns false when the driver is down or the adapter fails to start.
 */
bool rtl92e_enable_nic(struct net_device *dev)
{
	struct r8192_priv *priv = rtllib_priv(dev);
	struct rt_pwr_save_ctrl *psc = (struct rt_pwr_save_ctrl *)
				       (&priv->rtllib->pwr_save_ctrl);

	if (!priv->up) {
		netdev_warn(dev, "%s(): Driver is already down!\n", __func__);
		return false;
	}
	if (!rtl92e_start_adapter(dev)) {
		netdev_warn(dev, "%s(): Initialization failed!\n", __func__);
		return false;
	}
	RT_CLEAR_PS_LEVEL(psc, RT_RF_OFF_LEVL_HALT_NIC);
	rtl92e_irq_enable(dev);
	return true;
}
/* Registers the PCI driver and generates the module init/exit boilerplate. */
module_pci_driver(rtl8192_pci_driver);
/*
 * Periodic timer that polls the RF-control GPIO.  It re-arms itself every
 * RTLLIB_WATCH_DOG_TIME ms and kicks the gpio_change_rf workqueue so the
 * actual GPIO handling runs in process context rather than timer context.
 */
void rtl92e_check_rfctrl_gpio_timer(struct timer_list *t)
{
	struct r8192_priv *priv = from_timer(priv, t, gpio_polling_timer);

	priv->polling_timer_on = 1;
	schedule_delayed_work(&priv->gpio_change_rf_wq, 0);
	mod_timer(&priv->gpio_polling_timer, jiffies +
		  msecs_to_jiffies(RTLLIB_WATCH_DOG_TIME));
}
/***************************************************************************
 * ------------------- module init / exit stubs ----------------
 ***************************************************************************/
MODULE_DESCRIPTION("Linux driver for Realtek RTL819x WiFi cards");
MODULE_AUTHOR(DRV_COPYRIGHT " " DRV_AUTHOR);
MODULE_VERSION(DRV_VERSION);
MODULE_LICENSE("GPL");
/* Firmware images requested at runtime via request_firmware(). */
MODULE_FIRMWARE(RTL8192E_BOOT_IMG_FW);
MODULE_FIRMWARE(RTL8192E_MAIN_IMG_FW);
MODULE_FIRMWARE(RTL8192E_DATA_IMG_FW);
/* Module parameters: interface name template and HW-WEP toggle. */
module_param(ifname, charp, 0644);
module_param(hwwep, int, 0644);
MODULE_PARM_DESC(ifname, " Net interface name, wlan%d=default");
MODULE_PARM_DESC(hwwep, " Try to use hardware WEP support(default use hw. set 0 to use software security)");
| linux-master | drivers/staging/rtl8192e/rtl8192e/rtl_core.c |
// SPDX-License-Identifier: GPL-2.0
/*
* Copyright(c) 2008 - 2010 Realtek Corporation. All rights reserved.
*
* Contact Information: wlanfae <[email protected]>
*/
#include "rtl_core.h"
#include "r8192E_hw.h"
#include "r8192E_cmdpkt.h"
/*
 * Send a command/firmware packet to the chip, splitting @data into
 * CMDPACKET_FRAG_SIZE fragments.  Each fragment is wrapped in an skb whose
 * cb[] carries the owning net_device pointer and a cb_desc describing the
 * TXCMD queue.  Returns false only if an skb allocation fails (already-sent
 * fragments are not reclaimed in that case).
 */
bool rtl92e_send_cmd_pkt(struct net_device *dev, u32 type, const void *data,
			 u32 len)
{
	bool rt_status = true;
	struct r8192_priv *priv = rtllib_priv(dev);
	u16 frag_length = 0, frag_offset = 0;
	struct sk_buff *skb;
	unsigned char *seg_ptr;
	struct cb_desc *tcb_desc;
	u8 bLastIniPkt;		/* 1 on the final fragment */
	struct tx_fwinfo_8190pci *pTxFwInfo = NULL;

	do {
		/* Size this fragment; flag the last one. */
		if ((len - frag_offset) > CMDPACKET_FRAG_SIZE) {
			frag_length = CMDPACKET_FRAG_SIZE;
			bLastIniPkt = 0;
		} else {
			frag_length = (u16)(len - frag_offset);
			bLastIniPkt = 1;
		}
		/* NORMAL packets get extra headroom for the TX fwinfo. */
		if (type == DESC_PACKET_TYPE_NORMAL)
			skb = dev_alloc_skb(frag_length +
					    priv->rtllib->tx_headroom + 4);
		else
			skb = dev_alloc_skb(frag_length + 4);
		if (!skb) {
			rt_status = false;
			goto Failed;
		}
		/* Stash the net_device pointer in the skb control block. */
		memcpy((unsigned char *)(skb->cb), &dev, sizeof(dev));
		tcb_desc = (struct cb_desc *)(skb->cb + MAX_DEV_ADDR_SIZE);
		tcb_desc->queue_index = TXCMD_QUEUE;
		tcb_desc->bCmdOrInit = type;
		tcb_desc->bLastIniPkt = bLastIniPkt;
		if (type == DESC_PACKET_TYPE_NORMAL) {
			tcb_desc->pkt_size = frag_length;
			seg_ptr = skb_put(skb, priv->rtllib->tx_headroom);
			pTxFwInfo = (struct tx_fwinfo_8190pci *)seg_ptr;
			memset(pTxFwInfo, 0, sizeof(struct tx_fwinfo_8190pci));
			/* NOTE(review): this overwrites the first 8 bytes of
			 * the just-zeroed fwinfo with a 0x12 pattern - looks
			 * like a fixed marker expected by the firmware;
			 * confirm before changing.
			 */
			memset(pTxFwInfo, 0x12, 8);
		} else {
			tcb_desc->txbuf_size = frag_length;
		}
		skb_put_data(skb, data, frag_length);
		/* INIT packets must not overtake frames already waiting:
		 * park them on the wait queue if the TXCMD ring is short on
		 * descriptors, non-empty, or the queue is stopped.
		 */
		if (type == DESC_PACKET_TYPE_INIT &&
		    (!priv->rtllib->check_nic_enough_desc(dev, TXCMD_QUEUE) ||
		    (!skb_queue_empty(&priv->rtllib->skb_waitQ[TXCMD_QUEUE])) ||
		    (priv->rtllib->queue_stop))) {
			skb_queue_tail(&priv->rtllib->skb_waitQ[TXCMD_QUEUE],
				       skb);
		} else {
			priv->rtllib->softmac_hard_start_xmit(skb, dev);
		}
		data += frag_length;
		frag_offset += frag_length;
	} while (frag_offset < len);
	/* Kick the command-queue DMA (skipped on allocation failure). */
	rtl92e_writeb(dev, TP_POLL, TP_POLL_CQ);
Failed:
	return rt_status;
}
| linux-master | drivers/staging/rtl8192e/rtl8192e/r8192E_cmdpkt.c |
// SPDX-License-Identifier: GPL-2.0
/*
* Copyright(c) 2008 - 2010 Realtek Corporation. All rights reserved.
*
* Based on the r8180 driver, which is:
* Copyright 2004-2005 Andrea Merello <[email protected]>, et al.
*
* Contact Information: wlanfae <[email protected]>
*/
#include "rtl_core.h"
#include "rtl_eeprom.h"
/* Set or clear bit @no of the EPROM_CMD register, then wait EPROM_DELAY. */
static void _rtl92e_gpio_write_bit(struct net_device *dev, int no, bool val)
{
	u8 cmd = rtl92e_readb(dev, EPROM_CMD);
	u8 mask = 1 << no;

	cmd = val ? (cmd | mask) : (cmd & ~mask);
	rtl92e_writeb(dev, EPROM_CMD, cmd);
	udelay(EPROM_DELAY);
}
/* Read bit @no of the EPROM_CMD register. */
static bool _rtl92e_gpio_get_bit(struct net_device *dev, int no)
{
	return !!(rtl92e_readb(dev, EPROM_CMD) & (1 << no));
}
/* Pulse the EEPROM clock line once (high then low). */
static void _rtl92e_eeprom_ck_cycle(struct net_device *dev)
{
	_rtl92e_gpio_write_bit(dev, EPROM_CK_BIT, 1);
	_rtl92e_gpio_write_bit(dev, EPROM_CK_BIT, 0);
}
/*
 * Bit-bang one transaction on the serial (93Cx6-style) EEPROM: assert chip
 * select, clock out the low @tx_len bits of @data MSB first, then clock in
 * a 16-bit reply.  Returns the 16 bits read.  The clock/data sequencing is
 * protocol-critical - do not reorder.
 */
static u16 _rtl92e_eeprom_xfer(struct net_device *dev, u16 data, int tx_len)
{
	u16 ret = 0;
	int rx_len = 16;

	_rtl92e_gpio_write_bit(dev, EPROM_CS_BIT, 1);
	_rtl92e_eeprom_ck_cycle(dev);
	/* Shift the command/address out, most significant bit first. */
	while (tx_len--) {
		_rtl92e_gpio_write_bit(dev, EPROM_W_BIT,
				       (data >> tx_len) & 0x1);
		_rtl92e_eeprom_ck_cycle(dev);
	}
	_rtl92e_gpio_write_bit(dev, EPROM_W_BIT, 0);
	/* Clock the 16-bit response in, MSB first. */
	while (rx_len--) {
		_rtl92e_eeprom_ck_cycle(dev);
		ret |= _rtl92e_gpio_get_bit(dev, EPROM_R_BIT) << rx_len;
	}
	/* Deassert chip select and give the part one final clock. */
	_rtl92e_gpio_write_bit(dev, EPROM_CS_BIT, 0);
	_rtl92e_eeprom_ck_cycle(dev);
	return ret;
}
/*
 * Read one 16-bit word at @addr from the EEPROM.  Switches the EPROM_CMD
 * register into program (bit-bang) mode, issues a READ opcode (0x6) with an
 * address width that depends on the part (93C56: 8-bit addr / 11-bit frame,
 * otherwise 6-bit addr / 9-bit frame), then restores normal mode.
 */
u32 rtl92e_eeprom_read(struct net_device *dev, u32 addr)
{
	struct r8192_priv *priv = rtllib_priv(dev);
	u32 ret = 0;

	rtl92e_writeb(dev, EPROM_CMD,
		      (EPROM_CMD_PROGRAM << EPROM_CMD_OPERATING_MODE_SHIFT));
	udelay(EPROM_DELAY);
	/* EEPROM is configured as x16 */
	if (priv->epromtype == EEPROM_93C56)
		ret = _rtl92e_eeprom_xfer(dev, (addr & 0xFF) | (0x6 << 8), 11);
	else
		ret = _rtl92e_eeprom_xfer(dev, (addr & 0x3F) | (0x6 << 6), 9);
	rtl92e_writeb(dev, EPROM_CMD,
		      (EPROM_CMD_NORMAL << EPROM_CMD_OPERATING_MODE_SHIFT));
	return ret;
}
| linux-master | drivers/staging/rtl8192e/rtl8192e/rtl_eeprom.c |
// SPDX-License-Identifier: GPL-2.0
/*
* Copyright(c) 2008 - 2010 Realtek Corporation. All rights reserved.
*
* Contact Information: wlanfae <[email protected]>
*/
#include <linux/string.h>
#include "rtl_core.h"
#include "rtl_wx.h"
#define RATE_COUNT 12
/* Supported bitrates in bit/s (11b then 11g), RATE_COUNT entries. */
static u32 rtl8192_rates[] = {
	1000000, 2000000, 5500000, 11000000, 6000000, 9000000, 12000000,
	18000000, 24000000, 36000000, 48000000, 54000000
};
#ifndef ENETDOWN
#define ENETDOWN 1
#endif
/* SIOCGIWFREQ: report the current frequency/channel via rtllib. */
static int _rtl92e_wx_get_freq(struct net_device *dev,
			       struct iw_request_info *a,
			       union iwreq_data *wrqu, char *b)
{
	struct r8192_priv *priv = rtllib_priv(dev);

	return rtllib_wx_get_freq(priv->rtllib, a, wrqu, b);
}
/* SIOCGIWMODE: report the current operating mode via rtllib. */
static int _rtl92e_wx_get_mode(struct net_device *dev,
			       struct iw_request_info *a,
			       union iwreq_data *wrqu, char *b)
{
	struct r8192_priv *priv = rtllib_priv(dev);

	return rtllib_wx_get_mode(priv->rtllib, a, wrqu, b);
}
/* SIOCGIWRATE: report the current bitrate via rtllib. */
static int _rtl92e_wx_get_rate(struct net_device *dev,
			       struct iw_request_info *info,
			       union iwreq_data *wrqu, char *extra)
{
	struct r8192_priv *priv = rtllib_priv(dev);

	return rtllib_wx_get_rate(priv->rtllib, info, wrqu, extra);
}
/* SIOCSIWRATE: set the bitrate; no-op while the radio is off. */
static int _rtl92e_wx_set_rate(struct net_device *dev,
			       struct iw_request_info *info,
			       union iwreq_data *wrqu, char *extra)
{
	int ret;
	struct r8192_priv *priv = rtllib_priv(dev);

	if (priv->hw_radio_off)
		return 0;
	mutex_lock(&priv->wx_mutex);
	ret = rtllib_wx_set_rate(priv->rtllib, info, wrqu, extra);
	mutex_unlock(&priv->wx_mutex);
	return ret;
}
/* SIOCSIWRTS: set the RTS threshold; no-op while the radio is off. */
static int _rtl92e_wx_set_rts(struct net_device *dev,
			      struct iw_request_info *info,
			      union iwreq_data *wrqu, char *extra)
{
	int ret;
	struct r8192_priv *priv = rtllib_priv(dev);

	if (priv->hw_radio_off)
		return 0;
	mutex_lock(&priv->wx_mutex);
	ret = rtllib_wx_set_rts(priv->rtllib, info, wrqu, extra);
	mutex_unlock(&priv->wx_mutex);
	return ret;
}
/* SIOCGIWRTS: report the RTS threshold via rtllib. */
static int _rtl92e_wx_get_rts(struct net_device *dev,
			      struct iw_request_info *info,
			      union iwreq_data *wrqu, char *extra)
{
	struct r8192_priv *priv = rtllib_priv(dev);

	return rtllib_wx_get_rts(priv->rtllib, info, wrqu, extra);
}
/* SIOCSIWPOWER: set power management; rejected while the radio is off. */
static int _rtl92e_wx_set_power(struct net_device *dev,
				struct iw_request_info *info,
				union iwreq_data *wrqu, char *extra)
{
	int ret;
	struct r8192_priv *priv = rtllib_priv(dev);

	if (priv->hw_radio_off) {
		netdev_warn(dev, "%s(): Can't set Power: Radio is Off.\n",
			    __func__);
		return 0;
	}
	mutex_lock(&priv->wx_mutex);
	ret = rtllib_wx_set_power(priv->rtllib, info, wrqu, extra);
	mutex_unlock(&priv->wx_mutex);
	return ret;
}
/* SIOCGIWPOWER: report power-management settings via rtllib. */
static int _rtl92e_wx_get_power(struct net_device *dev,
				struct iw_request_info *info,
				union iwreq_data *wrqu, char *extra)
{
	struct r8192_priv *priv = rtllib_priv(dev);

	return rtllib_wx_get_power(priv->rtllib, info, wrqu, extra);
}
/* Private ioctl: toggle raw-TX mode in the rtllib layer. */
static int _rtl92e_wx_set_rawtx(struct net_device *dev,
				struct iw_request_info *info,
				union iwreq_data *wrqu, char *extra)
{
	struct r8192_priv *priv = rtllib_priv(dev);
	int ret;

	if (priv->hw_radio_off)
		return 0;
	mutex_lock(&priv->wx_mutex);
	ret = rtllib_wx_set_rawtx(priv->rtllib, info, wrqu, extra);
	mutex_unlock(&priv->wx_mutex);
	return ret;
}
/* Private ioctl: request a forced adapter reset (picked up elsewhere). */
static int _rtl92e_wx_force_reset(struct net_device *dev,
				  struct iw_request_info *info,
				  union iwreq_data *wrqu, char *extra)
{
	struct r8192_priv *priv = rtllib_priv(dev);

	mutex_lock(&priv->wx_mutex);
	priv->force_reset = *extra;
	mutex_unlock(&priv->wx_mutex);
	return 0;
}
/*
 * Private ioctl: enable (*extra != 0, or force_lps set) or disable leisure
 * power save.  Disabling also leaves LPS immediately when associated and
 * records the requested PS mode in ieee->ps.
 */
static int _rtl92e_wx_adapter_power_status(struct net_device *dev,
					   struct iw_request_info *info,
					   union iwreq_data *wrqu, char *extra)
{
	struct r8192_priv *priv = rtllib_priv(dev);
	struct rt_pwr_save_ctrl *psc = (struct rt_pwr_save_ctrl *)
					(&priv->rtllib->pwr_save_ctrl);
	struct rtllib_device *ieee = priv->rtllib;

	mutex_lock(&priv->wx_mutex);
	if (*extra || priv->force_lps) {
		priv->ps_force = false;
		psc->bLeisurePs = true;
	} else {
		if (priv->rtllib->link_state == MAC80211_LINKED)
			rtl92e_leisure_ps_leave(dev);
		priv->ps_force = true;
		psc->bLeisurePs = false;
		ieee->ps = *extra;
	}
	mutex_unlock(&priv->wx_mutex);
	return 0;
}
/* Private ioctl: set the maximum LPS awake interval (in beacon intervals). */
static int _rtl92e_wx_set_lps_awake_interval(struct net_device *dev,
					     struct iw_request_info *info,
					     union iwreq_data *wrqu,
					     char *extra)
{
	struct r8192_priv *priv = rtllib_priv(dev);
	struct rt_pwr_save_ctrl *psc = (struct rt_pwr_save_ctrl *)
					(&priv->rtllib->pwr_save_ctrl);

	mutex_lock(&priv->wx_mutex);
	netdev_info(dev, "%s(): set lps awake interval ! extra is %d\n",
		    __func__, *extra);
	psc->reg_max_lps_awake_intvl = *extra;
	mutex_unlock(&priv->wx_mutex);
	return 0;
}
/* Private ioctl: force leisure power save on/off. */
static int _rtl92e_wx_set_force_lps(struct net_device *dev,
				    struct iw_request_info *info,
				    union iwreq_data *wrqu, char *extra)
{
	struct r8192_priv *priv = rtllib_priv(dev);

	mutex_lock(&priv->wx_mutex);
	netdev_info(dev,
		    "%s(): force LPS ! extra is %d (1 is open 0 is close)\n",
		    __func__, *extra);
	priv->force_lps = *extra;
	mutex_unlock(&priv->wx_mutex);
	return 0;
}
static int _rtl92e_wx_set_debug(struct net_device *dev,
struct iw_request_info *info,
union iwreq_data *wrqu, char *extra)
{
struct r8192_priv *priv = rtllib_priv(dev);
u8 c = *extra;
if (priv->hw_radio_off)
return 0;
netdev_info(dev, "=====>%s(), *extra:%x, debugflag:%x\n", __func__,
*extra, rt_global_debug_component);
if (c > 0)
rt_global_debug_component |= (1 << c);
else
rt_global_debug_component &= BIT31;
return 0;
}
/*
 * SIOCSIWMODE: set the operating mode.  Entering ad-hoc/monitor/promiscuous
 * mode while the RF is off due to inactivity power save (IPS) first wakes
 * the radio; any other RF-off reason rejects the request.
 */
static int _rtl92e_wx_set_mode(struct net_device *dev,
			       struct iw_request_info *a,
			       union iwreq_data *wrqu, char *b)
{
	struct r8192_priv *priv = rtllib_priv(dev);
	struct rtllib_device *ieee = netdev_priv_rsl(dev);
	enum rt_rf_power_state rt_state;
	int ret;

	if (priv->hw_radio_off)
		return 0;
	rt_state = priv->rtllib->rf_power_state;
	mutex_lock(&priv->wx_mutex);
	if (wrqu->mode == IW_MODE_ADHOC || wrqu->mode == IW_MODE_MONITOR ||
	    ieee->net_promiscuous_md) {
		if (rt_state == rf_off) {
			/* RF off for a reason stronger than IPS (e.g. the
			 * hardware switch) - cannot turn it back on here.
			 */
			if (priv->rtllib->rf_off_reason >
			    RF_CHANGE_BY_IPS) {
				netdev_warn(dev, "%s(): RF is OFF.\n",
					    __func__);
				mutex_unlock(&priv->wx_mutex);
				return -1;
			}
			netdev_info(dev,
				    "=========>%s(): rtl92e_ips_leave\n",
				    __func__);
			mutex_lock(&priv->rtllib->ips_mutex);
			rtl92e_ips_leave(dev);
			mutex_unlock(&priv->rtllib->ips_mutex);
		}
	}
	ret = rtllib_wx_set_mode(priv->rtllib, a, wrqu, b);
	mutex_unlock(&priv->wx_mutex);
	return ret;
}
/*
 * Prefix of struct iw_range up to and including the scan_capa field.
 * Layout must stay field-for-field identical to the start of iw_range,
 * since _rtl92e_wx_get_range fills a real iw_range.
 */
struct iw_range_with_scan_capa {
	/* Informative stuff (to choose between different interface) */
	__u32 throughput; /* To give an idea... */
	/* In theory this value should be the maximum benchmarked
	 * TCP/IP throughput, because with most of these devices the
	 * bit rate is meaningless (overhead an co) to estimate how
	 * fast the connection will go and pick the fastest one.
	 * I suggest people to play with Netperf or any benchmark...
	 */
	/* NWID (or domain id) */
	__u32 min_nwid; /* Minimal NWID we are able to set */
	__u32 max_nwid; /* Maximal NWID we are able to set */
	/* Old Frequency (backward compat - moved lower ) */
	__u16 old_num_channels;
	__u8 old_num_frequency;
	/* Scan capabilities */
	__u8 scan_capa;
};
/*
 * SIOCGIWRANGE: fill in the wireless-extensions capability structure -
 * bitrates, RTS/frag limits, power-management bounds, WE version, the
 * 2.4 GHz channels enabled in active_channel_map, and WPA/scan capabilities.
 */
static int _rtl92e_wx_get_range(struct net_device *dev,
				struct iw_request_info *info,
				union iwreq_data *wrqu, char *extra)
{
	struct iw_range *range = (struct iw_range *)extra;
	struct r8192_priv *priv = rtllib_priv(dev);
	u16 val;
	int i;

	wrqu->data.length = sizeof(*range);
	memset(range, 0, sizeof(*range));
	/* ~130 Mb/s real (802.11n) */
	range->throughput = 130 * 1000 * 1000;
	range->max_qual.qual = 100;
	range->max_qual.level = 0;
	range->max_qual.noise = 0;
	range->max_qual.updated = 7; /* Updated all three */
	range->avg_qual.qual = 70; /* > 8% missed beacons is 'bad' */
	range->avg_qual.level = 0;
	range->avg_qual.noise = 0;
	range->avg_qual.updated = 7; /* Updated all three */
	range->num_bitrates = min(RATE_COUNT, IW_MAX_BITRATES);
	for (i = 0; i < range->num_bitrates; i++)
		range->bitrate[i] = rtl8192_rates[i];
	range->max_rts = DEFAULT_RTS_THRESHOLD;
	range->min_frag = MIN_FRAG_THRESHOLD;
	range->max_frag = MAX_FRAG_THRESHOLD;
	range->min_pmp = 0;
	range->max_pmp = 5000000;
	range->min_pmt = 0;
	range->max_pmt = 65535 * 1000;
	range->pmp_flags = IW_POWER_PERIOD;
	range->pmt_flags = IW_POWER_TIMEOUT;
	range->pm_capa = IW_POWER_PERIOD | IW_POWER_TIMEOUT | IW_POWER_ALL_R;
	range->we_version_compiled = WIRELESS_EXT;
	range->we_version_source = 18;
	/* Advertise each enabled 2.4 GHz channel (1-14) as a frequency
	 * entry; freq.m is in units of 10^freq.e Hz, hence kHz * 100, e = 1.
	 */
	for (i = 0, val = 0; i < 14; i++) {
		if ((priv->rtllib->active_channel_map)[i + 1]) {
			s32 freq_khz;

			range->freq[val].i = i + 1;
			freq_khz = ieee80211_channel_to_freq_khz(i + 1, NL80211_BAND_2GHZ);
			range->freq[val].m = freq_khz * 100;
			range->freq[val].e = 1;
			val++;
		}
		if (val == IW_MAX_FREQUENCIES)
			break;
	}
	range->num_frequency = val;
	range->num_channels = val;
	range->enc_capa = IW_ENC_CAPA_WPA | IW_ENC_CAPA_WPA2 |
			  IW_ENC_CAPA_CIPHER_TKIP | IW_ENC_CAPA_CIPHER_CCMP;
	range->scan_capa = IW_SCAN_CAPA_ESSID | IW_SCAN_CAPA_TYPE;
	/* Event capability (kernel + driver) */
	return 0;
}
/*
 * SIOCSIWSCAN: start a scan.  Refuses while associating, right after
 * linking, during busy traffic, when the interface is down, or while the
 * radio is off.  When not linked, wakes the RF from IPS if needed and runs
 * a synchronous scan with the hardware state backed up around it; when
 * linked, defers to the generic rtllib scan path.
 */
static int _rtl92e_wx_set_scan(struct net_device *dev,
			       struct iw_request_info *a,
			       union iwreq_data *wrqu, char *b)
{
	struct r8192_priv *priv = rtllib_priv(dev);
	struct rtllib_device *ieee = priv->rtllib;
	enum rt_rf_power_state rt_state;
	int ret;

	if (!(ieee->softmac_features & IEEE_SOFTMAC_SCAN)) {
		/* Don't disturb an association in progress... */
		if ((ieee->link_state >= RTLLIB_ASSOCIATING) &&
		    (ieee->link_state <= RTLLIB_ASSOCIATING_AUTHENTICATED))
			return 0;
		/* ...or a link that is less than two watchdog ticks old. */
		if ((priv->rtllib->link_state == MAC80211_LINKED) &&
		    (priv->rtllib->CntAfterLink < 2))
			return 0;
	}
	if (priv->hw_radio_off) {
		netdev_info(dev, "================>%s(): hwradio off\n",
			    __func__);
		return 0;
	}
	rt_state = priv->rtllib->rf_power_state;
	if (!priv->up)
		return -ENETDOWN;
	if (priv->rtllib->link_detect_info.bBusyTraffic)
		return -EAGAIN;
	/* Copy a directed-scan SSID into the current network, if given. */
	if (wrqu->data.flags & IW_SCAN_THIS_ESSID) {
		struct iw_scan_req *req = (struct iw_scan_req *)b;

		if (req->essid_len) {
			int len = min_t(int, req->essid_len, IW_ESSID_MAX_SIZE);

			ieee->current_network.ssid_len = len;
			memcpy(ieee->current_network.ssid, req->essid, len);
		}
	}
	mutex_lock(&priv->wx_mutex);
	priv->rtllib->FirstIe_InScan = true;
	if (priv->rtllib->link_state != MAC80211_LINKED) {
		if (rt_state == rf_off) {
			/* Only an IPS-induced RF-off can be undone here. */
			if (priv->rtllib->rf_off_reason >
			    RF_CHANGE_BY_IPS) {
				netdev_warn(dev, "%s(): RF is OFF.\n",
					    __func__);
				mutex_unlock(&priv->wx_mutex);
				return -1;
			}
			mutex_lock(&priv->rtllib->ips_mutex);
			rtl92e_ips_leave(dev);
			mutex_unlock(&priv->rtllib->ips_mutex);
		}
		rtllib_stop_scan(priv->rtllib);
		if (priv->rtllib->rf_power_state != rf_off) {
			priv->rtllib->actscanning = true;
			/* Save/restore hardware state around the scan. */
			ieee->ScanOperationBackupHandler(ieee->dev, SCAN_OPT_BACKUP);
			rtllib_start_scan_syncro(priv->rtllib);
			ieee->ScanOperationBackupHandler(ieee->dev, SCAN_OPT_RESTORE);
		}
		ret = 0;
	} else {
		priv->rtllib->actscanning = true;
		ret = rtllib_wx_set_scan(priv->rtllib, a, wrqu, b);
	}
	mutex_unlock(&priv->wx_mutex);
	return ret;
}
/* SIOCGIWSCAN: return scan results; requires the interface to be up. */
static int _rtl92e_wx_get_scan(struct net_device *dev,
			       struct iw_request_info *a,
			       union iwreq_data *wrqu, char *b)
{
	struct r8192_priv *priv = rtllib_priv(dev);
	int rc;

	if (!priv->up)
		return -ENETDOWN;
	if (priv->hw_radio_off)
		return 0;
	mutex_lock(&priv->wx_mutex);
	rc = rtllib_wx_get_scan(priv->rtllib, a, wrqu, b);
	mutex_unlock(&priv->wx_mutex);
	return rc;
}
/* SIOCSIWESSID: set the ESSID via rtllib; no-op while the radio is off. */
static int _rtl92e_wx_set_essid(struct net_device *dev,
				struct iw_request_info *a,
				union iwreq_data *wrqu, char *b)
{
	struct r8192_priv *priv = rtllib_priv(dev);
	int ret;

	if (priv->hw_radio_off) {
		netdev_info(dev,
			    "=========>%s():hw radio off,or Rf state is rf_off, return\n",
			    __func__);
		return 0;
	}
	mutex_lock(&priv->wx_mutex);
	ret = rtllib_wx_set_essid(priv->rtllib, a, wrqu, b);
	mutex_unlock(&priv->wx_mutex);
	return ret;
}
/* SIOCGIWESSID: report the current ESSID via rtllib. */
static int _rtl92e_wx_get_essid(struct net_device *dev,
				struct iw_request_info *a,
				union iwreq_data *wrqu, char *b)
{
	int ret;
	struct r8192_priv *priv = rtllib_priv(dev);

	mutex_lock(&priv->wx_mutex);
	ret = rtllib_wx_get_essid(priv->rtllib, a, wrqu, b);
	mutex_unlock(&priv->wx_mutex);
	return ret;
}
/* SIOCSIWNICKN: store the nickname, truncated to the nick buffer size. */
static int _rtl92e_wx_set_nick(struct net_device *dev,
			       struct iw_request_info *info,
			       union iwreq_data *wrqu, char *extra)
{
	struct r8192_priv *priv = rtllib_priv(dev);

	if (wrqu->data.length > IW_ESSID_MAX_SIZE)
		return -E2BIG;
	mutex_lock(&priv->wx_mutex);
	/* Clamp again to the actual buffer in case it is smaller. */
	wrqu->data.length = min_t(size_t, wrqu->data.length,
				  sizeof(priv->nick));
	memset(priv->nick, 0, sizeof(priv->nick));
	memcpy(priv->nick, extra, wrqu->data.length);
	mutex_unlock(&priv->wx_mutex);
	return 0;
}
/* SIOCGIWNICKN: return the stored nickname. */
static int _rtl92e_wx_get_nick(struct net_device *dev,
			       struct iw_request_info *info,
			       union iwreq_data *wrqu, char *extra)
{
	struct r8192_priv *priv = rtllib_priv(dev);

	mutex_lock(&priv->wx_mutex);
	wrqu->data.length = strlen(priv->nick);
	memcpy(extra, priv->nick, wrqu->data.length);
	wrqu->data.flags = 1; /* active */
	mutex_unlock(&priv->wx_mutex);
	return 0;
}
/* SIOCSIWFREQ: set the channel/frequency; no-op while the radio is off. */
static int _rtl92e_wx_set_freq(struct net_device *dev,
			       struct iw_request_info *a,
			       union iwreq_data *wrqu, char *b)
{
	int ret;
	struct r8192_priv *priv = rtllib_priv(dev);

	if (priv->hw_radio_off)
		return 0;
	mutex_lock(&priv->wx_mutex);
	ret = rtllib_wx_set_freq(priv->rtllib, a, wrqu, b);
	mutex_unlock(&priv->wx_mutex);
	return ret;
}
/* SIOCGIWNAME: report the protocol name via rtllib. */
static int _rtl92e_wx_get_name(struct net_device *dev,
			       struct iw_request_info *info,
			       union iwreq_data *wrqu, char *extra)
{
	struct r8192_priv *priv = rtllib_priv(dev);

	return rtllib_wx_get_name(priv->rtllib, info, wrqu, extra);
}
/*
 * SIOCSIWFRAG: set the fragmentation threshold (forced even, range
 * checked); "disabled" restores the default.
 */
static int _rtl92e_wx_set_frag(struct net_device *dev,
			       struct iw_request_info *info,
			       union iwreq_data *wrqu, char *extra)
{
	struct r8192_priv *priv = rtllib_priv(dev);

	if (priv->hw_radio_off)
		return 0;
	if (wrqu->frag.disabled) {
		priv->rtllib->fts = DEFAULT_FRAG_THRESHOLD;
	} else {
		if (wrqu->frag.value < MIN_FRAG_THRESHOLD ||
		    wrqu->frag.value > MAX_FRAG_THRESHOLD)
			return -EINVAL;
		/* Threshold must be even. */
		priv->rtllib->fts = wrqu->frag.value & ~0x1;
	}
	return 0;
}
/* SIOCGIWFRAG: report the fragmentation threshold. */
static int _rtl92e_wx_get_frag(struct net_device *dev,
			       struct iw_request_info *info,
			       union iwreq_data *wrqu, char *extra)
{
	struct r8192_priv *priv = rtllib_priv(dev);

	wrqu->frag.value = priv->rtllib->fts;
	wrqu->frag.fixed = 0; /* no auto select */
	wrqu->frag.disabled = (wrqu->frag.value == DEFAULT_FRAG_THRESHOLD);
	return 0;
}
/* SIOCSIWAP: set the desired AP BSSID; no-op while the radio is off. */
static int _rtl92e_wx_set_wap(struct net_device *dev,
			      struct iw_request_info *info,
			      union iwreq_data *awrq, char *extra)
{
	int ret;
	struct r8192_priv *priv = rtllib_priv(dev);

	if (priv->hw_radio_off)
		return 0;
	mutex_lock(&priv->wx_mutex);
	ret = rtllib_wx_set_wap(priv->rtllib, info, awrq, extra);
	mutex_unlock(&priv->wx_mutex);
	return ret;
}
/* SIOCGIWAP: report the current AP BSSID via rtllib. */
static int _rtl92e_wx_get_wap(struct net_device *dev,
			      struct iw_request_info *info,
			      union iwreq_data *wrqu, char *extra)
{
	struct r8192_priv *priv = rtllib_priv(dev);

	return rtllib_wx_get_wap(priv->rtllib, info, wrqu, extra);
}
/* SIOCGIWENCODE: report the current encryption settings via rtllib. */
static int _rtl92e_wx_get_enc(struct net_device *dev,
			      struct iw_request_info *info,
			      union iwreq_data *wrqu, char *key)
{
	struct r8192_priv *priv = rtllib_priv(dev);

	return rtllib_wx_get_encode(priv->rtllib, info, wrqu, key);
}
/*
 * SIOCSIWENCODE: set a WEP key.  Hands the request to rtllib for the
 * software path, then mirrors the key into the hardware CAM.  Disabling
 * encryption resets the CAM and the software CAM shadow table.
 */
static int _rtl92e_wx_set_enc(struct net_device *dev,
			      struct iw_request_info *info,
			      union iwreq_data *wrqu, char *key)
{
	struct r8192_priv *priv = rtllib_priv(dev);
	int ret;
	struct rtllib_device *ieee = priv->rtllib;
	u32 hwkey[4] = {0, 0, 0, 0};	/* key repacked as 32-bit words */
	u8 mask = 0xff;			/* zeroed past the end of the key */
	u32 key_idx = 0;
	/* Per-index placeholder MACs used for default (non-pairwise) keys. */
	u8 zero_addr[4][6] = {{0x00, 0x00, 0x00, 0x00, 0x00, 0x00},
			      {0x00, 0x00, 0x00, 0x00, 0x00, 0x01},
			      {0x00, 0x00, 0x00, 0x00, 0x00, 0x02},
			      {0x00, 0x00, 0x00, 0x00, 0x00, 0x03} };
	int i;

	if (priv->hw_radio_off)
		return 0;
	if (!priv->up)
		return -ENETDOWN;
	/* Keep IPS from powering the RF down while keys are programmed. */
	priv->rtllib->wx_set_enc = 1;
	mutex_lock(&priv->rtllib->ips_mutex);
	rtl92e_ips_leave(dev);
	mutex_unlock(&priv->rtllib->ips_mutex);
	mutex_lock(&priv->wx_mutex);
	ret = rtllib_wx_set_encode(priv->rtllib, info, wrqu, key);
	mutex_unlock(&priv->wx_mutex);
	if (wrqu->encoding.flags & IW_ENCODE_DISABLED) {
		ieee->pairwise_key_type = ieee->group_key_type = KEY_TYPE_NA;
		rtl92e_cam_reset(dev);
		memset(priv->rtllib->swcamtable, 0,
		       sizeof(struct sw_cam_table) * 32);
		goto end_hw_sec;
	}
	if (wrqu->encoding.length != 0) {
		/* Pack the key bytes little-endian into hwkey[], masking out
		 * bytes beyond the supplied key length.
		 */
		for (i = 0; i < 4; i++) {
			hwkey[i] |= key[4 * i + 0] & mask;
			if (i == 1 && (4 * i + 1) == wrqu->encoding.length)
				mask = 0x00;
			if (i == 3 && (4 * i + 1) == wrqu->encoding.length)
				mask = 0x00;
			hwkey[i] |= (key[4 * i + 1] & mask) << 8;
			hwkey[i] |= (key[4 * i + 2] & mask) << 16;
			hwkey[i] |= (key[4 * i + 3] & mask) << 24;
		}
		/* Map the WEXT 1-based key index to the CAM's 0-based one;
		 * index 0 means "current TX key".
		 */
		switch (wrqu->encoding.flags & IW_ENCODE_INDEX) {
		case 0:
			key_idx = ieee->crypt_info.tx_keyidx;
			break;
		case 1:
			key_idx = 0;
			break;
		case 2:
			key_idx = 1;
			break;
		case 3:
			key_idx = 2;
			break;
		case 4:
			key_idx = 3;
			break;
		default:
			break;
		}
		/* Key length selects WEP40 (5 bytes) vs WEP104 (13 bytes). */
		if (wrqu->encoding.length == 0x5) {
			ieee->pairwise_key_type = KEY_TYPE_WEP40;
			rtl92e_enable_hw_security_config(dev);
		}
		/* NOTE(review): unlike the WEP104 branch, the WEP40 branch
		 * above never programs the key into the CAM - verify whether
		 * that is intentional.
		 */
		else if (wrqu->encoding.length == 0xd) {
			ieee->pairwise_key_type = KEY_TYPE_WEP104;
			rtl92e_enable_hw_security_config(dev);
			rtl92e_set_key(dev, key_idx, key_idx, KEY_TYPE_WEP104,
				       zero_addr[key_idx], 0, hwkey);
			rtl92e_set_swcam(dev, key_idx, key_idx, KEY_TYPE_WEP104,
					 zero_addr[key_idx], hwkey);
		} else {
			netdev_info(dev,
				    "wrong type in WEP, not WEP40 and WEP104\n");
		}
	}
end_hw_sec:
	priv->rtllib->wx_set_enc = 0;
	return ret;
}
/*
 * Private ioctl: select active (1) or passive (0) scanning.
 * NOTE(review): returns 1 rather than 0 - in WEXT a positive return
 * requests a commit; confirm that this is intentional before changing.
 */
static int _rtl92e_wx_set_scan_type(struct net_device *dev,
				    struct iw_request_info *aa,
				    union iwreq_data *wrqu, char *p)
{
	struct r8192_priv *priv = rtllib_priv(dev);
	int *parms = (int *)p;
	int mode = parms[0];

	if (priv->hw_radio_off)
		return 0;
	priv->rtllib->active_scan = mode;
	return 1;
}
#define R8192_MAX_RETRY 255
/*
 * SIOCSIWRETRY: set the retry limit.  Only IW_RETRY_LIMIT is supported;
 * IW_RETRY_MAX selects the RTS retry count, otherwise the data retry
 * count.  The new value is applied with rtl92e_commit().
 */
static int _rtl92e_wx_set_retry(struct net_device *dev,
				struct iw_request_info *info,
				union iwreq_data *wrqu, char *extra)
{
	struct r8192_priv *priv = rtllib_priv(dev);
	int err = 0;

	if (priv->hw_radio_off)
		return 0;
	mutex_lock(&priv->wx_mutex);
	/* Lifetime-based retry and disabling retries are unsupported. */
	if (wrqu->retry.flags & IW_RETRY_LIFETIME ||
	    wrqu->retry.disabled) {
		err = -EINVAL;
		goto exit;
	}
	if (!(wrqu->retry.flags & IW_RETRY_LIMIT)) {
		err = -EINVAL;
		goto exit;
	}
	if (wrqu->retry.value > R8192_MAX_RETRY) {
		err = -EINVAL;
		goto exit;
	}
	if (wrqu->retry.flags & IW_RETRY_MAX)
		priv->retry_rts = wrqu->retry.value;
	else
		priv->retry_data = wrqu->retry.value;
	rtl92e_commit(dev);
exit:
	mutex_unlock(&priv->wx_mutex);
	return err;
}
static int _rtl92e_wx_get_retry(struct net_device *dev,
struct iw_request_info *info,
union iwreq_data *wrqu, char *extra)
{
struct r8192_priv *priv = rtllib_priv(dev);
wrqu->retry.disabled = 0; /* can't be disabled */
if ((wrqu->retry.flags & IW_RETRY_TYPE) ==
IW_RETRY_LIFETIME)
return -EINVAL;
if (wrqu->retry.flags & IW_RETRY_MAX) {
wrqu->retry.flags = IW_RETRY_LIMIT | IW_RETRY_MAX;
wrqu->retry.value = priv->retry_rts;
} else {
wrqu->retry.flags = IW_RETRY_LIMIT | IW_RETRY_MIN;
wrqu->retry.value = priv->retry_data;
}
return 0;
}
/*
 * SIOCSIWENCODEEXT: set a WEP/TKIP/CCMP key.  The software path is handled
 * by rtllib; this function then mirrors the key into the hardware CAM:
 * WEP keys under per-index placeholder addresses, group keys under the
 * broadcast address, and pairwise keys under the AP's MAC at CAM entry 4.
 */
static int _rtl92e_wx_set_encode_ext(struct net_device *dev,
				     struct iw_request_info *info,
				     union iwreq_data *wrqu, char *extra)
{
	int ret = 0;
	struct r8192_priv *priv = rtllib_priv(dev);
	struct rtllib_device *ieee = priv->rtllib;

	if (priv->hw_radio_off)
		return 0;
	mutex_lock(&priv->wx_mutex);
	/* Keep IPS from powering the RF down while keys are programmed. */
	priv->rtllib->wx_set_enc = 1;
	mutex_lock(&priv->rtllib->ips_mutex);
	rtl92e_ips_leave(dev);
	mutex_unlock(&priv->rtllib->ips_mutex);
	ret = rtllib_wx_set_encode_ext(ieee, info, wrqu, extra);
	{
		const u8 broadcast_addr[ETH_ALEN] = {0xff, 0xff, 0xff, 0xff, 0xff, 0xff};
		const u8 zero[ETH_ALEN] = {0};
		u32 key[4] = {0};
		struct iw_encode_ext *ext = (struct iw_encode_ext *)extra;
		struct iw_point *encoding = &wrqu->encoding;
		u8 idx = 0, alg = 0, group = 0;

		if ((encoding->flags & IW_ENCODE_DISABLED) ||
		     ext->alg == IW_ENCODE_ALG_NONE) {
			/* Encryption disabled: wipe HW CAM and its shadow. */
			ieee->pairwise_key_type = ieee->group_key_type
						= KEY_TYPE_NA;
			rtl92e_cam_reset(dev);
			memset(priv->rtllib->swcamtable, 0,
			       sizeof(struct sw_cam_table) * 32);
			goto end_hw_sec;
		}
		/* Map the WEXT algorithm id to the driver's key type. */
		alg = (ext->alg == IW_ENCODE_ALG_CCMP) ? KEY_TYPE_CCMP :
		      ext->alg;
		idx = encoding->flags & IW_ENCODE_INDEX;
		if (idx)
			idx--;	/* WEXT indices are 1-based */
		group = ext->ext_flags & IW_ENCODE_EXT_GROUP_KEY;
		if ((!group) || (ieee->iw_mode == IW_MODE_ADHOC) ||
		    (alg == KEY_TYPE_WEP40)) {
			/* 13-byte "WEP40" is really WEP104. */
			if ((ext->key_len == 13) && (alg == KEY_TYPE_WEP40))
				alg = KEY_TYPE_WEP104;
			ieee->pairwise_key_type = alg;
			rtl92e_enable_hw_security_config(dev);
		}
		memcpy((u8 *)key, ext->key, 16);
		if ((alg & KEY_TYPE_WEP40) && (ieee->auth_mode != 2)) {
			if (ext->key_len == 13)
				ieee->pairwise_key_type = alg = KEY_TYPE_WEP104;
			rtl92e_set_key(dev, idx, idx, alg, zero, 0, key);
			rtl92e_set_swcam(dev, idx, idx, alg, zero, key);
		} else if (group) {
			ieee->group_key_type = alg;
			rtl92e_set_key(dev, idx, idx, alg, broadcast_addr, 0,
				       key);
			rtl92e_set_swcam(dev, idx, idx, alg, broadcast_addr, key);
		} else {
			/* NOTE(review): register 0x173 write for CCMP+HT -
			 * purpose not visible here; presumably a HW AES/HT
			 * workaround - confirm against the datasheet.
			 */
			if ((ieee->pairwise_key_type == KEY_TYPE_CCMP) &&
			     ieee->ht_info->bCurrentHTSupport)
				rtl92e_writeb(dev, 0x173, 1);
			rtl92e_set_key(dev, 4, idx, alg,
				       (u8 *)ieee->ap_mac_addr, 0, key);
			rtl92e_set_swcam(dev, 4, idx, alg, (u8 *)ieee->ap_mac_addr, key);
		}
	}
end_hw_sec:
	priv->rtllib->wx_set_enc = 0;
	mutex_unlock(&priv->wx_mutex);
	return ret;
}
/* SIOCSIWAUTH: set 802.11 authentication parameters via rtllib. */
static int _rtl92e_wx_set_auth(struct net_device *dev,
			       struct iw_request_info *info,
			       union iwreq_data *data, char *extra)
{
	int ret = 0;
	struct r8192_priv *priv = rtllib_priv(dev);

	if (priv->hw_radio_off)
		return 0;
	mutex_lock(&priv->wx_mutex);
	ret = rtllib_wx_set_auth(priv->rtllib, info, &data->param, extra);
	mutex_unlock(&priv->wx_mutex);
	return ret;
}
/* SIOCSIWMLME: handle MLME requests (deauth/disassoc) via rtllib. */
static int _rtl92e_wx_set_mlme(struct net_device *dev,
			       struct iw_request_info *info,
			       union iwreq_data *wrqu, char *extra)
{
	int ret = 0;
	struct r8192_priv *priv = rtllib_priv(dev);

	if (priv->hw_radio_off)
		return 0;
	mutex_lock(&priv->wx_mutex);
	ret = rtllib_wx_set_mlme(priv->rtllib, info, wrqu, extra);
	mutex_unlock(&priv->wx_mutex);
	return ret;
}
/* SIOCSIWGENIE: install a generic IE (e.g. the WPA IE) via rtllib. */
static int _rtl92e_wx_set_gen_ie(struct net_device *dev,
				 struct iw_request_info *info,
				 union iwreq_data *data, char *extra)
{
	int ret = 0;
	struct r8192_priv *priv = rtllib_priv(dev);

	if (priv->hw_radio_off)
		return 0;
	mutex_lock(&priv->wx_mutex);
	ret = rtllib_wx_set_gen_ie(priv->rtllib, extra, data->data.length);
	mutex_unlock(&priv->wx_mutex);
	return ret;
}
/* SIOCGIWGENIE: copy out the stored WPA IE, or -E2BIG if it won't fit. */
static int _rtl92e_wx_get_gen_ie(struct net_device *dev,
				 struct iw_request_info *info,
				 union iwreq_data *data, char *extra)
{
	int ret = 0;
	struct r8192_priv *priv = rtllib_priv(dev);
	struct rtllib_device *ieee = priv->rtllib;

	if (ieee->wpa_ie_len == 0 || ieee->wpa_ie == NULL) {
		data->data.length = 0;
		return 0;
	}
	if (data->data.length < ieee->wpa_ie_len)
		return -E2BIG;
	data->data.length = ieee->wpa_ie_len;
	memcpy(extra, ieee->wpa_ie, ieee->wpa_ie_len);
	return ret;
}
#define OID_RT_INTEL_PROMISCUOUS_MODE 0xFF0101F6
static int _rtl92e_wx_set_promisc_mode(struct net_device *dev,
struct iw_request_info *info,
union iwreq_data *wrqu, char *extra)
{
struct r8192_priv *priv = rtllib_priv(dev);
struct rtllib_device *ieee = priv->rtllib;
u32 info_buf[3];
u32 oid;
u32 promiscuous_on;
u32 fltr_src_sta_frame;
if (copy_from_user(info_buf, wrqu->data.pointer, sizeof(info_buf)))
return -EFAULT;
oid = info_buf[0];
promiscuous_on = info_buf[1];
fltr_src_sta_frame = info_buf[2];
if (oid == OID_RT_INTEL_PROMISCUOUS_MODE) {
ieee->intel_promiscuous_md_info.promiscuous_on =
(promiscuous_on) ? (true) : (false);
ieee->intel_promiscuous_md_info.fltr_src_sta_frame =
(fltr_src_sta_frame) ? (true) : (false);
(promiscuous_on) ?
(rtllib_EnableIntelPromiscuousMode(dev, false)) :
(rtllib_DisableIntelPromiscuousMode(dev, false));
netdev_info(dev,
"=======>%s(), on = %d, filter src sta = %d\n",
__func__, promiscuous_on,
fltr_src_sta_frame);
} else {
return -1;
}
return 0;
}
/* Private ioctl (getpromisc): report the current Intel promiscuous-mode
 * state to userspace as a human-readable string in @extra.
 */
static int _rtl92e_wx_get_promisc_mode(struct net_device *dev,
				       struct iw_request_info *info,
				       union iwreq_data *wrqu, char *extra)
{
	struct r8192_priv *priv = rtllib_priv(dev);
	struct rtllib_device *ieee = priv->rtllib;

	mutex_lock(&priv->wx_mutex);
	/* 45 matches the fixed buffer size advertised for "getpromisc"
	 * in r8192_private_args (IW_PRIV_SIZE_FIXED | 45).
	 */
	snprintf(extra, 45, "PromiscuousMode:%d, FilterSrcSTAFrame:%d",
		 ieee->intel_promiscuous_md_info.promiscuous_on,
		 ieee->intel_promiscuous_md_info.fltr_src_sta_frame);
	/* Length reported to userspace includes the terminating NUL. */
	wrqu->data.length = strlen(extra) + 1;
	mutex_unlock(&priv->wx_mutex);
	return 0;
}
#define IW_IOCTL(x) ((x) - SIOCSIWCOMMIT)
/* Standard Wireless Extensions handler table, indexed by
 * ioctl number relative to SIOCSIWCOMMIT (see IW_IOCTL above).
 * Unlisted slots default to NULL (unsupported ioctl).
 */
static iw_handler r8192_wx_handlers[] = {
	[IW_IOCTL(SIOCGIWNAME)] = _rtl92e_wx_get_name,
	[IW_IOCTL(SIOCSIWFREQ)] = _rtl92e_wx_set_freq,
	[IW_IOCTL(SIOCGIWFREQ)] = _rtl92e_wx_get_freq,
	[IW_IOCTL(SIOCSIWMODE)] = _rtl92e_wx_set_mode,
	[IW_IOCTL(SIOCGIWMODE)] = _rtl92e_wx_get_mode,
	[IW_IOCTL(SIOCGIWRANGE)] = _rtl92e_wx_get_range,
	[IW_IOCTL(SIOCSIWAP)] = _rtl92e_wx_set_wap,
	[IW_IOCTL(SIOCGIWAP)] = _rtl92e_wx_get_wap,
	[IW_IOCTL(SIOCSIWSCAN)] = _rtl92e_wx_set_scan,
	[IW_IOCTL(SIOCGIWSCAN)] = _rtl92e_wx_get_scan,
	[IW_IOCTL(SIOCSIWESSID)] = _rtl92e_wx_set_essid,
	[IW_IOCTL(SIOCGIWESSID)] = _rtl92e_wx_get_essid,
	[IW_IOCTL(SIOCSIWNICKN)] = _rtl92e_wx_set_nick,
	[IW_IOCTL(SIOCGIWNICKN)] = _rtl92e_wx_get_nick,
	[IW_IOCTL(SIOCSIWRATE)] = _rtl92e_wx_set_rate,
	[IW_IOCTL(SIOCGIWRATE)] = _rtl92e_wx_get_rate,
	[IW_IOCTL(SIOCSIWRTS)] = _rtl92e_wx_set_rts,
	[IW_IOCTL(SIOCGIWRTS)] = _rtl92e_wx_get_rts,
	[IW_IOCTL(SIOCSIWFRAG)] = _rtl92e_wx_set_frag,
	[IW_IOCTL(SIOCGIWFRAG)] = _rtl92e_wx_get_frag,
	[IW_IOCTL(SIOCSIWRETRY)] = _rtl92e_wx_set_retry,
	[IW_IOCTL(SIOCGIWRETRY)] = _rtl92e_wx_get_retry,
	[IW_IOCTL(SIOCSIWENCODE)] = _rtl92e_wx_set_enc,
	[IW_IOCTL(SIOCGIWENCODE)] = _rtl92e_wx_get_enc,
	[IW_IOCTL(SIOCSIWPOWER)] = _rtl92e_wx_set_power,
	[IW_IOCTL(SIOCGIWPOWER)] = _rtl92e_wx_get_power,
	[IW_IOCTL(SIOCSIWGENIE)] = _rtl92e_wx_set_gen_ie,
	[IW_IOCTL(SIOCGIWGENIE)] = _rtl92e_wx_get_gen_ie,
	[IW_IOCTL(SIOCSIWMLME)] = _rtl92e_wx_set_mlme,
	[IW_IOCTL(SIOCSIWAUTH)] = _rtl92e_wx_set_auth,
	[IW_IOCTL(SIOCSIWENCODEEXT)] = _rtl92e_wx_set_encode_ext,
};
/* The following ordering rule must be observed:
 * odd entries : get (world access),
 * even entries: set (root access)
 */
/* Descriptions of the driver-private ioctls (SIOCIWFIRSTPRIV + offset).
 * Each entry declares the argument type/size iwpriv uses to marshal the
 * call; the matching handlers live in r8192_private_handler[] at the
 * same offsets.
 */
static const struct iw_priv_args r8192_private_args[] = {
	{
		SIOCIWFIRSTPRIV + 0x0,
		IW_PRIV_TYPE_INT | IW_PRIV_SIZE_FIXED | 1, 0, "set_debugflag"
	}, {
		SIOCIWFIRSTPRIV + 0x1,
		IW_PRIV_TYPE_INT | IW_PRIV_SIZE_FIXED | 1, 0, "activescan"
	}, {
		SIOCIWFIRSTPRIV + 0x2,
		IW_PRIV_TYPE_INT | IW_PRIV_SIZE_FIXED | 1, 0, "rawtx"
	}, {
		SIOCIWFIRSTPRIV + 0x3,
		IW_PRIV_TYPE_INT | IW_PRIV_SIZE_FIXED | 1, 0, "forcereset"
	}, {
		SIOCIWFIRSTPRIV + 0x6,
		IW_PRIV_TYPE_INT | IW_PRIV_SIZE_FIXED | 1, IW_PRIV_TYPE_NONE,
		"set_power"
	}, {
		SIOCIWFIRSTPRIV + 0xa,
		IW_PRIV_TYPE_INT | IW_PRIV_SIZE_FIXED | 1, IW_PRIV_TYPE_NONE,
		"lps_interv"
	}, {
		SIOCIWFIRSTPRIV + 0xb,
		IW_PRIV_TYPE_INT | IW_PRIV_SIZE_FIXED | 1, IW_PRIV_TYPE_NONE,
		"lps_force"
	}, {
		SIOCIWFIRSTPRIV + 0x16,
		IW_PRIV_TYPE_INT | IW_PRIV_SIZE_FIXED | 3, 0, "setpromisc"
	}, {
		/* Returns a fixed 45-byte string; see
		 * _rtl92e_wx_get_promisc_mode().
		 */
		SIOCIWFIRSTPRIV + 0x17,
		0, IW_PRIV_TYPE_CHAR | IW_PRIV_SIZE_FIXED | 45, "getpromisc"
	}
};
/* Private-ioctl handler table; slot N handles SIOCIWFIRSTPRIV + N.
 * NULL slots are reserved/unimplemented offsets.
 */
static iw_handler r8192_private_handler[] = {
	(iw_handler)_rtl92e_wx_set_debug, /*SIOCIWSECONDPRIV*/
	(iw_handler)_rtl92e_wx_set_scan_type,
	(iw_handler)_rtl92e_wx_set_rawtx,
	(iw_handler)_rtl92e_wx_force_reset,
	(iw_handler)NULL,
	(iw_handler)NULL,
	(iw_handler)_rtl92e_wx_adapter_power_status,	/* +0x6 "set_power" */
	(iw_handler)NULL,
	(iw_handler)NULL,
	(iw_handler)NULL,
	(iw_handler)_rtl92e_wx_set_lps_awake_interval,	/* +0xa "lps_interv" */
	(iw_handler)_rtl92e_wx_set_force_lps,		/* +0xb "lps_force" */
	(iw_handler)NULL,
	(iw_handler)NULL,
	(iw_handler)NULL,
	(iw_handler)NULL,
	(iw_handler)NULL,
	(iw_handler)NULL,
	(iw_handler)NULL,
	(iw_handler)NULL,
	(iw_handler)NULL,
	(iw_handler)NULL,
	(iw_handler)_rtl92e_wx_set_promisc_mode,	/* +0x16 "setpromisc" */
	(iw_handler)_rtl92e_wx_get_promisc_mode,	/* +0x17 "getpromisc" */
};
/* Fill and return the cached iw_statistics for this device.
 * Before association, fixed placeholder values are reported.
 */
static struct iw_statistics *_rtl92e_get_wireless_stats(struct net_device *dev)
{
	struct r8192_priv *priv = rtllib_priv(dev);
	struct rtllib_device *ieee = priv->rtllib;
	struct iw_statistics *wstats = &priv->wstats;

	if (ieee->link_state < MAC80211_LINKED) {
		/* Not associated yet: report placeholder quality figures. */
		wstats->qual.qual = 10;
		wstats->qual.level = 0;
		wstats->qual.noise = 0x100 - 100;	/* -100 dBm */
		wstats->qual.updated = IW_QUAL_ALL_UPDATED | IW_QUAL_DBM;
		return wstats;
	}

	wstats->qual.level = ieee->current_network.stats.rssi;
	wstats->qual.qual = ieee->current_network.stats.signal;
	wstats->qual.noise = ieee->current_network.stats.noise;
	wstats->qual.updated = IW_QUAL_ALL_UPDATED | IW_QUAL_DBM;
	return wstats;
}
/* Wireless Extensions registration block for this driver.
 * num_private_args now uses ARRAY_SIZE (already used for the other two
 * counts) instead of the hand-rolled sizeof/sizeof division.
 */
const struct iw_handler_def r8192_wx_handlers_def = {
	.standard = r8192_wx_handlers,
	.num_standard = ARRAY_SIZE(r8192_wx_handlers),
	.private = r8192_private_handler,
	.num_private = ARRAY_SIZE(r8192_private_handler),
	.num_private_args = ARRAY_SIZE(r8192_private_args),
	.get_wireless_stats = _rtl92e_get_wireless_stats,
	/* Cast drops const because .private_args is non-const in the API. */
	.private_args = (struct iw_priv_args *)r8192_private_args,
};
| linux-master | drivers/staging/rtl8192e/rtl8192e/rtl_wx.c |
// SPDX-License-Identifier: GPL-2.0
/*
* Copyright(c) 2008 - 2010 Realtek Corporation. All rights reserved.
*
* Contact Information: wlanfae <[email protected]>
*/
#include "rtl_core.h"
#include "rtl_dm.h"
#include "r8192E_hw.h"
#include "r8192E_phy.h"
#include "r8192E_phyreg.h"
#include "r8190P_rtl8256.h"
#include "r8192E_cmdpkt.h"
/*---------------------------Define Local Constant---------------------------*/
/* Downlink EDCA parameter value per IOT peer type (indexed by the
 * HT_IOT_PEER_* enum); written to the AC parameter register by the
 * EDCA-turbo logic.
 */
static u32 edca_setting_DL[HT_IOT_PEER_MAX] = {
	0x5e4322,
	0x5e4322,
	0x5ea44f,
	0x5e4322,
	0x604322,
	0xa44f,
	0x5e4322,
	0x5e4332
};
/* Downlink EDCA parameter per IOT peer type when operating in G mode. */
static u32 edca_setting_DL_GMode[HT_IOT_PEER_MAX] = {
	0x5e4322,
	0x5e4322,
	0x5e4322,
	0x5e4322,
	0x604322,
	0xa44f,
	0x5e4322,
	0x5e4322
};
/* Uplink EDCA parameter per IOT peer type (indexed by HT_IOT_PEER_*). */
static u32 edca_setting_UL[HT_IOT_PEER_MAX] = {
	0x5e4322,
	0xa44f,
	0x5ea44f,
	0x5e4322,
	0x604322,
	0x5e4322,
	0x5e4322,
	0x5e4332
};
/* OFDM TX baseband gain table used by TX power tracking.  Entry 0 is
 * +12 dB, the last entry -24 dB; each write goes to
 * rOFDM0_XATxIQImbalance.
 * NOTE(review): a few mid-table entries (e.g. 0x1a00006c before
 * 0x1c800072) break the monotonic ordering - this matches the vendor
 * table as shipped, but verify against the chipset datasheet.
 */
const u32 dm_tx_bb_gain[TX_BB_GAIN_TABLE_LEN] = {
	0x7f8001fe, /* 12 dB */
	0x788001e2, /* 11 dB */
	0x71c001c7,
	0x6b8001ae,
	0x65400195,
	0x5fc0017f,
	0x5a400169,
	0x55400155,
	0x50800142,
	0x4c000130,
	0x47c0011f,
	0x43c0010f,
	0x40000100,
	0x3c8000f2,
	0x390000e4,
	0x35c000d7,
	0x32c000cb,
	0x300000c0,
	0x2d4000b5,
	0x2ac000ab,
	0x288000a2,
	0x26000098,
	0x24000090,
	0x22000088,
	0x20000080,
	0x1a00006c,
	0x1c800072,
	0x18000060,
	0x19800066,
	0x15800056,
	0x26c0005b,
	0x14400051,
	0x24400051,
	0x1300004c,
	0x12000048,
	0x11000044,
	0x10000040, /* -24 dB */
};
/* CCK TX baseband gain for channels 1-13: one 8-byte filter-coefficient
 * row per attenuation step (row 0 = least attenuation).  The rows are
 * written to rCCK0_TxFilter1/2 and rCCK0_DebugPort by
 * _rtl92e_dm_cck_tx_power_adjust_tssi().
 */
const u8 dm_cck_tx_bb_gain[CCK_TX_BB_GAIN_TABLE_LEN][8] = {
	{0x36, 0x35, 0x2e, 0x25, 0x1c, 0x12, 0x09, 0x04},
	{0x33, 0x32, 0x2b, 0x23, 0x1a, 0x11, 0x08, 0x04},
	{0x30, 0x2f, 0x29, 0x21, 0x19, 0x10, 0x08, 0x03},
	{0x2d, 0x2d, 0x27, 0x1f, 0x18, 0x0f, 0x08, 0x03},
	{0x2b, 0x2a, 0x25, 0x1e, 0x16, 0x0e, 0x07, 0x03},
	{0x28, 0x28, 0x22, 0x1c, 0x15, 0x0d, 0x07, 0x03},
	{0x26, 0x25, 0x21, 0x1b, 0x14, 0x0d, 0x06, 0x03},
	{0x24, 0x23, 0x1f, 0x19, 0x13, 0x0c, 0x06, 0x03},
	{0x22, 0x21, 0x1d, 0x18, 0x11, 0x0b, 0x06, 0x02},
	{0x20, 0x20, 0x1b, 0x16, 0x11, 0x08, 0x05, 0x02},
	{0x1f, 0x1e, 0x1a, 0x15, 0x10, 0x0a, 0x05, 0x02},
	{0x1d, 0x1c, 0x18, 0x14, 0x0f, 0x0a, 0x05, 0x02},
	{0x1b, 0x1a, 0x17, 0x13, 0x0e, 0x09, 0x04, 0x02},
	{0x1a, 0x19, 0x16, 0x12, 0x0d, 0x09, 0x04, 0x02},
	{0x18, 0x17, 0x15, 0x11, 0x0c, 0x08, 0x04, 0x02},
	{0x17, 0x16, 0x13, 0x10, 0x0c, 0x08, 0x04, 0x02},
	{0x16, 0x15, 0x12, 0x0f, 0x0b, 0x07, 0x04, 0x01},
	{0x14, 0x14, 0x11, 0x0e, 0x0b, 0x07, 0x03, 0x02},
	{0x13, 0x13, 0x10, 0x0d, 0x0a, 0x06, 0x03, 0x01},
	{0x12, 0x12, 0x0f, 0x0c, 0x09, 0x06, 0x03, 0x01},
	{0x11, 0x11, 0x0f, 0x0c, 0x09, 0x06, 0x03, 0x01},
	{0x10, 0x10, 0x0e, 0x0b, 0x08, 0x05, 0x03, 0x01},
	{0x0f, 0x0f, 0x0d, 0x0b, 0x08, 0x05, 0x03, 0x01}
};
/* CCK TX baseband gain for channel 14.  Same layout as dm_cck_tx_bb_gain;
 * the upper filter taps are zeroed, matching channel 14's narrower
 * spectral mask.
 */
const u8 dm_cck_tx_bb_gain_ch14[CCK_TX_BB_GAIN_TABLE_LEN][8] = {
	{0x36, 0x35, 0x2e, 0x1b, 0x00, 0x00, 0x00, 0x00},
	{0x33, 0x32, 0x2b, 0x19, 0x00, 0x00, 0x00, 0x00},
	{0x30, 0x2f, 0x29, 0x18, 0x00, 0x00, 0x00, 0x00},
	{0x2d, 0x2d, 0x27, 0x17, 0x00, 0x00, 0x00, 0x00},
	{0x2b, 0x2a, 0x25, 0x15, 0x00, 0x00, 0x00, 0x00},
	{0x28, 0x28, 0x22, 0x14, 0x00, 0x00, 0x00, 0x00},
	{0x26, 0x25, 0x21, 0x13, 0x00, 0x00, 0x00, 0x00},
	{0x24, 0x23, 0x1f, 0x12, 0x00, 0x00, 0x00, 0x00},
	{0x22, 0x21, 0x1d, 0x11, 0x00, 0x00, 0x00, 0x00},
	{0x20, 0x20, 0x1b, 0x10, 0x00, 0x00, 0x00, 0x00},
	{0x1f, 0x1e, 0x1a, 0x0f, 0x00, 0x00, 0x00, 0x00},
	{0x1d, 0x1c, 0x18, 0x0e, 0x00, 0x00, 0x00, 0x00},
	{0x1b, 0x1a, 0x17, 0x0e, 0x00, 0x00, 0x00, 0x00},
	{0x1a, 0x19, 0x16, 0x0d, 0x00, 0x00, 0x00, 0x00},
	{0x18, 0x17, 0x15, 0x0c, 0x00, 0x00, 0x00, 0x00},
	{0x17, 0x16, 0x13, 0x0b, 0x00, 0x00, 0x00, 0x00},
	{0x16, 0x15, 0x12, 0x0b, 0x00, 0x00, 0x00, 0x00},
	{0x14, 0x14, 0x11, 0x0a, 0x00, 0x00, 0x00, 0x00},
	{0x13, 0x13, 0x10, 0x0a, 0x00, 0x00, 0x00, 0x00},
	{0x12, 0x12, 0x0f, 0x09, 0x00, 0x00, 0x00, 0x00},
	{0x11, 0x11, 0x0f, 0x09, 0x00, 0x00, 0x00, 0x00},
	{0x10, 0x10, 0x0e, 0x08, 0x00, 0x00, 0x00, 0x00},
	{0x0f, 0x0f, 0x0d, 0x08, 0x00, 0x00, 0x00, 0x00}
};
/*---------------------------Define Local Constant---------------------------*/
/*------------------------Define global variable-----------------------------*/
/* Dynamic initial-gain (DIG) state shared across the DM code. */
struct dig_t dm_digtable;
/* RX path selection state shared across the DM code. */
struct drx_path_sel dm_rx_path_sel_table;
/*------------------------Define global variable-----------------------------*/
/*------------------------Define local variable------------------------------*/
/*------------------------Define local variable------------------------------*/
/*---------------------Define local function prototype-----------------------*/
static void _rtl92e_dm_check_rate_adaptive(struct net_device *dev);
static void _rtl92e_dm_init_bandwidth_autoswitch(struct net_device *dev);
static void _rtl92e_dm_bandwidth_autoswitch(struct net_device *dev);
static void _rtl92e_dm_check_tx_power_tracking(struct net_device *dev);
static void _rtl92e_dm_bb_initialgain_restore(struct net_device *dev);
static void _rtl92e_dm_dig_init(struct net_device *dev);
static void _rtl92e_dm_ctrl_initgain_byrssi(struct net_device *dev);
static void _rtl92e_dm_ctrl_initgain_byrssi_highpwr(struct net_device *dev);
static void _rtl92e_dm_ctrl_initgain_byrssi_driver(struct net_device *dev);
static void _rtl92e_dm_ctrl_initgain_byrssi_false_alarm(struct net_device *dev);
static void _rtl92e_dm_initial_gain(struct net_device *dev);
static void _rtl92e_dm_pd_th(struct net_device *dev);
static void _rtl92e_dm_cs_ratio(struct net_device *dev);
static void _rtl92e_dm_init_cts_to_self(struct net_device *dev);
static void _rtl92e_dm_check_edca_turbo(struct net_device *dev);
static void _rtl92e_dm_check_rx_path_selection(struct net_device *dev);
static void _rtl92e_dm_init_rx_path_selection(struct net_device *dev);
static void _rtl92e_dm_rx_path_sel_byrssi(struct net_device *dev);
static void _rtl92e_dm_init_fsync(struct net_device *dev);
static void _rtl92e_dm_deinit_fsync(struct net_device *dev);
static void _rtl92e_dm_check_txrateandretrycount(struct net_device *dev);
static void _rtl92e_dm_check_fsync(struct net_device *dev);
static void _rtl92e_dm_check_rf_ctrl_gpio(void *data);
static void _rtl92e_dm_fsync_timer_callback(struct timer_list *t);
/*---------------------Define local function prototype-----------------------*/
static void _rtl92e_dm_init_dynamic_tx_power(struct net_device *dev);
static void _rtl92e_dm_dynamic_tx_power(struct net_device *dev);
static void _rtl92e_dm_send_rssi_to_fw(struct net_device *dev);
static void _rtl92e_dm_cts_to_self(struct net_device *dev);
/*---------------------------Define function prototype------------------------*/
/* One-time dynamic-mechanism initialisation, run when the adapter is
 * brought up.  Sets every DM sub-module to its default state and
 * registers the GPIO RF-switch polling work item.
 */
void rtl92e_dm_init(struct net_device *dev)
{
	struct r8192_priv *priv = rtllib_priv(dev);

	/* -1 flags "no RSSI sample collected yet". */
	priv->undecorated_smoothed_pwdb = -1;
	_rtl92e_dm_init_dynamic_tx_power(dev);
	rtl92e_init_adaptive_rate(dev);
	_rtl92e_dm_dig_init(dev);
	rtl92e_dm_init_edca_turbo(dev);
	_rtl92e_dm_init_bandwidth_autoswitch(dev);
	_rtl92e_dm_init_fsync(dev);
	_rtl92e_dm_init_rx_path_selection(dev);
	_rtl92e_dm_init_cts_to_self(dev);
	/* NOTE(review): the (void *) cast defeats type checking of the
	 * work callback signature - confirm it matches work_func_t.
	 */
	INIT_DELAYED_WORK(&priv->gpio_change_rf_wq, (void *)_rtl92e_dm_check_rf_ctrl_gpio);
}
/* Tear down dynamic-mechanism state; currently only stops the fsync
 * timer machinery.
 */
void rtl92e_dm_deinit(struct net_device *dev)
{
	_rtl92e_dm_deinit_fsync(dev);
}
/* Periodic dynamic-mechanism pass, invoked from the driver watchdog.
 * Skipped while the adapter is still initialising.
 */
void rtl92e_dm_watchdog(struct net_device *dev)
{
	struct r8192_priv *priv = rtllib_priv(dev);

	if (priv->being_init_adapter)
		return;
	_rtl92e_dm_check_txrateandretrycount(dev);
	_rtl92e_dm_check_edca_turbo(dev);
	_rtl92e_dm_check_rate_adaptive(dev);
	_rtl92e_dm_dynamic_tx_power(dev);
	_rtl92e_dm_check_tx_power_tracking(dev);
	_rtl92e_dm_ctrl_initgain_byrssi(dev);
	_rtl92e_dm_bandwidth_autoswitch(dev);
	_rtl92e_dm_check_rx_path_selection(dev);
	_rtl92e_dm_check_fsync(dev);
	_rtl92e_dm_send_rssi_to_fw(dev);
	_rtl92e_dm_cts_to_self(dev);
}
/* Reset the rate-adaptive state machine to its power-on defaults. */
void rtl92e_init_adaptive_rate(struct net_device *dev)
{
	struct r8192_priv *priv = rtllib_priv(dev);
	struct rate_adaptive *pra = &priv->rate_adaptive;

	pra->ratr_state = DM_RATR_STA_MAX;

	/* RSSI thresholds; the upward transitions get 5 dB of hysteresis. */
	pra->high2low_rssi_thresh_for_ra = RATE_ADAPTIVE_TH_HIGH;
	pra->high_rssi_thresh_for_ra = RATE_ADAPTIVE_TH_HIGH + 5;
	pra->low_rssi_thresh_for_ra20M = RATE_ADAPTIVE_TH_LOW_20M;
	pra->low2high_rssi_thresh_for_ra20M = RATE_ADAPTIVE_TH_LOW_20M + 5;
	pra->low_rssi_thresh_for_ra40M = RATE_ADAPTIVE_TH_LOW_40M;
	pra->low2high_rssi_thresh_for_ra40M = RATE_ADAPTIVE_TH_LOW_40M + 5;

	/* Ping-RSSI handling is only enabled for Netcore customer boards. */
	pra->ping_rssi_enable =
		(priv->customer_id == RT_CID_819X_NETCORE) ? 1 : 0;
	pra->ping_rssi_thresh_for_ra = 15;

	/* Default RATR bitmaps for each RSSI band. */
	pra->upper_rssi_threshold_ratr = 0x000fc000;
	pra->middle_rssi_threshold_ratr = 0x000ff000;
	pra->low_rssi_threshold_ratr = 0x000ff001;
	pra->low_rssi_threshold_ratr_40M = 0x000ff005;
	pra->low_rssi_threshold_ratr_20M = 0x000ff001;
	pra->ping_rssi_ratr = 0x0000000d;
}
/* Periodic rate-adaptive check (watchdog context).
 *
 * While linked in 11n/2.4 GHz mode, selects a target RATR bitmap from the
 * smoothed RSSI using a three-state (HIGH/MIDDLE/LOW) machine with
 * hysteresis, applies the optional "ping RSSI" override, and programs
 * RATR0 only when the target differs from the current hardware value.
 */
static void _rtl92e_dm_check_rate_adaptive(struct net_device *dev)
{
	struct r8192_priv *priv = rtllib_priv(dev);
	struct rt_hi_throughput *ht_info = priv->rtllib->ht_info;
	struct rate_adaptive *pra = &priv->rate_adaptive;
	u32 current_ratr, target_ratr = 0;
	u32 low_rssi_thresh_for_ra = 0, high_rssi_thresh_for_ra = 0;
	bool bshort_gi_enabled = false;
	static u8 ping_rssi_state;	/* latches the ping-RSSI override */

	if (!priv->up)
		return;
	if (pra->rate_adaptive_disabled)
		return;
	if (priv->rtllib->mode != WIRELESS_MODE_N_24G)
		return;
	if (priv->rtllib->link_state == MAC80211_LINKED) {
		/* Short GI counts only for the bandwidth currently in use. */
		bshort_gi_enabled = (ht_info->cur_tx_bw40mhz &&
				     ht_info->bCurShortGI40MHz) ||
				    (!ht_info->cur_tx_bw40mhz &&
				     ht_info->bCurShortGI20MHz);
		/* BIT31 of each RATR mirrors the short-GI capability. */
		pra->upper_rssi_threshold_ratr =
			(pra->upper_rssi_threshold_ratr & (~BIT31)) |
			((bshort_gi_enabled) ? BIT31 : 0);
		pra->middle_rssi_threshold_ratr =
			(pra->middle_rssi_threshold_ratr & (~BIT31)) |
			((bshort_gi_enabled) ? BIT31 : 0);
		if (priv->current_chnl_bw != HT_CHANNEL_WIDTH_20) {
			pra->low_rssi_threshold_ratr =
				(pra->low_rssi_threshold_ratr_40M & (~BIT31)) |
				((bshort_gi_enabled) ? BIT31 : 0);
		} else {
			pra->low_rssi_threshold_ratr =
				(pra->low_rssi_threshold_ratr_20M & (~BIT31)) |
				((bshort_gi_enabled) ? BIT31 : 0);
		}
		pra->ping_rssi_ratr =
			(pra->ping_rssi_ratr & (~BIT31)) |
			((bshort_gi_enabled) ? BIT31 : 0);
		/* Pick state-dependent thresholds so transitions have
		 * hysteresis (different up/down trip points).
		 */
		if (pra->ratr_state == DM_RATR_STA_HIGH) {
			high_rssi_thresh_for_ra = pra->high2low_rssi_thresh_for_ra;
			low_rssi_thresh_for_ra = (priv->current_chnl_bw != HT_CHANNEL_WIDTH_20) ?
					(pra->low_rssi_thresh_for_ra40M) : (pra->low_rssi_thresh_for_ra20M);
		} else if (pra->ratr_state == DM_RATR_STA_LOW) {
			high_rssi_thresh_for_ra = pra->high_rssi_thresh_for_ra;
			low_rssi_thresh_for_ra = (priv->current_chnl_bw != HT_CHANNEL_WIDTH_20) ?
					(pra->low2high_rssi_thresh_for_ra40M) : (pra->low2high_rssi_thresh_for_ra20M);
		} else {
			high_rssi_thresh_for_ra = pra->high_rssi_thresh_for_ra;
			low_rssi_thresh_for_ra = (priv->current_chnl_bw != HT_CHANNEL_WIDTH_20) ?
					(pra->low_rssi_thresh_for_ra40M) : (pra->low_rssi_thresh_for_ra20M);
		}
		if (priv->undecorated_smoothed_pwdb >=
		    (long)high_rssi_thresh_for_ra) {
			pra->ratr_state = DM_RATR_STA_HIGH;
			target_ratr = pra->upper_rssi_threshold_ratr;
		} else if (priv->undecorated_smoothed_pwdb >=
			   (long)low_rssi_thresh_for_ra) {
			pra->ratr_state = DM_RATR_STA_MIDDLE;
			target_ratr = pra->middle_rssi_threshold_ratr;
		} else {
			pra->ratr_state = DM_RATR_STA_LOW;
			target_ratr = pra->low_rssi_threshold_ratr;
		}
		/* Ping-RSSI override: force the LOW rate set when signal
		 * drops near the ping threshold, with 5 dB hysteresis.
		 */
		if (pra->ping_rssi_enable) {
			if (priv->undecorated_smoothed_pwdb <
			    (long)(pra->ping_rssi_thresh_for_ra + 5)) {
				if ((priv->undecorated_smoothed_pwdb <
				     (long)pra->ping_rssi_thresh_for_ra) ||
				    ping_rssi_state) {
					pra->ratr_state = DM_RATR_STA_LOW;
					target_ratr = pra->ping_rssi_ratr;
					ping_rssi_state = 1;
				}
			} else {
				ping_rssi_state = 0;
			}
		}
		/* Half-N-mode APs: strip the rates they cannot use. */
		if (priv->rtllib->GetHalfNmodeSupportByAPsHandler(dev))
			target_ratr &= 0xf00fffff;
		current_ratr = rtl92e_readl(dev, RATR0);
		if (target_ratr != current_ratr) {
			u32 ratr_value;

			ratr_value = target_ratr;
			ratr_value &= ~(RATE_ALL_OFDM_2SS);
			rtl92e_writel(dev, RATR0, ratr_value);
			rtl92e_writeb(dev, UFWP, 1);
			pra->last_ratr = target_ratr;
		}
	} else {
		pra->ratr_state = DM_RATR_STA_MAX;
	}
}
static void _rtl92e_dm_init_bandwidth_autoswitch(struct net_device *dev)
{
struct r8192_priv *priv = rtllib_priv(dev);
priv->rtllib->bandwidth_auto_switch.threshold_20Mhzto40Mhz = BW_AUTO_SWITCH_LOW_HIGH;
priv->rtllib->bandwidth_auto_switch.threshold_40Mhzto20Mhz = BW_AUTO_SWITCH_HIGH_LOW;
priv->rtllib->bandwidth_auto_switch.bforced_tx20Mhz = false;
priv->rtllib->bandwidth_auto_switch.bautoswitch_enable = false;
}
static void _rtl92e_dm_bandwidth_autoswitch(struct net_device *dev)
{
struct r8192_priv *priv = rtllib_priv(dev);
if (priv->current_chnl_bw == HT_CHANNEL_WIDTH_20 ||
!priv->rtllib->bandwidth_auto_switch.bautoswitch_enable)
return;
if (!priv->rtllib->bandwidth_auto_switch.bforced_tx20Mhz) {
if (priv->undecorated_smoothed_pwdb <=
priv->rtllib->bandwidth_auto_switch.threshold_40Mhzto20Mhz)
priv->rtllib->bandwidth_auto_switch.bforced_tx20Mhz = true;
} else {
if (priv->undecorated_smoothed_pwdb >=
priv->rtllib->bandwidth_auto_switch.threshold_20Mhzto40Mhz)
priv->rtllib->bandwidth_auto_switch.bforced_tx20Mhz = false;
}
}
/* OFDM swing table for thermal-meter TX power tracking; written to
 * rOFDM0_XATxIQImbalance.  Roughly every other entry of dm_tx_bb_gain.
 * NOTE(review): 0x26c0005b / 0x24400051 break the descending order -
 * matches the vendor table as shipped, verify against the datasheet.
 */
static u32 OFDMSwingTable[OFDM_TABLE_LEN] = {
	0x7f8001fe,
	0x71c001c7,
	0x65400195,
	0x5a400169,
	0x50800142,
	0x47c0011f,
	0x40000100,
	0x390000e4,
	0x32c000cb,
	0x2d4000b5,
	0x288000a2,
	0x24000090,
	0x20000080,
	0x1c800072,
	0x19800066,
	0x26c0005b,
	0x24400051,
	0x12000048,
	0x10000040
};
/* CCK swing table (channels 1-13) for thermal-meter tracking; one
 * 8-byte filter-coefficient row per swing index.
 */
static u8 CCKSwingTable_Ch1_Ch13[CCK_TABLE_LEN][8] = {
	{0x36, 0x35, 0x2e, 0x25, 0x1c, 0x12, 0x09, 0x04},
	{0x30, 0x2f, 0x29, 0x21, 0x19, 0x10, 0x08, 0x03},
	{0x2b, 0x2a, 0x25, 0x1e, 0x16, 0x0e, 0x07, 0x03},
	{0x26, 0x25, 0x21, 0x1b, 0x14, 0x0d, 0x06, 0x03},
	{0x22, 0x21, 0x1d, 0x18, 0x11, 0x0b, 0x06, 0x02},
	{0x1f, 0x1e, 0x1a, 0x15, 0x10, 0x0a, 0x05, 0x02},
	{0x1b, 0x1a, 0x17, 0x13, 0x0e, 0x09, 0x04, 0x02},
	{0x18, 0x17, 0x15, 0x11, 0x0c, 0x08, 0x04, 0x02},
	{0x16, 0x15, 0x12, 0x0f, 0x0b, 0x07, 0x04, 0x01},
	{0x13, 0x13, 0x10, 0x0d, 0x0a, 0x06, 0x03, 0x01},
	{0x11, 0x11, 0x0f, 0x0c, 0x09, 0x06, 0x03, 0x01},
	{0x0f, 0x0f, 0x0d, 0x0b, 0x08, 0x05, 0x03, 0x01}
};
/* CCK swing table for channel 14 (upper taps zeroed for the narrower
 * channel-14 spectral mask).
 */
static u8 CCKSwingTable_Ch14[CCK_TABLE_LEN][8] = {
	{0x36, 0x35, 0x2e, 0x1b, 0x00, 0x00, 0x00, 0x00},
	{0x30, 0x2f, 0x29, 0x18, 0x00, 0x00, 0x00, 0x00},
	{0x2b, 0x2a, 0x25, 0x15, 0x00, 0x00, 0x00, 0x00},
	{0x26, 0x25, 0x21, 0x13, 0x00, 0x00, 0x00, 0x00},
	{0x22, 0x21, 0x1d, 0x11, 0x00, 0x00, 0x00, 0x00},
	{0x1f, 0x1e, 0x1a, 0x0f, 0x00, 0x00, 0x00, 0x00},
	{0x1b, 0x1a, 0x17, 0x0e, 0x00, 0x00, 0x00, 0x00},
	{0x18, 0x17, 0x15, 0x0c, 0x00, 0x00, 0x00, 0x00},
	{0x16, 0x15, 0x12, 0x0b, 0x00, 0x00, 0x00, 0x00},
	{0x13, 0x13, 0x10, 0x0a, 0x00, 0x00, 0x00, 0x00},
	{0x11, 0x11, 0x0f, 0x09, 0x00, 0x00, 0x00, 0x00},
	{0x0f, 0x0f, 0x0d, 0x08, 0x00, 0x00, 0x00, 0x00}
};
#define Pw_Track_Flag 0x11d
#define Tssi_Mea_Value 0x13c
#define Tssi_Report_Value1 0x134
#define Tssi_Report_Value2 0x13e
#define FW_Busy_Flag 0x13f
/* TSSI says we transmit too strongly: step the path-A BB gain table
 * index down by one, clamping the "real" index at entry 4.
 */
static void _rtl92e_dm_tx_update_tssi_weak_signal(struct net_device *dev)
{
	struct r8192_priv *p = rtllib_priv(dev);

	if (p->rfa_txpowertrackingindex == 0) {
		/* Already at the bottom of the table: pin the gain. */
		rtl92e_set_bb_reg(dev, rOFDM0_XATxIQImbalance,
				  bMaskDWord, dm_tx_bb_gain[4]);
		return;
	}

	p->rfa_txpowertrackingindex--;
	if (p->rfa_txpowertrackingindex_real > 4) {
		p->rfa_txpowertrackingindex_real--;
		rtl92e_set_bb_reg(dev, rOFDM0_XATxIQImbalance, bMaskDWord,
				  dm_tx_bb_gain[p->rfa_txpowertrackingindex_real]);
	}
}
static void _rtl92e_dm_tx_update_tssi_strong_signal(struct net_device *dev)
{
struct r8192_priv *p = rtllib_priv(dev);
if (p->rfa_txpowertrackingindex < (TX_BB_GAIN_TABLE_LEN - 1)) {
p->rfa_txpowertrackingindex++;
p->rfa_txpowertrackingindex_real++;
rtl92e_set_bb_reg(dev, rOFDM0_XATxIQImbalance,
bMaskDWord,
dm_tx_bb_gain[p->rfa_txpowertrackingindex_real]);
} else {
rtl92e_set_bb_reg(dev, rOFDM0_XATxIQImbalance,
bMaskDWord,
dm_tx_bb_gain[TX_BB_GAIN_TABLE_LEN - 1]);
}
}
/* TSSI-based TX power tracking work (D-cut and later silicon).
 *
 * Repeatedly asks the firmware for a TSSI measurement (up to 31 command
 * attempts, each polled up to 31 times via the Pw_Track_Flag mailbox
 * byte), averages the five report bytes, compares against the calibrated
 * 13 dBm TSSI and steps the BB gain up or down until the delta is within
 * E_FOR_TX_POWER_TRACK.  Dynamic TX power is disabled for the duration
 * and re-enabled before returning.
 */
static void _rtl92e_dm_tx_power_tracking_callback_tssi(struct net_device *dev)
{
	struct r8192_priv *priv = rtllib_priv(dev);
	bool viviflag = false;
	struct dcmd_txcmd tx_cmd;
	int i = 0, j = 0, k = 0;
	u8 tmp_report[5] = {0, 0, 0, 0, 0};
	u8 Pwr_Flag;
	u16 Avg_TSSI_Meas, tssi_13dBm, Avg_TSSI_Meas_from_driver = 0;
	u32 delta = 0;

	/* Clear both firmware handshake flags before starting. */
	rtl92e_writeb(dev, Pw_Track_Flag, 0);
	rtl92e_writeb(dev, FW_Busy_Flag, 0);
	priv->rtllib->bdynamic_txpower_enable = false;
	for (j = 0; j <= 30; j++) {
		/* Ask firmware for a TSSI measurement round. */
		tx_cmd.op = TXCMD_SET_TX_PWR_TRACKING;
		tx_cmd.length = 4;
		tx_cmd.value = priv->pwr_track >> 24;
		rtl92e_send_cmd_pkt(dev, DESC_PACKET_TYPE_NORMAL, (u8 *)&tx_cmd,
				    sizeof(struct dcmd_txcmd));
		mdelay(1);
		for (i = 0; i <= 30; i++) {
			Pwr_Flag = rtl92e_readb(dev, Pw_Track_Flag);
			if (Pwr_Flag == 0) {
				/* Result not ready; bail out early on
				 * reset or RF-off, otherwise keep polling.
				 */
				mdelay(1);
				if (priv->reset_in_progress) {
					rtl92e_writeb(dev, Pw_Track_Flag, 0);
					rtl92e_writeb(dev, FW_Busy_Flag, 0);
					return;
				}
				if (priv->rtllib->rf_power_state != rf_on) {
					rtl92e_writeb(dev, Pw_Track_Flag, 0);
					rtl92e_writeb(dev, FW_Busy_Flag, 0);
					return;
				}
				continue;
			}
			Avg_TSSI_Meas = rtl92e_readw(dev, Tssi_Mea_Value);
			if (Avg_TSSI_Meas == 0) {
				rtl92e_writeb(dev, Pw_Track_Flag, 0);
				rtl92e_writeb(dev, FW_Busy_Flag, 0);
				return;
			}
			/* Collect the five TSSI report bytes; values <= 20
			 * are treated as invalid and restart the round.
			 */
			for (k = 0; k < 5; k++) {
				if (k != 4)
					tmp_report[k] = rtl92e_readb(dev,
							Tssi_Report_Value1 + k);
				else
					tmp_report[k] = rtl92e_readb(dev,
							Tssi_Report_Value2);
				if (tmp_report[k] <= 20) {
					viviflag = true;
					break;
				}
			}
			if (viviflag) {
				rtl92e_writeb(dev, Pw_Track_Flag, 0);
				viviflag = false;
				for (k = 0; k < 5; k++)
					tmp_report[k] = 0;
				break;
			}
			/* Average scaled by 100: sum * (100 / 5). */
			for (k = 0; k < 5; k++)
				Avg_TSSI_Meas_from_driver += tmp_report[k];
			Avg_TSSI_Meas_from_driver *= 100 / 5;
			tssi_13dBm = priv->tssi_13dBm;
			if (Avg_TSSI_Meas_from_driver > tssi_13dBm)
				delta = Avg_TSSI_Meas_from_driver - tssi_13dBm;
			else
				delta = tssi_13dBm - Avg_TSSI_Meas_from_driver;
			if (delta <= E_FOR_TX_POWER_TRACK) {
				/* Converged: done tracking. */
				priv->rtllib->bdynamic_txpower_enable = true;
				rtl92e_writeb(dev, Pw_Track_Flag, 0);
				rtl92e_writeb(dev, FW_Busy_Flag, 0);
				return;
			}
			if (Avg_TSSI_Meas_from_driver < tssi_13dBm - E_FOR_TX_POWER_TRACK)
				_rtl92e_dm_tx_update_tssi_weak_signal(dev);
			else
				_rtl92e_dm_tx_update_tssi_strong_signal(dev);
			/* Derive the CCK attenuation from the new OFDM
			 * index and clamp it to the CCK table range.
			 */
			priv->cck_present_attn_diff
				= priv->rfa_txpowertrackingindex_real - priv->rfa_txpowertracking_default;
			if (priv->current_chnl_bw == HT_CHANNEL_WIDTH_20)
				priv->cck_present_attn =
					priv->cck_present_attn_20m_def +
					priv->cck_present_attn_diff;
			else
				priv->cck_present_attn =
					priv->cck_present_attn_40m_def +
					priv->cck_present_attn_diff;
			if (priv->cck_present_attn > (CCK_TX_BB_GAIN_TABLE_LEN - 1))
				priv->cck_present_attn = CCK_TX_BB_GAIN_TABLE_LEN - 1;
			if (priv->cck_present_attn < 0)
				priv->cck_present_attn = 0;
			/* Always in range after the clamps above; also keep
			 * the channel-14 flag in sync before adjusting.
			 */
			if (priv->cck_present_attn > -1 &&
			    priv->cck_present_attn < CCK_TX_BB_GAIN_TABLE_LEN) {
				if (priv->rtllib->current_network.channel == 14 &&
				    !priv->bcck_in_ch14) {
					priv->bcck_in_ch14 = true;
					rtl92e_dm_cck_txpower_adjust(dev, priv->bcck_in_ch14);
				} else if (priv->rtllib->current_network.channel != 14 && priv->bcck_in_ch14) {
					priv->bcck_in_ch14 = false;
					rtl92e_dm_cck_txpower_adjust(dev, priv->bcck_in_ch14);
				} else {
					rtl92e_dm_cck_txpower_adjust(dev, priv->bcck_in_ch14);
				}
			}
			if (priv->cck_present_attn_diff <= -12 ||
			    priv->cck_present_attn_diff >= 24) {
				/* Drifted out of the usable window: stop. */
				priv->rtllib->bdynamic_txpower_enable = true;
				rtl92e_writeb(dev, Pw_Track_Flag, 0);
				rtl92e_writeb(dev, FW_Busy_Flag, 0);
				return;
			}
			rtl92e_writeb(dev, Pw_Track_Flag, 0);
			Avg_TSSI_Meas_from_driver = 0;
			for (k = 0; k < 5; k++)
				tmp_report[k] = 0;
			break;
		}
		rtl92e_writeb(dev, FW_Busy_Flag, 0);
	}
	priv->rtllib->bdynamic_txpower_enable = true;
	rtl92e_writeb(dev, Pw_Track_Flag, 0);
}
/* Thermal-meter-based TX power tracking (pre-D-cut silicon).
 *
 * First invocation only locates the current OFDM/CCK swing indexes from
 * the hardware registers.  Subsequent invocations read the RF thermal
 * meter, derive new OFDM and CCK swing indexes relative to the nominal
 * ThermalMeterVal, and reprogram the swing registers when they change.
 */
static void _rtl92e_dm_tx_power_tracking_cb_thermal(struct net_device *dev)
{
#define ThermalMeterVal	9
	struct r8192_priv *priv = rtllib_priv(dev);
	u32 tmp_reg, tmp_cck;
	u8 tmp_ofdm_index, tmp_cck_index, tmp_cck_20m_index, tmp_cck_40m_index, tmpval;
	int i = 0, CCKSwingNeedUpdate = 0;

	if (!priv->tx_pwr_tracking_init) {
		/* Discover the indexes currently programmed in hardware. */
		tmp_reg = rtl92e_get_bb_reg(dev, rOFDM0_XATxIQImbalance,
					    bMaskDWord);
		for (i = 0; i < OFDM_TABLE_LEN; i++) {
			if (tmp_reg == OFDMSwingTable[i])
				priv->ofdm_index[0] = i;
		}
		tmp_cck = rtl92e_get_bb_reg(dev, rCCK0_TxFilter1, bMaskByte2);
		for (i = 0; i < CCK_TABLE_LEN; i++) {
			if (tmp_cck == (u32)CCKSwingTable_Ch1_Ch13[i][0]) {
				priv->cck_index = i;
				break;
			}
		}
		priv->tx_pwr_tracking_init = true;
		return;
	}
	/* Read the thermal meter; only 3..13 is a plausible reading. */
	tmp_reg = rtl92e_get_rf_reg(dev, RF90_PATH_A, 0x12, 0x078);
	if (tmp_reg < 3 || tmp_reg > 13)
		return;
	if (tmp_reg >= 12)
		tmp_reg = 12;
	priv->thermal_meter[0] = ThermalMeterVal;
	priv->thermal_meter[1] = ThermalMeterVal;
	if (priv->thermal_meter[0] >= (u8)tmp_reg) {
		/* Cooler than nominal: raise the indexes (offset 6 is the
		 * nominal operating point), clamped to the table ends.
		 */
		tmp_ofdm_index = 6 + (priv->thermal_meter[0] - (u8)tmp_reg);
		tmp_cck_20m_index = tmp_ofdm_index;
		tmp_cck_40m_index = tmp_cck_20m_index - 6;
		if (tmp_ofdm_index >= OFDM_TABLE_LEN)
			tmp_ofdm_index = OFDM_TABLE_LEN - 1;
		if (tmp_cck_20m_index >= CCK_TABLE_LEN)
			tmp_cck_20m_index = CCK_TABLE_LEN - 1;
		if (tmp_cck_40m_index >= CCK_TABLE_LEN)
			tmp_cck_40m_index = CCK_TABLE_LEN - 1;
	} else {
		/* Hotter than nominal: lower the indexes toward 0. */
		tmpval = (u8)tmp_reg - priv->thermal_meter[0];
		if (tmpval >= 6) {
			tmp_ofdm_index = 0;
			tmp_cck_20m_index = 0;
		} else {
			tmp_ofdm_index = 6 - tmpval;
			tmp_cck_20m_index = 6 - tmpval;
		}
		tmp_cck_40m_index = 0;
	}
	if (priv->current_chnl_bw != HT_CHANNEL_WIDTH_20)
		tmp_cck_index = tmp_cck_40m_index;
	else
		tmp_cck_index = tmp_cck_20m_index;
	priv->rec_cck_20m_idx = tmp_cck_20m_index;
	priv->rec_cck_40m_idx = tmp_cck_40m_index;
	/* Track channel-14 transitions; they need a CCK table swap. */
	if (priv->rtllib->current_network.channel == 14 &&
	    !priv->bcck_in_ch14) {
		priv->bcck_in_ch14 = true;
		CCKSwingNeedUpdate = 1;
	} else if (priv->rtllib->current_network.channel != 14 &&
		   priv->bcck_in_ch14) {
		priv->bcck_in_ch14 = false;
		CCKSwingNeedUpdate = 1;
	}
	if (priv->cck_index != tmp_cck_index) {
		priv->cck_index = tmp_cck_index;
		CCKSwingNeedUpdate = 1;
	}
	if (CCKSwingNeedUpdate)
		rtl92e_dm_cck_txpower_adjust(dev, priv->bcck_in_ch14);
	if (priv->ofdm_index[0] != tmp_ofdm_index) {
		priv->ofdm_index[0] = tmp_ofdm_index;
		rtl92e_set_bb_reg(dev, rOFDM0_XATxIQImbalance, bMaskDWord,
				  OFDMSwingTable[priv->ofdm_index[0]]);
	}
	priv->txpower_count = 0;
}
/* Deferred-work entry point for TX power tracking: dispatch to the
 * TSSI (D-cut and later) or thermal-meter (older silicon) variant.
 */
void rtl92e_dm_txpower_tracking_wq(void *data)
{
	struct r8192_priv *priv = container_of_dwork_rsl(data,
				  struct r8192_priv, txpower_tracking_wq);
	struct net_device *dev = priv->rtllib->dev;

	if (priv->ic_cut < IC_VersionCut_D)
		_rtl92e_dm_tx_power_tracking_cb_thermal(dev);
	else
		_rtl92e_dm_tx_power_tracking_callback_tssi(dev);
}
/* Reset TSSI-based power-tracking state; tracking is always enabled. */
static void _rtl92e_dm_initialize_tx_power_tracking_tssi(struct net_device *dev)
{
	struct r8192_priv *priv = rtllib_priv(dev);

	priv->txpower_count = 0;
	priv->tx_pwr_tracking_init = false;
	priv->btxpower_tracking = true;
}
/* Reset thermal-meter power-tracking state; tracking is only enabled
 * when the firmware supports RF register access (FwRWRF).
 */
static void _rtl92e_dm_init_tx_power_tracking_thermal(struct net_device *dev)
{
	struct r8192_priv *priv = rtllib_priv(dev);

	priv->btxpower_tracking = priv->rtllib->FwRWRF ? true : false;
	priv->txpower_count = 0;
	priv->tx_pwr_tracking_init = false;
}
/* Initialise TX power tracking for the detected silicon revision. */
void rtl92e_dm_init_txpower_tracking(struct net_device *dev)
{
	struct r8192_priv *priv = rtllib_priv(dev);

	if (priv->ic_cut < IC_VersionCut_D)
		_rtl92e_dm_init_tx_power_tracking_thermal(dev);
	else
		_rtl92e_dm_initialize_tx_power_tracking_tssi(dev);
}
/* Watchdog hook for TSSI tracking: schedule the tracking work roughly
 * every 180 ticks unless the firmware flag at 0x11e blocks it.
 */
static void _rtl92e_dm_check_tx_power_tracking_tssi(struct net_device *dev)
{
	struct r8192_priv *priv = rtllib_priv(dev);
	static u32 tx_power_track_counter;

	if (rtl92e_readb(dev, 0x11e) == 1 || !priv->btxpower_tracking)
		return;

	if (++tx_power_track_counter < 180)
		return;

	tx_power_track_counter = 0;
	schedule_delayed_work(&priv->txpower_tracking_wq, 0);
}
/* Watchdog hook for thermal-meter tracking: after a short warm-up,
 * alternate between triggering a thermal measurement and scheduling the
 * tracking work that consumes it.
 */
static void _rtl92e_dm_check_tx_power_tracking_thermal(struct net_device *dev)
{
	struct r8192_priv *priv = rtllib_priv(dev);
	static u8 TM_Trigger;
	const u8 tx_power_check_cnt = 2;

	if (!priv->btxpower_tracking)
		return;

	/* Warm-up: skip the first few watchdog ticks. */
	if (priv->txpower_count <= tx_power_check_cnt) {
		priv->txpower_count++;
		return;
	}

	if (!TM_Trigger) {
		/* Toggle RF reg 0x02 twice to kick off a measurement. */
		rtl92e_set_rf_reg(dev, RF90_PATH_A, 0x02, bMask12Bits, 0x4d);
		rtl92e_set_rf_reg(dev, RF90_PATH_A, 0x02, bMask12Bits, 0x4f);
		rtl92e_set_rf_reg(dev, RF90_PATH_A, 0x02, bMask12Bits, 0x4d);
		rtl92e_set_rf_reg(dev, RF90_PATH_A, 0x02, bMask12Bits, 0x4f);
		TM_Trigger = 1;
		return;
	}

	netdev_info(dev, "===============>Schedule TxPowerTrackingWorkItem\n");
	schedule_delayed_work(&priv->txpower_tracking_wq, 0);
	TM_Trigger = 0;
}
/* Dispatch the per-tick tracking check for the detected silicon cut. */
static void _rtl92e_dm_check_tx_power_tracking(struct net_device *dev)
{
	struct r8192_priv *priv = rtllib_priv(dev);

	if (priv->ic_cut < IC_VersionCut_D)
		_rtl92e_dm_check_tx_power_tracking_thermal(dev);
	else
		_rtl92e_dm_check_tx_power_tracking_tssi(dev);
}
/* Program the CCK TX filter coefficients (TSSI tracking path) from the
 * gain row selected by the current attenuation, using the channel-14
 * table when @bInCH14 is set.
 */
static void _rtl92e_dm_cck_tx_power_adjust_tssi(struct net_device *dev,
						bool bInCH14)
{
	struct r8192_priv *priv = rtllib_priv(dev);
	u8 attenuation = priv->cck_present_attn;
	const u8 *gain = bInCH14 ? dm_cck_tx_bb_gain_ch14[attenuation] :
				   dm_cck_tx_bb_gain[attenuation];
	u32 val;

	/* Bytes 0-1 -> TxFilter1 high word. */
	val = (u32)(gain[0] + (gain[1] << 8));
	rtl92e_set_bb_reg(dev, rCCK0_TxFilter1, bMaskHWord, val);

	/* Bytes 2-5 -> TxFilter2. */
	val = (u32)(gain[2] + (gain[3] << 8) +
		    (gain[4] << 16) + (gain[5] << 24));
	rtl92e_set_bb_reg(dev, rCCK0_TxFilter2, bMaskDWord, val);

	/* Bytes 6-7 -> DebugPort low word. */
	val = (u32)(gain[6] + (gain[7] << 8));
	rtl92e_set_bb_reg(dev, rCCK0_DebugPort, bMaskLWord, val);
}
/* Program the CCK TX filter coefficients (thermal-meter tracking path)
 * from the swing row selected by priv->cck_index, using the channel-14
 * table when @bInCH14 is set.
 */
static void _rtl92e_dm_cck_tx_power_adjust_thermal_meter(struct net_device *dev,
							 bool bInCH14)
{
	struct r8192_priv *priv = rtllib_priv(dev);
	const u8 *swing = bInCH14 ? CCKSwingTable_Ch14[priv->cck_index] :
				    CCKSwingTable_Ch1_Ch13[priv->cck_index];
	u32 val;

	/* Bytes 0-1 -> TxFilter1 high word. */
	val = swing[0] + (swing[1] << 8);
	rtl92e_set_bb_reg(dev, rCCK0_TxFilter1, bMaskHWord, val);

	/* Bytes 2-5 -> TxFilter2. */
	val = swing[2] + (swing[3] << 8) +
	      (swing[4] << 16) + (swing[5] << 24);
	rtl92e_set_bb_reg(dev, rCCK0_TxFilter2, bMaskDWord, val);

	/* Bytes 6-7 -> DebugPort low word. */
	val = swing[6] + (swing[7] << 8);
	rtl92e_set_bb_reg(dev, rCCK0_DebugPort, bMaskLWord, val);
}
/* Adjust the CCK TX power using whichever tracking scheme this silicon
 * revision employs; @binch14 selects the channel-14 coefficient table.
 */
void rtl92e_dm_cck_txpower_adjust(struct net_device *dev, bool binch14)
{
	struct r8192_priv *priv = rtllib_priv(dev);

	if (priv->ic_cut < IC_VersionCut_D)
		_rtl92e_dm_cck_tx_power_adjust_thermal_meter(dev, binch14);
	else
		_rtl92e_dm_cck_tx_power_adjust_tssi(dev, binch14);
}
/* After a chip reset, re-program the path-A and path-C OFDM BB gain and
 * the CCK TX power from the tracking indexes saved before the reset.
 */
static void _rtl92e_dm_tx_power_reset_recovery(struct net_device *dev)
{
	struct r8192_priv *priv = rtllib_priv(dev);

	rtl92e_set_bb_reg(dev, rOFDM0_XATxIQImbalance, bMaskDWord,
			  dm_tx_bb_gain[priv->rfa_txpowertrackingindex]);
	rtl92e_dm_cck_txpower_adjust(dev, priv->bcck_in_ch14);
	rtl92e_set_bb_reg(dev, rOFDM0_XCTxIQImbalance, bMaskDWord,
			  dm_tx_bb_gain[priv->rfc_txpowertrackingindex]);
}
/* Restore dynamic-mechanism state (rate-adaptive RATR mask, TX power
 * tracking, initial gain) after the interface comes back up.  Only runs
 * when the interface is up, rate adaptation is enabled, and the device is
 * in 802.11n 2.4 GHz mode.
 */
void rtl92e_dm_restore_state(struct net_device *dev)
{
	struct r8192_priv *priv = rtllib_priv(dev);
	u32 reg_ratr = priv->rate_adaptive.last_ratr;
	u32 ratr_value;

	if (!priv->up)
		return;

	if (priv->rate_adaptive.rate_adaptive_disabled)
		return;
	if (priv->rtllib->mode != WIRELESS_MODE_N_24G)
		return;
	ratr_value = reg_ratr;
	/* Mask out the 2-spatial-stream OFDM rates before writing RATR0. */
	ratr_value &= ~(RATE_ALL_OFDM_2SS);
	rtl92e_writel(dev, RATR0, ratr_value);
	rtl92e_writeb(dev, UFWP, 1);
	if (priv->tx_pwr_tracking_init && priv->btxpower_tracking)
		_rtl92e_dm_tx_power_reset_recovery(dev);
	_rtl92e_dm_bb_initialgain_restore(dev);
}
/* Write the saved initial-gain values (taken by rtl92e_dm_backup_state())
 * back to the per-path AGC core registers and the CCK CCA register.
 * Skipped entirely when DIG runs in BY_RSSI mode, where the driver owns
 * the gain and no firmware restore is needed.
 */
static void _rtl92e_dm_bb_initialgain_restore(struct net_device *dev)
{
	struct r8192_priv *priv = rtllib_priv(dev);
	u32 bit_mask = 0x7f;	/* AGC core gain field is 7 bits wide */

	if (dm_digtable.dig_algorithm == DIG_ALGO_BY_RSSI)
		return;

	/* NOTE(review): the 0x8/0x1 writes to UFWP byte1 bracket the restore
	 * in both backup and restore paths — presumably they pause/resume the
	 * firmware's own gain control; confirm against firmware docs.
	 */
	rtl92e_set_bb_reg(dev, UFWP, bMaskByte1, 0x8);
	rtl92e_set_bb_reg(dev, rOFDM0_XAAGCCore1, bit_mask,
			  (u32)priv->initgain_backup.xaagccore1);
	rtl92e_set_bb_reg(dev, rOFDM0_XBAGCCore1, bit_mask,
			  (u32)priv->initgain_backup.xbagccore1);
	rtl92e_set_bb_reg(dev, rOFDM0_XCAGCCore1, bit_mask,
			  (u32)priv->initgain_backup.xcagccore1);
	rtl92e_set_bb_reg(dev, rOFDM0_XDAGCCore1, bit_mask,
			  (u32)priv->initgain_backup.xdagccore1);
	bit_mask = bMaskByte2;
	rtl92e_set_bb_reg(dev, rCCK0_CCA, bit_mask,
			  (u32)priv->initgain_backup.cca);
	rtl92e_set_bb_reg(dev, UFWP, bMaskByte1, 0x1);
}
/* Save the current per-path AGC core gains and the CCK CCA byte into
 * priv->initgain_backup so _rtl92e_dm_bb_initialgain_restore() can put
 * them back later.  Also clears the software fsync flag.  Skipped when
 * DIG runs in BY_RSSI mode (no backup needed there).
 */
void rtl92e_dm_backup_state(struct net_device *dev)
{
	struct r8192_priv *priv = rtllib_priv(dev);
	u32 bit_mask = bMaskByte0;

	priv->bswitch_fsync = false;

	if (dm_digtable.dig_algorithm == DIG_ALGO_BY_RSSI)
		return;

	rtl92e_set_bb_reg(dev, UFWP, bMaskByte1, 0x8);
	priv->initgain_backup.xaagccore1 = rtl92e_get_bb_reg(dev, rOFDM0_XAAGCCore1, bit_mask);
	priv->initgain_backup.xbagccore1 = rtl92e_get_bb_reg(dev, rOFDM0_XBAGCCore1, bit_mask);
	priv->initgain_backup.xcagccore1 = rtl92e_get_bb_reg(dev, rOFDM0_XCAGCCore1, bit_mask);
	priv->initgain_backup.xdagccore1 = rtl92e_get_bb_reg(dev, rOFDM0_XDAGCCore1, bit_mask);
	bit_mask = bMaskByte2;
	priv->initgain_backup.cca = (u8)rtl92e_get_bb_reg(dev, rCCK0_CCA, bit_mask);
}
/* Initialize the global DIG (dynamic initial gain) table to its default
 * state: BY_RSSI algorithm, disconnected, default RSSI thresholds and
 * gain range.  NETCORE customer builds get a different minimum RX gain.
 */
static void _rtl92e_dm_dig_init(struct net_device *dev)
{
	struct r8192_priv *priv = rtllib_priv(dev);

	dm_digtable.dig_enable_flag = true;
	dm_digtable.dig_algorithm = DIG_ALGO_BY_RSSI;
	dm_digtable.dig_algorithm_switch = 0;

	/* DM_STA_DIG_MAX means "no state decided yet". */
	dm_digtable.dig_state = DM_STA_DIG_MAX;
	dm_digtable.dig_highpwr_state = DM_STA_DIG_MAX;
	dm_digtable.cur_sta_connect_state = DIG_STA_DISCONNECT;
	dm_digtable.pre_sta_connect_state = DIG_STA_DISCONNECT;

	dm_digtable.rssi_low_thresh = DM_DIG_THRESH_LOW;
	dm_digtable.rssi_high_thresh = DM_DIG_THRESH_HIGH;

	dm_digtable.rssi_high_power_lowthresh = DM_DIG_HIGH_PWR_THRESH_LOW;
	dm_digtable.rssi_high_power_highthresh = DM_DIG_HIGH_PWR_THRESH_HIGH;

	dm_digtable.rssi_val = 50;	/* neutral starting RSSI */
	dm_digtable.backoff_val = DM_DIG_BACKOFF;
	dm_digtable.rx_gain_range_max = DM_DIG_MAX;
	if (priv->customer_id == RT_CID_819X_NETCORE)
		dm_digtable.rx_gain_range_min = DM_DIG_MIN_Netcore;
	else
		dm_digtable.rx_gain_range_min = DM_DIG_MIN;
}
/* Dispatch to the configured DIG algorithm.  Does nothing when DIG is
 * disabled or the algorithm id is not one of the two known values.
 *
 * Fix vs. original: the dead trailing "else return;" at the end of this
 * void function has been removed.
 */
static void _rtl92e_dm_ctrl_initgain_byrssi(struct net_device *dev)
{
	if (!dm_digtable.dig_enable_flag)
		return;

	if (dm_digtable.dig_algorithm == DIG_ALGO_BY_FALSE_ALARM)
		_rtl92e_dm_ctrl_initgain_byrssi_false_alarm(dev);
	else if (dm_digtable.dig_algorithm == DIG_ALGO_BY_RSSI)
		_rtl92e_dm_ctrl_initgain_byrssi_driver(dev);
}
/*-----------------------------------------------------------------------------
* Function: dm_CtrlInitGainBeforeConnectByRssiAndFalseAlarm()
*
* Overview: Driver monitor RSSI and False Alarm to change initial gain.
 *		Only change initial gain during link in progress.
*
* Input: IN PADAPTER pAdapter
*
* Output: NONE
*
* Return: NONE
*
* Revised History:
* When Who Remark
* 03/04/2009 hpfan Create Version 0.
*
******************************************************************************/
/* Driver-controlled DIG (BY_RSSI algorithm): take over gain control from
 * the firmware on the first few invocations, record the link state and the
 * smoothed RSSI, then run the three sub-mechanisms (initial gain, power
 * detection threshold, CS ratio).
 */
static void _rtl92e_dm_ctrl_initgain_byrssi_driver(struct net_device *dev)
{
	struct r8192_priv *priv = rtllib_priv(dev);
	u8 i;
	static u8 fw_dig;	/* counts firmware-DIG shutdown attempts */

	if (!dm_digtable.dig_enable_flag)
		return;

	if (dm_digtable.dig_algorithm_switch)
		fw_dig = 0;
	if (fw_dig <= 3) {
		/* Written three times in a row — NOTE(review): presumably to
		 * make sure the firmware really latches the value; confirm.
		 */
		for (i = 0; i < 3; i++)
			rtl92e_set_bb_reg(dev, UFWP, bMaskByte1, 0x8);
		fw_dig++;
		dm_digtable.dig_state = DM_STA_DIG_OFF;
	}

	if (priv->rtllib->link_state == MAC80211_LINKED)
		dm_digtable.cur_sta_connect_state = DIG_STA_CONNECT;
	else
		dm_digtable.cur_sta_connect_state = DIG_STA_DISCONNECT;

	dm_digtable.rssi_val = priv->undecorated_smoothed_pwdb;
	_rtl92e_dm_initial_gain(dev);
	_rtl92e_dm_pd_th(dev);
	_rtl92e_dm_cs_ratio(dev);
	if (dm_digtable.dig_algorithm_switch)
		dm_digtable.dig_algorithm_switch = 0;
	dm_digtable.pre_sta_connect_state = dm_digtable.cur_sta_connect_state;
}
/* Firmware-assisted DIG (BY_FALSE_ALARM algorithm): switch the receiver
 * between a low-gain "DIG off" setting (weak signal) and a high-gain
 * "DIG on" setting (strong signal) based on the smoothed RSSI, with
 * hysteresis between rssi_low_thresh and rssi_high_thresh.
 */
static void _rtl92e_dm_ctrl_initgain_byrssi_false_alarm(struct net_device *dev)
{
	struct r8192_priv *priv = rtllib_priv(dev);
	static u32 reset_cnt;	/* last seen priv->reset_count */
	u8 i;

	if (!dm_digtable.dig_enable_flag)
		return;

	if (dm_digtable.dig_algorithm_switch) {
		dm_digtable.dig_state = DM_STA_DIG_MAX;
		/* Hand gain control back to the firmware (repeated write). */
		for (i = 0; i < 3; i++)
			rtl92e_set_bb_reg(dev, UFWP, bMaskByte1, 0x1);
		dm_digtable.dig_algorithm_switch = 0;
	}

	if (priv->rtllib->link_state != MAC80211_LINKED)
		return;

	/* Inside the hysteresis window: keep the current state. */
	if ((priv->undecorated_smoothed_pwdb > dm_digtable.rssi_low_thresh) &&
	    (priv->undecorated_smoothed_pwdb < dm_digtable.rssi_high_thresh))
		return;
	if (priv->undecorated_smoothed_pwdb <= dm_digtable.rssi_low_thresh) {
		/* Weak signal: drop to the low-gain setting, unless it is
		 * already applied and no adapter reset happened since.
		 */
		if (dm_digtable.dig_state == DM_STA_DIG_OFF &&
		    (priv->reset_count == reset_cnt))
			return;
		reset_cnt = priv->reset_count;

		dm_digtable.dig_highpwr_state = DM_STA_DIG_MAX;
		dm_digtable.dig_state = DM_STA_DIG_OFF;

		rtl92e_set_bb_reg(dev, UFWP, bMaskByte1, 0x8);

		rtl92e_writeb(dev, rOFDM0_XAAGCCore1, 0x17);
		rtl92e_writeb(dev, rOFDM0_XBAGCCore1, 0x17);
		rtl92e_writeb(dev, rOFDM0_XCAGCCore1, 0x17);
		rtl92e_writeb(dev, rOFDM0_XDAGCCore1, 0x17);

		if (priv->current_chnl_bw != HT_CHANNEL_WIDTH_20)
			rtl92e_writeb(dev, (rOFDM0_XATxAFE + 3), 0x00);
		else
			rtl92e_writeb(dev, rOFDM0_RxDetector1, 0x42);

		rtl92e_writeb(dev, 0xa0a, 0x08);

		return;
	}

	if (priv->undecorated_smoothed_pwdb >= dm_digtable.rssi_high_thresh) {
		u8 reset_flag = 0;

		/* Strong signal: raise the gain; skip if already applied and
		 * no reset happened, but still run the high-power tweak.
		 */
		if (dm_digtable.dig_state == DM_STA_DIG_ON &&
		    (priv->reset_count == reset_cnt)) {
			_rtl92e_dm_ctrl_initgain_byrssi_highpwr(dev);
			return;
		}
		if (priv->reset_count != reset_cnt)
			reset_flag = 1;

		reset_cnt = priv->reset_count;

		dm_digtable.dig_state = DM_STA_DIG_ON;

		/* After a reset use a slightly higher gain (0x2c vs 0x20). */
		if (reset_flag == 1) {
			rtl92e_writeb(dev, rOFDM0_XAAGCCore1, 0x2c);
			rtl92e_writeb(dev, rOFDM0_XBAGCCore1, 0x2c);
			rtl92e_writeb(dev, rOFDM0_XCAGCCore1, 0x2c);
			rtl92e_writeb(dev, rOFDM0_XDAGCCore1, 0x2c);
		} else {
			rtl92e_writeb(dev, rOFDM0_XAAGCCore1, 0x20);
			rtl92e_writeb(dev, rOFDM0_XBAGCCore1, 0x20);
			rtl92e_writeb(dev, rOFDM0_XCAGCCore1, 0x20);
			rtl92e_writeb(dev, rOFDM0_XDAGCCore1, 0x20);
		}

		if (priv->current_chnl_bw != HT_CHANNEL_WIDTH_20)
			rtl92e_writeb(dev, (rOFDM0_XATxAFE + 3), 0x20);
		else
			rtl92e_writeb(dev, rOFDM0_RxDetector1, 0x44);

		rtl92e_writeb(dev, 0xa0a, 0xcd);

		rtl92e_set_bb_reg(dev, UFWP, bMaskByte1, 0x1);
	}
	_rtl92e_dm_ctrl_initgain_byrssi_highpwr(dev);
}
/* High-power refinement of the false-alarm DIG: when the smoothed RSSI is
 * above rssi_high_power_highthresh, further reduce receiver sensitivity;
 * when it drops back below rssi_high_power_lowthresh, restore the normal
 * "DIG on" setting.  The window between the two thresholds is hysteresis.
 */
static void _rtl92e_dm_ctrl_initgain_byrssi_highpwr(struct net_device *dev)
{
	struct r8192_priv *priv = rtllib_priv(dev);
	static u32 reset_cnt_highpwr;	/* last seen priv->reset_count */

	if ((priv->undecorated_smoothed_pwdb >
	     dm_digtable.rssi_high_power_lowthresh) &&
	    (priv->undecorated_smoothed_pwdb <
	     dm_digtable.rssi_high_power_highthresh))
		return;

	if (priv->undecorated_smoothed_pwdb >=
	    dm_digtable.rssi_high_power_highthresh) {
		if (dm_digtable.dig_highpwr_state == DM_STA_DIG_ON &&
		    (priv->reset_count == reset_cnt_highpwr))
			return;
		dm_digtable.dig_highpwr_state = DM_STA_DIG_ON;

		/* Register written depends on the channel bandwidth. */
		if (priv->current_chnl_bw != HT_CHANNEL_WIDTH_20)
			rtl92e_writeb(dev, (rOFDM0_XATxAFE + 3), 0x10);
		else
			rtl92e_writeb(dev, rOFDM0_RxDetector1, 0x43);
	} else {
		if (dm_digtable.dig_highpwr_state == DM_STA_DIG_OFF &&
		    (priv->reset_count == reset_cnt_highpwr))
			return;
		dm_digtable.dig_highpwr_state = DM_STA_DIG_OFF;

		/* Only restore the normal setting while still above the
		 * plain high-RSSI threshold.
		 */
		if ((priv->undecorated_smoothed_pwdb <
		     dm_digtable.rssi_high_power_lowthresh) &&
		    (priv->undecorated_smoothed_pwdb >=
		     dm_digtable.rssi_high_thresh)) {
			if (priv->current_chnl_bw != HT_CHANNEL_WIDTH_20)
				rtl92e_writeb(dev, (rOFDM0_XATxAFE + 3), 0x20);
			else
				rtl92e_writeb(dev, rOFDM0_RxDetector1, 0x44);
		}
	}
	reset_cnt_highpwr = priv->reset_count;
}
/* Driver-DIG sub-mechanism: compute the target initial gain from the
 * current RSSI (RSSI + 10 - backoff, clamped to the configured range) when
 * connected, or fall back to the EEPROM default / previous value when not,
 * and write it to all four AGC core registers if it changed (or a write is
 * forced by scan, reset, or external modification of the register).
 */
static void _rtl92e_dm_initial_gain(struct net_device *dev)
{
	struct r8192_priv *priv = rtllib_priv(dev);
	u8 initial_gain = 0;
	static u8 initialized, force_write;
	static u32 reset_cnt;	/* last seen priv->reset_count */

	if (dm_digtable.dig_algorithm_switch) {
		initialized = 0;
		reset_cnt = 0;
	}

	/* During a scan, defer: force a write on the next invocation. */
	if (rtllib_act_scanning(priv->rtllib, true)) {
		force_write = 1;
		return;
	}

	if (dm_digtable.pre_sta_connect_state == dm_digtable.cur_sta_connect_state) {
		if (dm_digtable.cur_sta_connect_state == DIG_STA_CONNECT) {
			long gain_range = dm_digtable.rssi_val + 10 -
					  dm_digtable.backoff_val;
			gain_range = clamp_t(long, gain_range,
					     dm_digtable.rx_gain_range_min,
					     dm_digtable.rx_gain_range_max);
			dm_digtable.cur_ig_value = gain_range;
		} else {
			if (dm_digtable.cur_ig_value == 0)
				dm_digtable.cur_ig_value = priv->def_initial_gain[0];
			else
				dm_digtable.cur_ig_value = dm_digtable.pre_ig_value;
		}
	} else {
		/* Connection state changed: restart from the default gain. */
		dm_digtable.cur_ig_value = priv->def_initial_gain[0];
		dm_digtable.pre_ig_value = 0;
	}

	if (priv->reset_count != reset_cnt) {
		force_write = 1;
		reset_cnt = priv->reset_count;
	}

	/* Someone else changed the register behind our back: rewrite it. */
	if (dm_digtable.pre_ig_value != rtl92e_readb(dev, rOFDM0_XAAGCCore1))
		force_write = 1;

	if ((dm_digtable.pre_ig_value != dm_digtable.cur_ig_value)
	    || !initialized || force_write) {
		initial_gain = dm_digtable.cur_ig_value;
		rtl92e_writeb(dev, rOFDM0_XAAGCCore1, initial_gain);
		rtl92e_writeb(dev, rOFDM0_XBAGCCore1, initial_gain);
		rtl92e_writeb(dev, rOFDM0_XCAGCCore1, initial_gain);
		rtl92e_writeb(dev, rOFDM0_XDAGCCore1, initial_gain);
		dm_digtable.pre_ig_value = dm_digtable.cur_ig_value;
		initialized = 1;
		force_write = 0;
	}
}
/* Driver-DIG sub-mechanism: pick the OFDM power-detection threshold state
 * (low / normal / high power) from the current RSSI and program the
 * matching register value.  Which register is written depends on the
 * channel bandwidth.  State is only written to hardware when it changes
 * (or on the first few calls / after a reset).
 */
static void _rtl92e_dm_pd_th(struct net_device *dev)
{
	struct r8192_priv *priv = rtllib_priv(dev);
	static u8 initialized, force_write;
	static u32 reset_cnt;	/* last seen priv->reset_count */

	if (dm_digtable.dig_algorithm_switch) {
		initialized = 0;
		reset_cnt = 0;
	}

	if (dm_digtable.pre_sta_connect_state == dm_digtable.cur_sta_connect_state) {
		if (dm_digtable.cur_sta_connect_state == DIG_STA_CONNECT) {
			if (dm_digtable.rssi_val >=
			    dm_digtable.rssi_high_power_highthresh)
				dm_digtable.curpd_thstate =
							DIG_PD_AT_HIGH_POWER;
			else if (dm_digtable.rssi_val <=
				 dm_digtable.rssi_low_thresh)
				dm_digtable.curpd_thstate =
							DIG_PD_AT_LOW_POWER;
			else if ((dm_digtable.rssi_val >=
				  dm_digtable.rssi_high_thresh) &&
				 (dm_digtable.rssi_val <
				  dm_digtable.rssi_high_power_lowthresh))
				dm_digtable.curpd_thstate =
							DIG_PD_AT_NORMAL_POWER;
			else
				/* Inside a hysteresis window: keep state. */
				dm_digtable.curpd_thstate =
						dm_digtable.prepd_thstate;
		} else {
			dm_digtable.curpd_thstate = DIG_PD_AT_LOW_POWER;
		}
	} else {
		dm_digtable.curpd_thstate = DIG_PD_AT_LOW_POWER;
	}

	if (priv->reset_count != reset_cnt) {
		force_write = 1;
		reset_cnt = priv->reset_count;
	}

	/* "initialized <= 3": force the write on the first few calls. */
	if ((dm_digtable.prepd_thstate != dm_digtable.curpd_thstate) ||
	    (initialized <= 3) || force_write) {
		if (dm_digtable.curpd_thstate == DIG_PD_AT_LOW_POWER) {
			if (priv->current_chnl_bw != HT_CHANNEL_WIDTH_20)
				rtl92e_writeb(dev, (rOFDM0_XATxAFE + 3), 0x00);
			else
				rtl92e_writeb(dev, rOFDM0_RxDetector1, 0x42);
		} else if (dm_digtable.curpd_thstate ==
			   DIG_PD_AT_NORMAL_POWER) {
			if (priv->current_chnl_bw != HT_CHANNEL_WIDTH_20)
				rtl92e_writeb(dev, (rOFDM0_XATxAFE + 3), 0x20);
			else
				rtl92e_writeb(dev, rOFDM0_RxDetector1, 0x44);
		} else if (dm_digtable.curpd_thstate == DIG_PD_AT_HIGH_POWER) {
			if (priv->current_chnl_bw != HT_CHANNEL_WIDTH_20)
				rtl92e_writeb(dev, (rOFDM0_XATxAFE + 3), 0x10);
			else
				rtl92e_writeb(dev, rOFDM0_RxDetector1, 0x43);
		}
		dm_digtable.prepd_thstate = dm_digtable.curpd_thstate;
		if (initialized <= 3)
			initialized++;
		force_write = 0;
	}
}
/* Driver-DIG sub-mechanism: select the CCK carrier-sense ratio (register
 * 0xa0a) based on RSSI — 0xcd above the high threshold, 0x08 below the low
 * threshold, unchanged in between.  Hardware is written only on change,
 * first call, or after a reset.
 */
static void _rtl92e_dm_cs_ratio(struct net_device *dev)
{
	struct r8192_priv *priv = rtllib_priv(dev);
	static u8 initialized, force_write;
	static u32 reset_cnt;	/* last seen priv->reset_count */

	if (dm_digtable.dig_algorithm_switch) {
		initialized = 0;
		reset_cnt = 0;
	}

	if (dm_digtable.pre_sta_connect_state == dm_digtable.cur_sta_connect_state) {
		if (dm_digtable.cur_sta_connect_state == DIG_STA_CONNECT) {
			if (dm_digtable.rssi_val <= dm_digtable.rssi_low_thresh)
				dm_digtable.curcs_ratio_state = DIG_CS_RATIO_LOWER;
			else if (dm_digtable.rssi_val >= dm_digtable.rssi_high_thresh)
				dm_digtable.curcs_ratio_state = DIG_CS_RATIO_HIGHER;
			else
				/* Hysteresis window: keep previous state. */
				dm_digtable.curcs_ratio_state = dm_digtable.precs_ratio_state;
		} else {
			dm_digtable.curcs_ratio_state = DIG_CS_RATIO_LOWER;
		}
	} else {
		dm_digtable.curcs_ratio_state = DIG_CS_RATIO_LOWER;
	}

	if (priv->reset_count != reset_cnt) {
		force_write = 1;
		reset_cnt = priv->reset_count;
	}

	if ((dm_digtable.precs_ratio_state != dm_digtable.curcs_ratio_state) ||
	    !initialized || force_write) {
		if (dm_digtable.curcs_ratio_state == DIG_CS_RATIO_LOWER)
			rtl92e_writeb(dev, 0xa0a, 0x08);
		else if (dm_digtable.curcs_ratio_state == DIG_CS_RATIO_HIGHER)
			rtl92e_writeb(dev, 0xa0a, 0xcd);
		dm_digtable.precs_ratio_state = dm_digtable.curcs_ratio_state;
		initialized = 1;
		force_write = 0;
	}
}
/* Reset the EDCA-turbo bookkeeping to its initial state: turbo mode off,
 * no non-best-effort packets observed yet, and the download-biased
 * parameter set not currently selected.
 */
void rtl92e_dm_init_edca_turbo(struct net_device *dev)
{
	struct r8192_priv *priv = rtllib_priv(dev);

	priv->bis_cur_rdlstate = false;
	priv->bcurrent_turbo_EDCA = false;
	priv->rtllib->bis_any_nonbepkts = false;
}
/* Periodically tune the best-effort EDCA parameters ("EDCA turbo") based
 * on the TX/RX byte ratio since the previous invocation.  Traffic heavily
 * biased one way (4:1) selects an upload- or download-optimized parameter
 * set from the per-IOT-peer tables; the bias direction is flipped when the
 * peer requests HT_IOT_ACT_EDCA_BIAS_ON_RX.  Turbo is dropped back to the
 * stock AC_BE parameters once non-BE traffic is seen.
 */
static void _rtl92e_dm_check_edca_turbo(struct net_device *dev)
{
	struct r8192_priv *priv = rtllib_priv(dev);
	struct rt_hi_throughput *ht_info = priv->rtllib->ht_info;

	/* Byte counters remembered across invocations to form deltas. */
	static unsigned long lastTxOkCnt;
	static unsigned long lastRxOkCnt;
	unsigned long curTxOkCnt = 0;
	unsigned long curRxOkCnt = 0;

	if (priv->rtllib->iw_mode == IW_MODE_ADHOC)
		goto dm_CheckEdcaTurbo_EXIT;
	if (priv->rtllib->link_state != MAC80211_LINKED)
		goto dm_CheckEdcaTurbo_EXIT;
	if (priv->rtllib->ht_info->iot_action & HT_IOT_ACT_DISABLE_EDCA_TURBO)
		goto dm_CheckEdcaTurbo_EXIT;

	if (!priv->rtllib->bis_any_nonbepkts) {
		curTxOkCnt = priv->stats.txbytesunicast - lastTxOkCnt;
		curRxOkCnt = priv->stats.rxbytesunicast - lastRxOkCnt;
		if (ht_info->iot_action & HT_IOT_ACT_EDCA_BIAS_ON_RX) {
			/* RX-biased peer: treat TX-dominant as "upload". */
			if (curTxOkCnt > 4 * curRxOkCnt) {
				if (priv->bis_cur_rdlstate ||
				    !priv->bcurrent_turbo_EDCA) {
					rtl92e_writel(dev, EDCAPARA_BE,
						      edca_setting_UL[ht_info->IOTPeer]);
					priv->bis_cur_rdlstate = false;
				}
			} else {
				if (!priv->bis_cur_rdlstate ||
				    !priv->bcurrent_turbo_EDCA) {
					if (priv->rtllib->mode == WIRELESS_MODE_G)
						rtl92e_writel(dev, EDCAPARA_BE,
							      edca_setting_DL_GMode[ht_info->IOTPeer]);
					else
						rtl92e_writel(dev, EDCAPARA_BE,
							      edca_setting_DL[ht_info->IOTPeer]);
					priv->bis_cur_rdlstate = true;
				}
			}
			priv->bcurrent_turbo_EDCA = true;
		} else {
			/* Default: RX-dominant traffic selects download. */
			if (curRxOkCnt > 4 * curTxOkCnt) {
				if (!priv->bis_cur_rdlstate ||
				    !priv->bcurrent_turbo_EDCA) {
					if (priv->rtllib->mode == WIRELESS_MODE_G)
						rtl92e_writel(dev, EDCAPARA_BE,
							      edca_setting_DL_GMode[ht_info->IOTPeer]);
					else
						rtl92e_writel(dev, EDCAPARA_BE,
							      edca_setting_DL[ht_info->IOTPeer]);
					priv->bis_cur_rdlstate = true;
				}
			} else {
				if (priv->bis_cur_rdlstate ||
				    !priv->bcurrent_turbo_EDCA) {
					rtl92e_writel(dev, EDCAPARA_BE,
						      edca_setting_UL[ht_info->IOTPeer]);
					priv->bis_cur_rdlstate = false;
				}
			}
			priv->bcurrent_turbo_EDCA = true;
		}
	} else {
		/* Non-BE traffic present: restore the standard parameters. */
		if (priv->bcurrent_turbo_EDCA) {
			u8 tmp = AC0_BE;

			priv->rtllib->SetHwRegHandler(dev, HW_VAR_AC_PARAM,
						      (u8 *)(&tmp));
			priv->bcurrent_turbo_EDCA = false;
		}
	}

dm_CheckEdcaTurbo_EXIT:
	/* Always reset the flag and refresh the deltas' baseline. */
	priv->rtllib->bis_any_nonbepkts = false;
	lastTxOkCnt = priv->stats.txbytesunicast;
	lastRxOkCnt = priv->stats.rxbytesunicast;
}
/* Enable the CTS-to-self protection mechanism by default.
 *
 * Fix vs. original: dropped the useless "(struct net_device *)dev" cast —
 * @dev already has that exact type.
 */
static void _rtl92e_dm_init_cts_to_self(struct net_device *dev)
{
	struct r8192_priv *priv = rtllib_priv(dev);

	priv->rtllib->bCTSToSelfEnable = true;
}
/* Decide whether to force CTS-to-self protection.  Only applied against
 * Broadcom peers: when traffic since the last call is download-dominant
 * (RX > 4*TX) the forced-CTS2SELF flag is cleared, otherwise it is set.
 * When the mechanism is disabled the flag is always cleared.
 *
 * Fix vs. original: dropped the useless "(struct net_device *)dev" cast —
 * @dev already has that exact type.
 */
static void _rtl92e_dm_cts_to_self(struct net_device *dev)
{
	struct r8192_priv *priv = rtllib_priv(dev);
	struct rt_hi_throughput *ht_info = priv->rtllib->ht_info;

	/* Byte counters remembered across invocations to form deltas. */
	static unsigned long lastTxOkCnt;
	static unsigned long lastRxOkCnt;
	unsigned long curTxOkCnt = 0;
	unsigned long curRxOkCnt = 0;

	if (!priv->rtllib->bCTSToSelfEnable) {
		ht_info->iot_action &= ~HT_IOT_ACT_FORCED_CTS2SELF;
		return;
	}
	if (ht_info->IOTPeer == HT_IOT_PEER_BROADCOM) {
		curTxOkCnt = priv->stats.txbytesunicast - lastTxOkCnt;
		curRxOkCnt = priv->stats.rxbytesunicast - lastRxOkCnt;
		if (curRxOkCnt > 4 * curTxOkCnt)
			ht_info->iot_action &= ~HT_IOT_ACT_FORCED_CTS2SELF;
		else
			ht_info->iot_action |= HT_IOT_ACT_FORCED_CTS2SELF;

		lastTxOkCnt = priv->stats.txbytesunicast;
		lastRxOkCnt = priv->stats.rxbytesunicast;
	}
}
static void _rtl92e_dm_check_rf_ctrl_gpio(void *data)
{
struct r8192_priv *priv = container_of_dwork_rsl(data,
struct r8192_priv, gpio_change_rf_wq);
struct net_device *dev = priv->rtllib->dev;
u8 tmp1byte;
enum rt_rf_power_state rf_power_state_to_set;
bool bActuallySet = false;
if ((priv->up_first_time == 1) || (priv->being_init_adapter))
return;
if (priv->bfirst_after_down)
return;
tmp1byte = rtl92e_readb(dev, GPI);
rf_power_state_to_set = (tmp1byte & BIT1) ? rf_on : rf_off;
if (priv->hw_radio_off && (rf_power_state_to_set == rf_on)) {
netdev_info(dev, "gpiochangeRF - HW Radio ON\n");
priv->hw_radio_off = false;
bActuallySet = true;
} else if (!priv->hw_radio_off && (rf_power_state_to_set == rf_off)) {
netdev_info(dev, "gpiochangeRF - HW Radio OFF\n");
priv->hw_radio_off = true;
bActuallySet = true;
}
if (bActuallySet) {
mdelay(1000);
priv->hw_rf_off_action = 1;
rtl92e_set_rf_state(dev, rf_power_state_to_set, RF_CHANGE_BY_HW);
}
}
/* Workqueue handler: read register 0xc04 to learn which RF RX paths the
 * hardware currently has enabled, mirror that into brfpath_rxenable[],
 * then (if RX path selection is enabled) run the RSSI-based path chooser.
 */
void rtl92e_dm_rf_pathcheck_wq(void *data)
{
	struct r8192_priv *priv = container_of_dwork_rsl(data,
							 struct r8192_priv,
							 rfpath_check_wq);
	struct net_device *dev = priv->rtllib->dev;
	u8 path_mask, i;

	path_mask = rtl92e_readb(dev, 0xc04);
	for (i = 0; i < RF90_PATH_MAX; i++)
		priv->brfpath_rxenable[i] = !!(path_mask & (0x01 << i));

	if (!dm_rx_path_sel_table.enable)
		return;

	_rtl92e_dm_rx_path_sel_byrssi(dev);
}
/* Initialize the RX path selection table: enable the mechanism, set the
 * RSSI thresholds, pick the CCK selection method (NETCORE builds use
 * version 2), and seed the per-path statistics with neutral values.
 */
static void _rtl92e_dm_init_rx_path_selection(struct net_device *dev)
{
	u8 i;
	struct r8192_priv *priv = rtllib_priv(dev);

	dm_rx_path_sel_table.enable = 1;
	dm_rx_path_sel_table.ss_th_low = RX_PATH_SEL_SS_TH_LOW;
	dm_rx_path_sel_table.diff_th = RX_PATH_SEL_DIFF_TH;
	if (priv->customer_id == RT_CID_819X_NETCORE)
		dm_rx_path_sel_table.cck_method = CCK_Rx_Version_2;
	else
		dm_rx_path_sel_table.cck_method = CCK_Rx_Version_1;
	dm_rx_path_sel_table.disabled_rf = 0;
	for (i = 0; i < 4; i++) {
		dm_rx_path_sel_table.rf_rssi[i] = 50;		/* neutral RSSI */
		dm_rx_path_sel_table.cck_pwdb_sta[i] = -64;	/* "no data" marker */
		dm_rx_path_sel_table.rf_enable_rssi_th[i] = 100;
	}
}
/* NOTE(review): this macro captures the locals cur_cck_pwdb,
 * tmp_cck_max_pwdb and tmp_cck_sec_pwdb of the function below — it is only
 * usable inside _rtl92e_dm_rx_path_sel_byrssi().
 */
#define PWDB_IN_RANGE	((cur_cck_pwdb < tmp_cck_max_pwdb) &&	\
			 (cur_cck_pwdb > tmp_cck_sec_pwdb))

/* Select RX paths by RSSI: rank the enabled RF paths by OFDM RSSI (and,
 * for CCK method version 2, by CCK PWDB), disable the weakest path when it
 * trails the best by more than diff_th, route CCK reception to the two
 * strongest paths, and re-enable previously disabled paths once the best
 * RSSI exceeds their stored re-enable threshold.
 */
static void _rtl92e_dm_rx_path_sel_byrssi(struct net_device *dev)
{
	struct r8192_priv *priv = rtllib_priv(dev);
	u8 i, max_rssi_index = 0, min_rssi_index = 0;
	u8 sec_rssi_index = 0, rf_num = 0;
	u8 tmp_max_rssi = 0, tmp_min_rssi = 0, tmp_sec_rssi = 0;
	u8 cck_default_Rx = 0x2;
	u8 cck_optional_Rx = 0x3;
	long tmp_cck_max_pwdb = 0, tmp_cck_min_pwdb = 0, tmp_cck_sec_pwdb = 0;
	u8 cck_rx_ver2_max_index = 0;
	u8 cck_rx_ver2_sec_index = 0;
	u8 cur_rf_rssi;
	long cur_cck_pwdb;
	static u8 disabled_rf_cnt, cck_Rx_Path_initialized;
	u8 update_cck_rx_path;

	/* Lazily capture the hardware's current CCK RX path setting. */
	if (!cck_Rx_Path_initialized) {
		dm_rx_path_sel_table.cck_rx_path = (rtl92e_readb(dev, 0xa07) & 0xf);
		cck_Rx_Path_initialized = 1;
	}

	dm_rx_path_sel_table.disabled_rf = 0xf;
	dm_rx_path_sel_table.disabled_rf &= ~(rtl92e_readb(dev, 0xc04));

	if (priv->rtllib->mode == WIRELESS_MODE_B)
		dm_rx_path_sel_table.cck_method = CCK_Rx_Version_2;

	/* Pass 1: find max / second / min OFDM RSSI among enabled paths. */
	for (i = 0; i < RF90_PATH_MAX; i++) {
		dm_rx_path_sel_table.rf_rssi[i] = priv->stats.rx_rssi_percentage[i];

		if (priv->brfpath_rxenable[i]) {
			rf_num++;
			cur_rf_rssi = dm_rx_path_sel_table.rf_rssi[i];

			if (rf_num == 1) {
				max_rssi_index = min_rssi_index = sec_rssi_index = i;
				tmp_max_rssi = tmp_min_rssi = tmp_sec_rssi = cur_rf_rssi;
			} else if (rf_num == 2) {
				if (cur_rf_rssi >= tmp_max_rssi) {
					tmp_max_rssi = cur_rf_rssi;
					max_rssi_index = i;
				} else {
					tmp_sec_rssi = tmp_min_rssi = cur_rf_rssi;
					sec_rssi_index = min_rssi_index = i;
				}
			} else {
				if (cur_rf_rssi > tmp_max_rssi) {
					tmp_sec_rssi = tmp_max_rssi;
					sec_rssi_index = max_rssi_index;
					tmp_max_rssi = cur_rf_rssi;
					max_rssi_index = i;
				} else if (cur_rf_rssi == tmp_max_rssi) {
					/* Ties with max become the runner-up. */
					tmp_sec_rssi = cur_rf_rssi;
					sec_rssi_index = i;
				} else if ((cur_rf_rssi < tmp_max_rssi) &&
					   (cur_rf_rssi > tmp_sec_rssi)) {
					tmp_sec_rssi = cur_rf_rssi;
					sec_rssi_index = i;
				} else if (cur_rf_rssi == tmp_sec_rssi) {
					if (tmp_sec_rssi == tmp_min_rssi) {
						tmp_sec_rssi = cur_rf_rssi;
						sec_rssi_index = i;
					}
				} else if ((cur_rf_rssi < tmp_sec_rssi) &&
					   (cur_rf_rssi > tmp_min_rssi)) {
					;	/* between second and min: nothing to update */
				} else if (cur_rf_rssi == tmp_min_rssi) {
					if (tmp_sec_rssi == tmp_min_rssi) {
						tmp_min_rssi = cur_rf_rssi;
						min_rssi_index = i;
					}
				} else if (cur_rf_rssi < tmp_min_rssi) {
					tmp_min_rssi = cur_rf_rssi;
					min_rssi_index = i;
				}
			}
		}
	}

	rf_num = 0;
	/* Pass 2 (CCK method v2 only): rank enabled paths by CCK PWDB. */
	if (dm_rx_path_sel_table.cck_method == CCK_Rx_Version_2) {
		for (i = 0; i < RF90_PATH_MAX; i++) {
			if (priv->brfpath_rxenable[i]) {
				rf_num++;
				cur_cck_pwdb =
					 dm_rx_path_sel_table.cck_pwdb_sta[i];

				if (rf_num == 1) {
					cck_rx_ver2_max_index = i;
					cck_rx_ver2_sec_index = i;
					tmp_cck_max_pwdb = cur_cck_pwdb;
					tmp_cck_min_pwdb = cur_cck_pwdb;
					tmp_cck_sec_pwdb = cur_cck_pwdb;
				} else if (rf_num == 2) {
					if (cur_cck_pwdb >= tmp_cck_max_pwdb) {
						tmp_cck_max_pwdb = cur_cck_pwdb;
						cck_rx_ver2_max_index = i;
					} else {
						tmp_cck_sec_pwdb = cur_cck_pwdb;
						tmp_cck_min_pwdb = cur_cck_pwdb;
						cck_rx_ver2_sec_index = i;
					}
				} else {
					if (cur_cck_pwdb > tmp_cck_max_pwdb) {
						tmp_cck_sec_pwdb =
							 tmp_cck_max_pwdb;
						cck_rx_ver2_sec_index =
							 cck_rx_ver2_max_index;
						tmp_cck_max_pwdb = cur_cck_pwdb;
						cck_rx_ver2_max_index = i;
					} else if (cur_cck_pwdb ==
						   tmp_cck_max_pwdb) {
						tmp_cck_sec_pwdb = cur_cck_pwdb;
						cck_rx_ver2_sec_index = i;
					} else if (PWDB_IN_RANGE) {
						tmp_cck_sec_pwdb = cur_cck_pwdb;
						cck_rx_ver2_sec_index = i;
					} else if (cur_cck_pwdb ==
						   tmp_cck_sec_pwdb) {
						if (tmp_cck_sec_pwdb ==
						    tmp_cck_min_pwdb) {
							tmp_cck_sec_pwdb =
								 cur_cck_pwdb;
							cck_rx_ver2_sec_index =
								 i;
						}
					} else if ((cur_cck_pwdb < tmp_cck_sec_pwdb) &&
						   (cur_cck_pwdb > tmp_cck_min_pwdb)) {
						;	/* between second and min: nothing to update */
					} else if (cur_cck_pwdb == tmp_cck_min_pwdb) {
						if (tmp_cck_sec_pwdb == tmp_cck_min_pwdb)
							tmp_cck_min_pwdb = cur_cck_pwdb;
					} else if (cur_cck_pwdb < tmp_cck_min_pwdb) {
						tmp_cck_min_pwdb = cur_cck_pwdb;
					}
				}
			}
		}
	}

	update_cck_rx_path = 0;
	/* -64 is the "no data yet" seed value — only update on real data. */
	if (dm_rx_path_sel_table.cck_method == CCK_Rx_Version_2) {
		cck_default_Rx = cck_rx_ver2_max_index;
		cck_optional_Rx = cck_rx_ver2_sec_index;
		if (tmp_cck_max_pwdb != -64)
			update_cck_rx_path = 1;
	}

	/* Disable the weakest path if it lags the best by diff_th or more
	 * (at most two paths are ever disabled).
	 */
	if (tmp_min_rssi < dm_rx_path_sel_table.ss_th_low && disabled_rf_cnt < 2) {
		if ((tmp_max_rssi - tmp_min_rssi) >=
		     dm_rx_path_sel_table.diff_th) {
			/* Remember when to re-enable it: best RSSI + 5. */
			dm_rx_path_sel_table.rf_enable_rssi_th[min_rssi_index] =
				 tmp_max_rssi + 5;
			rtl92e_set_bb_reg(dev, rOFDM0_TRxPathEnable,
					  0x1 << min_rssi_index, 0x0);
			rtl92e_set_bb_reg(dev, rOFDM1_TRxPathEnable,
					  0x1 << min_rssi_index, 0x0);
			disabled_rf_cnt++;
		}
		if (dm_rx_path_sel_table.cck_method == CCK_Rx_Version_1) {
			cck_default_Rx = max_rssi_index;
			cck_optional_Rx = sec_rssi_index;
			if (tmp_max_rssi)
				update_cck_rx_path = 1;
		}
	}

	if (update_cck_rx_path) {
		dm_rx_path_sel_table.cck_rx_path = (cck_default_Rx << 2) |
						   (cck_optional_Rx);
		rtl92e_set_bb_reg(dev, rCCK0_AFESetting, 0x0f000000,
				  dm_rx_path_sel_table.cck_rx_path);
	}

	/* Re-enable disabled paths whose re-enable threshold is now met. */
	if (dm_rx_path_sel_table.disabled_rf) {
		for (i = 0; i < 4; i++) {
			if ((dm_rx_path_sel_table.disabled_rf >> i) & 0x1) {
				if (tmp_max_rssi >=
				    dm_rx_path_sel_table.rf_enable_rssi_th[i]) {
					rtl92e_set_bb_reg(dev,
							 rOFDM0_TRxPathEnable,
							 0x1 << i, 0x1);
					rtl92e_set_bb_reg(dev,
							 rOFDM1_TRxPathEnable,
							 0x1 << i, 0x1);
					dm_rx_path_sel_table.rf_enable_rssi_th[i]
						 = 100;
					disabled_rf_cnt--;
				}
			}
		}
	}
}
static void _rtl92e_dm_check_rx_path_selection(struct net_device *dev)
{
struct r8192_priv *priv = rtllib_priv(dev);
schedule_delayed_work(&priv->rfpath_check_wq, 0);
}
/* Initialize the frame-sync (fsync) mechanism defaults: 500 ms polling
 * interval, monitored rate bitmap, RSSI threshold, rate-diff thresholds,
 * and set up (but do not start) the fsync timer.
 */
static void _rtl92e_dm_init_fsync(struct net_device *dev)
{
	struct r8192_priv *priv = rtllib_priv(dev);

	priv->rtllib->fsync_time_interval = 500;
	priv->rtllib->fsync_rate_bitmap = 0x0f000800;
	priv->rtllib->fsync_rssi_threshold = 30;
	priv->rtllib->bfsync_enable = false;
	priv->rtllib->fsync_multiple_timeinterval = 3;
	priv->rtllib->fsync_firstdiff_ratethreshold = 100;
	priv->rtllib->fsync_seconddiff_ratethreshold = 200;
	priv->rtllib->fsync_state = Default_Fsync;

	timer_setup(&priv->fsync_timer, _rtl92e_dm_fsync_timer_callback, 0);
}
/* Tear down the fsync mechanism: stop the timer and wait for any running
 * callback to finish.
 */
static void _rtl92e_dm_deinit_fsync(struct net_device *dev)
{
	struct r8192_priv *priv = rtllib_priv(dev);

	del_timer_sync(&priv->fsync_timer);
}
/* Software fsync timer callback.  While linked to a CDD-fsync peer it
 * tracks the number of frames received at the monitored rates; when the
 * rate count drops (per the first/second-diff thresholds) and the RSSI is
 * above the fsync threshold, it toggles registers 0xC36/0xC3e between two
 * settings and doubles the polling interval.  The timer then re-arms
 * itself; outside the fsync condition the registers are restored.
 *
 * Fix vs. original: re-arming used del_timer_sync() + add_timer().
 * del_timer_sync() must never be called from the timer's own callback (it
 * waits for the running handler — i.e. ourselves — to finish, which can
 * deadlock).  mod_timer() is the documented race-free way to re-arm.
 */
static void _rtl92e_dm_fsync_timer_callback(struct timer_list *t)
{
	struct r8192_priv *priv = from_timer(priv, t, fsync_timer);
	struct net_device *dev = priv->rtllib->dev;
	u32 rate_index, rate_count = 0, rate_count_diff = 0;
	u32 interval;
	bool bSwitchFromCountDiff = false;
	bool bDoubleTimeInterval = false;

	if (priv->rtllib->link_state == MAC80211_LINKED &&
	    priv->rtllib->bfsync_enable &&
	    (priv->rtllib->ht_info->iot_action & HT_IOT_ACT_CDD_FSYNC)) {
		u32 rate_bitmap;

		/* Total frames received on the monitored rates so far. */
		for (rate_index = 0; rate_index <= 27; rate_index++) {
			rate_bitmap = 1 << rate_index;
			if (priv->rtllib->fsync_rate_bitmap & rate_bitmap)
				rate_count +=
					priv->stats.received_rate_histogram[1]
					[rate_index];
		}

		/* Delta since the previous callback, tolerating wraparound. */
		if (rate_count < priv->rate_record)
			rate_count_diff = 0xffffffff - rate_count +
					  priv->rate_record;
		else
			rate_count_diff = rate_count - priv->rate_record;
		if (rate_count_diff < priv->rate_count_diff_rec) {
			u32 DiffNum = priv->rate_count_diff_rec -
				      rate_count_diff;

			if (DiffNum >=
			    priv->rtllib->fsync_seconddiff_ratethreshold)
				priv->continue_diff_count++;
			else
				priv->continue_diff_count = 0;

			/* Two consecutive large drops trigger a switch. */
			if (priv->continue_diff_count >= 2) {
				bSwitchFromCountDiff = true;
				priv->continue_diff_count = 0;
			}
		} else {
			priv->continue_diff_count = 0;
		}

		if (rate_count_diff <=
		    priv->rtllib->fsync_firstdiff_ratethreshold) {
			bSwitchFromCountDiff = true;
			priv->continue_diff_count = 0;
		}
		priv->rate_record = rate_count;
		priv->rate_count_diff_rec = rate_count_diff;

		if (priv->undecorated_smoothed_pwdb >
		    priv->rtllib->fsync_rssi_threshold &&
		    bSwitchFromCountDiff) {
			bDoubleTimeInterval = true;
			priv->bswitch_fsync = !priv->bswitch_fsync;
			if (priv->bswitch_fsync) {
				rtl92e_writeb(dev, 0xC36, 0x1c);
				rtl92e_writeb(dev, 0xC3e, 0x90);
			} else {
				rtl92e_writeb(dev, 0xC36, 0x5c);
				rtl92e_writeb(dev, 0xC3e, 0x96);
			}
		} else if (priv->undecorated_smoothed_pwdb <=
			   priv->rtllib->fsync_rssi_threshold) {
			if (priv->bswitch_fsync) {
				priv->bswitch_fsync = false;
				rtl92e_writeb(dev, 0xC36, 0x5c);
				rtl92e_writeb(dev, 0xC3e, 0x96);
			}
		}

		/* Re-arm ourselves; see function comment for why mod_timer. */
		interval = priv->rtllib->fsync_time_interval;
		if (bDoubleTimeInterval)
			interval *= priv->rtllib->fsync_multiple_timeinterval;
		mod_timer(&priv->fsync_timer,
			  jiffies + msecs_to_jiffies(interval));
	} else {
		/* Not in the fsync condition: restore defaults, don't re-arm. */
		if (priv->bswitch_fsync) {
			priv->bswitch_fsync = false;
			rtl92e_writeb(dev, 0xC36, 0x5c);
			rtl92e_writeb(dev, 0xC3e, 0x96);
		}
		priv->continue_diff_count = 0;
		rtl92e_writel(dev, rOFDM0_RxDetector2, 0x465c52cd);
	}
}
/* Switch on hardware frame sync: program rOFDM0_RxDetector2, set the RF
 * timing value 0x77 through the HW register handler, and write 0xc3b.
 */
static void _rtl92e_dm_start_hw_fsync(struct net_device *dev)
{
	u8 rf_timing = 0x77;
	struct r8192_priv *priv = rtllib_priv(dev);

	rtl92e_writel(dev, rOFDM0_RxDetector2, 0x465c12cf);
	priv->rtllib->SetHwRegHandler(dev, HW_VAR_RF_TIMING,
				      (u8 *)(&rf_timing));
	rtl92e_writeb(dev, 0xc3b, 0x41);
}
/* Switch off hardware frame sync, restoring rOFDM0_RxDetector2, the RF
 * timing (0xaa) and register 0xc3b to their non-fsync values.
 */
static void _rtl92e_dm_end_hw_fsync(struct net_device *dev)
{
	u8 rf_timing = 0xaa;
	struct r8192_priv *priv = rtllib_priv(dev);

	rtl92e_writel(dev, rOFDM0_RxDetector2, 0x465c52cd);
	priv->rtllib->SetHwRegHandler(dev, HW_VAR_RF_TIMING, (u8 *)
				     (&rf_timing));
	rtl92e_writeb(dev, 0xc3b, 0x49);
}
/* Stop software frame sync: cancel the fsync timer (waiting for a running
 * callback — safe here because this runs in process context, not from the
 * timer itself), restore registers 0xC36/0xC3e if they were switched, and
 * reset the detector register.
 */
static void _rtl92e_dm_end_sw_fsync(struct net_device *dev)
{
	struct r8192_priv *priv = rtllib_priv(dev);

	del_timer_sync(&(priv->fsync_timer));

	if (priv->bswitch_fsync) {
		priv->bswitch_fsync = false;

		rtl92e_writeb(dev, 0xC36, 0x5c);

		rtl92e_writeb(dev, 0xC3e, 0x96);
	}

	priv->continue_diff_count = 0;
	rtl92e_writel(dev, rOFDM0_RxDetector2, 0x465c52cd);
}
/* Start software frame sync: reset the rate bookkeeping, pick the diff
 * thresholds for the current wireless mode, seed rate_record with the
 * current histogram counts, arm the fsync timer and program the detector
 * register.
 *
 * Fix vs. original: the timer_pending()/del_timer_sync()/expires/
 * add_timer() sequence is replaced by mod_timer(), which performs the
 * same "(re)arm with a new expiry" operation atomically and race-free.
 */
static void _rtl92e_dm_start_sw_fsync(struct net_device *dev)
{
	struct r8192_priv *priv = rtllib_priv(dev);
	u32 rate_index;
	u32 rate_bitmap;

	priv->rate_record = 0;
	priv->continue_diff_count = 0;
	priv->rate_count_diff_rec = 0;
	priv->bswitch_fsync = false;

	if (priv->rtllib->mode == WIRELESS_MODE_N_24G) {
		priv->rtllib->fsync_firstdiff_ratethreshold = 600;
		priv->rtllib->fsync_seconddiff_ratethreshold = 0xffff;
	} else {
		priv->rtllib->fsync_firstdiff_ratethreshold = 200;
		priv->rtllib->fsync_seconddiff_ratethreshold = 200;
	}

	/* Baseline: frames already received on the monitored rates. */
	for (rate_index = 0; rate_index <= 27; rate_index++) {
		rate_bitmap = 1 << rate_index;
		if (priv->rtllib->fsync_rate_bitmap & rate_bitmap)
			priv->rate_record +=
				priv->stats.received_rate_histogram[1]
				[rate_index];
	}

	mod_timer(&priv->fsync_timer, jiffies +
		  msecs_to_jiffies(priv->rtllib->fsync_time_interval));

	rtl92e_writel(dev, rOFDM0_RxDetector2, 0x465c12cd);
}
/* Frame-sync supervisor: while linked to a Broadcom peer, run the fsync
 * state machine (Default/HW/SW) according to bfsync_enable, keeping the
 * 0xC38 (rOFDM0_RxDetector3) register in the Broadcom-specific setting.
 * When not fsync'ing, adjust 0xC38 by RSSI with hysteresis around
 * RegC38_TH, and restore the default after an adapter reset.
 */
static void _rtl92e_dm_check_fsync(struct net_device *dev)
{
#define RegC38_Default			0
#define RegC38_NonFsync_Other_AP	1
#define RegC38_Fsync_AP_BCM		2
	struct r8192_priv *priv = rtllib_priv(dev);
	static u8 reg_c38_State = RegC38_Default;
	static u32 reset_cnt;	/* last seen priv->reset_count */

	if (priv->rtllib->link_state == MAC80211_LINKED &&
	    priv->rtllib->ht_info->IOTPeer == HT_IOT_PEER_BROADCOM) {
		if (priv->rtllib->bfsync_enable == 0) {
			/* Move to hardware fsync from any other state. */
			switch (priv->rtllib->fsync_state) {
			case Default_Fsync:
				_rtl92e_dm_start_hw_fsync(dev);
				priv->rtllib->fsync_state = HW_Fsync;
				break;
			case SW_Fsync:
				_rtl92e_dm_end_sw_fsync(dev);
				_rtl92e_dm_start_hw_fsync(dev);
				priv->rtllib->fsync_state = HW_Fsync;
				break;
			case HW_Fsync:
			default:
				break;
			}
		} else {
			/* Move to software fsync from any other state. */
			switch (priv->rtllib->fsync_state) {
			case Default_Fsync:
				_rtl92e_dm_start_sw_fsync(dev);
				priv->rtllib->fsync_state = SW_Fsync;
				break;
			case HW_Fsync:
				_rtl92e_dm_end_hw_fsync(dev);
				_rtl92e_dm_start_sw_fsync(dev);
				priv->rtllib->fsync_state = SW_Fsync;
				break;
			case SW_Fsync:
			default:
				break;
			}
		}
		if (reg_c38_State != RegC38_Fsync_AP_BCM) {
			rtl92e_writeb(dev, rOFDM0_RxDetector3, 0x95);

			reg_c38_State = RegC38_Fsync_AP_BCM;
		}
	} else {
		/* Leave whatever fsync mode was active. */
		switch (priv->rtllib->fsync_state) {
		case HW_Fsync:
			_rtl92e_dm_end_hw_fsync(dev);
			priv->rtllib->fsync_state = Default_Fsync;
			break;
		case SW_Fsync:
			_rtl92e_dm_end_sw_fsync(dev);
			priv->rtllib->fsync_state = Default_Fsync;
			break;
		case Default_Fsync:
		default:
			break;
		}

		if (priv->rtllib->link_state == MAC80211_LINKED) {
			if (priv->undecorated_smoothed_pwdb <=
			    RegC38_TH) {
				if (reg_c38_State !=
				    RegC38_NonFsync_Other_AP) {
					rtl92e_writeb(dev,
						      rOFDM0_RxDetector3,
						      0x90);

					reg_c38_State =
					     RegC38_NonFsync_Other_AP;
				}
			} else if (priv->undecorated_smoothed_pwdb >=
				   (RegC38_TH + 5)) {
				/* +5 of hysteresis before restoring. */
				if (reg_c38_State) {
					rtl92e_writeb(dev,
						      rOFDM0_RxDetector3,
						      priv->framesync);
					reg_c38_State = RegC38_Default;
				}
			}
		} else {
			if (reg_c38_State) {
				rtl92e_writeb(dev, rOFDM0_RxDetector3,
					      priv->framesync);
				reg_c38_State = RegC38_Default;
			}
		}
	}
	if (priv->reset_count != reset_cnt) {
		/* After a reset, re-apply the stored default value. */
		rtl92e_writeb(dev, rOFDM0_RxDetector3,
			      priv->framesync);
		reg_c38_State = RegC38_Default;
		reset_cnt = priv->reset_count;
	}
}
/*----------------------- Dynamic TX power control ---------------------------*/
/* Initialize dynamic TX power control: enabled by default, with all
 * high/low power flags (current and last-seen) cleared.
 */
static void _rtl92e_dm_init_dynamic_tx_power(struct net_device *dev)
{
	struct r8192_priv *priv = rtllib_priv(dev);

	priv->rtllib->bdynamic_txpower_enable = true;
	priv->last_dtp_flag_high = false;
	priv->last_dtp_flag_low = false;
	priv->dynamic_tx_high_pwr = false;
	priv->dynamic_tx_low_pwr = false;
}
/* Dynamic TX power control: derive high/low power flags from the smoothed
 * RSSI (Atheros peers in G mode use dedicated thresholds) and, when either
 * flag changed since the last run, re-program the TX power for the current
 * channel.
 */
static void _rtl92e_dm_dynamic_tx_power(struct net_device *dev)
{
	struct r8192_priv *priv = rtllib_priv(dev);
	unsigned int txhipower_threshold = 0;
	unsigned int txlowpower_threshold = 0;

	if (!priv->rtllib->bdynamic_txpower_enable) {
		priv->dynamic_tx_high_pwr = false;
		priv->dynamic_tx_low_pwr = false;
		return;
	}
	if ((priv->rtllib->ht_info->IOTPeer == HT_IOT_PEER_ATHEROS) &&
	    (priv->rtllib->mode == WIRELESS_MODE_G)) {
		txhipower_threshold = TX_POWER_ATHEROAP_THRESH_HIGH;
		txlowpower_threshold = TX_POWER_ATHEROAP_THRESH_LOW;
	} else {
		txhipower_threshold = TX_POWER_NEAR_FIELD_THRESH_HIGH;
		txlowpower_threshold = TX_POWER_NEAR_FIELD_THRESH_LOW;
	}

	if (priv->rtllib->link_state == MAC80211_LINKED) {
		if (priv->undecorated_smoothed_pwdb >= txhipower_threshold) {
			priv->dynamic_tx_high_pwr = true;
			priv->dynamic_tx_low_pwr = false;
		} else {
			/* High-power flag has hysteresis via the low
			 * threshold; low-power flag uses fixed 35/40 bounds.
			 */
			if (priv->undecorated_smoothed_pwdb <
			    txlowpower_threshold && priv->dynamic_tx_high_pwr)
				priv->dynamic_tx_high_pwr = false;
			if (priv->undecorated_smoothed_pwdb < 35)
				priv->dynamic_tx_low_pwr = true;
			else if (priv->undecorated_smoothed_pwdb >= 40)
				priv->dynamic_tx_low_pwr = false;
		}
	} else {
		priv->dynamic_tx_high_pwr = false;
		priv->dynamic_tx_low_pwr = false;
	}

	if ((priv->dynamic_tx_high_pwr != priv->last_dtp_flag_high) ||
	    (priv->dynamic_tx_low_pwr != priv->last_dtp_flag_low)) {
		rtl92e_set_tx_power(dev, priv->rtllib->current_network.channel);
	}
	priv->last_dtp_flag_high = priv->dynamic_tx_high_pwr;
	priv->last_dtp_flag_low = priv->dynamic_tx_low_pwr;
}
/* Snapshot the current TX rate, the initial TX rate and the retry counter
 * from hardware into the softmac statistics.
 */
static void _rtl92e_dm_check_txrateandretrycount(struct net_device *dev)
{
	struct r8192_priv *priv = rtllib_priv(dev);
	struct rtllib_device *rtllib = priv->rtllib;

	rtllib->softmac_stats.CurrentShowTxate =
		rtl92e_readb(dev, CURRENT_TX_RATE_REG);
	rtllib->softmac_stats.last_packet_rate =
		rtl92e_readb(dev, INITIAL_TX_RATE_REG);
	rtllib->softmac_stats.txretrycount =
		rtl92e_readl(dev, TX_RETRY_COUNT_REG);
}
static void _rtl92e_dm_send_rssi_to_fw(struct net_device *dev)
{
struct r8192_priv *priv = rtllib_priv(dev);
rtl92e_writeb(dev, DRIVER_RSSI, priv->undecorated_smoothed_pwdb);
}
| linux-master | drivers/staging/rtl8192e/rtl8192e/rtl_dm.c |
// SPDX-License-Identifier: GPL-2.0
/*
* Copyright(c) 2008 - 2010 Realtek Corporation. All rights reserved.
*
* Based on the r8180 driver, which is:
* Copyright 2004-2005 Andrea Merello <[email protected]>, et al.
*
* Contact Information: wlanfae <[email protected]>
*/
#include <linux/netdevice.h>
#include <linux/ethtool.h>
#include <linux/delay.h>
#include "rtl_core.h"
/* ethtool .get_drvinfo: report driver name, version and the PCI slot. */
static void _rtl92e_ethtool_get_drvinfo(struct net_device *dev,
					struct ethtool_drvinfo *info)
{
	struct r8192_priv *priv = rtllib_priv(dev);

	strscpy(info->bus_info, pci_name(priv->pdev), sizeof(info->bus_info));
	strscpy(info->version, DRV_VERSION, sizeof(info->version));
	strscpy(info->driver, DRV_NAME, sizeof(info->driver));
}
/* ethtool .get_link: "up" while associated, or scanning while associated. */
static u32 _rtl92e_ethtool_get_link(struct net_device *dev)
{
	struct r8192_priv *priv = rtllib_priv(dev);

	switch (priv->rtllib->link_state) {
	case MAC80211_LINKED:
	case MAC80211_LINKED_SCANNING:
		return 1;
	default:
		return 0;
	}
}
/* ethtool entry points exported by this driver (see rtl_core.c). */
const struct ethtool_ops rtl819x_ethtool_ops = {
	.get_drvinfo = _rtl92e_ethtool_get_drvinfo,
	.get_link = _rtl92e_ethtool_get_link,
};
| linux-master | drivers/staging/rtl8192e/rtl8192e/rtl_ethtool.c |
// SPDX-License-Identifier: GPL-2.0
/*
* Copyright(c) 2008 - 2010 Realtek Corporation. All rights reserved.
*
* Based on the r8180 driver, which is:
* Copyright 2004-2005 Andrea Merello <[email protected]>, et al.
*
* Contact Information: wlanfae <[email protected]>
*/
#include "rtl_ps.h"
#include "rtl_core.h"
#include "r8192E_phy.h"
#include "r8192E_phyreg.h"
#include "r8190P_rtl8256.h" /* RTL8225 Radio frontend */
#include "r8192E_cmdpkt.h"
#include <linux/jiffies.h>
static void _rtl92e_hw_sleep(struct net_device *dev)
{
struct r8192_priv *priv = rtllib_priv(dev);
unsigned long flags = 0;
spin_lock_irqsave(&priv->rf_ps_lock, flags);
if (priv->rf_change_in_progress) {
spin_unlock_irqrestore(&priv->rf_ps_lock, flags);
return;
}
spin_unlock_irqrestore(&priv->rf_ps_lock, flags);
rtl92e_set_rf_state(dev, rf_sleep, RF_CHANGE_BY_PS);
}
/* Delayed-work entry point: resolve the owning device and go to sleep. */
void rtl92e_hw_sleep_wq(void *data)
{
	struct rtllib_device *ieee =
		container_of_dwork_rsl(data, struct rtllib_device, hw_sleep_wq);

	_rtl92e_hw_sleep(ieee->dev);
}
void rtl92e_hw_wakeup(struct net_device *dev)
{
struct r8192_priv *priv = rtllib_priv(dev);
unsigned long flags = 0;
spin_lock_irqsave(&priv->rf_ps_lock, flags);
if (priv->rf_change_in_progress) {
spin_unlock_irqrestore(&priv->rf_ps_lock, flags);
schedule_delayed_work(&priv->rtllib->hw_wakeup_wq,
msecs_to_jiffies(10));
return;
}
spin_unlock_irqrestore(&priv->rf_ps_lock, flags);
rtl92e_set_rf_state(dev, rf_on, RF_CHANGE_BY_PS);
}
/* Delayed-work entry point: resolve the owning device and wake it up. */
void rtl92e_hw_wakeup_wq(void *data)
{
	struct rtllib_device *ieee =
		container_of_dwork_rsl(data, struct rtllib_device, hw_wakeup_wq);

	rtl92e_hw_wakeup(ieee->dev);
}
#define MIN_SLEEP_TIME 50
#define MAX_SLEEP_TIME 10000
/* Schedule a HW sleep until @time (an absolute time in jiffies, passed as
 * u64), bounded by MIN_SLEEP_TIME/MAX_SLEEP_TIME.  Queues the wakeup work
 * for the target jiffy and the sleep work immediately.
 */
void rtl92e_enter_sleep(struct net_device *dev, u64 time)
{
	struct r8192_priv *priv = rtllib_priv(dev);
	u32 tmp;
	unsigned long flags;
	unsigned long timeout;

	spin_lock_irqsave(&priv->ps_lock, flags);

	/* Wake up a little early to compensate for wake latency
	 * (8 + 16 + 7 ms; presumably HW/FW wake-up overhead — TODO confirm).
	 */
	time -= msecs_to_jiffies(8 + 16 + 7);

	/* Refuse to sleep if the target is under MIN_SLEEP_TIME away. */
	timeout = jiffies + msecs_to_jiffies(MIN_SLEEP_TIME);
	if (time_before((unsigned long)time, timeout)) {
		spin_unlock_irqrestore(&priv->ps_lock, flags);
		netdev_info(dev, "too short to sleep::%lld < %ld\n",
			    time - jiffies, msecs_to_jiffies(MIN_SLEEP_TIME));
		return;
	}
	/* ...and refuse absurdly long sleeps (over MAX_SLEEP_TIME). */
	timeout = jiffies + msecs_to_jiffies(MAX_SLEEP_TIME);
	if (time_after((unsigned long)time, timeout)) {
		netdev_info(dev, "========>too long to sleep:%lld > %ld\n",
			    time - jiffies, msecs_to_jiffies(MAX_SLEEP_TIME));
		spin_unlock_irqrestore(&priv->ps_lock, flags);
		return;
	}
	/* Wake up after (time - now) jiffies; start sleeping right away. */
	tmp = time - jiffies;
	schedule_delayed_work(&priv->rtllib->hw_wakeup_wq, tmp);
	schedule_delayed_work(&priv->rtllib->hw_sleep_wq, 0);
	spin_unlock_irqrestore(&priv->ps_lock, flags);
}
/* Apply the pending inactive power state to the RF, flagging that the
 * transition is software-driven while it is in progress.
 */
static void _rtl92e_ps_update_rf_state(struct net_device *dev)
{
	struct r8192_priv *priv = rtllib_priv(dev);
	struct rt_pwr_save_ctrl *psc =
		(struct rt_pwr_save_ctrl *)&priv->rtllib->pwr_save_ctrl;

	psc->bSwRfProcessing = true;
	rtl92e_set_rf_state(dev, psc->eInactivePowerState, RF_CHANGE_BY_IPS);
	psc->bSwRfProcessing = false;
}
/* Enter inactive power save: power the RF down, but only when it is on,
 * no SW RF transition is running and we are not associated.
 */
void rtl92e_ips_enter(struct net_device *dev)
{
	struct r8192_priv *priv = rtllib_priv(dev);
	struct rt_pwr_save_ctrl *psc =
		(struct rt_pwr_save_ctrl *)&priv->rtllib->pwr_save_ctrl;

	if (priv->rtllib->rf_power_state != rf_on)
		return;
	if (psc->bSwRfProcessing)
		return;
	if (priv->rtllib->link_state == MAC80211_LINKED)
		return;

	psc->eInactivePowerState = rf_off;
	_rtl92e_ps_update_rf_state(dev);
}
/* Leave inactive power save: power the RF back on, but only when it is
 * currently off for an IPS-or-weaker reason and no SW RF transition runs.
 */
void rtl92e_ips_leave(struct net_device *dev)
{
	struct r8192_priv *priv = rtllib_priv(dev);
	struct rt_pwr_save_ctrl *psc =
		(struct rt_pwr_save_ctrl *)&priv->rtllib->pwr_save_ctrl;

	if (priv->rtllib->rf_power_state == rf_on)
		return;
	if (psc->bSwRfProcessing)
		return;
	if (priv->rtllib->rf_off_reason > RF_CHANGE_BY_IPS)
		return;

	psc->eInactivePowerState = rf_on;
	_rtl92e_ps_update_rf_state(dev);
}
/* Work-queue entry point: leave IPS under the IPS mutex. */
void rtl92e_ips_leave_wq(void *data)
{
	struct rtllib_device *ieee =
		container_of(data, struct rtllib_device, ips_leave_wq);
	struct net_device *dev = ieee->dev;
	struct r8192_priv *priv = rtllib_priv(dev);

	mutex_lock(&priv->rtllib->ips_mutex);
	rtl92e_ips_leave(dev);
	mutex_unlock(&priv->rtllib->ips_mutex);
}
/* Queue the IPS-leave work if the RF is off purely because of IPS. */
void rtl92e_rtllib_ips_leave_wq(struct net_device *dev)
{
	struct r8192_priv *priv = rtllib_priv(dev);

	if (priv->rtllib->rf_power_state != rf_off)
		return;

	if (priv->rtllib->rf_off_reason > RF_CHANGE_BY_IPS) {
		/* RF was switched off for a stronger reason; leave it be. */
		netdev_warn(dev, "%s(): RF is OFF.\n",
			    __func__);
		return;
	}

	netdev_info(dev, "=========>%s(): rtl92e_ips_leave\n",
		    __func__);
	schedule_work(&priv->rtllib->ips_leave_wq);
}
void rtl92e_rtllib_ips_leave(struct net_device *dev)
{
struct r8192_priv *priv = (struct r8192_priv *)rtllib_priv(dev);
mutex_lock(&priv->rtllib->ips_mutex);
rtl92e_ips_leave(dev);
mutex_unlock(&priv->rtllib->ips_mutex);
}
/* Switch the 802.11 power-save mode.  Returns false in ad-hoc mode (PS is
 * not applicable there), true otherwise.  When PS is being disabled while
 * the STA is asleep, wake the NIC and notify the AP with a null frame.
 */
static bool _rtl92e_ps_set_mode(struct net_device *dev, u8 rtPsMode)
{
	struct r8192_priv *priv = rtllib_priv(dev);

	if (priv->rtllib->iw_mode == IW_MODE_ADHOC)
		return false;

	if (!priv->ps_force)
		priv->rtllib->ps = rtPsMode;

	if (rtPsMode == RTLLIB_PS_DISABLED &&
	    priv->rtllib->sta_sleep != LPS_IS_WAKE) {
		unsigned long flags;

		rtl92e_hw_wakeup(dev);
		priv->rtllib->sta_sleep = LPS_IS_WAKE;

		spin_lock_irqsave(&priv->rtllib->mgmt_tx_lock, flags);
		rtllib_sta_ps_send_null_frame(priv->rtllib, 0);
		spin_unlock_irqrestore(&priv->rtllib->mgmt_tx_lock, flags);
	}

	return true;
}
/* Enter leisure power save once the idle counter has run long enough.
 * Only applies to an associated infrastructure STA with LPS enabled.
 */
void rtl92e_leisure_ps_enter(struct net_device *dev)
{
	struct r8192_priv *priv = rtllib_priv(dev);
	struct rt_pwr_save_ctrl *psc =
		(struct rt_pwr_save_ctrl *)&priv->rtllib->pwr_save_ctrl;

	if (priv->rtllib->iw_mode == IW_MODE_ADHOC)
		return;
	if (priv->rtllib->iw_mode != IW_MODE_INFRA ||
	    priv->rtllib->link_state != MAC80211_LINKED)
		return;
	if (!psc->bLeisurePs)
		return;

	if (psc->LpsIdleCount < RT_CHECK_FOR_HANG_PERIOD) {
		psc->LpsIdleCount++;
		return;
	}

	if (priv->rtllib->ps == RTLLIB_PS_DISABLED)
		_rtl92e_ps_set_mode(dev, RTLLIB_PS_MBCAST | RTLLIB_PS_UNICAST);
}
/* Leave leisure power save if it is currently active. */
void rtl92e_leisure_ps_leave(struct net_device *dev)
{
	struct r8192_priv *priv = rtllib_priv(dev);
	struct rt_pwr_save_ctrl *psc =
		(struct rt_pwr_save_ctrl *)&priv->rtllib->pwr_save_ctrl;

	if (psc->bLeisurePs && priv->rtllib->ps != RTLLIB_PS_DISABLED)
		_rtl92e_ps_set_mode(dev, RTLLIB_PS_DISABLED);
}
| linux-master | drivers/staging/rtl8192e/rtl8192e/rtl_ps.c |
// SPDX-License-Identifier: GPL-2.0
/*
* Copyright(c) 2008 - 2010 Realtek Corporation. All rights reserved.
*
* Based on the r8180 driver, which is:
* Copyright 2004-2005 Andrea Merello <[email protected]>, et al.
*
* Contact Information: wlanfae <[email protected]>
*/
#include "rtl_core.h"
#include "r8192E_phy.h"
#include "r8192E_phyreg.h"
#include "r8190P_rtl8256.h"
#include "r8192E_cmdpkt.h"
#include "rtl_dm.h"
#include "rtl_wx.h"
/* EDCA parameter register addresses indexed by QoS queue (BE, BK, VI, VO). */
static int WDCAPARA_ADD[] = {EDCAPARA_BE, EDCAPARA_BK, EDCAPARA_VI,
			     EDCAPARA_VO};
/* Program the beacon timing registers (interval, early interrupt, DMA
 * time, error threshold, CW/IFS config) with interrupts masked.
 */
void rtl92e_start_beacon(struct net_device *dev)
{
	struct r8192_priv *priv = rtllib_priv(dev);
	struct rtllib_network *net = &priv->rtllib->current_network;
	u16 bcn_cw = 6;
	u16 bcn_ifs = 0xf;
	u16 bcn_cfg = 0;

	rtl92e_irq_disable(dev);

	rtl92e_writew(dev, ATIMWND, 2);
	rtl92e_writew(dev, BCN_INTERVAL, net->beacon_interval);
	rtl92e_writew(dev, BCN_DRV_EARLY_INT, 10);
	rtl92e_writew(dev, BCN_DMATIME, 256);
	rtl92e_writeb(dev, BCN_ERR_THRESH, 100);

	bcn_cfg |= bcn_cw << BCN_TCFG_CW_SHIFT;
	bcn_cfg |= bcn_ifs << BCN_TCFG_IFS;
	rtl92e_writew(dev, BCN_TCFG, bcn_cfg);

	rtl92e_irq_enable(dev);
}
/* Refresh the MSR link-type bits: managed/ad-hoc when associated in the
 * corresponding iw_mode, otherwise no-link.
 */
static void _rtl92e_update_msr(struct net_device *dev)
{
	struct r8192_priv *priv = rtllib_priv(dev);
	u8 msr = rtl92e_readb(dev, MSR) & ~MSR_LINK_MASK;

	if (priv->rtllib->link_state == MAC80211_LINKED) {
		if (priv->rtllib->iw_mode == IW_MODE_INFRA)
			msr |= MSR_LINK_MANAGED;
		else if (priv->rtllib->iw_mode == IW_MODE_ADHOC)
			msr |= MSR_LINK_ADHOC;
	}

	rtl92e_writeb(dev, MSR, msr);
}
/* rtl92e_set_reg - dispatch a HW_VAR_* write to the matching registers.
 * @dev: network device
 * @variable: HW_VAR_* selector
 * @val: variable-sized payload, interpreted per selector
 *
 * Register sequences are unchanged except for one fix in HW_VAR_ACM_CTRL:
 * disabling ACM for the VO queue previously cleared ACM_HW_BEQ_EN (a
 * copy/paste error which also made VO-queue ACM impossible to switch
 * off); it now clears ACM_HW_VOQ_EN.
 */
void rtl92e_set_reg(struct net_device *dev, u8 variable, u8 *val)
{
	struct r8192_priv *priv = rtllib_priv(dev);

	switch (variable) {
	case HW_VAR_BSSID:
		/* BSSIDR 2 byte alignment */
		rtl92e_writew(dev, BSSIDR, *(u16 *)val);
		rtl92e_writel(dev, BSSIDR + 2, *(u32 *)(val + 2));
		break;
	case HW_VAR_MEDIA_STATUS:
	{
		enum rt_op_mode OpMode = *((enum rt_op_mode *)(val));
		u8 btMsr = rtl92e_readb(dev, MSR);

		btMsr &= 0xfc;
		/* Encode the operating mode into the MSR link-type bits. */
		switch (OpMode) {
		case RT_OP_MODE_INFRASTRUCTURE:
			btMsr |= MSR_INFRA;
			break;
		case RT_OP_MODE_IBSS:
			btMsr |= MSR_ADHOC;
			break;
		case RT_OP_MODE_AP:
			btMsr |= MSR_AP;
			break;
		default:
			btMsr |= MSR_NOLINK;
			break;
		}
		rtl92e_writeb(dev, MSR, btMsr);
	}
	break;
	case HW_VAR_CECHK_BSSID:
	{
		u32 RegRCR, Type;

		/* Toggle BSSID filtering in RCR and cache the new value. */
		Type = val[0];
		RegRCR = rtl92e_readl(dev, RCR);
		priv->receive_config = RegRCR;
		if (Type)
			RegRCR |= (RCR_CBSSID);
		else
			RegRCR &= (~RCR_CBSSID);
		rtl92e_writel(dev, RCR, RegRCR);
		priv->receive_config = RegRCR;
	}
	break;
	case HW_VAR_SLOT_TIME:
		priv->slot_time = val[0];
		rtl92e_writeb(dev, SLOT_TIME, val[0]);
		break;
	case HW_VAR_ACK_PREAMBLE:
	{
		u32 regTmp;

		priv->short_preamble = (bool)*val;
		regTmp = priv->basic_rate;
		if (priv->short_preamble)
			regTmp |= BRSR_AckShortPmb;
		rtl92e_writel(dev, RRSR, regTmp);
		break;
	}
	case HW_VAR_CPU_RST:
		rtl92e_writel(dev, CPU_GEN, ((u32 *)(val))[0]);
		break;
	case HW_VAR_AC_PARAM:
	{
		u8 pAcParam = *val;
		u32 eACI = pAcParam;
		u8 u1bAIFS;
		u32 u4bAcParam;
		u8 mode = priv->rtllib->mode;
		struct rtllib_qos_parameters *qop =
			 &priv->rtllib->current_network.qos_data.parameters;

		/* AIFS in us: aifsn slots (9 us OFDM, 20 us CCK) + SIFS. */
		u1bAIFS = qop->aifs[pAcParam] *
			  ((mode & (WIRELESS_MODE_G | WIRELESS_MODE_N_24G)) ? 9 : 20) + aSifsTime;

		rtl92e_dm_init_edca_turbo(dev);

		/* Pack TXOP / ECWmax / ECWmin / AIFS into one register word. */
		u4bAcParam = (le16_to_cpu(qop->tx_op_limit[pAcParam]) <<
			      AC_PARAM_TXOP_LIMIT_OFFSET) |
				((le16_to_cpu(qop->cw_max[pAcParam])) <<
				 AC_PARAM_ECW_MAX_OFFSET) |
				((le16_to_cpu(qop->cw_min[pAcParam])) <<
				 AC_PARAM_ECW_MIN_OFFSET) |
				(((u32)u1bAIFS) << AC_PARAM_AIFS_OFFSET);

		switch (eACI) {
		case AC1_BK:
			rtl92e_writel(dev, EDCAPARA_BK, u4bAcParam);
			break;
		case AC0_BE:
			rtl92e_writel(dev, EDCAPARA_BE, u4bAcParam);
			break;
		case AC2_VI:
			rtl92e_writel(dev, EDCAPARA_VI, u4bAcParam);
			break;
		case AC3_VO:
			rtl92e_writel(dev, EDCAPARA_VO, u4bAcParam);
			break;
		default:
			netdev_info(dev, "SetHwReg8185(): invalid ACI: %d !\n",
				    eACI);
			break;
		}
		priv->rtllib->SetHwRegHandler(dev, HW_VAR_ACM_CTRL,
					      &pAcParam);
		break;
	}
	case HW_VAR_ACM_CTRL:
	{
		struct rtllib_qos_parameters *qos_parameters =
			 &priv->rtllib->current_network.qos_data.parameters;
		u8 pAcParam = *val;
		u32 eACI = pAcParam;
		union aci_aifsn *pAciAifsn = (union aci_aifsn *)&
					      (qos_parameters->aifs[0]);
		u8 acm = pAciAifsn->f.acm;
		u8 AcmCtrl = rtl92e_readb(dev, ACM_HW_CTRL);

		if (acm) {
			switch (eACI) {
			case AC0_BE:
				AcmCtrl |= ACM_HW_BEQ_EN;
				break;
			case AC2_VI:
				AcmCtrl |= ACM_HW_VIQ_EN;
				break;
			case AC3_VO:
				AcmCtrl |= ACM_HW_VOQ_EN;
				break;
			}
		} else {
			switch (eACI) {
			case AC0_BE:
				AcmCtrl &= (~ACM_HW_BEQ_EN);
				break;
			case AC2_VI:
				AcmCtrl &= (~ACM_HW_VIQ_EN);
				break;
			case AC3_VO:
				/* Fixed: was clearing ACM_HW_BEQ_EN. */
				AcmCtrl &= (~ACM_HW_VOQ_EN);
				break;
			default:
				break;
			}
		}
		rtl92e_writeb(dev, ACM_HW_CTRL, AcmCtrl);
		break;
	}
	case HW_VAR_SIFS:
		rtl92e_writeb(dev, SIFS, val[0]);
		rtl92e_writeb(dev, SIFS + 1, val[0]);
		break;
	case HW_VAR_RF_TIMING:
	{
		u8 Rf_Timing = *val;

		rtl92e_writeb(dev, rFPGA0_RFTiming1, Rf_Timing);
		break;
	}
	default:
		break;
	}
}
/* Parse the adapter configuration from the EEPROM: VID/DID, customer ID,
 * channel plan, IC version, MAC address, per-channel TX power tables,
 * thermal meter and crystal-cap calibration.  When the EEPROM signature
 * does not match (autoload failure), hard-coded defaults are used instead.
 */
static void _rtl92e_read_eeprom_info(struct net_device *dev)
{
	struct r8192_priv *priv = rtllib_priv(dev);
	const u8 bMac_Tmp_Addr[ETH_ALEN] = {0x00, 0xe0, 0x4c, 0x00, 0x00, 0x01};
	u8 tempval;
	u8 ICVer8192, ICVer8256;
	u16 i, usValue, IC_Version;
	u16 EEPROMId;

	/* Word 0 must hold the Realtek EEPROM signature. */
	EEPROMId = rtl92e_eeprom_read(dev, 0);
	if (EEPROMId != RTL8190_EEPROM_ID) {
		netdev_err(dev, "%s(): Invalid EEPROM ID: %x\n", __func__,
			   EEPROMId);
		priv->autoload_fail_flag = true;
	} else {
		priv->autoload_fail_flag = false;
	}
	if (!priv->autoload_fail_flag) {
		/* Byte offsets are halved: the EEPROM is word-addressed. */
		priv->eeprom_vid = rtl92e_eeprom_read(dev, EEPROM_VID >> 1);
		priv->eeprom_did = rtl92e_eeprom_read(dev, EEPROM_DID >> 1);

		usValue = rtl92e_eeprom_read(dev,
					     (EEPROM_Customer_ID >> 1)) >> 8;
		priv->eeprom_customer_id = usValue & 0xff;
		usValue = rtl92e_eeprom_read(dev,
					     EEPROM_ICVersion_ChannelPlan >> 1);
		priv->eeprom_chnl_plan = usValue & 0xff;
		IC_Version = (usValue & 0xff00) >> 8;

		/* Low nibble: 8192 cut; high nibble: 8256 cut. */
		ICVer8192 = IC_Version & 0xf;
		ICVer8256 = (IC_Version & 0xf0) >> 4;
		if (ICVer8192 == 0x2) {
			if (ICVer8256 == 0x5)
				priv->card_8192_version = VERSION_8190_BE;
		}
		switch (priv->card_8192_version) {
		case VERSION_8190_BD:
		case VERSION_8190_BE:
			break;
		default:
			priv->card_8192_version = VERSION_8190_BD;
			break;
		}
	} else {
		/* Autoload failed: assume BD cut and zeroed identifiers. */
		priv->card_8192_version = VERSION_8190_BD;
		priv->eeprom_vid = 0;
		priv->eeprom_did = 0;
		priv->eeprom_customer_id = 0;
		priv->eeprom_chnl_plan = 0;
	}

	if (!priv->autoload_fail_flag) {
		u8 addr[ETH_ALEN];

		/* The MAC address is stored as three 16-bit words. */
		for (i = 0; i < 6; i += 2) {
			usValue = rtl92e_eeprom_read(dev,
				 (EEPROM_NODE_ADDRESS_BYTE_0 + i) >> 1);
			*(u16 *)(&addr[i]) = usValue;
		}
		eth_hw_addr_set(dev, addr);
	} else {
		/* Fall back to a fixed Realtek-OUI placeholder address. */
		eth_hw_addr_set(dev, bMac_Tmp_Addr);
	}

	if (priv->card_8192_version > VERSION_8190_BD)
		priv->tx_pwr_data_read_from_eeprom = true;
	else
		priv->tx_pwr_data_read_from_eeprom = false;

	if (priv->card_8192_version > VERSION_8190_BD) {
		/* Legacy-vs-HT TX power difference (low nibble). */
		if (!priv->autoload_fail_flag) {
			tempval = (rtl92e_eeprom_read(dev,
						      (EEPROM_RFInd_PowerDiff >> 1))) & 0xff;
			priv->eeprom_legacy_ht_tx_pwr_diff = tempval & 0xf;
		} else {
			priv->eeprom_legacy_ht_tx_pwr_diff = 0x04;
		}

		if (!priv->autoload_fail_flag)
			priv->eeprom_thermal_meter = ((rtl92e_eeprom_read(dev,
						      (EEPROM_ThermalMeter >> 1))) &
						      0xff00) >> 8;
		else
			priv->eeprom_thermal_meter = EEPROM_Default_ThermalMeter;
		/* TSSI reference derived from the thermal meter reading;
		 * scale factor 100 — exact units unclear, TODO confirm.
		 */
		priv->tssi_13dBm = priv->eeprom_thermal_meter * 100;

		if (priv->epromtype == EEPROM_93C46) {
			/* Antenna power diff (12 bits) + crystal cap (4 bits). */
			if (!priv->autoload_fail_flag) {
				usValue = rtl92e_eeprom_read(dev,
					  EEPROM_TxPwDiff_CrystalCap >> 1);
				priv->eeprom_ant_pwr_diff = usValue & 0x0fff;
				priv->eeprom_crystal_cap = (usValue & 0xf000)
							   >> 12;
			} else {
				priv->eeprom_ant_pwr_diff =
					 EEPROM_Default_AntTxPowerDiff;
				priv->eeprom_crystal_cap =
					 EEPROM_Default_TxPwDiff_CrystalCap;
			}

			/* CCK TX power per channel, two bytes per read. */
			for (i = 0; i < 14; i += 2) {
				if (!priv->autoload_fail_flag)
					usValue = rtl92e_eeprom_read(dev,
						  (EEPROM_TxPwIndex_CCK + i) >> 1);
				else
					usValue = EEPROM_Default_TxPower;
				*((u16 *)(&priv->eeprom_tx_pwr_level_cck[i])) =
								 usValue;
			}
			/* OFDM 2.4 GHz TX power per channel. */
			for (i = 0; i < 14; i += 2) {
				if (!priv->autoload_fail_flag)
					usValue = rtl92e_eeprom_read(dev,
						  (EEPROM_TxPwIndex_OFDM_24G + i) >> 1);
				else
					usValue = EEPROM_Default_TxPower;
				*((u16 *)(&priv->eeprom_tx_pwr_level_ofdm24g[i]))
							 = usValue;
			}
		}
		/* Copy the EEPROM-derived calibration into the live fields. */
		if (priv->epromtype == EEPROM_93C46) {
			for (i = 0; i < 14; i++) {
				priv->tx_pwr_level_cck[i] =
					 priv->eeprom_tx_pwr_level_cck[i];
				priv->tx_pwr_level_ofdm_24g[i] =
					 priv->eeprom_tx_pwr_level_ofdm24g[i];
			}
			priv->legacy_ht_tx_pwr_diff =
					 priv->eeprom_legacy_ht_tx_pwr_diff;
			priv->antenna_tx_pwr_diff[0] = priv->eeprom_ant_pwr_diff & 0xf;
			priv->antenna_tx_pwr_diff[1] = (priv->eeprom_ant_pwr_diff &
							0xf0) >> 4;
			priv->antenna_tx_pwr_diff[2] = (priv->eeprom_ant_pwr_diff &
							0xf00) >> 8;
			priv->crystal_cap = priv->eeprom_crystal_cap;
			priv->thermal_meter[0] = priv->eeprom_thermal_meter & 0xf;
			priv->thermal_meter[1] = (priv->eeprom_thermal_meter &
						  0xf0) >> 4;
		} else if (priv->epromtype == EEPROM_93C56) {
			/* 93C56 parts carry no per-antenna power diffs. */
			priv->legacy_ht_tx_pwr_diff =
				 priv->eeprom_legacy_ht_tx_pwr_diff;
			priv->antenna_tx_pwr_diff[0] = 0;
			priv->antenna_tx_pwr_diff[1] = 0;
			priv->antenna_tx_pwr_diff[2] = 0;
			priv->crystal_cap = priv->eeprom_crystal_cap;
			priv->thermal_meter[0] = priv->eeprom_thermal_meter & 0xf;
			priv->thermal_meter[1] = (priv->eeprom_thermal_meter &
						  0xf0) >> 4;
		}
	}
	rtl92e_init_adaptive_rate(dev);

	/* Customer-specific channel plan handling. */
	priv->chnl_plan = priv->eeprom_chnl_plan;
	switch (priv->eeprom_customer_id) {
	case EEPROM_CID_NetCore:
		priv->customer_id = RT_CID_819X_NETCORE;
		break;
	case EEPROM_CID_TOSHIBA:
		priv->customer_id = RT_CID_TOSHIBA;
		if (priv->eeprom_chnl_plan & 0x80)
			priv->chnl_plan = priv->eeprom_chnl_plan & 0x7f;
		else
			priv->chnl_plan = 0x0;
		break;
	}

	if (priv->chnl_plan > CHANNEL_PLAN_LEN - 1)
		priv->chnl_plan = 0;
	/* Note: the EEPROM-derived plan is overridden unconditionally here. */
	priv->chnl_plan = COUNTRY_CODE_WORLD_WIDE_13;

	/* D-Link 1186:3304 boards support remote wake-up. */
	if (priv->eeprom_vid == 0x1186 &&  priv->eeprom_did == 0x3304)
		priv->rtllib->bSupportRemoteWakeUp = true;
	else
		priv->rtllib->bSupportRemoteWakeUp = false;
}
/* Detect the EEPROM part (93C56 vs 93C46) from EPROM_CMD, then read the
 * full configuration out of it.
 */
void rtl92e_get_eeprom_size(struct net_device *dev)
{
	struct r8192_priv *priv = rtllib_priv(dev);
	u16 cmd_reg = rtl92e_readw(dev, EPROM_CMD);

	if (cmd_reg & EPROM_CMD_9356SEL)
		priv->epromtype = EEPROM_93C56;
	else
		priv->epromtype = EEPROM_93C46;

	_rtl92e_read_eeprom_info(dev);
}
static void _rtl92e_hwconfig(struct net_device *dev)
{
u32 regRATR = 0, regRRSR = 0;
u8 regBwOpMode = 0, regTmp = 0;
struct r8192_priv *priv = rtllib_priv(dev);
switch (priv->rtllib->mode) {
case WIRELESS_MODE_B:
regBwOpMode = BW_OPMODE_20MHZ;
regRATR = RATE_ALL_CCK;
regRRSR = RATE_ALL_CCK;
break;
case WIRELESS_MODE_AUTO:
case WIRELESS_MODE_N_24G:
regBwOpMode = BW_OPMODE_20MHZ;
regRATR = RATE_ALL_CCK | RATE_ALL_OFDM_AG |
RATE_ALL_OFDM_1SS | RATE_ALL_OFDM_2SS;
regRRSR = RATE_ALL_CCK | RATE_ALL_OFDM_AG;
break;
case WIRELESS_MODE_G:
default:
regBwOpMode = BW_OPMODE_20MHZ;
regRATR = RATE_ALL_CCK | RATE_ALL_OFDM_AG;
regRRSR = RATE_ALL_CCK | RATE_ALL_OFDM_AG;
break;
}
rtl92e_writeb(dev, BW_OPMODE, regBwOpMode);
{
u32 ratr_value;
ratr_value = regRATR;
ratr_value &= ~(RATE_ALL_OFDM_2SS);
rtl92e_writel(dev, RATR0, ratr_value);
rtl92e_writeb(dev, UFWP, 1);
}
regTmp = rtl92e_readb(dev, 0x313);
regRRSR = ((regTmp) << 24) | (regRRSR & 0x00ffffff);
rtl92e_writel(dev, RRSR, regRRSR);
rtl92e_writew(dev, RETRY_LIMIT,
priv->short_retry_limit << RETRY_LIMIT_SHORT_SHIFT |
priv->long_retry_limit << RETRY_LIMIT_LONG_SHIFT);
}
/* Bring the adapter fully up: reset the descriptor rings and the on-chip
 * CPU, configure baseband/MAC/RF, download the firmware (retrying up to
 * 10 times via the `start` label), restore the RF power state and seed
 * TX power tracking.  Returns true on success, false on BB/RF/firmware
 * configuration failure.
 */
bool rtl92e_start_adapter(struct net_device *dev)
{
	struct r8192_priv *priv = rtllib_priv(dev);
	u32 ulRegRead;
	bool rtStatus = true;
	u8 tmpvalue;
	u8 ICVersion, SwitchingRegulatorOutput;
	bool bfirmwareok = true;
	u32 tmpRegA, TempCCk;
	int i = 0;
	u32 retry_times = 0;

	priv->being_init_adapter = true;

start:
	rtl92e_reset_desc_ring(dev);
	priv->rf_mode = RF_OP_By_SW_3wire;
	if (priv->rst_progress == RESET_TYPE_NORESET) {
		rtl92e_writeb(dev, ANAPAR, 0x37);
		mdelay(500);
	}
	priv->fw_info->status = FW_STATUS_0_INIT;

	/* Reset the on-chip CPU according to the firmware state. */
	ulRegRead = rtl92e_readl(dev, CPU_GEN);
	if (priv->fw_info->status == FW_STATUS_0_INIT)
		ulRegRead |= CPU_GEN_SYSTEM_RESET;
	else if (priv->fw_info->status == FW_STATUS_5_READY)
		ulRegRead |= CPU_GEN_FIRMWARE_RESET;
	else
		netdev_err(dev, "%s(): undefined firmware state: %d.\n",
			   __func__, priv->fw_info->status);

	rtl92e_writel(dev, CPU_GEN, ulRegRead);

	/* IC cut >= 4: re-strap the switching regulator if needed. */
	ICVersion = rtl92e_readb(dev, IC_VERRSION);
	if (ICVersion >= 0x4) {
		SwitchingRegulatorOutput = rtl92e_readb(dev, SWREGULATOR);
		if (SwitchingRegulatorOutput != 0xb8) {
			rtl92e_writeb(dev, SWREGULATOR, 0xa8);
			mdelay(1);
			rtl92e_writeb(dev, SWREGULATOR, 0xb8);
		}
	}
	rtStatus = rtl92e_config_bb(dev);
	if (!rtStatus) {
		netdev_warn(dev, "%s(): Failed to configure BB\n", __func__);
		return rtStatus;
	}

	/* Select no-loopback operation in CPU_GEN. */
	priv->loopback_mode = RTL819X_NO_LOOPBACK;
	if (priv->rst_progress == RESET_TYPE_NORESET) {
		ulRegRead = rtl92e_readl(dev, CPU_GEN);
		if (priv->loopback_mode == RTL819X_NO_LOOPBACK)
			ulRegRead = (ulRegRead & CPU_GEN_NO_LOOPBACK_MSK) |
				    CPU_GEN_NO_LOOPBACK_SET;
		else if (priv->loopback_mode == RTL819X_MAC_LOOPBACK)
			ulRegRead |= CPU_CCK_LOOPBACK;
		else
			netdev_err(dev, "%s: Invalid loopback mode setting.\n",
				   __func__);

		rtl92e_writel(dev, CPU_GEN, ulRegRead);

		udelay(500);
	}
	_rtl92e_hwconfig(dev);
	rtl92e_writeb(dev, CMDR, CR_RE | CR_TE);

	rtl92e_writeb(dev, PCIF, ((MXDMA2_NO_LIMIT << MXDMA2_RX_SHIFT) |
				  (MXDMA2_NO_LIMIT << MXDMA2_TX_SHIFT)));
	rtl92e_writel(dev, MAC0, ((u32 *)dev->dev_addr)[0]);
	rtl92e_writew(dev, MAC4, ((u16 *)(dev->dev_addr + 4))[0]);
	rtl92e_writel(dev, RCR, priv->receive_config);

	/* Partition firmware queue pages between the TX queues. */
	rtl92e_writel(dev, RQPN1,  NUM_OF_PAGE_IN_FW_QUEUE_BK <<
		      RSVD_FW_QUEUE_PAGE_BK_SHIFT |
		      NUM_OF_PAGE_IN_FW_QUEUE_BE <<
		      RSVD_FW_QUEUE_PAGE_BE_SHIFT |
		      NUM_OF_PAGE_IN_FW_QUEUE_VI <<
		      RSVD_FW_QUEUE_PAGE_VI_SHIFT |
		      NUM_OF_PAGE_IN_FW_QUEUE_VO <<
		      RSVD_FW_QUEUE_PAGE_VO_SHIFT);
	rtl92e_writel(dev, RQPN2, NUM_OF_PAGE_IN_FW_QUEUE_MGNT <<
		      RSVD_FW_QUEUE_PAGE_MGNT_SHIFT);
	rtl92e_writel(dev, RQPN3, APPLIED_RESERVED_QUEUE_IN_FW |
		      NUM_OF_PAGE_IN_FW_QUEUE_BCN <<
		      RSVD_FW_QUEUE_PAGE_BCN_SHIFT |
		      NUM_OF_PAGE_IN_FW_QUEUE_PUB <<
		      RSVD_FW_QUEUE_PAGE_PUB_SHIFT);

	rtl92e_tx_enable(dev);
	rtl92e_rx_enable(dev);
	/* Basic response rates: keep the reserved top bits, allow all
	 * OFDM A/G and CCK rates.
	 */
	ulRegRead = (0xFFF00000 & rtl92e_readl(dev, RRSR)) |
		     RATE_ALL_OFDM_AG | RATE_ALL_CCK;
	rtl92e_writel(dev, RRSR, ulRegRead);
	rtl92e_writel(dev, RATR0 + 4 * 7, (RATE_ALL_OFDM_AG | RATE_ALL_CCK));

	rtl92e_writeb(dev, ACK_TIMEOUT, 0x30);

	if (priv->rst_progress == RESET_TYPE_NORESET)
		rtl92e_set_wireless_mode(dev, priv->rtllib->mode);
	rtl92e_cam_reset(dev);
	{
		/* Enable HW TX encryption / RX decryption, no key search
		 * for multicast.
		 */
		u8 SECR_value = 0x0;

		SECR_value |= SCR_TxEncEnable;
		SECR_value |= SCR_RxDecEnable;
		SECR_value |= SCR_NoSKMC;
		rtl92e_writeb(dev, SECR, SECR_value);
	}
	rtl92e_writew(dev, ATIMWND, 2);
	rtl92e_writew(dev, BCN_INTERVAL, 100);

	for (i = 0; i < QOS_QUEUE_NUM; i++)
		rtl92e_writel(dev, WDCAPARA_ADD[i], 0x005e4332);

	rtl92e_writeb(dev, 0xbe, 0xc0);

	rtl92e_config_mac(dev);

	if (priv->card_8192_version > VERSION_8190_BD) {
		rtl92e_get_tx_power(dev);
		rtl92e_set_tx_power(dev, priv->chan);
	}

	tmpvalue = rtl92e_readb(dev, IC_VERRSION);
	priv->ic_cut = tmpvalue;

	/* Firmware download; retry the whole bring-up on failure. */
	bfirmwareok = rtl92e_init_fw(dev);
	if (!bfirmwareok) {
		if (retry_times < 10) {
			retry_times++;
			goto start;
		} else {
			rtStatus = false;
			goto end;
		}
	}

	if (priv->rst_progress == RESET_TYPE_NORESET) {
		rtStatus = rtl92e_config_rf(dev);
		if (!rtStatus) {
			netdev_info(dev, "RF Config failed\n");
			return rtStatus;
		}
	}

	rtl92e_set_bb_reg(dev, rFPGA0_RFMOD, bCCKEn, 0x1);
	rtl92e_set_bb_reg(dev, rFPGA0_RFMOD, bOFDMEn, 0x1);

	rtl92e_writeb(dev, 0x87, 0x0);

	/* Restore the previous RF off reason, or declare the RF on. */
	if (priv->rtllib->rf_off_reason > RF_CHANGE_BY_PS) {
		rtl92e_set_rf_state(dev, rf_off, priv->rtllib->rf_off_reason);
	} else if (priv->rtllib->rf_off_reason >= RF_CHANGE_BY_IPS) {
		rtl92e_set_rf_state(dev, rf_off, priv->rtllib->rf_off_reason);
	} else {
		priv->rtllib->rf_power_state = rf_on;
		priv->rtllib->rf_off_reason = 0;
	}

	if (priv->rtllib->FwRWRF)
		priv->rf_mode = RF_OP_By_FW;
	else
		priv->rf_mode = RF_OP_By_SW_3wire;

	if (priv->rst_progress == RESET_TYPE_NORESET) {
		rtl92e_dm_init_txpower_tracking(dev);

		if (priv->ic_cut >= IC_VersionCut_D) {
			/* Seed the TX power tracking index from the current
			 * OFDM/CCK gain table entries.
			 */
			tmpRegA = rtl92e_get_bb_reg(dev, rOFDM0_XATxIQImbalance,
						    bMaskDWord);
			rtl92e_get_bb_reg(dev, rOFDM0_XCTxIQImbalance, bMaskDWord);

			for (i = 0; i < TX_BB_GAIN_TABLE_LEN; i++) {
				if (tmpRegA == dm_tx_bb_gain[i]) {
					priv->rfa_txpowertrackingindex = i;
					priv->rfa_txpowertrackingindex_real = i;
					priv->rfa_txpowertracking_default =
						 priv->rfa_txpowertrackingindex;
					break;
				}
			}

			TempCCk = rtl92e_get_bb_reg(dev, rCCK0_TxFilter1,
						    bMaskByte2);

			for (i = 0; i < CCK_TX_BB_GAIN_TABLE_LEN; i++) {
				if (TempCCk == dm_cck_tx_bb_gain[i][0]) {
					priv->cck_present_attn_20m_def = i;
					break;
				}
			}
			priv->cck_present_attn_40m_def = 0;
			priv->cck_present_attn_diff = 0;
			priv->cck_present_attn =
				  priv->cck_present_attn_20m_def;
			priv->btxpower_tracking = false;
		}
	}
	rtl92e_irq_enable(dev);
end:
	priv->being_init_adapter = false;
	return rtStatus;
}
/* Push the current network's basic rates and BSSID to the hardware; in
 * ad-hoc mode also program the beacon timing registers.
 */
static void _rtl92e_net_update(struct net_device *dev)
{
	struct r8192_priv *priv = rtllib_priv(dev);
	struct rtllib_network *net = &priv->rtllib->current_network;
	u16 rate_config = 0;
	u16 bcn_cfg = 0;

	rtl92e_config_rate(dev, &rate_config);
	priv->dot11_current_preamble_mode = PREAMBLE_AUTO;
	rate_config &= 0x15f;
	priv->basic_rate = rate_config;

	/* BSSIDR is written as a 2-byte + 4-byte pair (2-byte aligned). */
	rtl92e_writew(dev, BSSIDR, *(u16 *)net->bssid);
	rtl92e_writel(dev, BSSIDR + 2, *(u32 *)(net->bssid + 2));

	if (priv->rtllib->iw_mode != IW_MODE_ADHOC)
		return;

	rtl92e_writew(dev, ATIMWND, 2);
	rtl92e_writew(dev, BCN_DMATIME, 256);
	rtl92e_writew(dev, BCN_INTERVAL, net->beacon_interval);
	rtl92e_writew(dev, BCN_DRV_EARLY_INT, 10);
	rtl92e_writeb(dev, BCN_ERR_THRESH, 100);

	/* Contention window 6, IFS 0xf. */
	bcn_cfg |= 6 << BCN_TCFG_CW_SHIFT;
	bcn_cfg |= 0xf << BCN_TCFG_IFS;
	rtl92e_writew(dev, BCN_TCFG, bcn_cfg);
}
/* React to a link state change: refresh rates/MSR and toggle BSSID
 * filtering in RCR (unless Intel promiscuous mode is active).
 */
void rtl92e_link_change(struct net_device *dev)
{
	struct r8192_priv *priv = rtllib_priv(dev);
	struct rtllib_device *ieee = priv->rtllib;

	if (!priv->up)
		return;

	if (ieee->link_state == MAC80211_LINKED) {
		_rtl92e_net_update(dev);
		rtl92e_update_ratr_table(dev);
		if (ieee->pairwise_key_type == KEY_TYPE_WEP40 ||
		    ieee->pairwise_key_type == KEY_TYPE_WEP104)
			rtl92e_enable_hw_security_config(dev);
	} else {
		rtl92e_writeb(dev, 0x173, 0);
	}
	_rtl92e_update_msr(dev);

	if (ieee->iw_mode == IW_MODE_INFRA || ieee->iw_mode == IW_MODE_ADHOC) {
		u32 reg = rtl92e_readl(dev, RCR);

		if (priv->rtllib->link_state == MAC80211_LINKED) {
			if (!ieee->intel_promiscuous_md_info.promiscuous_on) {
				reg |= RCR_CBSSID;
				priv->receive_config = reg;
			}
		} else {
			reg &= ~RCR_CBSSID;
			priv->receive_config = reg;
		}
		rtl92e_writel(dev, RCR, reg);
	}
}
/* Toggle "accept all destination addresses" (RCR_AAP) in the cached RCR
 * and optionally push the new value to the hardware.
 */
void rtl92e_set_monitor_mode(struct net_device *dev, bool bAllowAllDA,
			     bool WriteIntoReg)
{
	struct r8192_priv *priv = rtllib_priv(dev);
	u32 rcr = priv->receive_config;

	if (bAllowAllDA)
		rcr |= RCR_AAP;
	else
		rcr &= ~RCR_AAP;
	priv->receive_config = rcr;

	if (WriteIntoReg)
		rtl92e_writel(dev, RCR, priv->receive_config);
}
/* Map a management-layer rate code (MGN_*) to the hardware descriptor
 * rate code (DESC90_*).  Unknown rates fall back to 1 Mb/s.
 */
static u8 _rtl92e_rate_mgn_to_hw(u8 rate)
{
	static const struct {
		u8 mgn;
		u8 hw;
	} rate_map[] = {
		{MGN_1M, DESC90_RATE1M},       {MGN_2M, DESC90_RATE2M},
		{MGN_5_5M, DESC90_RATE5_5M},   {MGN_11M, DESC90_RATE11M},
		{MGN_6M, DESC90_RATE6M},       {MGN_9M, DESC90_RATE9M},
		{MGN_12M, DESC90_RATE12M},     {MGN_18M, DESC90_RATE18M},
		{MGN_24M, DESC90_RATE24M},     {MGN_36M, DESC90_RATE36M},
		{MGN_48M, DESC90_RATE48M},     {MGN_54M, DESC90_RATE54M},
		{MGN_MCS0, DESC90_RATEMCS0},   {MGN_MCS1, DESC90_RATEMCS1},
		{MGN_MCS2, DESC90_RATEMCS2},   {MGN_MCS3, DESC90_RATEMCS3},
		{MGN_MCS4, DESC90_RATEMCS4},   {MGN_MCS5, DESC90_RATEMCS5},
		{MGN_MCS6, DESC90_RATEMCS6},   {MGN_MCS7, DESC90_RATEMCS7},
		{MGN_MCS8, DESC90_RATEMCS8},   {MGN_MCS9, DESC90_RATEMCS9},
		{MGN_MCS10, DESC90_RATEMCS10}, {MGN_MCS11, DESC90_RATEMCS11},
		{MGN_MCS12, DESC90_RATEMCS12}, {MGN_MCS13, DESC90_RATEMCS13},
		{MGN_MCS14, DESC90_RATEMCS14}, {MGN_MCS15, DESC90_RATEMCS15},
		{0x80 | 0x20, DESC90_RATEMCS32},
	};
	size_t i;

	for (i = 0; i < sizeof(rate_map) / sizeof(rate_map[0]); i++)
		if (rate_map[i].mgn == rate)
			return rate_map[i].hw;

	return DESC90_RATE1M;
}
/* Translate a driver HW queue ID into the firmware queue selector.
 * Warns and returns 0 for an unknown queue ID.
 */
static u8 _rtl92e_hw_queue_to_fw_queue(struct net_device *dev, u8 QueueID,
				       u8 priority)
{
	switch (QueueID) {
	case BE_QUEUE:
		return QSLT_BE;
	case BK_QUEUE:
		return QSLT_BK;
	case VO_QUEUE:
		return QSLT_VO;
	case VI_QUEUE:
		return QSLT_VI;
	case MGNT_QUEUE:
		return QSLT_MGNT;
	case BEACON_QUEUE:
		return QSLT_BEACON;
	case TXCMD_QUEUE:
		return QSLT_CMD;
	case HIGH_QUEUE:
		return QSLT_HIGH;
	default:
		netdev_warn(dev, "%s(): Impossible Queue Selection: %d\n",
			    __func__, QueueID);
		return 0x0;
	}
}
/* Decide whether a frame uses short timing: for HT, short GI is honoured
 * only at MCS15; for legacy rates, short preamble applies.
 */
static u8 _rtl92e_query_is_short(u8 TxHT, u8 TxRate, struct cb_desc *tcb_desc)
{
	if (TxHT == 1)
		return (TxRate == DESC90_RATEMCS15 &&
			tcb_desc->bUseShortGI) ? 1 : 0;

	return tcb_desc->bUseShortPreamble ? 1 : 0;
}
/* rtl92e_fill_tx_desc - build the firmware TX info header and the TX
 * descriptor for an outgoing data frame.
 * @dev: network device
 * @pdesc: hardware TX descriptor to fill
 * @cb_desc: per-frame control block (rates, aggregation, security)
 * @skb: frame whose head contains the tx_fwinfo_8190pci header
 *
 * Change vs. original: removed a function-local `static u8 tmp` that was
 * set once and never read (dead code) in the bHwSec branch.  On DMA
 * mapping failure the descriptor is left zeroed and the function returns.
 */
void rtl92e_fill_tx_desc(struct net_device *dev, struct tx_desc *pdesc,
			 struct cb_desc *cb_desc, struct sk_buff *skb)
{
	struct r8192_priv *priv = rtllib_priv(dev);
	dma_addr_t mapping;
	struct tx_fwinfo_8190pci *pTxFwInfo;

	/* The firmware info header lives at the head of the skb. */
	pTxFwInfo = (struct tx_fwinfo_8190pci *)skb->data;
	memset(pTxFwInfo, 0, sizeof(struct tx_fwinfo_8190pci));
	pTxFwInfo->TxHT = (cb_desc->data_rate & 0x80) ? 1 : 0;
	pTxFwInfo->TxRate = _rtl92e_rate_mgn_to_hw(cb_desc->data_rate);
	pTxFwInfo->EnableCPUDur = cb_desc->bTxEnableFwCalcDur;
	pTxFwInfo->Short = _rtl92e_query_is_short(pTxFwInfo->TxHT,
						  pTxFwInfo->TxRate, cb_desc);

	if (cb_desc->bAMPDUEnable) {
		pTxFwInfo->AllowAggregation = 1;
		pTxFwInfo->RxMF = cb_desc->ampdu_factor;
		pTxFwInfo->RxAMD = cb_desc->ampdu_density;
	} else {
		pTxFwInfo->AllowAggregation = 0;
		pTxFwInfo->RxMF = 0;
		pTxFwInfo->RxAMD = 0;
	}

	/* RTS/CTS protection settings. */
	pTxFwInfo->RtsEnable = (cb_desc->bRTSEnable) ? 1 : 0;
	pTxFwInfo->CtsEnable = (cb_desc->bCTSEnable) ? 1 : 0;
	pTxFwInfo->RtsSTBC = (cb_desc->bRTSSTBC) ? 1 : 0;
	pTxFwInfo->RtsHT = (cb_desc->rts_rate & 0x80) ? 1 : 0;
	pTxFwInfo->RtsRate = _rtl92e_rate_mgn_to_hw(cb_desc->rts_rate);
	pTxFwInfo->RtsBandwidth = 0;
	pTxFwInfo->RtsSubcarrier = cb_desc->RTSSC;
	pTxFwInfo->RtsShort = (pTxFwInfo->RtsHT == 0) ?
			  (cb_desc->bRTSUseShortPreamble ? 1 : 0) :
			  (cb_desc->bRTSUseShortGI ? 1 : 0);
	if (priv->current_chnl_bw == HT_CHANNEL_WIDTH_20_40) {
		if (cb_desc->bPacketBW) {
			pTxFwInfo->TxBandwidth = 1;
			pTxFwInfo->TxSubCarrier = 0;
		} else {
			pTxFwInfo->TxBandwidth = 0;
			pTxFwInfo->TxSubCarrier = priv->n_cur_40mhz_prime_sc;
		}
	} else {
		pTxFwInfo->TxBandwidth = 0;
		pTxFwInfo->TxSubCarrier = 0;
	}

	memset((u8 *)pdesc, 0, 12);

	mapping = dma_map_single(&priv->pdev->dev, skb->data, skb->len,
				 DMA_TO_DEVICE);
	if (dma_mapping_error(&priv->pdev->dev, mapping)) {
		/* Leave the zeroed descriptor untouched on failure. */
		netdev_err(dev, "%s(): DMA Mapping error\n", __func__);
		return;
	}

	pdesc->LINIP = 0;
	pdesc->CmdInit = 1;
	pdesc->Offset = sizeof(struct tx_fwinfo_8190pci) + 8;
	pdesc->PktSize = skb->len - sizeof(struct tx_fwinfo_8190pci);
	pdesc->SecCAMID = 0;
	pdesc->RATid = cb_desc->ratr_index;
	pdesc->NoEnc = 1;
	pdesc->SecType = 0x0;
	if (cb_desc->bHwSec) {
		/* Hardware encryption: pick SecType from the pairwise key. */
		switch (priv->rtllib->pairwise_key_type) {
		case KEY_TYPE_WEP40:
		case KEY_TYPE_WEP104:
			pdesc->SecType = 0x1;
			pdesc->NoEnc = 0;
			break;
		case KEY_TYPE_TKIP:
			pdesc->SecType = 0x2;
			pdesc->NoEnc = 0;
			break;
		case KEY_TYPE_CCMP:
			pdesc->SecType = 0x3;
			pdesc->NoEnc = 0;
			break;
		case KEY_TYPE_NA:
			pdesc->SecType = 0x0;
			pdesc->NoEnc = 1;
			break;
		}
	}

	pdesc->PktId = 0x0;

	pdesc->QueueSelect = _rtl92e_hw_queue_to_fw_queue(dev,
							  cb_desc->queue_index,
							  cb_desc->priority);
	pdesc->TxFWInfoSize = sizeof(struct tx_fwinfo_8190pci);

	pdesc->DISFB = cb_desc->tx_dis_rate_fallback;
	pdesc->USERATE = cb_desc->tx_use_drv_assinged_rate;

	pdesc->FirstSeg = 1;
	pdesc->LastSeg = 1;
	pdesc->TxBufferSize = skb->len;

	pdesc->TxBuffAddr = mapping;
}
/*
 * Fill a command-queue TX descriptor for @skb and hand it to the hardware
 * (OWN bit set last).  INIT packets keep the command-descriptor layout;
 * everything else is re-cast to a normal tx_desc and routed to QSLT_CMD.
 *
 * Fix: the previous version only logged a DMA mapping failure and then
 * programmed the descriptor with the invalid address anyway.  Bail out
 * instead, matching the data-path fill function above, so the NIC is never
 * given a bogus bus address.
 */
void rtl92e_fill_tx_cmd_desc(struct net_device *dev, struct tx_desc_cmd *entry,
			     struct cb_desc *cb_desc, struct sk_buff *skb)
{
	struct r8192_priv *priv = rtllib_priv(dev);
	dma_addr_t mapping = dma_map_single(&priv->pdev->dev, skb->data,
					    skb->len, DMA_TO_DEVICE);

	if (dma_mapping_error(&priv->pdev->dev, mapping)) {
		netdev_err(dev, "%s(): DMA Mapping error\n", __func__);
		return;
	}
	memset(entry, 0, 12);
	entry->LINIP = cb_desc->bLastIniPkt;
	entry->FirstSeg = 1;
	entry->LastSeg = 1;
	if (cb_desc->bCmdOrInit == DESC_PACKET_TYPE_INIT) {
		entry->CmdInit = DESC_PACKET_TYPE_INIT;
	} else {
		/* Normal command frames reuse the data descriptor layout. */
		struct tx_desc *entry_tmp = (struct tx_desc *)entry;

		entry_tmp->CmdInit = DESC_PACKET_TYPE_NORMAL;
		entry_tmp->Offset = sizeof(struct tx_fwinfo_8190pci) + 8;
		entry_tmp->PktSize = cb_desc->pkt_size + entry_tmp->Offset;
		entry_tmp->QueueSelect = QSLT_CMD;
		entry_tmp->TxFWInfoSize = 0x08;
		entry_tmp->RATid = DESC_PACKET_TYPE_INIT;
	}
	entry->TxBufferSize = skb->len;
	entry->TxBuffAddr = mapping;
	/* Setting OWN transfers the descriptor to the hardware; keep last. */
	entry->OWN = 1;
}
/*
 * Translate an RTL8190 RX-descriptor rate code into the driver's MGN_*
 * rate value.  @bIsHT selects between the legacy (CCK/OFDM) and the HT
 * (MCS) code spaces.  Unknown codes fall back to 0x02 (1 Mb/s), exactly
 * like the previous switch-based implementation.
 */
static u8 _rtl92e_rate_hw_to_mgn(bool bIsHT, u8 rate)
{
	/* { descriptor rate code, MGN_* value } pairs */
	static const u8 legacy_map[][2] = {
		{ DESC90_RATE1M,   MGN_1M },
		{ DESC90_RATE2M,   MGN_2M },
		{ DESC90_RATE5_5M, MGN_5_5M },
		{ DESC90_RATE11M,  MGN_11M },
		{ DESC90_RATE6M,   MGN_6M },
		{ DESC90_RATE9M,   MGN_9M },
		{ DESC90_RATE12M,  MGN_12M },
		{ DESC90_RATE18M,  MGN_18M },
		{ DESC90_RATE24M,  MGN_24M },
		{ DESC90_RATE36M,  MGN_36M },
		{ DESC90_RATE48M,  MGN_48M },
		{ DESC90_RATE54M,  MGN_54M },
	};
	static const u8 ht_map[][2] = {
		{ DESC90_RATEMCS0,  MGN_MCS0 },
		{ DESC90_RATEMCS1,  MGN_MCS1 },
		{ DESC90_RATEMCS2,  MGN_MCS2 },
		{ DESC90_RATEMCS3,  MGN_MCS3 },
		{ DESC90_RATEMCS4,  MGN_MCS4 },
		{ DESC90_RATEMCS5,  MGN_MCS5 },
		{ DESC90_RATEMCS6,  MGN_MCS6 },
		{ DESC90_RATEMCS7,  MGN_MCS7 },
		{ DESC90_RATEMCS8,  MGN_MCS8 },
		{ DESC90_RATEMCS9,  MGN_MCS9 },
		{ DESC90_RATEMCS10, MGN_MCS10 },
		{ DESC90_RATEMCS11, MGN_MCS11 },
		{ DESC90_RATEMCS12, MGN_MCS12 },
		{ DESC90_RATEMCS13, MGN_MCS13 },
		{ DESC90_RATEMCS14, MGN_MCS14 },
		{ DESC90_RATEMCS15, MGN_MCS15 },
		{ DESC90_RATEMCS32, 0x80 | 0x20 },
	};
	const u8 (*map)[2];
	unsigned int i, count;

	if (bIsHT) {
		map = ht_map;
		count = sizeof(ht_map) / sizeof(ht_map[0]);
	} else {
		map = legacy_map;
		count = sizeof(legacy_map) / sizeof(legacy_map[0]);
	}

	for (i = 0; i < count; i++) {
		if (map[i][0] == rate)
			return map[i][1];
	}

	return 0x02;	/* default: 1 Mb/s, same as the original fallback */
}
/*
 * Piecewise-linear remapping of a raw signal figure (roughly 0..100) onto
 * the driver's reported signal-strength scale.  @priv is unused but kept
 * for interface compatibility with callers.  Values outside every mapped
 * range are returned unchanged.
 */
static long _rtl92e_signal_scale_mapping(struct r8192_priv *priv, long currsig)
{
	if (currsig >= 61 && currsig <= 100)
		return 90 + (currsig - 60) / 4;
	if (currsig >= 41 && currsig <= 60)
		return 78 + (currsig - 40) / 2;
	if (currsig >= 31 && currsig <= 40)
		return 66 + (currsig - 30);
	if (currsig >= 21 && currsig <= 30)
		return 54 + (currsig - 20);
	if (currsig >= 5 && currsig <= 20)
		return 42 + ((currsig - 5) * 2) / 3;

	switch (currsig) {
	case 4:
		return 36;
	case 3:
		return 27;
	case 2:
		return 18;
	case 1:
		return 9;
	default:
		return currsig;
	}
}
/* True when the RX firmware info reports a CCK rate (1/2/5.5/11 Mb/s)
 * and the frame is not HT; selects which PHY-status layout to parse. */
#define rx_hal_is_cck_rate(_pdrvinfo)\
	((_pdrvinfo->RxRate == DESC90_RATE1M ||\
	_pdrvinfo->RxRate == DESC90_RATE2M ||\
	_pdrvinfo->RxRate == DESC90_RATE5_5M ||\
	_pdrvinfo->RxRate == DESC90_RATE11M) &&\
	!_pdrvinfo->RxHT)
/*
 * Parse the PHY status report the baseband appends to a received frame and
 * derive RSSI / PWDB / signal-quality / EVM figures.  Results go to both
 * @pstats (per-packet report handed up the stack) and @precord_stats (a
 * driver-kept record, zeroed here first).
 *
 * CCK frames carry their report as struct phy_sts_cck_819xpci, all other
 * rates as struct phy_sts_ofdm_819xpci; both start right after the
 * rx_fwinfo header in the RX buffer.
 */
static void _rtl92e_query_rxphystatus(
	struct r8192_priv *priv,
	struct rtllib_rx_stats *pstats,
	struct rx_desc *pdesc,
	struct rx_fwinfo *pdrvinfo,
	struct rtllib_rx_stats *precord_stats,
	bool bpacket_match_bssid,
	bool bpacket_toself,
	bool bPacketBeacon,
	bool bToSelfBA
	)
{
	struct phy_sts_ofdm_819xpci *pofdm_buf;
	struct phy_sts_cck_819xpci *pcck_buf;
	u8 *prxpkt;
	u8 i, max_spatial_stream, tmp_rxevm;
	s8 rx_pwr[4], rx_pwr_all = 0;
	s8 rx_evmX;
	u8 evm, pwdb_all;
	u32 RSSI, total_rssi = 0;
	u8 is_cck_rate = 0;
	u8 rf_rx_num = 0;
	/* Sampled once per module lifetime (see below); assumes the HSSI
	 * parameter bit never changes at runtime -- NOTE(review): confirm
	 * before reusing this code with runtime BB reconfiguration. */
	static u8 check_reg824;
	static u32 reg824_bit9;
	is_cck_rate = rx_hal_is_cck_rate(pdrvinfo);
	memset(precord_stats, 0, sizeof(struct rtllib_rx_stats));
	pstats->bPacketMatchBSSID = precord_stats->bPacketMatchBSSID =
		bpacket_match_bssid;
	pstats->bPacketToSelf = precord_stats->bPacketToSelf = bpacket_toself;
	pstats->bIsCCK = precord_stats->bIsCCK = is_cck_rate;
	pstats->bPacketBeacon = precord_stats->bPacketBeacon = bPacketBeacon;
	pstats->bToSelfBA = precord_stats->bToSelfBA = bToSelfBA;
	if (check_reg824 == 0) {
		/* Bit 9 of the HSSI parameter register selects which CCK
		 * AGC report encoding the baseband uses. */
		reg824_bit9 = rtl92e_get_bb_reg(priv->rtllib->dev,
						rFPGA0_XA_HSSIParameter2,
						0x200);
		check_reg824 = 1;
	}
	/* The PHY status report follows the rx_fwinfo header. */
	prxpkt = (u8 *)pdrvinfo;
	prxpkt += sizeof(struct rx_fwinfo);
	pcck_buf = (struct phy_sts_cck_819xpci *)prxpkt;
	pofdm_buf = (struct phy_sts_ofdm_819xpci *)prxpkt;
	pstats->RxMIMOSignalQuality[0] = -1;
	pstats->RxMIMOSignalQuality[1] = -1;
	precord_stats->RxMIMOSignalQuality[0] = -1;
	precord_stats->RxMIMOSignalQuality[1] = -1;
	if (is_cck_rate) {
		u8 report;

		/* The top bits of cck_agc_rpt select one of four gain
		 * ranges; the remaining bits give the offset within it. */
		if (!reg824_bit9) {
			report = pcck_buf->cck_agc_rpt & 0xc0;
			report >>= 6;
			switch (report) {
			case 0x3:
				rx_pwr_all = -35 - (pcck_buf->cck_agc_rpt &
					     0x3e);
				break;
			case 0x2:
				rx_pwr_all = -23 - (pcck_buf->cck_agc_rpt &
					     0x3e);
				break;
			case 0x1:
				rx_pwr_all = -11 - (pcck_buf->cck_agc_rpt &
					     0x3e);
				break;
			case 0x0:
				/* NOTE(review): +8 here vs. -8 in the other
				 * encoding below looks asymmetric but matches
				 * the vendor reference code. */
				rx_pwr_all = 8 - (pcck_buf->cck_agc_rpt & 0x3e);
				break;
			}
		} else {
			report = pcck_buf->cck_agc_rpt & 0x60;
			report >>= 5;
			switch (report) {
			case 0x3:
				rx_pwr_all = -35 -
					((pcck_buf->cck_agc_rpt &
					0x1f) << 1);
				break;
			case 0x2:
				rx_pwr_all = -23 -
					((pcck_buf->cck_agc_rpt &
					 0x1f) << 1);
				break;
			case 0x1:
				rx_pwr_all = -11 -
					 ((pcck_buf->cck_agc_rpt &
					 0x1f) << 1);
				break;
			case 0x0:
				rx_pwr_all = -8 -
					 ((pcck_buf->cck_agc_rpt &
					 0x1f) << 1);
				break;
			}
		}
		pwdb_all = rtl92e_rx_db_to_percent(rx_pwr_all);
		pstats->RxPWDBAll = precord_stats->RxPWDBAll = pwdb_all;
		pstats->RecvSignalPower = rx_pwr_all;
		if (bpacket_match_bssid) {
			u8 sq;

			/* Map the CCK SQ report onto a 0..100 quality scale;
			 * strong packets are pinned at 100. */
			if (pstats->RxPWDBAll > 40) {
				sq = 100;
			} else {
				sq = pcck_buf->sq_rpt;
				if (pcck_buf->sq_rpt > 64)
					sq = 0;
				else if (pcck_buf->sq_rpt < 20)
					sq = 100;
				else
					sq = ((64 - sq) * 100) / 44;
			}
			pstats->SignalQuality = sq;
			precord_stats->SignalQuality = sq;
			pstats->RxMIMOSignalQuality[0] = sq;
			precord_stats->RxMIMOSignalQuality[0] = sq;
			pstats->RxMIMOSignalQuality[1] = -1;
			precord_stats->RxMIMOSignalQuality[1] = -1;
		}
	} else {
		/* OFDM/HT: accumulate per-RF-path RSSI, then overall PWDB
		 * and per-stream EVM. */
		for (i = RF90_PATH_A; i < RF90_PATH_MAX; i++) {
			if (priv->brfpath_rxenable[i])
				rf_rx_num++;
			rx_pwr[i] = ((pofdm_buf->trsw_gain_X[i] & 0x3F) *
				     2) - 110;
			RSSI = rtl92e_rx_db_to_percent(rx_pwr[i]);
			if (priv->brfpath_rxenable[i])
				total_rssi += RSSI;
			if (bpacket_match_bssid) {
				pstats->RxMIMOSignalStrength[i] = RSSI;
				precord_stats->RxMIMOSignalStrength[i] = RSSI;
			}
		}
		rx_pwr_all = (((pofdm_buf->pwdb_all) >> 1) & 0x7f) - 106;
		pwdb_all = rtl92e_rx_db_to_percent(rx_pwr_all);
		pstats->RxPWDBAll = precord_stats->RxPWDBAll = pwdb_all;
		pstats->RxPower = precord_stats->RxPower = rx_pwr_all;
		pstats->RecvSignalPower = rx_pwr_all;
		/* MCS8..15 imply two spatial streams. */
		if (pdrvinfo->RxHT && pdrvinfo->RxRate >= DESC90_RATEMCS8 &&
		    pdrvinfo->RxRate <= DESC90_RATEMCS15)
			max_spatial_stream = 2;
		else
			max_spatial_stream = 1;
		for (i = 0; i < max_spatial_stream; i++) {
			tmp_rxevm = pofdm_buf->rxevm_X[i];
			rx_evmX = (s8)(tmp_rxevm);
			rx_evmX /= 2;
			evm = rtl92e_evm_db_to_percent(rx_evmX);
			if (bpacket_match_bssid) {
				if (i == 0) {
					pstats->SignalQuality = evm & 0xff;
					precord_stats->SignalQuality = evm & 0xff;
				}
				pstats->RxMIMOSignalQuality[i] = evm & 0xff;
				precord_stats->RxMIMOSignalQuality[i] = evm & 0xff;
			}
		}
	}
	/* Final reported strength: CCK uses PWDB, OFDM the RSSI average. */
	if (is_cck_rate) {
		pstats->SignalStrength = precord_stats->SignalStrength =
					 _rtl92e_signal_scale_mapping(priv,
					 (long)pwdb_all);
	} else {
		if (rf_rx_num != 0)
			pstats->SignalStrength = precord_stats->SignalStrength =
					 _rtl92e_signal_scale_mapping(priv,
					 (long)(total_rssi /= rf_rx_num));
	}
}
/*
 * Fold one packet's PHY statistics (@prev_st) into the driver's smoothed,
 * sliding-window link statistics: RSSI, beacon PWDB and EVM averages.
 *
 * NOTE(review): all window state lives in function-local statics, so this
 * is implicitly single-instance and not safe for concurrent callers —
 * presumably fine because RX processing is serialized; confirm if that
 * ever changes.
 */
static void _rtl92e_process_phyinfo(struct r8192_priv *priv, u8 *buffer,
				    struct rtllib_rx_stats *prev_st,
				    struct rtllib_rx_stats *curr_st)
{
	bool bcheck = false;
	u8 rfpath;
	u32 ij, tmp_val;
	static u32 slide_rssi_index, slide_rssi_statistics;
	static u32 slide_evm_index, slide_evm_statistics;
	static u32 last_rssi, last_evm;
	static u32 slide_beacon_adc_pwdb_index;
	static u32 slide_beacon_adc_pwdb_statistics;
	static u32 last_beacon_adc_pwdb;
	struct rtllib_hdr_3addr *hdr;
	u16 sc;
	unsigned int seq;

	hdr = (struct rtllib_hdr_3addr *)buffer;
	sc = le16_to_cpu(hdr->seq_ctl);
	seq = WLAN_GET_SEQ_SEQ(sc);
	curr_st->Seq_Num = seq;
	/* Only non-AMPDU frames update the smoothed statistics below. */
	if (!prev_st->bIsAMPDU)
		bcheck = true;
	/* RSSI sliding window: once full, drop the oldest sample. */
	if (slide_rssi_statistics++ >= PHY_RSSI_SLID_WIN_MAX) {
		slide_rssi_statistics = PHY_RSSI_SLID_WIN_MAX;
		last_rssi = priv->stats.slide_signal_strength[slide_rssi_index];
		priv->stats.slide_rssi_total -= last_rssi;
	}
	priv->stats.slide_rssi_total += prev_st->SignalStrength;
	priv->stats.slide_signal_strength[slide_rssi_index++] =
					 prev_st->SignalStrength;
	if (slide_rssi_index >= PHY_RSSI_SLID_WIN_MAX)
		slide_rssi_index = 0;
	tmp_val = priv->stats.slide_rssi_total / slide_rssi_statistics;
	priv->stats.signal_strength = rtl92e_translate_to_dbm(priv, tmp_val);
	curr_st->rssi = priv->stats.signal_strength;
	/* Only frames from our BSS (or BA to us) feed the link statistics. */
	if (!prev_st->bPacketMatchBSSID) {
		if (!prev_st->bToSelfBA)
			return;
	}
	if (!bcheck)
		return;
	/* Smooth per-RF-path RSSI for OFDM frames addressed to us. */
	if (!prev_st->bIsCCK && prev_st->bPacketToSelf) {
		for (rfpath = RF90_PATH_A; rfpath < priv->num_total_rf_path; rfpath++) {
			if (priv->stats.rx_rssi_percentage[rfpath] == 0) {
				priv->stats.rx_rssi_percentage[rfpath] =
					 prev_st->RxMIMOSignalStrength[rfpath];
			}
			/* Exponential smoothing; round up when rising. */
			if (prev_st->RxMIMOSignalStrength[rfpath]  >
			    priv->stats.rx_rssi_percentage[rfpath]) {
				priv->stats.rx_rssi_percentage[rfpath] =
					((priv->stats.rx_rssi_percentage[rfpath]
					* (RX_SMOOTH - 1)) +
					(prev_st->RxMIMOSignalStrength
					[rfpath])) / (RX_SMOOTH);
				priv->stats.rx_rssi_percentage[rfpath] =
					 priv->stats.rx_rssi_percentage[rfpath]
					 + 1;
			} else {
				priv->stats.rx_rssi_percentage[rfpath] =
				   ((priv->stats.rx_rssi_percentage[rfpath] *
				   (RX_SMOOTH - 1)) +
				   (prev_st->RxMIMOSignalStrength[rfpath])) /
				   (RX_SMOOTH);
			}
		}
	}
	/* Beacon PWDB sliding window, analogous to the RSSI window above. */
	if (prev_st->bPacketBeacon) {
		if (slide_beacon_adc_pwdb_statistics++ >=
		    PHY_Beacon_RSSI_SLID_WIN_MAX) {
			slide_beacon_adc_pwdb_statistics =
					 PHY_Beacon_RSSI_SLID_WIN_MAX;
			last_beacon_adc_pwdb = priv->stats.slide_beacon_pwdb
					       [slide_beacon_adc_pwdb_index];
			priv->stats.slide_beacon_total -= last_beacon_adc_pwdb;
		}
		priv->stats.slide_beacon_total += prev_st->RxPWDBAll;
		priv->stats.slide_beacon_pwdb[slide_beacon_adc_pwdb_index] =
					 prev_st->RxPWDBAll;
		slide_beacon_adc_pwdb_index++;
		if (slide_beacon_adc_pwdb_index >= PHY_Beacon_RSSI_SLID_WIN_MAX)
			slide_beacon_adc_pwdb_index = 0;
		prev_st->RxPWDBAll = priv->stats.slide_beacon_total /
				     slide_beacon_adc_pwdb_statistics;
		if (prev_st->RxPWDBAll >= 3)
			prev_st->RxPWDBAll -= 3;
	}
	/* Smoothed overall PWDB for frames to us / beacons / BA. */
	if (prev_st->bPacketToSelf || prev_st->bPacketBeacon ||
	    prev_st->bToSelfBA) {
		if (priv->undecorated_smoothed_pwdb < 0)
			priv->undecorated_smoothed_pwdb = prev_st->RxPWDBAll;
		if (prev_st->RxPWDBAll > (u32)priv->undecorated_smoothed_pwdb) {
			priv->undecorated_smoothed_pwdb =
					(((priv->undecorated_smoothed_pwdb) *
					(RX_SMOOTH - 1)) +
					(prev_st->RxPWDBAll)) / (RX_SMOOTH);
			priv->undecorated_smoothed_pwdb =
					 priv->undecorated_smoothed_pwdb + 1;
		} else {
			priv->undecorated_smoothed_pwdb =
					(((priv->undecorated_smoothed_pwdb) *
					(RX_SMOOTH - 1)) +
					(prev_st->RxPWDBAll)) / (RX_SMOOTH);
		}
		rtl92e_update_rx_statistics(priv, prev_st);
	}
	/* EVM sliding window and smoothed per-stream EVM percentages. */
	if (prev_st->SignalQuality != 0) {
		if (prev_st->bPacketToSelf || prev_st->bPacketBeacon ||
		    prev_st->bToSelfBA) {
			if (slide_evm_statistics++ >= PHY_RSSI_SLID_WIN_MAX) {
				slide_evm_statistics = PHY_RSSI_SLID_WIN_MAX;
				last_evm =
					 priv->stats.slide_evm[slide_evm_index];
				priv->stats.slide_evm_total -= last_evm;
			}
			priv->stats.slide_evm_total += prev_st->SignalQuality;
			priv->stats.slide_evm[slide_evm_index++] =
					 prev_st->SignalQuality;
			if (slide_evm_index >= PHY_RSSI_SLID_WIN_MAX)
				slide_evm_index = 0;
			tmp_val = priv->stats.slide_evm_total /
				  slide_evm_statistics;
			priv->stats.last_signal_strength_inpercent = tmp_val;
		}
		if (prev_st->bPacketToSelf ||
		    prev_st->bPacketBeacon ||
		    prev_st->bToSelfBA) {
			for (ij = 0; ij < 2; ij++) {
				if (prev_st->RxMIMOSignalQuality[ij] != -1) {
					if (priv->stats.rx_evm_percentage[ij] == 0)
						priv->stats.rx_evm_percentage[ij] =
						   prev_st->RxMIMOSignalQuality[ij];
					priv->stats.rx_evm_percentage[ij] =
					  ((priv->stats.rx_evm_percentage[ij] *
					  (RX_SMOOTH - 1)) +
					  (prev_st->RxMIMOSignalQuality[ij])) /
					  (RX_SMOOTH);
				}
			}
		}
	}
}
/*
 * Classify the received frame (from our BSS? addressed to us? a beacon?)
 * and run the PHY-statistics pipeline: smoothing, PHY-status parsing and
 * MPDU-stat copy-back.
 *
 * NOTE(review): bToSelfBA is initialized false and never set here, so the
 * BA-to-self path in the helpers is effectively dead from this caller.
 * previous_stats is a function-local static — single-instance state.
 */
static void _rtl92e_translate_rx_signal_stats(struct net_device *dev,
					      struct sk_buff *skb,
					      struct rtllib_rx_stats *pstats,
					      struct rx_desc *pdesc,
					      struct rx_fwinfo *pdrvinfo)
{
	struct r8192_priv *priv = (struct r8192_priv *)rtllib_priv(dev);
	bool bpacket_match_bssid, bpacket_toself;
	bool bPacketBeacon = false;
	struct rtllib_hdr_3addr *hdr;
	bool bToSelfBA = false;
	static struct rtllib_rx_stats previous_stats;
	u16 fc, type;
	u8 *tmp_buf;
	u8 *praddr;

	/* Skip the driver info and shift padding to get the 802.11 header. */
	tmp_buf = skb->data + pstats->RxDrvInfoSize + pstats->RxBufShift;
	hdr = (struct rtllib_hdr_3addr *)tmp_buf;
	fc = le16_to_cpu(hdr->frame_ctl);
	type = WLAN_FC_GET_TYPE(fc);
	praddr = hdr->addr1;
	/* Match the BSSID field appropriate for the ToDS/FromDS bits, and
	 * require an error-free frame. */
	bpacket_match_bssid =
		((type != RTLLIB_FTYPE_CTL) &&
		 ether_addr_equal(priv->rtllib->current_network.bssid,
				  (fc & RTLLIB_FCTL_TODS) ? hdr->addr1 :
				  (fc & RTLLIB_FCTL_FROMDS) ? hdr->addr2 :
				  hdr->addr3) &&
		 (!pstats->bHwError) && (!pstats->bCRC) && (!pstats->bICV));
	bpacket_toself = bpacket_match_bssid &&		/* check this */
			 ether_addr_equal(praddr, priv->rtllib->dev->dev_addr);
	if (WLAN_FC_GET_FRAMETYPE(fc) == RTLLIB_STYPE_BEACON)
		bPacketBeacon = true;
	_rtl92e_process_phyinfo(priv, tmp_buf, &previous_stats, pstats);
	_rtl92e_query_rxphystatus(priv, pstats, pdesc, pdrvinfo,
				  &previous_stats, bpacket_match_bssid,
				  bpacket_toself, bPacketBeacon, bToSelfBA);
	rtl92e_copy_mpdu_stats(pstats, &previous_stats);
}
static void _rtl92e_update_received_rate_histogram_stats(
struct net_device *dev,
struct rtllib_rx_stats *pstats)
{
struct r8192_priv *priv = (struct r8192_priv *)rtllib_priv(dev);
u32 rcvType = 1;
u32 rateIndex;
if (pstats->bCRC)
rcvType = 2;
else if (pstats->bICV)
rcvType = 3;
switch (pstats->rate) {
case MGN_1M:
rateIndex = 0;
break;
case MGN_2M:
rateIndex = 1;
break;
case MGN_5_5M:
rateIndex = 2;
break;
case MGN_11M:
rateIndex = 3;
break;
case MGN_6M:
rateIndex = 4;
break;
case MGN_9M:
rateIndex = 5;
break;
case MGN_12M:
rateIndex = 6;
break;
case MGN_18M:
rateIndex = 7;
break;
case MGN_24M:
rateIndex = 8;
break;
case MGN_36M:
rateIndex = 9;
break;
case MGN_48M:
rateIndex = 10;
break;
case MGN_54M:
rateIndex = 11;
break;
case MGN_MCS0:
rateIndex = 12;
break;
case MGN_MCS1:
rateIndex = 13;
break;
case MGN_MCS2:
rateIndex = 14;
break;
case MGN_MCS3:
rateIndex = 15;
break;
case MGN_MCS4:
rateIndex = 16;
break;
case MGN_MCS5:
rateIndex = 17;
break;
case MGN_MCS6:
rateIndex = 18;
break;
case MGN_MCS7:
rateIndex = 19;
break;
case MGN_MCS8:
rateIndex = 20;
break;
case MGN_MCS9:
rateIndex = 21;
break;
case MGN_MCS10:
rateIndex = 22;
break;
case MGN_MCS11:
rateIndex = 23;
break;
case MGN_MCS12:
rateIndex = 24;
break;
case MGN_MCS13:
rateIndex = 25;
break;
case MGN_MCS14:
rateIndex = 26;
break;
case MGN_MCS15:
rateIndex = 27;
break;
default:
rateIndex = 28;
break;
}
priv->stats.received_rate_histogram[0][rateIndex]++;
priv->stats.received_rate_histogram[rcvType][rateIndex]++;
}
/*
 * Extract per-frame statistics from the RX descriptor and the appended
 * driver-info block.  Returns false (frame must be dropped) on CRC/ICV
 * errors or frames shorter than an 802.11 header (24 bytes); true when
 * @stats has been fully populated and the FCS trimmed off @skb.
 */
bool rtl92e_get_rx_stats(struct net_device *dev, struct rtllib_rx_stats *stats,
			 struct rx_desc *pdesc, struct sk_buff *skb)
{
	struct rx_fwinfo *pDrvInfo = NULL;

	stats->bICV = pdesc->ICV;
	stats->bCRC = pdesc->CRC32;
	stats->bHwError = pdesc->CRC32 | pdesc->ICV;

	stats->Length = pdesc->Length;
	if (stats->Length < 24)
		stats->bHwError |= 1;

	if (stats->bHwError) {
		stats->bShift = false;
		return false;
	}

	stats->RxDrvInfoSize = pdesc->RxDrvInfoSize;
	stats->RxBufShift = (pdesc->Shift) & 0x03;
	stats->Decrypted = !pdesc->SWDec;

	pDrvInfo = (struct rx_fwinfo *)(skb->data + stats->RxBufShift);

	stats->rate = _rtl92e_rate_hw_to_mgn((bool)pDrvInfo->RxHT,
					     pDrvInfo->RxRate);
	stats->bShortPreamble = pDrvInfo->SPLCP;

	_rtl92e_update_received_rate_histogram_stats(dev, stats);

	stats->bIsAMPDU = (pDrvInfo->PartAggr == 1);
	stats->bFirstMPDU = (pDrvInfo->PartAggr == 1) &&
			    (pDrvInfo->FirstAGGR == 1);

	stats->TimeStampLow = pDrvInfo->TSFL;
	stats->TimeStampHigh = rtl92e_readl(dev, TSFR + 4);
	rtl92e_update_rx_pkt_timestamp(dev, stats);

	if ((stats->RxBufShift + stats->RxDrvInfoSize) > 0)
		stats->bShift = 1;

	stats->RxIs40MHzPacket = pDrvInfo->BW;

	_rtl92e_translate_rx_signal_stats(dev, skb, stats, pdesc, pDrvInfo);
	/* Drop the trailing FCS; S_CRC_LEN is assumed to be 4 bytes to
	 * match the packetlength computation below -- NOTE(review). */
	skb_trim(skb, skb->len - S_CRC_LEN);

	stats->packetlength = stats->Length - 4;
	stats->fraglength = stats->packetlength;
	stats->fragoffset = 0;
	stats->ntotalfrag = 1;
	return true;
}
/*
 * Quiesce the NIC: drop to no-link state, stop the MAC engines and flush
 * all pending TX queues.  When @reset is false the RF is additionally
 * powered down (or, with remote wakeup, reconfigured for wake-on-WLAN).
 */
void rtl92e_stop_adapter(struct net_device *dev, bool reset)
{
	struct r8192_priv *priv = rtllib_priv(dev);
	int i;
	u8 OpMode;
	u8 u1bTmp;
	u32 ulRegRead;

	OpMode = RT_OP_MODE_NO_LINK;
	priv->rtllib->SetHwRegHandler(dev, HW_VAR_MEDIA_STATUS, &OpMode);

	if (!priv->rtllib->bSupportRemoteWakeUp) {
		/* Writing 0 to CMDR stops both TX and RX engines. */
		u1bTmp = 0x0;
		rtl92e_writeb(dev, CMDR, u1bTmp);
	}

	mdelay(20);

	if (!reset) {
		mdelay(150);

		priv->hw_rf_off_action = 2;

		if (!priv->rtllib->bSupportRemoteWakeUp) {
			rtl92e_set_rf_off(dev);
			ulRegRead = rtl92e_readl(dev, CPU_GEN);
			ulRegRead |= CPU_GEN_SYSTEM_RESET;
			rtl92e_writel(dev, CPU_GEN, ulRegRead);
		} else {
			/* Remote wakeup: arm the wake-frame CRC filters. */
			rtl92e_writel(dev, WFCRC0, 0xffffffff);
			rtl92e_writel(dev, WFCRC1, 0xffffffff);
			rtl92e_writel(dev, WFCRC2, 0xffffffff);

			rtl92e_writeb(dev, PMR, 0x5);
			rtl92e_writeb(dev, MAC_BLK_CTRL, 0xa);
		}
	}

	for (i = 0; i < MAX_QUEUE_SIZE; i++)
		skb_queue_purge(&priv->rtllib->skb_waitQ[i]);

	skb_queue_purge(&priv->skb_queue);
}
/*
 * Rebuild the rate-adaptive (RATR) register from the currently supported
 * legacy rates and the peer's HT MCS set, masked by the operating mode,
 * then notify the firmware via UFWP.  Short-GI capability sets bit 31.
 * rate_index is always 0 here, so only RATR0 is ever programmed.
 */
void rtl92e_update_ratr_table(struct net_device *dev)
{
	struct r8192_priv *priv = rtllib_priv(dev);
	struct rtllib_device *ieee = priv->rtllib;
	u8 *pMcsRate = ieee->dot11ht_oper_rate_set;
	u32 ratr_value = 0;
	u16 rate_config = 0;
	u8 rate_index = 0;

	rtl92e_config_rate(dev, &rate_config);
	/* Low 12 bits: legacy rates; bits 12+: first byte of the MCS set. */
	ratr_value = rate_config | *pMcsRate << 12;
	switch (ieee->mode) {
	case WIRELESS_MODE_B:
		ratr_value &= 0x0000000F;
		break;
	case WIRELESS_MODE_G:
	case WIRELESS_MODE_G | WIRELESS_MODE_B:
		ratr_value &= 0x00000FF7;
		break;
	case WIRELESS_MODE_N_24G:
		/* Static MIMO power save limits the peer to one stream. */
		if (ieee->ht_info->peer_mimo_ps == 0)
			ratr_value &= 0x0007F007;
		else
			ratr_value &= 0x000FF007;
		break;
	default:
		break;
	}
	ratr_value &= 0x0FFFFFFF;
	if (ieee->ht_info->cur_tx_bw40mhz &&
	    ieee->ht_info->bCurShortGI40MHz)
		ratr_value |= 0x80000000;
	else if (!ieee->ht_info->cur_tx_bw40mhz &&
		 ieee->ht_info->bCurShortGI20MHz)
		ratr_value |= 0x80000000;
	rtl92e_writel(dev, RATR0 + rate_index * 4, ratr_value);
	rtl92e_writeb(dev, UFWP, 1);
}
/*
 * One-time initialization of driver-private defaults: softmac feature
 * set, TX headroom for the firmware info header, retry limits, and the
 * RCR/IMR register values used when the adapter is started.
 */
void
rtl92e_init_variables(struct net_device *dev)
{
	struct r8192_priv *priv = rtllib_priv(dev);

	strscpy(priv->nick, "rtl8192E", sizeof(priv->nick));

	priv->rtllib->softmac_features  = IEEE_SOFTMAC_SCAN |
		IEEE_SOFTMAC_ASSOCIATE | IEEE_SOFTMAC_PROBERQ |
		IEEE_SOFTMAC_PROBERS | IEEE_SOFTMAC_TX_QUEUE;

	/* Reserve room for the tx_fwinfo header prepended to every frame. */
	priv->rtllib->tx_headroom = sizeof(struct tx_fwinfo_8190pci);

	priv->short_retry_limit = 0x30;
	priv->long_retry_limit = 0x30;

	priv->receive_config = RCR_ADD3 |
		RCR_AMF | RCR_ADF |
		RCR_AICV |
		RCR_AB | RCR_AM | RCR_APM |
		RCR_AAP | ((u32)7 << RCR_MXDMA_OFFSET) |
		((u32)7 << RCR_FIFO_OFFSET) | RCR_ONLYERLPKT;

	priv->irq_mask[0] = (u32)(IMR_ROK | IMR_VODOK | IMR_VIDOK |
			    IMR_BEDOK | IMR_BKDOK | IMR_HCCADOK |
			    IMR_MGNTDOK | IMR_COMDOK | IMR_HIGHDOK |
			    IMR_BDOK | IMR_RXCMDOK | IMR_TIMEOUT0 |
			    IMR_RDU | IMR_RXFOVW | IMR_TXFOVW |
			    IMR_BcnInt | IMR_TBDOK | IMR_TBDER);

	priv->bfirst_after_down = false;
}
/* Unmask the interrupts selected in irq_mask[0] and mark IRQs enabled. */
void rtl92e_enable_irq(struct net_device *dev)
{
	struct r8192_priv *priv = (struct r8192_priv *)rtllib_priv(dev);

	priv->irq_enabled = 1;

	rtl92e_writel(dev, INTA_MASK, priv->irq_mask[0]);
}
/* Mask all adapter interrupts and mark IRQs disabled. */
void rtl92e_disable_irq(struct net_device *dev)
{
	struct r8192_priv *priv = (struct r8192_priv *)rtllib_priv(dev);

	rtl92e_writel(dev, INTA_MASK, 0);

	priv->irq_enabled = 0;
}
/* Program the MPDU RX ring's DMA base address into the hardware. */
void rtl92e_enable_rx(struct net_device *dev)
{
	struct r8192_priv *priv = (struct r8192_priv *)rtllib_priv(dev);

	rtl92e_writel(dev, RDQDA, priv->rx_ring_dma[RX_MPDU_QUEUE]);
}
/* Descriptor-base registers, indexed by TX queue number (BK..beacon). */
static const u32 TX_DESC_BASE[] = {
	BKQDA, BEQDA, VIQDA, VOQDA, HCCAQDA, CQDA, MQDA, HQDA, BQDA
};

/* Program every TX ring's DMA base address into its queue register. */
void rtl92e_enable_tx(struct net_device *dev)
{
	struct r8192_priv *priv = (struct r8192_priv *)rtllib_priv(dev);
	u32 i;

	for (i = 0; i < MAX_TX_QUEUE_COUNT; i++)
		rtl92e_writel(dev, TX_DESC_BASE[i], priv->tx_ring[i].dma);
}
/* Read the pending interrupt status into *@p_inta and acknowledge it
 * (the ISR register is write-1-to-clear). */
void rtl92e_ack_irq(struct net_device *dev, u32 *p_inta)
{
	*p_inta = rtl92e_readl(dev, ISR);
	rtl92e_writel(dev, ISR, *p_inta);
}
/*
 * Detect a hung RX path for the silent-reset watchdog.  The hardware RX
 * counter (reg 0x130) is sampled at an interval that scales with link
 * quality (better link -> more frequent checks); a "stuck" event is
 * recorded per slot, and only when all 4 consecutive slots saw no counter
 * movement is the RX path declared stuck.
 *
 * Fix: the original re-summed the slot array a second time after the
 * verdict was already made; that second loop's result was never used, so
 * the dead code has been removed.  Behavior is unchanged.
 */
bool rtl92e_is_rx_stuck(struct net_device *dev)
{
	struct r8192_priv *priv = rtllib_priv(dev);
	u16 RegRxCounter = rtl92e_readw(dev, 0x130);
	bool bStuck = false;
	static u8 rx_chk_cnt;
	u32 SlotIndex = 0, TotalRxStuckCount = 0;
	u8 i;
	u8 SilentResetRxSoltNum = 4;

	/* Throttle the check rate: strong signal -> check every call,
	 * mid -> every 2nd, weak -> every 4th, very weak -> every 8th. */
	rx_chk_cnt++;
	if (priv->undecorated_smoothed_pwdb >= (RATE_ADAPTIVE_TH_HIGH + 5)) {
		rx_chk_cnt = 0;
	} else if ((priv->undecorated_smoothed_pwdb < (RATE_ADAPTIVE_TH_HIGH + 5))
	  && (((priv->current_chnl_bw != HT_CHANNEL_WIDTH_20) &&
	  (priv->undecorated_smoothed_pwdb >= RATE_ADAPTIVE_TH_LOW_40M))
	  || ((priv->current_chnl_bw == HT_CHANNEL_WIDTH_20) &&
	  (priv->undecorated_smoothed_pwdb >= RATE_ADAPTIVE_TH_LOW_20M)))) {
		if (rx_chk_cnt < 2)
			return bStuck;
		rx_chk_cnt = 0;
	} else if ((((priv->current_chnl_bw != HT_CHANNEL_WIDTH_20) &&
		  (priv->undecorated_smoothed_pwdb < RATE_ADAPTIVE_TH_LOW_40M)) ||
		((priv->current_chnl_bw == HT_CHANNEL_WIDTH_20) &&
		 (priv->undecorated_smoothed_pwdb < RATE_ADAPTIVE_TH_LOW_20M))) &&
		priv->undecorated_smoothed_pwdb >= VERY_LOW_RSSI) {
		if (rx_chk_cnt < 4)
			return bStuck;
		rx_chk_cnt = 0;
	} else {
		if (rx_chk_cnt < 8)
			return bStuck;
		rx_chk_cnt = 0;
	}

	SlotIndex = (priv->silent_reset_rx_slot_index++) % SilentResetRxSoltNum;

	if (priv->rx_ctr == RegRxCounter) {
		priv->silent_reset_rx_stuck_event[SlotIndex] = 1;

		for (i = 0; i < SilentResetRxSoltNum; i++)
			TotalRxStuckCount += priv->silent_reset_rx_stuck_event[i];

		if (TotalRxStuckCount == SilentResetRxSoltNum)
			bStuck = true;
	} else {
		priv->silent_reset_rx_stuck_event[SlotIndex] = 0;
	}

	priv->rx_ctr = RegRxCounter;

	return bStuck;
}
/*
 * Report whether the TX path appears stuck: true when the hardware TX
 * counter (reg 0x128) has not advanced since the previous call.  The
 * latest counter value is always cached for the next comparison.
 */
bool rtl92e_is_tx_stuck(struct net_device *dev)
{
	struct r8192_priv *priv = rtllib_priv(dev);
	u16 reg_tx_counter = rtl92e_readw(dev, 0x128);
	bool stuck = (priv->tx_counter == reg_tx_counter);

	priv->tx_counter = reg_tx_counter;
	return stuck;
}
/*
 * 802.11n may only be used when the AP's security setup permits it:
 * WEP and TKIP pairwise ciphers force legacy operation.  Returns true
 * (N allowed) when no security callback is registered or when the AP
 * uses neither WEP nor TKIP.
 */
bool rtl92e_get_nmode_support_by_sec(struct net_device *dev)
{
	struct r8192_priv *priv = rtllib_priv(dev);
	struct rtllib_device *ieee = priv->rtllib;

	if (!ieee->rtllib_ap_sec_type)
		return true;

	return !(ieee->rtllib_ap_sec_type(ieee) &
		 (SEC_ALG_WEP | SEC_ALG_TKIP));
}
/* Report whether the associated AP supports half-N 2.4 GHz operation. */
bool rtl92e_is_halfn_supported_by_ap(struct net_device *dev)
{
	struct r8192_priv *priv = rtllib_priv(dev);

	return priv->rtllib->bHalfWirelessN24GMode;
}
/* ==== end of drivers/staging/rtl8192e/rtl8192e/r8192E_dev.c (linux-master);
 * the following section is r8192E_phy.c ==== */
// SPDX-License-Identifier: GPL-2.0
/*
* Copyright(c) 2008 - 2010 Realtek Corporation. All rights reserved.
*
* Contact Information: wlanfae <[email protected]>
*/
#include <linux/bitops.h>
#include "rtl_core.h"
#include "r8192E_hw.h"
#include "r8192E_phyreg.h"
#include "r8190P_rtl8256.h"
#include "r8192E_phy.h"
#include "rtl_dm.h"
#include "table.h"
/*************************Define local function prototype**********************/
static u32 _rtl92e_phy_rf_fw_read(struct net_device *dev,
enum rf90_radio_path eRFPath, u32 Offset);
static void _rtl92e_phy_rf_fw_write(struct net_device *dev,
enum rf90_radio_path eRFPath, u32 Offset,
u32 Data);
/*
 * Return the bit position of the least-significant set bit of @dwBitMask
 * (i.e. how far a field value must be shifted left to align with the
 * mask), or 32 when the mask is empty.
 */
static u32 _rtl92e_calculate_bit_shift(u32 dwBitMask)
{
	return dwBitMask ? (u32)(ffs(dwBitMask) - 1) : 32;
}
/*
 * Write @dwData into the bit field selected by @dwBitMask of baseband
 * register @dwRegAddr.  A full-word mask (bMaskDWord) is written directly;
 * any narrower mask takes a read-modify-write cycle.
 */
void rtl92e_set_bb_reg(struct net_device *dev, u32 dwRegAddr, u32 dwBitMask,
		       u32 dwData)
{
	u32 reg, shift;

	if (dwBitMask == bMaskDWord) {
		rtl92e_writel(dev, dwRegAddr, dwData);
		return;
	}

	reg = rtl92e_readl(dev, dwRegAddr);
	shift = _rtl92e_calculate_bit_shift(dwBitMask);
	rtl92e_writel(dev, dwRegAddr,
		      (reg & ~dwBitMask) | (dwData << shift));
}
/*
 * Read baseband register @dwRegAddr and return the value of the bit field
 * selected by @dwBitMask, shifted down to bit 0.
 */
u32 rtl92e_get_bb_reg(struct net_device *dev, u32 dwRegAddr, u32 dwBitMask)
{
	u32 reg = rtl92e_readl(dev, dwRegAddr);

	return (reg & dwBitMask) >> _rtl92e_calculate_bit_shift(dwBitMask);
}
/*
 * Read RF register @Offset on path @eRFPath through the BB's 3-wire
 * (LSSI) serial interface.  Offsets above 15/30 require paging bits in
 * RF register 0 to be set first; the cached rf_reg_0value is updated and
 * pushed through rf3wireOffset around the access.  The statement order
 * here (parameter setup, read-edge toggle, 1 ms settle, readback,
 * register-0 restore) is hardware-mandated — do not reorder.
 */
static u32 _rtl92e_phy_rf_read(struct net_device *dev,
			       enum rf90_radio_path eRFPath, u32 Offset)
{
	struct r8192_priv *priv = rtllib_priv(dev);
	u32 ret = 0;
	u32 NewOffset = 0;
	struct bb_reg_definition *pPhyReg = &priv->phy_reg_def[eRFPath];

	Offset &= 0x3f;

	rtl92e_set_bb_reg(dev, rFPGA0_AnalogParameter4, 0xf00, 0x0);
	if (Offset >= 31) {
		/* Page 2: set paging bits and rebase the offset. */
		priv->rf_reg_0value[eRFPath] |= 0x140;
		rtl92e_set_bb_reg(dev, pPhyReg->rf3wireOffset,
				  bMaskDWord,
				  (priv->rf_reg_0value[eRFPath] << 16));
		NewOffset = Offset - 30;
	} else if (Offset >= 16) {
		/* Page 1. */
		priv->rf_reg_0value[eRFPath] |= 0x100;
		priv->rf_reg_0value[eRFPath] &= (~0x40);
		rtl92e_set_bb_reg(dev, pPhyReg->rf3wireOffset,
				  bMaskDWord,
				  (priv->rf_reg_0value[eRFPath] << 16));

		NewOffset = Offset - 15;
	} else {
		NewOffset = Offset;
	}
	rtl92e_set_bb_reg(dev, pPhyReg->rfHSSIPara2, bLSSIReadAddress,
			  NewOffset);
	/* Toggle the read edge to latch the address, then let the serial
	 * transfer complete before reading back. */
	rtl92e_set_bb_reg(dev, pPhyReg->rfHSSIPara2,  bLSSIReadEdge, 0x0);
	rtl92e_set_bb_reg(dev, pPhyReg->rfHSSIPara2,  bLSSIReadEdge, 0x1);

	mdelay(1);

	ret = rtl92e_get_bb_reg(dev, pPhyReg->rfLSSIReadBack,
				bLSSIReadBackData);

	/* Restore register 0 (clear paging bits). */
	priv->rf_reg_0value[eRFPath] &= 0xebf;

	rtl92e_set_bb_reg(dev, pPhyReg->rf3wireOffset, bMaskDWord,
			  (priv->rf_reg_0value[eRFPath] << 16));

	rtl92e_set_bb_reg(dev, rFPGA0_AnalogParameter4, 0x300, 0x3);

	return ret;
}
/*
 * Write @Data to RF register @Offset on path @eRFPath through the BB's
 * 3-wire (LSSI) serial interface.  Paging for offsets above 15/30 works
 * as in _rtl92e_phy_rf_read().  A write to register 0 refreshes the
 * cached rf_reg_0value; writes to other registers restore register 0
 * afterwards.  Sequence is hardware-mandated — do not reorder.
 */
static void _rtl92e_phy_rf_write(struct net_device *dev,
				 enum rf90_radio_path eRFPath, u32 Offset,
				 u32 Data)
{
	struct r8192_priv *priv = rtllib_priv(dev);
	u32 DataAndAddr = 0, NewOffset = 0;
	struct bb_reg_definition *pPhyReg = &priv->phy_reg_def[eRFPath];

	Offset &= 0x3f;

	rtl92e_set_bb_reg(dev, rFPGA0_AnalogParameter4, 0xf00, 0x0);

	if (Offset >= 31) {
		/* Page 2. */
		priv->rf_reg_0value[eRFPath] |= 0x140;
		rtl92e_set_bb_reg(dev, pPhyReg->rf3wireOffset,
				  bMaskDWord,
				  (priv->rf_reg_0value[eRFPath] << 16));
		NewOffset = Offset - 30;
	} else if (Offset >= 16) {
		/* Page 1. */
		priv->rf_reg_0value[eRFPath] |= 0x100;
		priv->rf_reg_0value[eRFPath] &= (~0x40);

		rtl92e_set_bb_reg(dev, pPhyReg->rf3wireOffset,
				  bMaskDWord,
				  (priv->rf_reg_0value[eRFPath] << 16));
		NewOffset = Offset - 15;
	} else {
		NewOffset = Offset;
	}

	/* 3-wire frame: address in bits 0..5, data in the upper half. */
	DataAndAddr = (NewOffset & 0x3f) | (Data << 16);

	rtl92e_set_bb_reg(dev, pPhyReg->rf3wireOffset, bMaskDWord, DataAndAddr);

	if (Offset == 0x0)
		priv->rf_reg_0value[eRFPath] = Data;

	if (Offset != 0) {
		/* Restore register 0 (clear paging bits). */
		priv->rf_reg_0value[eRFPath] &= 0xebf;
		rtl92e_set_bb_reg(dev, pPhyReg->rf3wireOffset,
				  bMaskDWord,
				  (priv->rf_reg_0value[eRFPath] << 16));
	}
	rtl92e_set_bb_reg(dev, rFPGA0_AnalogParameter4, 0x300, 0x3);
}
/*
 * Write @Data into the bit field @BitMask of RF register @RegAddr on
 * @eRFPath.  Dispatches to the firmware-mediated accessors when the RF is
 * firmware-operated, otherwise to the direct 3-wire accessors.  A full
 * 12-bit mask (bMask12Bits) is written directly; narrower masks take a
 * read-modify-write.  No-op while the RF is off (except during init).
 */
void rtl92e_set_rf_reg(struct net_device *dev, enum rf90_radio_path eRFPath,
		       u32 RegAddr, u32 BitMask, u32 Data)
{
	struct r8192_priv *priv = rtllib_priv(dev);
	u32 Original_Value, BitShift, New_Value;

	if (priv->rtllib->rf_power_state != rf_on && !priv->being_init_adapter)
		return;

	if (priv->rf_mode == RF_OP_By_FW) {
		if (BitMask != bMask12Bits) {
			Original_Value = _rtl92e_phy_rf_fw_read(dev, eRFPath,
								RegAddr);
			BitShift =  _rtl92e_calculate_bit_shift(BitMask);
			New_Value = (Original_Value & ~BitMask) | (Data << BitShift);

			_rtl92e_phy_rf_fw_write(dev, eRFPath, RegAddr,
						New_Value);
		} else {
			_rtl92e_phy_rf_fw_write(dev, eRFPath, RegAddr, Data);
		}
		/* Give the firmware time to complete the RF access. */
		udelay(200);
	} else {
		if (BitMask != bMask12Bits) {
			Original_Value = _rtl92e_phy_rf_read(dev, eRFPath,
							     RegAddr);
			BitShift =  _rtl92e_calculate_bit_shift(BitMask);
			New_Value = (Original_Value & ~BitMask) | (Data << BitShift);

			_rtl92e_phy_rf_write(dev, eRFPath, RegAddr, New_Value);
		} else {
			_rtl92e_phy_rf_write(dev, eRFPath, RegAddr, Data);
		}
	}
}
/*
 * Read the bit field @BitMask of RF register @RegAddr on @eRFPath,
 * serialized by rf_mutex.  Uses the firmware-mediated read when the RF
 * is firmware-operated.  Returns 0 while the RF is off (except during
 * adapter initialization).
 */
u32 rtl92e_get_rf_reg(struct net_device *dev, enum rf90_radio_path eRFPath,
		      u32 RegAddr, u32 BitMask)
{
	u32 Original_Value, Readback_Value, BitShift;
	struct r8192_priv *priv = rtllib_priv(dev);

	if (priv->rtllib->rf_power_state != rf_on && !priv->being_init_adapter)
		return	0;
	mutex_lock(&priv->rf_mutex);
	if (priv->rf_mode == RF_OP_By_FW) {
		Original_Value = _rtl92e_phy_rf_fw_read(dev, eRFPath, RegAddr);
		/* Give the firmware time to complete the RF access. */
		udelay(200);
	} else {
		Original_Value = _rtl92e_phy_rf_read(dev, eRFPath, RegAddr);
	}
	BitShift =  _rtl92e_calculate_bit_shift(BitMask);
	Readback_Value = (Original_Value & BitMask) >> BitShift;
	mutex_unlock(&priv->rf_mutex);
	return Readback_Value;
}
/*
 * Read an RF register through the firmware mailbox (QPNR).  Bit 31 is the
 * busy flag: wait for it to clear, post the read request, wait for
 * completion, then fetch the result from RF_DATA.  Returns 0 on timeout.
 *
 * NOTE(review): the `time` budget (100 * 10 us) is shared between both
 * polling loops, so the completion wait gets whatever the first wait left
 * over — presumably intentional as an overall deadline; confirm.
 */
static u32 _rtl92e_phy_rf_fw_read(struct net_device *dev,
				  enum rf90_radio_path eRFPath, u32 Offset)
{
	u32	Data = 0;
	u8	time = 0;

	/* Request word: offset in bits 12..19, path in bits 20..21,
	 * bit 31 = go/busy. */
	Data |= ((Offset & 0xFF) << 12);
	Data |= ((eRFPath & 0x3) << 20);
	Data |= 0x80000000;
	while (rtl92e_readl(dev, QPNR) & 0x80000000) {
		if (time++ < 100)
			udelay(10);
		else
			break;
	}
	rtl92e_writel(dev, QPNR, Data);
	while (rtl92e_readl(dev, QPNR) & 0x80000000) {
		if (time++ < 100)
			udelay(10);
		else
			return 0;
	}
	return rtl92e_readl(dev, RF_DATA);
}
/*
 * Write an RF register through the firmware mailbox (QPNR).  Waits for
 * the busy flag (bit 31) to clear, then posts the request; the write is
 * fire-and-forget (completion is not polled).  Bit 22 marks the request
 * as a write, distinguishing it from _rtl92e_phy_rf_fw_read().
 */
static void _rtl92e_phy_rf_fw_write(struct net_device *dev,
				    enum rf90_radio_path eRFPath, u32 Offset,
				    u32 Data)
{
	u8	time = 0;

	/* Request word: data in low bits, offset in 12..19, path in
	 * 20..21, bit 22 = write, bit 31 = go/busy. */
	Data |= ((Offset & 0xFF) << 12);
	Data |= ((eRFPath & 0x3) << 20);
	Data |= 0x400000;
	Data |= 0x80000000;

	while (rtl92e_readl(dev, QPNR) & 0x80000000) {
		if (time++ < 100)
			udelay(10);
		else
			break;
	}
	rtl92e_writel(dev, QPNR, Data);
}
/*
 * Program the MAC/PHY register table (triples of address, mask, value)
 * into the hardware.  The power-gain variant of the table is used when
 * TX-power data was read from EEPROM.
 *
 * NOTE(review): entry 0x318 is patched to 0x00000800 *in the shared
 * global table itself*, not in a local copy — a side effect that persists
 * across calls and across devices using the same module.
 */
void rtl92e_config_mac(struct net_device *dev)
{
	u32 dwArrayLen = 0, i = 0;
	u32 *pdwArray = NULL;
	struct r8192_priv *priv = rtllib_priv(dev);

	if (priv->tx_pwr_data_read_from_eeprom) {
		dwArrayLen = RTL8192E_MACPHY_ARR_PG_LEN;
		pdwArray = RTL8192E_MACPHY_ARR_PG;
	} else {
		dwArrayLen = RTL8192E_MACPHY_ARR_LEN;
		pdwArray = RTL8192E_MACPHY_ARR;
	}
	for (i = 0; i < dwArrayLen; i += 3) {
		if (pdwArray[i] == 0x318)
			pdwArray[i + 2] = 0x00000800;
		rtl92e_set_bb_reg(dev, pdwArray[i], pdwArray[i + 1],
				  pdwArray[i + 2]);
	}
}
/*
 * Load one of the static baseband configuration tables into the hardware.
 * @ConfigType selects either the PHY register table (BB_CONFIG_PHY_REG)
 * or the AGC table (BB_CONFIG_AGC_TAB); each table is a flat sequence of
 * (register, value) pairs written with a full-word mask.  Any other
 * config type is ignored, matching the original behavior.
 */
static void _rtl92e_phy_config_bb(struct net_device *dev, u8 ConfigType)
{
	const u32 *table;
	u16 len;
	int i;

	if (ConfigType == BB_CONFIG_PHY_REG) {
		table = RTL8192E_PHY_REG_1T2R_ARR;
		len = RTL8192E_PHY_REG_1T2R_ARR_LEN;
	} else if (ConfigType == BB_CONFIG_AGC_TAB) {
		table = RTL8192E_AGCTAB_ARR;
		len = RTL8192E_AGCTAB_ARR_LEN;
	} else {
		return;
	}

	for (i = 0; i < len; i += 2)
		rtl92e_set_bb_reg(dev, table[i], bMaskDWord, table[i + 1]);
}
/*
 * Populate the per-RF-path register map: for paths A and B, record which
 * baseband registers implement the RF interface switches, the 3-wire
 * (LSSI) write port, the HSSI parameter register and the LSSI readback
 * register.  Consumed by the _rtl92e_phy_rf_read/_write helpers.
 */
static void _rtl92e_init_bb_rf_reg_def(struct net_device *dev)
{
	struct r8192_priv *priv = rtllib_priv(dev);

	priv->phy_reg_def[RF90_PATH_A].rfintfs = rFPGA0_XAB_RFInterfaceSW;
	priv->phy_reg_def[RF90_PATH_B].rfintfs = rFPGA0_XAB_RFInterfaceSW;

	priv->phy_reg_def[RF90_PATH_A].rfintfo = rFPGA0_XA_RFInterfaceOE;
	priv->phy_reg_def[RF90_PATH_B].rfintfo = rFPGA0_XB_RFInterfaceOE;

	priv->phy_reg_def[RF90_PATH_A].rfintfe = rFPGA0_XA_RFInterfaceOE;
	priv->phy_reg_def[RF90_PATH_B].rfintfe = rFPGA0_XB_RFInterfaceOE;

	priv->phy_reg_def[RF90_PATH_A].rf3wireOffset = rFPGA0_XA_LSSIParameter;
	priv->phy_reg_def[RF90_PATH_B].rf3wireOffset = rFPGA0_XB_LSSIParameter;

	priv->phy_reg_def[RF90_PATH_A].rfHSSIPara2 = rFPGA0_XA_HSSIParameter2;
	priv->phy_reg_def[RF90_PATH_B].rfHSSIPara2 = rFPGA0_XB_HSSIParameter2;

	priv->phy_reg_def[RF90_PATH_A].rfLSSIReadBack = rFPGA0_XA_LSSIReadBack;
	priv->phy_reg_def[RF90_PATH_B].rfLSSIReadBack = rFPGA0_XB_LSSIReadBack;
}
/*
 * Sanity-check a hardware block by writing four test patterns to a
 * block-specific scratch register and reading them back.  RF writes are
 * truncated to 12 bits first.  Returns true when every pattern reads
 * back intact; the MAC block has no test and always passes.
 */
bool rtl92e_check_bb_and_rf(struct net_device *dev, enum hw90_block CheckBlock,
			    enum rf90_radio_path eRFPath)
{
	bool ret = true;
	u32 i, CheckTimes = 4, dwRegRead = 0;
	u32 WriteAddr[4];
	u32 WriteData[] = {0xfffff027, 0xaa55a02f, 0x00000027, 0x55aa502f};

	/* Scratch register used for the write/readback test, per block. */
	WriteAddr[HW90_BLOCK_MAC] = 0x100;
	WriteAddr[HW90_BLOCK_PHY0] = 0x900;
	WriteAddr[HW90_BLOCK_PHY1] = 0x800;
	WriteAddr[HW90_BLOCK_RF] = 0x3;

	if (CheckBlock == HW90_BLOCK_MAC) {
		netdev_warn(dev, "%s(): No checks available for MAC block.\n",
			    __func__);
		return ret;
	}

	for (i = 0; i < CheckTimes; i++) {
		switch (CheckBlock) {
		case HW90_BLOCK_PHY0:
		case HW90_BLOCK_PHY1:
			rtl92e_writel(dev, WriteAddr[CheckBlock],
				      WriteData[i]);
			dwRegRead = rtl92e_readl(dev, WriteAddr[CheckBlock]);
			break;

		case HW90_BLOCK_RF:
			/* RF registers are only 12 bits wide. */
			WriteData[i] &= 0xfff;
			rtl92e_set_rf_reg(dev, eRFPath,
					  WriteAddr[HW90_BLOCK_RF],
					  bMask12Bits, WriteData[i]);
			mdelay(10);
			dwRegRead = rtl92e_get_rf_reg(dev, eRFPath,
						      WriteAddr[HW90_BLOCK_RF],
						      bMaskDWord);
			mdelay(10);
			break;

		default:
			ret = false;
			break;
		}

		if (dwRegRead != WriteData[i]) {
			netdev_warn(dev, "%s(): Check failed.\n", __func__);
			ret = false;
			break;
		}
	}

	return ret;
}
/*
 * Configure the baseband from the built-in parameter tables:
 * reset the BB, verify PHY0/PHY1 respond, load the PHY register table
 * with CCK/OFDM disabled, then load the AGC table.
 * Returns true on success, false if a PHY block failed its check.
 */
static bool _rtl92e_bb_config_para_file(struct net_device *dev)
{
	struct r8192_priv *priv = rtllib_priv(dev);
	bool rtStatus = true;
	u8 bRegValue = 0, eCheckItem = 0;
	u32 dwRegValue = 0;

	/* Assert the global baseband reset bit. */
	bRegValue = rtl92e_readb(dev, BB_GLOBAL_RESET);
	rtl92e_writeb(dev, BB_GLOBAL_RESET, (bRegValue | BB_GLOBAL_RESET_BIT));
	/* Hold the BB processor in reset while loading PHY registers. */
	dwRegValue = rtl92e_readl(dev, CPU_GEN);
	rtl92e_writel(dev, CPU_GEN, (dwRegValue & (~CPU_GEN_BB_RST)));
	/* Write/read-back check on both PHY blocks before configuring. */
	for (eCheckItem = (enum hw90_block)HW90_BLOCK_PHY0;
	     eCheckItem <= HW90_BLOCK_PHY1; eCheckItem++) {
		rtStatus = rtl92e_check_bb_and_rf(dev,
						  (enum hw90_block)eCheckItem,
						  (enum rf90_radio_path)0);
		if (!rtStatus)
			return rtStatus;
	}
	/* Disable CCK and OFDM while the PHY register table is loaded. */
	rtl92e_set_bb_reg(dev, rFPGA0_RFMOD, bCCKEn | bOFDMEn, 0x0);
	_rtl92e_phy_config_bb(dev, BB_CONFIG_PHY_REG);
	/* Release the BB reset, then load the AGC table. */
	dwRegValue = rtl92e_readl(dev, CPU_GEN);
	rtl92e_writel(dev, CPU_GEN, (dwRegValue | CPU_GEN_BB_RST));
	_rtl92e_phy_config_bb(dev, BB_CONFIG_AGC_TAB);
	if (priv->ic_cut > VERSION_8190_BD) {
		/* Newer chip cuts: clear the extra TX gain stages ... */
		dwRegValue = 0x0;
		rtl92e_set_bb_reg(dev, rFPGA0_TxGainStage,
				  (bXBTxAGC | bXCTxAGC | bXDTxAGC), dwRegValue);
		/* ... and apply the EEPROM crystal calibration value. */
		dwRegValue = priv->crystal_cap;
		rtl92e_set_bb_reg(dev, rFPGA0_AnalogParameter1, bXtalCap92x,
				  dwRegValue);
	}
	return rtStatus;
}
/*
 * Public baseband init entry point: set up the per-path register map,
 * then program the baseband from the parameter tables.
 * Returns true on success.
 */
bool rtl92e_config_bb(struct net_device *dev)
{
	_rtl92e_init_bb_rf_reg_def(dev);

	return _rtl92e_bb_config_para_file(dev);
}
void rtl92e_get_tx_power(struct net_device *dev)
{
struct r8192_priv *priv = rtllib_priv(dev);
priv->mcs_tx_pwr_level_org_offset[0] =
rtl92e_readl(dev, rTxAGC_Rate18_06);
priv->mcs_tx_pwr_level_org_offset[1] =
rtl92e_readl(dev, rTxAGC_Rate54_24);
priv->mcs_tx_pwr_level_org_offset[2] =
rtl92e_readl(dev, rTxAGC_Mcs03_Mcs00);
priv->mcs_tx_pwr_level_org_offset[3] =
rtl92e_readl(dev, rTxAGC_Mcs07_Mcs04);
priv->mcs_tx_pwr_level_org_offset[4] =
rtl92e_readl(dev, rTxAGC_Mcs11_Mcs08);
priv->mcs_tx_pwr_level_org_offset[5] =
rtl92e_readl(dev, rTxAGC_Mcs15_Mcs12);
priv->def_initial_gain[0] = rtl92e_readb(dev, rOFDM0_XAAGCCore1);
priv->def_initial_gain[1] = rtl92e_readb(dev, rOFDM0_XBAGCCore1);
priv->def_initial_gain[2] = rtl92e_readb(dev, rOFDM0_XCAGCCore1);
priv->def_initial_gain[3] = rtl92e_readb(dev, rOFDM0_XDAGCCore1);
priv->framesync = rtl92e_readb(dev, rOFDM0_RxDetector3);
}
/*
 * Apply the calibrated CCK and OFDM TX power for @channel (1-based).
 * Only the 93C46 EEPROM layout carries per-channel levels here; other
 * EEPROM types fall through with level 0 (as in the original code).
 */
void rtl92e_set_tx_power(struct net_device *dev, u8 channel)
{
	struct r8192_priv *priv = rtllib_priv(dev);
	u8 cck_level = 0;
	u8 ofdm_level = 0;

	if (priv->epromtype == EEPROM_93C46) {
		cck_level = priv->tx_pwr_level_cck[channel - 1];
		ofdm_level = priv->tx_pwr_level_ofdm_24g[channel - 1];
	}

	rtl92e_set_cck_tx_power(dev, cck_level);
	rtl92e_set_ofdm_tx_power(dev, ofdm_level);
}
u8 rtl92e_config_rf_path(struct net_device *dev, enum rf90_radio_path eRFPath)
{
int i;
switch (eRFPath) {
case RF90_PATH_A:
for (i = 0; i < RTL8192E_RADIO_A_ARR_LEN; i += 2) {
if (RTL8192E_RADIO_A_ARR[i] == 0xfe) {
msleep(100);
continue;
}
rtl92e_set_rf_reg(dev, eRFPath, RTL8192E_RADIO_A_ARR[i],
bMask12Bits,
RTL8192E_RADIO_A_ARR[i + 1]);
}
break;
case RF90_PATH_B:
for (i = 0; i < RTL8192E_RADIO_B_ARR_LEN; i += 2) {
if (RTL8192E_RADIO_B_ARR[i] == 0xfe) {
msleep(100);
continue;
}
rtl92e_set_rf_reg(dev, eRFPath, RTL8192E_RADIO_B_ARR[i],
bMask12Bits,
RTL8192E_RADIO_B_ARR[i + 1]);
}
break;
default:
break;
}
return 0;
}
/*
 * Push the stored per-channel CCK and OFDM TX power levels into
 * hardware for @channel (1-based index into the calibration arrays).
 */
static void _rtl92e_set_tx_power_level(struct net_device *dev, u8 channel)
{
	struct r8192_priv *priv = rtllib_priv(dev);
	int idx = channel - 1;

	rtl92e_set_cck_tx_power(dev, priv->tx_pwr_level_cck[idx]);
	rtl92e_set_ofdm_tx_power(dev, priv->tx_pwr_level_ofdm_24g[idx]);
}
/*
 * Store one software channel-switch command into @CmdTable at
 * @CmdTableIdx.  Returns true on success, false for a NULL table or an
 * out-of-bounds index (both logged).
 */
static u8 _rtl92e_phy_set_sw_chnl_cmd_array(struct net_device *dev,
					    struct sw_chnl_cmd *CmdTable,
					    u32 CmdTableIdx, u32 CmdTableSz,
					    enum sw_chnl_cmd_id CmdID,
					    u32 Para1, u32 Para2, u32 msDelay)
{
	struct sw_chnl_cmd *entry;

	/* Validate table and slot before touching anything. */
	if (!CmdTable) {
		netdev_err(dev, "%s(): CmdTable cannot be NULL.\n", __func__);
		return false;
	}
	if (CmdTableIdx >= CmdTableSz) {
		netdev_err(dev, "%s(): Invalid index requested.\n", __func__);
		return false;
	}

	entry = &CmdTable[CmdTableIdx];
	entry->CmdID = CmdID;
	entry->Para1 = Para1;
	entry->Para2 = Para2;
	entry->msDelay = msDelay;

	return true;
}
/*
 * Execute one step of the software channel-switch state machine.
 *
 * @stage selects the command table (0 = pre-common, 1 = RF-dependent,
 * 2 = post-common) and @step indexes into it; both are advanced here.
 * One command is executed per call; its post-delay is returned through
 * @delay for the caller to sleep.
 *
 * Returns true when the switch is complete (or the channel is invalid),
 * false when the caller should sleep @delay ms and call again.
 */
static u8 _rtl92e_phy_switch_channel_step(struct net_device *dev, u8 channel,
					  u8 *stage, u8 *step, u32 *delay)
{
	struct r8192_priv *priv = rtllib_priv(dev);
	struct rtllib_device *ieee = priv->rtllib;
	u32 PreCommonCmdCnt;
	u32 PostCommonCmdCnt;
	u32 RfDependCmdCnt;
	struct sw_chnl_cmd *CurrentCmd = NULL;
	u8 eRFPath;

	if (!rtllib_legal_channel(priv->rtllib, channel)) {
		netdev_err(dev, "Invalid channel requested: %d\n", channel);
		/* "true" here means "done" — abort the switch. */
		return true;
	}
	{
		/* (Re)build the three command tables on every step.
		 * NOTE(review): this is redundant work after the first call,
		 * but harmless — the tables are idempotent.
		 */
		PreCommonCmdCnt = 0;
		_rtl92e_phy_set_sw_chnl_cmd_array(dev, ieee->PreCommonCmd,
						  PreCommonCmdCnt++,
						  MAX_PRECMD_CNT,
						  CmdID_SetTxPowerLevel,
						  0, 0, 0);
		_rtl92e_phy_set_sw_chnl_cmd_array(dev, ieee->PreCommonCmd,
						  PreCommonCmdCnt++,
						  MAX_PRECMD_CNT, CmdID_End,
						  0, 0, 0);
		PostCommonCmdCnt = 0;
		_rtl92e_phy_set_sw_chnl_cmd_array(dev, ieee->PostCommonCmd,
						  PostCommonCmdCnt++,
						  MAX_POSTCMD_CNT, CmdID_End,
						  0, 0, 0);
		RfDependCmdCnt = 0;
		/* The 8256 radio only supports 2.4 GHz channels 1-14. */
		if (!(channel >= 1 && channel <= 14)) {
			netdev_err(dev,
				   "Invalid channel requested for 8256: %d\n",
				   channel);
			return false;
		}
		_rtl92e_phy_set_sw_chnl_cmd_array(dev,
						  ieee->RfDependCmd,
						  RfDependCmdCnt++,
						  MAX_RFDEPENDCMD_CNT,
						  CmdID_RF_WriteReg,
						  rZebra1_Channel,
						  channel, 10);
		_rtl92e_phy_set_sw_chnl_cmd_array(dev,
						  ieee->RfDependCmd,
						  RfDependCmdCnt++,
						  MAX_RFDEPENDCMD_CNT,
						  CmdID_End, 0, 0, 0);
		/* Fetch the current command; CmdID_End advances the stage,
		 * anything else is executed once and the loop exits via the
		 * trailing break.
		 */
		do {
			switch (*stage) {
			case 0:
				CurrentCmd = &ieee->PreCommonCmd[*step];
				break;
			case 1:
				CurrentCmd = &ieee->RfDependCmd[*step];
				break;
			case 2:
				CurrentCmd = &ieee->PostCommonCmd[*step];
				break;
			}
			if (CurrentCmd && CurrentCmd->CmdID == CmdID_End) {
				if ((*stage) == 2)
					return true;
				(*stage)++;
				(*step) = 0;
				continue;
			}
			if (!CurrentCmd)
				continue;
			switch (CurrentCmd->CmdID) {
			case CmdID_SetTxPowerLevel:
				/* Only newer chip cuts re-apply TX power here. */
				if (priv->ic_cut > VERSION_8190_BD)
					_rtl92e_set_tx_power_level(dev,
								   channel);
				break;
			case CmdID_WritePortUlong:
				rtl92e_writel(dev, CurrentCmd->Para1,
					      CurrentCmd->Para2);
				break;
			case CmdID_WritePortUshort:
				rtl92e_writew(dev, CurrentCmd->Para1,
					      CurrentCmd->Para2);
				break;
			case CmdID_WritePortUchar:
				rtl92e_writeb(dev, CurrentCmd->Para1,
					      CurrentCmd->Para2);
				break;
			case CmdID_RF_WriteReg:
				/* Para2 is the channel; shifted into the
				 * channel field of the RF register.
				 */
				for (eRFPath = 0; eRFPath <
				     priv->num_total_rf_path; eRFPath++)
					rtl92e_set_rf_reg(dev,
						 (enum rf90_radio_path)eRFPath,
						 CurrentCmd->Para1, bMask12Bits,
						 CurrentCmd->Para2 << 7);
				break;
			default:
				break;
			}
			break;
		} while (true);
	} /*for (Number of RF paths)*/
	/* Report the executed command's settle delay and advance. */
	(*delay) = CurrentCmd->msDelay;
	(*step)++;
	return false;
}
/*
 * Run the channel-switch state machine to completion, sleeping between
 * steps as requested.  Bails out early if the interface goes down.
 */
static void _rtl92e_phy_switch_channel(struct net_device *dev, u8 channel)
{
	struct r8192_priv *priv = rtllib_priv(dev);
	u32 delay = 0;
	u8 finished;

	do {
		finished = _rtl92e_phy_switch_channel_step(dev, channel,
							   &priv->sw_chnl_stage,
							   &priv->sw_chnl_step,
							   &delay);
		if (finished)
			break;
		if (delay > 0)
			msleep(delay);
	} while (priv->up);
}
/* Switch to the channel previously recorded in priv->chan by
 * rtl92e_set_channel().
 */
static void _rtl92e_phy_switch_channel_work_item(struct net_device *dev)
{
	struct r8192_priv *priv = rtllib_priv(dev);
	_rtl92e_phy_switch_channel(dev, priv->chan);
}
/*
 * Request a switch to @channel.  Rejects the request when the driver is
 * down, a switch is already running, or the channel is out of range for
 * the current 2.4 GHz wireless mode.  A channel of 0 is coerced to 1.
 * Returns true when the switch was performed.
 */
u8 rtl92e_set_channel(struct net_device *dev, u8 channel)
{
	struct r8192_priv *priv = rtllib_priv(dev);

	if (!priv->up) {
		netdev_err(dev, "%s(): Driver is not initialized\n", __func__);
		return false;
	}
	if (priv->sw_chnl_in_progress)
		return false;

	/* All supported modes are 2.4 GHz only: channels 1-14. */
	switch (priv->rtllib->mode) {
	case WIRELESS_MODE_B:
		if (channel > 14) {
			netdev_warn(dev,
				    "Channel %d not available in 802.11b.\n",
				    channel);
			return false;
		}
		break;
	case WIRELESS_MODE_G:
	case WIRELESS_MODE_N_24G:
		if (channel > 14) {
			netdev_warn(dev,
				    "Channel %d not available in 802.11g.\n",
				    channel);
			return false;
		}
		break;
	}

	priv->sw_chnl_in_progress = true;
	priv->chan = channel ? channel : 1;
	/* Reset the switch state machine before running it. */
	priv->sw_chnl_stage = 0;
	priv->sw_chnl_step = 0;

	if (priv->up)
		_rtl92e_phy_switch_channel_work_item(dev);
	priv->sw_chnl_in_progress = false;

	return true;
}
/*
 * TSSI-based CCK TX power tracking after a bandwidth switch.
 *
 * The original 20 MHz and 20/40 MHz case bodies were identical except
 * for the default-attenuation source, and the three-way channel-14
 * if/else always ended in the same rtl92e_dm_cck_txpower_adjust() call;
 * its net effect is simply bcck_in_ch14 = (channel == 14).  Behavior is
 * unchanged (an unknown bandwidth still does nothing).
 */
static void _rtl92e_cck_tx_power_track_bw_switch_tssi(struct net_device *dev)
{
	struct r8192_priv *priv = rtllib_priv(dev);
	int attn;

	/* Base attenuation depends on the new channel bandwidth. */
	switch (priv->current_chnl_bw) {
	case HT_CHANNEL_WIDTH_20:
		attn = priv->cck_present_attn_20m_def +
		       priv->cck_present_attn_diff;
		break;
	case HT_CHANNEL_WIDTH_20_40:
		attn = priv->cck_present_attn_40m_def +
		       priv->cck_present_attn_diff;
		break;
	default:
		return;
	}

	/* Clamp into the valid CCK TX BB gain table range. */
	if (attn > (CCK_TX_BB_GAIN_TABLE_LEN - 1))
		attn = CCK_TX_BB_GAIN_TABLE_LEN - 1;
	if (attn < 0)
		attn = 0;
	priv->cck_present_attn = attn;

	/* Channel 14 uses a dedicated CCK gain table; track it and
	 * always re-apply the CCK TX power setting.
	 */
	priv->bcck_in_ch14 = (priv->rtllib->current_network.channel == 14);
	rtl92e_dm_cck_txpower_adjust(dev, priv->bcck_in_ch14);
}
static void _rtl92e_cck_tx_power_track_bw_switch_thermal(struct net_device *dev)
{
struct r8192_priv *priv = rtllib_priv(dev);
if (priv->rtllib->current_network.channel == 14 &&
!priv->bcck_in_ch14)
priv->bcck_in_ch14 = true;
else if (priv->rtllib->current_network.channel != 14 &&
priv->bcck_in_ch14)
priv->bcck_in_ch14 = false;
switch (priv->current_chnl_bw) {
case HT_CHANNEL_WIDTH_20:
if (priv->rec_cck_20m_idx == 0)
priv->rec_cck_20m_idx = 6;
priv->cck_index = priv->rec_cck_20m_idx;
break;
case HT_CHANNEL_WIDTH_20_40:
priv->cck_index = priv->rec_cck_40m_idx;
break;
}
rtl92e_dm_cck_txpower_adjust(dev, priv->bcck_in_ch14);
}
/*
 * Dispatch CCK power tracking after a bandwidth change: cuts before D
 * use the thermal-meter method, D and later use TSSI.
 */
static void _rtl92e_cck_tx_power_track_bw_switch(struct net_device *dev)
{
	struct r8192_priv *priv = rtllib_priv(dev);

	if (priv->ic_cut < IC_VersionCut_D)
		_rtl92e_cck_tx_power_track_bw_switch_thermal(dev);
	else
		_rtl92e_cck_tx_power_track_bw_switch_tssi(dev);
}
/*
 * Apply the bandwidth recorded in priv->current_chnl_bw: program the
 * MAC BW_OPMODE register, the baseband RF-mode and CCK filter
 * registers, then the RF front end.  Clears the in-progress flag when
 * done.
 */
static void _rtl92e_set_bw_mode_work_item(struct net_device *dev)
{
	struct r8192_priv *priv = rtllib_priv(dev);
	u8 regBwOpMode;

	if (!priv->up) {
		netdev_err(dev, "%s(): Driver is not initialized\n", __func__);
		return;
	}
	/* MAC-level bandwidth selection. */
	regBwOpMode = rtl92e_readb(dev, BW_OPMODE);
	switch (priv->current_chnl_bw) {
	case HT_CHANNEL_WIDTH_20:
		regBwOpMode |= BW_OPMODE_20MHZ;
		rtl92e_writeb(dev, BW_OPMODE, regBwOpMode);
		break;
	case HT_CHANNEL_WIDTH_20_40:
		regBwOpMode &= ~BW_OPMODE_20MHZ;
		rtl92e_writeb(dev, BW_OPMODE, regBwOpMode);
		break;
	default:
		netdev_err(dev, "%s(): unknown Bandwidth: %#X\n", __func__,
			   priv->current_chnl_bw);
		break;
	}
	/* Baseband-level bandwidth selection and CCK TX filter setup. */
	switch (priv->current_chnl_bw) {
	case HT_CHANNEL_WIDTH_20:
		rtl92e_set_bb_reg(dev, rFPGA0_RFMOD, bRFMOD, 0x0);
		rtl92e_set_bb_reg(dev, rFPGA1_RFMOD, bRFMOD, 0x0);
		/* With TX power tracking active the CCK filter values come
		 * from the tracking code instead of these fixed defaults.
		 */
		if (!priv->btxpower_tracking) {
			rtl92e_writel(dev, rCCK0_TxFilter1, 0x1a1b0000);
			rtl92e_writel(dev, rCCK0_TxFilter2, 0x090e1317);
			rtl92e_writel(dev, rCCK0_DebugPort, 0x00000204);
		} else {
			_rtl92e_cck_tx_power_track_bw_switch(dev);
		}
		rtl92e_set_bb_reg(dev, rFPGA0_AnalogParameter1, 0x00100000, 1);
		break;
	case HT_CHANNEL_WIDTH_20_40:
		rtl92e_set_bb_reg(dev, rFPGA0_RFMOD, bRFMOD, 0x1);
		rtl92e_set_bb_reg(dev, rFPGA1_RFMOD, bRFMOD, 0x1);
		if (!priv->btxpower_tracking) {
			rtl92e_writel(dev, rCCK0_TxFilter1, 0x35360000);
			rtl92e_writel(dev, rCCK0_TxFilter2, 0x121c252e);
			rtl92e_writel(dev, rCCK0_DebugPort, 0x00000409);
		} else {
			_rtl92e_cck_tx_power_track_bw_switch(dev);
		}
		/* Select the primary 20 MHz sub-channel for CCK and OFDM. */
		rtl92e_set_bb_reg(dev, rCCK0_System, bCCKSideBand,
				  (priv->n_cur_40mhz_prime_sc >> 1));
		rtl92e_set_bb_reg(dev, rOFDM1_LSTF, 0xC00,
				  priv->n_cur_40mhz_prime_sc);
		rtl92e_set_bb_reg(dev, rFPGA0_AnalogParameter1, 0x00100000, 0);
		break;
	default:
		netdev_err(dev, "%s(): unknown Bandwidth: %#X\n", __func__,
			   priv->current_chnl_bw);
		break;
	}
	/* Finally switch the RF front end and release the request. */
	rtl92e_set_bandwidth(dev, priv->current_chnl_bw);
	atomic_dec(&(priv->rtllib->atm_swbw));
	priv->set_bw_mode_in_progress = false;
}
/*
 * Request a channel bandwidth change.  Records the new bandwidth and
 * which 20 MHz sub-channel is primary, then applies it synchronously.
 * Silently ignored when a change is already in progress.
 */
void rtl92e_set_bw_mode(struct net_device *dev, enum ht_channel_width bandwidth,
			enum ht_extchnl_offset Offset)
{
	struct r8192_priv *priv = rtllib_priv(dev);

	if (priv->set_bw_mode_in_progress)
		return;

	atomic_inc(&(priv->rtllib->atm_swbw));
	priv->set_bw_mode_in_progress = true;
	priv->current_chnl_bw = bandwidth;

	/* The primary sub-channel sits on the opposite side of the
	 * extension channel.
	 */
	switch (Offset) {
	case HT_EXTCHNL_OFFSET_LOWER:
		priv->n_cur_40mhz_prime_sc = HAL_PRIME_CHNL_OFFSET_UPPER;
		break;
	case HT_EXTCHNL_OFFSET_UPPER:
		priv->n_cur_40mhz_prime_sc = HAL_PRIME_CHNL_OFFSET_LOWER;
		break;
	default:
		priv->n_cur_40mhz_prime_sc = HAL_PRIME_CHNL_OFFSET_DONT_CARE;
		break;
	}

	_rtl92e_set_bw_mode_work_item(dev);
}
/*
 * Backup/restore the receiver initial gain around a scan.
 *
 * IG_Backup saves the four per-path AGC core values and the CCK CCA
 * threshold, then forces a fixed low gain suited to scanning.
 * IG_Restore writes the saved values back and re-applies TX power for
 * the current channel.  Firmware-based DIG is paused (UFWP 0x8) while
 * registers are touched and resumed (0x1) after a restore.
 */
void rtl92e_init_gain(struct net_device *dev, u8 Operation)
{
#define SCAN_RX_INITIAL_GAIN	0x17
#define POWER_DETECTION_TH	0x08
	struct r8192_priv *priv = rtllib_priv(dev);
	u32 BitMask;
	u8 initial_gain;

	if (priv->up) {
		switch (Operation) {
		case IG_Backup:
			initial_gain = SCAN_RX_INITIAL_GAIN;
			BitMask = bMaskByte0;
			/* Pause firmware DIG so it does not fight us. */
			if (dm_digtable.dig_algorithm ==
			    DIG_ALGO_BY_FALSE_ALARM)
				rtl92e_set_bb_reg(dev, UFWP, bMaskByte1, 0x8);
			priv->initgain_backup.xaagccore1 =
				 rtl92e_get_bb_reg(dev, rOFDM0_XAAGCCore1,
						   BitMask);
			priv->initgain_backup.xbagccore1 =
				 rtl92e_get_bb_reg(dev, rOFDM0_XBAGCCore1,
						   BitMask);
			priv->initgain_backup.xcagccore1 =
				 rtl92e_get_bb_reg(dev, rOFDM0_XCAGCCore1,
						   BitMask);
			priv->initgain_backup.xdagccore1 =
				 rtl92e_get_bb_reg(dev, rOFDM0_XDAGCCore1,
						   BitMask);
			BitMask = bMaskByte2;
			priv->initgain_backup.cca = (u8)rtl92e_get_bb_reg(dev,
						    rCCK0_CCA, BitMask);
			/* Force scan gain and power-detection threshold. */
			rtl92e_writeb(dev, rOFDM0_XAAGCCore1, initial_gain);
			rtl92e_writeb(dev, rOFDM0_XBAGCCore1, initial_gain);
			rtl92e_writeb(dev, rOFDM0_XCAGCCore1, initial_gain);
			rtl92e_writeb(dev, rOFDM0_XDAGCCore1, initial_gain);
			rtl92e_writeb(dev, 0xa0a, POWER_DETECTION_TH);
			break;
		case IG_Restore:
			BitMask = 0x7f;
			if (dm_digtable.dig_algorithm ==
			    DIG_ALGO_BY_FALSE_ALARM)
				rtl92e_set_bb_reg(dev, UFWP, bMaskByte1, 0x8);
			/* Write the saved gains back. */
			rtl92e_set_bb_reg(dev, rOFDM0_XAAGCCore1, BitMask,
					  (u32)priv->initgain_backup.xaagccore1);
			rtl92e_set_bb_reg(dev, rOFDM0_XBAGCCore1, BitMask,
					  (u32)priv->initgain_backup.xbagccore1);
			rtl92e_set_bb_reg(dev, rOFDM0_XCAGCCore1, BitMask,
					  (u32)priv->initgain_backup.xcagccore1);
			rtl92e_set_bb_reg(dev, rOFDM0_XDAGCCore1, BitMask,
					  (u32)priv->initgain_backup.xdagccore1);
			BitMask = bMaskByte2;
			rtl92e_set_bb_reg(dev, rCCK0_CCA, BitMask,
					  (u32)priv->initgain_backup.cca);
			rtl92e_set_tx_power(dev,
					 priv->rtllib->current_network.channel);
			/* Resume firmware DIG. */
			if (dm_digtable.dig_algorithm ==
			    DIG_ALGO_BY_FALSE_ALARM)
				rtl92e_set_bb_reg(dev, UFWP, bMaskByte1, 0x1);
			break;
		}
	}
}
/*
 * Power down the RF section: gate the path-A RF interface output,
 * switch off the analog blocks, and disable all OFDM TX/RX paths.
 * NOTE(review): the exact meaning of the individual analog-parameter
 * bit fields follows vendor reference code — not documented here.
 */
void rtl92e_set_rf_off(struct net_device *dev)
{
	rtl92e_set_bb_reg(dev, rFPGA0_XA_RFInterfaceOE, BIT4, 0x0);
	rtl92e_set_bb_reg(dev, rFPGA0_AnalogParameter4, 0x300, 0x0);
	rtl92e_set_bb_reg(dev, rFPGA0_AnalogParameter1, 0x18, 0x0);
	/* Disable all four OFDM TX/RX paths. */
	rtl92e_set_bb_reg(dev, rOFDM0_TRxPathEnable, 0xf, 0x0);
	rtl92e_set_bb_reg(dev, rOFDM1_TRxPathEnable, 0xf, 0x0);
	rtl92e_set_bb_reg(dev, rFPGA0_AnalogParameter1, 0x60, 0x0);
	rtl92e_set_bb_reg(dev, rFPGA0_AnalogParameter1, 0x4, 0x0);
	rtl92e_writeb(dev, ANAPAR_FOR_8192PCIE, 0x07);
}
static bool _rtl92e_set_rf_power_state(struct net_device *dev,
enum rt_rf_power_state rf_power_state)
{
struct r8192_priv *priv = rtllib_priv(dev);
struct rt_pwr_save_ctrl *psc = (struct rt_pwr_save_ctrl *)
(&priv->rtllib->pwr_save_ctrl);
bool bResult = true;
u8 i = 0, QueueID = 0;
struct rtl8192_tx_ring *ring = NULL;
if (priv->set_rf_pwr_state_in_progress)
return false;
priv->set_rf_pwr_state_in_progress = true;
switch (rf_power_state) {
case rf_on:
if ((priv->rtllib->rf_power_state == rf_off) &&
RT_IN_PS_LEVEL(psc, RT_RF_OFF_LEVL_HALT_NIC)) {
bool rtstatus;
u32 InitilizeCount = 3;
do {
InitilizeCount--;
rtstatus = rtl92e_enable_nic(dev);
} while (!rtstatus && (InitilizeCount > 0));
if (!rtstatus) {
netdev_err(dev,
"%s(): Failed to initialize Adapter.\n",
__func__);
priv->set_rf_pwr_state_in_progress = false;
return false;
}
RT_CLEAR_PS_LEVEL(psc,
RT_RF_OFF_LEVL_HALT_NIC);
} else {
rtl92e_writeb(dev, ANAPAR, 0x37);
mdelay(1);
rtl92e_set_bb_reg(dev, rFPGA0_AnalogParameter1,
0x4, 0x1);
priv->hw_rf_off_action = 0;
rtl92e_set_bb_reg(dev, rFPGA0_XA_RFInterfaceOE,
BIT4, 0x1);
rtl92e_set_bb_reg(dev, rFPGA0_AnalogParameter4,
0x300, 0x3);
rtl92e_set_bb_reg(dev, rFPGA0_AnalogParameter1,
0x18, 0x3);
rtl92e_set_bb_reg(dev, rOFDM0_TRxPathEnable,
0x3, 0x3);
rtl92e_set_bb_reg(dev, rOFDM1_TRxPathEnable,
0x3, 0x3);
rtl92e_set_bb_reg(dev, rFPGA0_AnalogParameter1,
0x60, 0x3);
}
break;
case rf_sleep:
if (priv->rtllib->rf_power_state == rf_off)
break;
for (QueueID = 0, i = 0; QueueID < MAX_TX_QUEUE; ) {
ring = &priv->tx_ring[QueueID];
if (skb_queue_len(&ring->queue) == 0) {
QueueID++;
continue;
} else {
udelay(10);
i++;
}
if (i >= MAX_DOZE_WAITING_TIMES_9x)
break;
}
rtl92e_set_rf_off(dev);
break;
case rf_off:
for (QueueID = 0, i = 0; QueueID < MAX_TX_QUEUE; ) {
ring = &priv->tx_ring[QueueID];
if (skb_queue_len(&ring->queue) == 0) {
QueueID++;
continue;
} else {
udelay(10);
i++;
}
if (i >= MAX_DOZE_WAITING_TIMES_9x)
break;
}
rtl92e_set_rf_off(dev);
break;
default:
bResult = false;
netdev_warn(dev,
"%s(): Unknown state requested: 0x%X.\n",
__func__, rf_power_state);
break;
}
if (bResult)
priv->rtllib->rf_power_state = rf_power_state;
priv->set_rf_pwr_state_in_progress = false;
return bResult;
}
/*
 * Public RF power-state entry point.  Skips the transition (returning
 * false) when the RF is already in the requested state and no hardware
 * RF-off action is pending; otherwise delegates to the worker.
 */
bool rtl92e_set_rf_power_state(struct net_device *dev,
			       enum rt_rf_power_state rf_power_state)
{
	struct r8192_priv *priv = rtllib_priv(dev);

	if (rf_power_state == priv->rtllib->rf_power_state &&
	    priv->hw_rf_off_action == 0)
		return false;

	return _rtl92e_set_rf_power_state(dev, rf_power_state);
}
/*
 * Scan hook: back up the receiver gain before scanning and restore it
 * afterwards, via the registered init-gain handler.  No-op when the
 * interface is down or the operation code is unknown.
 */
void rtl92e_scan_op_backup(struct net_device *dev, u8 Operation)
{
	struct r8192_priv *priv = rtllib_priv(dev);

	if (!priv->up)
		return;

	if (Operation == SCAN_OPT_BACKUP)
		priv->rtllib->init_gain_handler(dev, IG_Backup);
	else if (Operation == SCAN_OPT_RESTORE)
		priv->rtllib->init_gain_handler(dev, IG_Restore);
}
| linux-master | drivers/staging/rtl8192e/rtl8192e/r8192E_phy.c |
// SPDX-License-Identifier: GPL-2.0
/*
* Copyright(c) 2008 - 2010 Realtek Corporation. All rights reserved.
*
* Based on the r8180 driver, which is:
* Copyright 2004-2005 Andrea Merello <[email protected]>, et al.
*
* Contact Information: wlanfae <[email protected]>
*/
#include "rtl_core.h"
#include "r8192E_phy.h"
#include "r8192E_phyreg.h"
#include "r8190P_rtl8256.h" /* RTL8225 Radio frontend */
#include "r8192E_cmdpkt.h"
/*
 * Clear the hardware CAM (key store) by issuing the reset command to
 * the read/write CAM command register.  BIT31|BIT30 presumably select
 * "write command" + "clear all" — TODO confirm against the datasheet.
 */
void rtl92e_cam_reset(struct net_device *dev)
{
	rtl92e_writel(dev, RWCAM, BIT31 | BIT30);
}
void rtl92e_enable_hw_security_config(struct net_device *dev)
{
u8 SECR_value = 0x0;
struct r8192_priv *priv = (struct r8192_priv *)rtllib_priv(dev);
struct rtllib_device *ieee = priv->rtllib;
SECR_value = SCR_TxEncEnable | SCR_RxDecEnable;
if (((ieee->pairwise_key_type == KEY_TYPE_WEP40) ||
(ieee->pairwise_key_type == KEY_TYPE_WEP104)) &&
(priv->rtllib->auth_mode != 2)) {
SECR_value |= SCR_RxUseDK;
SECR_value |= SCR_TxUseDK;
} else if ((ieee->iw_mode == IW_MODE_ADHOC) &&
(ieee->pairwise_key_type & (KEY_TYPE_CCMP |
KEY_TYPE_TKIP))) {
SECR_value |= SCR_RxUseDK;
SECR_value |= SCR_TxUseDK;
}
ieee->hwsec_active = 1;
if ((ieee->ht_info->iot_action & HT_IOT_ACT_PURE_N_MODE) || !hwwep) {
ieee->hwsec_active = 0;
SECR_value &= ~SCR_RxDecEnable;
}
rtl92e_writeb(dev, SECR, SECR_value);
}
/*
 * Mirror a key into the software CAM shadow table so it can be
 * re-programmed into hardware later (see rtl92e_cam_restore()).
 * Out-of-range entries are silently ignored.
 */
void rtl92e_set_swcam(struct net_device *dev, u8 EntryNo, u8 KeyIndex,
		      u16 KeyType, const u8 *MacAddr, u32 *KeyContent)
{
	struct r8192_priv *priv = rtllib_priv(dev);
	struct rtllib_device *ieee = priv->rtllib;
	typeof(&ieee->swcamtable[0]) entry;

	if (EntryNo >= TOTAL_CAM_ENTRY)
		return;

	entry = &ieee->swcamtable[EntryNo];
	entry->bused = true;
	entry->key_index = KeyIndex;
	entry->key_type = KeyType;
	entry->useDK = 0;
	memcpy(entry->macaddr, MacAddr, 6);
	memcpy(entry->key_buf, (u8 *)KeyContent, 16);
}
/*
 * Program one key into the hardware CAM.
 *
 * Each CAM entry is CAM_CONTENT_COUNT 32-bit words: word 0 holds the
 * config field plus the first two MAC bytes, word 1 the remaining four
 * MAC bytes, and the following words the key material.  If the RF is
 * off due to inactivity power save, IPS is left first; other RF-off
 * reasons abort with a warning.
 */
void rtl92e_set_key(struct net_device *dev, u8 EntryNo, u8 KeyIndex,
		    u16 KeyType, const u8 *MacAddr, u8 DefaultKey,
		    u32 *KeyContent)
{
	u32 TargetCommand = 0;
	u32 TargetContent = 0;
	u16 usConfig = 0;
	u8 i;
	struct r8192_priv *priv = (struct r8192_priv *)rtllib_priv(dev);
	enum rt_rf_power_state rt_state;

	rt_state = priv->rtllib->rf_power_state;
	/* The CAM is only accessible with the RF powered. */
	if (rt_state == rf_off) {
		if (priv->rtllib->rf_off_reason > RF_CHANGE_BY_IPS) {
			netdev_warn(dev, "%s(): RF is OFF.\n",
				    __func__);
			return;
		}
		mutex_lock(&priv->rtllib->ips_mutex);
		rtl92e_ips_leave(dev);
		mutex_unlock(&priv->rtllib->ips_mutex);
	}
	priv->rtllib->is_set_key = true;
	if (EntryNo >= TOTAL_CAM_ENTRY) {
		netdev_info(dev, "%s(): Invalid CAM entry\n", __func__);
		return;
	}
	/* Config field: BIT15 = valid, key type in bits 2+, and the key
	 * index only for non-default keys.
	 */
	if (DefaultKey)
		usConfig |= BIT15 | (KeyType << 2);
	else
		usConfig |= BIT15 | (KeyType << 2) | KeyIndex;

	for (i = 0; i < CAM_CONTENT_COUNT; i++) {
		/* BIT31 = polling/write, BIT16 = write enable. */
		TargetCommand = i + CAM_CONTENT_COUNT * EntryNo;
		TargetCommand |= BIT31 | BIT16;
		if (i == 0) {
			/* Word 0: config + MAC bytes 0-1. */
			TargetContent = (u32)(*(MacAddr + 0)) << 16 |
					(u32)(*(MacAddr + 1)) << 24 |
					(u32)usConfig;
			rtl92e_writel(dev, WCAMI, TargetContent);
			rtl92e_writel(dev, RWCAM, TargetCommand);
		} else if (i == 1) {
			/* Word 1: MAC bytes 2-5. */
			TargetContent = (u32)(*(MacAddr + 2)) |
					(u32)(*(MacAddr + 3)) << 8 |
					(u32)(*(MacAddr + 4)) << 16 |
					(u32)(*(MacAddr + 5)) << 24;
			rtl92e_writel(dev, WCAMI, TargetContent);
			rtl92e_writel(dev, RWCAM, TargetCommand);
		} else {
			/* Remaining words: key material, if provided. */
			if (KeyContent != NULL) {
				rtl92e_writel(dev, WCAMI,
					      (u32)(*(KeyContent + i - 2)));
				rtl92e_writel(dev, RWCAM, TargetCommand);
				udelay(100);
			}
		}
	}
}
void rtl92e_cam_restore(struct net_device *dev)
{
u8 EntryId = 0;
struct r8192_priv *priv = rtllib_priv(dev);
u8 *MacAddr = priv->rtllib->current_network.bssid;
static u8 CAM_CONST_ADDR[4][6] = {
{0x00, 0x00, 0x00, 0x00, 0x00, 0x00},
{0x00, 0x00, 0x00, 0x00, 0x00, 0x01},
{0x00, 0x00, 0x00, 0x00, 0x00, 0x02},
{0x00, 0x00, 0x00, 0x00, 0x00, 0x03}
};
static u8 CAM_CONST_BROAD[] = {
0xff, 0xff, 0xff, 0xff, 0xff, 0xff
};
if ((priv->rtllib->pairwise_key_type == KEY_TYPE_WEP40) ||
(priv->rtllib->pairwise_key_type == KEY_TYPE_WEP104)) {
for (EntryId = 0; EntryId < 4; EntryId++) {
MacAddr = CAM_CONST_ADDR[EntryId];
if (priv->rtllib->swcamtable[EntryId].bused) {
rtl92e_set_key(dev, EntryId, EntryId,
priv->rtllib->pairwise_key_type,
MacAddr, 0,
(u32 *)(&priv->rtllib->swcamtable
[EntryId].key_buf[0]));
}
}
} else if (priv->rtllib->pairwise_key_type == KEY_TYPE_TKIP) {
if (priv->rtllib->iw_mode == IW_MODE_ADHOC) {
rtl92e_set_key(dev, 4, 0,
priv->rtllib->pairwise_key_type,
(const u8 *)dev->dev_addr, 0,
(u32 *)(&priv->rtllib->swcamtable[4].key_buf[0]));
} else {
rtl92e_set_key(dev, 4, 0,
priv->rtllib->pairwise_key_type,
MacAddr, 0,
(u32 *)(&priv->rtllib->swcamtable[4].key_buf[0]));
}
} else if (priv->rtllib->pairwise_key_type == KEY_TYPE_CCMP) {
if (priv->rtllib->iw_mode == IW_MODE_ADHOC) {
rtl92e_set_key(dev, 4, 0,
priv->rtllib->pairwise_key_type,
(const u8 *)dev->dev_addr, 0,
(u32 *)(&priv->rtllib->swcamtable[4].key_buf[0]));
} else {
rtl92e_set_key(dev, 4, 0,
priv->rtllib->pairwise_key_type, MacAddr,
0, (u32 *)(&priv->rtllib->swcamtable[4].key_buf[0]));
}
}
if (priv->rtllib->group_key_type == KEY_TYPE_TKIP) {
MacAddr = CAM_CONST_BROAD;
for (EntryId = 1; EntryId < 4; EntryId++) {
if (priv->rtllib->swcamtable[EntryId].bused) {
rtl92e_set_key(dev, EntryId, EntryId,
priv->rtllib->group_key_type,
MacAddr, 0,
(u32 *)(&priv->rtllib->swcamtable[EntryId].key_buf[0]));
}
}
if (priv->rtllib->iw_mode == IW_MODE_ADHOC) {
if (priv->rtllib->swcamtable[0].bused) {
rtl92e_set_key(dev, 0, 0,
priv->rtllib->group_key_type,
CAM_CONST_ADDR[0], 0,
(u32 *)(&priv->rtllib->swcamtable[0].key_buf[0]));
} else {
netdev_warn(dev,
"%s(): ADHOC TKIP: missing key entry.\n",
__func__);
return;
}
}
} else if (priv->rtllib->group_key_type == KEY_TYPE_CCMP) {
MacAddr = CAM_CONST_BROAD;
for (EntryId = 1; EntryId < 4; EntryId++) {
if (priv->rtllib->swcamtable[EntryId].bused) {
rtl92e_set_key(dev, EntryId, EntryId,
priv->rtllib->group_key_type,
MacAddr, 0,
(u32 *)(&priv->rtllib->swcamtable[EntryId].key_buf[0]));
}
}
if (priv->rtllib->iw_mode == IW_MODE_ADHOC) {
if (priv->rtllib->swcamtable[0].bused) {
rtl92e_set_key(dev, 0, 0,
priv->rtllib->group_key_type,
CAM_CONST_ADDR[0], 0,
(u32 *)(&priv->rtllib->swcamtable[0].key_buf[0]));
} else {
netdev_warn(dev,
"%s(): ADHOC CCMP: missing key entry.\n",
__func__);
return;
}
}
}
}
| linux-master | drivers/staging/rtl8192e/rtl8192e/rtl_cam.c |
// SPDX-License-Identifier: GPL-2.0
/*
* Copyright(c) 2008 - 2010 Realtek Corporation. All rights reserved.
*
* Contact Information: wlanfae <[email protected]>
*/
#include "rtl_core.h"
#include "r8192E_phyreg.h"
#include "r8192E_phy.h"
#include "r8190P_rtl8256.h"
/*
 * Program the RTL8256 RF front end for the requested channel bandwidth
 * on every RF path.  The register values (0x0b/0x2c/0x0e writes) come
 * from vendor reference code; their field meanings are not documented
 * here.  Only BD/BE card versions are supported.
 */
void rtl92e_set_bandwidth(struct net_device *dev,
			  enum ht_channel_width bandwidth)
{
	u8	eRFPath;
	struct r8192_priv *priv = rtllib_priv(dev);

	if (priv->card_8192_version != VERSION_8190_BD &&
	    priv->card_8192_version != VERSION_8190_BE) {
		netdev_warn(dev, "%s(): Unknown HW version.\n", __func__);
		return;
	}
	for (eRFPath = 0; eRFPath < priv->num_total_rf_path; eRFPath++) {
		switch (bandwidth) {
		case HT_CHANNEL_WIDTH_20:
			rtl92e_set_rf_reg(dev, (enum rf90_radio_path)eRFPath,
					  0x0b, bMask12Bits, 0x100);
			rtl92e_set_rf_reg(dev, (enum rf90_radio_path)eRFPath,
					  0x2c, bMask12Bits, 0x3d7);
			rtl92e_set_rf_reg(dev, (enum rf90_radio_path)eRFPath,
					  0x0e, bMask12Bits, 0x021);
			break;
		case HT_CHANNEL_WIDTH_20_40:
			rtl92e_set_rf_reg(dev, (enum rf90_radio_path)eRFPath,
					  0x0b, bMask12Bits, 0x300);
			rtl92e_set_rf_reg(dev, (enum rf90_radio_path)eRFPath,
					  0x2c, bMask12Bits, 0x3ff);
			rtl92e_set_rf_reg(dev, (enum rf90_radio_path)eRFPath,
					  0x0e, bMask12Bits, 0x0e1);
			break;
		default:
			netdev_err(dev, "%s(): Unknown bandwidth: %#X\n",
				   __func__, bandwidth);
			break;
		}
	}
}
/*
 * Bring up and verify every RF path.
 *
 * For each path: save the RF-environment setting, switch the 3-wire
 * serial interface into software control, run a write/read-back check,
 * then load the radio table — retrying up to 5 times until register
 * 0x3 reads back the expected 0x7f1 — and finally restore the saved
 * RF-environment setting.  Returns true when all paths initialized.
 */
bool rtl92e_config_rf(struct net_device *dev)
{
	u32 u4RegValue = 0;
	u8 eRFPath;
	bool rtStatus = true;
	struct bb_reg_definition *pPhyReg;
	struct r8192_priv *priv = rtllib_priv(dev);
	u32	RegOffSetToBeCheck = 0x3;
	u32	RegValueToBeCheck = 0x7f1;
	u32	RF3_Final_Value = 0;
	u8	ConstRetryTimes = 5, RetryTimes = 5;
	u8 ret = 0;

	priv->num_total_rf_path = RTL819X_TOTAL_RF_PATH;
	for (eRFPath = (enum rf90_radio_path)RF90_PATH_A;
	     eRFPath < priv->num_total_rf_path; eRFPath++) {
		pPhyReg = &priv->phy_reg_def[eRFPath];
		/* Save the current RF-environment field (path B uses the
		 * upper half of the shared register).
		 */
		switch (eRFPath) {
		case RF90_PATH_A:
			u4RegValue = rtl92e_get_bb_reg(dev, pPhyReg->rfintfs,
						       bRFSI_RFENV);
			break;
		case RF90_PATH_B:
			u4RegValue = rtl92e_get_bb_reg(dev, pPhyReg->rfintfs,
						       bRFSI_RFENV << 16);
			break;
		}
		/* Take software control of the 3-wire serial interface. */
		rtl92e_set_bb_reg(dev, pPhyReg->rfintfe, bRFSI_RFENV << 16, 0x1);
		rtl92e_set_bb_reg(dev, pPhyReg->rfintfo, bRFSI_RFENV, 0x1);
		rtl92e_set_bb_reg(dev, pPhyReg->rfHSSIPara2,
				  b3WireAddressLength, 0x0);
		rtl92e_set_bb_reg(dev, pPhyReg->rfHSSIPara2,
				  b3WireDataLength, 0x0);
		rtl92e_set_rf_reg(dev, (enum rf90_radio_path)eRFPath, 0x0,
				  bMask12Bits, 0xbf);
		/* Verify the RF block responds before loading its table. */
		rtStatus = rtl92e_check_bb_and_rf(dev, HW90_BLOCK_RF,
						  (enum rf90_radio_path)eRFPath);
		if (!rtStatus) {
			netdev_err(dev, "%s(): Failed to check RF Path %d.\n",
				   __func__, eRFPath);
			goto fail;
		}
		/* Load the radio table until the check register reads back
		 * the expected value, or retries are exhausted.
		 */
		RetryTimes = ConstRetryTimes;
		RF3_Final_Value = 0;
		while (RF3_Final_Value != RegValueToBeCheck &&
		       RetryTimes != 0) {
			ret = rtl92e_config_rf_path(dev,
						(enum rf90_radio_path)eRFPath);
			RF3_Final_Value = rtl92e_get_rf_reg(dev,
						 (enum rf90_radio_path)eRFPath,
						 RegOffSetToBeCheck,
						 bMask12Bits);
			RetryTimes--;
		}
		/* Restore the saved RF-environment setting. */
		switch (eRFPath) {
		case RF90_PATH_A:
			rtl92e_set_bb_reg(dev, pPhyReg->rfintfs, bRFSI_RFENV,
					  u4RegValue);
			break;
		case RF90_PATH_B:
			rtl92e_set_bb_reg(dev, pPhyReg->rfintfs,
					  bRFSI_RFENV << 16, u4RegValue);
			break;
		}
		if (ret) {
			netdev_err(dev,
				   "%s(): Failed to initialize RF Path %d.\n",
				   __func__, eRFPath);
			goto fail;
		}
	}
	return true;

fail:
	return false;
}
/*
 * Program the CCK TX AGC.  Under dynamic low-power operation, Netcore
 * boards use a fixed value while others add the CCK power enlargement
 * offset.  The result is capped at 0x24.
 */
void rtl92e_set_cck_tx_power(struct net_device *dev, u8 powerlevel)
{
	struct r8192_priv *priv = rtllib_priv(dev);
	u32 tx_agc = powerlevel;

	if (priv->dynamic_tx_low_pwr) {
		if (priv->customer_id == RT_CID_819X_NETCORE)
			tx_agc = 0x22;
		else
			tx_agc += priv->cck_pwr_enl;
	}

	if (tx_agc > 0x24)
		tx_agc = 0x24;

	rtl92e_set_bb_reg(dev, rTxAGC_CCK_Mcs32, bTxAGCRateCCK, tx_agc);
}
/*
 * Program the six OFDM/MCS TX AGC registers.
 *
 * Each register packs four 7-bit per-rate power fields; the first two
 * registers cover legacy rates (base = powerlevel + legacy/HT diff),
 * the rest MCS rates (base = powerlevel).  The four copy-pasted
 * extract/clamp/reassemble blocks are replaced by a per-byte loop with
 * identical arithmetic.  The index-3 value (MCS07..04 register) is
 * remembered for TX power tracking, and dynamic high-power mode
 * overrides the written value with 0x03030303.
 */
void rtl92e_set_ofdm_tx_power(struct net_device *dev, u8 powerlevel)
{
	struct r8192_priv *priv = rtllib_priv(dev);
	u32 writeVal, powerBase0, powerBase1, clamped;
	u8 index, shift, rate_pwr;
	u16 RegOffset[6] = {0xe00, 0xe04, 0xe10, 0xe14, 0xe18, 0xe1c};

	/* Replicate each base level into all four byte lanes. */
	powerBase0 = powerlevel + priv->legacy_ht_tx_pwr_diff;
	powerBase0 = (powerBase0 << 24) | (powerBase0 << 16) |
		     (powerBase0 << 8) | powerBase0;
	powerBase1 = powerlevel;
	powerBase1 = (powerBase1 << 24) | (powerBase1 << 16) |
		     (powerBase1 << 8) | powerBase1;

	for (index = 0; index < 6; index++) {
		writeVal = (u32)(priv->mcs_tx_pwr_level_org_offset[index] +
			   ((index < 2) ? powerBase0 : powerBase1));

		/* Clamp each 7-bit per-rate field to the 0x24 maximum. */
		clamped = 0;
		for (shift = 0; shift < 32; shift += 8) {
			rate_pwr = (writeVal >> shift) & 0x7f;
			if (rate_pwr > 0x24)
				rate_pwr = 0x24;
			clamped |= (u32)rate_pwr << shift;
		}

		if (index == 3)
			priv->pwr_track = clamped;

		writeVal = priv->dynamic_tx_high_pwr ? 0x03030303 : clamped;

		rtl92e_set_bb_reg(dev, RegOffset[index], 0x7f7f7f7f, writeVal);
	}
}
| linux-master | drivers/staging/rtl8192e/rtl8192e/r8190P_rtl8256.c |
// SPDX-License-Identifier: GPL-2.0-or-later
#include "qlge.h"
#include "qlge_devlink.h"
/*
 * Emit one coredump register segment into a devlink fmsg as:
 *   "<description>": { "segment": <num>, "values": [ reg, reg, ... ] }
 *
 * Fix: the original closed the object nest before the values array it
 * had opened inside it.  devlink fmsg nests must be closed in LIFO
 * order — array first, then object, then the outer pair.
 * Returns 0 on success or the first devlink_fmsg_* error.
 */
static int qlge_fill_seg_(struct devlink_fmsg *fmsg,
			  struct mpi_coredump_segment_header *seg_header,
			  u32 *reg_data)
{
	/* Number of u32 registers following the segment header. */
	int regs_num = (seg_header->seg_size
			- sizeof(struct mpi_coredump_segment_header)) / sizeof(u32);
	int err;
	int i;

	err = devlink_fmsg_pair_nest_start(fmsg, seg_header->description);
	if (err)
		return err;
	err = devlink_fmsg_obj_nest_start(fmsg);
	if (err)
		return err;
	err = devlink_fmsg_u32_pair_put(fmsg, "segment", seg_header->seg_num);
	if (err)
		return err;
	err = devlink_fmsg_arr_pair_nest_start(fmsg, "values");
	if (err)
		return err;
	for (i = 0; i < regs_num; i++) {
		err = devlink_fmsg_u32_put(fmsg, *reg_data);
		if (err)
			return err;
		reg_data++;
	}
	/* Close nests in reverse order of opening. */
	err = devlink_fmsg_arr_pair_nest_end(fmsg);
	if (err)
		return err;
	err = devlink_fmsg_obj_nest_end(fmsg);
	if (err)
		return err;
	return devlink_fmsg_pair_nest_end(fmsg);
}
#define FILL_SEG(seg_hdr, seg_regs) \
do { \
err = qlge_fill_seg_(fmsg, &dump->seg_hdr, dump->seg_regs); \
if (err) { \
kvfree(dump); \
return err; \
} \
} while (0)
/* devlink health "dump" callback: capture an MPI coredump from the adapter
 * and stream every segment into @fmsg. Returns 0 if the interface is down
 * (nothing to dump), -EPERM if a forced dump was requested but this
 * function does not own the firmware, -ENOMEM/-errno on other failures.
 */
static int qlge_reporter_coredump(struct devlink_health_reporter *reporter,
				  struct devlink_fmsg *fmsg, void *priv_ctx,
				  struct netlink_ext_ack *extack)
{
	int err = 0;
	struct qlge_adapter *qdev = devlink_health_reporter_priv(reporter);
	struct qlge_mpi_coredump *dump;
	wait_queue_head_t wait;

	if (!netif_running(qdev->ndev))
		return 0;

	if (test_bit(QL_FRC_COREDUMP, &qdev->flags)) {
		if (qlge_own_firmware(qdev)) {
			qlge_queue_fw_error(qdev);
			/* The condition is constant 0, so this is simply a
			 * 5 second sleep on a private waitqueue, giving the
			 * queued firmware-error work time to run.
			 */
			init_waitqueue_head(&wait);
			wait_event_timeout(wait, 0, 5 * HZ);
		} else {
			netif_err(qdev, ifup, qdev->ndev,
				  "Force Coredump failed because this NIC function doesn't own the firmware\n");
			return -EPERM;
		}
	}

	dump = kvmalloc(sizeof(*dump), GFP_KERNEL);
	if (!dump)
		return -ENOMEM;

	err = qlge_core_dump(qdev, dump);
	if (err) {
		kvfree(dump);
		return err;
	}

	qlge_soft_reset_mpi_risc(qdev);

	/* NOTE: FILL_SEG() kvfree()s 'dump' and returns on any error. */
	FILL_SEG(core_regs_seg_hdr, mpi_core_regs);
	FILL_SEG(test_logic_regs_seg_hdr, test_logic_regs);
	FILL_SEG(rmii_regs_seg_hdr, rmii_regs);
	FILL_SEG(fcmac1_regs_seg_hdr, fcmac1_regs);
	FILL_SEG(fcmac2_regs_seg_hdr, fcmac2_regs);
	FILL_SEG(fc1_mbx_regs_seg_hdr, fc1_mbx_regs);
	FILL_SEG(ide_regs_seg_hdr, ide_regs);
	FILL_SEG(nic1_mbx_regs_seg_hdr, nic1_mbx_regs);
	FILL_SEG(smbus_regs_seg_hdr, smbus_regs);
	FILL_SEG(fc2_mbx_regs_seg_hdr, fc2_mbx_regs);
	FILL_SEG(nic2_mbx_regs_seg_hdr, nic2_mbx_regs);
	FILL_SEG(i2c_regs_seg_hdr, i2c_regs);
	FILL_SEG(memc_regs_seg_hdr, memc_regs);
	FILL_SEG(pbus_regs_seg_hdr, pbus_regs);
	FILL_SEG(mde_regs_seg_hdr, mde_regs);
	FILL_SEG(nic_regs_seg_hdr, nic_regs);
	FILL_SEG(nic2_regs_seg_hdr, nic2_regs);
	FILL_SEG(xgmac1_seg_hdr, xgmac1);
	FILL_SEG(xgmac2_seg_hdr, xgmac2);
	FILL_SEG(code_ram_seg_hdr, code_ram);
	FILL_SEG(memc_ram_seg_hdr, memc_ram);
	FILL_SEG(xaui_an_hdr, serdes_xaui_an);
	FILL_SEG(xaui_hss_pcs_hdr, serdes_xaui_hss_pcs);
	FILL_SEG(xfi_an_hdr, serdes_xfi_an);
	FILL_SEG(xfi_train_hdr, serdes_xfi_train);
	FILL_SEG(xfi_hss_pcs_hdr, serdes_xfi_hss_pcs);
	FILL_SEG(xfi_hss_tx_hdr, serdes_xfi_hss_tx);
	FILL_SEG(xfi_hss_rx_hdr, serdes_xfi_hss_rx);
	FILL_SEG(xfi_hss_pll_hdr, serdes_xfi_hss_pll);

	/* misc_nic_info is a struct rather than a u32 array, so it cannot go
	 * through FILL_SEG(); emit it manually with the same cleanup.
	 */
	err = qlge_fill_seg_(fmsg, &dump->misc_nic_seg_hdr,
			     (u32 *)&dump->misc_nic_info);
	if (err) {
		kvfree(dump);
		return err;
	}

	FILL_SEG(intr_states_seg_hdr, intr_states);
	FILL_SEG(cam_entries_seg_hdr, cam_entries);
	FILL_SEG(nic_routing_words_seg_hdr, nic_routing_words);
	FILL_SEG(ets_seg_hdr, ets);
	FILL_SEG(probe_dump_seg_hdr, probe_dump);
	FILL_SEG(routing_reg_seg_hdr, routing_regs);
	FILL_SEG(mac_prot_reg_seg_hdr, mac_prot_regs);
	FILL_SEG(xaui2_an_hdr, serdes2_xaui_an);
	FILL_SEG(xaui2_hss_pcs_hdr, serdes2_xaui_hss_pcs);
	FILL_SEG(xfi2_an_hdr, serdes2_xfi_an);
	FILL_SEG(xfi2_train_hdr, serdes2_xfi_train);
	FILL_SEG(xfi2_hss_pcs_hdr, serdes2_xfi_hss_pcs);
	FILL_SEG(xfi2_hss_tx_hdr, serdes2_xfi_hss_tx);
	FILL_SEG(xfi2_hss_rx_hdr, serdes2_xfi_hss_rx);
	FILL_SEG(xfi2_hss_pll_hdr, serdes2_xfi_hss_pll);
	FILL_SEG(sem_regs_seg_hdr, sem_regs);
	kvfree(dump);
	return err;
}
/* devlink health reporter ops: only the "dump" callback is implemented. */
static const struct devlink_health_reporter_ops qlge_reporter_ops = {
	.name = "coredump",
	.dump = qlge_reporter_coredump,
};
/* Register the "coredump" devlink health reporter for this adapter.
 * Stores the reporter in priv->reporter. Returns 0 on success or the
 * negative error from devlink_health_reporter_create() (also logged).
 */
long qlge_health_create_reporters(struct qlge_adapter *priv)
{
	struct devlink *devlink = priv_to_devlink(priv);
	long err;

	priv->reporter = devlink_health_reporter_create(devlink,
							&qlge_reporter_ops,
							0, priv);
	if (!IS_ERR(priv->reporter))
		return 0;

	err = PTR_ERR(priv->reporter);
	netdev_warn(priv->ndev,
		    "Failed to create reporter, err = %ld\n",
		    err);
	return err;
}
| linux-master | drivers/staging/qlge/qlge_devlink.c |
// SPDX-License-Identifier: GPL-2.0
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
#include <linux/slab.h>
#include "qlge.h"
/* Read a NIC register from the alternate function. */
/* Read a NIC register belonging to the alternate (other) function.
 * Returns the register value, or 0xffffffff if the MPI read fails.
 */
static u32 qlge_read_other_func_reg(struct qlge_adapter *qdev,
				    u32 reg)
{
	u32 mpi_addr = MPI_NIC_REG_BLOCK
		       | MPI_NIC_READ
		       | (qdev->alt_func << MPI_NIC_FUNCTION_SHIFT)
		       | reg;
	u32 reg_val;

	if (qlge_read_mpi_reg(qdev, mpi_addr, &reg_val) != 0)
		return 0xffffffff;
	return reg_val;
}
/* Write a NIC register of the alternate function. Returns the status of
 * the underlying MPI write (0 on success).
 */
static int qlge_write_other_func_reg(struct qlge_adapter *qdev,
				     u32 reg, u32 reg_val)
{
	/* Build the MPI address of the other function's register. */
	u32 mpi_addr = MPI_NIC_REG_BLOCK
		       | MPI_NIC_READ
		       | (qdev->alt_func << MPI_NIC_FUNCTION_SHIFT)
		       | reg;

	return qlge_write_mpi_reg(qdev, mpi_addr, reg_val);
}
/* Poll register @reg of the alternate function until @bit is set.
 * Returns 0 when ready, -1 if @err_bit is seen or after 10 x 10 ms tries.
 */
static int qlge_wait_other_func_reg_rdy(struct qlge_adapter *qdev, u32 reg,
					u32 bit, u32 err_bit)
{
	int tries = 10;

	while (tries--) {
		u32 val = qlge_read_other_func_reg(qdev, reg);

		/* error bit takes precedence over the ready bit */
		if (val & err_bit)
			return -1;
		if (val & bit)
			return 0;
		mdelay(10);
	}
	return -1;
}
/* Read SERDES register @reg of the alternate function into @data.
 * Returns 0 on success, non-zero if the address register never
 * became ready.
 */
static int qlge_read_other_func_serdes_reg(struct qlge_adapter *qdev, u32 reg,
					   u32 *data)
{
	int status;

	/* wait for any previous access to complete */
	status = qlge_wait_other_func_reg_rdy(qdev, XG_SERDES_ADDR / 4,
					      XG_SERDES_ADDR_RDY, 0);
	if (status)
		return status;

	/* post the read request */
	qlge_write_other_func_reg(qdev, XG_SERDES_ADDR / 4, reg | PROC_ADDR_R);

	/* wait for the read to complete */
	status = qlge_wait_other_func_reg_rdy(qdev, XG_SERDES_ADDR / 4,
					      XG_SERDES_ADDR_RDY, 0);
	if (status)
		return status;

	*data = qlge_read_other_func_reg(qdev, (XG_SERDES_DATA / 4));
	return 0;
}
/* Read SERDES register @reg of the local function into @data.
 * Returns 0 on success, non-zero if the address register never
 * became ready.
 */
static int qlge_read_serdes_reg(struct qlge_adapter *qdev, u32 reg, u32 *data)
{
	int status;

	/* wait for any previous access to complete */
	status = qlge_wait_reg_rdy(qdev, XG_SERDES_ADDR, XG_SERDES_ADDR_RDY, 0);
	if (status)
		return status;

	/* post the read request */
	qlge_write32(qdev, XG_SERDES_ADDR, reg | PROC_ADDR_R);

	/* wait for the read to complete */
	status = qlge_wait_reg_rdy(qdev, XG_SERDES_ADDR, XG_SERDES_ADDR_RDY, 0);
	if (status)
		return status;

	*data = qlge_read32(qdev, XG_SERDES_DATA);
	return 0;
}
/* Fetch one SERDES word at @addr for both the local ("direct") and the
 * alternate ("indirect") function. A slot that is flagged invalid, or
 * whose read fails, is dead-filled with 0xDEADBEEF.
 */
static void qlge_get_both_serdes(struct qlge_adapter *qdev, u32 addr,
				 u32 *direct_ptr, u32 *indirect_ptr,
				 bool direct_valid, bool indirect_valid)
{
	/* short-circuit: the read is only attempted when the slot is valid */
	if (!direct_valid ||
	    qlge_read_serdes_reg(qdev, addr, direct_ptr))
		*direct_ptr = 0xDEADBEEF;

	if (!indirect_valid ||
	    qlge_read_other_func_serdes_reg(qdev, addr, indirect_ptr))
		*indirect_ptr = 0xDEADBEEF;
}
/* Dump all SERDES register blocks (XAUI AN/HSS-PCS, XFI AN/TRAIN/HSS-*)
 * for both this function ("direct") and the alternate one ("indirect")
 * into @mpi_coredump. Blocks that are powered down are dead-filled by
 * qlge_get_both_serdes(). Always returns 0.
 */
static int qlge_get_serdes_regs(struct qlge_adapter *qdev,
				struct qlge_mpi_coredump *mpi_coredump)
{
	int status;
	bool xfi_direct_valid = false, xfi_indirect_valid = false;
	bool xaui_direct_valid = true, xaui_indirect_valid = true;
	unsigned int i;
	u32 *direct_ptr, temp;
	u32 *indirect_ptr;

	/* The XAUI needs to be read out per port */
	status = qlge_read_other_func_serdes_reg(qdev,
						 XG_SERDES_XAUI_HSS_PCS_START,
						 &temp);
	if (status)
		/* read failed: treat the other function's XAUI as powered down */
		temp = XG_SERDES_ADDR_XAUI_PWR_DOWN;
	if ((temp & XG_SERDES_ADDR_XAUI_PWR_DOWN) ==
	    XG_SERDES_ADDR_XAUI_PWR_DOWN)
		xaui_indirect_valid = false;
	status = qlge_read_serdes_reg(qdev, XG_SERDES_XAUI_HSS_PCS_START, &temp);
	if (status)
		temp = XG_SERDES_ADDR_XAUI_PWR_DOWN;
	if ((temp & XG_SERDES_ADDR_XAUI_PWR_DOWN) ==
	    XG_SERDES_ADDR_XAUI_PWR_DOWN)
		xaui_direct_valid = false;
	/*
	 * XFI register is shared so only need to read one
	 * functions and then check the bits.
	 */
	status = qlge_read_serdes_reg(qdev, XG_SERDES_ADDR_STS, &temp);
	if (status)
		/* read failed: temp = 0 leaves both XFI blocks invalid */
		temp = 0;
	if ((temp & XG_SERDES_ADDR_XFI1_PWR_UP) ==
	    XG_SERDES_ADDR_XFI1_PWR_UP) {
		/* now see if i'm NIC 1 or NIC 2 */
		if (qdev->func & 1)
			/* I'm NIC 2, so the indirect (NIC1) xfi is up. */
			xfi_indirect_valid = true;
		else
			xfi_direct_valid = true;
	}
	if ((temp & XG_SERDES_ADDR_XFI2_PWR_UP) ==
	    XG_SERDES_ADDR_XFI2_PWR_UP) {
		/* now see if i'm NIC 1 or NIC 2 */
		if (qdev->func & 1)
			/* I'm NIC 2, so the indirect (NIC1) xfi is up. */
			xfi_direct_valid = true;
		else
			xfi_indirect_valid = true;
	}
	/* Get XAUI_AN register block. */
	if (qdev->func & 1) {
		/* Function 2 is direct	*/
		direct_ptr = mpi_coredump->serdes2_xaui_an;
		indirect_ptr = mpi_coredump->serdes_xaui_an;
	} else {
		/* Function 1 is direct	*/
		direct_ptr = mpi_coredump->serdes_xaui_an;
		indirect_ptr = mpi_coredump->serdes2_xaui_an;
	}
	for (i = 0; i <= 0x000000034; i += 4, direct_ptr++, indirect_ptr++)
		qlge_get_both_serdes(qdev, i, direct_ptr, indirect_ptr,
				     xaui_direct_valid, xaui_indirect_valid);
	/* Get XAUI_HSS_PCS register block. */
	if (qdev->func & 1) {
		direct_ptr =
			mpi_coredump->serdes2_xaui_hss_pcs;
		indirect_ptr =
			mpi_coredump->serdes_xaui_hss_pcs;
	} else {
		direct_ptr =
			mpi_coredump->serdes_xaui_hss_pcs;
		indirect_ptr =
			mpi_coredump->serdes2_xaui_hss_pcs;
	}
	for (i = 0x800; i <= 0x880; i += 4, direct_ptr++, indirect_ptr++)
		qlge_get_both_serdes(qdev, i, direct_ptr, indirect_ptr,
				     xaui_direct_valid, xaui_indirect_valid);
	/* Get XAUI_XFI_AN register block. */
	if (qdev->func & 1) {
		direct_ptr = mpi_coredump->serdes2_xfi_an;
		indirect_ptr = mpi_coredump->serdes_xfi_an;
	} else {
		direct_ptr = mpi_coredump->serdes_xfi_an;
		indirect_ptr = mpi_coredump->serdes2_xfi_an;
	}
	for (i = 0x1000; i <= 0x1034; i += 4, direct_ptr++, indirect_ptr++)
		qlge_get_both_serdes(qdev, i, direct_ptr, indirect_ptr,
				     xfi_direct_valid, xfi_indirect_valid);
	/* Get XAUI_XFI_TRAIN register block. */
	if (qdev->func & 1) {
		direct_ptr = mpi_coredump->serdes2_xfi_train;
		indirect_ptr =
			mpi_coredump->serdes_xfi_train;
	} else {
		direct_ptr = mpi_coredump->serdes_xfi_train;
		indirect_ptr =
			mpi_coredump->serdes2_xfi_train;
	}
	for (i = 0x1050; i <= 0x107c; i += 4, direct_ptr++, indirect_ptr++)
		qlge_get_both_serdes(qdev, i, direct_ptr, indirect_ptr,
				     xfi_direct_valid, xfi_indirect_valid);
	/* Get XAUI_XFI_HSS_PCS register block. */
	if (qdev->func & 1) {
		direct_ptr =
			mpi_coredump->serdes2_xfi_hss_pcs;
		indirect_ptr =
			mpi_coredump->serdes_xfi_hss_pcs;
	} else {
		direct_ptr =
			mpi_coredump->serdes_xfi_hss_pcs;
		indirect_ptr =
			mpi_coredump->serdes2_xfi_hss_pcs;
	}
	for (i = 0x1800; i <= 0x1838; i += 4, direct_ptr++, indirect_ptr++)
		qlge_get_both_serdes(qdev, i, direct_ptr, indirect_ptr,
				     xfi_direct_valid, xfi_indirect_valid);
	/* Get XAUI_XFI_HSS_TX register block.
	 * NOTE(review): the three HSS_TX/RX/PLL loops below step the address
	 * by 1 rather than 4 like the blocks above — presumably these
	 * ranges are word-indexed; confirm against the SERDES map.
	 */
	if (qdev->func & 1) {
		direct_ptr =
			mpi_coredump->serdes2_xfi_hss_tx;
		indirect_ptr =
			mpi_coredump->serdes_xfi_hss_tx;
	} else {
		direct_ptr = mpi_coredump->serdes_xfi_hss_tx;
		indirect_ptr =
			mpi_coredump->serdes2_xfi_hss_tx;
	}
	for (i = 0x1c00; i <= 0x1c1f; i++, direct_ptr++, indirect_ptr++)
		qlge_get_both_serdes(qdev, i, direct_ptr, indirect_ptr,
				     xfi_direct_valid, xfi_indirect_valid);
	/* Get XAUI_XFI_HSS_RX register block. */
	if (qdev->func & 1) {
		direct_ptr =
			mpi_coredump->serdes2_xfi_hss_rx;
		indirect_ptr =
			mpi_coredump->serdes_xfi_hss_rx;
	} else {
		direct_ptr = mpi_coredump->serdes_xfi_hss_rx;
		indirect_ptr =
			mpi_coredump->serdes2_xfi_hss_rx;
	}
	for (i = 0x1c40; i <= 0x1c5f; i++, direct_ptr++, indirect_ptr++)
		qlge_get_both_serdes(qdev, i, direct_ptr, indirect_ptr,
				     xfi_direct_valid, xfi_indirect_valid);
	/* Get XAUI_XFI_HSS_PLL register block. */
	if (qdev->func & 1) {
		direct_ptr =
			mpi_coredump->serdes2_xfi_hss_pll;
		indirect_ptr =
			mpi_coredump->serdes_xfi_hss_pll;
	} else {
		direct_ptr =
			mpi_coredump->serdes_xfi_hss_pll;
		indirect_ptr =
			mpi_coredump->serdes2_xfi_hss_pll;
	}
	for (i = 0x1e00; i <= 0x1e1f; i++, direct_ptr++, indirect_ptr++)
		qlge_get_both_serdes(qdev, i, direct_ptr, indirect_ptr,
				     xfi_direct_valid, xfi_indirect_valid);
	return 0;
}
/* Read XGMAC register @reg of the alternate function into @data.
 * Returns 0 on success, non-zero if the address register never became
 * ready or reported XGMAC_ADDR_XME.
 */
static int qlge_read_other_func_xgmac_reg(struct qlge_adapter *qdev, u32 reg,
					  u32 *data)
{
	int status;

	/* wait for any previous access to complete */
	status = qlge_wait_other_func_reg_rdy(qdev, XGMAC_ADDR / 4,
					      XGMAC_ADDR_RDY, XGMAC_ADDR_XME);
	if (status)
		return status;

	/* post the read request */
	qlge_write_other_func_reg(qdev, XGMAC_ADDR / 4, reg | XGMAC_ADDR_R);

	/* wait for the read to complete */
	status = qlge_wait_other_func_reg_rdy(qdev, XGMAC_ADDR / 4,
					      XGMAC_ADDR_RDY, XGMAC_ADDR_XME);
	if (status)
		return status;

	*data = qlge_read_other_func_reg(qdev, XGMAC_DATA / 4);
	return 0;
}
/* Read the 400 xgmac control/statistics registers into @buf, skipping
 * (dead-filling) locations that are non-responsive to reads.
 * @other_function selects the alternate function's XGMAC block.
 *
 * Fix: the previous code had the filter inverted — it read ONLY the
 * non-responsive locations and then 'break'ed out of the loop after the
 * first one, so the dump was never taken. Per the comment's stated
 * intent, the filtered locations are dead-filled and all other
 * registers are read.
 */
static int qlge_get_xgmac_regs(struct qlge_adapter *qdev, u32 *buf,
			       unsigned int other_function)
{
	int status = 0;
	int i;

	for (i = PAUSE_SRC_LO; i < XGMAC_REGISTER_END; i += 4, buf++) {
		/* We're reading 400 xgmac registers, but we filter out
		 * several locations that are non-responsive to reads.
		 */
		if ((i == 0x00000114) || (i == 0x00000118) ||
		    (i == 0x0000013c) || (i == 0x00000140) ||
		    (i > 0x00000150 && i < 0x000001fc) ||
		    (i > 0x00000278 && i < 0x000002a0) ||
		    (i > 0x000002c0 && i < 0x000002cf) ||
		    (i > 0x000002dc && i < 0x000002f0) ||
		    (i > 0x000003c8 && i < 0x00000400) ||
		    (i > 0x00000400 && i < 0x00000410) ||
		    (i > 0x00000410 && i < 0x00000420) ||
		    (i > 0x00000420 && i < 0x00000430) ||
		    (i > 0x00000430 && i < 0x00000440) ||
		    (i > 0x00000440 && i < 0x00000450) ||
		    (i > 0x00000450 && i < 0x00000500) ||
		    (i > 0x0000054c && i < 0x00000568) ||
		    (i > 0x000005c8 && i < 0x00000600)) {
			/* non-responsive location: dead-fill and move on */
			*buf = 0xdeadbeef;
			continue;
		}
		if (other_function)
			status = qlge_read_other_func_xgmac_reg(qdev, i, buf);
		else
			status = qlge_read_xgmac_reg(qdev, i, buf);
		/* dead-fill individual failed reads, keep going */
		if (status)
			*buf = 0xdeadbeef;
	}
	return status;
}
/* Snapshot the ETS registers into @buf: eight NIC_ETS entries followed by
 * two CNA_ETS entries. Each index is selected by a write to the register
 * (index in bits 31:29, bit 27 set) and the value read back from the same
 * register. Always returns 0.
 */
static int qlge_get_ets_regs(struct qlge_adapter *qdev, u32 *buf)
{
	int idx;

	for (idx = 0; idx < 8; idx++) {
		qlge_write32(qdev, NIC_ETS, idx << 29 | 0x08000000);
		*buf++ = qlge_read32(qdev, NIC_ETS);
	}

	for (idx = 0; idx < 2; idx++) {
		qlge_write32(qdev, CNA_ETS, idx << 29 | 0x08000000);
		*buf++ = qlge_read32(qdev, CNA_ETS);
	}

	return 0;
}
/* Capture the interrupt-enable state of every RX ring into @buf: write
 * each ring's read-mask to INTR_EN and record the register's response.
 */
static void qlge_get_intr_states(struct qlge_adapter *qdev, u32 *buf)
{
	int ring;

	for (ring = 0; ring < qdev->rx_ring_count; ring++) {
		qlge_write32(qdev, INTR_EN,
			     qdev->intr_context[ring].intr_read_mask);
		*buf++ = qlge_read32(qdev, INTR_EN);
	}
}
/* Dump the MAC CAM into @buf under the MAC address semaphore:
 * 16 unicast CAM entries (3 words each: MAC low, MAC high, output/routing
 * word) followed by 32 multicast entries (2 words each). Returns 0 on
 * success or the first register-read error.
 */
static int qlge_get_cam_entries(struct qlge_adapter *qdev, u32 *buf)
{
	int i, status;
	u32 value[3];

	status = qlge_sem_spinlock(qdev, SEM_MAC_ADDR_MASK);
	if (status)
		return status;

	for (i = 0; i < 16; i++) {
		status = qlge_get_mac_addr_reg(qdev,
					       MAC_ADDR_TYPE_CAM_MAC, i, value);
		if (status) {
			netif_err(qdev, drv, qdev->ndev,
				  "Failed read of mac index register\n");
			goto err;
		}
		*buf++ = value[0];	/* lower MAC address */
		*buf++ = value[1];	/* upper MAC address */
		*buf++ = value[2];	/* output */
	}
	for (i = 0; i < 32; i++) {
		status = qlge_get_mac_addr_reg(qdev, MAC_ADDR_TYPE_MULTI_MAC,
					       i, value);
		if (status) {
			netif_err(qdev, drv, qdev->ndev,
				  "Failed read of mac index register\n");
			goto err;
		}
		*buf++ = value[0];	/* lower Mcast address */
		*buf++ = value[1];	/* upper Mcast address */
	}
err:
	/* semaphore is released on both success and failure paths */
	qlge_sem_unlock(qdev, SEM_MAC_ADDR_MASK);
	return status;
}
/* Dump the 16 routing-index registers into @buf while holding the routing
 * semaphore. Returns 0 on success or the first read error (the failing
 * read is logged and the dump stops there).
 */
static int qlge_get_routing_entries(struct qlge_adapter *qdev, u32 *buf)
{
	u32 idx, value;
	int status;

	status = qlge_sem_spinlock(qdev, SEM_RT_IDX_MASK);
	if (status)
		return status;

	for (idx = 0; idx < 16; idx++) {
		status = qlge_get_routing_reg(qdev, idx, &value);
		if (status) {
			netif_err(qdev, drv, qdev->ndev,
				  "Failed read of routing index register\n");
			break;
		}
		*buf++ = value;
	}

	qlge_sem_unlock(qdev, SEM_RT_IDX_MASK);
	return status;
}
/* Read the MPI Processor shadow registers into @buf: for each of the
 * MPI_CORE_SH_REGS_CNT entries, select the shadow offset through RISC_124
 * and read the shadowed value back through RISC_127. Returns 0 on success
 * or the first MPI access error.
 *
 * Fix: 'status' is now initialized — previously it was returned
 * uninitialized if the loop body never executed (and triggered
 * maybe-uninitialized warnings).
 */
static int qlge_get_mpi_shadow_regs(struct qlge_adapter *qdev, u32 *buf)
{
	u32 i;
	int status = 0;

	for (i = 0; i < MPI_CORE_SH_REGS_CNT; i++, buf++) {
		status = qlge_write_mpi_reg(qdev,
					    RISC_124,
					    (SHADOW_OFFSET | i << SHADOW_REG_SHIFT));
		if (status)
			goto end;
		status = qlge_read_mpi_reg(qdev, RISC_127, buf);
		if (status)
			goto end;
	}
end:
	return status;
}
/* Copy @count consecutive MPI processor registers starting at @offset
 * into @buf. Stops at, and returns, the first failing read; 0 on success.
 */
static int qlge_get_mpi_regs(struct qlge_adapter *qdev, u32 *buf,
			     u32 offset, u32 count)
{
	int idx, status = 0;

	for (idx = 0; idx < count; idx++) {
		status = qlge_read_mpi_reg(qdev, offset + idx, &buf[idx]);
		if (status)
			break;
	}
	return status;
}
/* Read the ASIC probe dump for one clock domain.
 * For every module flagged in the @valid bitmask, walk all mux selections
 * and capture two 32-bit probe data words per mux; the probe control word
 * is additionally recorded once per module (at mux_sel 0).
 * Returns the advanced output pointer so captures can be chained.
 */
static unsigned int *qlge_get_probe(struct qlge_adapter *qdev, u32 clock,
				    u32 valid, u32 *buf)
{
	u32 module, mux_sel, probe, lo_val, hi_val;

	for (module = 0; module < PRB_MX_ADDR_MAX_MODS; module++) {
		/* skip modules not marked valid for this clock domain */
		if (!((valid >> module) & 1))
			continue;
		for (mux_sel = 0; mux_sel < PRB_MX_ADDR_MAX_MUX; mux_sel++) {
			probe = clock
				| PRB_MX_ADDR_ARE
				| mux_sel
				| (module << PRB_MX_ADDR_MOD_SEL_SHIFT);
			qlge_write32(qdev, PRB_MX_ADDR, probe);
			lo_val = qlge_read32(qdev, PRB_MX_DATA);
			if (mux_sel == 0) {
				/* first mux of this module: record the control word */
				*buf = probe;
				buf++;
			}
			/* NOTE(review): PRB_MX_ADDR_UP presumably selects the
			 * upper probe word — confirm against the register map.
			 */
			probe |= PRB_MX_ADDR_UP;
			qlge_write32(qdev, PRB_MX_ADDR, probe);
			hi_val = qlge_read32(qdev, PRB_MX_DATA);
			*buf = lo_val;
			buf++;
			*buf = hi_val;
			buf++;
		}
	}
	return buf;
}
/* Capture the complete ASIC probe dump into @buf: enable the probe mux,
 * then collect the probe words of the system, PCI, XGMAC and FC clock
 * domains back to back. Always returns 0.
 */
static int qlge_get_probe_dump(struct qlge_adapter *qdev, unsigned int *buf)
{
	/* First we have to enable the probe mux */
	qlge_write_mpi_reg(qdev, MPI_TEST_FUNC_PRB_CTL, MPI_TEST_FUNC_PRB_EN);
	buf = qlge_get_probe(qdev, PRB_MX_ADDR_SYS_CLOCK,
			     PRB_MX_ADDR_VALID_SYS_MOD, buf);
	buf = qlge_get_probe(qdev, PRB_MX_ADDR_PCI_CLOCK,
			     PRB_MX_ADDR_VALID_PCI_MOD, buf);
	buf = qlge_get_probe(qdev, PRB_MX_ADDR_XGM_CLOCK,
			     PRB_MX_ADDR_VALID_XGM_MOD, buf);
	buf = qlge_get_probe(qdev, PRB_MX_ADDR_FC_CLOCK,
			     PRB_MX_ADDR_VALID_FC_MOD, buf);
	return 0;
}
/* Read out the routing index registers.
 * For each of 4 routing types (8 indices for types 0-1, 16 for types 2-3)
 * issue a read-select to RT_IDX, poll until RT_IDX_MR is set, and record
 * four words per entry: type, index, the RT_IDX response and RT_DATA.
 * Holds the routing semaphore for the duration. Returns 0 on success.
 */
static int qlge_get_routing_index_registers(struct qlge_adapter *qdev, u32 *buf)
{
	int status;
	u32 type, index, index_max;
	u32 result_index;
	u32 result_data;
	u32 val;

	status = qlge_sem_spinlock(qdev, SEM_RT_IDX_MASK);
	if (status)
		return status;

	for (type = 0; type < 4; type++) {
		if (type < 2)
			index_max = 8;
		else
			index_max = 16;
		for (index = 0; index < index_max; index++) {
			val = RT_IDX_RS
				| (type << RT_IDX_TYPE_SHIFT)
				| (index << RT_IDX_IDX_SHIFT);
			qlge_write32(qdev, RT_IDX, val);
			result_index = 0;
			/* NOTE(review): unbounded busy-wait — spins forever if
			 * the hardware never sets RT_IDX_MR; consider a bounded
			 * poll like qlge_wait_other_func_reg_rdy().
			 */
			while ((result_index & RT_IDX_MR) == 0)
				result_index = qlge_read32(qdev, RT_IDX);
			result_data = qlge_read32(qdev, RT_DATA);
			*buf = type;
			buf++;
			*buf = index;
			buf++;
			*buf = result_index;
			buf++;
			*buf = result_data;
			buf++;
		}
	}
	qlge_sem_unlock(qdev, SEM_RT_IDX_MASK);
	return status;
}
/* Read out the MAC protocol registers.
 * Walks every MAC address table type (CAM, multicast, VLAN/MC masks,
 * FC/management entries, ...); each type defines how many indices and how
 * many words per index to read. For each word: select it via MAC_ADDR_IDX
 * (with MAC_ADDR_RS read-select), poll for MAC_ADDR_MR, then record the
 * MAC_ADDR_IDX response and the MAC_ADDR_DATA word (two words per entry).
 */
static void qlge_get_mac_protocol_registers(struct qlge_adapter *qdev, u32 *buf)
{
	u32 result_index, result_data;
	u32 type;
	u32 index;
	u32 offset;
	u32 val;
	u32 initial_val = MAC_ADDR_RS;
	u32 max_index;
	u32 max_offset;

	for (type = 0; type < MAC_ADDR_TYPE_COUNT; type++) {
		switch (type) {
		case 0: /* CAM */
			/* CAM reads additionally need the ADR bit set */
			initial_val |= MAC_ADDR_ADR;
			max_index = MAC_ADDR_MAX_CAM_ENTRIES;
			max_offset = MAC_ADDR_MAX_CAM_WCOUNT;
			break;
		case 1: /* Multicast MAC Address */
			max_index = MAC_ADDR_MAX_CAM_WCOUNT;
			max_offset = MAC_ADDR_MAX_CAM_WCOUNT;
			break;
		case 2: /* VLAN filter mask */
		case 3: /* MC filter mask */
			max_index = MAC_ADDR_MAX_CAM_WCOUNT;
			max_offset = MAC_ADDR_MAX_CAM_WCOUNT;
			break;
		case 4: /* FC MAC addresses */
			max_index = MAC_ADDR_MAX_FC_MAC_ENTRIES;
			max_offset = MAC_ADDR_MAX_FC_MAC_WCOUNT;
			break;
		case 5: /* Mgmt MAC addresses */
			max_index = MAC_ADDR_MAX_MGMT_MAC_ENTRIES;
			max_offset = MAC_ADDR_MAX_MGMT_MAC_WCOUNT;
			break;
		case 6: /* Mgmt VLAN addresses */
			max_index = MAC_ADDR_MAX_MGMT_VLAN_ENTRIES;
			max_offset = MAC_ADDR_MAX_MGMT_VLAN_WCOUNT;
			break;
		case 7: /* Mgmt IPv4 address */
			max_index = MAC_ADDR_MAX_MGMT_V4_ENTRIES;
			max_offset = MAC_ADDR_MAX_MGMT_V4_WCOUNT;
			break;
		case 8: /* Mgmt IPv6 address */
			max_index = MAC_ADDR_MAX_MGMT_V6_ENTRIES;
			max_offset = MAC_ADDR_MAX_MGMT_V6_WCOUNT;
			break;
		case 9: /* Mgmt TCP/UDP Dest port */
			max_index = MAC_ADDR_MAX_MGMT_TU_DP_ENTRIES;
			max_offset = MAC_ADDR_MAX_MGMT_TU_DP_WCOUNT;
			break;
		default:
			netdev_err(qdev->ndev, "Bad type!!! 0x%08x\n", type);
			max_index = 0;
			max_offset = 0;
			break;
		}
		for (index = 0; index < max_index; index++) {
			for (offset = 0; offset < max_offset; offset++) {
				val = initial_val
					| (type << MAC_ADDR_TYPE_SHIFT)
					| (index << MAC_ADDR_IDX_SHIFT)
					| (offset);
				qlge_write32(qdev, MAC_ADDR_IDX, val);
				result_index = 0;
				/* NOTE(review): unbounded busy-wait on MR, like
				 * qlge_get_routing_index_registers() — verify
				 * the hardware always completes the read.
				 */
				while ((result_index & MAC_ADDR_MR) == 0) {
					result_index = qlge_read32(qdev,
								   MAC_ADDR_IDX);
				}
				result_data = qlge_read32(qdev, MAC_ADDR_DATA);
				*buf = result_index;
				buf++;
				*buf = result_data;
				buf++;
			}
		}
	}
}
/* Snapshot the SEM register of every function (MAX_SEMAPHORE_FUNCTIONS)
 * into @buf, dead-filling the slot of any function whose MPI read fails.
 *
 * Fix: the dead-fill condition was inverted — qlge_read_mpi_reg() returns
 * non-zero on FAILURE (see its other callers in this file), so
 * 'if (!status)' dead-filled every successful read and kept failed ones.
 * Also avoid consuming 'reg_val' when the read did not complete.
 */
static void qlge_get_sem_registers(struct qlge_adapter *qdev, u32 *buf)
{
	u32 func_num, reg, reg_val;
	int status;

	for (func_num = 0; func_num < MAX_SEMAPHORE_FUNCTIONS ; func_num++) {
		reg = MPI_NIC_REG_BLOCK
			| (func_num << MPI_NIC_FUNCTION_SHIFT)
			| (SEM / 4);
		status = qlge_read_mpi_reg(qdev, reg, &reg_val);
		/* if the read failed then dead fill the element. */
		if (status)
			*buf = 0xdeadbeef;
		else
			*buf = reg_val;
		buf++;
	}
}
/* Initialise a coredump segment header: zero the header, stamp the magic
 * cookie, record the segment number and total size, and copy in the
 * description string (NUL termination is guaranteed by the memset plus
 * the size-1 bounded copy).
 */
static void qlge_build_coredump_seg_header(struct mpi_coredump_segment_header *seg_hdr,
					   u32 seg_number, u32 seg_size, u8 *desc)
{
	memset(seg_hdr, 0, sizeof(*seg_hdr));
	seg_hdr->cookie = MPI_COREDUMP_COOKIE;
	seg_hdr->seg_num = seg_number;
	seg_hdr->seg_size = seg_size;
	strncpy(seg_hdr->description, desc, sizeof(seg_hdr->description) - 1);
}
/*
* This function should be called when a coredump / probedump
* is to be extracted from the HBA. It is assumed there is a
* qdev structure that contains the base address of the register
* space for this function as well as a coredump structure that
* will contain the dump.
*/
/* Extract a full MPI coredump into @mpi_coredump: pause the RISC, capture
 * every register segment (NIC, XGMAC, SERDES, MPI core/shadow, buses,
 * mailboxes, indexed tables, probe dump, semaphores), then reset the RISC
 * and dump the code/MEMC RAM areas. On error the coredump may be only
 * partially filled. Returns 0 on success or the first failing step's
 * status. Holds SEM_PROC_REG for the duration (best effort — see below).
 */
int qlge_core_dump(struct qlge_adapter *qdev, struct qlge_mpi_coredump *mpi_coredump)
{
	int status;
	int i;

	if (!mpi_coredump) {
		netif_err(qdev, drv, qdev->ndev, "No memory allocated\n");
		return -EINVAL;
	}

	/* Try to get the spinlock, but dont worry if
	 * it isn't available.  If the firmware died it
	 * might be holding the sem.
	 */
	qlge_sem_spinlock(qdev, SEM_PROC_REG_MASK);

	status = qlge_pause_mpi_risc(qdev);
	if (status) {
		netif_err(qdev, drv, qdev->ndev,
			  "Failed RISC pause. Status = 0x%.08x\n", status);
		goto err;
	}

	/* Insert the global header */
	memset(&mpi_coredump->mpi_global_header, 0,
	       sizeof(struct mpi_coredump_global_header));
	mpi_coredump->mpi_global_header.cookie = MPI_COREDUMP_COOKIE;
	mpi_coredump->mpi_global_header.header_size =
		sizeof(struct mpi_coredump_global_header);
	mpi_coredump->mpi_global_header.image_size =
		sizeof(struct qlge_mpi_coredump);
	strncpy(mpi_coredump->mpi_global_header.id_string, "MPI Coredump",
		sizeof(mpi_coredump->mpi_global_header.id_string));

	/* Get generic NIC reg dump */
	qlge_build_coredump_seg_header(&mpi_coredump->nic_regs_seg_hdr,
				       NIC1_CONTROL_SEG_NUM,
				       sizeof(struct mpi_coredump_segment_header) +
				       sizeof(mpi_coredump->nic_regs), "NIC1 Registers");

	qlge_build_coredump_seg_header(&mpi_coredump->nic2_regs_seg_hdr,
				       NIC2_CONTROL_SEG_NUM,
				       sizeof(struct mpi_coredump_segment_header) +
				       sizeof(mpi_coredump->nic2_regs), "NIC2 Registers");

	/* Get XGMac registers. (Segment 18, Rev C. step 21) */
	qlge_build_coredump_seg_header(&mpi_coredump->xgmac1_seg_hdr,
				       NIC1_XGMAC_SEG_NUM,
				       sizeof(struct mpi_coredump_segment_header) +
				       sizeof(mpi_coredump->xgmac1), "NIC1 XGMac Registers");

	qlge_build_coredump_seg_header(&mpi_coredump->xgmac2_seg_hdr,
				       NIC2_XGMAC_SEG_NUM,
				       sizeof(struct mpi_coredump_segment_header) +
				       sizeof(mpi_coredump->xgmac2), "NIC2 XGMac Registers");

	/* The "direct" reads fill this function's slot; the other function's
	 * slot is filled through the MPI indirection (qlge_read_other_func_reg).
	 */
	if (qdev->func & 1) {
		/* Odd means our function is NIC 2 */
		for (i = 0; i < NIC_REGS_DUMP_WORD_COUNT; i++)
			mpi_coredump->nic2_regs[i] =
				qlge_read32(qdev, i * sizeof(u32));

		for (i = 0; i < NIC_REGS_DUMP_WORD_COUNT; i++)
			mpi_coredump->nic_regs[i] =
				qlge_read_other_func_reg(qdev, (i * sizeof(u32)) / 4);

		qlge_get_xgmac_regs(qdev, &mpi_coredump->xgmac2[0], 0);
		qlge_get_xgmac_regs(qdev, &mpi_coredump->xgmac1[0], 1);
	} else {
		/* Even means our function is NIC 1 */
		for (i = 0; i < NIC_REGS_DUMP_WORD_COUNT; i++)
			mpi_coredump->nic_regs[i] =
				qlge_read32(qdev, i * sizeof(u32));
		for (i = 0; i < NIC_REGS_DUMP_WORD_COUNT; i++)
			mpi_coredump->nic2_regs[i] =
				qlge_read_other_func_reg(qdev, (i * sizeof(u32)) / 4);

		qlge_get_xgmac_regs(qdev, &mpi_coredump->xgmac1[0], 0);
		qlge_get_xgmac_regs(qdev, &mpi_coredump->xgmac2[0], 1);
	}

	/* Rev C. Step 20a */
	qlge_build_coredump_seg_header(&mpi_coredump->xaui_an_hdr,
				       XAUI_AN_SEG_NUM,
				       sizeof(struct mpi_coredump_segment_header) +
				       sizeof(mpi_coredump->serdes_xaui_an),
				       "XAUI AN Registers");

	/* Rev C. Step 20b */
	qlge_build_coredump_seg_header(&mpi_coredump->xaui_hss_pcs_hdr,
				       XAUI_HSS_PCS_SEG_NUM,
				       sizeof(struct mpi_coredump_segment_header) +
				       sizeof(mpi_coredump->serdes_xaui_hss_pcs),
				       "XAUI HSS PCS Registers");

	qlge_build_coredump_seg_header(&mpi_coredump->xfi_an_hdr, XFI_AN_SEG_NUM,
				       sizeof(struct mpi_coredump_segment_header) +
				       sizeof(mpi_coredump->serdes_xfi_an),
				       "XFI AN Registers");

	qlge_build_coredump_seg_header(&mpi_coredump->xfi_train_hdr,
				       XFI_TRAIN_SEG_NUM,
				       sizeof(struct mpi_coredump_segment_header) +
				       sizeof(mpi_coredump->serdes_xfi_train),
				       "XFI TRAIN Registers");

	qlge_build_coredump_seg_header(&mpi_coredump->xfi_hss_pcs_hdr,
				       XFI_HSS_PCS_SEG_NUM,
				       sizeof(struct mpi_coredump_segment_header) +
				       sizeof(mpi_coredump->serdes_xfi_hss_pcs),
				       "XFI HSS PCS Registers");

	qlge_build_coredump_seg_header(&mpi_coredump->xfi_hss_tx_hdr,
				       XFI_HSS_TX_SEG_NUM,
				       sizeof(struct mpi_coredump_segment_header) +
				       sizeof(mpi_coredump->serdes_xfi_hss_tx),
				       "XFI HSS TX Registers");

	qlge_build_coredump_seg_header(&mpi_coredump->xfi_hss_rx_hdr,
				       XFI_HSS_RX_SEG_NUM,
				       sizeof(struct mpi_coredump_segment_header) +
				       sizeof(mpi_coredump->serdes_xfi_hss_rx),
				       "XFI HSS RX Registers");

	qlge_build_coredump_seg_header(&mpi_coredump->xfi_hss_pll_hdr,
				       XFI_HSS_PLL_SEG_NUM,
				       sizeof(struct mpi_coredump_segment_header) +
				       sizeof(mpi_coredump->serdes_xfi_hss_pll),
				       "XFI HSS PLL Registers");

	qlge_build_coredump_seg_header(&mpi_coredump->xaui2_an_hdr,
				       XAUI2_AN_SEG_NUM,
				       sizeof(struct mpi_coredump_segment_header) +
				       sizeof(mpi_coredump->serdes2_xaui_an),
				       "XAUI2 AN Registers");

	qlge_build_coredump_seg_header(&mpi_coredump->xaui2_hss_pcs_hdr,
				       XAUI2_HSS_PCS_SEG_NUM,
				       sizeof(struct mpi_coredump_segment_header) +
				       sizeof(mpi_coredump->serdes2_xaui_hss_pcs),
				       "XAUI2 HSS PCS Registers");

	qlge_build_coredump_seg_header(&mpi_coredump->xfi2_an_hdr,
				       XFI2_AN_SEG_NUM,
				       sizeof(struct mpi_coredump_segment_header) +
				       sizeof(mpi_coredump->serdes2_xfi_an),
				       "XFI2 AN Registers");

	qlge_build_coredump_seg_header(&mpi_coredump->xfi2_train_hdr,
				       XFI2_TRAIN_SEG_NUM,
				       sizeof(struct mpi_coredump_segment_header) +
				       sizeof(mpi_coredump->serdes2_xfi_train),
				       "XFI2 TRAIN Registers");

	qlge_build_coredump_seg_header(&mpi_coredump->xfi2_hss_pcs_hdr,
				       XFI2_HSS_PCS_SEG_NUM,
				       sizeof(struct mpi_coredump_segment_header) +
				       sizeof(mpi_coredump->serdes2_xfi_hss_pcs),
				       "XFI2 HSS PCS Registers");

	qlge_build_coredump_seg_header(&mpi_coredump->xfi2_hss_tx_hdr,
				       XFI2_HSS_TX_SEG_NUM,
				       sizeof(struct mpi_coredump_segment_header) +
				       sizeof(mpi_coredump->serdes2_xfi_hss_tx),
				       "XFI2 HSS TX Registers");

	qlge_build_coredump_seg_header(&mpi_coredump->xfi2_hss_rx_hdr,
				       XFI2_HSS_RX_SEG_NUM,
				       sizeof(struct mpi_coredump_segment_header) +
				       sizeof(mpi_coredump->serdes2_xfi_hss_rx),
				       "XFI2 HSS RX Registers");

	qlge_build_coredump_seg_header(&mpi_coredump->xfi2_hss_pll_hdr,
				       XFI2_HSS_PLL_SEG_NUM,
				       sizeof(struct mpi_coredump_segment_header) +
				       sizeof(mpi_coredump->serdes2_xfi_hss_pll),
				       "XFI2 HSS PLL Registers");

	status = qlge_get_serdes_regs(qdev, mpi_coredump);
	if (status) {
		netif_err(qdev, drv, qdev->ndev,
			  "Failed Dump of Serdes Registers. Status = 0x%.08x\n",
			  status);
		goto err;
	}

	/* Note: this segment size covers both the core regs AND the shadow
	 * regs that follow them in the structure.
	 */
	qlge_build_coredump_seg_header(&mpi_coredump->core_regs_seg_hdr,
				       CORE_SEG_NUM,
				       sizeof(mpi_coredump->core_regs_seg_hdr) +
				       sizeof(mpi_coredump->mpi_core_regs) +
				       sizeof(mpi_coredump->mpi_core_sh_regs),
				       "Core Registers");

	/* Get the MPI Core Registers */
	status = qlge_get_mpi_regs(qdev, &mpi_coredump->mpi_core_regs[0],
				   MPI_CORE_REGS_ADDR, MPI_CORE_REGS_CNT);
	if (status)
		goto err;
	/* Get the 16 MPI shadow registers */
	status = qlge_get_mpi_shadow_regs(qdev,
					  &mpi_coredump->mpi_core_sh_regs[0]);
	if (status)
		goto err;

	/* Get the Test Logic Registers */
	qlge_build_coredump_seg_header(&mpi_coredump->test_logic_regs_seg_hdr,
				       TEST_LOGIC_SEG_NUM,
				       sizeof(struct mpi_coredump_segment_header)
				       + sizeof(mpi_coredump->test_logic_regs),
				       "Test Logic Regs");
	status = qlge_get_mpi_regs(qdev, &mpi_coredump->test_logic_regs[0],
				   TEST_REGS_ADDR, TEST_REGS_CNT);
	if (status)
		goto err;

	/* Get the RMII Registers */
	qlge_build_coredump_seg_header(&mpi_coredump->rmii_regs_seg_hdr,
				       RMII_SEG_NUM,
				       sizeof(struct mpi_coredump_segment_header)
				       + sizeof(mpi_coredump->rmii_regs),
				       "RMII Registers");
	status = qlge_get_mpi_regs(qdev, &mpi_coredump->rmii_regs[0],
				   RMII_REGS_ADDR, RMII_REGS_CNT);
	if (status)
		goto err;

	/* Get the FCMAC1 Registers */
	qlge_build_coredump_seg_header(&mpi_coredump->fcmac1_regs_seg_hdr,
				       FCMAC1_SEG_NUM,
				       sizeof(struct mpi_coredump_segment_header)
				       + sizeof(mpi_coredump->fcmac1_regs),
				       "FCMAC1 Registers");
	status = qlge_get_mpi_regs(qdev, &mpi_coredump->fcmac1_regs[0],
				   FCMAC1_REGS_ADDR, FCMAC_REGS_CNT);
	if (status)
		goto err;

	/* Get the FCMAC2 Registers */
	qlge_build_coredump_seg_header(&mpi_coredump->fcmac2_regs_seg_hdr,
				       FCMAC2_SEG_NUM,
				       sizeof(struct mpi_coredump_segment_header)
				       + sizeof(mpi_coredump->fcmac2_regs),
				       "FCMAC2 Registers");
	status = qlge_get_mpi_regs(qdev, &mpi_coredump->fcmac2_regs[0],
				   FCMAC2_REGS_ADDR, FCMAC_REGS_CNT);
	if (status)
		goto err;

	/* Get the FC1 MBX Registers */
	qlge_build_coredump_seg_header(&mpi_coredump->fc1_mbx_regs_seg_hdr,
				       FC1_MBOX_SEG_NUM,
				       sizeof(struct mpi_coredump_segment_header)
				       + sizeof(mpi_coredump->fc1_mbx_regs),
				       "FC1 MBox Regs");
	status = qlge_get_mpi_regs(qdev, &mpi_coredump->fc1_mbx_regs[0],
				   FC1_MBX_REGS_ADDR, FC_MBX_REGS_CNT);
	if (status)
		goto err;

	/* Get the IDE Registers */
	qlge_build_coredump_seg_header(&mpi_coredump->ide_regs_seg_hdr,
				       IDE_SEG_NUM,
				       sizeof(struct mpi_coredump_segment_header)
				       + sizeof(mpi_coredump->ide_regs),
				       "IDE Registers");
	status = qlge_get_mpi_regs(qdev, &mpi_coredump->ide_regs[0],
				   IDE_REGS_ADDR, IDE_REGS_CNT);
	if (status)
		goto err;

	/* Get the NIC1 MBX Registers */
	qlge_build_coredump_seg_header(&mpi_coredump->nic1_mbx_regs_seg_hdr,
				       NIC1_MBOX_SEG_NUM,
				       sizeof(struct mpi_coredump_segment_header)
				       + sizeof(mpi_coredump->nic1_mbx_regs),
				       "NIC1 MBox Regs");
	status = qlge_get_mpi_regs(qdev, &mpi_coredump->nic1_mbx_regs[0],
				   NIC1_MBX_REGS_ADDR, NIC_MBX_REGS_CNT);
	if (status)
		goto err;

	/* Get the SMBus Registers */
	qlge_build_coredump_seg_header(&mpi_coredump->smbus_regs_seg_hdr,
				       SMBUS_SEG_NUM,
				       sizeof(struct mpi_coredump_segment_header)
				       + sizeof(mpi_coredump->smbus_regs),
				       "SMBus Registers");
	status = qlge_get_mpi_regs(qdev, &mpi_coredump->smbus_regs[0],
				   SMBUS_REGS_ADDR, SMBUS_REGS_CNT);
	if (status)
		goto err;

	/* Get the FC2 MBX Registers */
	qlge_build_coredump_seg_header(&mpi_coredump->fc2_mbx_regs_seg_hdr,
				       FC2_MBOX_SEG_NUM,
				       sizeof(struct mpi_coredump_segment_header)
				       + sizeof(mpi_coredump->fc2_mbx_regs),
				       "FC2 MBox Regs");
	status = qlge_get_mpi_regs(qdev, &mpi_coredump->fc2_mbx_regs[0],
				   FC2_MBX_REGS_ADDR, FC_MBX_REGS_CNT);
	if (status)
		goto err;

	/* Get the NIC2 MBX Registers */
	qlge_build_coredump_seg_header(&mpi_coredump->nic2_mbx_regs_seg_hdr,
				       NIC2_MBOX_SEG_NUM,
				       sizeof(struct mpi_coredump_segment_header)
				       + sizeof(mpi_coredump->nic2_mbx_regs),
				       "NIC2 MBox Regs");
	status = qlge_get_mpi_regs(qdev, &mpi_coredump->nic2_mbx_regs[0],
				   NIC2_MBX_REGS_ADDR, NIC_MBX_REGS_CNT);
	if (status)
		goto err;

	/* Get the I2C Registers */
	qlge_build_coredump_seg_header(&mpi_coredump->i2c_regs_seg_hdr,
				       I2C_SEG_NUM,
				       sizeof(struct mpi_coredump_segment_header)
				       + sizeof(mpi_coredump->i2c_regs),
				       "I2C Registers");
	status = qlge_get_mpi_regs(qdev, &mpi_coredump->i2c_regs[0],
				   I2C_REGS_ADDR, I2C_REGS_CNT);
	if (status)
		goto err;

	/* Get the MEMC Registers */
	qlge_build_coredump_seg_header(&mpi_coredump->memc_regs_seg_hdr,
				       MEMC_SEG_NUM,
				       sizeof(struct mpi_coredump_segment_header)
				       + sizeof(mpi_coredump->memc_regs),
				       "MEMC Registers");
	status = qlge_get_mpi_regs(qdev, &mpi_coredump->memc_regs[0],
				   MEMC_REGS_ADDR, MEMC_REGS_CNT);
	if (status)
		goto err;

	/* Get the PBus Registers */
	qlge_build_coredump_seg_header(&mpi_coredump->pbus_regs_seg_hdr,
				       PBUS_SEG_NUM,
				       sizeof(struct mpi_coredump_segment_header)
				       + sizeof(mpi_coredump->pbus_regs),
				       "PBUS Registers");
	status = qlge_get_mpi_regs(qdev, &mpi_coredump->pbus_regs[0],
				   PBUS_REGS_ADDR, PBUS_REGS_CNT);
	if (status)
		goto err;

	/* Get the MDE Registers */
	qlge_build_coredump_seg_header(&mpi_coredump->mde_regs_seg_hdr,
				       MDE_SEG_NUM,
				       sizeof(struct mpi_coredump_segment_header)
				       + sizeof(mpi_coredump->mde_regs),
				       "MDE Registers");
	status = qlge_get_mpi_regs(qdev, &mpi_coredump->mde_regs[0],
				   MDE_REGS_ADDR, MDE_REGS_CNT);
	if (status)
		goto err;

	qlge_build_coredump_seg_header(&mpi_coredump->misc_nic_seg_hdr,
				       MISC_NIC_INFO_SEG_NUM,
				       sizeof(struct mpi_coredump_segment_header)
				       + sizeof(mpi_coredump->misc_nic_info),
				       "MISC NIC INFO");
	mpi_coredump->misc_nic_info.rx_ring_count = qdev->rx_ring_count;
	mpi_coredump->misc_nic_info.tx_ring_count = qdev->tx_ring_count;
	mpi_coredump->misc_nic_info.intr_count = qdev->intr_count;
	mpi_coredump->misc_nic_info.function = qdev->func;

	/* Segment 31 */
	/* Get indexed register values. */
	qlge_build_coredump_seg_header(&mpi_coredump->intr_states_seg_hdr,
				       INTR_STATES_SEG_NUM,
				       sizeof(struct mpi_coredump_segment_header)
				       + sizeof(mpi_coredump->intr_states),
				       "INTR States");
	qlge_get_intr_states(qdev, &mpi_coredump->intr_states[0]);

	qlge_build_coredump_seg_header(&mpi_coredump->cam_entries_seg_hdr,
				       CAM_ENTRIES_SEG_NUM,
				       sizeof(struct mpi_coredump_segment_header)
				       + sizeof(mpi_coredump->cam_entries),
				       "CAM Entries");
	status = qlge_get_cam_entries(qdev, &mpi_coredump->cam_entries[0]);
	if (status)
		goto err;

	qlge_build_coredump_seg_header(&mpi_coredump->nic_routing_words_seg_hdr,
				       ROUTING_WORDS_SEG_NUM,
				       sizeof(struct mpi_coredump_segment_header)
				       + sizeof(mpi_coredump->nic_routing_words),
				       "Routing Words");
	status = qlge_get_routing_entries(qdev,
					  &mpi_coredump->nic_routing_words[0]);
	if (status)
		goto err;

	/* Segment 34 (Rev C. step 23) */
	qlge_build_coredump_seg_header(&mpi_coredump->ets_seg_hdr,
				       ETS_SEG_NUM,
				       sizeof(struct mpi_coredump_segment_header)
				       + sizeof(mpi_coredump->ets),
				       "ETS Registers");
	status = qlge_get_ets_regs(qdev, &mpi_coredump->ets[0]);
	if (status)
		goto err;

	qlge_build_coredump_seg_header(&mpi_coredump->probe_dump_seg_hdr,
				       PROBE_DUMP_SEG_NUM,
				       sizeof(struct mpi_coredump_segment_header)
				       + sizeof(mpi_coredump->probe_dump),
				       "Probe Dump");
	qlge_get_probe_dump(qdev, &mpi_coredump->probe_dump[0]);

	qlge_build_coredump_seg_header(&mpi_coredump->routing_reg_seg_hdr,
				       ROUTING_INDEX_SEG_NUM,
				       sizeof(struct mpi_coredump_segment_header)
				       + sizeof(mpi_coredump->routing_regs),
				       "Routing Regs");
	status = qlge_get_routing_index_registers(qdev,
						  &mpi_coredump->routing_regs[0]);
	if (status)
		goto err;

	qlge_build_coredump_seg_header(&mpi_coredump->mac_prot_reg_seg_hdr,
				       MAC_PROTOCOL_SEG_NUM,
				       sizeof(struct mpi_coredump_segment_header)
				       + sizeof(mpi_coredump->mac_prot_regs),
				       "MAC Prot Regs");
	qlge_get_mac_protocol_registers(qdev, &mpi_coredump->mac_prot_regs[0]);

	/* Get the semaphore registers for all 5 functions */
	qlge_build_coredump_seg_header(&mpi_coredump->sem_regs_seg_hdr,
				       SEM_REGS_SEG_NUM,
				       sizeof(struct mpi_coredump_segment_header) +
				       sizeof(mpi_coredump->sem_regs), "Sem Registers");

	qlge_get_sem_registers(qdev, &mpi_coredump->sem_regs[0]);

	/* Prevent the mpi restarting while we dump the memory.*/
	qlge_write_mpi_reg(qdev, MPI_TEST_FUNC_RST_STS, MPI_TEST_FUNC_RST_FRC);

	/* clear the pause */
	status = qlge_unpause_mpi_risc(qdev);
	if (status) {
		netif_err(qdev, drv, qdev->ndev,
			  "Failed RISC unpause. Status = 0x%.08x\n", status);
		goto err;
	}

	/* Reset the RISC so we can dump RAM */
	status = qlge_hard_reset_mpi_risc(qdev);
	if (status) {
		netif_err(qdev, drv, qdev->ndev,
			  "Failed RISC reset. Status = 0x%.08x\n", status);
		goto err;
	}

	qlge_build_coredump_seg_header(&mpi_coredump->code_ram_seg_hdr,
				       WCS_RAM_SEG_NUM,
				       sizeof(struct mpi_coredump_segment_header)
				       + sizeof(mpi_coredump->code_ram),
				       "WCS RAM");
	status = qlge_dump_risc_ram_area(qdev, &mpi_coredump->code_ram[0],
					 CODE_RAM_ADDR, CODE_RAM_CNT);
	if (status) {
		netif_err(qdev, drv, qdev->ndev,
			  "Failed Dump of CODE RAM. Status = 0x%.08x\n",
			  status);
		goto err;
	}

	/* Insert the segment header */
	qlge_build_coredump_seg_header(&mpi_coredump->memc_ram_seg_hdr,
				       MEMC_RAM_SEG_NUM,
				       sizeof(struct mpi_coredump_segment_header)
				       + sizeof(mpi_coredump->memc_ram),
				       "MEMC RAM");
	status = qlge_dump_risc_ram_area(qdev, &mpi_coredump->memc_ram[0],
					 MEMC_RAM_ADDR, MEMC_RAM_CNT);
	if (status) {
		netif_err(qdev, drv, qdev->ndev,
			  "Failed Dump of MEMC RAM. Status = 0x%.08x\n",
			  status);
		goto err;
	}
err:
	qlge_sem_unlock(qdev, SEM_PROC_REG_MASK); /* does flush too */
	return status;
}
/* Ask the MPI firmware for a forced core dump.
 *
 * The request is only issued when this function owns the firmware and
 * the netdev is up; otherwise the reason is logged and nothing happens.
 */
static void qlge_get_core_dump(struct qlge_adapter *qdev)
{
	if (qlge_own_firmware(qdev)) {
		if (netif_running(qdev->ndev))
			/* Hand off to the firmware-error worker. */
			qlge_queue_fw_error(qdev);
		else
			netif_err(qdev, ifup, qdev->ndev,
				  "Force Coredump can only be done from interface that is up\n");
	} else {
		netif_err(qdev, drv, qdev->ndev, "Don't own firmware!\n");
	}
}
/* Build a reduced ("general") register dump into @mpi_coredump.
 *
 * Fills in the global coredump header, then a fixed set of segments:
 * misc NIC info, the first 64 NIC registers, interrupt states, CAM
 * entries, routing words and ETS registers. Segments that require a
 * register read-back (CAM, routing, ETS) abort the rest of the dump on
 * failure; earlier segments remain valid in the buffer.
 */
static void qlge_gen_reg_dump(struct qlge_adapter *qdev,
struct qlge_reg_dump *mpi_coredump)
{
int i, status;
/* Global header identifying the dump format and total size. */
memset(&mpi_coredump->mpi_global_header, 0,
sizeof(struct mpi_coredump_global_header));
mpi_coredump->mpi_global_header.cookie = MPI_COREDUMP_COOKIE;
mpi_coredump->mpi_global_header.header_size =
sizeof(struct mpi_coredump_global_header);
mpi_coredump->mpi_global_header.image_size =
sizeof(struct qlge_reg_dump);
/* strncpy pads the fixed-width id_string field; "MPI Coredump" fits. */
strncpy(mpi_coredump->mpi_global_header.id_string, "MPI Coredump",
sizeof(mpi_coredump->mpi_global_header.id_string));
/* segment 16 */
qlge_build_coredump_seg_header(&mpi_coredump->misc_nic_seg_hdr,
MISC_NIC_INFO_SEG_NUM,
sizeof(struct mpi_coredump_segment_header)
+ sizeof(mpi_coredump->misc_nic_info),
"MISC NIC INFO");
mpi_coredump->misc_nic_info.rx_ring_count = qdev->rx_ring_count;
mpi_coredump->misc_nic_info.tx_ring_count = qdev->tx_ring_count;
mpi_coredump->misc_nic_info.intr_count = qdev->intr_count;
mpi_coredump->misc_nic_info.function = qdev->func;
/* Segment 16, Rev C. Step 18 */
qlge_build_coredump_seg_header(&mpi_coredump->nic_regs_seg_hdr,
NIC1_CONTROL_SEG_NUM,
sizeof(struct mpi_coredump_segment_header)
+ sizeof(mpi_coredump->nic_regs),
"NIC Registers");
/* Get generic reg dump */
for (i = 0; i < 64; i++)
mpi_coredump->nic_regs[i] = qlge_read32(qdev, i * sizeof(u32));
/* Segment 31 */
/* Get indexed register values. */
qlge_build_coredump_seg_header(&mpi_coredump->intr_states_seg_hdr,
INTR_STATES_SEG_NUM,
sizeof(struct mpi_coredump_segment_header)
+ sizeof(mpi_coredump->intr_states),
"INTR States");
qlge_get_intr_states(qdev, &mpi_coredump->intr_states[0]);
qlge_build_coredump_seg_header(&mpi_coredump->cam_entries_seg_hdr,
CAM_ENTRIES_SEG_NUM,
sizeof(struct mpi_coredump_segment_header)
+ sizeof(mpi_coredump->cam_entries),
"CAM Entries");
status = qlge_get_cam_entries(qdev, &mpi_coredump->cam_entries[0]);
if (status)
return;
qlge_build_coredump_seg_header(&mpi_coredump->nic_routing_words_seg_hdr,
ROUTING_WORDS_SEG_NUM,
sizeof(struct mpi_coredump_segment_header)
+ sizeof(mpi_coredump->nic_routing_words),
"Routing Words");
status = qlge_get_routing_entries(qdev,
&mpi_coredump->nic_routing_words[0]);
if (status)
return;
/* Segment 34 (Rev C. step 23) */
qlge_build_coredump_seg_header(&mpi_coredump->ets_seg_hdr,
ETS_SEG_NUM,
sizeof(struct mpi_coredump_segment_header)
+ sizeof(mpi_coredump->ets),
"ETS Registers");
status = qlge_get_ets_regs(qdev, &mpi_coredump->ets[0]);
if (status)
return;
}
/* Fill @buff with a firmware dump.
 *
 * With QL_FRC_COREDUMP set, only a snapshot of the general registers
 * goes into the user's buffer and a forced firmware core dump is
 * queued for spooling to the log. Otherwise a complete core dump is
 * written into @buff, followed by a soft RISC reset on success.
 */
void qlge_get_dump(struct qlge_adapter *qdev, void *buff)
{
	if (test_bit(QL_FRC_COREDUMP, &qdev->flags)) {
		qlge_gen_reg_dump(qdev, buff);
		qlge_get_core_dump(qdev);
		return;
	}

	if (qlge_core_dump(qdev, buff))
		netif_err(qdev, drv, qdev->ndev, "coredump failed!\n");
	else
		qlge_soft_reset_mpi_risc(qdev);
}
| linux-master | drivers/staging/qlge/qlge_dbg.c |
// SPDX-License-Identifier: GPL-2.0-only
/*
* QLogic qlge NIC HBA Driver
* Copyright (c) 2003-2008 QLogic Corporation
* Author: Linux qlge network device driver by
* Ron Mercer <[email protected]>
*/
#include <linux/kernel.h>
#include <linux/bitops.h>
#include <linux/types.h>
#include <linux/module.h>
#include <linux/list.h>
#include <linux/pci.h>
#include <linux/dma-mapping.h>
#include <linux/pagemap.h>
#include <linux/sched.h>
#include <linux/slab.h>
#include <linux/dmapool.h>
#include <linux/mempool.h>
#include <linux/spinlock.h>
#include <linux/kthread.h>
#include <linux/interrupt.h>
#include <linux/errno.h>
#include <linux/ioport.h>
#include <linux/in.h>
#include <linux/ip.h>
#include <linux/ipv6.h>
#include <net/ipv6.h>
#include <linux/tcp.h>
#include <linux/udp.h>
#include <linux/if_arp.h>
#include <linux/if_ether.h>
#include <linux/netdevice.h>
#include <linux/etherdevice.h>
#include <linux/ethtool.h>
#include <linux/if_vlan.h>
#include <linux/skbuff.h>
#include <linux/delay.h>
#include <linux/mm.h>
#include <linux/vmalloc.h>
#include <linux/prefetch.h>
#include <net/ip6_checksum.h>
#include "qlge.h"
#include "qlge_devlink.h"
/* Exported driver identity strings (used in ethtool/driver info). */
char qlge_driver_name[] = DRV_NAME;
const char qlge_driver_version[] = DRV_VERSION;
MODULE_AUTHOR("Ron Mercer <[email protected]>");
MODULE_DESCRIPTION(DRV_STRING " ");
MODULE_LICENSE("GPL");
MODULE_VERSION(DRV_VERSION);
/* Default netif message-level mask used when debug == -1. */
static const u32 default_msg =
NETIF_MSG_DRV | NETIF_MSG_PROBE | NETIF_MSG_LINK |
NETIF_MSG_IFDOWN |
NETIF_MSG_IFUP |
NETIF_MSG_RX_ERR |
NETIF_MSG_TX_ERR |
NETIF_MSG_HW | NETIF_MSG_WOL | 0;
static int debug = -1; /* defaults above */
module_param(debug, int, 0664);
MODULE_PARM_DESC(debug, "Debug level (0=none,...,16=all)");
/* Interrupt delivery mode, best-capability first. */
#define MSIX_IRQ 0
#define MSI_IRQ 1
#define LEG_IRQ 2
static int qlge_irq_type = MSIX_IRQ;
module_param(qlge_irq_type, int, 0664);
MODULE_PARM_DESC(qlge_irq_type, "0 = MSI-X, 1 = MSI, 2 = Legacy.");
/* Off by default: MPI coredump support allocates a large buffer. */
static int qlge_mpi_coredump;
module_param(qlge_mpi_coredump, int, 0);
MODULE_PARM_DESC(qlge_mpi_coredump,
"Option to enable MPI firmware dump. Default is OFF - Do Not allocate memory. ");
static int qlge_force_coredump;
module_param(qlge_force_coredump, int, 0);
MODULE_PARM_DESC(qlge_force_coredump,
"Option to allow force of firmware core dump. Default is OFF - Do not allow.");
/* PCI IDs this driver binds to (8012 and 8000 chips). */
static const struct pci_device_id qlge_pci_tbl[] = {
{PCI_DEVICE(PCI_VENDOR_ID_QLOGIC, QLGE_DEVICE_ID_8012)},
{PCI_DEVICE(PCI_VENDOR_ID_QLOGIC, QLGE_DEVICE_ID_8000)},
/* required last entry */
{0,}
};
MODULE_DEVICE_TABLE(pci, qlge_pci_tbl);
/* Forward declarations for routines defined later in this file. */
static int qlge_wol(struct qlge_adapter *);
static void qlge_set_multicast_list(struct net_device *);
static int qlge_adapter_down(struct qlge_adapter *);
static int qlge_adapter_up(struct qlge_adapter *);
/* This hardware semaphore causes exclusive access to
* resources shared between the NIC driver, MPI firmware,
* FCOE firmware and the FC driver.
*/
/* Try once to take the hardware semaphore selected by @sem_mask.
 *
 * Returns 0 when the semaphore was acquired, non-zero when it is held
 * elsewhere, and -EINVAL for an unknown mask.
 */
static int qlge_sem_trylock(struct qlge_adapter *qdev, u32 sem_mask)
{
	u32 shift;
	u32 sem_bits;

	/* Map the semaphore mask onto its SEM_SET bit position. */
	switch (sem_mask) {
	case SEM_XGMAC0_MASK:
		shift = SEM_XGMAC0_SHIFT;
		break;
	case SEM_XGMAC1_MASK:
		shift = SEM_XGMAC1_SHIFT;
		break;
	case SEM_ICB_MASK:
		shift = SEM_ICB_SHIFT;
		break;
	case SEM_MAC_ADDR_MASK:
		shift = SEM_MAC_ADDR_SHIFT;
		break;
	case SEM_FLASH_MASK:
		shift = SEM_FLASH_SHIFT;
		break;
	case SEM_PROBE_MASK:
		shift = SEM_PROBE_SHIFT;
		break;
	case SEM_RT_IDX_MASK:
		shift = SEM_RT_IDX_SHIFT;
		break;
	case SEM_PROC_REG_MASK:
		shift = SEM_PROC_REG_SHIFT;
		break;
	default:
		netif_alert(qdev, probe, qdev->ndev, "bad Semaphore mask!.\n");
		return -EINVAL;
	}

	sem_bits = SEM_SET << shift;
	/* Request the lock, then read back to see whether we got it. */
	qlge_write32(qdev, SEM, sem_bits | sem_mask);
	return !(qlge_read32(qdev, SEM) & sem_bits);
}
/* Spin (up to 30 attempts, 100us apart) until the hardware semaphore
 * is acquired. Returns 0 on success, -ETIMEDOUT if it never came free.
 */
int qlge_sem_spinlock(struct qlge_adapter *qdev, u32 sem_mask)
{
	unsigned int attempt;

	for (attempt = 0; attempt < 30; attempt++) {
		if (!qlge_sem_trylock(qdev, sem_mask))
			return 0;
		udelay(100);
	}
	return -ETIMEDOUT;
}
/* Release a hardware semaphore taken with qlge_sem_spinlock/trylock.
 * The read-back forces the posted write out to the device.
 */
void qlge_sem_unlock(struct qlge_adapter *qdev, u32 sem_mask)
{
qlge_write32(qdev, SEM, sem_mask);
qlge_read32(qdev, SEM); /* flush */
}
/* This function waits for a specific bit to come ready
* in a given register. It is used mostly by the initialize
* process, but is also used in kernel thread API such as
* netdev->set_multi, netdev->set_mac_address, netdev->vlan_rx_add_vid.
*/
/* Poll @reg until @bit is set, @err_bit is set, or we give up.
 *
 * Returns 0 once @bit is observed, -EIO if @err_bit appears first,
 * and -ETIMEDOUT after UDELAY_COUNT polls UDELAY_DELAY apart.
 */
int qlge_wait_reg_rdy(struct qlge_adapter *qdev, u32 reg, u32 bit, u32 err_bit)
{
	int i;

	for (i = 0; i < UDELAY_COUNT; i++) {
		u32 val = qlge_read32(qdev, reg);

		/* Error bit takes precedence over the ready bit. */
		if (val & err_bit) {
			netif_alert(qdev, probe, qdev->ndev,
				    "register 0x%.08x access error, value = 0x%.08x!.\n",
				    reg, val);
			return -EIO;
		}
		if (val & bit)
			return 0;
		udelay(UDELAY_DELAY);
	}
	netif_alert(qdev, probe, qdev->ndev,
		    "Timed out waiting for reg %x to come ready.\n", reg);
	return -ETIMEDOUT;
}
/* The CFG register is used to download TX and RX control blocks
* to the chip. This function waits for an operation to complete.
*/
/* Poll the CFG register until the download @bit clears.
 *
 * Returns 0 when the operation completed, -EIO if the CFG error bit
 * (CFG_LE) is seen, -ETIMEDOUT if the bit never clears.
 */
static int qlge_wait_cfg(struct qlge_adapter *qdev, u32 bit)
{
	int i;

	for (i = 0; i < UDELAY_COUNT; i++) {
		u32 cfg = qlge_read32(qdev, CFG);

		if (cfg & CFG_LE)
			return -EIO;
		if (!(cfg & bit))
			return 0;
		udelay(UDELAY_DELAY);
	}
	return -ETIMEDOUT;
}
/* Used to issue init control blocks to hw. Maps control block,
* sets address, triggers download, waits for completion.
*/
/* Download an init control block at @ptr (@size bytes) to the chip.
 *
 * Maps the block for DMA, takes the ICB semaphore, programs the ICB
 * address registers, triggers the download selected by @bit for queue
 * @q_id, and waits for completion. Cleanup runs in reverse order via
 * the goto labels. Returns 0 on success or a negative errno.
 */
int qlge_write_cfg(struct qlge_adapter *qdev, void *ptr, int size, u32 bit,
u16 q_id)
{
u64 map;
int status = 0;
int direction;
u32 mask;
u32 value;
/* Load operations send data to the chip; dumps read it back. */
if (bit & (CFG_LRQ | CFG_LR | CFG_LCQ))
direction = DMA_TO_DEVICE;
else
direction = DMA_FROM_DEVICE;
map = dma_map_single(&qdev->pdev->dev, ptr, size, direction);
if (dma_mapping_error(&qdev->pdev->dev, map)) {
netif_err(qdev, ifup, qdev->ndev, "Couldn't map DMA area.\n");
return -ENOMEM;
}
status = qlge_sem_spinlock(qdev, SEM_ICB_MASK);
if (status)
goto lock_failed;
/* Make sure no previous CFG operation is still in flight. */
status = qlge_wait_cfg(qdev, bit);
if (status) {
netif_err(qdev, ifup, qdev->ndev,
"Timed out waiting for CFG to come ready.\n");
goto exit;
}
/* Program the 64-bit DMA address, then kick off the transfer. */
qlge_write32(qdev, ICB_L, (u32)map);
qlge_write32(qdev, ICB_H, (u32)(map >> 32));
mask = CFG_Q_MASK | (bit << 16);
value = bit | (q_id << CFG_Q_SHIFT);
qlge_write32(qdev, CFG, (mask | value));
/*
* Wait for the bit to clear after signaling hw.
*/
status = qlge_wait_cfg(qdev, bit);
exit:
qlge_sem_unlock(qdev, SEM_ICB_MASK); /* does flush too */
lock_failed:
dma_unmap_single(&qdev->pdev->dev, map, size, direction);
return status;
}
/* Get a specific MAC address from the CAM. Used for debug and reg dump. */
/* Get a specific MAC address from the CAM. Used for debug and reg dump.
 *
 * Reads two 32-bit words (three for CAM entries, which carry an extra
 * routing/output word) for CAM slot @index into @value. Each word
 * requires the MAC_ADDR_IDX ready/read handshake; callers must hold
 * the MAC address semaphore. Only MULTI_MAC and CAM_MAC types are
 * implemented; others return -EPERM.
 */
int qlge_get_mac_addr_reg(struct qlge_adapter *qdev, u32 type, u16 index,
u32 *value)
{
u32 offset = 0;
int status;
switch (type) {
case MAC_ADDR_TYPE_MULTI_MAC:
case MAC_ADDR_TYPE_CAM_MAC: {
/* Word 0: wait ready, select offset/index, wait for data. */
status = qlge_wait_reg_rdy(qdev, MAC_ADDR_IDX, MAC_ADDR_MW, 0);
if (status)
break;
qlge_write32(qdev, MAC_ADDR_IDX,
(offset++) | /* offset */
(index << MAC_ADDR_IDX_SHIFT) | /* index */
MAC_ADDR_ADR | MAC_ADDR_RS |
type); /* type */
status = qlge_wait_reg_rdy(qdev, MAC_ADDR_IDX, MAC_ADDR_MR, 0);
if (status)
break;
*value++ = qlge_read32(qdev, MAC_ADDR_DATA);
/* Word 1: same handshake at the next offset. */
status = qlge_wait_reg_rdy(qdev, MAC_ADDR_IDX, MAC_ADDR_MW, 0);
if (status)
break;
qlge_write32(qdev, MAC_ADDR_IDX,
(offset++) | /* offset */
(index << MAC_ADDR_IDX_SHIFT) | /* index */
MAC_ADDR_ADR | MAC_ADDR_RS |
type); /* type */
status = qlge_wait_reg_rdy(qdev, MAC_ADDR_IDX, MAC_ADDR_MR, 0);
if (status)
break;
*value++ = qlge_read32(qdev, MAC_ADDR_DATA);
/* CAM entries have a third word (output/routing info). */
if (type == MAC_ADDR_TYPE_CAM_MAC) {
status = qlge_wait_reg_rdy(qdev, MAC_ADDR_IDX,
MAC_ADDR_MW, 0);
if (status)
break;
qlge_write32(qdev, MAC_ADDR_IDX,
(offset++) | /* offset */
(index
<< MAC_ADDR_IDX_SHIFT) | /* index */
MAC_ADDR_ADR |
MAC_ADDR_RS | type); /* type */
status = qlge_wait_reg_rdy(qdev, MAC_ADDR_IDX,
MAC_ADDR_MR, 0);
if (status)
break;
*value++ = qlge_read32(qdev, MAC_ADDR_DATA);
}
break;
}
case MAC_ADDR_TYPE_VLAN:
case MAC_ADDR_TYPE_MULTI_FLTR:
default:
netif_crit(qdev, ifup, qdev->ndev,
"Address type %d not yet supported.\n", type);
status = -EPERM;
}
return status;
}
/* Set up a MAC, multicast or VLAN address for the
* inbound frame matching.
*/
/* Set up a MAC, multicast or VLAN address for the
 * inbound frame matching.
 *
 * @addr: 6-byte MAC for MULTI_MAC/CAM_MAC; for VLAN the first 4 bytes
 *        hold the MAC_ADDR_E enable/disable bit.
 * @type: MAC_ADDR_TYPE_* selector.
 * @index: CAM/filter slot to program.
 *
 * Each 32-bit write uses the MAC_ADDR_IDX ready handshake; the caller
 * must hold the MAC address semaphore. Returns 0 or a negative errno.
 */
static int qlge_set_mac_addr_reg(struct qlge_adapter *qdev, const u8 *addr,
u32 type, u16 index)
{
u32 offset = 0;
int status = 0;
switch (type) {
case MAC_ADDR_TYPE_MULTI_MAC: {
/* Split the MAC into the 16-bit upper / 32-bit lower words
 * the hardware expects.
 */
u32 upper = (addr[0] << 8) | addr[1];
u32 lower = (addr[2] << 24) | (addr[3] << 16) | (addr[4] << 8) |
(addr[5]);
status = qlge_wait_reg_rdy(qdev, MAC_ADDR_IDX, MAC_ADDR_MW, 0);
if (status)
break;
qlge_write32(qdev, MAC_ADDR_IDX,
(offset++) | (index << MAC_ADDR_IDX_SHIFT) | type |
MAC_ADDR_E);
qlge_write32(qdev, MAC_ADDR_DATA, lower);
status = qlge_wait_reg_rdy(qdev, MAC_ADDR_IDX, MAC_ADDR_MW, 0);
if (status)
break;
qlge_write32(qdev, MAC_ADDR_IDX,
(offset++) | (index << MAC_ADDR_IDX_SHIFT) | type |
MAC_ADDR_E);
qlge_write32(qdev, MAC_ADDR_DATA, upper);
status = qlge_wait_reg_rdy(qdev, MAC_ADDR_IDX, MAC_ADDR_MW, 0);
break;
}
case MAC_ADDR_TYPE_CAM_MAC: {
u32 cam_output;
u32 upper = (addr[0] << 8) | addr[1];
u32 lower = (addr[2] << 24) | (addr[3] << 16) | (addr[4] << 8) |
(addr[5]);
status = qlge_wait_reg_rdy(qdev, MAC_ADDR_IDX, MAC_ADDR_MW, 0);
if (status)
break;
qlge_write32(qdev, MAC_ADDR_IDX,
(offset++) | /* offset */
(index << MAC_ADDR_IDX_SHIFT) | /* index */
type); /* type */
qlge_write32(qdev, MAC_ADDR_DATA, lower);
status = qlge_wait_reg_rdy(qdev, MAC_ADDR_IDX, MAC_ADDR_MW, 0);
if (status)
break;
qlge_write32(qdev, MAC_ADDR_IDX,
(offset++) | /* offset */
(index << MAC_ADDR_IDX_SHIFT) | /* index */
type); /* type */
qlge_write32(qdev, MAC_ADDR_DATA, upper);
status = qlge_wait_reg_rdy(qdev, MAC_ADDR_IDX, MAC_ADDR_MW, 0);
if (status)
break;
/* Third word: CAM output routing for this entry. */
qlge_write32(qdev, MAC_ADDR_IDX,
(offset) | /* offset */
(index << MAC_ADDR_IDX_SHIFT) | /* index */
type); /* type */
/* This field should also include the queue id
* and possibly the function id. Right now we hardcode
* the route field to NIC core.
*/
cam_output = (CAM_OUT_ROUTE_NIC |
(qdev->func << CAM_OUT_FUNC_SHIFT) |
(0 << CAM_OUT_CQ_ID_SHIFT));
if (qdev->ndev->features & NETIF_F_HW_VLAN_CTAG_RX)
cam_output |= CAM_OUT_RV;
/* route to NIC core */
qlge_write32(qdev, MAC_ADDR_DATA, cam_output);
break;
}
case MAC_ADDR_TYPE_VLAN: {
u32 enable_bit = *((u32 *)&addr[0]);
/* For VLAN, the addr actually holds a bit that
* either enables or disables the vlan id we are
* addressing. It's either MAC_ADDR_E on or off.
* That's bit-27 we're talking about.
*/
status = qlge_wait_reg_rdy(qdev, MAC_ADDR_IDX, MAC_ADDR_MW, 0);
if (status)
break;
qlge_write32(qdev, MAC_ADDR_IDX,
offset | /* offset */
(index << MAC_ADDR_IDX_SHIFT) | /* index */
type | /* type */
enable_bit); /* enable/disable */
break;
}
case MAC_ADDR_TYPE_MULTI_FLTR:
default:
netif_crit(qdev, ifup, qdev->ndev,
"Address type %d not yet supported.\n", type);
status = -EPERM;
}
return status;
}
/* Set or clear MAC address in hardware. We sometimes
* have to clear it to prevent wrong frame routing
* especially in a bonding environment.
*/
/* Program (@set != 0) or clear (@set == 0) this function's MAC address
 * in the hardware CAM. Clearing prevents wrong frame routing, which
 * matters especially in bonding setups. Returns 0 or a negative errno.
 */
static int qlge_set_mac_addr(struct qlge_adapter *qdev, int set)
{
	char zero_mac_addr[ETH_ALEN];
	char *mac;
	int rc;

	if (set) {
		mac = &qdev->current_mac_addr[0];
		netif_printk(qdev, ifup, KERN_DEBUG, qdev->ndev,
			     "Set Mac addr %pM\n", mac);
	} else {
		/* All-zero address disables the CAM entry. */
		eth_zero_addr(zero_mac_addr);
		mac = &zero_mac_addr[0];
		netif_printk(qdev, ifup, KERN_DEBUG, qdev->ndev,
			     "Clearing MAC address\n");
	}

	rc = qlge_sem_spinlock(qdev, SEM_MAC_ADDR_MASK);
	if (rc)
		return rc;
	rc = qlge_set_mac_addr_reg(qdev, (const u8 *)mac,
				   MAC_ADDR_TYPE_CAM_MAC,
				   qdev->func * MAX_CQ);
	qlge_sem_unlock(qdev, SEM_MAC_ADDR_MASK);
	if (rc)
		netif_err(qdev, ifup, qdev->ndev,
			  "Failed to init mac address.\n");
	return rc;
}
/* Report carrier-on to the stack and (re)program our MAC in the CAM. */
void qlge_link_on(struct qlge_adapter *qdev)
{
netif_err(qdev, link, qdev->ndev, "Link is up.\n");
netif_carrier_on(qdev->ndev);
qlge_set_mac_addr(qdev, 1);
}
/* Report carrier-off to the stack and clear our MAC from the CAM. */
void qlge_link_off(struct qlge_adapter *qdev)
{
netif_err(qdev, link, qdev->ndev, "Link is down.\n");
netif_carrier_off(qdev->ndev);
qlge_set_mac_addr(qdev, 0);
}
/* Get a specific frame routing value from the CAM.
* Used for debug and reg dump.
*/
/* Read routing-index slot @index from the CAM into @value.
 * Used for debug and reg dump. Returns 0 or a negative errno.
 */
int qlge_get_routing_reg(struct qlge_adapter *qdev, u32 index, u32 *value)
{
	int rc;

	rc = qlge_wait_reg_rdy(qdev, RT_IDX, RT_IDX_MW, 0);
	if (rc)
		return rc;
	/* Select the slot and request a read-back. */
	qlge_write32(qdev, RT_IDX,
		     RT_IDX_TYPE_NICQ | RT_IDX_RS | (index << RT_IDX_IDX_SHIFT));
	rc = qlge_wait_reg_rdy(qdev, RT_IDX, RT_IDX_MR, 0);
	if (rc)
		return rc;
	*value = qlge_read32(qdev, RT_DATA);
	return 0;
}
/* The NIC function for this chip has 16 routing indexes. Each one can be used
* to route different frame types to various inbound queues. We send broadcast/
* multicast/error frames to the default queue for slow handling,
* and CAM hit/RSS frames to the fast handling queues.
*/
/* The NIC function for this chip has 16 routing indexes. Each one can be used
 * to route different frame types to various inbound queues. We send broadcast/
 * multicast/error frames to the default queue for slow handling,
 * and CAM hit/RSS frames to the fast handling queues.
 *
 * @mask selects which frame class to (re)program; @enable sets or
 * clears the entry's E-bit. @index is only used for the mask==0
 * "clear" case. Returns 0 on success, -EPERM for an unknown mask,
 * or the error from the register-ready wait.
 */
static int qlge_set_routing_reg(struct qlge_adapter *qdev, u32 index, u32 mask,
int enable)
{
int status = -EINVAL; /* Return error if no mask match. */
u32 value = 0;
switch (mask) {
case RT_IDX_CAM_HIT:
{
value = RT_IDX_DST_CAM_Q | /* dest */
RT_IDX_TYPE_NICQ | /* type */
(RT_IDX_CAM_HIT_SLOT << RT_IDX_IDX_SHIFT);/* index */
break;
}
case RT_IDX_VALID: /* Promiscuous Mode frames. */
{
value = RT_IDX_DST_DFLT_Q | /* dest */
RT_IDX_TYPE_NICQ | /* type */
(RT_IDX_PROMISCUOUS_SLOT << RT_IDX_IDX_SHIFT);/* index */
break;
}
case RT_IDX_ERR: /* Pass up MAC,IP,TCP/UDP error frames. */
{
value = RT_IDX_DST_DFLT_Q | /* dest */
RT_IDX_TYPE_NICQ | /* type */
(RT_IDX_ALL_ERR_SLOT << RT_IDX_IDX_SHIFT);/* index */
break;
}
case RT_IDX_IP_CSUM_ERR: /* Pass up IP CSUM error frames. */
{
value = RT_IDX_DST_DFLT_Q | /* dest */
RT_IDX_TYPE_NICQ | /* type */
(RT_IDX_IP_CSUM_ERR_SLOT <<
RT_IDX_IDX_SHIFT); /* index */
break;
}
case RT_IDX_TU_CSUM_ERR: /* Pass up TCP/UDP CSUM error frames. */
{
value = RT_IDX_DST_DFLT_Q | /* dest */
RT_IDX_TYPE_NICQ | /* type */
(RT_IDX_TCP_UDP_CSUM_ERR_SLOT <<
RT_IDX_IDX_SHIFT); /* index */
break;
}
case RT_IDX_BCAST: /* Pass up Broadcast frames to default Q. */
{
value = RT_IDX_DST_DFLT_Q | /* dest */
RT_IDX_TYPE_NICQ | /* type */
(RT_IDX_BCAST_SLOT << RT_IDX_IDX_SHIFT);/* index */
break;
}
case RT_IDX_MCAST: /* Pass up All Multicast frames. */
{
value = RT_IDX_DST_DFLT_Q | /* dest */
RT_IDX_TYPE_NICQ | /* type */
(RT_IDX_ALLMULTI_SLOT << RT_IDX_IDX_SHIFT);/* index */
break;
}
case RT_IDX_MCAST_MATCH: /* Pass up matched Multicast frames. */
{
value = RT_IDX_DST_DFLT_Q | /* dest */
RT_IDX_TYPE_NICQ | /* type */
(RT_IDX_MCAST_MATCH_SLOT << RT_IDX_IDX_SHIFT);/* index */
break;
}
case RT_IDX_RSS_MATCH: /* Pass up matched RSS frames. */
{
value = RT_IDX_DST_RSS | /* dest */
RT_IDX_TYPE_NICQ | /* type */
(RT_IDX_RSS_MATCH_SLOT << RT_IDX_IDX_SHIFT);/* index */
break;
}
case 0: /* Clear the E-bit on an entry. */
{
value = RT_IDX_DST_DFLT_Q | /* dest */
RT_IDX_TYPE_NICQ | /* type */
(index << RT_IDX_IDX_SHIFT);/* index */
break;
}
default:
netif_err(qdev, ifup, qdev->ndev,
"Mask type %d not yet supported.\n", mask);
status = -EPERM;
goto exit;
}
/* All switch cases leave value non-zero, so the write happens
 * whenever a known mask matched.
 */
if (value) {
status = qlge_wait_reg_rdy(qdev, RT_IDX, RT_IDX_MW, 0);
if (status)
goto exit;
value |= (enable ? RT_IDX_E : 0);
qlge_write32(qdev, RT_IDX, value);
qlge_write32(qdev, RT_DATA, enable ? mask : 0);
}
exit:
return status;
}
/* Globally enable interrupts (mask in upper 16 bits, value in lower). */
static void qlge_enable_interrupts(struct qlge_adapter *qdev)
{
qlge_write32(qdev, INTR_EN, (INTR_EN_EI << 16) | INTR_EN_EI);
}
/* Globally disable interrupts: mask bit set, enable bit cleared. */
static void qlge_disable_interrupts(struct qlge_adapter *qdev)
{
qlge_write32(qdev, INTR_EN, (INTR_EN_EI << 16));
}
/* Enable the completion interrupt for vector @intr using its
 * precomputed enable mask.
 */
static void qlge_enable_completion_interrupt(struct qlge_adapter *qdev, u32 intr)
{
struct intr_context *ctx = &qdev->intr_context[intr];
qlge_write32(qdev, INTR_EN, ctx->intr_en_mask);
}
/* Disable the completion interrupt for vector @intr using its
 * precomputed disable mask.
 */
static void qlge_disable_completion_interrupt(struct qlge_adapter *qdev, u32 intr)
{
struct intr_context *ctx = &qdev->intr_context[intr];
qlge_write32(qdev, INTR_EN, ctx->intr_dis_mask);
}
/* Enable the completion interrupt on every allocated vector. */
static void qlge_enable_all_completion_interrupts(struct qlge_adapter *qdev)
{
int i;
for (i = 0; i < qdev->intr_count; i++)
qlge_enable_completion_interrupt(qdev, i);
}
/* Validate the flash image previously read into qdev->flash.
 *
 * @size: number of 16-bit words covered by the checksum.
 * @str:  expected 4-character signature at the start of the image.
 *
 * Returns 0 when signature matches and the 16-bit sum over the image
 * is zero; non-zero (strncmp result or residual checksum) otherwise.
 */
static int qlge_validate_flash(struct qlge_adapter *qdev, u32 size, const char *str)
{
int status, i;
u16 csum = 0;
__le16 *flash = (__le16 *)&qdev->flash;
status = strncmp((char *)&qdev->flash, str, 4);
if (status) {
netif_err(qdev, ifup, qdev->ndev, "Invalid flash signature.\n");
return status;
}
/* Sum of all halfwords must be zero for a valid image. */
for (i = 0; i < size; i++)
csum += le16_to_cpu(*flash++);
if (csum)
netif_err(qdev, ifup, qdev->ndev,
"Invalid flash checksum, csum = 0x%.04x.\n", csum);
return csum;
}
/* Read one 32-bit word from flash at dword @offset into @data.
 * Caller must hold the flash semaphore. Returns 0 or a negative errno.
 */
static int qlge_read_flash_word(struct qlge_adapter *qdev, int offset, __le32 *data)
{
	int rc;

	/* Wait until the flash address register is ready. */
	rc = qlge_wait_reg_rdy(qdev,
			       FLASH_ADDR, FLASH_ADDR_RDY, FLASH_ADDR_ERR);
	if (rc)
		return rc;
	/* Kick off the read of the requested word. */
	qlge_write32(qdev, FLASH_ADDR, FLASH_ADDR_R | offset);
	/* Wait for the read to complete. */
	rc = qlge_wait_reg_rdy(qdev,
			       FLASH_ADDR, FLASH_ADDR_RDY, FLASH_ADDR_ERR);
	if (rc)
		return rc;
	/* Flash holds __le32 but qlge_read32() returns CPU endian, so
	 * swap it back before storing.
	 */
	*data = cpu_to_le32(qlge_read32(qdev, FLASH_DATA));
	return 0;
}
/* Read and validate the 8000-chip flash parameter block, then set the
 * netdev hardware address from it.
 *
 * Selects the per-function flash region, reads it word by word under
 * the flash semaphore, verifies signature "8000" and checksum, and
 * picks the BOFM-modified MAC (data_type1 == 2) over the factory one.
 * Returns 0 on success or a negative errno.
 */
static int qlge_get_8000_flash_params(struct qlge_adapter *qdev)
{
u32 i, size;
int status;
__le32 *p = (__le32 *)&qdev->flash;
u32 offset;
u8 mac_addr[6];
/* Get flash offset for function and adjust
* for dword access.
*/
if (!qdev->port)
offset = FUNC0_FLASH_OFFSET / sizeof(u32);
else
offset = FUNC1_FLASH_OFFSET / sizeof(u32);
if (qlge_sem_spinlock(qdev, SEM_FLASH_MASK))
return -ETIMEDOUT;
size = sizeof(struct flash_params_8000) / sizeof(u32);
for (i = 0; i < size; i++, p++) {
status = qlge_read_flash_word(qdev, i + offset, p);
if (status) {
netif_err(qdev, ifup, qdev->ndev,
"Error reading flash.\n");
goto exit;
}
}
status = qlge_validate_flash(qdev,
sizeof(struct flash_params_8000) /
sizeof(u16),
"8000");
if (status) {
netif_err(qdev, ifup, qdev->ndev, "Invalid flash.\n");
status = -EINVAL;
goto exit;
}
/* Extract either manufacturer or BOFM modified
* MAC address.
*/
if (qdev->flash.flash_params_8000.data_type1 == 2)
memcpy(mac_addr,
qdev->flash.flash_params_8000.mac_addr1,
qdev->ndev->addr_len);
else
memcpy(mac_addr,
qdev->flash.flash_params_8000.mac_addr,
qdev->ndev->addr_len);
if (!is_valid_ether_addr(mac_addr)) {
netif_err(qdev, ifup, qdev->ndev, "Invalid MAC address.\n");
status = -EINVAL;
goto exit;
}
eth_hw_addr_set(qdev->ndev, mac_addr);
exit:
qlge_sem_unlock(qdev, SEM_FLASH_MASK);
return status;
}
/* Read and validate the 8012-chip flash parameter block, then set the
 * netdev hardware address from it.
 *
 * Reads this function's parameter region (second function's block
 * follows the first) under the flash semaphore, verifies signature
 * "8012" and checksum, and rejects an invalid MAC.
 * Returns 0 on success or a negative errno.
 */
static int qlge_get_8012_flash_params(struct qlge_adapter *qdev)
{
int i;
int status;
__le32 *p = (__le32 *)&qdev->flash;
u32 offset = 0;
u32 size = sizeof(struct flash_params_8012) / sizeof(u32);
/* Second function's parameters follow the first
* function's.
*/
if (qdev->port)
offset = size;
if (qlge_sem_spinlock(qdev, SEM_FLASH_MASK))
return -ETIMEDOUT;
for (i = 0; i < size; i++, p++) {
status = qlge_read_flash_word(qdev, i + offset, p);
if (status) {
netif_err(qdev, ifup, qdev->ndev,
"Error reading flash.\n");
goto exit;
}
}
status = qlge_validate_flash(qdev,
sizeof(struct flash_params_8012) /
sizeof(u16),
"8012");
if (status) {
netif_err(qdev, ifup, qdev->ndev, "Invalid flash.\n");
status = -EINVAL;
goto exit;
}
if (!is_valid_ether_addr(qdev->flash.flash_params_8012.mac_addr)) {
status = -EINVAL;
goto exit;
}
eth_hw_addr_set(qdev->ndev, qdev->flash.flash_params_8012.mac_addr);
exit:
qlge_sem_unlock(qdev, SEM_FLASH_MASK);
return status;
}
/* xgmac register are located behind the xgmac_addr and xgmac_data
* register pair. Each read/write requires us to wait for the ready
* bit before reading/writing the data.
*/
/* Write @data to XGMAC register @reg via the address/data pair.
 * The ready bit must be set before touching the pair; writing the
 * address register triggers the actual transfer.
 */
static int qlge_write_xgmac_reg(struct qlge_adapter *qdev, u32 reg, u32 data)
{
	int rc = qlge_wait_reg_rdy(qdev,
				   XGMAC_ADDR, XGMAC_ADDR_RDY, XGMAC_ADDR_XME);

	if (rc)
		return rc;
	/* Stage the data, then kick off the write. */
	qlge_write32(qdev, XGMAC_DATA, data);
	qlge_write32(qdev, XGMAC_ADDR, reg);
	return 0;
}
/* xgmac register are located behind the xgmac_addr and xgmac_data
* register pair. Each read/write requires us to wait for the ready
* bit before reading/writing the data.
*/
/* Read XGMAC register @reg into @data via the address/data pair.
 * Requires a ready-wait before issuing the read and another before
 * collecting the result. Returns 0 or a negative errno; on error
 * *data is left untouched.
 */
int qlge_read_xgmac_reg(struct qlge_adapter *qdev, u32 reg, u32 *data)
{
	int rc;

	rc = qlge_wait_reg_rdy(qdev,
			       XGMAC_ADDR, XGMAC_ADDR_RDY, XGMAC_ADDR_XME);
	if (rc)
		return rc;
	/* Request the read. */
	qlge_write32(qdev, XGMAC_ADDR, reg | XGMAC_ADDR_R);
	rc = qlge_wait_reg_rdy(qdev,
			       XGMAC_ADDR, XGMAC_ADDR_RDY, XGMAC_ADDR_XME);
	if (rc)
		return rc;
	*data = qlge_read32(qdev, XGMAC_DATA);
	return 0;
}
/* This is used for reading the 64-bit statistics regs. */
/* Read a 64-bit XGMAC statistics register: low word at @reg, high
 * word at @reg + 4. On error *data is left untouched.
 */
int qlge_read_xgmac_reg64(struct qlge_adapter *qdev, u32 reg, u64 *data)
{
	u32 lo = 0;
	u32 hi = 0;
	int rc;

	rc = qlge_read_xgmac_reg(qdev, reg, &lo);
	if (rc)
		return rc;
	rc = qlge_read_xgmac_reg(qdev, reg + 4, &hi);
	if (rc)
		return rc;
	*data = ((u64)hi << 32) | (u64)lo;
	return 0;
}
/* Port bring-up for the 8000 chip: query MPI firmware version and
 * state, then schedule the worker that sets TX/RX frame sizes.
 * Returns 0 on success or the mailbox error.
 */
static int qlge_8000_port_initialize(struct qlge_adapter *qdev)
{
int status;
/*
* Get MPI firmware version for driver banner
* and ethool info.
*/
status = qlge_mb_about_fw(qdev);
if (status)
goto exit;
status = qlge_mb_get_fw_state(qdev);
if (status)
goto exit;
/* Wake up a worker to get/set the TX/RX frame sizes. */
queue_delayed_work(qdev->workqueue, &qdev->mpi_port_cfg_work, 0);
exit:
return status;
}
/* Take the MAC Core out of reset.
* Enable statistics counting.
* Take the transmitter/receiver out of reset.
* This functionality may be done in the MPI firmware at a
* later date.
*/
/* Take the MAC Core out of reset.
 * Enable statistics counting.
 * Take the transmitter/receiver out of reset.
 * This functionality may be done in the MPI firmware at a
 * later date.
 *
 * If another function already holds the XGMAC semaphore, just wait
 * for its port-init bit instead of re-initializing. On success the
 * port-init bit is set in STS to signal the port is enabled.
 */
static int qlge_8012_port_initialize(struct qlge_adapter *qdev)
{
int status = 0;
u32 data;
if (qlge_sem_trylock(qdev, qdev->xg_sem_mask)) {
/* Another function has the semaphore, so
* wait for the port init bit to come ready.
*/
netif_info(qdev, link, qdev->ndev,
"Another function has the semaphore, so wait for the port init bit to come ready.\n");
status = qlge_wait_reg_rdy(qdev, STS, qdev->port_init, 0);
if (status) {
netif_crit(qdev, link, qdev->ndev,
"Port initialize timed out.\n");
}
return status;
}
netif_info(qdev, link, qdev->ndev, "Got xgmac semaphore!.\n");
/* Set the core reset. */
status = qlge_read_xgmac_reg(qdev, GLOBAL_CFG, &data);
if (status)
goto end;
data |= GLOBAL_CFG_RESET;
status = qlge_write_xgmac_reg(qdev, GLOBAL_CFG, data);
if (status)
goto end;
/* Clear the core reset and turn on jumbo for receiver. */
data &= ~GLOBAL_CFG_RESET; /* Clear core reset. */
data |= GLOBAL_CFG_JUMBO; /* Turn on jumbo. */
data |= GLOBAL_CFG_TX_STAT_EN;
data |= GLOBAL_CFG_RX_STAT_EN;
status = qlge_write_xgmac_reg(qdev, GLOBAL_CFG, data);
if (status)
goto end;
/* Enable transmitter, and clear it's reset. */
status = qlge_read_xgmac_reg(qdev, TX_CFG, &data);
if (status)
goto end;
data &= ~TX_CFG_RESET; /* Clear the TX MAC reset. */
data |= TX_CFG_EN; /* Enable the transmitter. */
status = qlge_write_xgmac_reg(qdev, TX_CFG, data);
if (status)
goto end;
/* Enable receiver and clear it's reset. */
status = qlge_read_xgmac_reg(qdev, RX_CFG, &data);
if (status)
goto end;
data &= ~RX_CFG_RESET; /* Clear the RX MAC reset. */
data |= RX_CFG_EN; /* Enable the receiver. */
status = qlge_write_xgmac_reg(qdev, RX_CFG, data);
if (status)
goto end;
/* Turn on jumbo. */
status =
qlge_write_xgmac_reg(qdev, MAC_TX_PARAMS, MAC_TX_PARAMS_JUMBO | (0x2580 << 16));
if (status)
goto end;
status =
qlge_write_xgmac_reg(qdev, MAC_RX_PARAMS, 0x2580);
if (status)
goto end;
/* Signal to the world that the port is enabled. */
qlge_write32(qdev, STS, ((qdev->port_init << 16) | qdev->port_init));
end:
qlge_sem_unlock(qdev, qdev->xg_sem_mask);
return status;
}
/* Size in bytes of one large-buffer master page block
 * (PAGE_SIZE scaled by the configured allocation order).
 */
static inline unsigned int qlge_lbq_block_size(struct qlge_adapter *qdev)
{
return PAGE_SIZE << qdev->lbq_buf_order;
}
/* Pop the next-to-clean descriptor from the buffer queue, advancing
 * the clean index with wraparound.
 */
static struct qlge_bq_desc *qlge_get_curr_buf(struct qlge_bq *bq)
{
	struct qlge_bq_desc *desc = &bq->queue[bq->next_to_clean];

	bq->next_to_clean = QLGE_BQ_WRAP(bq->next_to_clean + 1);
	return desc;
}
/* Pop the next large-buffer chunk descriptor and make its data visible
 * to the CPU. When the chunk is the last one carved from its master
 * page, the whole page's DMA mapping is torn down here.
 */
static struct qlge_bq_desc *qlge_get_curr_lchunk(struct qlge_adapter *qdev,
struct rx_ring *rx_ring)
{
struct qlge_bq_desc *lbq_desc = qlge_get_curr_buf(&rx_ring->lbq);
/* Sync just this chunk for CPU access. */
dma_sync_single_for_cpu(&qdev->pdev->dev, lbq_desc->dma_addr,
qdev->lbq_buf_size, DMA_FROM_DEVICE);
if ((lbq_desc->p.pg_chunk.offset + qdev->lbq_buf_size) ==
qlge_lbq_block_size(qdev)) {
/* last chunk of the master page */
dma_unmap_page(&qdev->pdev->dev, lbq_desc->dma_addr,
qlge_lbq_block_size(qdev), DMA_FROM_DEVICE);
}
return lbq_desc;
}
/* Update an rx ring index. */
/* Advance the completion queue consumer index and entry pointer,
 * wrapping both back to the start when the end of the ring is reached.
 */
static void qlge_update_cq(struct rx_ring *rx_ring)
{
	if (unlikely(++rx_ring->cnsmr_idx == rx_ring->cq_len)) {
		/* Wrapped: restart at the base of the queue. */
		rx_ring->cnsmr_idx = 0;
		rx_ring->curr_entry = rx_ring->cq_base;
	} else {
		rx_ring->curr_entry++;
	}
}
/* Tell the hardware how far we've consumed by writing the consumer
 * index to its doorbell register.
 */
static void qlge_write_cq_idx(struct rx_ring *rx_ring)
{
qlge_write_db_reg(rx_ring->cnsmr_idx, rx_ring->cnsmr_idx_db_reg);
}
/* Human-readable names for the two buffer-queue types, used in logs. */
static const char * const bq_type_name[] = {
[QLGE_SB] = "sbq",
[QLGE_LB] = "lbq",
};
/* return 0 or negative error */
/* return 0 or negative error
 *
 * Attach a freshly allocated, DMA-mapped skb to small-buffer
 * descriptor @sbq_desc. A descriptor that already has an skb is left
 * alone. @gfp controls the allocation context (atomic vs. sleeping).
 */
static int qlge_refill_sb(struct rx_ring *rx_ring,
struct qlge_bq_desc *sbq_desc, gfp_t gfp)
{
struct qlge_adapter *qdev = rx_ring->qdev;
struct sk_buff *skb;
/* Already populated - nothing to do. */
if (sbq_desc->p.skb)
return 0;
netif_printk(qdev, rx_status, KERN_DEBUG, qdev->ndev,
"ring %u sbq: getting new skb for index %d.\n",
rx_ring->cq_id, sbq_desc->index);
skb = __netdev_alloc_skb(qdev->ndev, SMALL_BUFFER_SIZE, gfp);
if (!skb)
return -ENOMEM;
skb_reserve(skb, QLGE_SB_PAD);
sbq_desc->dma_addr = dma_map_single(&qdev->pdev->dev, skb->data,
SMALL_BUF_MAP_SIZE,
DMA_FROM_DEVICE);
if (dma_mapping_error(&qdev->pdev->dev, sbq_desc->dma_addr)) {
netif_err(qdev, ifup, qdev->ndev, "PCI mapping failed.\n");
dev_kfree_skb_any(skb);
return -EIO;
}
/* Publish the bus address to the hardware descriptor. */
*sbq_desc->buf_ptr = cpu_to_le64(sbq_desc->dma_addr);
sbq_desc->p.skb = skb;
return 0;
}
/* return 0 or negative error */
/* return 0 or negative error
 *
 * Carve the next chunk off the ring's master page into large-buffer
 * descriptor @lbq_desc, allocating and DMA-mapping a new master page
 * first when the previous one is exhausted. Each chunk handed out
 * takes a page reference except the final one, which consumes the
 * master page itself.
 */
static int qlge_refill_lb(struct rx_ring *rx_ring,
struct qlge_bq_desc *lbq_desc, gfp_t gfp)
{
struct qlge_adapter *qdev = rx_ring->qdev;
struct qlge_page_chunk *master_chunk = &rx_ring->master_chunk;
if (!master_chunk->page) {
struct page *page;
dma_addr_t dma_addr;
page = alloc_pages(gfp | __GFP_COMP, qdev->lbq_buf_order);
if (unlikely(!page))
return -ENOMEM;
dma_addr = dma_map_page(&qdev->pdev->dev, page, 0,
qlge_lbq_block_size(qdev),
DMA_FROM_DEVICE);
if (dma_mapping_error(&qdev->pdev->dev, dma_addr)) {
__free_pages(page, qdev->lbq_buf_order);
netif_err(qdev, drv, qdev->ndev,
"PCI mapping failed.\n");
return -EIO;
}
master_chunk->page = page;
master_chunk->va = page_address(page);
master_chunk->offset = 0;
rx_ring->chunk_dma_addr = dma_addr;
}
lbq_desc->p.pg_chunk = *master_chunk;
lbq_desc->dma_addr = rx_ring->chunk_dma_addr;
/* Hardware descriptor points at this chunk within the page. */
*lbq_desc->buf_ptr = cpu_to_le64(lbq_desc->dma_addr +
lbq_desc->p.pg_chunk.offset);
/* Adjust the master page chunk for next
* buffer get.
*/
master_chunk->offset += qdev->lbq_buf_size;
if (master_chunk->offset == qlge_lbq_block_size(qdev)) {
master_chunk->page = NULL;
} else {
master_chunk->va += qdev->lbq_buf_size;
get_page(master_chunk->page);
}
return 0;
}
/* return 0 or negative error */
/* return 0 or negative error
 *
 * Top up buffer queue @bq to just short of next_to_clean, dispatching
 * to the small- or large-buffer refill helper per descriptor. The
 * loop index runs as (i - QLGE_BQ_LEN), i.e. negative, so that hitting
 * zero marks the wrap point back to queue slot 0. The producer
 * doorbell is only rung when the index crosses an alignment boundary.
 */
static int qlge_refill_bq(struct qlge_bq *bq, gfp_t gfp)
{
struct rx_ring *rx_ring = QLGE_BQ_CONTAINER(bq);
struct qlge_adapter *qdev = rx_ring->qdev;
struct qlge_bq_desc *bq_desc;
int refill_count;
int retval;
int i;
/* Number of free slots between producer and (aligned) consumer. */
refill_count = QLGE_BQ_WRAP(QLGE_BQ_ALIGN(bq->next_to_clean - 1) -
bq->next_to_use);
if (!refill_count)
return 0;
i = bq->next_to_use;
bq_desc = &bq->queue[i];
/* Bias i negative; it reaches 0 exactly at the wrap point. */
i -= QLGE_BQ_LEN;
do {
netif_printk(qdev, rx_status, KERN_DEBUG, qdev->ndev,
"ring %u %s: try cleaning idx %d\n",
rx_ring->cq_id, bq_type_name[bq->type], i);
if (bq->type == QLGE_SB)
retval = qlge_refill_sb(rx_ring, bq_desc, gfp);
else
retval = qlge_refill_lb(rx_ring, bq_desc, gfp);
if (retval < 0) {
netif_err(qdev, ifup, qdev->ndev,
"ring %u %s: Could not get a page chunk, idx %d\n",
rx_ring->cq_id, bq_type_name[bq->type], i);
break;
}
bq_desc++;
i++;
if (unlikely(!i)) {
/* Wrapped: restart at slot 0, re-bias the index. */
bq_desc = &bq->queue[0];
i -= QLGE_BQ_LEN;
}
refill_count--;
} while (refill_count);
/* Undo the bias to get the real producer index. */
i += QLGE_BQ_LEN;
if (bq->next_to_use != i) {
if (QLGE_BQ_ALIGN(bq->next_to_use) != QLGE_BQ_ALIGN(i)) {
netif_printk(qdev, rx_status, KERN_DEBUG, qdev->ndev,
"ring %u %s: updating prod idx = %d.\n",
rx_ring->cq_id, bq_type_name[bq->type],
i);
qlge_write_db_reg(i, bq->prod_idx_db_reg);
}
bq->next_to_use = i;
}
return retval;
}
/* Refill both buffer queues of @rx_ring; if either refill failed and
 * the hardware is left with too few buffers to receive a frame,
 * schedule refill_work to retry after @delay on the long-running
 * system workqueue.
 */
static void qlge_update_buffer_queues(struct rx_ring *rx_ring, gfp_t gfp,
unsigned long delay)
{
bool sbq_fail, lbq_fail;
sbq_fail = !!qlge_refill_bq(&rx_ring->sbq, gfp);
lbq_fail = !!qlge_refill_bq(&rx_ring->lbq, gfp);
/* Minimum number of buffers needed to be able to receive at least one
* frame of any format:
* sbq: 1 for header + 1 for data
* lbq: mtu 9000 / lb size
* Below this, the queue might stall.
*/
if ((sbq_fail && QLGE_BQ_HW_OWNED(&rx_ring->sbq) < 2) ||
(lbq_fail && QLGE_BQ_HW_OWNED(&rx_ring->lbq) <
DIV_ROUND_UP(9000, LARGE_BUFFER_MAX_SIZE)))
/* Allocations can take a long time in certain cases (ex.
* reclaim). Therefore, use a workqueue for long-running
* work items.
*/
queue_delayed_work_on(smp_processor_id(), system_long_wq,
&rx_ring->refill_work, delay);
}
/* Delayed-work handler for slow-path buffer refill.  Runs in process
 * context so GFP_KERNEL allocations (which may sleep/reclaim) are allowed;
 * NAPI is paused around the refill so it cannot race with ring restocking.
 */
static void qlge_slow_refill(struct work_struct *work)
{
	struct rx_ring *rx_ring = container_of(work, struct rx_ring,
					       refill_work.work);
	struct napi_struct *napi = &rx_ring->napi;

	napi_disable(napi);
	qlge_update_buffer_queues(rx_ring, GFP_KERNEL, HZ / 2);
	napi_enable(napi);

	local_bh_disable();
	/* napi_disable() might have prevented incomplete napi work from being
	 * rescheduled.
	 */
	napi_schedule(napi);
	/* trigger softirq processing */
	local_bh_enable();
}
/* Unmaps tx buffers. Can be called from send() if a pci mapping
 * fails at some stage, or from the interrupt when a tx completes.
 *
 * Map-slot layout (established by qlge_map_send): slot 0 is the
 * dma_map_single() of skb->data; slots 1..N are dma_map_page() of frags,
 * except slot 7, which is the dma_map_single() of the OAL when more than
 * 7 slots were used.
 */
static void qlge_unmap_send(struct qlge_adapter *qdev,
			    struct tx_ring_desc *tx_ring_desc, int mapped)
{
	int i;

	for (i = 0; i < mapped; i++) {
		if (i == 0 || (i == 7 && mapped > 7)) {
			/*
			 * Unmap the skb->data area, or the
			 * external sglist (AKA the Outbound
			 * Address List (OAL)).
			 * If its the zeroeth element, then it's
			 * the skb->data area. If it's the 7th
			 * element and there is more than 6 frags,
			 * then its an OAL.
			 */
			if (i == 7) {
				netif_printk(qdev, tx_done, KERN_DEBUG,
					     qdev->ndev,
					     "unmapping OAL area.\n");
			}
			/* Both slot 0 and the OAL were mapped with
			 * dma_map_single(), so unmap accordingly.
			 */
			dma_unmap_single(&qdev->pdev->dev,
					 dma_unmap_addr(&tx_ring_desc->map[i],
							mapaddr),
					 dma_unmap_len(&tx_ring_desc->map[i],
						       maplen),
					 DMA_TO_DEVICE);
		} else {
			netif_printk(qdev, tx_done, KERN_DEBUG, qdev->ndev,
				     "unmapping frag %d.\n", i);
			dma_unmap_page(&qdev->pdev->dev,
				       dma_unmap_addr(&tx_ring_desc->map[i],
						      mapaddr),
				       dma_unmap_len(&tx_ring_desc->map[i],
						     maplen), DMA_TO_DEVICE);
		}
	}
}
/* Map the buffers for this transmit. This will return
 * NETDEV_TX_BUSY or NETDEV_TX_OK based on success.
 *
 * Fills the 8 tx_buf_desc slots of the IOCB: slot 0 is the linear
 * skb->data area; frags follow.  If more than 7 mappings are needed,
 * the 8th IOCB slot instead points to the per-descriptor OAL (external
 * scatter list) and the remaining frags are written there.  Every DMA
 * mapping is also recorded in tx_ring_desc->map[] so it can be undone
 * by qlge_unmap_send().
 */
static int qlge_map_send(struct qlge_adapter *qdev,
			 struct qlge_ob_mac_iocb_req *mac_iocb_ptr,
			 struct sk_buff *skb, struct tx_ring_desc *tx_ring_desc)
{
	int len = skb_headlen(skb);
	dma_addr_t map;
	int frag_idx, err, map_idx = 0;
	struct tx_buf_desc *tbd = mac_iocb_ptr->tbd;
	int frag_cnt = skb_shinfo(skb)->nr_frags;

	if (frag_cnt) {
		netif_printk(qdev, tx_queued, KERN_DEBUG, qdev->ndev,
			     "frag_cnt = %d.\n", frag_cnt);
	}
	/*
	 * Map the skb buffer first.
	 */
	map = dma_map_single(&qdev->pdev->dev, skb->data, len, DMA_TO_DEVICE);

	err = dma_mapping_error(&qdev->pdev->dev, map);
	if (err) {
		netif_err(qdev, tx_queued, qdev->ndev,
			  "PCI mapping failed with error: %d\n", err);
		/* Nothing mapped yet, so nothing to undo. */
		return NETDEV_TX_BUSY;
	}

	tbd->len = cpu_to_le32(len);
	tbd->addr = cpu_to_le64(map);
	dma_unmap_addr_set(&tx_ring_desc->map[map_idx], mapaddr, map);
	dma_unmap_len_set(&tx_ring_desc->map[map_idx], maplen, len);
	map_idx++;

	/*
	 * This loop fills the remainder of the 8 address descriptors
	 * in the IOCB. If there are more than 7 fragments, then the
	 * eighth address desc will point to an external list (OAL).
	 * When this happens, the remainder of the frags will be stored
	 * in this list.
	 */
	for (frag_idx = 0; frag_idx < frag_cnt; frag_idx++, map_idx++) {
		skb_frag_t *frag = &skb_shinfo(skb)->frags[frag_idx];

		tbd++;
		if (frag_idx == 6 && frag_cnt > 7) {
			/* Let's tack on an sglist.
			 * Our control block will now
			 * look like this:
			 * iocb->seg[0] = skb->data
			 * iocb->seg[1] = frag[0]
			 * iocb->seg[2] = frag[1]
			 * iocb->seg[3] = frag[2]
			 * iocb->seg[4] = frag[3]
			 * iocb->seg[5] = frag[4]
			 * iocb->seg[6] = frag[5]
			 * iocb->seg[7] = ptr to OAL (external sglist)
			 * oal->seg[0] = frag[6]
			 * oal->seg[1] = frag[7]
			 * oal->seg[2] = frag[8]
			 * oal->seg[3] = frag[9]
			 * oal->seg[4] = frag[10]
			 * etc...
			 */
			/* Tack on the OAL in the eighth segment of IOCB. */
			map = dma_map_single(&qdev->pdev->dev, &tx_ring_desc->oal,
					     sizeof(struct qlge_oal),
					     DMA_TO_DEVICE);
			err = dma_mapping_error(&qdev->pdev->dev, map);
			if (err) {
				netif_err(qdev, tx_queued, qdev->ndev,
					  "PCI mapping outbound address list with error: %d\n",
					  err);
				goto map_error;
			}

			tbd->addr = cpu_to_le64(map);
			/*
			 * The length is the number of fragments
			 * that remain to be mapped times the length
			 * of our sglist (OAL).
			 */
			tbd->len =
				cpu_to_le32((sizeof(struct tx_buf_desc) *
					     (frag_cnt - frag_idx)) | TX_DESC_C);
			dma_unmap_addr_set(&tx_ring_desc->map[map_idx], mapaddr,
					   map);
			dma_unmap_len_set(&tx_ring_desc->map[map_idx], maplen,
					  sizeof(struct qlge_oal));
			/* Subsequent tbd writes now target the OAL. */
			tbd = (struct tx_buf_desc *)&tx_ring_desc->oal;
			map_idx++;
		}

		map = skb_frag_dma_map(&qdev->pdev->dev, frag, 0, skb_frag_size(frag),
				       DMA_TO_DEVICE);

		err = dma_mapping_error(&qdev->pdev->dev, map);
		if (err) {
			netif_err(qdev, tx_queued, qdev->ndev,
				  "PCI mapping frags failed with error: %d.\n",
				  err);
			goto map_error;
		}

		tbd->addr = cpu_to_le64(map);
		tbd->len = cpu_to_le32(skb_frag_size(frag));
		dma_unmap_addr_set(&tx_ring_desc->map[map_idx], mapaddr, map);
		dma_unmap_len_set(&tx_ring_desc->map[map_idx], maplen,
				  skb_frag_size(frag));
	}
	/* Save the number of segments we've mapped. */
	tx_ring_desc->map_cnt = map_idx;
	/* Terminate the last segment. */
	tbd->len = cpu_to_le32(le32_to_cpu(tbd->len) | TX_DESC_E);
	return NETDEV_TX_OK;

map_error:
	/*
	 * If the first frag mapping failed, then i will be zero.
	 * This causes the unmap of the skb->data area. Otherwise
	 * we pass in the number of frags that mapped successfully
	 * so they can be umapped.
	 */
	qlge_unmap_send(qdev, tx_ring_desc, map_idx);
	return NETDEV_TX_BUSY;
}
/* Account a firmware-reported receive frame error: bump the global and
 * per-ring error counters, then the per-category counter selected by the
 * error code bits of @rx_err.
 */
static void qlge_categorize_rx_err(struct qlge_adapter *qdev, u8 rx_err,
				   struct rx_ring *rx_ring)
{
	struct nic_stats *ns = &qdev->nic_stats;
	u8 err_code = rx_err & IB_MAC_IOCB_RSP_ERR_MASK;

	ns->rx_err_count++;
	rx_ring->rx_errors++;

	if (err_code == IB_MAC_IOCB_RSP_ERR_CODE_ERR)
		ns->rx_code_err++;
	else if (err_code == IB_MAC_IOCB_RSP_ERR_OVERSIZE)
		ns->rx_oversize_err++;
	else if (err_code == IB_MAC_IOCB_RSP_ERR_UNDERSIZE)
		ns->rx_undersize_err++;
	else if (err_code == IB_MAC_IOCB_RSP_ERR_PREAMBLE)
		ns->rx_preamble_err++;
	else if (err_code == IB_MAC_IOCB_RSP_ERR_FRAME_LEN)
		ns->rx_frame_len_err++;
	else if (err_code == IB_MAC_IOCB_RSP_ERR_CRC)
		ns->rx_crc_err++;
	/* Unknown codes only count toward the totals above. */
}
/*
 * qlge_update_mac_hdr_len - helper routine to update the mac header length
 * based on vlan tags if present
 *
 * Only runs when hardware VLAN tag stripping (NETIF_F_HW_VLAN_CTAG_RX) is
 * disabled; otherwise the tag is not in the packet data and *len is left
 * untouched.  Adds VLAN_HLEN (or 2 * VLAN_HLEN for stacked tags) to *len.
 */
static void qlge_update_mac_hdr_len(struct qlge_adapter *qdev,
				    struct qlge_ib_mac_iocb_rsp *ib_mac_rsp,
				    void *page, size_t *len)
{
	u16 *tags;

	if (qdev->ndev->features & NETIF_F_HW_VLAN_CTAG_RX)
		return;
	if (ib_mac_rsp->flags2 & IB_MAC_IOCB_RSP_V) {
		/* tags[6]/tags[8] are byte offsets 12/16: the outer and
		 * (potential) inner ethertype fields of the Ethernet header.
		 * NOTE(review): the packet bytes are network byte order but
		 * are compared against host-order ETH_P_8021Q without
		 * ntohs() — looks endian-suspect on little-endian hosts;
		 * confirm against the hardware's buffer format.
		 */
		tags = (u16 *)page;
		/* Look for stacked vlan tags in ethertype field */
		if (tags[6] == ETH_P_8021Q &&
		    tags[8] == ETH_P_8021Q)
			*len += 2 * VLAN_HLEN;
		else
			*len += VLAN_HLEN;
	}
}
/* Process an inbound completion from an rx ring.
 *
 * GRO path for a checksummed TCP frame that fits in a single large-buffer
 * page chunk: attach the chunk as a frag of the per-NAPI GRO skb and hand
 * it to napi_gro_frags().  On any drop path the page reference taken for
 * the chunk is released here.
 */
static void qlge_process_mac_rx_gro_page(struct qlge_adapter *qdev,
					 struct rx_ring *rx_ring,
					 struct qlge_ib_mac_iocb_rsp *ib_mac_rsp,
					 u32 length, u16 vlan_id)
{
	struct sk_buff *skb;
	struct qlge_bq_desc *lbq_desc = qlge_get_curr_lchunk(qdev, rx_ring);
	struct napi_struct *napi = &rx_ring->napi;

	/* Frame error, so drop the packet. */
	if (ib_mac_rsp->flags2 & IB_MAC_IOCB_RSP_ERR_MASK) {
		qlge_categorize_rx_err(qdev, ib_mac_rsp->flags2, rx_ring);
		put_page(lbq_desc->p.pg_chunk.page);
		return;
	}
	napi->dev = qdev->ndev;

	skb = napi_get_frags(napi);
	if (!skb) {
		netif_err(qdev, drv, qdev->ndev,
			  "Couldn't get an skb, exiting.\n");
		rx_ring->rx_dropped++;
		put_page(lbq_desc->p.pg_chunk.page);
		return;
	}
	prefetch(lbq_desc->p.pg_chunk.va);
	/* Append the page chunk as the next frag of the GRO skb and
	 * account its length manually.
	 */
	__skb_fill_page_desc(skb, skb_shinfo(skb)->nr_frags,
			     lbq_desc->p.pg_chunk.page,
			     lbq_desc->p.pg_chunk.offset,
			     length);

	skb->len += length;
	skb->data_len += length;
	skb->truesize += length;
	skb_shinfo(skb)->nr_frags++;

	rx_ring->rx_packets++;
	rx_ring->rx_bytes += length;
	/* This path is only taken for hardware-verified TCP checksums. */
	skb->ip_summed = CHECKSUM_UNNECESSARY;
	skb_record_rx_queue(skb, rx_ring->cq_id);
	if (vlan_id != 0xffff)
		__vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q), vlan_id);
	napi_gro_frags(napi);
}
/* Process an inbound completion from an rx ring.
 *
 * Non-TCP frame in a single large-buffer page chunk: allocate a fresh skb,
 * copy the MAC header into its linear area, chain the rest of the chunk as
 * a page frag, then set checksum state and deliver via GRO or the regular
 * receive path.  Drop paths free both the skb and the page chunk.
 */
static void qlge_process_mac_rx_page(struct qlge_adapter *qdev,
				     struct rx_ring *rx_ring,
				     struct qlge_ib_mac_iocb_rsp *ib_mac_rsp,
				     u32 length, u16 vlan_id)
{
	struct net_device *ndev = qdev->ndev;
	struct sk_buff *skb = NULL;
	void *addr;
	struct qlge_bq_desc *lbq_desc = qlge_get_curr_lchunk(qdev, rx_ring);
	struct napi_struct *napi = &rx_ring->napi;
	size_t hlen = ETH_HLEN;

	skb = netdev_alloc_skb(ndev, length);
	if (!skb) {
		rx_ring->rx_dropped++;
		put_page(lbq_desc->p.pg_chunk.page);
		return;
	}

	addr = lbq_desc->p.pg_chunk.va;
	prefetch(addr);

	/* Frame error, so drop the packet. */
	if (ib_mac_rsp->flags2 & IB_MAC_IOCB_RSP_ERR_MASK) {
		qlge_categorize_rx_err(qdev, ib_mac_rsp->flags2, rx_ring);
		goto err_out;
	}

	/* Update the MAC header length*/
	qlge_update_mac_hdr_len(qdev, ib_mac_rsp, addr, &hlen);

	/* The max framesize filter on this chip is set higher than
	 * MTU since FCoE uses 2k frames.
	 * NOTE(review): at this point nothing has been pushed into skb, so
	 * skb->len is still 0 and this check looks ineffective — presumably
	 * it was meant to test 'length'; confirm intent before changing.
	 */
	if (skb->len > ndev->mtu + hlen) {
		netif_err(qdev, drv, qdev->ndev,
			  "Segment too small, dropping.\n");
		rx_ring->rx_dropped++;
		goto err_out;
	}
	/* Copy just the header bytes into the linear area... */
	skb_put_data(skb, addr, hlen);
	netif_printk(qdev, rx_status, KERN_DEBUG, qdev->ndev,
		     "%d bytes of headers and data in large. Chain page to new skb and pull tail.\n",
		     length);
	/* ...and chain the payload as a page fragment. */
	skb_fill_page_desc(skb, 0, lbq_desc->p.pg_chunk.page,
			   lbq_desc->p.pg_chunk.offset + hlen, length - hlen);
	skb->len += length - hlen;
	skb->data_len += length - hlen;
	skb->truesize += length - hlen;

	rx_ring->rx_packets++;
	rx_ring->rx_bytes += skb->len;
	skb->protocol = eth_type_trans(skb, ndev);
	skb_checksum_none_assert(skb);

	/* Trust the hardware checksum only when RXCSUM is enabled and the
	 * completion reports no checksum/frame errors.
	 */
	if ((ndev->features & NETIF_F_RXCSUM) &&
	    !(ib_mac_rsp->flags1 & IB_MAC_CSUM_ERR_MASK)) {
		/* TCP frame. */
		if (ib_mac_rsp->flags2 & IB_MAC_IOCB_RSP_T) {
			netif_printk(qdev, rx_status, KERN_DEBUG, qdev->ndev,
				     "TCP checksum done!\n");
			skb->ip_summed = CHECKSUM_UNNECESSARY;
		} else if ((ib_mac_rsp->flags2 & IB_MAC_IOCB_RSP_U) &&
			   (ib_mac_rsp->flags3 & IB_MAC_IOCB_RSP_V4)) {
			/* Unfragmented ipv4 UDP frame. */
			struct iphdr *iph =
				(struct iphdr *)((u8 *)addr + hlen);

			if (!(iph->frag_off &
			      htons(IP_MF | IP_OFFSET))) {
				skb->ip_summed = CHECKSUM_UNNECESSARY;
				netif_printk(qdev, rx_status, KERN_DEBUG,
					     qdev->ndev,
					     "UDP checksum done!\n");
			}
		}
	}

	skb_record_rx_queue(skb, rx_ring->cq_id);
	if (vlan_id != 0xffff)
		__vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q), vlan_id);
	if (skb->ip_summed == CHECKSUM_UNNECESSARY)
		napi_gro_receive(napi, skb);
	else
		netif_receive_skb(skb);
	return;
err_out:
	dev_kfree_skb_any(skb);
	put_page(lbq_desc->p.pg_chunk.page);
}
/* Process an inbound completion from an rx ring.
 *
 * Frame that fits entirely in a single small buffer: copy the data into a
 * freshly allocated skb and deliver it.  The ring's original skb stays
 * attached to the sbq descriptor (the buffer is only synced for CPU, not
 * unmapped), so the small buffer can be reused by hardware.
 */
static void qlge_process_mac_rx_skb(struct qlge_adapter *qdev,
				    struct rx_ring *rx_ring,
				    struct qlge_ib_mac_iocb_rsp *ib_mac_rsp,
				    u32 length, u16 vlan_id)
{
	struct qlge_bq_desc *sbq_desc = qlge_get_curr_buf(&rx_ring->sbq);
	struct net_device *ndev = qdev->ndev;
	struct sk_buff *skb, *new_skb;

	skb = sbq_desc->p.skb;
	/* Allocate new_skb and copy */
	new_skb = netdev_alloc_skb(qdev->ndev, length + NET_IP_ALIGN);
	if (!new_skb) {
		rx_ring->rx_dropped++;
		return;
	}
	skb_reserve(new_skb, NET_IP_ALIGN);

	dma_sync_single_for_cpu(&qdev->pdev->dev, sbq_desc->dma_addr,
				SMALL_BUF_MAP_SIZE, DMA_FROM_DEVICE);

	skb_put_data(new_skb, skb->data, length);

	/* From here on, 'skb' is the copy; the ring skb is untouched. */
	skb = new_skb;

	/* Frame error, so drop the packet. */
	if (ib_mac_rsp->flags2 & IB_MAC_IOCB_RSP_ERR_MASK) {
		qlge_categorize_rx_err(qdev, ib_mac_rsp->flags2, rx_ring);
		dev_kfree_skb_any(skb);
		return;
	}

	/* loopback self test for ethtool */
	if (test_bit(QL_SELFTEST, &qdev->flags)) {
		qlge_check_lb_frame(qdev, skb);
		dev_kfree_skb_any(skb);
		return;
	}

	/* The max framesize filter on this chip is set higher than
	 * MTU since FCoE uses 2k frames.
	 */
	if (skb->len > ndev->mtu + ETH_HLEN) {
		dev_kfree_skb_any(skb);
		rx_ring->rx_dropped++;
		return;
	}

	prefetch(skb->data);
	if (ib_mac_rsp->flags1 & IB_MAC_IOCB_RSP_M_MASK) {
		netif_printk(qdev, rx_status, KERN_DEBUG, qdev->ndev,
			     "%s Multicast.\n",
			     (ib_mac_rsp->flags1 & IB_MAC_IOCB_RSP_M_MASK) ==
			     IB_MAC_IOCB_RSP_M_HASH ? "Hash" :
			     (ib_mac_rsp->flags1 & IB_MAC_IOCB_RSP_M_MASK) ==
			     IB_MAC_IOCB_RSP_M_REG ? "Registered" :
			     (ib_mac_rsp->flags1 & IB_MAC_IOCB_RSP_M_MASK) ==
			     IB_MAC_IOCB_RSP_M_PROM ? "Promiscuous" : "");
	}
	if (ib_mac_rsp->flags2 & IB_MAC_IOCB_RSP_P)
		netif_printk(qdev, rx_status, KERN_DEBUG, qdev->ndev,
			     "Promiscuous Packet.\n");

	rx_ring->rx_packets++;
	rx_ring->rx_bytes += skb->len;
	skb->protocol = eth_type_trans(skb, ndev);
	skb_checksum_none_assert(skb);

	/* If rx checksum is on, and there are no
	 * csum or frame errors.
	 */
	if ((ndev->features & NETIF_F_RXCSUM) &&
	    !(ib_mac_rsp->flags1 & IB_MAC_CSUM_ERR_MASK)) {
		/* TCP frame. */
		if (ib_mac_rsp->flags2 & IB_MAC_IOCB_RSP_T) {
			netif_printk(qdev, rx_status, KERN_DEBUG, qdev->ndev,
				     "TCP checksum done!\n");
			skb->ip_summed = CHECKSUM_UNNECESSARY;
		} else if ((ib_mac_rsp->flags2 & IB_MAC_IOCB_RSP_U) &&
			   (ib_mac_rsp->flags3 & IB_MAC_IOCB_RSP_V4)) {
			/* Unfragmented ipv4 UDP frame. */
			struct iphdr *iph = (struct iphdr *)skb->data;

			if (!(iph->frag_off &
			      htons(IP_MF | IP_OFFSET))) {
				skb->ip_summed = CHECKSUM_UNNECESSARY;
				netif_printk(qdev, rx_status, KERN_DEBUG,
					     qdev->ndev,
					     "UDP checksum done!\n");
			}
		}
	}

	skb_record_rx_queue(skb, rx_ring->cq_id);
	if (vlan_id != 0xffff)
		__vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q), vlan_id);
	if (skb->ip_summed == CHECKSUM_UNNECESSARY)
		napi_gro_receive(&rx_ring->napi, skb);
	else
		netif_receive_skb(skb);
}
/* Undo the QLGE_SB_PAD reserve applied before the buffer was handed to
 * hardware and re-align the first @len bytes of payload on a NET_IP_ALIGN
 * (2-byte) boundary.
 */
static void qlge_realign_skb(struct sk_buff *skb, int len)
{
	const int shift = QLGE_SB_PAD - NET_IP_ALIGN;
	void *old_data = skb->data;

	skb->data -= shift;
	skb->tail -= shift;
	/* Regions overlap, so memmove rather than memcpy. */
	memmove(skb->data, old_data, len);
}
/*
 * This function builds an skb for the given inbound
 * completion. It will be rewritten for readability in the near
 * future, but for not it works well.
 *
 * The completion flags describe where the hardware placed the frame:
 *   HV/HS  - header was split into a small buffer
 *   DS     - data is in a small buffer
 *   DL     - data is in one large buffer (page chunk)
 *   (none) - data spans a chain of large buffers listed in a small buffer
 * Returns the assembled skb, or NULL on allocation failure.
 */
static struct sk_buff *qlge_build_rx_skb(struct qlge_adapter *qdev,
					 struct rx_ring *rx_ring,
					 struct qlge_ib_mac_iocb_rsp *ib_mac_rsp)
{
	u32 length = le32_to_cpu(ib_mac_rsp->data_len);
	u32 hdr_len = le32_to_cpu(ib_mac_rsp->hdr_len);
	struct qlge_bq_desc *lbq_desc, *sbq_desc;
	struct sk_buff *skb = NULL;
	size_t hlen = ETH_HLEN;

	/*
	 * Handle the header buffer if present.
	 */
	if (ib_mac_rsp->flags4 & IB_MAC_IOCB_RSP_HV &&
	    ib_mac_rsp->flags4 & IB_MAC_IOCB_RSP_HS) {
		netif_printk(qdev, rx_status, KERN_DEBUG, qdev->ndev,
			     "Header of %d bytes in small buffer.\n", hdr_len);
		/*
		 * Headers fit nicely into a small buffer.
		 */
		sbq_desc = qlge_get_curr_buf(&rx_ring->sbq);
		dma_unmap_single(&qdev->pdev->dev, sbq_desc->dma_addr,
				 SMALL_BUF_MAP_SIZE, DMA_FROM_DEVICE);
		skb = sbq_desc->p.skb;
		qlge_realign_skb(skb, hdr_len);
		skb_put(skb, hdr_len);
		/* Take ownership of the skb away from the ring. */
		sbq_desc->p.skb = NULL;
	}

	/*
	 * Handle the data buffer(s).
	 */
	if (unlikely(!length)) {	/* Is there data too? */
		netif_printk(qdev, rx_status, KERN_DEBUG, qdev->ndev,
			     "No Data buffer in this packet.\n");
		return skb;
	}

	if (ib_mac_rsp->flags3 & IB_MAC_IOCB_RSP_DS) {
		if (ib_mac_rsp->flags4 & IB_MAC_IOCB_RSP_HS) {
			netif_printk(qdev, rx_status, KERN_DEBUG, qdev->ndev,
				     "Headers in small, data of %d bytes in small, combine them.\n",
				     length);
			/*
			 * Data is less than small buffer size so it's
			 * stuffed in a small buffer.
			 * For this case we append the data
			 * from the "data" small buffer to the "header" small
			 * buffer.
			 * Note: only synced for CPU, not unmapped — the data
			 * small buffer stays owned by the ring for reuse.
			 */
			sbq_desc = qlge_get_curr_buf(&rx_ring->sbq);
			dma_sync_single_for_cpu(&qdev->pdev->dev,
						sbq_desc->dma_addr,
						SMALL_BUF_MAP_SIZE,
						DMA_FROM_DEVICE);
			skb_put_data(skb, sbq_desc->p.skb->data, length);
		} else {
			netif_printk(qdev, rx_status, KERN_DEBUG, qdev->ndev,
				     "%d bytes in a single small buffer.\n",
				     length);
			sbq_desc = qlge_get_curr_buf(&rx_ring->sbq);
			skb = sbq_desc->p.skb;
			qlge_realign_skb(skb, length);
			skb_put(skb, length);
			dma_unmap_single(&qdev->pdev->dev, sbq_desc->dma_addr,
					 SMALL_BUF_MAP_SIZE,
					 DMA_FROM_DEVICE);
			sbq_desc->p.skb = NULL;
		}
	} else if (ib_mac_rsp->flags3 & IB_MAC_IOCB_RSP_DL) {
		if (ib_mac_rsp->flags4 & IB_MAC_IOCB_RSP_HS) {
			netif_printk(qdev, rx_status, KERN_DEBUG, qdev->ndev,
				     "Header in small, %d bytes in large. Chain large to small!\n",
				     length);
			/*
			 * The data is in a single large buffer. We
			 * chain it to the header buffer's skb and let
			 * it rip.
			 */
			lbq_desc = qlge_get_curr_lchunk(qdev, rx_ring);
			netif_printk(qdev, rx_status, KERN_DEBUG, qdev->ndev,
				     "Chaining page at offset = %d, for %d bytes  to skb.\n",
				     lbq_desc->p.pg_chunk.offset, length);
			skb_fill_page_desc(skb, 0, lbq_desc->p.pg_chunk.page,
					   lbq_desc->p.pg_chunk.offset, length);
			skb->len += length;
			skb->data_len += length;
			skb->truesize += length;
		} else {
			/*
			 * The headers and data are in a single large buffer. We
			 * copy it to a new skb and let it go. This can happen with
			 * jumbo mtu on a non-TCP/UDP frame.
			 */
			lbq_desc = qlge_get_curr_lchunk(qdev, rx_ring);
			skb = netdev_alloc_skb(qdev->ndev, length);
			if (!skb) {
				netif_printk(qdev, probe, KERN_DEBUG, qdev->ndev,
					     "No skb available, drop the packet.\n");
				return NULL;
			}
			dma_unmap_page(&qdev->pdev->dev, lbq_desc->dma_addr,
				       qdev->lbq_buf_size,
				       DMA_FROM_DEVICE);
			skb_reserve(skb, NET_IP_ALIGN);
			netif_printk(qdev, rx_status, KERN_DEBUG, qdev->ndev,
				     "%d bytes of headers and data in large. Chain page to new skb and pull tail.\n",
				     length);
			skb_fill_page_desc(skb, 0, lbq_desc->p.pg_chunk.page,
					   lbq_desc->p.pg_chunk.offset,
					   length);
			skb->len += length;
			skb->data_len += length;
			skb->truesize += length;
			qlge_update_mac_hdr_len(qdev, ib_mac_rsp,
						lbq_desc->p.pg_chunk.va,
						&hlen);
			/* Pull the headers into the linear area. */
			__pskb_pull_tail(skb, hlen);
		}
	} else {
		/*
		 * The data is in a chain of large buffers
		 * pointed to by a small buffer.  We loop
		 * thru and chain them to the our small header
		 * buffer's skb.
		 * frags:  There are 18 max frags and our small
		 *         buffer will hold 32 of them. The thing is,
		 *         we'll use 3 max for our 9000 byte jumbo
		 *         frames.  If the MTU goes up we could
		 *          eventually be in trouble.
		 */
		int size, i = 0;

		sbq_desc = qlge_get_curr_buf(&rx_ring->sbq);
		dma_unmap_single(&qdev->pdev->dev, sbq_desc->dma_addr,
				 SMALL_BUF_MAP_SIZE, DMA_FROM_DEVICE);
		if (!(ib_mac_rsp->flags4 & IB_MAC_IOCB_RSP_HS)) {
			/*
			 * This is an non TCP/UDP IP frame, so
			 * the headers aren't split into a small
			 * buffer.  We have to use the small buffer
			 * that contains our sg list as our skb to
			 * send upstairs. Copy the sg list here to
			 * a local buffer and use it to find the
			 * pages to chain.
			 */
			netif_printk(qdev, rx_status, KERN_DEBUG, qdev->ndev,
				     "%d bytes of headers & data in chain of large.\n",
				     length);
			skb = sbq_desc->p.skb;
			sbq_desc->p.skb = NULL;
			skb_reserve(skb, NET_IP_ALIGN);
		}
		/* Chain one page chunk per iteration until all of
		 * 'length' is accounted for.
		 */
		do {
			lbq_desc = qlge_get_curr_lchunk(qdev, rx_ring);
			size = min(length, qdev->lbq_buf_size);

			netif_printk(qdev, rx_status, KERN_DEBUG, qdev->ndev,
				     "Adding page %d to skb for %d bytes.\n",
				     i, size);
			skb_fill_page_desc(skb, i,
					   lbq_desc->p.pg_chunk.page,
					   lbq_desc->p.pg_chunk.offset, size);
			skb->len += size;
			skb->data_len += size;
			skb->truesize += size;
			length -= size;
			i++;
		} while (length > 0);
		qlge_update_mac_hdr_len(qdev, ib_mac_rsp, lbq_desc->p.pg_chunk.va,
					&hlen);
		__pskb_pull_tail(skb, hlen);
	}
	return skb;
}
/* Process an inbound completion from an rx ring.
 *
 * Split-frame path: the header and data buffers are assembled into one skb
 * by qlge_build_rx_skb(), validated (frame errors, oversize, ethtool
 * loopback self-test), checksum state is derived from the completion
 * flags, and the skb is delivered via GRO or the regular receive path.
 *
 * Fix: the unfragmented-IPv4-UDP branch used to log "TCP checksum done!",
 * a copy-paste slip — the sibling paths (qlge_process_mac_rx_page,
 * qlge_process_mac_rx_skb) log "UDP checksum done!" in the same branch.
 */
static void qlge_process_mac_split_rx_intr(struct qlge_adapter *qdev,
					   struct rx_ring *rx_ring,
					   struct qlge_ib_mac_iocb_rsp *ib_mac_rsp,
					   u16 vlan_id)
{
	struct net_device *ndev = qdev->ndev;
	struct sk_buff *skb = NULL;

	skb = qlge_build_rx_skb(qdev, rx_ring, ib_mac_rsp);
	if (unlikely(!skb)) {
		netif_printk(qdev, rx_status, KERN_DEBUG, qdev->ndev,
			     "No skb available, drop packet.\n");
		rx_ring->rx_dropped++;
		return;
	}

	/* Frame error, so drop the packet. */
	if (ib_mac_rsp->flags2 & IB_MAC_IOCB_RSP_ERR_MASK) {
		qlge_categorize_rx_err(qdev, ib_mac_rsp->flags2, rx_ring);
		dev_kfree_skb_any(skb);
		return;
	}

	/* The max framesize filter on this chip is set higher than
	 * MTU since FCoE uses 2k frames.
	 */
	if (skb->len > ndev->mtu + ETH_HLEN) {
		dev_kfree_skb_any(skb);
		rx_ring->rx_dropped++;
		return;
	}

	/* loopback self test for ethtool */
	if (test_bit(QL_SELFTEST, &qdev->flags)) {
		qlge_check_lb_frame(qdev, skb);
		dev_kfree_skb_any(skb);
		return;
	}

	prefetch(skb->data);
	if (ib_mac_rsp->flags1 & IB_MAC_IOCB_RSP_M_MASK) {
		netif_printk(qdev, rx_status, KERN_DEBUG, qdev->ndev, "%s Multicast.\n",
			     (ib_mac_rsp->flags1 & IB_MAC_IOCB_RSP_M_MASK) ==
			     IB_MAC_IOCB_RSP_M_HASH ? "Hash" :
			     (ib_mac_rsp->flags1 & IB_MAC_IOCB_RSP_M_MASK) ==
			     IB_MAC_IOCB_RSP_M_REG ? "Registered" :
			     (ib_mac_rsp->flags1 & IB_MAC_IOCB_RSP_M_MASK) ==
			     IB_MAC_IOCB_RSP_M_PROM ? "Promiscuous" : "");
		rx_ring->rx_multicast++;
	}
	if (ib_mac_rsp->flags2 & IB_MAC_IOCB_RSP_P) {
		netif_printk(qdev, rx_status, KERN_DEBUG, qdev->ndev,
			     "Promiscuous Packet.\n");
	}

	skb->protocol = eth_type_trans(skb, ndev);
	skb_checksum_none_assert(skb);

	/* If rx checksum is on, and there are no
	 * csum or frame errors.
	 */
	if ((ndev->features & NETIF_F_RXCSUM) &&
	    !(ib_mac_rsp->flags1 & IB_MAC_CSUM_ERR_MASK)) {
		/* TCP frame. */
		if (ib_mac_rsp->flags2 & IB_MAC_IOCB_RSP_T) {
			netif_printk(qdev, rx_status, KERN_DEBUG, qdev->ndev,
				     "TCP checksum done!\n");
			skb->ip_summed = CHECKSUM_UNNECESSARY;
		} else if ((ib_mac_rsp->flags2 & IB_MAC_IOCB_RSP_U) &&
			   (ib_mac_rsp->flags3 & IB_MAC_IOCB_RSP_V4)) {
			/* Unfragmented ipv4 UDP frame. */
			struct iphdr *iph = (struct iphdr *)skb->data;

			if (!(iph->frag_off &
			      htons(IP_MF | IP_OFFSET))) {
				skb->ip_summed = CHECKSUM_UNNECESSARY;
				netif_printk(qdev, rx_status, KERN_DEBUG, qdev->ndev,
					     "UDP checksum done!\n");
			}
		}
	}

	rx_ring->rx_packets++;
	rx_ring->rx_bytes += skb->len;
	skb_record_rx_queue(skb, rx_ring->cq_id);
	if (vlan_id != 0xffff)
		__vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q), vlan_id);
	if (skb->ip_summed == CHECKSUM_UNNECESSARY)
		napi_gro_receive(&rx_ring->napi, skb);
	else
		netif_receive_skb(skb);
}
/* Process an inbound completion from an rx ring.
 *
 * Top-level rx dispatcher: decodes the VLAN id (0xffff means "no tag to
 * report") and routes the completion to the handler matching the buffer
 * layout flags.  Returns the frame's data length.
 */
static unsigned long qlge_process_mac_rx_intr(struct qlge_adapter *qdev,
					      struct rx_ring *rx_ring,
					      struct qlge_ib_mac_iocb_rsp *ib_mac_rsp)
{
	u32 length = le32_to_cpu(ib_mac_rsp->data_len);
	/* Only report a VLAN id when the completion carries one AND the
	 * device is stripping tags; otherwise use the 0xffff sentinel.
	 */
	u16 vlan_id = ((ib_mac_rsp->flags2 & IB_MAC_IOCB_RSP_V) &&
		       (qdev->ndev->features & NETIF_F_HW_VLAN_CTAG_RX)) ?
		((le16_to_cpu(ib_mac_rsp->vlan_id) &
		  IB_MAC_IOCB_RSP_VLAN_MASK)) : 0xffff;

	if (ib_mac_rsp->flags4 & IB_MAC_IOCB_RSP_HV) {
		/* The data and headers are split into
		 * separate buffers.
		 */
		qlge_process_mac_split_rx_intr(qdev, rx_ring, ib_mac_rsp,
					       vlan_id);
	} else if (ib_mac_rsp->flags3 & IB_MAC_IOCB_RSP_DS) {
		/* The data fit in a single small buffer.
		 * Allocate a new skb, copy the data and
		 * return the buffer to the free pool.
		 */
		qlge_process_mac_rx_skb(qdev, rx_ring, ib_mac_rsp, length,
					vlan_id);
	} else if ((ib_mac_rsp->flags3 & IB_MAC_IOCB_RSP_DL) &&
		   !(ib_mac_rsp->flags1 & IB_MAC_CSUM_ERR_MASK) &&
		   (ib_mac_rsp->flags2 & IB_MAC_IOCB_RSP_T)) {
		/* TCP packet in a page chunk that's been checksummed.
		 * Tack it on to our GRO skb and let it go.
		 */
		qlge_process_mac_rx_gro_page(qdev, rx_ring, ib_mac_rsp, length,
					     vlan_id);
	} else if (ib_mac_rsp->flags3 & IB_MAC_IOCB_RSP_DL) {
		/* Non-TCP packet in a page chunk. Allocate an
		 * skb, tack it on frags, and send it up.
		 */
		qlge_process_mac_rx_page(qdev, rx_ring, ib_mac_rsp, length,
					 vlan_id);
	} else {
		/* Non-TCP/UDP large frames that span multiple buffers
		 * can be processed correctly by the split frame logic.
		 */
		qlge_process_mac_split_rx_intr(qdev, rx_ring, ib_mac_rsp,
					       vlan_id);
	}

	return (unsigned long)length;
}
/* Process an outbound completion from an rx ring.
 *
 * Unmaps and frees the completed tx skb, accounts tx stats, warns on any
 * error flags the firmware reported, and returns the tx slot to the ring
 * by bumping tx_count.
 */
static void qlge_process_mac_tx_intr(struct qlge_adapter *qdev,
				     struct qlge_ob_mac_iocb_rsp *mac_rsp)
{
	struct tx_ring *tx_ring;
	struct tx_ring_desc *tx_ring_desc;

	tx_ring = &qdev->tx_ring[mac_rsp->txq_idx];
	tx_ring_desc = &tx_ring->q[mac_rsp->tid];
	qlge_unmap_send(qdev, tx_ring_desc, tx_ring_desc->map_cnt);
	tx_ring->tx_bytes += (tx_ring_desc->skb)->len;
	tx_ring->tx_packets++;
	dev_kfree_skb(tx_ring_desc->skb);
	tx_ring_desc->skb = NULL;

	/* NOTE(review): OB_MAC_IOCB_RSP_P is part of the tested mask but
	 * has no dedicated warning below — confirm whether that was
	 * intentional.
	 */
	if (unlikely(mac_rsp->flags1 & (OB_MAC_IOCB_RSP_E |
					OB_MAC_IOCB_RSP_S |
					OB_MAC_IOCB_RSP_L |
					OB_MAC_IOCB_RSP_P | OB_MAC_IOCB_RSP_B))) {
		if (mac_rsp->flags1 & OB_MAC_IOCB_RSP_E) {
			netif_warn(qdev, tx_done, qdev->ndev,
				   "Total descriptor length did not match transfer length.\n");
		}
		if (mac_rsp->flags1 & OB_MAC_IOCB_RSP_S) {
			netif_warn(qdev, tx_done, qdev->ndev,
				   "Frame too short to be valid, not sent.\n");
		}
		if (mac_rsp->flags1 & OB_MAC_IOCB_RSP_L) {
			netif_warn(qdev, tx_done, qdev->ndev,
				   "Frame too long, but sent anyway.\n");
		}
		if (mac_rsp->flags1 & OB_MAC_IOCB_RSP_B) {
			netif_warn(qdev, tx_done, qdev->ndev,
				   "PCI backplane error. Frame not sent.\n");
		}
	}
	atomic_inc(&tx_ring->tx_count);
}
/* Fire up a handler to reset the MPI processor.
 * Link is taken down first so traffic stops before the MPI reset work runs.
 */
void qlge_queue_fw_error(struct qlge_adapter *qdev)
{
	qlge_link_off(qdev);
	queue_delayed_work(qdev->workqueue, &qdev->mpi_reset_work, 0);
}
/* Kick off ASIC fatal-error recovery: stop traffic and interrupts, mark
 * the adapter state bits, then schedule the asic reset worker.
 */
void qlge_queue_asic_error(struct qlge_adapter *qdev)
{
	qlge_link_off(qdev);
	qlge_disable_interrupts(qdev);
	/* Clear adapter up bit to signal the recovery
	 * process that it shouldn't kill the reset worker
	 * thread
	 */
	clear_bit(QL_ADAPTER_UP, &qdev->flags);
	/* Set asic recovery bit to indicate reset process that we are
	 * in fatal error recovery process rather than normal close
	 */
	set_bit(QL_ASIC_RECOVERY, &qdev->flags);
	queue_delayed_work(qdev->workqueue, &qdev->asic_reset_work, 0);
}
/* Handle an asynchronous-event completion from the chip.  A management
 * processor fatal error schedules a firmware (MPI) reset; every other
 * event — known or not — is logged and escalates to ASIC error recovery.
 */
static void qlge_process_chip_ae_intr(struct qlge_adapter *qdev,
				      struct qlge_ib_ae_iocb_rsp *ib_ae_rsp)
{
	if (ib_ae_rsp->event == MGMT_ERR_EVENT) {
		netif_err(qdev, rx_err, qdev->ndev,
			  "Management Processor Fatal Error.\n");
		qlge_queue_fw_error(qdev);
		return;
	}

	if (ib_ae_rsp->event == CAM_LOOKUP_ERR_EVENT) {
		netdev_err(qdev->ndev, "Multiple CAM hits lookup occurred.\n");
		netdev_err(qdev->ndev, "This event shouldn't occur.\n");
	} else if (ib_ae_rsp->event == SOFT_ECC_ERROR_EVENT) {
		netdev_err(qdev->ndev, "Soft ECC error detected.\n");
	} else if (ib_ae_rsp->event == PCI_ERR_ANON_BUF_RD) {
		netdev_err(qdev->ndev,
			   "PCI error occurred when reading anonymous buffers from rx_ring %d.\n",
			   ib_ae_rsp->q_id);
	} else {
		netif_err(qdev, drv, qdev->ndev, "Unexpected event %d.\n",
			  ib_ae_rsp->event);
	}

	/* All non-MGMT events converge on ASIC error recovery. */
	qlge_queue_asic_error(qdev);
}
/* Drain an outbound (tx-completion) ring: process every entry between the
 * consumer index and the hardware producer index, then wake the associated
 * tx subqueue if it was stopped and is now at least 25% empty.
 * Returns the number of completions handled.
 */
static int qlge_clean_outbound_rx_ring(struct rx_ring *rx_ring)
{
	struct qlge_adapter *qdev = rx_ring->qdev;
	u32 prod = qlge_read_sh_reg(rx_ring->prod_idx_sh_reg);
	struct qlge_ob_mac_iocb_rsp *net_rsp = NULL;
	int count = 0;
	struct tx_ring *tx_ring;

	/* While there are entries in the completion queue. */
	while (prod != rx_ring->cnsmr_idx) {
		netif_printk(qdev, rx_status, KERN_DEBUG, qdev->ndev,
			     "cq_id = %d, prod = %d, cnsmr = %d\n",
			     rx_ring->cq_id, prod, rx_ring->cnsmr_idx);

		net_rsp = (struct qlge_ob_mac_iocb_rsp *)rx_ring->curr_entry;
		/* Read barrier: see the producer index update before the
		 * entry contents are read.
		 */
		rmb();
		switch (net_rsp->opcode) {
		case OPCODE_OB_MAC_TSO_IOCB:
		case OPCODE_OB_MAC_IOCB:
			qlge_process_mac_tx_intr(qdev, net_rsp);
			break;
		default:
			netif_printk(qdev, rx_status, KERN_DEBUG, qdev->ndev,
				     "Hit default case, not handled! dropping the packet, opcode = %x.\n",
				     net_rsp->opcode);
		}
		count++;
		qlge_update_cq(rx_ring);
		prod = qlge_read_sh_reg(rx_ring->prod_idx_sh_reg);
	}
	if (!net_rsp)
		return 0;
	qlge_write_cq_idx(rx_ring);
	/* net_rsp still points at the last processed completion; use its
	 * txq_idx to find the tx ring that may need waking.
	 */
	tx_ring = &qdev->tx_ring[net_rsp->txq_idx];
	if (__netif_subqueue_stopped(qdev->ndev, tx_ring->wq_id)) {
		if ((atomic_read(&tx_ring->tx_count) > (tx_ring->wq_len / 4)))
			/*
			 * The queue got stopped because the tx_ring was full.
			 * Wake it up, because it's now at least 25% empty.
			 */
			netif_wake_subqueue(qdev->ndev, tx_ring->wq_id);
	}

	return count;
}
/* Drain an inbound (rx) completion ring, processing at most @budget
 * entries, then top up the buffer queues and publish the new consumer
 * index.  Returns the number of completions handled (NAPI work done).
 */
static int qlge_clean_inbound_rx_ring(struct rx_ring *rx_ring, int budget)
{
	struct qlge_adapter *qdev = rx_ring->qdev;
	u32 prod = qlge_read_sh_reg(rx_ring->prod_idx_sh_reg);
	struct qlge_net_rsp_iocb *net_rsp;
	int count = 0;

	/* While there are entries in the completion queue. */
	while (prod != rx_ring->cnsmr_idx) {
		netif_printk(qdev, rx_status, KERN_DEBUG, qdev->ndev,
			     "cq_id = %d, prod = %d, cnsmr = %d\n",
			     rx_ring->cq_id, prod, rx_ring->cnsmr_idx);

		net_rsp = rx_ring->curr_entry;
		/* Read barrier: see the producer index update before the
		 * entry contents are read.
		 */
		rmb();
		switch (net_rsp->opcode) {
		case OPCODE_IB_MAC_IOCB:
			qlge_process_mac_rx_intr(qdev, rx_ring,
						 (struct qlge_ib_mac_iocb_rsp *)
						 net_rsp);
			break;

		case OPCODE_IB_AE_IOCB:
			qlge_process_chip_ae_intr(qdev, (struct qlge_ib_ae_iocb_rsp *)
						  net_rsp);
			break;
		default:
			netif_printk(qdev, rx_status, KERN_DEBUG, qdev->ndev,
				     "Hit default case, not handled! dropping the packet, opcode = %x.\n",
				     net_rsp->opcode);
			break;
		}
		count++;
		qlge_update_cq(rx_ring);
		prod = qlge_read_sh_reg(rx_ring->prod_idx_sh_reg);
		if (count == budget)
			break;
	}
	/* Refill with GFP_ATOMIC (softirq context); retries, if any, are
	 * scheduled with no extra delay.
	 */
	qlge_update_buffer_queues(rx_ring, GFP_ATOMIC, 0);
	qlge_write_cq_idx(rx_ring);
	return count;
}
/* NAPI poll handler (MSI-X): first service any non-empty tx-completion
 * rings mapped to this vector, then the RSS (rx) ring itself, and
 * re-enable the completion interrupt once under budget.
 */
static int qlge_napi_poll_msix(struct napi_struct *napi, int budget)
{
	struct rx_ring *rx_ring = container_of(napi, struct rx_ring, napi);
	struct qlge_adapter *qdev = rx_ring->qdev;
	struct rx_ring *trx_ring;
	int i, work_done = 0;
	struct intr_context *ctx = &qdev->intr_context[rx_ring->cq_id];

	netif_printk(qdev, rx_status, KERN_DEBUG, qdev->ndev,
		     "Enter, NAPI POLL cq_id = %d.\n", rx_ring->cq_id);

	/* Service the TX rings first.  They start
	 * right after the RSS rings.
	 */
	for (i = qdev->rss_ring_count; i < qdev->rx_ring_count; i++) {
		trx_ring = &qdev->rx_ring[i];

		/* If this TX completion ring belongs to this vector and
		 * it's not empty then service it.
		 */
		if ((ctx->irq_mask & (1 << trx_ring->cq_id)) &&
		    (qlge_read_sh_reg(trx_ring->prod_idx_sh_reg) !=
		     trx_ring->cnsmr_idx)) {
			netif_printk(qdev, intr, KERN_DEBUG, qdev->ndev,
				     "%s: Servicing TX completion ring %d.\n",
				     __func__, trx_ring->cq_id);
			qlge_clean_outbound_rx_ring(trx_ring);
		}
	}

	/*
	 * Now service the RSS ring if it's active.
	 */
	if (qlge_read_sh_reg(rx_ring->prod_idx_sh_reg) !=
	    rx_ring->cnsmr_idx) {
		netif_printk(qdev, intr, KERN_DEBUG, qdev->ndev,
			     "%s: Servicing RX completion ring %d.\n",
			     __func__, rx_ring->cq_id);
		work_done = qlge_clean_inbound_rx_ring(rx_ring, budget);
	}

	/* Work not exhausted: complete NAPI and unmask the interrupt. */
	if (work_done < budget) {
		napi_complete_done(napi, work_done);
		qlge_enable_completion_interrupt(qdev, rx_ring->irq);
	}
	return work_done;
}
/* Program the receive-config register for the requested VLAN mode:
 * with hardware CTAG stripping enabled, additionally accept frames that
 * match the VLAN filter as well as untagged frames.
 */
static void qlge_vlan_mode(struct net_device *ndev, netdev_features_t features)
{
	struct qlge_adapter *qdev = netdev_to_qdev(ndev);
	u32 rcv_cfg = NIC_RCV_CFG_VLAN_MASK;

	if (features & NETIF_F_HW_VLAN_CTAG_RX)
		rcv_cfg |= NIC_RCV_CFG_VLAN_MATCH_AND_NON;

	qlge_write32(qdev, NIC_RCV_CFG, rcv_cfg);
}
/*
 * qlge_update_hw_vlan_features - reinitialize the adapter so a change to
 * the hardware VLAN acceleration features takes effect.
 *
 * If the interface is running it is brought down, the features are
 * recorded, and it is brought back up.  Returns 0 on success or the
 * negative error from the down/up step that failed.
 */
static int qlge_update_hw_vlan_features(struct net_device *ndev,
					netdev_features_t features)
{
	struct qlge_adapter *qdev = netdev_to_qdev(ndev);
	bool was_running = netif_running(ndev);
	int err;

	if (was_running) {
		err = qlge_adapter_down(qdev);
		if (err) {
			netif_err(qdev, link, qdev->ndev,
				  "Failed to bring down the adapter\n");
			return err;
		}
	}

	/* update the features with resent change */
	ndev->features = features;

	if (!was_running)
		return 0;

	err = qlge_adapter_up(qdev);
	if (err)
		netif_err(qdev, link, qdev->ndev,
			  "Failed to bring up the adapter\n");
	return err;
}
/* ndo_set_features hook: only the hardware CTAG-RX stripping bit needs
 * action here; when it toggles, restart the adapter with the new setting
 * and reprogram the VLAN receive mode.
 */
static int qlge_set_features(struct net_device *ndev,
			     netdev_features_t features)
{
	int err;

	if ((ndev->features ^ features) & NETIF_F_HW_VLAN_CTAG_RX) {
		/* Update the behavior of vlan accel in the adapter */
		err = qlge_update_hw_vlan_features(ndev, features);
		if (err)
			return err;

		qlge_vlan_mode(ndev, features);
	}

	return 0;
}
/* Program @vid into the hardware VLAN filter with its enable bit set.
 * Caller must hold the MAC-address hardware semaphore.
 */
static int __qlge_vlan_rx_add_vid(struct qlge_adapter *qdev, u16 vid)
{
	u32 enable_bit = MAC_ADDR_E;
	int ret = qlge_set_mac_addr_reg(qdev, (u8 *)&enable_bit,
					MAC_ADDR_TYPE_VLAN, vid);

	if (ret)
		netif_err(qdev, ifup, qdev->ndev,
			  "Failed to init vlan address.\n");
	return ret;
}
/* ndo_vlan_rx_add_vid hook: program @vid into the hardware filter under
 * the MAC-address semaphore and record it in the software bitmap (the
 * bitmap is updated even if the register write failed).
 */
static int qlge_vlan_rx_add_vid(struct net_device *ndev, __be16 proto, u16 vid)
{
	struct qlge_adapter *qdev = netdev_to_qdev(ndev);
	int ret;
	int lock_err;

	lock_err = qlge_sem_spinlock(qdev, SEM_MAC_ADDR_MASK);
	if (lock_err)
		return lock_err;

	ret = __qlge_vlan_rx_add_vid(qdev, vid);
	set_bit(vid, qdev->active_vlans);

	qlge_sem_unlock(qdev, SEM_MAC_ADDR_MASK);

	return ret;
}
/* Clear one VLAN id from the MAC address filter (semaphore held by caller). */
static int __qlge_vlan_rx_kill_vid(struct qlge_adapter *qdev, u16 vid)
{
	u32 enable_bit = 0;
	int status = qlge_set_mac_addr_reg(qdev, (u8 *)&enable_bit,
					   MAC_ADDR_TYPE_VLAN, vid);

	if (status)
		netif_err(qdev, ifup, qdev->ndev,
			  "Failed to clear vlan address.\n");
	return status;
}
/* ndo_vlan_rx_kill_vid hook: drop a VLAN filter under the MAC addr semaphore. */
static int qlge_vlan_rx_kill_vid(struct net_device *ndev, __be16 proto, u16 vid)
{
	struct qlge_adapter *qdev = netdev_to_qdev(ndev);
	int sem_status;
	int ret;

	sem_status = qlge_sem_spinlock(qdev, SEM_MAC_ADDR_MASK);
	if (sem_status)
		return sem_status;

	ret = __qlge_vlan_rx_kill_vid(qdev, vid);
	/* Forget the vid so a later reset does not re-add it. */
	clear_bit(vid, qdev->active_vlans);

	qlge_sem_unlock(qdev, SEM_MAC_ADDR_MASK);
	return ret;
}
/* Re-program every previously active VLAN id, e.g. after an adapter reset. */
static void qlge_restore_vlan(struct qlge_adapter *qdev)
{
	u16 vid;
	int sem_status;

	sem_status = qlge_sem_spinlock(qdev, SEM_MAC_ADDR_MASK);
	if (sem_status)
		return;

	for_each_set_bit(vid, qdev->active_vlans, VLAN_N_VID)
		__qlge_vlan_rx_add_vid(qdev, vid);

	qlge_sem_unlock(qdev, SEM_MAC_ADDR_MASK);
}
/* MSI-X Multiple Vector Interrupt Handler for inbound completions.
 * All completion processing is deferred to the ring's NAPI context;
 * the MSI-X vector is auto-masked by the hardware until re-enabled.
 */
static irqreturn_t qlge_msix_rx_isr(int irq, void *dev_id)
{
	struct rx_ring *rx_ring = dev_id;
	napi_schedule(&rx_ring->napi);
	return IRQ_HANDLED;
}
/* This handles a fatal error, MPI activity, and the default
 * rx_ring in an MSI-X multiple vector environment.
 * In MSI/Legacy environment it also processes the rest of
 * the rx_rings.
 */
static irqreturn_t qlge_isr(int irq, void *dev_id)
{
	struct rx_ring *rx_ring = dev_id;
	struct qlge_adapter *qdev = rx_ring->qdev;
	struct intr_context *intr_context = &qdev->intr_context[0];
	u32 var;
	int work_done = 0;
	/* Experience shows that when using INTx interrupts, interrupts must
	 * be masked manually.
	 * When using MSI mode, INTR_EN_EN must be explicitly disabled
	 * (even though it is auto-masked), otherwise a later command to
	 * enable it is not effective.
	 */
	if (!test_bit(QL_MSIX_ENABLED, &qdev->flags))
		qlge_disable_completion_interrupt(qdev, 0);
	var = qlge_read32(qdev, STS);
	/*
	 * Check for fatal error: leave interrupts disabled and queue the
	 * ASIC recovery work; the error source is only logged here.
	 */
	if (var & STS_FE) {
		qlge_disable_completion_interrupt(qdev, 0);
		qlge_queue_asic_error(qdev);
		netdev_err(qdev->ndev, "Got fatal error, STS = %x.\n", var);
		var = qlge_read32(qdev, ERR_STS);
		netdev_err(qdev->ndev, "Resetting chip. Error Status Register = 0x%x\n", var);
		return IRQ_HANDLED;
	}
	/*
	 * Check MPI processor activity.
	 */
	if ((var & STS_PI) &&
	    (qlge_read32(qdev, INTR_MASK) & INTR_MASK_PI)) {
		/*
		 * We've got an async event or mailbox completion.
		 * Handle it and clear the source of the interrupt.
		 * The MPI interrupt is masked here and re-enabled later by
		 * the MPI work handler.
		 */
		netif_err(qdev, intr, qdev->ndev,
			  "Got MPI processor interrupt.\n");
		qlge_write32(qdev, INTR_MASK, (INTR_MASK_PI << 16));
		/* Run the MPI work on this CPU to keep cache locality. */
		queue_delayed_work_on(smp_processor_id(),
				      qdev->workqueue, &qdev->mpi_work, 0);
		work_done++;
	}
	/*
	 * Get the bit-mask that shows the active queues for this
	 * pass. Compare it to the queues that this irq services
	 * and call napi if there's a match.
	 */
	var = qlge_read32(qdev, ISR1);
	if (var & intr_context->irq_mask) {
		netif_info(qdev, intr, qdev->ndev,
			   "Waking handler for rx_ring[0].\n");
		napi_schedule(&rx_ring->napi);
		work_done++;
	} else {
		/* Experience shows that the device sometimes signals an
		 * interrupt but no work is scheduled from this function.
		 * Nevertheless, the interrupt is auto-masked. Therefore, we
		 * systematically re-enable the interrupt if we didn't
		 * schedule napi.
		 */
		qlge_enable_completion_interrupt(qdev, 0);
	}
	return work_done ? IRQ_HANDLED : IRQ_NONE;
}
/* Fill in a TSO (large send) IOCB for @skb when it is a GSO frame.
 *
 * Returns 1 if TSO was set up, 0 if the frame is not GSO (caller should
 * fall back to plain checksum offload), or a negative errno from
 * skb_cow_head() if the header could not be made writable.
 */
static int qlge_tso(struct sk_buff *skb, struct qlge_ob_mac_tso_iocb_req *mac_iocb_ptr)
{
	if (skb_is_gso(skb)) {
		int err;
		__be16 l3_proto = vlan_get_protocol(skb);
		err = skb_cow_head(skb, 0);
		if (err < 0)
			return err;
		mac_iocb_ptr->opcode = OPCODE_OB_MAC_TSO_IOCB;
		mac_iocb_ptr->flags3 |= OB_MAC_TSO_IOCB_IC;
		mac_iocb_ptr->frame_len = cpu_to_le32((u32)skb->len);
		mac_iocb_ptr->total_hdrs_len =
			cpu_to_le16(skb_tcp_all_headers(skb));
		/* Network and transport header offsets packed into one field. */
		mac_iocb_ptr->net_trans_offset =
			cpu_to_le16(skb_network_offset(skb) |
				    skb_transport_offset(skb)
				    << OB_MAC_TRANSPORT_HDR_SHIFT);
		mac_iocb_ptr->mss = cpu_to_le16(skb_shinfo(skb)->gso_size);
		mac_iocb_ptr->flags2 |= OB_MAC_TSO_IOCB_LSO;
		if (likely(l3_proto == htons(ETH_P_IP))) {
			struct iphdr *iph = ip_hdr(skb);
			/* Seed the pseudo-header checksum; hardware
			 * computes the rest per segment.
			 */
			iph->check = 0;
			mac_iocb_ptr->flags1 |= OB_MAC_TSO_IOCB_IP4;
			tcp_hdr(skb)->check = ~csum_tcpudp_magic(iph->saddr,
								 iph->daddr, 0,
								 IPPROTO_TCP,
								 0);
		} else if (l3_proto == htons(ETH_P_IPV6)) {
			mac_iocb_ptr->flags1 |= OB_MAC_TSO_IOCB_IP6;
			tcp_hdr(skb)->check =
				~csum_ipv6_magic(&ipv6_hdr(skb)->saddr,
						 &ipv6_hdr(skb)->daddr,
						 0, IPPROTO_TCP, 0);
		}
		return 1;
	}
	return 0;
}
/* Fill in an IOCB for hardware TCP/UDP checksum offload on a single frame.
 *
 * NOTE(review): OB_MAC_TSO_IOCB_IP4 is set unconditionally and ip_hdr() is
 * used, so this path assumes an IPv4 frame with CHECKSUM_PARTIAL — confirm
 * that qlge_send()/feature flags never route IPv6 csum offload here.
 */
static void qlge_hw_csum_setup(struct sk_buff *skb,
			       struct qlge_ob_mac_tso_iocb_req *mac_iocb_ptr)
{
	int len;
	struct iphdr *iph = ip_hdr(skb);
	__sum16 *check;
	mac_iocb_ptr->opcode = OPCODE_OB_MAC_TSO_IOCB;
	mac_iocb_ptr->frame_len = cpu_to_le32((u32)skb->len);
	/* Network and transport header offsets packed into one field. */
	mac_iocb_ptr->net_trans_offset =
		cpu_to_le16(skb_network_offset(skb) |
			    skb_transport_offset(skb) << OB_MAC_TRANSPORT_HDR_SHIFT);
	mac_iocb_ptr->flags1 |= OB_MAC_TSO_IOCB_IP4;
	/* L4 payload length = IP total length minus IP header length. */
	len = (ntohs(iph->tot_len) - (iph->ihl << 2));
	if (likely(iph->protocol == IPPROTO_TCP)) {
		check = &(tcp_hdr(skb)->check);
		mac_iocb_ptr->flags2 |= OB_MAC_TSO_IOCB_TC;
		mac_iocb_ptr->total_hdrs_len =
			cpu_to_le16(skb_transport_offset(skb) +
				    (tcp_hdr(skb)->doff << 2));
	} else {
		check = &(udp_hdr(skb)->check);
		mac_iocb_ptr->flags2 |= OB_MAC_TSO_IOCB_UC;
		mac_iocb_ptr->total_hdrs_len =
			cpu_to_le16(skb_transport_offset(skb) +
				    sizeof(struct udphdr));
	}
	/* Seed the pseudo-header checksum for the hardware to complete. */
	*check = ~csum_tcpudp_magic(iph->saddr,
				    iph->daddr, len, iph->protocol, 0);
}
/* ndo_start_xmit: build a MAC IOCB for @skb, map its fragments, and ring
 * the TX doorbell. Returns NETDEV_TX_BUSY when the ring is out of slots or
 * the fragments could not be DMA-mapped; NETDEV_TX_OK otherwise.
 */
static netdev_tx_t qlge_send(struct sk_buff *skb, struct net_device *ndev)
{
	struct qlge_adapter *qdev = netdev_to_qdev(ndev);
	struct qlge_ob_mac_iocb_req *mac_iocb_ptr;
	struct tx_ring_desc *tx_ring_desc;
	int tso;
	struct tx_ring *tx_ring;
	u32 tx_ring_idx = (u32)skb->queue_mapping;
	tx_ring = &qdev->tx_ring[tx_ring_idx];
	/* Pad runt frames to the minimum Ethernet size. */
	if (skb_padto(skb, ETH_ZLEN))
		return NETDEV_TX_OK;
	if (unlikely(atomic_read(&tx_ring->tx_count) < 2)) {
		netif_info(qdev, tx_queued, qdev->ndev,
			   "%s: BUG! shutting down tx queue %d due to lack of resources.\n",
			   __func__, tx_ring_idx);
		netif_stop_subqueue(ndev, tx_ring->wq_id);
		tx_ring->tx_errors++;
		return NETDEV_TX_BUSY;
	}
	tx_ring_desc = &tx_ring->q[tx_ring->prod_idx];
	mac_iocb_ptr = tx_ring_desc->queue_entry;
	memset((void *)mac_iocb_ptr, 0, sizeof(*mac_iocb_ptr));
	mac_iocb_ptr->opcode = OPCODE_OB_MAC_IOCB;
	mac_iocb_ptr->tid = tx_ring_desc->index;
	/* We use the upper 32-bits to store the tx queue for this IO.
	 * When we get the completion we can use it to establish the context.
	 */
	mac_iocb_ptr->txq_idx = tx_ring_idx;
	tx_ring_desc->skb = skb;
	mac_iocb_ptr->frame_len = cpu_to_le16((u16)skb->len);
	if (skb_vlan_tag_present(skb)) {
		netif_printk(qdev, tx_queued, KERN_DEBUG, qdev->ndev,
			     "Adding a vlan tag %d.\n", skb_vlan_tag_get(skb));
		mac_iocb_ptr->flags3 |= OB_MAC_IOCB_V;
		mac_iocb_ptr->vlan_tci = cpu_to_le16(skb_vlan_tag_get(skb));
	}
	/* Try TSO first; fall back to plain csum offload for non-GSO skbs. */
	tso = qlge_tso(skb, (struct qlge_ob_mac_tso_iocb_req *)mac_iocb_ptr);
	if (tso < 0) {
		dev_kfree_skb_any(skb);
		return NETDEV_TX_OK;
	} else if (unlikely(!tso) && (skb->ip_summed == CHECKSUM_PARTIAL)) {
		qlge_hw_csum_setup(skb,
				   (struct qlge_ob_mac_tso_iocb_req *)mac_iocb_ptr);
	}
	if (qlge_map_send(qdev, mac_iocb_ptr, skb, tx_ring_desc) !=
	    NETDEV_TX_OK) {
		netif_err(qdev, tx_queued, qdev->ndev,
			  "Could not map the segments.\n");
		tx_ring->tx_errors++;
		return NETDEV_TX_BUSY;
	}
	tx_ring->prod_idx++;
	if (tx_ring->prod_idx == tx_ring->wq_len)
		tx_ring->prod_idx = 0;
	/* Descriptor writes must be visible before the doorbell. */
	wmb();
	qlge_write_db_reg_relaxed(tx_ring->prod_idx, tx_ring->prod_idx_db_reg);
	netif_printk(qdev, tx_queued, KERN_DEBUG, qdev->ndev,
		     "tx queued, slot %d, len %d\n",
		     tx_ring->prod_idx, skb->len);
	atomic_dec(&tx_ring->tx_count);
	if (unlikely(atomic_read(&tx_ring->tx_count) < 2)) {
		netif_stop_subqueue(ndev, tx_ring->wq_id);
		/* Re-check after stopping: completions may have freed slots
		 * concurrently, in which case wake the queue again.
		 */
		if ((atomic_read(&tx_ring->tx_count) > (tx_ring->wq_len / 4)))
			/*
			 * The queue got stopped because the tx_ring was full.
			 * Wake it up, because it's now at least 25% empty.
			 */
			netif_wake_subqueue(qdev->ndev, tx_ring->wq_id);
	}
	return NETDEV_TX_OK;
}
/* Release the RX and TX shadow register pages, if they were allocated. */
static void qlge_free_shadow_space(struct qlge_adapter *qdev)
{
	if (qdev->rx_ring_shadow_reg_area) {
		dma_free_coherent(&qdev->pdev->dev, PAGE_SIZE,
				  qdev->rx_ring_shadow_reg_area,
				  qdev->rx_ring_shadow_reg_dma);
		qdev->rx_ring_shadow_reg_area = NULL;
	}

	if (qdev->tx_ring_shadow_reg_area) {
		dma_free_coherent(&qdev->pdev->dev, PAGE_SIZE,
				  qdev->tx_ring_shadow_reg_area,
				  qdev->tx_ring_shadow_reg_dma);
		qdev->tx_ring_shadow_reg_area = NULL;
	}
}
/* Allocate one coherent page each for the RX and TX shadow registers. */
static int qlge_alloc_shadow_space(struct qlge_adapter *qdev)
{
	qdev->rx_ring_shadow_reg_area =
		dma_alloc_coherent(&qdev->pdev->dev, PAGE_SIZE,
				   &qdev->rx_ring_shadow_reg_dma,
				   GFP_ATOMIC);
	if (!qdev->rx_ring_shadow_reg_area) {
		netif_err(qdev, ifup, qdev->ndev,
			  "Allocation of RX shadow space failed.\n");
		return -ENOMEM;
	}

	qdev->tx_ring_shadow_reg_area =
		dma_alloc_coherent(&qdev->pdev->dev, PAGE_SIZE,
				   &qdev->tx_ring_shadow_reg_dma,
				   GFP_ATOMIC);
	if (!qdev->tx_ring_shadow_reg_area) {
		netif_err(qdev, ifup, qdev->ndev,
			  "Allocation of TX shadow space failed.\n");
		/* Undo the RX allocation so the caller sees a clean state. */
		dma_free_coherent(&qdev->pdev->dev, PAGE_SIZE,
				  qdev->rx_ring_shadow_reg_area,
				  qdev->rx_ring_shadow_reg_dma);
		return -ENOMEM;
	}

	return 0;
}
/* Pair every software descriptor with its slot in the DMA work queue and
 * mark the whole ring as available.
 */
static void qlge_init_tx_ring(struct qlge_adapter *qdev, struct tx_ring *tx_ring)
{
	struct qlge_ob_mac_iocb_req *iocb = tx_ring->wq_base;
	struct tx_ring_desc *desc = tx_ring->q;
	int i;

	for (i = 0; i < tx_ring->wq_len; i++, iocb++, desc++) {
		desc->index = i;
		desc->skb = NULL;
		desc->queue_entry = iocb;
	}
	atomic_set(&tx_ring->tx_count, tx_ring->wq_len);
}
/* Release a TX ring's DMA work queue and its descriptor shadow array. */
static void qlge_free_tx_resources(struct qlge_adapter *qdev,
				   struct tx_ring *tx_ring)
{
	if (tx_ring->wq_base) {
		dma_free_coherent(&qdev->pdev->dev, tx_ring->wq_size,
				  tx_ring->wq_base, tx_ring->wq_base_dma);
		tx_ring->wq_base = NULL;
	}

	kfree(tx_ring->q);
	tx_ring->q = NULL;
}
/* Allocate a TX ring's DMA work queue plus its descriptor shadow array. */
static int qlge_alloc_tx_resources(struct qlge_adapter *qdev,
				   struct tx_ring *tx_ring)
{
	tx_ring->wq_base =
		dma_alloc_coherent(&qdev->pdev->dev, tx_ring->wq_size,
				   &tx_ring->wq_base_dma, GFP_ATOMIC);

	/* The work queue base must satisfy the hardware alignment. */
	if (!tx_ring->wq_base || (tx_ring->wq_base_dma & WQ_ADDR_ALIGN))
		goto pci_alloc_err;

	tx_ring->q = kmalloc_array(tx_ring->wq_len,
				   sizeof(struct tx_ring_desc), GFP_KERNEL);
	if (!tx_ring->q) {
		dma_free_coherent(&qdev->pdev->dev, tx_ring->wq_size,
				  tx_ring->wq_base, tx_ring->wq_base_dma);
		tx_ring->wq_base = NULL;
		goto pci_alloc_err;
	}
	return 0;

pci_alloc_err:
	netif_err(qdev, ifup, qdev->ndev, "tx_ring alloc failed.\n");
	return -ENOMEM;
}
/* Release all outstanding large-buffer page chunks for one rx ring.
 *
 * NOTE(review): the DMA mapping is only torn down for the chunk sitting at
 * the last offset of a block — presumably one dma_map covers a whole
 * lbq block and is shared by its chunks; confirm against the allocation
 * path before changing this.
 */
static void qlge_free_lbq_buffers(struct qlge_adapter *qdev, struct rx_ring *rx_ring)
{
	struct qlge_bq *lbq = &rx_ring->lbq;
	unsigned int last_offset;
	last_offset = qlge_lbq_block_size(qdev) - qdev->lbq_buf_size;
	/* Drain every descriptor between clean and use indices. */
	while (lbq->next_to_clean != lbq->next_to_use) {
		struct qlge_bq_desc *lbq_desc =
			&lbq->queue[lbq->next_to_clean];
		if (lbq_desc->p.pg_chunk.offset == last_offset)
			dma_unmap_page(&qdev->pdev->dev, lbq_desc->dma_addr,
				       qlge_lbq_block_size(qdev),
				       DMA_FROM_DEVICE);
		put_page(lbq_desc->p.pg_chunk.page);
		lbq->next_to_clean = QLGE_BQ_WRAP(lbq->next_to_clean + 1);
	}
	/* The partially consumed master chunk holds its own mapping. */
	if (rx_ring->master_chunk.page) {
		dma_unmap_page(&qdev->pdev->dev, rx_ring->chunk_dma_addr,
			       qlge_lbq_block_size(qdev), DMA_FROM_DEVICE);
		put_page(rx_ring->master_chunk.page);
		rx_ring->master_chunk.page = NULL;
	}
}
/* Unmap and free every small-buffer skb attached to one rx ring. */
static void qlge_free_sbq_buffers(struct qlge_adapter *qdev, struct rx_ring *rx_ring)
{
	int i;

	for (i = 0; i < QLGE_BQ_LEN; i++) {
		struct qlge_bq_desc *sbq_desc = &rx_ring->sbq.queue[i];

		/* Defensive check retained from the original code. */
		if (!sbq_desc) {
			netif_err(qdev, ifup, qdev->ndev,
				  "sbq_desc %d is NULL.\n", i);
			return;
		}
		if (!sbq_desc->p.skb)
			continue;
		dma_unmap_single(&qdev->pdev->dev, sbq_desc->dma_addr,
				 SMALL_BUF_MAP_SIZE, DMA_FROM_DEVICE);
		dev_kfree_skb(sbq_desc->p.skb);
		sbq_desc->p.skb = NULL;
	}
}
/* Free all large and small rx buffers associated
 * with the completion queues for this device.
 */
static void qlge_free_rx_buffers(struct qlge_adapter *qdev)
{
	int idx;

	for (idx = 0; idx < qdev->rx_ring_count; idx++) {
		struct rx_ring *rx_ring = &qdev->rx_ring[idx];

		if (rx_ring->lbq.queue)
			qlge_free_lbq_buffers(qdev, rx_ring);
		if (rx_ring->sbq.queue)
			qlge_free_sbq_buffers(qdev, rx_ring);
	}
}
/* Populate the buffer queues of every RSS (inbound) ring. */
static void qlge_alloc_rx_buffers(struct qlge_adapter *qdev)
{
	int idx;

	for (idx = 0; idx < qdev->rss_ring_count; idx++)
		qlge_update_buffer_queues(&qdev->rx_ring[idx], GFP_KERNEL,
					  HZ / 2);
}
/* Allocate a buffer queue's DMA ring and control blocks and link each
 * control block to its slot in the ring. On failure the caller is
 * responsible for cleanup (see qlge_free_rx_resources()).
 */
static int qlge_init_bq(struct qlge_bq *bq)
{
	struct rx_ring *rx_ring = QLGE_BQ_CONTAINER(bq);
	struct qlge_adapter *qdev = rx_ring->qdev;
	struct qlge_bq_desc *desc;
	__le64 *buf_ptr;
	int i;

	bq->base = dma_alloc_coherent(&qdev->pdev->dev, QLGE_BQ_SIZE,
				      &bq->base_dma, GFP_ATOMIC);
	if (!bq->base)
		return -ENOMEM;

	bq->queue = kmalloc_array(QLGE_BQ_LEN, sizeof(struct qlge_bq_desc),
				  GFP_KERNEL);
	if (!bq->queue)
		return -ENOMEM;

	buf_ptr = bq->base;
	for (i = 0, desc = bq->queue; i < QLGE_BQ_LEN;
	     i++, desc++, buf_ptr++) {
		desc->p.skb = NULL;
		desc->index = i;
		desc->buf_ptr = buf_ptr;
	}
	return 0;
}
/* Release the completion queue and both buffer queues of one rx ring. */
static void qlge_free_rx_resources(struct qlge_adapter *qdev,
				   struct rx_ring *rx_ring)
{
	/* Small buffer queue: DMA area then control blocks. */
	if (rx_ring->sbq.base) {
		dma_free_coherent(&qdev->pdev->dev, QLGE_BQ_SIZE,
				  rx_ring->sbq.base, rx_ring->sbq.base_dma);
		rx_ring->sbq.base = NULL;
	}
	kfree(rx_ring->sbq.queue);
	rx_ring->sbq.queue = NULL;

	/* Large buffer queue: DMA area then control blocks. */
	if (rx_ring->lbq.base) {
		dma_free_coherent(&qdev->pdev->dev, QLGE_BQ_SIZE,
				  rx_ring->lbq.base, rx_ring->lbq.base_dma);
		rx_ring->lbq.base = NULL;
	}
	kfree(rx_ring->lbq.queue);
	rx_ring->lbq.queue = NULL;

	/* Finally the completion queue itself. */
	if (rx_ring->cq_base) {
		dma_free_coherent(&qdev->pdev->dev, rx_ring->cq_size,
				  rx_ring->cq_base, rx_ring->cq_base_dma);
		rx_ring->cq_base = NULL;
	}
}
/* Allocate the completion queue for one rx_ring and, for RSS (inbound)
 * rings only, its small and large buffer queues.
 */
static int qlge_alloc_rx_resources(struct qlge_adapter *qdev,
				   struct rx_ring *rx_ring)
{
	rx_ring->cq_base =
		dma_alloc_coherent(&qdev->pdev->dev, rx_ring->cq_size,
				   &rx_ring->cq_base_dma, GFP_ATOMIC);
	if (!rx_ring->cq_base) {
		netif_err(qdev, ifup, qdev->ndev, "rx_ring alloc failed.\n");
		return -ENOMEM;
	}

	/* Outbound completion rings carry no buffer queues. */
	if (rx_ring->cq_id < qdev->rss_ring_count &&
	    (qlge_init_bq(&rx_ring->sbq) || qlge_init_bq(&rx_ring->lbq))) {
		qlge_free_rx_resources(qdev, rx_ring);
		return -ENOMEM;
	}

	return 0;
}
/* Walk every TX queue and release any skb whose completion never arrived. */
static void qlge_tx_ring_clean(struct qlge_adapter *qdev)
{
	struct tx_ring_desc *tx_ring_desc;
	struct tx_ring *tx_ring;
	int i, j;

	for (j = 0; j < qdev->tx_ring_count; j++) {
		tx_ring = &qdev->tx_ring[j];
		for (i = 0; i < tx_ring->wq_len; i++) {
			tx_ring_desc = &tx_ring->q[i];
			if (!tx_ring_desc || !tx_ring_desc->skb)
				continue;
			netif_err(qdev, ifdown, qdev->ndev,
				  "Freeing lost SKB %p, from queue %d, index %d.\n",
				  tx_ring_desc->skb, j,
				  tx_ring_desc->index);
			qlge_unmap_send(qdev, tx_ring_desc,
					tx_ring_desc->map_cnt);
			dev_kfree_skb(tx_ring_desc->skb);
			tx_ring_desc->skb = NULL;
		}
	}
}
/* Release all ring memory: TX rings, RX rings, then the shadow pages. */
static void qlge_free_mem_resources(struct qlge_adapter *qdev)
{
	int idx;

	for (idx = 0; idx < qdev->tx_ring_count; idx++)
		qlge_free_tx_resources(qdev, &qdev->tx_ring[idx]);
	for (idx = 0; idx < qdev->rx_ring_count; idx++)
		qlge_free_rx_resources(qdev, &qdev->rx_ring[idx]);
	qlge_free_shadow_space(qdev);
}
/* Allocate shadow registers plus every RX and TX ring; unwind on failure. */
static int qlge_alloc_mem_resources(struct qlge_adapter *qdev)
{
	int idx;

	/* Shadow registers must exist before any ring is set up. */
	if (qlge_alloc_shadow_space(qdev))
		return -ENOMEM;

	for (idx = 0; idx < qdev->rx_ring_count; idx++) {
		if (qlge_alloc_rx_resources(qdev, &qdev->rx_ring[idx])) {
			netif_err(qdev, ifup, qdev->ndev,
				  "RX resource allocation failed.\n");
			goto err_mem;
		}
	}

	for (idx = 0; idx < qdev->tx_ring_count; idx++) {
		if (qlge_alloc_tx_resources(qdev, &qdev->tx_ring[idx])) {
			netif_err(qdev, ifup, qdev->ndev,
				  "TX resource allocation failed.\n");
			goto err_mem;
		}
	}
	return 0;

err_mem:
	/* Frees everything allocated so far, including shadow space. */
	qlge_free_mem_resources(qdev);
	return -ENOMEM;
}
/* Set up the rx ring control block and pass it to the chip.
 * The control block is defined as
 * "Completion Queue Initialization Control Block", or cqicb.
 * Returns 0 on success or the error from qlge_write_cfg().
 */
static int qlge_start_rx_ring(struct qlge_adapter *qdev, struct rx_ring *rx_ring)
{
	struct cqicb *cqicb = &rx_ring->cqicb;
	/* Each ring owns a slice of the shared shadow register page. */
	void *shadow_reg = qdev->rx_ring_shadow_reg_area +
		(rx_ring->cq_id * RX_RING_SHADOW_SPACE);
	u64 shadow_reg_dma = qdev->rx_ring_shadow_reg_dma +
		(rx_ring->cq_id * RX_RING_SHADOW_SPACE);
	void __iomem *doorbell_area =
		qdev->doorbell_area + (DB_PAGE_SIZE * (128 + rx_ring->cq_id));
	int err = 0;
	u64 dma;
	__le64 *base_indirect_ptr;
	int page_entries;
	/* Set up the shadow registers for this ring. */
	rx_ring->prod_idx_sh_reg = shadow_reg;
	rx_ring->prod_idx_sh_reg_dma = shadow_reg_dma;
	*rx_ring->prod_idx_sh_reg = 0;
	/* The lbq/sbq indirect page tables follow the producer index. */
	shadow_reg += sizeof(u64);
	shadow_reg_dma += sizeof(u64);
	rx_ring->lbq.base_indirect = shadow_reg;
	rx_ring->lbq.base_indirect_dma = shadow_reg_dma;
	shadow_reg += (sizeof(u64) * MAX_DB_PAGES_PER_BQ(QLGE_BQ_LEN));
	shadow_reg_dma += (sizeof(u64) * MAX_DB_PAGES_PER_BQ(QLGE_BQ_LEN));
	rx_ring->sbq.base_indirect = shadow_reg;
	rx_ring->sbq.base_indirect_dma = shadow_reg_dma;
	/* PCI doorbell mem area + 0x00 for consumer index register */
	rx_ring->cnsmr_idx_db_reg = (u32 __iomem *)doorbell_area;
	rx_ring->cnsmr_idx = 0;
	rx_ring->curr_entry = rx_ring->cq_base;
	/* PCI doorbell mem area + 0x04 for valid register */
	rx_ring->valid_db_reg = doorbell_area + 0x04;
	/* PCI doorbell mem area + 0x18 for large buffer consumer */
	rx_ring->lbq.prod_idx_db_reg = (u32 __iomem *)(doorbell_area + 0x18);
	/* PCI doorbell mem area + 0x1c */
	rx_ring->sbq.prod_idx_db_reg = (u32 __iomem *)(doorbell_area + 0x1c);
	memset((void *)cqicb, 0, sizeof(struct cqicb));
	cqicb->msix_vect = rx_ring->irq;
	cqicb->len = cpu_to_le16(QLGE_FIT16(rx_ring->cq_len) | LEN_V |
				 LEN_CPP_CONT);
	cqicb->addr = cpu_to_le64(rx_ring->cq_base_dma);
	cqicb->prod_idx_addr = cpu_to_le64(rx_ring->prod_idx_sh_reg_dma);
	/*
	 * Set up the control block load flags.
	 */
	cqicb->flags = FLAGS_LC | /* Load queue base address */
		FLAGS_LV | /* Load MSI-X vector */
		FLAGS_LI; /* Load irq delay values */
	/* Only inbound (RSS) completion queues carry buffer queues. */
	if (rx_ring->cq_id < qdev->rss_ring_count) {
		cqicb->flags |= FLAGS_LL; /* Load lbq values */
		dma = (u64)rx_ring->lbq.base_dma;
		base_indirect_ptr = rx_ring->lbq.base_indirect;
		/* Fill the indirect table with one entry per DB page. */
		for (page_entries = 0;
		     page_entries < MAX_DB_PAGES_PER_BQ(QLGE_BQ_LEN);
		     page_entries++) {
			base_indirect_ptr[page_entries] = cpu_to_le64(dma);
			dma += DB_PAGE_SIZE;
		}
		cqicb->lbq_addr = cpu_to_le64(rx_ring->lbq.base_indirect_dma);
		cqicb->lbq_buf_size =
			cpu_to_le16(QLGE_FIT16(qdev->lbq_buf_size));
		cqicb->lbq_len = cpu_to_le16(QLGE_FIT16(QLGE_BQ_LEN));
		rx_ring->lbq.next_to_use = 0;
		rx_ring->lbq.next_to_clean = 0;
		cqicb->flags |= FLAGS_LS; /* Load sbq values */
		dma = (u64)rx_ring->sbq.base_dma;
		base_indirect_ptr = rx_ring->sbq.base_indirect;
		for (page_entries = 0;
		     page_entries < MAX_DB_PAGES_PER_BQ(QLGE_BQ_LEN);
		     page_entries++) {
			base_indirect_ptr[page_entries] = cpu_to_le64(dma);
			dma += DB_PAGE_SIZE;
		}
		cqicb->sbq_addr =
			cpu_to_le64(rx_ring->sbq.base_indirect_dma);
		cqicb->sbq_buf_size = cpu_to_le16(SMALL_BUFFER_SIZE);
		cqicb->sbq_len = cpu_to_le16(QLGE_FIT16(QLGE_BQ_LEN));
		rx_ring->sbq.next_to_use = 0;
		rx_ring->sbq.next_to_clean = 0;
	}
	if (rx_ring->cq_id < qdev->rss_ring_count) {
		/* Inbound completion handling rx_rings run in
		 * separate NAPI contexts.
		 */
		netif_napi_add(qdev->ndev, &rx_ring->napi,
			       qlge_napi_poll_msix);
		cqicb->irq_delay = cpu_to_le16(qdev->rx_coalesce_usecs);
		cqicb->pkt_delay = cpu_to_le16(qdev->rx_max_coalesced_frames);
	} else {
		/* Outbound rings use the TX coalescing parameters. */
		cqicb->irq_delay = cpu_to_le16(qdev->tx_coalesce_usecs);
		cqicb->pkt_delay = cpu_to_le16(qdev->tx_max_coalesced_frames);
	}
	/* Hand the finished control block to the chip. */
	err = qlge_write_cfg(qdev, cqicb, sizeof(struct cqicb),
			     CFG_LCQ, rx_ring->cq_id);
	if (err) {
		netif_err(qdev, ifup, qdev->ndev, "Failed to load CQICB.\n");
		return err;
	}
	return err;
}
/* Set up a TX ring's doorbell and shadow registers, build its
 * "Work Queue Initialization Control Block" (wqicb), and load it into
 * the chip. Returns 0 on success or the error from qlge_write_cfg().
 */
static int qlge_start_tx_ring(struct qlge_adapter *qdev, struct tx_ring *tx_ring)
{
	/* NOTE(review): the wqicb is overlaid on the start of tx_ring
	 * itself — presumably struct tx_ring begins with the wqicb
	 * fields; confirm against the struct definition.
	 */
	struct wqicb *wqicb = (struct wqicb *)tx_ring;
	void __iomem *doorbell_area =
		qdev->doorbell_area + (DB_PAGE_SIZE * tx_ring->wq_id);
	void *shadow_reg = qdev->tx_ring_shadow_reg_area +
		(tx_ring->wq_id * sizeof(u64));
	u64 shadow_reg_dma = qdev->tx_ring_shadow_reg_dma +
		(tx_ring->wq_id * sizeof(u64));
	int err = 0;
	/*
	 * Assign doorbell registers for this tx_ring.
	 */
	/* TX PCI doorbell mem area for tx producer index */
	tx_ring->prod_idx_db_reg = (u32 __iomem *)doorbell_area;
	tx_ring->prod_idx = 0;
	/* TX PCI doorbell mem area + 0x04 */
	tx_ring->valid_db_reg = doorbell_area + 0x04;
	/*
	 * Assign shadow registers for this tx_ring.
	 */
	tx_ring->cnsmr_idx_sh_reg = shadow_reg;
	tx_ring->cnsmr_idx_sh_reg_dma = shadow_reg_dma;
	wqicb->len = cpu_to_le16(tx_ring->wq_len | Q_LEN_V | Q_LEN_CPP_CONT);
	wqicb->flags = cpu_to_le16(Q_FLAGS_LC |
				   Q_FLAGS_LB | Q_FLAGS_LI | Q_FLAGS_LO);
	wqicb->cq_id_rss = cpu_to_le16(tx_ring->cq_id);
	wqicb->rid = 0;
	wqicb->addr = cpu_to_le64(tx_ring->wq_base_dma);
	wqicb->cnsmr_idx_addr = cpu_to_le64(tx_ring->cnsmr_idx_sh_reg_dma);
	/* Reset the software descriptors before the chip starts. */
	qlge_init_tx_ring(qdev, tx_ring);
	err = qlge_write_cfg(qdev, wqicb, sizeof(*wqicb), CFG_LRQ,
			     (u16)tx_ring->wq_id);
	if (err) {
		netif_err(qdev, ifup, qdev->ndev, "Failed to load tx_ring.\n");
		return err;
	}
	return err;
}
/* Tear down whichever interrupt mode (MSI-X or MSI) was enabled. */
static void qlge_disable_msix(struct qlge_adapter *qdev)
{
	if (test_bit(QL_MSIX_ENABLED, &qdev->flags)) {
		pci_disable_msix(qdev->pdev);
		clear_bit(QL_MSIX_ENABLED, &qdev->flags);
		kfree(qdev->msi_x_entry);
		qdev->msi_x_entry = NULL;
		return;
	}

	if (test_bit(QL_MSI_ENABLED, &qdev->flags)) {
		pci_disable_msi(qdev->pdev);
		clear_bit(QL_MSI_ENABLED, &qdev->flags);
	}
}
/* We start by trying to get the number of vectors
 * stored in qdev->intr_count. If we don't get that
 * many then we reduce the count and try again.
 * Falls back MSI-X -> MSI -> legacy INTx; the module-level
 * qlge_irq_type is downgraded on each failure so later probes
 * skip modes that already failed.
 */
static void qlge_enable_msix(struct qlge_adapter *qdev)
{
	int i, err;
	/* Get the MSIX vectors. */
	if (qlge_irq_type == MSIX_IRQ) {
		/* Try to alloc space for the msix struct,
		 * if it fails then go to MSI/legacy.
		 */
		qdev->msi_x_entry = kcalloc(qdev->intr_count,
					    sizeof(struct msix_entry),
					    GFP_KERNEL);
		if (!qdev->msi_x_entry) {
			qlge_irq_type = MSI_IRQ;
			goto msi;
		}
		for (i = 0; i < qdev->intr_count; i++)
			qdev->msi_x_entry[i].entry = i;
		/* Accept anywhere between 1 and intr_count vectors. */
		err = pci_enable_msix_range(qdev->pdev, qdev->msi_x_entry,
					    1, qdev->intr_count);
		if (err < 0) {
			kfree(qdev->msi_x_entry);
			qdev->msi_x_entry = NULL;
			netif_warn(qdev, ifup, qdev->ndev,
				   "MSI-X Enable failed, trying MSI.\n");
			qlge_irq_type = MSI_IRQ;
		} else {
			/* err is the number of vectors actually granted. */
			qdev->intr_count = err;
			set_bit(QL_MSIX_ENABLED, &qdev->flags);
			netif_info(qdev, ifup, qdev->ndev,
				   "MSI-X Enabled, got %d vectors.\n",
				   qdev->intr_count);
			return;
		}
	}
msi:
	/* MSI and legacy modes use a single vector. */
	qdev->intr_count = 1;
	if (qlge_irq_type == MSI_IRQ) {
		if (pci_alloc_irq_vectors(qdev->pdev, 1, 1, PCI_IRQ_MSI) >= 0) {
			set_bit(QL_MSI_ENABLED, &qdev->flags);
			netif_info(qdev, ifup, qdev->ndev,
				   "Running with MSI interrupts.\n");
			return;
		}
	}
	/* Last resort: legacy INTx. */
	qlge_irq_type = LEG_IRQ;
	set_bit(QL_LEGACY_ENABLED, &qdev->flags);
	netif_printk(qdev, ifup, KERN_DEBUG, qdev->ndev,
		     "Running with legacy interrupts.\n");
}
/* Each vector services 1 RSS ring and 1 or more
 * TX completion rings. This function loops through
 * the TX completion rings and assigns the vector that
 * will service it. An example would be if there are
 * 2 vectors (so 2 RSS rings) and 8 TX completion rings.
 * This would mean that vector 0 would service RSS ring 0
 * and TX completion rings 0,1,2 and 3. Vector 1 would
 * service RSS ring 1 and TX completion rings 4,5,6 and 7.
 */
static void qlge_set_tx_vect(struct qlge_adapter *qdev)
{
	u32 tx_rings_per_vector = qdev->tx_ring_count / qdev->intr_count;
	int i, j, vect;

	if (!test_bit(QL_MSIX_ENABLED, &qdev->flags)) {
		/* Single vector: every ring gets irq 0. */
		for (i = 0; i < qdev->rx_ring_count; i++)
			qdev->rx_ring[i].irq = 0;
		return;
	}

	/* Hand out the TX completion rings in contiguous groups of
	 * tx_rings_per_vector per vector.
	 */
	vect = 0;
	j = 0;
	for (i = qdev->rss_ring_count; i < qdev->rx_ring_count; i++) {
		if (j == tx_rings_per_vector) {
			vect++;
			j = 0;
		}
		qdev->rx_ring[i].irq = vect;
		j++;
	}
}
/* Set the interrupt mask for this vector. Each vector
 * will service 1 RSS ring and 1 or more TX completion
 * rings. This function sets up a bit mask per vector
 * that indicates which rings it services.
 */
static void qlge_set_irq_mask(struct qlge_adapter *qdev, struct intr_context *ctx)
{
	int j, vect = ctx->intr;
	u32 tx_rings_per_vector = qdev->tx_ring_count / qdev->intr_count;

	if (!test_bit(QL_MSIX_ENABLED, &qdev->flags)) {
		/* Single vector: fold every queue's ID into the mask. */
		for (j = 0; j < qdev->rx_ring_count; j++)
			ctx->irq_mask |= (1 << qdev->rx_ring[j].cq_id);
		return;
	}

	/* The RSS ring with the same index as this vector... */
	ctx->irq_mask = (1 << qdev->rx_ring[vect].cq_id);

	/* ...plus the block of TX completion rings assigned to it. */
	for (j = 0; j < tx_rings_per_vector; j++)
		ctx->irq_mask |=
			(1 << qdev->rx_ring[qdev->rss_ring_count +
					    (vect * tx_rings_per_vector) + j].cq_id);
}
/*
 * Here we build the intr_context structures based on
 * our rx_ring count and intr vector count.
 * The intr_context structure is used to hook each vector
 * to possibly different handlers.
 */
static void qlge_resolve_queues_to_irqs(struct qlge_adapter *qdev)
{
	int i = 0;
	struct intr_context *intr_context = &qdev->intr_context[0];
	if (likely(test_bit(QL_MSIX_ENABLED, &qdev->flags))) {
		/* Each rx_ring has it's
		 * own intr_context since we have separate
		 * vectors for each queue.
		 */
		for (i = 0; i < qdev->intr_count; i++, intr_context++) {
			qdev->rx_ring[i].irq = i;
			intr_context->intr = i;
			intr_context->qdev = qdev;
			/* Set up this vector's bit-mask that indicates
			 * which queues it services.
			 */
			qlge_set_irq_mask(qdev, intr_context);
			/*
			 * We set up each vectors enable/disable/read bits so
			 * there's no bit/mask calculations in the critical path.
			 * The low bits carry the vector index (IHD = interrupt
			 * handler designation).
			 */
			intr_context->intr_en_mask =
				INTR_EN_TYPE_MASK | INTR_EN_INTR_MASK |
				INTR_EN_TYPE_ENABLE | INTR_EN_IHD_MASK | INTR_EN_IHD
				| i;
			intr_context->intr_dis_mask =
				INTR_EN_TYPE_MASK | INTR_EN_INTR_MASK |
				INTR_EN_TYPE_DISABLE | INTR_EN_IHD_MASK |
				INTR_EN_IHD | i;
			intr_context->intr_read_mask =
				INTR_EN_TYPE_MASK | INTR_EN_INTR_MASK |
				INTR_EN_TYPE_READ | INTR_EN_IHD_MASK | INTR_EN_IHD |
				i;
			if (i == 0) {
				/* The first vector/queue handles
				 * broadcast/multicast, fatal errors,
				 * and firmware events. This in addition
				 * to normal inbound NAPI processing.
				 */
				intr_context->handler = qlge_isr;
				sprintf(intr_context->name, "%s-rx-%d",
					qdev->ndev->name, i);
			} else {
				/*
				 * Inbound queues handle unicast frames only.
				 */
				intr_context->handler = qlge_msix_rx_isr;
				sprintf(intr_context->name, "%s-rx-%d",
					qdev->ndev->name, i);
			}
		}
	} else {
		/*
		 * All rx_rings use the same intr_context since
		 * there is only one vector.
		 */
		intr_context->intr = 0;
		intr_context->qdev = qdev;
		/*
		 * We set up each vectors enable/disable/read bits so
		 * there's no bit/mask calculations in the critical path.
		 */
		intr_context->intr_en_mask =
			INTR_EN_TYPE_MASK | INTR_EN_INTR_MASK | INTR_EN_TYPE_ENABLE;
		intr_context->intr_dis_mask =
			INTR_EN_TYPE_MASK | INTR_EN_INTR_MASK |
			INTR_EN_TYPE_DISABLE;
		if (test_bit(QL_LEGACY_ENABLED, &qdev->flags)) {
			/* Experience shows that when using INTx interrupts,
			 * the device does not always auto-mask INTR_EN_EN.
			 * Moreover, masking INTR_EN_EN manually does not
			 * immediately prevent interrupt generation.
			 */
			intr_context->intr_en_mask |= INTR_EN_EI << 16 |
				INTR_EN_EI;
			intr_context->intr_dis_mask |= INTR_EN_EI << 16;
		}
		intr_context->intr_read_mask =
			INTR_EN_TYPE_MASK | INTR_EN_INTR_MASK | INTR_EN_TYPE_READ;
		/*
		 * Single interrupt means one handler for all rings.
		 */
		intr_context->handler = qlge_isr;
		sprintf(intr_context->name, "%s-single_irq", qdev->ndev->name);
		/* Set up this vector's bit-mask that indicates
		 * which queues it services. In this case there is
		 * a single vector so it will service all RSS and
		 * TX completion rings.
		 */
		qlge_set_irq_mask(qdev, intr_context);
	}
	/* Tell the TX completion rings which MSIx vector
	 * they will be using.
	 */
	qlge_set_tx_vect(qdev);
}
/* Release every hooked irq, then disable MSI-X/MSI entirely. */
static void qlge_free_irq(struct qlge_adapter *qdev)
{
	struct intr_context *intr_context = &qdev->intr_context[0];
	int i;

	for (i = 0; i < qdev->intr_count; i++, intr_context++) {
		if (!intr_context->hooked)
			continue;
		if (test_bit(QL_MSIX_ENABLED, &qdev->flags))
			free_irq(qdev->msi_x_entry[i].vector,
				 &qdev->rx_ring[i]);
		else
			free_irq(qdev->pdev->irq, &qdev->rx_ring[0]);
	}

	qlge_disable_msix(qdev);
}
/* Hook an interrupt handler for every vector (one per rx_ring in MSI-X
 * mode, a single shared handler otherwise). On any failure all irqs
 * hooked so far are released via qlge_free_irq().
 */
static int qlge_request_irq(struct qlge_adapter *qdev)
{
	int i;
	int status = 0;
	struct pci_dev *pdev = qdev->pdev;
	struct intr_context *intr_context = &qdev->intr_context[0];
	/* Build the per-vector handler/mask assignments first. */
	qlge_resolve_queues_to_irqs(qdev);
	for (i = 0; i < qdev->intr_count; i++, intr_context++) {
		if (test_bit(QL_MSIX_ENABLED, &qdev->flags)) {
			status = request_irq(qdev->msi_x_entry[i].vector,
					     intr_context->handler,
					     0,
					     intr_context->name,
					     &qdev->rx_ring[i]);
			if (status) {
				netif_err(qdev, ifup, qdev->ndev,
					  "Failed request for MSIX interrupt %d.\n",
					  i);
				goto err_irq;
			}
		} else {
			netif_printk(qdev, ifup, KERN_DEBUG, qdev->ndev,
				     "trying msi or legacy interrupts.\n");
			netif_printk(qdev, ifup, KERN_DEBUG, qdev->ndev,
				     "%s: irq = %d.\n", __func__, pdev->irq);
			netif_printk(qdev, ifup, KERN_DEBUG, qdev->ndev,
				     "%s: context->name = %s.\n", __func__,
				     intr_context->name);
			netif_printk(qdev, ifup, KERN_DEBUG, qdev->ndev,
				     "%s: dev_id = 0x%p.\n", __func__,
				     &qdev->rx_ring[0]);
			/* Legacy INTx lines can be shared; MSI cannot. */
			status =
				request_irq(pdev->irq, qlge_isr,
					    test_bit(QL_MSI_ENABLED, &qdev->flags)
					    ? 0
					    : IRQF_SHARED,
					    intr_context->name, &qdev->rx_ring[0]);
			if (status)
				goto err_irq;
			netif_err(qdev, ifup, qdev->ndev,
				  "Hooked intr 0, queue type RX_Q, with name %s.\n",
				  intr_context->name);
		}
		intr_context->hooked = 1;
	}
	return status;
err_irq:
	netif_err(qdev, ifup, qdev->ndev, "Failed to get the interrupts!!!\n");
	/* Unwind any irqs that were successfully hooked. */
	qlge_free_irq(qdev);
	return status;
}
/* Build the RSS Initialization Control Block (ricb) — hash keys plus a
 * 1024-entry indirection table spreading flows over the RSS rings — and
 * load it into the chip.
 */
static int qlge_start_rss(struct qlge_adapter *qdev)
{
	/* Fixed hash seed (same well-known key used by other drivers). */
	static const u8 init_hash_seed[] = {
		0x6d, 0x5a, 0x56, 0xda, 0x25, 0x5b, 0x0e, 0xc2,
		0x41, 0x67, 0x25, 0x3d, 0x43, 0xa3, 0x8f, 0xb0,
		0xd0, 0xca, 0x2b, 0xcb, 0xae, 0x7b, 0x30, 0xb4,
		0x77, 0xcb, 0x2d, 0xa3, 0x80, 0x30, 0xf2, 0x0c,
		0x6a, 0x42, 0xb7, 0x3b, 0xbe, 0xac, 0x01, 0xfa
	};
	struct ricb *ricb = &qdev->ricb;
	int status = 0;
	int i;
	u8 *hash_id = (u8 *)ricb->hash_cq_id;
	memset((void *)ricb, 0, sizeof(*ricb));
	ricb->base_cq = RSS_L4K;
	ricb->flags =
		(RSS_L6K | RSS_LI | RSS_LB | RSS_LM | RSS_RT4 | RSS_RT6);
	/* 0x3ff: mask for the 1024-entry indirection table below. */
	ricb->mask = cpu_to_le16((u16)(0x3ff));
	/*
	 * Fill out the Indirection Table.
	 * rss_ring_count must be a power of two for this masking to
	 * distribute evenly.
	 */
	for (i = 0; i < 1024; i++)
		hash_id[i] = (i & (qdev->rss_ring_count - 1));
	/* 40-byte IPv6 key, first 16 bytes reused as the IPv4 key. */
	memcpy((void *)&ricb->ipv6_hash_key[0], init_hash_seed, 40);
	memcpy((void *)&ricb->ipv4_hash_key[0], init_hash_seed, 16);
	status = qlge_write_cfg(qdev, ricb, sizeof(*ricb), CFG_LR, 0);
	if (status) {
		netif_err(qdev, ifup, qdev->ndev, "Failed to load RICB.\n");
		return status;
	}
	return status;
}
/* Zero all 16 slots of the frame routing table (under its semaphore). */
static int qlge_clear_routing_entries(struct qlge_adapter *qdev)
{
	int status;
	int i;

	status = qlge_sem_spinlock(qdev, SEM_RT_IDX_MASK);
	if (status)
		return status;

	for (i = 0; i < 16; i++) {
		status = qlge_set_routing_reg(qdev, i, 0, 0);
		if (status) {
			netif_err(qdev, ifup, qdev->ndev,
				  "Failed to init routing register for CAM packets.\n");
			break;
		}
	}

	qlge_sem_unlock(qdev, SEM_RT_IDX_MASK);
	return status;
}
/* Initialize the frame-to-queue routing: clear the table, then program
 * slots for checksum-error drops, broadcast, optional RSS match, and
 * CAM hits. All register writes happen under the routing-index semaphore.
 */
static int qlge_route_initialize(struct qlge_adapter *qdev)
{
	int status = 0;
	/* Clear all the entries in the routing table. */
	status = qlge_clear_routing_entries(qdev);
	if (status)
		return status;
	status = qlge_sem_spinlock(qdev, SEM_RT_IDX_MASK);
	if (status)
		return status;
	/* Drop frames with bad IP checksums. */
	status = qlge_set_routing_reg(qdev, RT_IDX_IP_CSUM_ERR_SLOT,
				      RT_IDX_IP_CSUM_ERR, 1);
	if (status) {
		netif_err(qdev, ifup, qdev->ndev,
			  "Failed to init routing register for IP CSUM error packets.\n");
		goto exit;
	}
	/* Drop frames with bad TCP/UDP checksums. */
	status = qlge_set_routing_reg(qdev, RT_IDX_TCP_UDP_CSUM_ERR_SLOT,
				      RT_IDX_TU_CSUM_ERR, 1);
	if (status) {
		netif_err(qdev, ifup, qdev->ndev,
			  "Failed to init routing register for TCP/UDP CSUM error packets.\n");
		goto exit;
	}
	status = qlge_set_routing_reg(qdev, RT_IDX_BCAST_SLOT, RT_IDX_BCAST, 1);
	if (status) {
		netif_err(qdev, ifup, qdev->ndev,
			  "Failed to init routing register for broadcast packets.\n");
		goto exit;
	}
	/* If we have more than one inbound queue, then turn on RSS in the
	 * routing block.
	 */
	if (qdev->rss_ring_count > 1) {
		status = qlge_set_routing_reg(qdev, RT_IDX_RSS_MATCH_SLOT,
					      RT_IDX_RSS_MATCH, 1);
		if (status) {
			netif_err(qdev, ifup, qdev->ndev,
				  "Failed to init routing register for MATCH RSS packets.\n");
			goto exit;
		}
	}
	status = qlge_set_routing_reg(qdev, RT_IDX_CAM_HIT_SLOT,
				      RT_IDX_CAM_HIT, 1);
	if (status)
		netif_err(qdev, ifup, qdev->ndev,
			  "Failed to init routing register for CAM packets.\n");
exit:
	qlge_sem_unlock(qdev, SEM_RT_IDX_MASK);
	return status;
}
/* Program the CAM MAC filter and the frame routing table.
 *
 * Non-static: also used by other parts of the driver (e.g. after reset).
 */
int qlge_cam_route_initialize(struct qlge_adapter *qdev)
{
	int status, set;
	/* Check whether the link is up, and use that to decide
	 * whether we are setting or clearing the MAC address in
	 * the CAM.
	 */
	set = qlge_read32(qdev, STS);
	set &= qdev->port_link_up;
	status = qlge_set_mac_addr(qdev, set);
	if (status) {
		netif_err(qdev, ifup, qdev->ndev, "Failed to init mac address.\n");
		return status;
	}
	status = qlge_route_initialize(qdev);
	if (status)
		netif_err(qdev, ifup, qdev->ndev, "Failed to init routing table.\n");
	return status;
}
/* Bring the adapter hardware to an operational state: program the
 * global control registers, start every rx/tx ring, configure RSS
 * when applicable, initialize the port, set up CAM/routing, and
 * finally enable NAPI on the RSS rings.  Register writes use the
 * (mask << 16 | value) convention of this chip's control registers.
 */
static int qlge_adapter_initialize(struct qlge_adapter *qdev)
{
	u32 value, mask;
	int i;
	int status = 0;
	/*
	 * Set up the System register to halt on errors.
	 */
	value = SYS_EFE | SYS_FAE;
	mask = value << 16;
	qlge_write32(qdev, SYS, mask | value);
	/* Set the default queue, and VLAN behavior. */
	value = NIC_RCV_CFG_DFQ;
	mask = NIC_RCV_CFG_DFQ_MASK;
	if (qdev->ndev->features & NETIF_F_HW_VLAN_CTAG_RX) {
		value |= NIC_RCV_CFG_RV;
		mask |= (NIC_RCV_CFG_RV << 16);
	}
	qlge_write32(qdev, NIC_RCV_CFG, (mask | value));
	/* Set the MPI interrupt to enabled. */
	qlge_write32(qdev, INTR_MASK, (INTR_MASK_PI << 16) | INTR_MASK_PI);
	/* Enable the function, set pagesize, enable error checking. */
	value = FSC_FE | FSC_EPC_INBOUND | FSC_EPC_OUTBOUND |
		FSC_EC | FSC_VM_PAGE_4K;
	value |= SPLT_SETTING;
	/* Set/clear header splitting. */
	mask = FSC_VM_PAGESIZE_MASK |
		FSC_DBL_MASK | FSC_DBRST_MASK | (value << 16);
	qlge_write32(qdev, FSC, mask | value);
	qlge_write32(qdev, SPLT_HDR, SPLT_LEN);
	/* Set RX packet routing to use port/pci function on which the
	 * packet arrived on in addition to usual frame routing.
	 * This is helpful on bonding where both interfaces can have
	 * the same MAC address.
	 */
	qlge_write32(qdev, RST_FO, RST_FO_RR_MASK | RST_FO_RR_RCV_FUNC_CQ);
	/* Reroute all packets to our Interface.
	 * They may have been routed to MPI firmware
	 * due to WOL.
	 */
	value = qlge_read32(qdev, MGMT_RCV_CFG);
	value &= ~MGMT_RCV_CFG_RM;
	mask = 0xffff0000;
	/* Sticky reg needs clearing due to WOL. */
	qlge_write32(qdev, MGMT_RCV_CFG, mask);
	qlge_write32(qdev, MGMT_RCV_CFG, mask | value);
	/* Default WOL is enable on Mezz cards */
	if (qdev->pdev->subsystem_device == 0x0068 ||
	    qdev->pdev->subsystem_device == 0x0180)
		qdev->wol = WAKE_MAGIC;
	/* Start up the rx queues. */
	for (i = 0; i < qdev->rx_ring_count; i++) {
		status = qlge_start_rx_ring(qdev, &qdev->rx_ring[i]);
		if (status) {
			netif_err(qdev, ifup, qdev->ndev,
				  "Failed to start rx ring[%d].\n", i);
			return status;
		}
	}
	/* If there is more than one inbound completion queue
	 * then download a RICB to configure RSS.
	 */
	if (qdev->rss_ring_count > 1) {
		status = qlge_start_rss(qdev);
		if (status) {
			netif_err(qdev, ifup, qdev->ndev, "Failed to start RSS.\n");
			return status;
		}
	}
	/* Start up the tx queues. */
	for (i = 0; i < qdev->tx_ring_count; i++) {
		status = qlge_start_tx_ring(qdev, &qdev->tx_ring[i]);
		if (status) {
			netif_err(qdev, ifup, qdev->ndev,
				  "Failed to start tx ring[%d].\n", i);
			return status;
		}
	}
	/* Initialize the port and set the max framesize. */
	status = qdev->nic_ops->port_initialize(qdev);
	if (status)
		netif_err(qdev, ifup, qdev->ndev, "Failed to start port.\n");
	/* NOTE(review): port_initialize failure is only logged, not
	 * returned — presumably intentional (CAM/routing setup below
	 * still proceeds); confirm against hardware requirements.
	 */
	/* Set up the MAC address and frame routing filter. */
	status = qlge_cam_route_initialize(qdev);
	if (status) {
		netif_err(qdev, ifup, qdev->ndev,
			  "Failed to init CAM/Routing tables.\n");
		return status;
	}
	/* Start NAPI for the RSS queues. */
	for (i = 0; i < qdev->rss_ring_count; i++)
		napi_enable(&qdev->rx_ring[i].napi);
	return status;
}
/* Issue soft reset to chip.
 *
 * Clears the routing table first, quiesces MPI management traffic
 * (unless we are in ASIC recovery, where the firmware is already
 * stopped), then writes the function-reset bit and polls for it to
 * self-clear within ~30us.  Management traffic is resumed regardless
 * of whether the reset timed out.
 */
static int qlge_adapter_reset(struct qlge_adapter *qdev)
{
	u32 value;
	int status = 0;
	unsigned long end_jiffies;
	/* Clear all the entries in the routing table. */
	status = qlge_clear_routing_entries(qdev);
	if (status) {
		netif_err(qdev, ifup, qdev->ndev, "Failed to clear routing bits.\n");
		return status;
	}
	/* Check if bit is set then skip the mailbox command and
	 * clear the bit, else we are in normal reset process.
	 */
	if (!test_bit(QL_ASIC_RECOVERY, &qdev->flags)) {
		/* Stop management traffic. */
		qlge_mb_set_mgmnt_traffic_ctl(qdev, MB_SET_MPI_TFK_STOP);
		/* Wait for the NIC and MGMNT FIFOs to empty. */
		qlge_wait_fifo_empty(qdev);
	} else {
		clear_bit(QL_ASIC_RECOVERY, &qdev->flags);
	}
	qlge_write32(qdev, RST_FO, (RST_FO_FR << 16) | RST_FO_FR);
	end_jiffies = jiffies + usecs_to_jiffies(30);
	do {
		value = qlge_read32(qdev, RST_FO);
		if ((value & RST_FO_FR) == 0)
			break;
		cpu_relax();
	} while (time_before(jiffies, end_jiffies));
	/* RST_FO_FR still set means the chip never acknowledged the reset. */
	if (value & RST_FO_FR) {
		netif_err(qdev, ifdown, qdev->ndev,
			  "ETIMEDOUT!!! errored out of resetting the chip!\n");
		status = -ETIMEDOUT;
	}
	/* Resume management traffic. */
	qlge_mb_set_mgmnt_traffic_ctl(qdev, MB_SET_MPI_TFK_RESUME);
	return status;
}
/* Log function/port identity, the decoded chip revision nibbles and the
 * MAC address at probe time.
 */
static void qlge_display_dev_info(struct net_device *ndev)
{
	struct qlge_adapter *qdev = netdev_to_qdev(ndev);
	/* chip_rev_id packs four 4-bit fields: NIC roll/rev, XG roll/rev. */
	netif_info(qdev, probe, qdev->ndev,
		   "Function #%d, Port %d, NIC Roll %d, NIC Rev = %d, XG Roll = %d, XG Rev = %d.\n",
		   qdev->func,
		   qdev->port,
		   qdev->chip_rev_id & 0x0000000f,
		   qdev->chip_rev_id >> 4 & 0x0000000f,
		   qdev->chip_rev_id >> 8 & 0x0000000f,
		   qdev->chip_rev_id >> 12 & 0x0000000f);
	netif_info(qdev, probe, qdev->ndev,
		   "MAC address %pM\n", ndev->dev_addr);
}
/* Program Wake-on-LAN per qdev->wol.  Only WAKE_MAGIC is supported;
 * any other requested wake source is rejected with -EINVAL.
 */
static int qlge_wol(struct qlge_adapter *qdev)
{
	int status = 0;
	u32 wol = MB_WOL_DISABLE;
	/* The CAM is still intact after a reset, but if we
	 * are doing WOL, then we may need to program the
	 * routing regs. We would also need to issue the mailbox
	 * commands to instruct the MPI what to do per the ethtool
	 * settings.
	 */
	if (qdev->wol & (WAKE_ARP | WAKE_MAGICSECURE | WAKE_PHY | WAKE_UCAST |
			 WAKE_MCAST | WAKE_BCAST)) {
		netif_err(qdev, ifdown, qdev->ndev,
			  "Unsupported WOL parameter. qdev->wol = 0x%x.\n",
			  qdev->wol);
		return -EINVAL;
	}
	if (qdev->wol & WAKE_MAGIC) {
		status = qlge_mb_wol_set_magic(qdev, 1);
		if (status) {
			netif_err(qdev, ifdown, qdev->ndev,
				  "Failed to set magic packet on %s.\n",
				  qdev->ndev->name);
			return status;
		}
		netif_info(qdev, drv, qdev->ndev,
			   "Enabled magic packet successfully on %s.\n",
			   qdev->ndev->name);
		wol |= MB_WOL_MAGIC_PKT;
	}
	if (qdev->wol) {
		wol |= MB_WOL_MODE_ON;
		status = qlge_mb_wol_mode(qdev, wol);
		/* NOTE(review): this is logged via netif_err even on
		 * success — looks like it should be netif_info for the
		 * success case; confirm before changing log levels.
		 */
		netif_err(qdev, drv, qdev->ndev,
			  "WOL %s (wol code 0x%x) on %s\n",
			  (status == 0) ? "Successfully set" : "Failed",
			  wol, qdev->ndev->name);
	}
	return status;
}
/* Synchronously cancel all of the adapter's delayed work items. */
static void qlge_cancel_all_work_sync(struct qlge_adapter *qdev)
{
	/* Don't kill the reset worker thread if we
	 * are in the process of recovery.
	 */
	if (test_bit(QL_ADAPTER_UP, &qdev->flags))
		cancel_delayed_work_sync(&qdev->asic_reset_work);
	cancel_delayed_work_sync(&qdev->mpi_reset_work);
	cancel_delayed_work_sync(&qdev->mpi_work);
	cancel_delayed_work_sync(&qdev->mpi_idc_work);
	cancel_delayed_work_sync(&qdev->mpi_port_cfg_work);
}
/* Take the adapter down: drop the link, cancel workers, stop NAPI,
 * mask interrupts, drain tx, soft-reset the chip and free rx buffers.
 * The teardown order mirrors the bring-up in qlge_adapter_up().
 */
static int qlge_adapter_down(struct qlge_adapter *qdev)
{
	int i, status = 0;
	qlge_link_off(qdev);
	qlge_cancel_all_work_sync(qdev);
	for (i = 0; i < qdev->rss_ring_count; i++)
		napi_disable(&qdev->rx_ring[i].napi);
	clear_bit(QL_ADAPTER_UP, &qdev->flags);
	qlge_disable_interrupts(qdev);
	qlge_tx_ring_clean(qdev);
	/* Call netif_napi_del() from common point. */
	for (i = 0; i < qdev->rss_ring_count; i++)
		netif_napi_del(&qdev->rx_ring[i].napi);
	status = qlge_adapter_reset(qdev);
	if (status)
		netif_err(qdev, ifdown, qdev->ndev, "reset(func #%d) FAILED!\n",
			  qdev->func);
	qlge_free_rx_buffers(qdev);
	return status;
}
/* Bring the adapter up: initialize hardware, allocate rx buffers,
 * restore carrier/rx-mode/VLAN state, enable interrupts and wake the
 * tx queues.  On initialization failure the chip is soft-reset.
 */
static int qlge_adapter_up(struct qlge_adapter *qdev)
{
	int err = 0;
	err = qlge_adapter_initialize(qdev);
	if (err) {
		netif_info(qdev, ifup, qdev->ndev, "Unable to initialize adapter.\n");
		goto err_init;
	}
	set_bit(QL_ADAPTER_UP, &qdev->flags);
	qlge_alloc_rx_buffers(qdev);
	/* If the port is initialized and the
	 * link is up the turn on the carrier.
	 */
	if ((qlge_read32(qdev, STS) & qdev->port_init) &&
	    (qlge_read32(qdev, STS) & qdev->port_link_up))
		qlge_link_on(qdev);
	/* Restore rx mode. */
	clear_bit(QL_ALLMULTI, &qdev->flags);
	clear_bit(QL_PROMISCUOUS, &qdev->flags);
	qlge_set_multicast_list(qdev->ndev);
	/* Restore vlan setting. */
	qlge_restore_vlan(qdev);
	qlge_enable_interrupts(qdev);
	qlge_enable_all_completion_interrupts(qdev);
	netif_tx_start_all_queues(qdev->ndev);
	return 0;
err_init:
	qlge_adapter_reset(qdev);
	return err;
}
/* Release what qlge_get_adapter_resources() acquired: DMA memory,
 * then the IRQs.
 */
static void qlge_release_adapter_resources(struct qlge_adapter *qdev)
{
	qlge_free_mem_resources(qdev);
	qlge_free_irq(qdev);
}
/* Acquire the adapter's runtime resources: DMA ring/buffer memory
 * first, then the interrupt lines.  Returns 0 on success, -ENOMEM if
 * the memory allocation fails, or the error from qlge_request_irq().
 */
static int qlge_get_adapter_resources(struct qlge_adapter *qdev)
{
	int err;

	err = qlge_alloc_mem_resources(qdev);
	if (err) {
		netif_err(qdev, ifup, qdev->ndev, "Unable to allocate memory.\n");
		return -ENOMEM;
	}

	return qlge_request_irq(qdev);
}
/* ndo_stop handler: wait out any in-flight reset, stop the refill
 * workers, bring the adapter down and release its resources.  Skips
 * hardware teardown entirely after a fatal EEH event, since the device
 * was already unloaded by the error handler.
 */
static int qlge_close(struct net_device *ndev)
{
	struct qlge_adapter *qdev = netdev_to_qdev(ndev);
	int i;
	/* If we hit pci_channel_io_perm_failure
	 * failure condition, then we already
	 * brought the adapter down.
	 */
	if (test_bit(QL_EEH_FATAL, &qdev->flags)) {
		netif_err(qdev, drv, qdev->ndev, "EEH fatal did unload.\n");
		clear_bit(QL_EEH_FATAL, &qdev->flags);
		return 0;
	}
	/*
	 * Wait for device to recover from a reset.
	 * (Rarely happens, but possible.)
	 */
	while (!test_bit(QL_ADAPTER_UP, &qdev->flags))
		msleep(1);
	/* Make sure refill_work doesn't re-enable napi */
	for (i = 0; i < qdev->rss_ring_count; i++)
		cancel_delayed_work_sync(&qdev->rx_ring[i].refill_work);
	qlge_adapter_down(qdev);
	qlge_release_adapter_resources(qdev);
	return 0;
}
/* Derive the large-buffer queue element size (and its page order) from
 * the current MTU: standard MTU (<= 1500) gets the minimum size, jumbo
 * gets the maximum.
 */
static void qlge_set_lb_size(struct qlge_adapter *qdev)
{
	qdev->lbq_buf_size = qdev->ndev->mtu > 1500 ?
		LARGE_BUFFER_MAX_SIZE : LARGE_BUFFER_MIN_SIZE;
	qdev->lbq_buf_order = get_order(qdev->lbq_buf_size);
}
/* Size and lay out the rx/tx ring arrays based on the number of MSI-X
 * vectors actually granted.  The first rss_ring_count rx rings are
 * inbound (RSS) completion queues; the remainder carry only outbound
 * completions, one per tx ring.
 */
static int qlge_configure_rings(struct qlge_adapter *qdev)
{
	int i;
	struct rx_ring *rx_ring;
	struct tx_ring *tx_ring;
	int cpu_cnt = min_t(int, MAX_CPUS, num_online_cpus());
	/* In a perfect world we have one RSS ring for each CPU
	 * and each has it's own vector. To do that we ask for
	 * cpu_cnt vectors. qlge_enable_msix() will adjust the
	 * vector count to what we actually get. We then
	 * allocate an RSS ring for each.
	 * Essentially, we are doing min(cpu_count, msix_vector_count).
	 */
	qdev->intr_count = cpu_cnt;
	qlge_enable_msix(qdev);
	/* Adjust the RSS ring count to the actual vector count. */
	qdev->rss_ring_count = qdev->intr_count;
	qdev->tx_ring_count = cpu_cnt;
	qdev->rx_ring_count = qdev->tx_ring_count + qdev->rss_ring_count;
	for (i = 0; i < qdev->tx_ring_count; i++) {
		tx_ring = &qdev->tx_ring[i];
		memset((void *)tx_ring, 0, sizeof(*tx_ring));
		tx_ring->qdev = qdev;
		tx_ring->wq_id = i;
		tx_ring->wq_len = qdev->tx_ring_size;
		tx_ring->wq_size =
			tx_ring->wq_len * sizeof(struct qlge_ob_mac_iocb_req);
		/*
		 * The completion queue ID for the tx rings start
		 * immediately after the rss rings.
		 */
		tx_ring->cq_id = qdev->rss_ring_count + i;
	}
	for (i = 0; i < qdev->rx_ring_count; i++) {
		rx_ring = &qdev->rx_ring[i];
		memset((void *)rx_ring, 0, sizeof(*rx_ring));
		rx_ring->qdev = qdev;
		rx_ring->cq_id = i;
		rx_ring->cpu = i % cpu_cnt; /* CPU to run handler on. */
		if (i < qdev->rss_ring_count) {
			/*
			 * Inbound (RSS) queues.
			 */
			rx_ring->cq_len = qdev->rx_ring_size;
			rx_ring->cq_size =
				rx_ring->cq_len * sizeof(struct qlge_net_rsp_iocb);
			rx_ring->lbq.type = QLGE_LB;
			rx_ring->sbq.type = QLGE_SB;
			INIT_DELAYED_WORK(&rx_ring->refill_work,
					  &qlge_slow_refill);
		} else {
			/*
			 * Outbound queue handles outbound completions only.
			 */
			/* outbound cq is same size as tx_ring it services. */
			rx_ring->cq_len = qdev->tx_ring_size;
			rx_ring->cq_size =
				rx_ring->cq_len * sizeof(struct qlge_net_rsp_iocb);
		}
	}
	return 0;
}
/* ndo_open handler: soft-reset the chip, size the buffers and rings,
 * acquire memory/IRQ resources and bring the adapter up.  Resources
 * are released on any failure after they were acquired.
 */
static int qlge_open(struct net_device *ndev)
{
	struct qlge_adapter *qdev = netdev_to_qdev(ndev);
	int err = 0;
	err = qlge_adapter_reset(qdev);
	if (err)
		return err;
	qlge_set_lb_size(qdev);
	err = qlge_configure_rings(qdev);
	if (err)
		return err;
	err = qlge_get_adapter_resources(qdev);
	if (err)
		goto error_up;
	err = qlge_adapter_up(qdev);
	if (err)
		goto error_up;
	return err;
error_up:
	qlge_release_adapter_resources(qdev);
	return err;
}
/* Re-size the rx large buffers after an MTU change by cycling the
 * adapter down and back up.  Waits up to ~4 seconds for any in-flight
 * reset to finish first; if the down/up cycle fails the device is
 * closed entirely.
 */
static int qlge_change_rx_buffers(struct qlge_adapter *qdev)
{
	int status;
	/* Wait for an outstanding reset to complete. */
	if (!test_bit(QL_ADAPTER_UP, &qdev->flags)) {
		int i = 4;
		while (--i && !test_bit(QL_ADAPTER_UP, &qdev->flags)) {
			netif_err(qdev, ifup, qdev->ndev,
				  "Waiting for adapter UP...\n");
			ssleep(1);
		}
		if (!i) {
			netif_err(qdev, ifup, qdev->ndev,
				  "Timed out waiting for adapter UP\n");
			return -ETIMEDOUT;
		}
	}
	status = qlge_adapter_down(qdev);
	if (status)
		goto error;
	qlge_set_lb_size(qdev);
	status = qlge_adapter_up(qdev);
	if (status)
		goto error;
	return status;
error:
	netif_alert(qdev, ifup, qdev->ndev,
		    "Driver up/down cycle failed, closing device.\n");
	/* Set the UP flag so qlge_close() does not block waiting for it. */
	set_bit(QL_ADAPTER_UP, &qdev->flags);
	dev_close(qdev->ndev);
	return status;
}
/* ndo_change_mtu handler.  Only the exact 1500 <-> 9000 transitions are
 * accepted; anything else is -EINVAL.  Queues deferred port-config work
 * and, when the interface is running, re-sizes the rx buffers.
 */
static int qlge_change_mtu(struct net_device *ndev, int new_mtu)
{
	struct qlge_adapter *qdev = netdev_to_qdev(ndev);
	int status;
	if (ndev->mtu == 1500 && new_mtu == 9000)
		netif_err(qdev, ifup, qdev->ndev, "Changing to jumbo MTU.\n");
	else if (ndev->mtu == 9000 && new_mtu == 1500)
		netif_err(qdev, ifup, qdev->ndev, "Changing to normal MTU.\n");
	else
		return -EINVAL;
	/* Let the MPI firmware pick up the new frame size later. */
	queue_delayed_work(qdev->workqueue,
			   &qdev->mpi_port_cfg_work, 3 * HZ);
	ndev->mtu = new_mtu;
	if (!netif_running(qdev->ndev))
		return 0;
	status = qlge_change_rx_buffers(qdev);
	if (status) {
		netif_err(qdev, ifup, qdev->ndev,
			  "Changing MTU failed.\n");
	}
	return status;
}
/* ndo_get_stats handler: fold the per-ring rx/tx counters into the
 * netdev's aggregate statistics and return them.
 */
static struct net_device_stats *qlge_get_stats(struct net_device *ndev)
{
	struct qlge_adapter *qdev = netdev_to_qdev(ndev);
	unsigned long pkts = 0, mcast = 0, dropped = 0, errors = 0, bytes = 0;
	int i;

	/* Accumulate the inbound (RSS) ring counters. */
	for (i = 0; i < qdev->rss_ring_count; i++) {
		struct rx_ring *rx_ring = &qdev->rx_ring[i];

		pkts += rx_ring->rx_packets;
		bytes += rx_ring->rx_bytes;
		dropped += rx_ring->rx_dropped;
		errors += rx_ring->rx_errors;
		mcast += rx_ring->rx_multicast;
	}
	ndev->stats.rx_packets = pkts;
	ndev->stats.rx_bytes = bytes;
	ndev->stats.rx_dropped = dropped;
	ndev->stats.rx_errors = errors;
	ndev->stats.multicast = mcast;

	/* Accumulate the outbound ring counters. */
	pkts = 0;
	bytes = 0;
	errors = 0;
	for (i = 0; i < qdev->tx_ring_count; i++) {
		struct tx_ring *tx_ring = &qdev->tx_ring[i];

		pkts += tx_ring->tx_packets;
		bytes += tx_ring->tx_bytes;
		errors += tx_ring->tx_errors;
	}
	ndev->stats.tx_packets = pkts;
	ndev->stats.tx_bytes = bytes;
	ndev->stats.tx_errors = errors;

	return &ndev->stats;
}
/* ndo_set_rx_mode handler: synchronize the promiscuous and
 * all-multicast routing slots, and the multicast CAM filter, with the
 * netdev flags and multicast address list.  All routing-slot updates
 * are done under the routing index semaphore; CAM writes additionally
 * take the MAC address semaphore.
 */
static void qlge_set_multicast_list(struct net_device *ndev)
{
	struct qlge_adapter *qdev = netdev_to_qdev(ndev);
	struct netdev_hw_addr *ha;
	int i, status;
	status = qlge_sem_spinlock(qdev, SEM_RT_IDX_MASK);
	if (status)
		return;
	/*
	 * Set or clear promiscuous mode if a
	 * transition is taking place.
	 */
	if (ndev->flags & IFF_PROMISC) {
		if (!test_bit(QL_PROMISCUOUS, &qdev->flags)) {
			if (qlge_set_routing_reg
			    (qdev, RT_IDX_PROMISCUOUS_SLOT, RT_IDX_VALID, 1)) {
				netif_err(qdev, hw, qdev->ndev,
					  "Failed to set promiscuous mode.\n");
			} else {
				set_bit(QL_PROMISCUOUS, &qdev->flags);
			}
		}
	} else {
		if (test_bit(QL_PROMISCUOUS, &qdev->flags)) {
			if (qlge_set_routing_reg
			    (qdev, RT_IDX_PROMISCUOUS_SLOT, RT_IDX_VALID, 0)) {
				netif_err(qdev, hw, qdev->ndev,
					  "Failed to clear promiscuous mode.\n");
			} else {
				clear_bit(QL_PROMISCUOUS, &qdev->flags);
			}
		}
	}
	/*
	 * Set or clear all multicast mode if a
	 * transition is taking place.
	 */
	if ((ndev->flags & IFF_ALLMULTI) ||
	    (netdev_mc_count(ndev) > MAX_MULTICAST_ENTRIES)) {
		if (!test_bit(QL_ALLMULTI, &qdev->flags)) {
			if (qlge_set_routing_reg
			    (qdev, RT_IDX_ALLMULTI_SLOT, RT_IDX_MCAST, 1)) {
				netif_err(qdev, hw, qdev->ndev,
					  "Failed to set all-multi mode.\n");
			} else {
				set_bit(QL_ALLMULTI, &qdev->flags);
			}
		}
	} else {
		if (test_bit(QL_ALLMULTI, &qdev->flags)) {
			if (qlge_set_routing_reg
			    (qdev, RT_IDX_ALLMULTI_SLOT, RT_IDX_MCAST, 0)) {
				netif_err(qdev, hw, qdev->ndev,
					  "Failed to clear all-multi mode.\n");
			} else {
				clear_bit(QL_ALLMULTI, &qdev->flags);
			}
		}
	}
	/* Load each multicast address into the CAM, then enable the
	 * multicast-match routing slot.
	 */
	if (!netdev_mc_empty(ndev)) {
		status = qlge_sem_spinlock(qdev, SEM_MAC_ADDR_MASK);
		if (status)
			goto exit;
		i = 0;
		netdev_for_each_mc_addr(ha, ndev) {
			if (qlge_set_mac_addr_reg(qdev, (u8 *)ha->addr,
						  MAC_ADDR_TYPE_MULTI_MAC, i)) {
				/* Fixed log message: was "loadmulticast". */
				netif_err(qdev, hw, qdev->ndev,
					  "Failed to load multicast address.\n");
				qlge_sem_unlock(qdev, SEM_MAC_ADDR_MASK);
				goto exit;
			}
			i++;
		}
		qlge_sem_unlock(qdev, SEM_MAC_ADDR_MASK);
		if (qlge_set_routing_reg
		    (qdev, RT_IDX_MCAST_MATCH_SLOT, RT_IDX_MCAST_MATCH, 1)) {
			netif_err(qdev, hw, qdev->ndev,
				  "Failed to set multicast match mode.\n");
		} else {
			set_bit(QL_ALLMULTI, &qdev->flags);
		}
	}
exit:
	qlge_sem_unlock(qdev, SEM_RT_IDX_MASK);
}
/* ndo_set_mac_address handler: validate the new address, store it on
 * the netdev and in the driver's local copy, then write it into the
 * hardware CAM under the MAC address semaphore.
 */
static int qlge_set_mac_address(struct net_device *ndev, void *p)
{
	struct qlge_adapter *qdev = netdev_to_qdev(ndev);
	struct sockaddr *addr = p;
	int status;
	if (!is_valid_ether_addr(addr->sa_data))
		return -EADDRNOTAVAIL;
	eth_hw_addr_set(ndev, addr->sa_data);
	/* Update local copy of current mac address. */
	memcpy(qdev->current_mac_addr, ndev->dev_addr, ndev->addr_len);
	status = qlge_sem_spinlock(qdev, SEM_MAC_ADDR_MASK);
	if (status)
		return status;
	status = qlge_set_mac_addr_reg(qdev, (const u8 *)ndev->dev_addr,
				       MAC_ADDR_TYPE_CAM_MAC,
				       qdev->func * MAX_CQ);
	if (status)
		netif_err(qdev, hw, qdev->ndev, "Failed to load MAC address.\n");
	qlge_sem_unlock(qdev, SEM_MAC_ADDR_MASK);
	return status;
}
/* ndo_tx_timeout handler: treat a stalled tx queue as an ASIC error
 * and schedule the asynchronous recovery path.
 */
static void qlge_tx_timeout(struct net_device *ndev, unsigned int txqueue)
{
	struct qlge_adapter *qdev = netdev_to_qdev(ndev);
	qlge_queue_asic_error(qdev);
}
/* Deferred ASIC recovery: under RTNL, cycle the adapter down and back
 * up, then re-apply the rx mode.  If the cycle fails, the device is
 * closed.
 */
static void qlge_asic_reset_work(struct work_struct *work)
{
	struct qlge_adapter *qdev =
		container_of(work, struct qlge_adapter, asic_reset_work.work);
	int status;
	rtnl_lock();
	status = qlge_adapter_down(qdev);
	if (status)
		goto error;
	status = qlge_adapter_up(qdev);
	if (status)
		goto error;
	/* Restore rx mode. */
	clear_bit(QL_ALLMULTI, &qdev->flags);
	clear_bit(QL_PROMISCUOUS, &qdev->flags);
	qlge_set_multicast_list(qdev->ndev);
	rtnl_unlock();
	return;
error:
	netif_alert(qdev, ifup, qdev->ndev,
		    "Driver up/down cycle failed, closing device\n");
	/* Set the UP flag so qlge_close() does not block waiting for it. */
	set_bit(QL_ADAPTER_UP, &qdev->flags);
	dev_close(qdev->ndev);
	rtnl_unlock();
}
/* Per-chip operation tables: flash-parameter readout and port init
 * differ between the 8012 and 8000 devices.
 */
static const struct nic_operations qla8012_nic_ops = {
	.get_flash		= qlge_get_8012_flash_params,
	.port_initialize	= qlge_8012_port_initialize,
};
static const struct nic_operations qla8000_nic_ops = {
	.get_flash		= qlge_get_8000_flash_params,
	.port_initialize	= qlge_8000_port_initialize,
};
/* Find the pcie function number for the other NIC
 * on this chip. Since both NIC functions share a
 * common firmware we have the lowest enabled function
 * do any common work. Examples would be resetting
 * after a fatal firmware error, or doing a firmware
 * coredump.
 */
static int qlge_get_alt_pcie_func(struct qlge_adapter *qdev)
{
	int status = 0;
	u32 temp;
	u32 nic_func1, nic_func2;
	status = qlge_read_mpi_reg(qdev, MPI_TEST_FUNC_PORT_CFG,
				   &temp);
	if (status)
		return status;
	/* Extract both NIC function numbers from the port config word. */
	nic_func1 = ((temp >> MPI_TEST_NIC1_FUNC_SHIFT) &
		     MPI_TEST_NIC_FUNC_MASK);
	nic_func2 = ((temp >> MPI_TEST_NIC2_FUNC_SHIFT) &
		     MPI_TEST_NIC_FUNC_MASK);
	/* Our own function must be one of the two; the other is "alt". */
	if (qdev->func == nic_func1)
		qdev->alt_func = nic_func2;
	else if (qdev->func == nic_func2)
		qdev->alt_func = nic_func1;
	else
		status = -EIO;
	return status;
}
/* Discover which PCI function/port this instance is, and set up the
 * port-dependent register masks, mailbox addresses, chip revision and
 * the per-device operations table.
 */
static int qlge_get_board_info(struct qlge_adapter *qdev)
{
	int status;
	qdev->func =
		(qlge_read32(qdev, STS) & STS_FUNC_ID_MASK) >> STS_FUNC_ID_SHIFT;
	if (qdev->func > 3)
		return -EIO;
	status = qlge_get_alt_pcie_func(qdev);
	if (status)
		return status;
	/* The lower-numbered function of the pair is port 0. */
	qdev->port = (qdev->func < qdev->alt_func) ? 0 : 1;
	if (qdev->port) {
		qdev->xg_sem_mask = SEM_XGMAC1_MASK;
		qdev->port_link_up = STS_PL1;
		qdev->port_init = STS_PI1;
		qdev->mailbox_in = PROC_ADDR_MPI_RISC | PROC_ADDR_FUNC2_MBI;
		qdev->mailbox_out = PROC_ADDR_MPI_RISC | PROC_ADDR_FUNC2_MBO;
	} else {
		qdev->xg_sem_mask = SEM_XGMAC0_MASK;
		qdev->port_link_up = STS_PL0;
		qdev->port_init = STS_PI0;
		qdev->mailbox_in = PROC_ADDR_MPI_RISC | PROC_ADDR_FUNC0_MBI;
		qdev->mailbox_out = PROC_ADDR_MPI_RISC | PROC_ADDR_FUNC0_MBO;
	}
	qdev->chip_rev_id = qlge_read32(qdev, REV_ID);
	qdev->device_id = qdev->pdev->device;
	if (qdev->device_id == QLGE_DEVICE_ID_8012)
		qdev->nic_ops = &qla8012_nic_ops;
	else if (qdev->device_id == QLGE_DEVICE_ID_8000)
		qdev->nic_ops = &qla8000_nic_ops;
	return status;
}
/* Tear down everything qlge_init_device() set up: workqueue, register
 * and doorbell mappings, the MPI coredump buffer, and the PCI regions.
 */
static void qlge_release_all(struct pci_dev *pdev)
{
	struct qlge_adapter *qdev = pci_get_drvdata(pdev);
	if (qdev->workqueue) {
		destroy_workqueue(qdev->workqueue);
		qdev->workqueue = NULL;
	}
	if (qdev->reg_base)
		iounmap(qdev->reg_base);
	if (qdev->doorbell_area)
		iounmap(qdev->doorbell_area);
	vfree(qdev->mpi_coredump);
	pci_release_regions(pdev);
}
/* One-time device initialization at probe: enable and configure the
 * PCI device, set up DMA masks, map the register and doorbell BARs,
 * read board identity, optionally allocate the MPI coredump buffer,
 * read the flash, set default ring/coalescing parameters and create
 * the driver's ordered workqueue.  All failures unwind via the goto
 * cleanup chain at the bottom.
 *
 * @cards_found: number of devices probed so far; the banner is printed
 *               only for the first one.
 */
static int qlge_init_device(struct pci_dev *pdev, struct qlge_adapter *qdev,
			    int cards_found)
{
	struct net_device *ndev = qdev->ndev;
	int err = 0;
	err = pci_enable_device(pdev);
	if (err) {
		dev_err(&pdev->dev, "PCI device enable failed.\n");
		return err;
	}
	qdev->pdev = pdev;
	pci_set_drvdata(pdev, qdev);
	/* Set PCIe read request size */
	err = pcie_set_readrq(pdev, 4096);
	if (err) {
		dev_err(&pdev->dev, "Set readrq failed.\n");
		goto err_disable_pci;
	}
	err = pci_request_regions(pdev, DRV_NAME);
	if (err) {
		dev_err(&pdev->dev, "PCI region request failed.\n");
		goto err_disable_pci;
	}
	pci_set_master(pdev);
	/* Prefer 64-bit DMA; fall back to a 32-bit mask. */
	if (!dma_set_mask(&pdev->dev, DMA_BIT_MASK(64))) {
		set_bit(QL_DMA64, &qdev->flags);
		err = dma_set_coherent_mask(&pdev->dev, DMA_BIT_MASK(64));
	} else {
		err = dma_set_mask(&pdev->dev, DMA_BIT_MASK(32));
		if (!err)
			err = dma_set_coherent_mask(&pdev->dev,
						    DMA_BIT_MASK(32));
	}
	if (err) {
		dev_err(&pdev->dev, "No usable DMA configuration.\n");
		goto err_release_pci;
	}
	/* Set PCIe reset type for EEH to fundamental. */
	pdev->needs_freset = 1;
	pci_save_state(pdev);
	/* BAR 1: control/status registers. */
	qdev->reg_base =
		ioremap(pci_resource_start(pdev, 1), pci_resource_len(pdev, 1));
	if (!qdev->reg_base) {
		dev_err(&pdev->dev, "Register mapping failed.\n");
		err = -ENOMEM;
		goto err_release_pci;
	}
	/* BAR 3: doorbell registers. */
	qdev->doorbell_area_size = pci_resource_len(pdev, 3);
	qdev->doorbell_area =
		ioremap(pci_resource_start(pdev, 3), pci_resource_len(pdev, 3));
	if (!qdev->doorbell_area) {
		dev_err(&pdev->dev, "Doorbell register mapping failed.\n");
		err = -ENOMEM;
		goto err_iounmap_base;
	}
	err = qlge_get_board_info(qdev);
	if (err) {
		dev_err(&pdev->dev, "Register access failed.\n");
		err = -EIO;
		goto err_iounmap_doorbell;
	}
	qdev->msg_enable = netif_msg_init(debug, default_msg);
	spin_lock_init(&qdev->stats_lock);
	if (qlge_mpi_coredump) {
		qdev->mpi_coredump =
			vmalloc(sizeof(struct qlge_mpi_coredump));
		if (!qdev->mpi_coredump) {
			err = -ENOMEM;
			goto err_iounmap_doorbell;
		}
		if (qlge_force_coredump)
			set_bit(QL_FRC_COREDUMP, &qdev->flags);
	}
	/* make sure the EEPROM is good */
	err = qdev->nic_ops->get_flash(qdev);
	if (err) {
		dev_err(&pdev->dev, "Invalid FLASH.\n");
		goto err_free_mpi_coredump;
	}
	/* Keep local copy of current mac address. */
	memcpy(qdev->current_mac_addr, ndev->dev_addr, ndev->addr_len);
	/* Set up the default ring sizes. */
	qdev->tx_ring_size = NUM_TX_RING_ENTRIES;
	qdev->rx_ring_size = NUM_RX_RING_ENTRIES;
	/* Set up the coalescing parameters. */
	qdev->rx_coalesce_usecs = DFLT_COALESCE_WAIT;
	qdev->tx_coalesce_usecs = DFLT_COALESCE_WAIT;
	qdev->rx_max_coalesced_frames = DFLT_INTER_FRAME_WAIT;
	qdev->tx_max_coalesced_frames = DFLT_INTER_FRAME_WAIT;
	/*
	 * Set up the operating parameters.
	 */
	qdev->workqueue = alloc_ordered_workqueue("%s", WQ_MEM_RECLAIM,
						  ndev->name);
	if (!qdev->workqueue) {
		err = -ENOMEM;
		goto err_free_mpi_coredump;
	}
	INIT_DELAYED_WORK(&qdev->asic_reset_work, qlge_asic_reset_work);
	INIT_DELAYED_WORK(&qdev->mpi_reset_work, qlge_mpi_reset_work);
	INIT_DELAYED_WORK(&qdev->mpi_work, qlge_mpi_work);
	INIT_DELAYED_WORK(&qdev->mpi_port_cfg_work, qlge_mpi_port_cfg_work);
	INIT_DELAYED_WORK(&qdev->mpi_idc_work, qlge_mpi_idc_work);
	init_completion(&qdev->ide_completion);
	mutex_init(&qdev->mpi_mutex);
	if (!cards_found) {
		dev_info(&pdev->dev, "%s\n", DRV_STRING);
		dev_info(&pdev->dev, "Driver name: %s, Version: %s.\n",
			 DRV_NAME, DRV_VERSION);
	}
	return 0;
err_free_mpi_coredump:
	vfree(qdev->mpi_coredump);
err_iounmap_doorbell:
	iounmap(qdev->doorbell_area);
err_iounmap_base:
	iounmap(qdev->reg_base);
err_release_pci:
	pci_release_regions(pdev);
err_disable_pci:
	pci_disable_device(pdev);
	return err;
}
/* net_device callback table for the qlge interface. */
static const struct net_device_ops qlge_netdev_ops = {
	.ndo_open		= qlge_open,
	.ndo_stop		= qlge_close,
	.ndo_start_xmit		= qlge_send,
	.ndo_change_mtu		= qlge_change_mtu,
	.ndo_get_stats		= qlge_get_stats,
	.ndo_set_rx_mode	= qlge_set_multicast_list,
	.ndo_set_mac_address	= qlge_set_mac_address,
	.ndo_validate_addr	= eth_validate_addr,
	.ndo_tx_timeout		= qlge_tx_timeout,
	.ndo_set_features	= qlge_set_features,
	.ndo_vlan_rx_add_vid	= qlge_vlan_rx_add_vid,
	.ndo_vlan_rx_kill_vid	= qlge_vlan_rx_kill_vid,
};
/* Deferrable watchdog: read STS every ~5s to detect a dead PCI bus
 * (EEH).  If the channel is offline, log and stop re-arming; otherwise
 * reschedule.
 */
static void qlge_timer(struct timer_list *t)
{
	struct qlge_adapter *qdev = from_timer(qdev, t, timer);
	u32 var = 0;
	var = qlge_read32(qdev, STS);
	if (pci_channel_offline(qdev->pdev)) {
		netif_err(qdev, ifup, qdev->ndev, "EEH STS = 0x%.08x.\n", var);
		return;
	}
	mod_timer(&qdev->timer, jiffies + (5 * HZ));
}
static const struct devlink_ops qlge_devlink_ops;
/* PCI probe: allocate the devlink-backed adapter and the multiqueue
 * netdev, initialize the device, advertise features, register the
 * netdev and health reporters, start the EEH watchdog timer and
 * register the devlink instance.  Failures unwind in reverse order.
 */
static int qlge_probe(struct pci_dev *pdev,
		      const struct pci_device_id *pci_entry)
{
	struct qlge_netdev_priv *ndev_priv;
	struct qlge_adapter *qdev = NULL;
	struct net_device *ndev = NULL;
	struct devlink *devlink;
	static int cards_found;
	int err;
	devlink = devlink_alloc(&qlge_devlink_ops, sizeof(struct qlge_adapter),
				&pdev->dev);
	if (!devlink)
		return -ENOMEM;
	qdev = devlink_priv(devlink);
	ndev = alloc_etherdev_mq(sizeof(struct qlge_netdev_priv),
				 min(MAX_CPUS,
				     netif_get_num_default_rss_queues()));
	if (!ndev) {
		err = -ENOMEM;
		goto devlink_free;
	}
	/* Cross-link the netdev private area and the adapter. */
	ndev_priv = netdev_priv(ndev);
	ndev_priv->qdev = qdev;
	ndev_priv->ndev = ndev;
	qdev->ndev = ndev;
	err = qlge_init_device(pdev, qdev, cards_found);
	if (err < 0)
		goto netdev_free;
	SET_NETDEV_DEV(ndev, &pdev->dev);
	ndev->hw_features = NETIF_F_SG |
		NETIF_F_IP_CSUM |
		NETIF_F_TSO |
		NETIF_F_TSO_ECN |
		NETIF_F_HW_VLAN_CTAG_TX |
		NETIF_F_HW_VLAN_CTAG_RX |
		NETIF_F_HW_VLAN_CTAG_FILTER |
		NETIF_F_RXCSUM;
	ndev->features = ndev->hw_features;
	ndev->vlan_features = ndev->hw_features;
	/* vlan gets same features (except vlan filter) */
	ndev->vlan_features &= ~(NETIF_F_HW_VLAN_CTAG_FILTER |
				 NETIF_F_HW_VLAN_CTAG_TX |
				 NETIF_F_HW_VLAN_CTAG_RX);
	if (test_bit(QL_DMA64, &qdev->flags))
		ndev->features |= NETIF_F_HIGHDMA;
	/*
	 * Set up net_device structure.
	 */
	ndev->tx_queue_len = qdev->tx_ring_size;
	ndev->irq = pdev->irq;
	ndev->netdev_ops = &qlge_netdev_ops;
	ndev->ethtool_ops = &qlge_ethtool_ops;
	ndev->watchdog_timeo = 10 * HZ;
	/* MTU range: this driver only supports 1500 or 9000, so this only
	 * filters out values above or below, and we'll rely on
	 * qlge_change_mtu to make sure only 1500 or 9000 are allowed
	 */
	ndev->min_mtu = ETH_DATA_LEN;
	ndev->max_mtu = 9000;
	err = register_netdev(ndev);
	if (err) {
		dev_err(&pdev->dev, "net device registration failed.\n");
		goto cleanup_pdev;
	}
	err = qlge_health_create_reporters(qdev);
	if (err)
		goto unregister_netdev;
	/* Start up the timer to trigger EEH if
	 * the bus goes dead
	 */
	timer_setup(&qdev->timer, qlge_timer, TIMER_DEFERRABLE);
	mod_timer(&qdev->timer, jiffies + (5 * HZ));
	qlge_link_off(qdev);
	qlge_display_dev_info(ndev);
	atomic_set(&qdev->lb_count, 0);
	cards_found++;
	devlink_register(devlink);
	return 0;
unregister_netdev:
	unregister_netdev(ndev);
cleanup_pdev:
	qlge_release_all(pdev);
	pci_disable_device(pdev);
netdev_free:
	free_netdev(ndev);
devlink_free:
	devlink_free(devlink);
	return err;
}
/* Loopback-test transmit entry point: forwards to the normal xmit path. */
netdev_tx_t qlge_lb_send(struct sk_buff *skb, struct net_device *ndev)
{
	return qlge_send(skb, ndev);
}
/* Loopback-test receive cleanup: forwards to the inbound ring cleaner. */
int qlge_clean_lb_rx_ring(struct rx_ring *rx_ring, int budget)
{
	return qlge_clean_inbound_rx_ring(rx_ring, budget);
}
/* PCI remove: unwind qlge_probe() in reverse — devlink, timer, work
 * items, netdev, mappings/regions, PCI device, health reporter, then
 * the devlink/netdev allocations.
 */
static void qlge_remove(struct pci_dev *pdev)
{
	struct qlge_adapter *qdev = pci_get_drvdata(pdev);
	struct net_device *ndev = qdev->ndev;
	struct devlink *devlink = priv_to_devlink(qdev);
	devlink_unregister(devlink);
	del_timer_sync(&qdev->timer);
	qlge_cancel_all_work_sync(qdev);
	unregister_netdev(ndev);
	qlge_release_all(pdev);
	pci_disable_device(pdev);
	devlink_health_reporter_destroy(qdev->reporter);
	devlink_free(devlink);
	free_netdev(ndev);
}
/* Clean up resources without touching hardware.
 *
 * Used by the PCI error handlers when the device may be inaccessible:
 * stop the queues, cancel workers, tear down NAPI and free buffers,
 * IRQs and DMA memory — but issue no register accesses.
 */
static void qlge_eeh_close(struct net_device *ndev)
{
	struct qlge_adapter *qdev = netdev_to_qdev(ndev);
	int i;
	if (netif_carrier_ok(ndev)) {
		netif_carrier_off(ndev);
		netif_stop_queue(ndev);
	}
	/* Disabling the timer */
	qlge_cancel_all_work_sync(qdev);
	for (i = 0; i < qdev->rss_ring_count; i++)
		netif_napi_del(&qdev->rx_ring[i].napi);
	clear_bit(QL_ADAPTER_UP, &qdev->flags);
	qlge_tx_ring_clean(qdev);
	qlge_free_rx_buffers(qdev);
	qlge_release_adapter_resources(qdev);
}
/*
 * This callback is called by the PCI subsystem whenever
 * a PCI bus error is detected.
 *
 * io_normal: recoverable without reset.  io_frozen: detach, tear down
 * without register access and request a slot reset.  perm_failure:
 * mark the adapter fatally failed so qlge_close() skips hardware
 * teardown, and disconnect.
 */
static pci_ers_result_t qlge_io_error_detected(struct pci_dev *pdev,
					       pci_channel_state_t state)
{
	struct qlge_adapter *qdev = pci_get_drvdata(pdev);
	struct net_device *ndev = qdev->ndev;
	switch (state) {
	case pci_channel_io_normal:
		return PCI_ERS_RESULT_CAN_RECOVER;
	case pci_channel_io_frozen:
		netif_device_detach(ndev);
		del_timer_sync(&qdev->timer);
		if (netif_running(ndev))
			qlge_eeh_close(ndev);
		pci_disable_device(pdev);
		return PCI_ERS_RESULT_NEED_RESET;
	case pci_channel_io_perm_failure:
		dev_err(&pdev->dev,
			"%s: pci_channel_io_perm_failure.\n", __func__);
		del_timer_sync(&qdev->timer);
		qlge_eeh_close(ndev);
		set_bit(QL_EEH_FATAL, &qdev->flags);
		return PCI_ERS_RESULT_DISCONNECT;
	}
	/* Request a slot reset. */
	return PCI_ERS_RESULT_NEED_RESET;
}
/*
 * This callback is called after the PCI bus has been reset.
 * Basically, this tries to restart the card from scratch:
 * re-enable the PCI device, restore its state and soft-reset the
 * chip.  A failure here marks the adapter as fatally failed.
 */
static pci_ers_result_t qlge_io_slot_reset(struct pci_dev *pdev)
{
	struct qlge_adapter *qdev = pci_get_drvdata(pdev);
	pdev->error_state = pci_channel_io_normal;
	pci_restore_state(pdev);
	if (pci_enable_device(pdev)) {
		netif_err(qdev, ifup, qdev->ndev,
			  "Cannot re-enable PCI device after reset.\n");
		return PCI_ERS_RESULT_DISCONNECT;
	}
	pci_set_master(pdev);
	if (qlge_adapter_reset(qdev)) {
		netif_err(qdev, drv, qdev->ndev, "reset FAILED!\n");
		set_bit(QL_EEH_FATAL, &qdev->flags);
		return PCI_ERS_RESULT_DISCONNECT;
	}
	return PCI_ERS_RESULT_RECOVERED;
}
/* Final step of PCI error recovery: reopen the interface if it was
 * running, restart the EEH watchdog timer and re-attach the netdev.
 */
static void qlge_io_resume(struct pci_dev *pdev)
{
	struct qlge_adapter *qdev = pci_get_drvdata(pdev);
	struct net_device *ndev = qdev->ndev;
	int err = 0;
	if (netif_running(ndev)) {
		err = qlge_open(ndev);
		if (err) {
			netif_err(qdev, ifup, qdev->ndev,
				  "Device initialization failed after reset.\n");
			return;
		}
	} else {
		netif_err(qdev, ifup, qdev->ndev,
			  "Device was not running prior to EEH.\n");
	}
	mod_timer(&qdev->timer, jiffies + (5 * HZ));
	netif_device_attach(ndev);
}
/* PCI/EEH error recovery callbacks. */
static const struct pci_error_handlers qlge_err_handler = {
	.error_detected = qlge_io_error_detected,
	.slot_reset = qlge_io_slot_reset,
	.resume = qlge_io_resume,
};
/* PM suspend handler (also reached from qlge_shutdown()): detach the
 * netdev, stop the EEH watchdog timer, bring the adapter down if the
 * interface is running, then program Wake-on-LAN.
 */
static int __maybe_unused qlge_suspend(struct device *dev_d)
{
	struct pci_dev *pdev = to_pci_dev(dev_d);
	struct qlge_adapter *qdev;
	struct net_device *ndev;
	int err;
	qdev = pci_get_drvdata(pdev);
	ndev = qdev->ndev;
	netif_device_detach(ndev);
	del_timer_sync(&qdev->timer);
	if (netif_running(ndev)) {
		err = qlge_adapter_down(qdev);
		/* Bug fix: was "if (!err) return err;", which returned
		 * early on *success* (skipping WOL programming) and fell
		 * through on failure (masking the error).  Bail out only
		 * when bringing the adapter down failed, mirroring the
		 * error handling in qlge_resume().
		 */
		if (err)
			return err;
	}
	qlge_wol(qdev);
	return 0;
}
/* PM resume handler: restore bus mastering, disable the wakeup source,
 * bring the adapter back up if the interface was running, restart the
 * EEH watchdog timer and re-attach the netdev.
 */
static int __maybe_unused qlge_resume(struct device *dev_d)
{
	struct pci_dev *pdev = to_pci_dev(dev_d);
	struct qlge_adapter *qdev;
	struct net_device *ndev;
	int err;
	qdev = pci_get_drvdata(pdev);
	ndev = qdev->ndev;
	pci_set_master(pdev);
	device_wakeup_disable(dev_d);
	if (netif_running(ndev)) {
		err = qlge_adapter_up(qdev);
		if (err)
			return err;
	}
	mod_timer(&qdev->timer, jiffies + (5 * HZ));
	netif_device_attach(ndev);
	return 0;
}
/* Shutdown reuses the suspend path to quiesce the device and arm WOL. */
static void qlge_shutdown(struct pci_dev *pdev)
{
	qlge_suspend(&pdev->dev);
}
static SIMPLE_DEV_PM_OPS(qlge_pm_ops, qlge_suspend, qlge_resume);
/* PCI driver registration. */
static struct pci_driver qlge_driver = {
	.name = DRV_NAME,
	.id_table = qlge_pci_tbl,
	.probe = qlge_probe,
	.remove = qlge_remove,
	.driver.pm = &qlge_pm_ops,
	.shutdown = qlge_shutdown,
	.err_handler = &qlge_err_handler
};
module_pci_driver(qlge_driver);
| linux-master | drivers/staging/qlge/qlge_main.c |
// SPDX-License-Identifier: GPL-2.0
#include <linux/kernel.h>
#include <linux/types.h>
#include <linux/module.h>
#include <linux/list.h>
#include <linux/pci.h>
#include <linux/dma-mapping.h>
#include <linux/pagemap.h>
#include <linux/sched.h>
#include <linux/dmapool.h>
#include <linux/mempool.h>
#include <linux/spinlock.h>
#include <linux/kthread.h>
#include <linux/interrupt.h>
#include <linux/errno.h>
#include <linux/ioport.h>
#include <linux/in.h>
#include <linux/ip.h>
#include <linux/ipv6.h>
#include <net/ipv6.h>
#include <linux/tcp.h>
#include <linux/udp.h>
#include <linux/if_arp.h>
#include <linux/if_ether.h>
#include <linux/netdevice.h>
#include <linux/etherdevice.h>
#include <linux/ethtool.h>
#include <linux/skbuff.h>
#include <linux/rtnetlink.h>
#include <linux/if_vlan.h>
#include <linux/delay.h>
#include <linux/mm.h>
#include <linux/vmalloc.h>
#include "qlge.h"
/* Describes one ethtool statistic: the display name plus the size and
 * byte offset of the backing counter inside struct qlge_adapter.
 */
struct qlge_stats {
	char stat_string[ETH_GSTRING_LEN];	/* name reported to ethtool */
	int sizeof_stat;			/* sizeof() the counter field */
	int stat_offset;			/* offsetof() within the adapter */
};

/* Helpers to fill sizeof_stat/stat_offset for an adapter member. */
#define QL_SIZEOF(m) sizeof_field(struct qlge_adapter, m)
#define QL_OFF(m) offsetof(struct qlge_adapter, m)
/* Table mapping each ethtool statistic name to its counter in
 * qdev->nic_stats.  Entry order matches the layout filled in by
 * qlge_update_stats() from the XGMAC register blocks.
 */
static const struct qlge_stats qlge_gstrings_stats[] = {
	{"tx_pkts", QL_SIZEOF(nic_stats.tx_pkts), QL_OFF(nic_stats.tx_pkts)},
	{"tx_bytes", QL_SIZEOF(nic_stats.tx_bytes), QL_OFF(nic_stats.tx_bytes)},
	{"tx_mcast_pkts", QL_SIZEOF(nic_stats.tx_mcast_pkts),
	 QL_OFF(nic_stats.tx_mcast_pkts)},
	{"tx_bcast_pkts", QL_SIZEOF(nic_stats.tx_bcast_pkts),
	 QL_OFF(nic_stats.tx_bcast_pkts)},
	{"tx_ucast_pkts", QL_SIZEOF(nic_stats.tx_ucast_pkts),
	 QL_OFF(nic_stats.tx_ucast_pkts)},
	{"tx_ctl_pkts", QL_SIZEOF(nic_stats.tx_ctl_pkts),
	 QL_OFF(nic_stats.tx_ctl_pkts)},
	{"tx_pause_pkts", QL_SIZEOF(nic_stats.tx_pause_pkts),
	 QL_OFF(nic_stats.tx_pause_pkts)},
	{"tx_64_pkts", QL_SIZEOF(nic_stats.tx_64_pkt),
	 QL_OFF(nic_stats.tx_64_pkt)},
	{"tx_65_to_127_pkts", QL_SIZEOF(nic_stats.tx_65_to_127_pkt),
	 QL_OFF(nic_stats.tx_65_to_127_pkt)},
	{"tx_128_to_255_pkts", QL_SIZEOF(nic_stats.tx_128_to_255_pkt),
	 QL_OFF(nic_stats.tx_128_to_255_pkt)},
	{"tx_256_511_pkts", QL_SIZEOF(nic_stats.tx_256_511_pkt),
	 QL_OFF(nic_stats.tx_256_511_pkt)},
	{"tx_512_to_1023_pkts", QL_SIZEOF(nic_stats.tx_512_to_1023_pkt),
	 QL_OFF(nic_stats.tx_512_to_1023_pkt)},
	{"tx_1024_to_1518_pkts", QL_SIZEOF(nic_stats.tx_1024_to_1518_pkt),
	 QL_OFF(nic_stats.tx_1024_to_1518_pkt)},
	{"tx_1519_to_max_pkts", QL_SIZEOF(nic_stats.tx_1519_to_max_pkt),
	 QL_OFF(nic_stats.tx_1519_to_max_pkt)},
	{"tx_undersize_pkts", QL_SIZEOF(nic_stats.tx_undersize_pkt),
	 QL_OFF(nic_stats.tx_undersize_pkt)},
	{"tx_oversize_pkts", QL_SIZEOF(nic_stats.tx_oversize_pkt),
	 QL_OFF(nic_stats.tx_oversize_pkt)},
	{"rx_bytes", QL_SIZEOF(nic_stats.rx_bytes), QL_OFF(nic_stats.rx_bytes)},
	{"rx_bytes_ok", QL_SIZEOF(nic_stats.rx_bytes_ok),
	 QL_OFF(nic_stats.rx_bytes_ok)},
	{"rx_pkts", QL_SIZEOF(nic_stats.rx_pkts), QL_OFF(nic_stats.rx_pkts)},
	{"rx_pkts_ok", QL_SIZEOF(nic_stats.rx_pkts_ok),
	 QL_OFF(nic_stats.rx_pkts_ok)},
	{"rx_bcast_pkts", QL_SIZEOF(nic_stats.rx_bcast_pkts),
	 QL_OFF(nic_stats.rx_bcast_pkts)},
	{"rx_mcast_pkts", QL_SIZEOF(nic_stats.rx_mcast_pkts),
	 QL_OFF(nic_stats.rx_mcast_pkts)},
	{"rx_ucast_pkts", QL_SIZEOF(nic_stats.rx_ucast_pkts),
	 QL_OFF(nic_stats.rx_ucast_pkts)},
	{"rx_undersize_pkts", QL_SIZEOF(nic_stats.rx_undersize_pkts),
	 QL_OFF(nic_stats.rx_undersize_pkts)},
	{"rx_oversize_pkts", QL_SIZEOF(nic_stats.rx_oversize_pkts),
	 QL_OFF(nic_stats.rx_oversize_pkts)},
	{"rx_jabber_pkts", QL_SIZEOF(nic_stats.rx_jabber_pkts),
	 QL_OFF(nic_stats.rx_jabber_pkts)},
	{"rx_undersize_fcerr_pkts",
	 QL_SIZEOF(nic_stats.rx_undersize_fcerr_pkts),
	 QL_OFF(nic_stats.rx_undersize_fcerr_pkts)},
	{"rx_drop_events", QL_SIZEOF(nic_stats.rx_drop_events),
	 QL_OFF(nic_stats.rx_drop_events)},
	{"rx_fcerr_pkts", QL_SIZEOF(nic_stats.rx_fcerr_pkts),
	 QL_OFF(nic_stats.rx_fcerr_pkts)},
	{"rx_align_err", QL_SIZEOF(nic_stats.rx_align_err),
	 QL_OFF(nic_stats.rx_align_err)},
	{"rx_symbol_err", QL_SIZEOF(nic_stats.rx_symbol_err),
	 QL_OFF(nic_stats.rx_symbol_err)},
	{"rx_mac_err", QL_SIZEOF(nic_stats.rx_mac_err),
	 QL_OFF(nic_stats.rx_mac_err)},
	{"rx_ctl_pkts", QL_SIZEOF(nic_stats.rx_ctl_pkts),
	 QL_OFF(nic_stats.rx_ctl_pkts)},
	{"rx_pause_pkts", QL_SIZEOF(nic_stats.rx_pause_pkts),
	 QL_OFF(nic_stats.rx_pause_pkts)},
	{"rx_64_pkts", QL_SIZEOF(nic_stats.rx_64_pkts),
	 QL_OFF(nic_stats.rx_64_pkts)},
	{"rx_65_to_127_pkts", QL_SIZEOF(nic_stats.rx_65_to_127_pkts),
	 QL_OFF(nic_stats.rx_65_to_127_pkts)},
	{"rx_128_255_pkts", QL_SIZEOF(nic_stats.rx_128_255_pkts),
	 QL_OFF(nic_stats.rx_128_255_pkts)},
	{"rx_256_511_pkts", QL_SIZEOF(nic_stats.rx_256_511_pkts),
	 QL_OFF(nic_stats.rx_256_511_pkts)},
	{"rx_512_to_1023_pkts", QL_SIZEOF(nic_stats.rx_512_to_1023_pkts),
	 QL_OFF(nic_stats.rx_512_to_1023_pkts)},
	{"rx_1024_to_1518_pkts", QL_SIZEOF(nic_stats.rx_1024_to_1518_pkts),
	 QL_OFF(nic_stats.rx_1024_to_1518_pkts)},
	{"rx_1519_to_max_pkts", QL_SIZEOF(nic_stats.rx_1519_to_max_pkts),
	 QL_OFF(nic_stats.rx_1519_to_max_pkts)},
	{"rx_len_err_pkts", QL_SIZEOF(nic_stats.rx_len_err_pkts),
	 QL_OFF(nic_stats.rx_len_err_pkts)},
	{"rx_code_err", QL_SIZEOF(nic_stats.rx_code_err),
	 QL_OFF(nic_stats.rx_code_err)},
	{"rx_oversize_err", QL_SIZEOF(nic_stats.rx_oversize_err),
	 QL_OFF(nic_stats.rx_oversize_err)},
	{"rx_undersize_err", QL_SIZEOF(nic_stats.rx_undersize_err),
	 QL_OFF(nic_stats.rx_undersize_err)},
	{"rx_preamble_err", QL_SIZEOF(nic_stats.rx_preamble_err),
	 QL_OFF(nic_stats.rx_preamble_err)},
	{"rx_frame_len_err", QL_SIZEOF(nic_stats.rx_frame_len_err),
	 QL_OFF(nic_stats.rx_frame_len_err)},
	{"rx_crc_err", QL_SIZEOF(nic_stats.rx_crc_err),
	 QL_OFF(nic_stats.rx_crc_err)},
	{"rx_err_count", QL_SIZEOF(nic_stats.rx_err_count),
	 QL_OFF(nic_stats.rx_err_count)},
	{"tx_cbfc_pause_frames0", QL_SIZEOF(nic_stats.tx_cbfc_pause_frames0),
	 QL_OFF(nic_stats.tx_cbfc_pause_frames0)},
	{"tx_cbfc_pause_frames1", QL_SIZEOF(nic_stats.tx_cbfc_pause_frames1),
	 QL_OFF(nic_stats.tx_cbfc_pause_frames1)},
	{"tx_cbfc_pause_frames2", QL_SIZEOF(nic_stats.tx_cbfc_pause_frames2),
	 QL_OFF(nic_stats.tx_cbfc_pause_frames2)},
	{"tx_cbfc_pause_frames3", QL_SIZEOF(nic_stats.tx_cbfc_pause_frames3),
	 QL_OFF(nic_stats.tx_cbfc_pause_frames3)},
	{"tx_cbfc_pause_frames4", QL_SIZEOF(nic_stats.tx_cbfc_pause_frames4),
	 QL_OFF(nic_stats.tx_cbfc_pause_frames4)},
	{"tx_cbfc_pause_frames5", QL_SIZEOF(nic_stats.tx_cbfc_pause_frames5),
	 QL_OFF(nic_stats.tx_cbfc_pause_frames5)},
	{"tx_cbfc_pause_frames6", QL_SIZEOF(nic_stats.tx_cbfc_pause_frames6),
	 QL_OFF(nic_stats.tx_cbfc_pause_frames6)},
	{"tx_cbfc_pause_frames7", QL_SIZEOF(nic_stats.tx_cbfc_pause_frames7),
	 QL_OFF(nic_stats.tx_cbfc_pause_frames7)},
	{"rx_cbfc_pause_frames0", QL_SIZEOF(nic_stats.rx_cbfc_pause_frames0),
	 QL_OFF(nic_stats.rx_cbfc_pause_frames0)},
	{"rx_cbfc_pause_frames1", QL_SIZEOF(nic_stats.rx_cbfc_pause_frames1),
	 QL_OFF(nic_stats.rx_cbfc_pause_frames1)},
	{"rx_cbfc_pause_frames2", QL_SIZEOF(nic_stats.rx_cbfc_pause_frames2),
	 QL_OFF(nic_stats.rx_cbfc_pause_frames2)},
	{"rx_cbfc_pause_frames3", QL_SIZEOF(nic_stats.rx_cbfc_pause_frames3),
	 QL_OFF(nic_stats.rx_cbfc_pause_frames3)},
	{"rx_cbfc_pause_frames4", QL_SIZEOF(nic_stats.rx_cbfc_pause_frames4),
	 QL_OFF(nic_stats.rx_cbfc_pause_frames4)},
	{"rx_cbfc_pause_frames5", QL_SIZEOF(nic_stats.rx_cbfc_pause_frames5),
	 QL_OFF(nic_stats.rx_cbfc_pause_frames5)},
	{"rx_cbfc_pause_frames6", QL_SIZEOF(nic_stats.rx_cbfc_pause_frames6),
	 QL_OFF(nic_stats.rx_cbfc_pause_frames6)},
	{"rx_cbfc_pause_frames7", QL_SIZEOF(nic_stats.rx_cbfc_pause_frames7),
	 QL_OFF(nic_stats.rx_cbfc_pause_frames7)},
	{"rx_nic_fifo_drop", QL_SIZEOF(nic_stats.rx_nic_fifo_drop),
	 QL_OFF(nic_stats.rx_nic_fifo_drop)},
};
/* Names of the ethtool self-tests (ETH_SS_TEST string set). */
static const char qlge_gstrings_test[][ETH_GSTRING_LEN] = {
	"Loopback test  (offline)"
};

#define QLGE_TEST_LEN (sizeof(qlge_gstrings_test) / ETH_GSTRING_LEN)
#define QLGE_STATS_LEN ARRAY_SIZE(qlge_gstrings_stats)
/* Number of receive-mac-error counter slots skipped by qlge_update_stats(). */
#define QLGE_RCV_MAC_ERR_STATS 7
/*
 * Push new interrupt-coalescing parameters to the hardware completion
 * queues.  The outbound (TX-completion) queues follow the RSS queues in
 * qdev->rx_ring[]; the inbound (RSS) queues are at the front.  A queue
 * group is only rewritten when its current CQICB values differ from the
 * requested ones.
 *
 * Fix: the RSS loop carried a redundant "rx_ring++" increment alongside
 * the per-iteration "rx_ring = &qdev->rx_ring[i]" assignment; the
 * increment was dead and has been removed.
 *
 * Returns 0 on success or the qlge_write_cfg() error code.
 */
static int qlge_update_ring_coalescing(struct qlge_adapter *qdev)
{
	int i, status = 0;
	struct rx_ring *rx_ring;
	struct cqicb *cqicb;

	if (!netif_running(qdev->ndev))
		return status;

	/* Skip the default queue, and update the outbound handler
	 * queues if they changed.
	 */
	cqicb = (struct cqicb *)&qdev->rx_ring[qdev->rss_ring_count];
	if (le16_to_cpu(cqicb->irq_delay) != qdev->tx_coalesce_usecs ||
	    le16_to_cpu(cqicb->pkt_delay) != qdev->tx_max_coalesced_frames) {
		for (i = qdev->rss_ring_count; i < qdev->rx_ring_count; i++) {
			rx_ring = &qdev->rx_ring[i];
			cqicb = (struct cqicb *)rx_ring;
			cqicb->irq_delay = cpu_to_le16(qdev->tx_coalesce_usecs);
			cqicb->pkt_delay =
				cpu_to_le16(qdev->tx_max_coalesced_frames);
			cqicb->flags = FLAGS_LI;
			status = qlge_write_cfg(qdev, cqicb, sizeof(*cqicb),
						CFG_LCQ, rx_ring->cq_id);
			if (status) {
				netif_err(qdev, ifup, qdev->ndev,
					  "Failed to load CQICB.\n");
				goto exit;
			}
		}
	}

	/* Update the inbound (RSS) handler queues if they changed. */
	cqicb = (struct cqicb *)&qdev->rx_ring[0];
	if (le16_to_cpu(cqicb->irq_delay) != qdev->rx_coalesce_usecs ||
	    le16_to_cpu(cqicb->pkt_delay) != qdev->rx_max_coalesced_frames) {
		for (i = 0; i < qdev->rss_ring_count; i++) {
			rx_ring = &qdev->rx_ring[i];
			cqicb = (struct cqicb *)rx_ring;
			cqicb->irq_delay = cpu_to_le16(qdev->rx_coalesce_usecs);
			cqicb->pkt_delay =
				cpu_to_le16(qdev->rx_max_coalesced_frames);
			cqicb->flags = FLAGS_LI;
			status = qlge_write_cfg(qdev, cqicb, sizeof(*cqicb),
						CFG_LCQ, rx_ring->cq_id);
			if (status) {
				netif_err(qdev, ifup, qdev->ndev,
					  "Failed to load CQICB.\n");
				goto exit;
			}
		}
	}
exit:
	return status;
}
/*
 * Read the XGMAC hardware statistics registers into qdev->nic_stats.
 *
 * nic_stats is laid out to mirror the register blocks, so a single
 * iterator walks the struct while the loops walk the register ranges:
 * TX stats at 0x200-0x278, RX stats at 0x300-0x3c8, per-priority TX
 * pause at 0x500-0x538, per-priority RX pause at 0x568-0x5a0, and the
 * NIC FIFO drop counter at 0x5b8.  The receive-mac-error counter slots
 * are skipped (not read from these register blocks).
 *
 * Bug fix: the error message for the final (0x5b8) read printed the
 * stale loop index "i" (left at 0x5a8) instead of the register actually
 * being read.
 *
 * Serialized by stats_lock and the xgmac hardware semaphore; on any
 * register read error the remaining counters are left unchanged.
 */
static void qlge_update_stats(struct qlge_adapter *qdev)
{
	u32 i;
	u64 data;
	u64 *iter = &qdev->nic_stats.tx_pkts;

	spin_lock(&qdev->stats_lock);
	if (qlge_sem_spinlock(qdev, qdev->xg_sem_mask)) {
		netif_err(qdev, drv, qdev->ndev,
			  "Couldn't get xgmac sem.\n");
		goto quit;
	}
	/*
	 * Get TX statistics.
	 */
	for (i = 0x200; i < 0x280; i += 8) {
		if (qlge_read_xgmac_reg64(qdev, i, &data)) {
			netif_err(qdev, drv, qdev->ndev,
				  "Error reading status register 0x%.04x.\n",
				  i);
			goto end;
		} else {
			*iter = data;
		}
		iter++;
	}
	/*
	 * Get RX statistics.
	 */
	for (i = 0x300; i < 0x3d0; i += 8) {
		if (qlge_read_xgmac_reg64(qdev, i, &data)) {
			netif_err(qdev, drv, qdev->ndev,
				  "Error reading status register 0x%.04x.\n",
				  i);
			goto end;
		} else {
			*iter = data;
		}
		iter++;
	}
	/* Skip over the receive mac error statistics slots. */
	iter += QLGE_RCV_MAC_ERR_STATS;
	/*
	 * Get Per-priority TX pause frame counter statistics.
	 */
	for (i = 0x500; i < 0x540; i += 8) {
		if (qlge_read_xgmac_reg64(qdev, i, &data)) {
			netif_err(qdev, drv, qdev->ndev,
				  "Error reading status register 0x%.04x.\n",
				  i);
			goto end;
		} else {
			*iter = data;
		}
		iter++;
	}
	/*
	 * Get Per-priority RX pause frame counter statistics.
	 */
	for (i = 0x568; i < 0x5a8; i += 8) {
		if (qlge_read_xgmac_reg64(qdev, i, &data)) {
			netif_err(qdev, drv, qdev->ndev,
				  "Error reading status register 0x%.04x.\n",
				  i);
			goto end;
		} else {
			*iter = data;
		}
		iter++;
	}
	/*
	 * Get RX NIC FIFO DROP statistics.
	 */
	if (qlge_read_xgmac_reg64(qdev, 0x5b8, &data)) {
		netif_err(qdev, drv, qdev->ndev,
			  "Error reading status register 0x%.04x.\n", 0x5b8);
		goto end;
	} else {
		*iter = data;
	}
end:
	qlge_sem_unlock(qdev, qdev->xg_sem_mask);
quit:
	spin_unlock(&qdev->stats_lock);
}
/* ethtool .get_strings: copy out self-test names or statistic names. */
static void qlge_get_strings(struct net_device *dev, u32 stringset, u8 *buf)
{
	int i;

	switch (stringset) {
	case ETH_SS_TEST:
		memcpy(buf, *qlge_gstrings_test, QLGE_TEST_LEN * ETH_GSTRING_LEN);
		break;
	case ETH_SS_STATS:
		for (i = 0; i < QLGE_STATS_LEN; i++, buf += ETH_GSTRING_LEN)
			memcpy(buf, qlge_gstrings_stats[i].stat_string,
			       ETH_GSTRING_LEN);
		break;
	}
}
/* ethtool .get_sset_count: number of strings in the requested set. */
static int qlge_get_sset_count(struct net_device *dev, int sset)
{
	if (sset == ETH_SS_TEST)
		return QLGE_TEST_LEN;
	if (sset == ETH_SS_STATS)
		return QLGE_STATS_LEN;
	return -EOPNOTSUPP;
}
/* ethtool .get_ethtool_stats: refresh the hardware counters, then copy
 * each one out of the adapter structure using the offset table.
 */
static void
qlge_get_ethtool_stats(struct net_device *ndev,
		       struct ethtool_stats *stats, u64 *data)
{
	struct qlge_adapter *qdev = netdev_to_qdev(ndev);
	int i;

	qlge_update_stats(qdev);

	for (i = 0; i < QLGE_STATS_LEN; i++) {
		const struct qlge_stats *s = &qlge_gstrings_stats[i];
		char *p = (char *)qdev + s->stat_offset;

		/* Counters are stored as u64 or u32 per the table entry. */
		data[i] = (s->sizeof_stat == sizeof(u64)) ?
			*(u64 *)p : *(u32 *)p;
	}
}
static int qlge_get_link_ksettings(struct net_device *ndev,
struct ethtool_link_ksettings *ecmd)
{
struct qlge_adapter *qdev = netdev_to_qdev(ndev);
u32 supported, advertising;
supported = SUPPORTED_10000baseT_Full;
advertising = ADVERTISED_10000baseT_Full;
if ((qdev->link_status & STS_LINK_TYPE_MASK) ==
STS_LINK_TYPE_10GBASET) {
supported |= (SUPPORTED_TP | SUPPORTED_Autoneg);
advertising |= (ADVERTISED_TP | ADVERTISED_Autoneg);
ecmd->base.port = PORT_TP;
ecmd->base.autoneg = AUTONEG_ENABLE;
} else {
supported |= SUPPORTED_FIBRE;
advertising |= ADVERTISED_FIBRE;
ecmd->base.port = PORT_FIBRE;
}
ecmd->base.speed = SPEED_10000;
ecmd->base.duplex = DUPLEX_FULL;
ethtool_convert_legacy_u32_to_link_mode(ecmd->link_modes.supported,
supported);
ethtool_convert_legacy_u32_to_link_mode(ecmd->link_modes.advertising,
advertising);
return 0;
}
static void qlge_get_drvinfo(struct net_device *ndev,
struct ethtool_drvinfo *drvinfo)
{
struct qlge_adapter *qdev = netdev_to_qdev(ndev);
strscpy(drvinfo->driver, qlge_driver_name, sizeof(drvinfo->driver));
strscpy(drvinfo->version, qlge_driver_version,
sizeof(drvinfo->version));
snprintf(drvinfo->fw_version, sizeof(drvinfo->fw_version),
"v%d.%d.%d",
(qdev->fw_rev_id & 0x00ff0000) >> 16,
(qdev->fw_rev_id & 0x0000ff00) >> 8,
(qdev->fw_rev_id & 0x000000ff));
strscpy(drvinfo->bus_info, pci_name(qdev->pdev),
sizeof(drvinfo->bus_info));
}
/* ethtool .get_wol: report magic-packet capability (mezz cards only). */
static void qlge_get_wol(struct net_device *ndev, struct ethtool_wolinfo *wol)
{
	struct qlge_adapter *qdev = netdev_to_qdev(ndev);
	unsigned short ssys_dev = qdev->pdev->subsystem_device;

	/* WOL is only supported for mezz card. */
	if (ssys_dev != QLGE_MEZZ_SSYS_ID_068 &&
	    ssys_dev != QLGE_MEZZ_SSYS_ID_180)
		return;

	wol->supported = WAKE_MAGIC;
	wol->wolopts = qdev->wol;
}
static int qlge_set_wol(struct net_device *ndev, struct ethtool_wolinfo *wol)
{
struct qlge_adapter *qdev = netdev_to_qdev(ndev);
unsigned short ssys_dev = qdev->pdev->subsystem_device;
/* WOL is only supported for mezz card. */
if (ssys_dev != QLGE_MEZZ_SSYS_ID_068 &&
ssys_dev != QLGE_MEZZ_SSYS_ID_180) {
netif_info(qdev, drv, qdev->ndev,
"WOL is only supported for mezz card\n");
return -EOPNOTSUPP;
}
if (wol->wolopts & ~WAKE_MAGIC)
return -EINVAL;
qdev->wol = wol->wolopts;
netif_info(qdev, drv, qdev->ndev, "Set wol option 0x%x\n", qdev->wol);
return 0;
}
/* ethtool .set_phys_id: blink the port LED for identification, restoring
 * the saved LED configuration when done.
 */
static int qlge_set_phys_id(struct net_device *ndev,
			    enum ethtool_phys_id_state state)
{
	struct qlge_adapter *qdev = netdev_to_qdev(ndev);

	if (state == ETHTOOL_ID_ACTIVE) {
		/* Save the current LED settings */
		if (qlge_mb_get_led_cfg(qdev))
			return -EIO;
		/* Start blinking */
		qlge_mb_set_led_cfg(qdev, QL_LED_BLINK);
		return 0;
	}

	if (state == ETHTOOL_ID_INACTIVE) {
		/* Restore LED settings */
		if (qlge_mb_set_led_cfg(qdev, qdev->led_config))
			return -EIO;
		return 0;
	}

	return -EINVAL;
}
static int qlge_start_loopback(struct qlge_adapter *qdev)
{
if (netif_carrier_ok(qdev->ndev)) {
set_bit(QL_LB_LINK_UP, &qdev->flags);
netif_carrier_off(qdev->ndev);
} else {
clear_bit(QL_LB_LINK_UP, &qdev->flags);
}
qdev->link_config |= CFG_LOOPBACK_PCS;
return qlge_mb_set_port_cfg(qdev);
}
/* Leave PCS loopback mode and restore the carrier state that was saved
 * by qlge_start_loopback().
 */
static void qlge_stop_loopback(struct qlge_adapter *qdev)
{
	qdev->link_config &= ~CFG_LOOPBACK_PCS;
	qlge_mb_set_port_cfg(qdev);

	if (test_bit(QL_LB_LINK_UP, &qdev->flags)) {
		clear_bit(QL_LB_LINK_UP, &qdev->flags);
		netif_carrier_on(qdev->ndev);
	}
}
/* Fill an skb with the loopback test pattern verified by
 * qlge_check_lb_frame(): 0xFF fill, 0xAA over the upper half, and
 * 0xBE/0xAF marker bytes at half+10 and half+12.
 */
static void qlge_create_lb_frame(struct sk_buff *skb,
				 unsigned int frame_size)
{
	memset(skb->data, 0xFF, frame_size);
	frame_size &= ~1;	/* round down to even so the halves align */
	memset(&skb->data[frame_size / 2], 0xAA, frame_size / 2 - 1);
	skb->data[frame_size / 2 + 10] = (unsigned char)0xBE;
	skb->data[frame_size / 2 + 12] = (unsigned char)0xAF;
}
/* Called from the rx clean path during self-test: if the received frame
 * carries the pattern written by qlge_create_lb_frame(), count it as a
 * successfully looped-back frame by decrementing lb_count.
 */
void qlge_check_lb_frame(struct qlge_adapter *qdev,
			 struct sk_buff *skb)
{
	unsigned int half = skb->len / 2;

	/* Bail out unless all three marker bytes match. */
	if (*(skb->data + 3) != 0xFF ||
	    *(skb->data + half + 10) != 0xBE ||
	    *(skb->data + half + 12) != 0xAF)
		return;

	atomic_dec(&qdev->lb_count);
}
/* Send 64 loopback frames and verify they all come back.
 *
 * Each transmitted frame bumps lb_count; qlge_check_lb_frame() (invoked
 * from the rx clean path while the self-test flag is set) decrements it
 * for every frame that returns with the expected pattern.  A non-zero
 * count after draining the rx ring means frames were lost or corrupted.
 *
 * Returns 0 on success, -ENOMEM on allocation failure, -EPIPE if a send
 * is rejected, -EIO if any frame failed to loop back.
 *
 * NOTE(review): if qlge_lb_send() returns something other than
 * NETDEV_TX_OK the skb appears to be leaked here — confirm whether the
 * send path consumes it on failure.
 */
static int qlge_run_loopback_test(struct qlge_adapter *qdev)
{
	int i;
	netdev_tx_t rc;
	struct sk_buff *skb;
	unsigned int size = SMALL_BUF_MAP_SIZE;

	for (i = 0; i < 64; i++) {
		skb = netdev_alloc_skb(qdev->ndev, size);
		if (!skb)
			return -ENOMEM;

		skb->queue_mapping = 0;
		skb_put(skb, size);
		qlge_create_lb_frame(skb, size);
		rc = qlge_lb_send(skb, qdev->ndev);
		if (rc != NETDEV_TX_OK)
			return -EPIPE;
		atomic_inc(&qdev->lb_count);
	}
	/* Give queue time to settle before testing results. */
	usleep_range(2000, 2100);

	qlge_clean_lb_rx_ring(&qdev->rx_ring[0], 128);
	return atomic_read(&qdev->lb_count) ? -EIO : 0;
}
/* Run the full loopback sequence: enter loopback, send/verify frames,
 * and always leave loopback mode again.  *data carries the result code.
 */
static int qlge_loopback_test(struct qlge_adapter *qdev, u64 *data)
{
	*data = qlge_start_loopback(qdev);
	if (!*data)
		*data = qlge_run_loopback_test(qdev);

	qlge_stop_loopback(qdev);
	return *data;
}
/* ethtool .self_test: run the offline loopback test when requested.
 * The interface must be up; otherwise the test is reported as failed.
 */
static void qlge_self_test(struct net_device *ndev,
			   struct ethtool_test *eth_test, u64 *data)
{
	struct qlge_adapter *qdev = netdev_to_qdev(ndev);

	memset(data, 0, sizeof(u64) * QLGE_TEST_LEN);

	if (!netif_running(ndev)) {
		netif_err(qdev, drv, qdev->ndev,
			  "is down, Loopback test will fail.\n");
		eth_test->flags |= ETH_TEST_FL_FAILED;
		return;
	}

	set_bit(QL_SELFTEST, &qdev->flags);
	if (eth_test->flags == ETH_TEST_FL_OFFLINE) {
		/* Offline tests */
		if (qlge_loopback_test(qdev, &data[0]))
			eth_test->flags |= ETH_TEST_FL_FAILED;
	} else {
		/* Online tests */
		data[0] = 0;
	}
	clear_bit(QL_SELFTEST, &qdev->flags);

	/* Give link time to come up after
	 * port configuration changes.
	 */
	msleep_interruptible(4 * 1000);
}
/* ethtool .get_regs_len: size of the dump qlge_get_regs() will produce.
 * Without the forced-coredump flag the full MPI coredump is reported,
 * otherwise only the register dump.
 */
static int qlge_get_regs_len(struct net_device *ndev)
{
	struct qlge_adapter *qdev = netdev_to_qdev(ndev);

	return test_bit(QL_FRC_COREDUMP, &qdev->flags) ?
		sizeof(struct qlge_reg_dump) :
		sizeof(struct qlge_mpi_coredump);
}
/* ethtool .get_regs: fill the caller's buffer with the register/core
 * dump and report the same length logic as qlge_get_regs_len().
 */
static void qlge_get_regs(struct net_device *ndev,
			  struct ethtool_regs *regs, void *p)
{
	struct qlge_adapter *qdev = netdev_to_qdev(ndev);

	qlge_get_dump(qdev, p);

	if (test_bit(QL_FRC_COREDUMP, &qdev->flags))
		regs->len = sizeof(struct qlge_reg_dump);
	else
		regs->len = sizeof(struct qlge_mpi_coredump);
}
/* ethtool .get_coalesce: report the cached coalescing parameters.
 *
 * This chip coalesces as follows: if a packet arrives, hold off
 * interrupts until cqicb->int_delay expires, but if no other packets
 * arrive don't wait longer than cqicb->pkt_int_delay.  ethtool doesn't
 * use a timer to coalesce on a frame basis, so ethtool's
 * max_coalesced_frames value is treated as a delay in microseconds,
 * assuming a throughput of 1,000,000 frames per second @ 1024 bytes —
 * i.e. one frame per usec, a simple one-to-one ratio.
 */
static int qlge_get_coalesce(struct net_device *ndev,
			     struct ethtool_coalesce *c,
			     struct kernel_ethtool_coalesce *kernel_coal,
			     struct netlink_ext_ack *extack)
{
	struct qlge_adapter *qdev = netdev_to_qdev(ndev);

	c->rx_coalesce_usecs = qdev->rx_coalesce_usecs;
	c->tx_coalesce_usecs = qdev->tx_coalesce_usecs;
	c->rx_max_coalesced_frames = qdev->rx_max_coalesced_frames;
	c->tx_max_coalesced_frames = qdev->tx_max_coalesced_frames;

	return 0;
}
static int qlge_set_coalesce(struct net_device *ndev,
struct ethtool_coalesce *c,
struct kernel_ethtool_coalesce *kernel_coal,
struct netlink_ext_ack *extack)
{
struct qlge_adapter *qdev = netdev_to_qdev(ndev);
/* Validate user parameters. */
if (c->rx_coalesce_usecs > qdev->rx_ring_size / 2)
return -EINVAL;
/* Don't wait more than 10 usec. */
if (c->rx_max_coalesced_frames > MAX_INTER_FRAME_WAIT)
return -EINVAL;
if (c->tx_coalesce_usecs > qdev->tx_ring_size / 2)
return -EINVAL;
if (c->tx_max_coalesced_frames > MAX_INTER_FRAME_WAIT)
return -EINVAL;
/* Verify a change took place before updating the hardware. */
if (qdev->rx_coalesce_usecs == c->rx_coalesce_usecs &&
qdev->tx_coalesce_usecs == c->tx_coalesce_usecs &&
qdev->rx_max_coalesced_frames == c->rx_max_coalesced_frames &&
qdev->tx_max_coalesced_frames == c->tx_max_coalesced_frames)
return 0;
qdev->rx_coalesce_usecs = c->rx_coalesce_usecs;
qdev->tx_coalesce_usecs = c->tx_coalesce_usecs;
qdev->rx_max_coalesced_frames = c->rx_max_coalesced_frames;
qdev->tx_max_coalesced_frames = c->tx_max_coalesced_frames;
return qlge_update_ring_coalescing(qdev);
}
/* ethtool .get_pauseparam: refresh the port config from firmware and
 * report symmetric pause when standard pause is enabled.
 */
static void qlge_get_pauseparam(struct net_device *ndev,
				struct ethtool_pauseparam *pause)
{
	struct qlge_adapter *qdev = netdev_to_qdev(ndev);

	qlge_mb_get_port_cfg(qdev);

	if (qdev->link_config & CFG_PAUSE_STD) {
		pause->rx_pause = 1;
		pause->tx_pause = 1;
	}
}
/* ethtool .set_pauseparam: the hardware only supports symmetric pause,
 * so rx and tx must be enabled or disabled together.
 */
static int qlge_set_pauseparam(struct net_device *ndev,
			       struct ethtool_pauseparam *pause)
{
	struct qlge_adapter *qdev = netdev_to_qdev(ndev);

	if (pause->rx_pause && pause->tx_pause)
		qdev->link_config |= CFG_PAUSE_STD;
	else if (!pause->rx_pause && !pause->tx_pause)
		qdev->link_config &= ~CFG_PAUSE_STD;
	else
		return -EINVAL;	/* asymmetric pause is not supported */

	return qlge_mb_set_port_cfg(qdev);
}
static u32 qlge_get_msglevel(struct net_device *ndev)
{
struct qlge_adapter *qdev = netdev_to_qdev(ndev);
return qdev->msg_enable;
}
/* ethtool .set_msglevel: store the new message-enable bitmap. */
static void qlge_set_msglevel(struct net_device *ndev, u32 value)
{
	netdev_to_qdev(ndev)->msg_enable = value;
}
/* ethtool entry points exported via netdev->ethtool_ops. */
const struct ethtool_ops qlge_ethtool_ops = {
	.supported_coalesce_params = ETHTOOL_COALESCE_USECS |
				     ETHTOOL_COALESCE_MAX_FRAMES,
	.get_drvinfo = qlge_get_drvinfo,
	.get_wol = qlge_get_wol,
	.set_wol = qlge_set_wol,
	.get_regs_len	= qlge_get_regs_len,
	.get_regs = qlge_get_regs,
	.get_msglevel = qlge_get_msglevel,
	.set_msglevel = qlge_set_msglevel,
	.get_link = ethtool_op_get_link,
	.set_phys_id		 = qlge_set_phys_id,
	.self_test		 = qlge_self_test,
	.get_pauseparam		 = qlge_get_pauseparam,
	.set_pauseparam		 = qlge_set_pauseparam,
	.get_coalesce = qlge_get_coalesce,
	.set_coalesce = qlge_set_coalesce,
	.get_sset_count = qlge_get_sset_count,
	.get_strings = qlge_get_strings,
	.get_ethtool_stats = qlge_get_ethtool_stats,
	.get_link_ksettings = qlge_get_link_ksettings,
};
| linux-master | drivers/staging/qlge/qlge_ethtool.c |
// SPDX-License-Identifier: GPL-2.0
#include "qlge.h"
/* Un-pause the MPI RISC.  Fails with -EIO if the RISC is not currently
 * reporting the paused state.
 */
int qlge_unpause_mpi_risc(struct qlge_adapter *qdev)
{
	if (!(qlge_read32(qdev, CSR) & CSR_RP))
		return -EIO;

	qlge_write32(qdev, CSR, CSR_CMD_CLR_PAUSE);
	return 0;
}
/* Request a RISC pause and poll until the hardware confirms it.
 * Returns 0 on success, -ETIMEDOUT if the pause never takes effect.
 */
int qlge_pause_mpi_risc(struct qlge_adapter *qdev)
{
	int count = UDELAY_COUNT;

	qlge_write32(qdev, CSR, CSR_CMD_SET_PAUSE);
	while (count) {
		if (qlge_read32(qdev, CSR) & CSR_RP)
			break;
		mdelay(UDELAY_DELAY);
		count--;
	}
	return count ? 0 : -ETIMEDOUT;
}
/* Hard-reset the MPI RISC: assert reset, poll for confirmation, then
 * deassert.  Returns 0 on success, -ETIMEDOUT if no confirmation.
 */
int qlge_hard_reset_mpi_risc(struct qlge_adapter *qdev)
{
	int count;

	qlge_write32(qdev, CSR, CSR_CMD_SET_RST);
	for (count = UDELAY_COUNT; count; count--) {
		if (qlge_read32(qdev, CSR) & CSR_RR) {
			/* Reset acknowledged; release it and stop polling. */
			qlge_write32(qdev, CSR, CSR_CMD_CLR_RST);
			break;
		}
		mdelay(UDELAY_DELAY);
	}
	return count ? 0 : -ETIMEDOUT;
}
/* Read an MPI (processor) register via the PROC_ADDR/PROC_DATA window.
 * Returns 0 and fills *data on success, else the wait error code.
 */
int qlge_read_mpi_reg(struct qlge_adapter *qdev, u32 reg, u32 *data)
{
	int status;

	/* Wait for the PROC_ADDR interface to become ready. */
	status = qlge_wait_reg_rdy(qdev, PROC_ADDR, PROC_ADDR_RDY, PROC_ADDR_ERR);
	if (status)
		return status;

	/* Kick off the read. */
	qlge_write32(qdev, PROC_ADDR, reg | PROC_ADDR_R);

	/* Wait for the read to complete. */
	status = qlge_wait_reg_rdy(qdev, PROC_ADDR, PROC_ADDR_RDY, PROC_ADDR_ERR);
	if (status)
		return status;

	*data = qlge_read32(qdev, PROC_DATA);
	return status;
}
/* Write an MPI (processor) register via the PROC_ADDR/PROC_DATA window.
 * Returns 0 on success, else the wait error code.
 */
int qlge_write_mpi_reg(struct qlge_adapter *qdev, u32 reg, u32 data)
{
	int status;

	/* Wait for the PROC_ADDR interface to become ready. */
	status = qlge_wait_reg_rdy(qdev, PROC_ADDR, PROC_ADDR_RDY, PROC_ADDR_ERR);
	if (status)
		return status;

	/* Stage the data, then write the address to trigger the access. */
	qlge_write32(qdev, PROC_DATA, data);
	qlge_write32(qdev, PROC_ADDR, reg);

	/* Wait for the write to complete. */
	return qlge_wait_reg_rdy(qdev, PROC_ADDR, PROC_ADDR_RDY, PROC_ADDR_ERR);
}
/* Soft-reset the MPI RISC by writing 1 to MPI register 0x1010
 * (presumably the firmware's reset-control register — TODO confirm
 * against the chip documentation).
 */
int qlge_soft_reset_mpi_risc(struct qlge_adapter *qdev)
{
	return qlge_write_mpi_reg(qdev, 0x00001010, 1);
}
/* Determine if we are in charge of the firmware. If
 * we are the lower of the 2 NIC pcie functions, or if
 * we are the higher function and the lower function
 * is not enabled.
 */
int qlge_own_firmware(struct qlge_adapter *qdev)
{
	u32 sts;

	/* The lower of the two NIC functions always owns core dump and
	 * firmware reset responsibility.
	 */
	if (qdev->func < qdev->alt_func)
		return 1;

	/* As the higher function we own it only when the lower function
	 * is not enabled (its function-enable bit in STS is clear).
	 */
	sts = qlge_read32(qdev, STS);
	return !(sts & (1 << (8 + qdev->alt_func)));
}
/* Copy out_count outbound mailbox registers into mbcp->mbox_out[],
 * holding the processor-register semaphore for the duration.
 * Returns 0 on success, -EBUSY if the semaphore cannot be taken, or
 * the first mailbox read error.
 */
static int qlge_get_mb_sts(struct qlge_adapter *qdev, struct mbox_params *mbcp)
{
	int i, status;

	status = qlge_sem_spinlock(qdev, SEM_PROC_REG_MASK);
	if (status)
		return -EBUSY;

	for (i = 0; i < mbcp->out_count; i++) {
		status = qlge_read_mpi_reg(qdev, qdev->mailbox_out + i,
					   &mbcp->mbox_out[i]);
		if (status) {
			netif_err(qdev, drv, qdev->ndev, "Failed mailbox read.\n");
			break;
		}
	}

	qlge_sem_unlock(qdev, SEM_PROC_REG_MASK);	/* does flush too */
	return status;
}
/* Wait for a single mailbox command to complete.
 * Returns zero on success, -ETIMEDOUT after 100 polls.
 */
static int qlge_wait_mbx_cmd_cmplt(struct qlge_adapter *qdev)
{
	int count = 100;

	/* Poll the processor-interrupt status bit. */
	while (count--) {
		if (qlge_read32(qdev, STS) & STS_PI)
			return 0;
		mdelay(UDELAY_DELAY);	/* 100ms */
	}
	return -ETIMEDOUT;
}
/* Execute a single mailbox command.
 * Caller must hold PROC_ADDR semaphore.
 */
static int qlge_exec_mb_cmd(struct qlge_adapter *qdev, struct mbox_params *mbcp)
{
	int i, status;

	/* A pending host-to-RISC interrupt means a previous command has
	 * not been consumed yet; this shouldn't happen.
	 */
	if (qlge_read32(qdev, CSR) & CSR_HRI)
		return -EIO;

	status = qlge_sem_spinlock(qdev, SEM_PROC_REG_MASK);
	if (status)
		return status;

	/* Fill the outbound mailboxes. */
	for (i = 0; i < mbcp->in_count; i++) {
		status = qlge_write_mpi_reg(qdev, qdev->mailbox_in + i,
					    mbcp->mbox_in[i]);
		if (status)
			goto end;
	}

	/* Wake up the MPI firmware. */
	qlge_write32(qdev, CSR, CSR_CMD_SET_H2R_INT);
end:
	qlge_sem_unlock(qdev, SEM_PROC_REG_MASK);
	return status;
}
/* We are being asked by firmware to accept
 * a change to the port. This is only
 * a change to max frame sizes (Tx/Rx), pause
 * parameters, or loopback mode. We wake up a worker
 * to handler processing this since a mailbox command
 * will need to be sent to ACK the request.
 */
static int qlge_idc_req_aen(struct qlge_adapter *qdev)
{
	struct mbox_params *mbcp = &qdev->idc_mbc;
	int status;

	netif_err(qdev, drv, qdev->ndev, "Enter!\n");

	/* Fetch the four status mailboxes for the worker to inspect. */
	mbcp->out_count = 4;
	status = qlge_get_mb_sts(qdev, mbcp);
	if (status) {
		netif_err(qdev, drv, qdev->ndev,
			  "Could not read MPI, resetting ASIC!\n");
		qlge_queue_asic_error(qdev);
		return status;
	}

	/* Begin polled mode early so we don't get another interrupt
	 * when we leave mpi_worker.
	 */
	qlge_write32(qdev, INTR_MASK, (INTR_MASK_PI << 16));
	queue_delayed_work(qdev->workqueue, &qdev->mpi_idc_work, 0);
	return status;
}
/* Process an inter-device event completion.
 * If good, signal the caller's completion.
 */
static int qlge_idc_cmplt_aen(struct qlge_adapter *qdev)
{
	struct mbox_params *mbcp = &qdev->idc_mbc;
	int status;

	mbcp->out_count = 4;
	status = qlge_get_mb_sts(qdev, mbcp);
	if (status) {
		netif_err(qdev, drv, qdev->ndev,
			  "Could not read MPI, resetting RISC!\n");
		qlge_queue_fw_error(qdev);
		return status;
	}

	/* Wake the mpi_idc_work thread waiting on this event. */
	complete(&qdev->ide_completion);
	return status;
}
/* Handle the link-up async event: record the link status word, re-init
 * CAM and frame routing if returning from an IDC event, and queue the
 * port-config worker to verify frame-size settings.
 */
static void qlge_link_up(struct qlge_adapter *qdev, struct mbox_params *mbcp)
{
	int status;

	mbcp->out_count = 2;

	status = qlge_get_mb_sts(qdev, mbcp);
	if (status) {
		netif_err(qdev, drv, qdev->ndev,
			  "%s: Could not get mailbox status.\n", __func__);
		return;
	}

	/* Mailbox 1 carries the link status for this event. */
	qdev->link_status = mbcp->mbox_out[1];
	netif_err(qdev, drv, qdev->ndev, "Link Up.\n");

	/* If we're coming back from an IDC event
	 * then set up the CAM and frame routing.
	 */
	if (test_bit(QL_CAM_RT_SET, &qdev->flags)) {
		status = qlge_cam_route_initialize(qdev);
		if (status) {
			netif_err(qdev, ifup, qdev->ndev,
				  "Failed to init CAM/Routing tables.\n");
			return;
		}
		clear_bit(QL_CAM_RT_SET, &qdev->flags);
	}

	/* Queue up a worker to check the frame
	 * size information, and fix it if it's not
	 * to our liking.
	 */
	if (!test_bit(QL_PORT_CFG, &qdev->flags)) {
		netif_err(qdev, drv, qdev->ndev, "Queue Port Config Worker!\n");
		set_bit(QL_PORT_CFG, &qdev->flags);
		/* Begin polled mode early so
		 * we don't get another interrupt
		 * when we leave mpi_worker dpc.
		 */
		qlge_write32(qdev, INTR_MASK, (INTR_MASK_PI << 16));
		queue_delayed_work(qdev->workqueue,
				   &qdev->mpi_port_cfg_work, 0);
	}

	qlge_link_on(qdev);
}
/* Handle the link-down async event: drain the status mailboxes and mark
 * the link off regardless of whether the read succeeded.
 */
static void qlge_link_down(struct qlge_adapter *qdev, struct mbox_params *mbcp)
{
	mbcp->out_count = 3;

	if (qlge_get_mb_sts(qdev, mbcp))
		netif_err(qdev, drv, qdev->ndev, "Link down AEN broken!\n");

	qlge_link_off(qdev);
}
/* Handle the SFP-insertion async event: drain its five status mailboxes
 * and log the outcome.
 */
static int qlge_sfp_in(struct qlge_adapter *qdev, struct mbox_params *mbcp)
{
	int status;

	mbcp->out_count = 5;

	status = qlge_get_mb_sts(qdev, mbcp);
	if (!status)
		netif_err(qdev, drv, qdev->ndev, "SFP insertion detected.\n");
	else
		netif_err(qdev, drv, qdev->ndev, "SFP in AEN broken!\n");

	return status;
}
/* Handle the SFP-removal async event: drain its single status mailbox
 * and log the outcome.
 */
static int qlge_sfp_out(struct qlge_adapter *qdev, struct mbox_params *mbcp)
{
	int status;

	mbcp->out_count = 1;

	status = qlge_get_mb_sts(qdev, mbcp);
	if (!status)
		netif_err(qdev, drv, qdev->ndev, "SFP removal detected.\n");
	else
		netif_err(qdev, drv, qdev->ndev, "SFP out AEN broken!\n");

	return status;
}
/* Handle the lost-AEN event: drain six status mailboxes and dump their
 * contents so the lost event can be diagnosed.
 */
static int qlge_aen_lost(struct qlge_adapter *qdev, struct mbox_params *mbcp)
{
	int i, status;

	mbcp->out_count = 6;

	status = qlge_get_mb_sts(qdev, mbcp);
	if (status) {
		netif_err(qdev, drv, qdev->ndev, "Lost AEN broken!\n");
		return status;
	}

	netif_err(qdev, drv, qdev->ndev, "Lost AEN detected.\n");
	for (i = 0; i < mbcp->out_count; i++)
		netif_err(qdev, drv, qdev->ndev, "mbox_out[%d] = 0x%.08x.\n",
			  i, mbcp->mbox_out[i]);

	return status;
}
/* Handle the firmware-init-done async event: record the firmware
 * revision and initialize the CAM/routing tables.
 */
static void qlge_init_fw_done(struct qlge_adapter *qdev, struct mbox_params *mbcp)
{
	mbcp->out_count = 2;

	if (qlge_get_mb_sts(qdev, mbcp)) {
		netif_err(qdev, drv, qdev->ndev, "Firmware did not initialize!\n");
		return;
	}

	netif_err(qdev, drv, qdev->ndev, "Firmware Revision  = 0x%.08x.\n",
		  mbcp->mbox_out[1]);
	qdev->fw_rev_id = mbcp->mbox_out[1];

	if (qlge_cam_route_initialize(qdev))
		netif_err(qdev, ifup, qdev->ndev,
			  "Failed to init CAM/Routing tables.\n");
}
/* Process an async event and clear it unless it's an
 * error condition.
 * This can get called iteratively from the mpi_work thread
 * when events arrive via an interrupt.
 * It also gets called when a mailbox command is polling for
 * its completion.
 *
 * Returns 0 on success or a negative error code; on some paths
 * it simply propagates the status of qlge_get_mb_sts().
 */
static int qlge_mpi_handler(struct qlge_adapter *qdev, struct mbox_params *mbcp)
{
	int status;
	int orig_count = mbcp->out_count;
	/* Just get mailbox zero for now. */
	mbcp->out_count = 1;
	status = qlge_get_mb_sts(qdev, mbcp);
	if (status) {
		netif_err(qdev, drv, qdev->ndev,
			  "Could not read MPI, resetting ASIC!\n");
		qlge_queue_asic_error(qdev);
		goto end;
	}
	/* Dispatch on the event/status code in mailbox word zero. */
	switch (mbcp->mbox_out[0]) {
	/* This case is only active when we arrive here
	 * as a result of issuing a mailbox command to
	 * the firmware.
	 */
	case MB_CMD_STS_INTRMDT:
	case MB_CMD_STS_GOOD:
	case MB_CMD_STS_INVLD_CMD:
	case MB_CMD_STS_XFC_ERR:
	case MB_CMD_STS_CSUM_ERR:
	case MB_CMD_STS_ERR:
	case MB_CMD_STS_PARAM_ERR:
		/* We can only get mailbox status if we're polling from an
		 * unfinished command. Get the rest of the status data and
		 * return back to the caller.
		 * We only end up here when we're polling for a mailbox
		 * command completion.
		 */
		mbcp->out_count = orig_count;
		status = qlge_get_mb_sts(qdev, mbcp);
		/* Note: returns directly, skipping the CSR clear at "end". */
		return status;
	/* We are being asked by firmware to accept
	 * a change to the port. This is only
	 * a change to max frame sizes (Tx/Rx), pause
	 * parameters, or loopback mode.
	 */
	case AEN_IDC_REQ:
		status = qlge_idc_req_aen(qdev);
		break;
	/* Process and inbound IDC event.
	 * This will happen when we're trying to
	 * change tx/rx max frame size, change pause
	 * parameters or loopback mode.
	 */
	case AEN_IDC_CMPLT:
	case AEN_IDC_EXT:
		status = qlge_idc_cmplt_aen(qdev);
		break;
	case AEN_LINK_UP:
		qlge_link_up(qdev, mbcp);
		break;
	case AEN_LINK_DOWN:
		qlge_link_down(qdev, mbcp);
		break;
	case AEN_FW_INIT_DONE:
		/* If we're in process on executing the firmware,
		 * then convert the status to normal mailbox status.
		 */
		if (mbcp->mbox_in[0] == MB_CMD_EX_FW) {
			mbcp->out_count = orig_count;
			status = qlge_get_mb_sts(qdev, mbcp);
			mbcp->mbox_out[0] = MB_CMD_STS_GOOD;
			return status;
		}
		qlge_init_fw_done(qdev, mbcp);
		break;
	case AEN_AEN_SFP_IN:
		qlge_sfp_in(qdev, mbcp);
		break;
	case AEN_AEN_SFP_OUT:
		qlge_sfp_out(qdev, mbcp);
		break;
	/* This event can arrive at boot time or after an
	 * MPI reset if the firmware failed to initialize.
	 */
	case AEN_FW_INIT_FAIL:
		/* If we're in process on executing the firmware,
		 * then convert the status to normal mailbox status.
		 */
		if (mbcp->mbox_in[0] == MB_CMD_EX_FW) {
			mbcp->out_count = orig_count;
			status = qlge_get_mb_sts(qdev, mbcp);
			mbcp->mbox_out[0] = MB_CMD_STS_ERR;
			return status;
		}
		netif_err(qdev, drv, qdev->ndev,
			  "Firmware initialization failed.\n");
		status = -EIO;
		qlge_queue_fw_error(qdev);
		break;
	case AEN_SYS_ERR:
		netif_err(qdev, drv, qdev->ndev, "System Error.\n");
		qlge_queue_fw_error(qdev);
		status = -EIO;
		break;
	case AEN_AEN_LOST:
		qlge_aen_lost(qdev, mbcp);
		break;
	case AEN_DCBX_CHG:
		/* Need to support AEN 8110 */
		break;
	default:
		netif_err(qdev, drv, qdev->ndev,
			  "Unsupported AE %.08x.\n", mbcp->mbox_out[0]);
		/* Clear the MPI firmware status. */
	}
end:
	/* Clear the risc-to-PCI interrupt condition. */
	qlge_write32(qdev, CSR, CSR_CMD_CLR_R2PCI_INT);
	/* Restore the original mailbox count to
	 * what the caller asked for. This can get
	 * changed when a mailbox command is waiting
	 * for a response and an AEN arrives and
	 * is handled.
	 */
	mbcp->out_count = orig_count;
	return status;
}
/* Execute a single mailbox command.
 * mbcp is a pointer to an array of u32. Each
 * element in the array contains the value for its
 * respective mailbox register.
 *
 * Serialized by qdev->mpi_mutex; runs the MPI in polled mode for
 * the duration of the command. Returns 0 on success or a negative
 * error code (-ETIMEDOUT after MAILBOX_TIMEOUT seconds, -EIO on a
 * bad completion status).
 */
static int qlge_mailbox_command(struct qlge_adapter *qdev, struct mbox_params *mbcp)
{
	int status;
	unsigned long count;
	mutex_lock(&qdev->mpi_mutex);
	/* Begin polled mode for MPI */
	qlge_write32(qdev, INTR_MASK, (INTR_MASK_PI << 16));
	/* Load the mailbox registers and wake up MPI RISC. */
	status = qlge_exec_mb_cmd(qdev, mbcp);
	if (status)
		goto end;
	/* If we're generating a system error, then there's nothing
	 * to wait for.
	 */
	if (mbcp->mbox_in[0] == MB_CMD_MAKE_SYS_ERR)
		goto end;
	/* Wait for the command to complete. We loop
	 * here because some AEN might arrive while
	 * we're waiting for the mailbox command to
	 * complete. If more than 5 seconds expire we can
	 * assume something is wrong.
	 */
	count = jiffies + HZ * MAILBOX_TIMEOUT;
	do {
		/* Wait for the interrupt to come in. */
		status = qlge_wait_mbx_cmd_cmplt(qdev);
		if (status)
			/* No completion yet; keep polling until the
			 * jiffies deadline expires.
			 */
			continue;
		/* Process the event. If it's an AEN, it
		 * will be handled in-line or a worker
		 * will be spawned. If it's our completion
		 * we will catch it below.
		 */
		status = qlge_mpi_handler(qdev, mbcp);
		if (status)
			goto end;
		/* It's either the completion for our mailbox
		 * command complete or an AEN. If it's our
		 * completion then get out.
		 */
		if (((mbcp->mbox_out[0] & 0x0000f000) ==
		     MB_CMD_STS_GOOD) ||
		    ((mbcp->mbox_out[0] & 0x0000f000) ==
		     MB_CMD_STS_INTRMDT))
			goto done;
	} while (time_before(jiffies, count));
	netif_err(qdev, drv, qdev->ndev,
		  "Timed out waiting for mailbox complete.\n");
	status = -ETIMEDOUT;
	goto end;
done:
	/* Now we can clear the interrupt condition
	 * and look at our status.
	 */
	qlge_write32(qdev, CSR, CSR_CMD_CLR_R2PCI_INT);
	if (((mbcp->mbox_out[0] & 0x0000f000) !=
	     MB_CMD_STS_GOOD) &&
	    ((mbcp->mbox_out[0] & 0x0000f000) !=
	     MB_CMD_STS_INTRMDT)) {
		status = -EIO;
	}
end:
	/* End polled mode for MPI */
	qlge_write32(qdev, INTR_MASK, (INTR_MASK_PI << 16) | INTR_MASK_PI);
	mutex_unlock(&qdev->mpi_mutex);
	return status;
}
/* Query the MPI firmware version. Used for the driver banner and
 * for ethtool info.
 * Returns zero on success.
 */
int qlge_mb_about_fw(struct qlge_adapter *qdev)
{
	struct mbox_params mbc;
	int err;

	memset(&mbc, 0, sizeof(mbc));
	mbc.in_count = 1;
	mbc.out_count = 3;
	mbc.mbox_in[0] = MB_CMD_ABOUT_FW;

	err = qlge_mailbox_command(qdev, &mbc);
	if (err)
		return err;

	if (mbc.mbox_out[0] != MB_CMD_STS_GOOD) {
		netif_err(qdev, drv, qdev->ndev,
			  "Failed about firmware command\n");
		err = -EIO;
	}

	/* Store the firmware version (even on a bad status word,
	 * matching the command's historical behavior).
	 */
	qdev->fw_rev_id = mbc.mbox_out[1];

	return err;
}
/* Query the functional state of the MPI firmware.
 * Returns zero on success.
 */
int qlge_mb_get_fw_state(struct qlge_adapter *qdev)
{
	struct mbox_params mbc;
	int err;

	memset(&mbc, 0, sizeof(mbc));
	mbc.in_count = 1;
	mbc.out_count = 2;
	mbc.mbox_in[0] = MB_CMD_GET_FW_STATE;

	err = qlge_mailbox_command(qdev, &mbc);
	if (err)
		return err;

	if (mbc.mbox_out[0] != MB_CMD_STS_GOOD) {
		netif_err(qdev, drv, qdev->ndev,
			  "Failed Get Firmware State.\n");
		err = -EIO;
	}

	/* Bit zero set in mailbox word 1 means the firmware is
	 * running but not initialized. This should never happen,
	 * so treat it as an error.
	 */
	if (mbc.mbox_out[1] & 1) {
		netif_err(qdev, drv, qdev->ndev,
			  "Firmware waiting for initialization.\n");
		err = -EIO;
	}

	return err;
}
/* Send an ACK mailbox command to the firmware so it can continue
 * with the change requested through the IDC process.
 */
static int qlge_mb_idc_ack(struct qlge_adapter *qdev)
{
	struct mbox_params mbc;
	int i, err;

	memset(&mbc, 0, sizeof(mbc));
	mbc.in_count = 5;
	mbc.out_count = 1;
	mbc.mbox_in[0] = MB_CMD_IDC_ACK;
	/* Echo back words 1-4 of the pending IDC request. */
	for (i = 1; i <= 4; i++)
		mbc.mbox_in[i] = qdev->idc_mbc.mbox_out[i];

	err = qlge_mailbox_command(qdev, &mbc);
	if (err)
		return err;

	if (mbc.mbox_out[0] != MB_CMD_STS_GOOD) {
		netif_err(qdev, drv, qdev->ndev, "Failed IDC ACK send.\n");
		err = -EIO;
	}
	return err;
}
/* Push the current link settings and maximum frame size for this
 * port to the firmware.
 * Most likely will block.
 */
int qlge_mb_set_port_cfg(struct qlge_adapter *qdev)
{
	struct mbox_params mbc;
	int err;

	memset(&mbc, 0, sizeof(mbc));
	mbc.in_count = 3;
	mbc.out_count = 1;
	mbc.mbox_in[0] = MB_CMD_SET_PORT_CFG;
	mbc.mbox_in[1] = qdev->link_config;
	mbc.mbox_in[2] = qdev->max_frame_size;

	err = qlge_mailbox_command(qdev, &mbc);
	if (err)
		return err;

	if (mbc.mbox_out[0] == MB_CMD_STS_INTRMDT) {
		/* Intermediate status: the change completes later via
		 * the inter-device (IDC) handshake.
		 */
		netif_err(qdev, drv, qdev->ndev,
			  "Port Config sent, wait for IDC.\n");
	} else if (mbc.mbox_out[0] != MB_CMD_STS_GOOD) {
		netif_err(qdev, drv, qdev->ndev,
			  "Failed Set Port Configuration.\n");
		err = -EIO;
	}
	return err;
}
/* Ask the firmware to DMA a region of RISC RAM starting at @addr
 * into the host buffer at bus address @req_dma.
 */
static int qlge_mb_dump_ram(struct qlge_adapter *qdev, u64 req_dma, u32 addr,
			    u32 size)
{
	struct mbox_params mbc;
	int err;

	memset(&mbc, 0, sizeof(mbc));
	mbc.in_count = 9;
	mbc.out_count = 1;
	mbc.mbox_in[0] = MB_CMD_DUMP_RISC_RAM;
	mbc.mbox_in[1] = LSW(addr);
	mbc.mbox_in[2] = MSW(req_dma);
	mbc.mbox_in[3] = LSW(req_dma);
	mbc.mbox_in[4] = MSW(size);
	mbc.mbox_in[5] = LSW(size);
	mbc.mbox_in[6] = MSW(MSD(req_dma));
	mbc.mbox_in[7] = LSW(MSD(req_dma));
	mbc.mbox_in[8] = MSW(addr);

	err = qlge_mailbox_command(qdev, &mbc);
	if (err)
		return err;

	if (mbc.mbox_out[0] != MB_CMD_STS_GOOD) {
		netif_err(qdev, drv, qdev->ndev, "Failed to dump risc RAM.\n");
		err = -EIO;
	}
	return err;
}
/* Issue a mailbox command to dump RISC RAM.
 *
 * Allocates a coherent DMA bounce buffer, has the firmware fill it
 * with @word_count 32-bit words starting at @ram_addr, then copies
 * the result into @buf.
 *
 * Returns zero on success or a negative error code.
 */
int qlge_dump_risc_ram_area(struct qlge_adapter *qdev, void *buf,
			    u32 ram_addr, int word_count)
{
	int status;
	char *my_buf;
	dma_addr_t buf_dma;

	my_buf = dma_alloc_coherent(&qdev->pdev->dev,
				    word_count * sizeof(u32), &buf_dma,
				    GFP_ATOMIC);
	if (!my_buf)
		return -ENOMEM;	/* was -EIO; -ENOMEM is the conventional
				 * code for an allocation failure
				 */

	status = qlge_mb_dump_ram(qdev, buf_dma, ram_addr, word_count);
	if (!status)
		memcpy(buf, my_buf, word_count * sizeof(u32));

	dma_free_coherent(&qdev->pdev->dev, word_count * sizeof(u32), my_buf,
			  buf_dma);
	return status;
}
/* Fetch the link settings and maximum frame size for the current
 * port from the firmware and cache them in @qdev.
 * Most likely will block.
 */
int qlge_mb_get_port_cfg(struct qlge_adapter *qdev)
{
	struct mbox_params mbc;
	int err;

	memset(&mbc, 0, sizeof(mbc));
	mbc.in_count = 1;
	mbc.out_count = 3;
	mbc.mbox_in[0] = MB_CMD_GET_PORT_CFG;

	err = qlge_mailbox_command(qdev, &mbc);
	if (err)
		return err;

	if (mbc.mbox_out[0] != MB_CMD_STS_GOOD) {
		netif_err(qdev, drv, qdev->ndev,
			  "Failed Get Port Configuration.\n");
		return -EIO;
	}

	netif_printk(qdev, drv, KERN_DEBUG, qdev->ndev,
		     "Passed Get Port Configuration.\n");
	qdev->link_config = mbc.mbox_out[1];
	qdev->max_frame_size = mbc.mbox_out[2];
	return 0;
}
/* Program the firmware Wake-On-LAN mode word. */
int qlge_mb_wol_mode(struct qlge_adapter *qdev, u32 wol)
{
	struct mbox_params mbc;
	int err;

	memset(&mbc, 0, sizeof(mbc));
	mbc.in_count = 2;
	mbc.out_count = 1;
	mbc.mbox_in[0] = MB_CMD_SET_WOL_MODE;
	mbc.mbox_in[1] = wol;

	err = qlge_mailbox_command(qdev, &mbc);
	if (err)
		return err;

	if (mbc.mbox_out[0] != MB_CMD_STS_GOOD) {
		netif_err(qdev, drv, qdev->ndev, "Failed to set WOL mode.\n");
		err = -EIO;
	}
	return err;
}
/* Enable or disable magic-packet Wake-On-LAN. When enabling, the
 * device MAC address is loaded into mailbox words 1-6; when
 * disabling, the pattern 0,1,1,1,1,1 is loaded instead.
 */
int qlge_mb_wol_set_magic(struct qlge_adapter *qdev, u32 enable_wol)
{
	const u8 *addr = qdev->ndev->dev_addr;
	struct mbox_params mbc;
	int i, err;

	memset(&mbc, 0, sizeof(mbc));
	mbc.in_count = 8;
	mbc.out_count = 1;
	mbc.mbox_in[0] = MB_CMD_SET_WOL_MAGIC;
	if (enable_wol) {
		for (i = 0; i < 6; i++)
			mbc.mbox_in[i + 1] = (u32)addr[i];
		mbc.mbox_in[7] = 0;
	} else {
		mbc.mbox_in[1] = 0;
		for (i = 2; i <= 6; i++)
			mbc.mbox_in[i] = 1;
		mbc.mbox_in[7] = 0;
	}

	err = qlge_mailbox_command(qdev, &mbc);
	if (err)
		return err;

	if (mbc.mbox_out[0] != MB_CMD_STS_GOOD) {
		netif_err(qdev, drv, qdev->ndev, "Failed to set WOL mode.\n");
		err = -EIO;
	}
	return err;
}
/* IDC - Inter Device Communication...
 * Some firmware commands require consent of adjacent FCOE
 * function. This function waits for the OK, or a
 * counter-request for a little more time.
 * The firmware will complete the request if the other
 * function doesn't respond.
 *
 * Returns 0 on a good IDC completion, -ETIMEDOUT if the
 * completion never arrives, or -EIO on an unexpected event.
 */
static int qlge_idc_wait(struct qlge_adapter *qdev)
{
	int status = -ETIMEDOUT;
	struct mbox_params *mbcp = &qdev->idc_mbc;
	long wait_time;
	for (wait_time = 1 * HZ; wait_time;) {
		/* Wait here for the command to complete
		 * via the IDC process.
		 */
		wait_time =
			wait_for_completion_timeout(&qdev->ide_completion,
						    wait_time);
		if (!wait_time) {
			netif_err(qdev, drv, qdev->ndev, "IDC Timeout.\n");
			break;
		}
		/* Now examine the response from the IDC process.
		 * We might have a good completion or a request for
		 * more wait time.
		 */
		if (mbcp->mbox_out[0] == AEN_IDC_EXT) {
			netif_err(qdev, drv, qdev->ndev,
				  "IDC Time Extension from function.\n");
			/* Extend the deadline by the time requested in
			 * bits 11:8 of mailbox word 1 and keep waiting.
			 */
			wait_time += (mbcp->mbox_out[1] >> 8) & 0x0000000f;
		} else if (mbcp->mbox_out[0] == AEN_IDC_CMPLT) {
			netif_err(qdev, drv, qdev->ndev, "IDC Success.\n");
			status = 0;
			break;
		} else {
			netif_err(qdev, drv, qdev->ndev,
				  "IDC: Invalid State 0x%.04x.\n",
				  mbcp->mbox_out[0]);
			status = -EIO;
			break;
		}
	}
	return status;
}
/* Program the LED configuration through the firmware. */
int qlge_mb_set_led_cfg(struct qlge_adapter *qdev, u32 led_config)
{
	struct mbox_params mbc;
	int err;

	memset(&mbc, 0, sizeof(mbc));
	mbc.in_count = 2;
	mbc.out_count = 1;
	mbc.mbox_in[0] = MB_CMD_SET_LED_CFG;
	mbc.mbox_in[1] = led_config;

	err = qlge_mailbox_command(qdev, &mbc);
	if (err)
		return err;

	if (mbc.mbox_out[0] != MB_CMD_STS_GOOD) {
		netif_err(qdev, drv, qdev->ndev,
			  "Failed to set LED Configuration.\n");
		err = -EIO;
	}
	return err;
}
/* Read the LED configuration from the firmware and cache it in
 * qdev->led_config.
 */
int qlge_mb_get_led_cfg(struct qlge_adapter *qdev)
{
	struct mbox_params mbc;
	int err;

	memset(&mbc, 0, sizeof(mbc));
	mbc.in_count = 1;
	mbc.out_count = 2;
	mbc.mbox_in[0] = MB_CMD_GET_LED_CFG;

	err = qlge_mailbox_command(qdev, &mbc);
	if (err)
		return err;

	if (mbc.mbox_out[0] != MB_CMD_STS_GOOD) {
		netif_err(qdev, drv, qdev->ndev,
			  "Failed to get LED Configuration.\n");
		err = -EIO;
	} else {
		qdev->led_config = mbc.mbox_out[1];
	}
	return err;
}
/* Set the MPI management traffic control state. */
int qlge_mb_set_mgmnt_traffic_ctl(struct qlge_adapter *qdev, u32 control)
{
	struct mbox_params mbc;
	int err;

	memset(&mbc, 0, sizeof(mbc));
	/* NOTE(review): in_count of 1 suggests only mbox_in[0] is
	 * transferred, which would leave the control word below unsent —
	 * confirm against the mailbox transmit path (qlge_exec_mb_cmd).
	 */
	mbc.in_count = 1;
	mbc.out_count = 2;
	mbc.mbox_in[0] = MB_CMD_SET_MGMNT_TFK_CTL;
	mbc.mbox_in[1] = control;

	err = qlge_mailbox_command(qdev, &mbc);
	if (err)
		return err;

	if (mbc.mbox_out[0] == MB_CMD_STS_GOOD)
		return err;

	if (mbc.mbox_out[0] == MB_CMD_STS_INVLD_CMD) {
		netif_err(qdev, drv, qdev->ndev,
			  "Command not supported by firmware.\n");
		err = -EINVAL;
	} else if (mbc.mbox_out[0] == MB_CMD_STS_ERR) {
		/* This indicates that the firmware is
		 * already in the state we are trying to
		 * change it to.
		 */
		netif_err(qdev, drv, qdev->ndev,
			  "Command parameters make no change.\n");
	}
	return err;
}
/* Read the MPI management traffic control state into *control.
 * Returns a negative error code or the mailbox command status.
 */
static int qlge_mb_get_mgmnt_traffic_ctl(struct qlge_adapter *qdev, u32 *control)
{
	struct mbox_params mbc;
	struct mbox_params *mbcp = &mbc;
	int status;

	memset(mbcp, 0, sizeof(struct mbox_params));
	*control = 0;
	mbcp->in_count = 1;
	/* Bug fix: capture two output words so the control value in
	 * mailbox word 1 is actually read back (was out_count = 1).
	 */
	mbcp->out_count = 2;
	mbcp->mbox_in[0] = MB_CMD_GET_MGMNT_TFK_CTL;

	status = qlge_mailbox_command(qdev, mbcp);
	if (status)
		return status;

	if (mbcp->mbox_out[0] == MB_CMD_STS_GOOD) {
		/* Bug fix: the firmware response lives in mbox_out[1].
		 * The old code copied mbox_in[1], which was memset to
		 * zero above and never written, so *control was always
		 * 0 and qlge_wait_fifo_empty() could never see the
		 * FIFO-empty bit.
		 */
		*control = mbcp->mbox_out[1];
		return status;
	}

	if (mbcp->mbox_out[0] == MB_CMD_STS_INVLD_CMD) {
		netif_err(qdev, drv, qdev->ndev,
			  "Command not supported by firmware.\n");
		status = -EINVAL;
	} else if (mbcp->mbox_out[0] == MB_CMD_STS_ERR) {
		netif_err(qdev, drv, qdev->ndev,
			  "Failed to get MPI traffic control.\n");
		status = -EIO;
	}
	return status;
}
/* Poll (up to six times, 100 ms apart) until both the NIC FIFO and
 * the MPI management FIFO report empty.
 * Returns 0 when both are empty, -ETIMEDOUT otherwise.
 */
int qlge_wait_fifo_empty(struct qlge_adapter *qdev)
{
	u32 mgmnt_fifo_empty;
	u32 nic_fifo_empty;
	int tries = 6;

	while (tries--) {
		nic_fifo_empty = qlge_read32(qdev, STS) & STS_NFE;
		qlge_mb_get_mgmnt_traffic_ctl(qdev, &mgmnt_fifo_empty);
		mgmnt_fifo_empty &= MB_GET_MPI_TFK_FIFO_EMPTY;
		if (nic_fifo_empty && mgmnt_fifo_empty)
			return 0;
		msleep(100);
	}

	return -ETIMEDOUT;
}
/* API called in work thread context to set new TX/RX
 * maximum frame size values to match MTU.
 */
static int qlge_set_port_cfg(struct qlge_adapter *qdev)
{
	int err;

	err = qlge_mb_set_port_cfg(qdev);
	if (err)
		return err;

	/* The change completes via the inter-device handshake. */
	return qlge_idc_wait(qdev);
}
/* The following routines are worker threads that process
* events that may sleep waiting for completion.
*/
/* This worker gets the maximum TX and RX frame size values from
 * the firmware and, if they don't already carry the jumbo/default
 * settings the driver wants, pushes updated values.
 */
void qlge_mpi_port_cfg_work(struct work_struct *work)
{
	struct qlge_adapter *qdev =
		container_of(work, struct qlge_adapter, mpi_port_cfg_work.work);
	int err;

	err = qlge_mb_get_port_cfg(qdev);
	if (err) {
		netif_err(qdev, drv, qdev->ndev,
			  "Bug: Failed to get port config data.\n");
		qlge_queue_fw_error(qdev);
	} else if (!(qdev->link_config & CFG_JUMBO_FRAME_SIZE &&
		     qdev->max_frame_size == CFG_DEFAULT_MAX_FRAME_SIZE)) {
		/* Firmware values don't match; push the ones we want. */
		qdev->link_config |= CFG_JUMBO_FRAME_SIZE;
		qdev->max_frame_size = CFG_DEFAULT_MAX_FRAME_SIZE;
		err = qlge_set_port_cfg(qdev);
		if (err) {
			netif_err(qdev, drv, qdev->ndev,
				  "Bug: Failed to set port config data.\n");
			qlge_queue_fw_error(qdev);
		}
	}

	clear_bit(QL_PORT_CFG, &qdev->flags);
}
/* Send the IDC ACK now if the requesting function gave us a
 * timeout window to respond in; otherwise just note that no ACK
 * is needed. (Extracted: this logic was duplicated verbatim in
 * two case groups of qlge_mpi_idc_work().)
 */
static void qlge_mpi_idc_ack_if_required(struct qlge_adapter *qdev, int timeout)
{
	if (timeout) {
		if (qlge_mb_idc_ack(qdev))
			netif_err(qdev, drv, qdev->ndev,
				  "Bug: No pending IDC!\n");
	} else {
		netif_printk(qdev, drv, KERN_DEBUG, qdev->ndev,
			     "IDC ACK not required\n");
	}
}

/* Process an inter-device request. This is issued by
 * the firmware in response to another function requesting
 * a change to the port. We set a flag to indicate a change
 * has been made and then send a mailbox command ACKing
 * the change request.
 */
void qlge_mpi_idc_work(struct work_struct *work)
{
	struct qlge_adapter *qdev =
		container_of(work, struct qlge_adapter, mpi_idc_work.work);
	struct mbox_params *mbcp = &qdev->idc_mbc;
	u32 aen;
	int timeout;

	/* The sub-command is in bits 31:16, the ACK timeout window
	 * in bits 11:8 of mailbox word 1.
	 */
	aen = mbcp->mbox_out[1] >> 16;
	timeout = (mbcp->mbox_out[1] >> 8) & 0xf;

	switch (aen) {
	default:
		netif_err(qdev, drv, qdev->ndev,
			  "Bug: Unhandled IDC action.\n");
		break;
	case MB_CMD_PORT_RESET:
	case MB_CMD_STOP_FW:
		qlge_link_off(qdev);
		fallthrough;
	case MB_CMD_SET_PORT_CFG:
		/* Signal the resulting link up AEN
		 * that the frame routing and mac addr
		 * needs to be set.
		 */
		set_bit(QL_CAM_RT_SET, &qdev->flags);
		qlge_mpi_idc_ack_if_required(qdev, timeout);
		break;

	/* These sub-commands issued by another (FCoE)
	 * function are requesting to do an operation
	 * on the shared resource (MPI environment).
	 * We currently don't issue these so we just
	 * ACK the request.
	 */
	case MB_CMD_IOP_RESTART_MPI:
	case MB_CMD_IOP_PREP_LINK_DOWN:
		/* Drop the link, reload the routing
		 * table when link comes up.
		 */
		qlge_link_off(qdev);
		set_bit(QL_CAM_RT_SET, &qdev->flags);
		fallthrough;
	case MB_CMD_IOP_DVR_START:
	case MB_CMD_IOP_FLASH_ACC:
	case MB_CMD_IOP_CORE_DUMP_MPI:
	case MB_CMD_IOP_PREP_UPDATE_MPI:
	case MB_CMD_IOP_COMP_UPDATE_MPI:
	case MB_CMD_IOP_NONE:	/* an IDC without params */
		qlge_mpi_idc_ack_if_required(qdev, timeout);
		break;
	}
}
/* Worker: drain and handle pending MPI events while the STS
 * register reports an outstanding processor interrupt.
 */
void qlge_mpi_work(struct work_struct *work)
{
	struct qlge_adapter *qdev =
		container_of(work, struct qlge_adapter, mpi_work.work);
	struct mbox_params mbc;

	mutex_lock(&qdev->mpi_mutex);
	/* Begin polled mode for MPI */
	qlge_write32(qdev, INTR_MASK, (INTR_MASK_PI << 16));

	while (qlge_read32(qdev, STS) & STS_PI) {
		memset(&mbc, 0, sizeof(mbc));
		mbc.out_count = 1;
		/* Don't continue if an async event
		 * did not complete properly.
		 */
		if (qlge_mpi_handler(qdev, &mbc))
			break;
	}

	/* End polled mode for MPI */
	qlge_write32(qdev, INTR_MASK, (INTR_MASK_PI << 16) | INTR_MASK_PI);
	mutex_unlock(&qdev->mpi_mutex);
}
/* Worker: soft-reset the MPI RISC processor. */
void qlge_mpi_reset_work(struct work_struct *work)
{
	struct qlge_adapter *qdev =
		container_of(work, struct qlge_adapter, mpi_reset_work.work);
	/* Quiesce the other MPI workers so none of them run while the
	 * RISC is being reset.
	 */
	cancel_delayed_work_sync(&qdev->mpi_work);
	cancel_delayed_work_sync(&qdev->mpi_port_cfg_work);
	cancel_delayed_work_sync(&qdev->mpi_idc_work);
	/* If we're not the dominant NIC function,
	 * then there is nothing to do.
	 */
	if (!qlge_own_firmware(qdev)) {
		netif_err(qdev, drv, qdev->ndev, "Don't own firmware!\n");
		return;
	}
	qlge_soft_reset_mpi_risc(qdev);
}
/* End of drivers/staging/qlge/qlge_mpi.c; drivers/staging/axis-fifo/axis-fifo.c follows. */
// SPDX-License-Identifier: GPL-2.0
/*
* Xilinx AXIS FIFO: interface to the Xilinx AXI-Stream FIFO IP core
*
* Copyright (C) 2018 Jacob Feder
*
* Authors: Jacob Feder <[email protected]>
*
* See Xilinx PG080 document for IP details
*/
/* ----------------------------
* includes
* ----------------------------
*/
#include <linux/kernel.h>
#include <linux/of.h>
#include <linux/platform_device.h>
#include <linux/wait.h>
#include <linux/mutex.h>
#include <linux/device.h>
#include <linux/cdev.h>
#include <linux/init.h>
#include <linux/module.h>
#include <linux/slab.h>
#include <linux/io.h>
#include <linux/moduleparam.h>
#include <linux/interrupt.h>
#include <linux/param.h>
#include <linux/fs.h>
#include <linux/types.h>
#include <linux/uaccess.h>
#include <linux/jiffies.h>
#include <linux/miscdevice.h>
/* ----------------------------
* driver parameters
* ----------------------------
*/
#define DRIVER_NAME "axis_fifo"
#define READ_BUF_SIZE 128U /* read buffer length in words */
#define WRITE_BUF_SIZE 128U /* write buffer length in words */
/* ----------------------------
* IP register offsets
* ----------------------------
*/
#define XLLF_ISR_OFFSET 0x00000000 /* Interrupt Status */
#define XLLF_IER_OFFSET 0x00000004 /* Interrupt Enable */
#define XLLF_TDFR_OFFSET 0x00000008 /* Transmit Reset */
#define XLLF_TDFV_OFFSET 0x0000000c /* Transmit Vacancy */
#define XLLF_TDFD_OFFSET 0x00000010 /* Transmit Data */
#define XLLF_TLR_OFFSET 0x00000014 /* Transmit Length */
#define XLLF_RDFR_OFFSET 0x00000018 /* Receive Reset */
#define XLLF_RDFO_OFFSET 0x0000001c /* Receive Occupancy */
#define XLLF_RDFD_OFFSET 0x00000020 /* Receive Data */
#define XLLF_RLR_OFFSET 0x00000024 /* Receive Length */
#define XLLF_SRR_OFFSET 0x00000028 /* Local Link Reset */
#define XLLF_TDR_OFFSET 0x0000002C /* Transmit Destination */
#define XLLF_RDR_OFFSET 0x00000030 /* Receive Destination */
/* ----------------------------
* reset register masks
* ----------------------------
*/
#define XLLF_RDFR_RESET_MASK 0x000000a5 /* receive reset value */
#define XLLF_TDFR_RESET_MASK 0x000000a5 /* Transmit reset value */
#define XLLF_SRR_RESET_MASK 0x000000a5 /* Local Link reset value */
/* ----------------------------
* interrupt masks
* ----------------------------
*/
#define XLLF_INT_RPURE_MASK 0x80000000 /* Receive under-read */
#define XLLF_INT_RPORE_MASK 0x40000000 /* Receive over-read */
#define XLLF_INT_RPUE_MASK 0x20000000 /* Receive underrun (empty) */
#define XLLF_INT_TPOE_MASK 0x10000000 /* Transmit overrun */
#define XLLF_INT_TC_MASK 0x08000000 /* Transmit complete */
#define XLLF_INT_RC_MASK 0x04000000 /* Receive complete */
#define XLLF_INT_TSE_MASK 0x02000000 /* Transmit length mismatch */
#define XLLF_INT_TRC_MASK 0x01000000 /* Transmit reset complete */
#define XLLF_INT_RRC_MASK 0x00800000 /* Receive reset complete */
#define XLLF_INT_TFPF_MASK 0x00400000 /* Tx FIFO Programmable Full */
#define XLLF_INT_TFPE_MASK 0x00200000 /* Tx FIFO Programmable Empty */
#define XLLF_INT_RFPF_MASK 0x00100000 /* Rx FIFO Programmable Full */
#define XLLF_INT_RFPE_MASK 0x00080000 /* Rx FIFO Programmable Empty */
#define XLLF_INT_ALL_MASK 0xfff80000 /* All the ints */
#define XLLF_INT_ERROR_MASK 0xf2000000 /* Error status ints */
#define XLLF_INT_RXERROR_MASK 0xe0000000 /* Receive Error status ints */
#define XLLF_INT_TXERROR_MASK 0x12000000 /* Transmit Error status ints */
/* ----------------------------
* globals
* ----------------------------
*/
static long read_timeout = 1000; /* ms to wait before read() times out */
static long write_timeout = 1000; /* ms to wait before write() times out */
/* ----------------------------
* module command-line arguments
* ----------------------------
*/
module_param(read_timeout, long, 0444);
MODULE_PARM_DESC(read_timeout, "ms to wait before blocking read() timing out; set to -1 for no timeout");
module_param(write_timeout, long, 0444);
MODULE_PARM_DESC(write_timeout, "ms to wait before blocking write() timing out; set to -1 for no timeout");
/* ----------------------------
* types
* ----------------------------
*/
/* Per-device state for one AXI-Stream FIFO instance. */
struct axis_fifo {
	int irq; /* interrupt */
	void __iomem *base_addr; /* kernel space memory */
	unsigned int rx_fifo_depth; /* max words in the receive fifo */
	unsigned int tx_fifo_depth; /* max words in the transmit fifo */
	int has_rx_fifo; /* whether the IP has the rx fifo enabled */
	int has_tx_fifo; /* whether the IP has the tx fifo enabled */
	wait_queue_head_t read_queue; /* wait queue for asynchronous read */
	struct mutex read_lock; /* lock for reading */
	wait_queue_head_t write_queue; /* wait queue for asynchronous write */
	struct mutex write_lock; /* lock for writing */
	unsigned int write_flags; /* write file flags */
	unsigned int read_flags; /* read file flags */
	struct device *dt_device; /* device created from the device tree */
	struct miscdevice miscdev; /* character device registration */
};
/* ----------------------------
* sysfs entries
* ----------------------------
*/
/* Parse a number from a sysfs store buffer and write it to the IP
 * register at @addr_offset. Returns @count or a parse error code.
 */
static ssize_t sysfs_write(struct device *dev, const char *buf,
			   size_t count, unsigned int addr_offset)
{
	struct axis_fifo *fifo = dev_get_drvdata(dev);
	unsigned long val;
	int err;

	err = kstrtoul(buf, 0, &val);
	if (err < 0)
		return err;

	iowrite32(val, fifo->base_addr + addr_offset);

	return count;
}
/* Read the IP register at @addr_offset and format it into the
 * sysfs show buffer. Returns the number of bytes written to @buf.
 */
static ssize_t sysfs_read(struct device *dev, char *buf,
			  unsigned int addr_offset)
{
	struct axis_fifo *fifo = dev_get_drvdata(dev);
	unsigned int read_val;

	read_val = ioread32(fifo->base_addr + addr_offset);
	/* sysfs_emit() is the recommended way to format sysfs show()
	 * output: it bounds the write to PAGE_SIZE, replacing the old
	 * snprintf-into-a-temp-buffer + memcpy dance.
	 */
	return sysfs_emit(buf, "0x%x\n", read_val);
}
/* sysfs show/store wrappers, one per IP register. Registers that
 * are read-only or write-only in hardware get only the
 * corresponding accessor; all forward to sysfs_read()/sysfs_write()
 * with the register offset.
 */
static ssize_t isr_store(struct device *dev, struct device_attribute *attr,
			 const char *buf, size_t count)
{
	return sysfs_write(dev, buf, count, XLLF_ISR_OFFSET);
}
static ssize_t isr_show(struct device *dev,
			struct device_attribute *attr, char *buf)
{
	return sysfs_read(dev, buf, XLLF_ISR_OFFSET);
}
static DEVICE_ATTR_RW(isr);
static ssize_t ier_store(struct device *dev, struct device_attribute *attr,
			 const char *buf, size_t count)
{
	return sysfs_write(dev, buf, count, XLLF_IER_OFFSET);
}
static ssize_t ier_show(struct device *dev,
			struct device_attribute *attr, char *buf)
{
	return sysfs_read(dev, buf, XLLF_IER_OFFSET);
}
static DEVICE_ATTR_RW(ier);
static ssize_t tdfr_store(struct device *dev, struct device_attribute *attr,
			  const char *buf, size_t count)
{
	return sysfs_write(dev, buf, count, XLLF_TDFR_OFFSET);
}
static DEVICE_ATTR_WO(tdfr);
static ssize_t tdfv_show(struct device *dev,
			 struct device_attribute *attr, char *buf)
{
	return sysfs_read(dev, buf, XLLF_TDFV_OFFSET);
}
static DEVICE_ATTR_RO(tdfv);
static ssize_t tdfd_store(struct device *dev, struct device_attribute *attr,
			  const char *buf, size_t count)
{
	return sysfs_write(dev, buf, count, XLLF_TDFD_OFFSET);
}
static DEVICE_ATTR_WO(tdfd);
static ssize_t tlr_store(struct device *dev, struct device_attribute *attr,
			 const char *buf, size_t count)
{
	return sysfs_write(dev, buf, count, XLLF_TLR_OFFSET);
}
static DEVICE_ATTR_WO(tlr);
static ssize_t rdfr_store(struct device *dev, struct device_attribute *attr,
			  const char *buf, size_t count)
{
	return sysfs_write(dev, buf, count, XLLF_RDFR_OFFSET);
}
static DEVICE_ATTR_WO(rdfr);
static ssize_t rdfo_show(struct device *dev,
			 struct device_attribute *attr, char *buf)
{
	return sysfs_read(dev, buf, XLLF_RDFO_OFFSET);
}
static DEVICE_ATTR_RO(rdfo);
static ssize_t rdfd_show(struct device *dev,
			 struct device_attribute *attr, char *buf)
{
	return sysfs_read(dev, buf, XLLF_RDFD_OFFSET);
}
static DEVICE_ATTR_RO(rdfd);
static ssize_t rlr_show(struct device *dev,
			struct device_attribute *attr, char *buf)
{
	return sysfs_read(dev, buf, XLLF_RLR_OFFSET);
}
static DEVICE_ATTR_RO(rlr);
static ssize_t srr_store(struct device *dev, struct device_attribute *attr,
			 const char *buf, size_t count)
{
	return sysfs_write(dev, buf, count, XLLF_SRR_OFFSET);
}
static DEVICE_ATTR_WO(srr);
static ssize_t tdr_store(struct device *dev, struct device_attribute *attr,
			 const char *buf, size_t count)
{
	return sysfs_write(dev, buf, count, XLLF_TDR_OFFSET);
}
static DEVICE_ATTR_WO(tdr);
static ssize_t rdr_show(struct device *dev,
			struct device_attribute *attr, char *buf)
{
	return sysfs_read(dev, buf, XLLF_RDR_OFFSET);
}
static DEVICE_ATTR_RO(rdr);
/* All register attributes, exposed under the "ip_registers" group. */
static struct attribute *axis_fifo_attrs[] = {
	&dev_attr_isr.attr,
	&dev_attr_ier.attr,
	&dev_attr_tdfr.attr,
	&dev_attr_tdfv.attr,
	&dev_attr_tdfd.attr,
	&dev_attr_tlr.attr,
	&dev_attr_rdfr.attr,
	&dev_attr_rdfo.attr,
	&dev_attr_rdfd.attr,
	&dev_attr_rlr.attr,
	&dev_attr_srr.attr,
	&dev_attr_tdr.attr,
	&dev_attr_rdr.attr,
	NULL,
};
static const struct attribute_group axis_fifo_attrs_group = {
	.name = "ip_registers",
	.attrs = axis_fifo_attrs,
};
static const struct attribute_group *axis_fifo_attrs_groups[] = {
	&axis_fifo_attrs_group,
	NULL,
};
/* ----------------------------
* implementation
* ----------------------------
*/
/* Reset the FIFO IP core: write the reset keys to the local-link,
 * transmit and receive reset registers, re-enable the interrupts
 * this driver handles, then clear any stale interrupt status.
 */
static void reset_ip_core(struct axis_fifo *fifo)
{
	iowrite32(XLLF_SRR_RESET_MASK, fifo->base_addr + XLLF_SRR_OFFSET);
	iowrite32(XLLF_TDFR_RESET_MASK, fifo->base_addr + XLLF_TDFR_OFFSET);
	iowrite32(XLLF_RDFR_RESET_MASK, fifo->base_addr + XLLF_RDFR_OFFSET);
	/* enable transmit/receive completion plus the error interrupts
	 * (under/over-read, underrun, overrun, length mismatch)
	 */
	iowrite32(XLLF_INT_TC_MASK | XLLF_INT_RC_MASK | XLLF_INT_RPURE_MASK |
		  XLLF_INT_RPORE_MASK | XLLF_INT_RPUE_MASK |
		  XLLF_INT_TPOE_MASK | XLLF_INT_TSE_MASK,
		  fifo->base_addr + XLLF_IER_OFFSET);
	/* clear all pending interrupt status bits (see PG080 for the
	 * ISR write semantics)
	 */
	iowrite32(XLLF_INT_ALL_MASK, fifo->base_addr + XLLF_ISR_OFFSET);
}
/**
 * axis_fifo_read() - Read a packet from AXIS-FIFO character device.
 * @f: Open file.
 * @buf: User space buffer to read to.
 * @len: User space buffer length.
 * @off: Buffer offset.
 *
 * As defined by the device's documentation, we need to check the device's
 * occupancy before reading the length register and then the data. All these
 * operations must be executed atomically, in order and one after the other
 * without missing any.
 *
 * Returns the number of bytes read from the device or negative error code
 * on failure.
 */
static ssize_t axis_fifo_read(struct file *f, char __user *buf,
			      size_t len, loff_t *off)
{
	struct axis_fifo *fifo = (struct axis_fifo *)f->private_data;
	size_t bytes_available;
	unsigned int words_available;
	unsigned int copied;
	unsigned int copy;
	unsigned int i;
	int ret;
	u32 tmp_buf[READ_BUF_SIZE];
	if (fifo->read_flags & O_NONBLOCK) {
		/*
		 * Device opened in non-blocking mode. Try to lock it and then
		 * check if any packet is available.
		 */
		if (!mutex_trylock(&fifo->read_lock))
			return -EAGAIN;
		/* RDFO (receive occupancy) of zero means no packet yet. */
		if (!ioread32(fifo->base_addr + XLLF_RDFO_OFFSET)) {
			ret = -EAGAIN;
			goto end_unlock;
		}
	} else {
		/* opened in blocking mode
		 * wait for a packet available interrupt (or timeout)
		 * if nothing is currently available
		 */
		mutex_lock(&fifo->read_lock);
		ret = wait_event_interruptible_timeout(fifo->read_queue,
			ioread32(fifo->base_addr + XLLF_RDFO_OFFSET),
			read_timeout);
		if (ret <= 0) {
			/* 0 means timeout; negative means a signal or an
			 * internal error.
			 */
			if (ret == 0) {
				ret = -EAGAIN;
			} else if (ret != -ERESTARTSYS) {
				dev_err(fifo->dt_device, "wait_event_interruptible_timeout() error in read (ret=%i)\n",
					ret);
			}
			goto end_unlock;
		}
	}
	/* RLR gives the length in bytes of the packet at the head of
	 * the receive FIFO.
	 */
	bytes_available = ioread32(fifo->base_addr + XLLF_RLR_OFFSET);
	if (!bytes_available) {
		dev_err(fifo->dt_device, "received a packet of length 0 - fifo core will be reset\n");
		reset_ip_core(fifo);
		ret = -EIO;
		goto end_unlock;
	}
	if (bytes_available > len) {
		/* The whole packet must be consumed at once; a partial
		 * read cannot be resumed, so reset the core.
		 */
		dev_err(fifo->dt_device, "user read buffer too small (available bytes=%zu user buffer bytes=%zu) - fifo core will be reset\n",
			bytes_available, len);
		reset_ip_core(fifo);
		ret = -EINVAL;
		goto end_unlock;
	}
	if (bytes_available % sizeof(u32)) {
		/* this probably can't happen unless IP
		 * registers were previously mishandled
		 */
		dev_err(fifo->dt_device, "received a packet that isn't word-aligned - fifo core will be reset\n");
		reset_ip_core(fifo);
		ret = -EIO;
		goto end_unlock;
	}
	words_available = bytes_available / sizeof(u32);
	/* read data into an intermediate buffer, copying the contents
	 * to userspace when the buffer is full
	 */
	copied = 0;
	while (words_available > 0) {
		copy = min(words_available, READ_BUF_SIZE);
		for (i = 0; i < copy; i++) {
			tmp_buf[i] = ioread32(fifo->base_addr +
					      XLLF_RDFD_OFFSET);
		}
		if (copy_to_user(buf + copied * sizeof(u32), tmp_buf,
				 copy * sizeof(u32))) {
			reset_ip_core(fifo);
			ret = -EFAULT;
			goto end_unlock;
		}
		copied += copy;
		words_available -= copy;
	}
	ret = bytes_available;
end_unlock:
	mutex_unlock(&fifo->read_lock);
	return ret;
}
/**
 * axis_fifo_write() - Write buffer to AXIS-FIFO character device.
 * @f: Open file.
 * @buf: User space buffer to write to the device.
 * @len: User space buffer length.
 * @off: Buffer offset.
 *
 * As defined by the device's documentation, we need to write to the device's
 * data buffer then to the device's packet length register atomically. Also,
 * we need to lock before checking if the device has available space to avoid
 * any concurrency issue.
 *
 * Returns the number of bytes written to the device or negative error code
 * on failure.
 */
static ssize_t axis_fifo_write(struct file *f, const char __user *buf,
			       size_t len, loff_t *off)
{
	struct axis_fifo *fifo = (struct axis_fifo *)f->private_data;
	unsigned int words_to_write;
	unsigned int copied;
	unsigned int copy;
	unsigned int i;
	int ret;
	u32 tmp_buf[WRITE_BUF_SIZE];
	/* the IP only transfers whole 32-bit words */
	if (len % sizeof(u32)) {
		dev_err(fifo->dt_device,
			"tried to send a packet that isn't word-aligned\n");
		return -EINVAL;
	}
	words_to_write = len / sizeof(u32);
	if (!words_to_write) {
		dev_err(fifo->dt_device,
			"tried to send a packet of length 0\n");
		return -EINVAL;
	}
	/* a packet larger than the TX fifo could never fit, even if empty */
	if (words_to_write > fifo->tx_fifo_depth) {
		dev_err(fifo->dt_device, "tried to write more words [%u] than slots in the fifo buffer [%u]\n",
			words_to_write, fifo->tx_fifo_depth);
		return -EINVAL;
	}
	if (fifo->write_flags & O_NONBLOCK) {
		/*
		 * Device opened in non-blocking mode. Try to lock it and then
		 * check if there is any room to write the given buffer.
		 */
		if (!mutex_trylock(&fifo->write_lock))
			return -EAGAIN;
		/* TDFV = transmit data fifo vacancy (free word slots) */
		if (words_to_write > ioread32(fifo->base_addr +
					      XLLF_TDFV_OFFSET)) {
			ret = -EAGAIN;
			goto end_unlock;
		}
	} else {
		/* opened in blocking mode */
		/* wait for an interrupt (or timeout) if there isn't
		 * currently enough room in the fifo
		 */
		mutex_lock(&fifo->write_lock);
		ret = wait_event_interruptible_timeout(fifo->write_queue,
			ioread32(fifo->base_addr + XLLF_TDFV_OFFSET)
				 >= words_to_write,
			write_timeout);
		if (ret <= 0) {
			/* 0 = timed out, <0 = interrupted or error */
			if (ret == 0) {
				ret = -EAGAIN;
			} else if (ret != -ERESTARTSYS) {
				dev_err(fifo->dt_device, "wait_event_interruptible_timeout() error in write (ret=%i)\n",
					ret);
			}
			goto end_unlock;
		}
	}
	/* write data from an intermediate buffer into the fifo IP, refilling
	 * the buffer with userspace data as needed
	 */
	copied = 0;
	while (words_to_write > 0) {
		copy = min(words_to_write, WRITE_BUF_SIZE);
		if (copy_from_user(tmp_buf, buf + copied * sizeof(u32),
				   copy * sizeof(u32))) {
			/* partial data may already be in the fifo: reset it */
			reset_ip_core(fifo);
			ret = -EFAULT;
			goto end_unlock;
		}
		for (i = 0; i < copy; i++)
			iowrite32(tmp_buf[i], fifo->base_addr +
				  XLLF_TDFD_OFFSET);
		copied += copy;
		words_to_write -= copy;
	}
	ret = copied * sizeof(u32);
	/* write packet size to fifo */
	iowrite32(ret, fifo->base_addr + XLLF_TLR_OFFSET);
end_unlock:
	mutex_unlock(&fifo->write_lock);
	return ret;
}
/* Interrupt handler: service and acknowledge every pending interrupt
 * source until ISR & IER reads clear. Each interrupt is acked by
 * writing its mask bit back to the ISR (write-one-to-clear).
 */
static irqreturn_t axis_fifo_irq(int irq, void *dw)
{
	struct axis_fifo *fifo = (struct axis_fifo *)dw;
	unsigned int pending_interrupts;
	do {
		/* only consider sources that are both enabled and raised */
		pending_interrupts = ioread32(fifo->base_addr +
					      XLLF_IER_OFFSET) &
					      ioread32(fifo->base_addr
						       + XLLF_ISR_OFFSET);
		if (pending_interrupts & XLLF_INT_RC_MASK) {
			/* packet received */
			/* wake the reader process if it is waiting */
			wake_up(&fifo->read_queue);
			/* clear interrupt */
			iowrite32(XLLF_INT_RC_MASK & XLLF_INT_ALL_MASK,
				  fifo->base_addr + XLLF_ISR_OFFSET);
		} else if (pending_interrupts & XLLF_INT_TC_MASK) {
			/* packet sent */
			/* wake the writer process if it is waiting */
			wake_up(&fifo->write_queue);
			iowrite32(XLLF_INT_TC_MASK & XLLF_INT_ALL_MASK,
				  fifo->base_addr + XLLF_ISR_OFFSET);
		} else if (pending_interrupts & XLLF_INT_TFPF_MASK) {
			/* transmit fifo programmable full */
			iowrite32(XLLF_INT_TFPF_MASK & XLLF_INT_ALL_MASK,
				  fifo->base_addr + XLLF_ISR_OFFSET);
		} else if (pending_interrupts & XLLF_INT_TFPE_MASK) {
			/* transmit fifo programmable empty */
			iowrite32(XLLF_INT_TFPE_MASK & XLLF_INT_ALL_MASK,
				  fifo->base_addr + XLLF_ISR_OFFSET);
		} else if (pending_interrupts & XLLF_INT_RFPF_MASK) {
			/* receive fifo programmable full */
			iowrite32(XLLF_INT_RFPF_MASK & XLLF_INT_ALL_MASK,
				  fifo->base_addr + XLLF_ISR_OFFSET);
		} else if (pending_interrupts & XLLF_INT_RFPE_MASK) {
			/* receive fifo programmable empty */
			iowrite32(XLLF_INT_RFPE_MASK & XLLF_INT_ALL_MASK,
				  fifo->base_addr + XLLF_ISR_OFFSET);
		} else if (pending_interrupts & XLLF_INT_TRC_MASK) {
			/* transmit reset complete interrupt */
			iowrite32(XLLF_INT_TRC_MASK & XLLF_INT_ALL_MASK,
				  fifo->base_addr + XLLF_ISR_OFFSET);
		} else if (pending_interrupts & XLLF_INT_RRC_MASK) {
			/* receive reset complete interrupt */
			iowrite32(XLLF_INT_RRC_MASK & XLLF_INT_ALL_MASK,
				  fifo->base_addr + XLLF_ISR_OFFSET);
		} else if (pending_interrupts & XLLF_INT_RPURE_MASK) {
			/* receive fifo under-read error interrupt */
			dev_err(fifo->dt_device,
				"receive under-read interrupt\n");
			iowrite32(XLLF_INT_RPURE_MASK & XLLF_INT_ALL_MASK,
				  fifo->base_addr + XLLF_ISR_OFFSET);
		} else if (pending_interrupts & XLLF_INT_RPORE_MASK) {
			/* receive over-read error interrupt */
			dev_err(fifo->dt_device,
				"receive over-read interrupt\n");
			iowrite32(XLLF_INT_RPORE_MASK & XLLF_INT_ALL_MASK,
				  fifo->base_addr + XLLF_ISR_OFFSET);
		} else if (pending_interrupts & XLLF_INT_RPUE_MASK) {
			/* receive underrun error interrupt */
			dev_err(fifo->dt_device,
				"receive underrun error interrupt\n");
			iowrite32(XLLF_INT_RPUE_MASK & XLLF_INT_ALL_MASK,
				  fifo->base_addr + XLLF_ISR_OFFSET);
		} else if (pending_interrupts & XLLF_INT_TPOE_MASK) {
			/* transmit overrun error interrupt */
			dev_err(fifo->dt_device,
				"transmit overrun error interrupt\n");
			iowrite32(XLLF_INT_TPOE_MASK & XLLF_INT_ALL_MASK,
				  fifo->base_addr + XLLF_ISR_OFFSET);
		} else if (pending_interrupts & XLLF_INT_TSE_MASK) {
			/* transmit length mismatch error interrupt */
			dev_err(fifo->dt_device,
				"transmit length mismatch error interrupt\n");
			iowrite32(XLLF_INT_TSE_MASK & XLLF_INT_ALL_MASK,
				  fifo->base_addr + XLLF_ISR_OFFSET);
		} else if (pending_interrupts) {
			/* unknown interrupt type */
			dev_err(fifo->dt_device,
				"unknown interrupt(s) 0x%x\n",
				pending_interrupts);
			/* ack everything so we don't loop forever */
			iowrite32(XLLF_INT_ALL_MASK,
				  fifo->base_addr + XLLF_ISR_OFFSET);
		}
	} while (pending_interrupts);
	return IRQ_HANDLED;
}
/* Open the character device: validate the requested access mode against
 * the fifos this instance actually has, and stash the per-direction open
 * flags (used later to detect O_NONBLOCK in read/write).
 */
static int axis_fifo_open(struct inode *inod, struct file *f)
{
	struct axis_fifo *fifo = container_of(f->private_data,
					      struct axis_fifo, miscdev);
	unsigned int mode = f->f_flags & O_ACCMODE;

	/* replace the miscdev pointer with our device struct */
	f->private_data = fifo;
	if (mode == O_WRONLY || mode == O_RDWR) {
		if (!fifo->has_tx_fifo) {
			dev_err(fifo->dt_device, "tried to open device for write but the transmit fifo is disabled\n");
			return -EPERM;
		}
		fifo->write_flags = f->f_flags;
	}
	if (mode == O_RDONLY || mode == O_RDWR) {
		if (!fifo->has_rx_fifo) {
			dev_err(fifo->dt_device, "tried to open device for read but the receive fifo is disabled\n");
			return -EPERM;
		}
		fifo->read_flags = f->f_flags;
	}
	return 0;
}
/* Release the character device; nothing to free, just drop the
 * reference stashed by axis_fifo_open().
 */
static int axis_fifo_close(struct inode *inod, struct file *f)
{
	f->private_data = NULL;
	return 0;
}
/* File operations for the misc character device; no llseek/mmap/ioctl,
 * the device is a pure packet stream.
 */
static const struct file_operations fops = {
	.owner = THIS_MODULE,
	.open = axis_fifo_open,
	.release = axis_fifo_close,
	.read = axis_fifo_read,
	.write = axis_fifo_write
};
/* Read the named u32 property from the device tree node into *var.
 * Returns 0 on success or the of_property_read_u32() error code.
 * Fix: the error message was missing its trailing newline, which lets
 * the next printk run into the same log line.
 */
static int get_dts_property(struct axis_fifo *fifo,
			    char *name, unsigned int *var)
{
	int rc;
	rc = of_property_read_u32(fifo->dt_device->of_node, name, var);
	if (rc) {
		dev_err(fifo->dt_device, "couldn't read IP dts property '%s'\n",
			name);
		return rc;
	}
	dev_dbg(fifo->dt_device, "dts property '%s' = %u\n",
		name, *var);
	return 0;
}
/* Read the IP configuration from the device tree: data widths must be
 * 32 bits; fifo depths and rx/tx enables are stored on the device.
 * Returns 0 on success or a negative error code.
 * Fix: the depth/enable branches used to clobber the error returned by
 * get_dts_property() with -EIO, inconsistently with the width checks;
 * the real error code is now propagated everywhere.
 */
static int axis_fifo_parse_dt(struct axis_fifo *fifo)
{
	int ret;
	unsigned int value;
	ret = get_dts_property(fifo, "xlnx,axi-str-rxd-tdata-width", &value);
	if (ret) {
		dev_err(fifo->dt_device, "missing xlnx,axi-str-rxd-tdata-width property\n");
		goto end;
	} else if (value != 32) {
		dev_err(fifo->dt_device, "xlnx,axi-str-rxd-tdata-width only supports 32 bits\n");
		ret = -EIO;
		goto end;
	}
	ret = get_dts_property(fifo, "xlnx,axi-str-txd-tdata-width", &value);
	if (ret) {
		dev_err(fifo->dt_device, "missing xlnx,axi-str-txd-tdata-width property\n");
		goto end;
	} else if (value != 32) {
		dev_err(fifo->dt_device, "xlnx,axi-str-txd-tdata-width only supports 32 bits\n");
		ret = -EIO;
		goto end;
	}
	ret = get_dts_property(fifo, "xlnx,rx-fifo-depth",
			       &fifo->rx_fifo_depth);
	if (ret) {
		dev_err(fifo->dt_device, "missing xlnx,rx-fifo-depth property\n");
		goto end;
	}
	ret = get_dts_property(fifo, "xlnx,tx-fifo-depth",
			       &fifo->tx_fifo_depth);
	if (ret) {
		dev_err(fifo->dt_device, "missing xlnx,tx-fifo-depth property\n");
		goto end;
	}
	/* IP sets TDFV to fifo depth - 4 so we will do the same */
	fifo->tx_fifo_depth -= 4;
	ret = get_dts_property(fifo, "xlnx,use-rx-data", &fifo->has_rx_fifo);
	if (ret) {
		dev_err(fifo->dt_device, "missing xlnx,use-rx-data property\n");
		goto end;
	}
	ret = get_dts_property(fifo, "xlnx,use-tx-data", &fifo->has_tx_fifo);
	if (ret) {
		dev_err(fifo->dt_device, "missing xlnx,use-tx-data property\n");
		goto end;
	}
end:
	return ret;
}
/* Probe: allocate the device struct, map registers, parse the device
 * tree, reset the IP, hook the interrupt and register the misc char
 * device. All allocations are devm-managed, so error paths only need
 * to clear the drvdata pointer.
 */
static int axis_fifo_probe(struct platform_device *pdev)
{
	struct resource *r_mem; /* IO mem resources */
	struct device *dev = &pdev->dev; /* OS device (from device tree) */
	struct axis_fifo *fifo = NULL;
	char *device_name;
	int rc = 0; /* error return value */
	/* ----------------------------
	 * init wrapper device
	 * ----------------------------
	 */
	device_name = devm_kzalloc(dev, 32, GFP_KERNEL);
	if (!device_name)
		return -ENOMEM;
	/* allocate device wrapper memory */
	fifo = devm_kzalloc(dev, sizeof(*fifo), GFP_KERNEL);
	if (!fifo)
		return -ENOMEM;
	dev_set_drvdata(dev, fifo);
	fifo->dt_device = dev;
	init_waitqueue_head(&fifo->read_queue);
	init_waitqueue_head(&fifo->write_queue);
	mutex_init(&fifo->read_lock);
	mutex_init(&fifo->write_lock);
	/* ----------------------------
	 * init device memory space
	 * ----------------------------
	 */
	/* get iospace for the device and request physical memory */
	fifo->base_addr = devm_platform_get_and_ioremap_resource(pdev, 0, &r_mem);
	if (IS_ERR(fifo->base_addr)) {
		rc = PTR_ERR(fifo->base_addr);
		goto err_initial;
	}
	dev_dbg(fifo->dt_device, "remapped memory to 0x%p\n", fifo->base_addr);
	/* create unique device name from the MMIO base address */
	snprintf(device_name, 32, "%s_%pa", DRIVER_NAME, &r_mem->start);
	dev_dbg(fifo->dt_device, "device name [%s]\n", device_name);
	/* ----------------------------
	 * init IP
	 * ----------------------------
	 */
	rc = axis_fifo_parse_dt(fifo);
	if (rc)
		goto err_initial;
	reset_ip_core(fifo);
	/* ----------------------------
	 * init device interrupts
	 * ----------------------------
	 */
	/* get IRQ resource */
	rc = platform_get_irq(pdev, 0);
	if (rc < 0)
		goto err_initial;
	/* request IRQ */
	fifo->irq = rc;
	rc = devm_request_irq(fifo->dt_device, fifo->irq, &axis_fifo_irq, 0,
			      DRIVER_NAME, fifo);
	if (rc) {
		dev_err(fifo->dt_device, "couldn't allocate interrupt %i\n",
			fifo->irq);
		goto err_initial;
	}
	/* ----------------------------
	 * init char device
	 * ----------------------------
	 */
	/* create character device */
	fifo->miscdev.fops = &fops;
	fifo->miscdev.minor = MISC_DYNAMIC_MINOR;
	fifo->miscdev.name = device_name;
	fifo->miscdev.groups = axis_fifo_attrs_groups;
	fifo->miscdev.parent = dev;
	rc = misc_register(&fifo->miscdev);
	if (rc < 0)
		goto err_initial;
	return 0;
err_initial:
	dev_set_drvdata(dev, NULL);
	return rc;
}
/* Remove: unregister the char device; everything else is devm-managed. */
static void axis_fifo_remove(struct platform_device *pdev)
{
	struct axis_fifo *fifo = dev_get_drvdata(&pdev->dev);

	misc_deregister(&fifo->miscdev);
	dev_set_drvdata(&pdev->dev, NULL);
}
/* Device-tree match table: binds to the Xilinx AXI-Stream FIFO v4.1 IP. */
static const struct of_device_id axis_fifo_of_match[] = {
	{ .compatible = "xlnx,axi-fifo-mm-s-4.1", },
	{},
};
MODULE_DEVICE_TABLE(of, axis_fifo_of_match);
static struct platform_driver axis_fifo_driver = {
	.driver = {
		.name = DRIVER_NAME,
		.of_match_table = axis_fifo_of_match,
	},
	.probe = axis_fifo_probe,
	.remove_new = axis_fifo_remove,
};
/* Module init: convert the read/write timeout module parameters from
 * milliseconds to jiffies (negative values mean "wait forever"), then
 * register the platform driver.
 */
static int __init axis_fifo_init(void)
{
	read_timeout = (read_timeout >= 0) ?
			msecs_to_jiffies(read_timeout) : MAX_SCHEDULE_TIMEOUT;
	write_timeout = (write_timeout >= 0) ?
			msecs_to_jiffies(write_timeout) : MAX_SCHEDULE_TIMEOUT;
	pr_info("axis-fifo driver loaded with parameters read_timeout = %li, write_timeout = %li\n",
		read_timeout, write_timeout);
	return platform_driver_register(&axis_fifo_driver);
}
module_init(axis_fifo_init);
/* Module exit: unregister the platform driver registered in init. */
static void __exit axis_fifo_exit(void)
{
	platform_driver_unregister(&axis_fifo_driver);
}
module_exit(axis_fifo_exit);
MODULE_LICENSE("GPL");
MODULE_AUTHOR("Jacob Feder <[email protected]>");
MODULE_DESCRIPTION("Xilinx AXI-Stream FIFO v4.1 IP core driver");
| linux-master | drivers/staging/axis-fifo/axis-fifo.c |
// SPDX-License-Identifier: GPL-2.0
/*
* drivers/usb/gadget/emxx_udc.c
* EMXX FCD (Function Controller Driver) for USB.
*
* Copyright (C) 2010 Renesas Electronics Corporation
*/
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/platform_device.h>
#include <linux/delay.h>
#include <linux/ioport.h>
#include <linux/slab.h>
#include <linux/errno.h>
#include <linux/list.h>
#include <linux/interrupt.h>
#include <linux/proc_fs.h>
#include <linux/clk.h>
#include <linux/ctype.h>
#include <linux/string.h>
#include <linux/dma-mapping.h>
#include <linux/workqueue.h>
#include <linux/device.h>
#include <linux/usb/ch9.h>
#include <linux/usb/gadget.h>
#include <linux/irq.h>
#include <linux/gpio/consumer.h>
#include "emxx_udc.h"
#define DRIVER_DESC "EMXX UDC driver"
#define DMA_ADDR_INVALID (~(dma_addr_t)0)
static struct gpio_desc *vbus_gpio;
static int vbus_irq;
static const char driver_name[] = "emxx_udc";
/*===========================================================================*/
/* Prototype */
static void _nbu2ss_ep_dma_abort(struct nbu2ss_udc *, struct nbu2ss_ep *);
static void _nbu2ss_ep0_enable(struct nbu2ss_udc *);
/*static void _nbu2ss_ep0_disable(struct nbu2ss_udc *);*/
static void _nbu2ss_ep_done(struct nbu2ss_ep *, struct nbu2ss_req *, int);
static void _nbu2ss_set_test_mode(struct nbu2ss_udc *, u32 mode);
static void _nbu2ss_endpoint_toggle_reset(struct nbu2ss_udc *udc, u8 ep_adrs);
static int _nbu2ss_pullup(struct nbu2ss_udc *, int);
static void _nbu2ss_fifo_flush(struct nbu2ss_udc *, struct nbu2ss_ep *);
/*===========================================================================*/
/* Macro */
#define _nbu2ss_zero_len_pkt(udc, epnum) \
_nbu2ss_ep_in_end(udc, epnum, 0, 0)
/*===========================================================================*/
/* Global */
static struct nbu2ss_udc udc_controller;
/*-------------------------------------------------------------------------*/
/* Read */
/* Read a 32-bit controller register (raw, no byte-swap/barrier wrapper). */
static inline u32 _nbu2ss_readl(void __iomem *address)
{
	return __raw_readl(address);
}
/*-------------------------------------------------------------------------*/
/* Write */
/* Write a 32-bit controller register (raw, no byte-swap/barrier wrapper). */
static inline void _nbu2ss_writel(void __iomem *address, u32 udata)
{
	__raw_writel(udata, address);
}
/*-------------------------------------------------------------------------*/
/* Set Bit */
/* Read-modify-write: set the given bits in a controller register. */
static inline void _nbu2ss_bitset(void __iomem *address, u32 udata)
{
	__raw_writel(__raw_readl(address) | udata, address);
}
/*-------------------------------------------------------------------------*/
/* Clear Bit */
/* Read-modify-write: clear the given bits in a controller register. */
static inline void _nbu2ss_bitclr(void __iomem *address, u32 udata)
{
	__raw_writel(__raw_readl(address) & ~udata, address);
}
#ifdef UDC_DEBUG_DUMP
/*-------------------------------------------------------------------------*/
/* Debug helper: dump the whole USB register window, 4 words per line.
 * NOTE(review): drops udc->lock around the dump — callers appear to hold
 * it; confirm no other context can race the register reads meanwhile.
 */
static void _nbu2ss_dump_register(struct nbu2ss_udc *udc)
{
	int i;
	u32 reg_data;
	pr_info("=== %s()\n", __func__);
	if (!udc) {
		pr_err("%s udc == NULL\n", __func__);
		return;
	}
	spin_unlock(&udc->lock);
	dev_dbg(&udc->dev, "\n-USB REG-\n");
	for (i = 0x0 ; i < USB_BASE_SIZE ; i += 16) {
		reg_data = _nbu2ss_readl(IO_ADDRESS(USB_BASE_ADDRESS + i));
		dev_dbg(&udc->dev, "USB%04x =%08x", i, (int)reg_data);
		reg_data = _nbu2ss_readl(IO_ADDRESS(USB_BASE_ADDRESS + i + 4));
		dev_dbg(&udc->dev, " %08x", (int)reg_data);
		reg_data = _nbu2ss_readl(IO_ADDRESS(USB_BASE_ADDRESS + i + 8));
		dev_dbg(&udc->dev, " %08x", (int)reg_data);
		reg_data = _nbu2ss_readl(IO_ADDRESS(USB_BASE_ADDRESS + i + 12));
		dev_dbg(&udc->dev, " %08x\n", (int)reg_data);
	}
	spin_lock(&udc->lock);
}
#endif /* UDC_DEBUG_DUMP */
/*-------------------------------------------------------------------------*/
/* Endpoint 0 Callback (Complete) */
/* Endpoint 0 completion callback: if the just-completed control request
 * was a standard SET_FEATURE(TEST_MODE) addressed to the device, enter
 * the requested USB test mode (selector in the high byte of wIndex).
 */
static void _nbu2ss_ep0_complete(struct usb_ep *_ep, struct usb_request *_req)
{
	struct nbu2ss_udc *udc;
	struct usb_ctrlrequest *p_ctrl;
	u8 recipient;
	u16 selector;
	u16 wIndex;
	u32 test_mode;

	if (!_ep || !_req)
		return;
	udc = (struct nbu2ss_udc *)_req->context;
	p_ctrl = &udc->ctrl;
	if ((p_ctrl->bRequestType & USB_TYPE_MASK) != USB_TYPE_STANDARD)
		return;
	if (p_ctrl->bRequest != USB_REQ_SET_FEATURE)
		return;
	/*-------------------------------------------------*/
	/* SET_FEATURE */
	recipient = (u8)(p_ctrl->bRequestType & USB_RECIP_MASK);
	selector = le16_to_cpu(p_ctrl->wValue);
	if (recipient != USB_RECIP_DEVICE || selector != USB_DEVICE_TEST_MODE)
		return;
	wIndex = le16_to_cpu(p_ctrl->wIndex);
	test_mode = (u32)(wIndex >> 8);
	_nbu2ss_set_test_mode(udc, test_mode);
}
/*-------------------------------------------------------------------------*/
/* Initialization usb_request */
/* Prime the preallocated ep0 request with the given buffer/length.
 * ep0 transfers are PIO-only here (dma stays 0); completion invokes
 * _nbu2ss_ep0_complete() with the udc as context.
 */
static void _nbu2ss_create_ep0_packet(struct nbu2ss_udc *udc,
				      void *p_buf, unsigned int length)
{
	udc->ep0_req.req.buf = p_buf;
	udc->ep0_req.req.length = length;
	udc->ep0_req.req.dma = 0;
	udc->ep0_req.req.zero = true;
	udc->ep0_req.req.complete = _nbu2ss_ep0_complete;
	udc->ep0_req.req.status = -EINPROGRESS;
	udc->ep0_req.req.context = udc;
	udc->ep0_req.req.actual = 0;
}
/*-------------------------------------------------------------------------*/
/* Acquisition of the first address of RAM(FIFO) */
/* Walk the already-configured endpoints and compute the first free
 * word address in the controller's packet RAM: highest base address
 * seen plus the RAM consumed (doubled for double-buffered endpoints).
 */
static u32 _nbu2ss_get_begin_ram_address(struct nbu2ss_udc *udc)
{
	u32 num, buf_type;
	u32 data, last_ram_adr, use_ram_size;
	struct ep_regs __iomem *p_ep_regs;
	/* RAM below this is reserved for the control endpoint */
	last_ram_adr = (D_RAM_SIZE_CTRL / sizeof(u32)) * 2;
	use_ram_size = 0;
	for (num = 0; num < NUM_ENDPOINTS - 1; num++) {
		p_ep_regs = &udc->p_regs->EP_REGS[num];
		data = _nbu2ss_readl(&p_ep_regs->EP_PCKT_ADRS);
		buf_type = _nbu2ss_readl(&p_ep_regs->EP_CONTROL) & EPN_BUF_TYPE;
		if (buf_type == 0) {
			/* Single Buffer */
			use_ram_size += (data & EPN_MPKT) / sizeof(u32);
		} else {
			/* Double Buffer */
			use_ram_size += ((data & EPN_MPKT) / sizeof(u32)) * 2;
		}
		/* upper 16 bits of EP_PCKT_ADRS hold the RAM base address */
		if ((data >> 16) > last_ram_adr)
			last_ram_adr = data >> 16;
	}
	return last_ram_adr + use_ram_size;
}
/*-------------------------------------------------------------------------*/
/* Construction of Endpoint */
/* Program the hardware for a non-control endpoint: packet RAM address,
 * interrupt enable, transfer type, toggle reset, then direction-specific
 * enable bits. Register write order follows the controller's bring-up
 * sequence. Returns 0, or -EINVAL for endpoint 0.
 */
static int _nbu2ss_ep_init(struct nbu2ss_udc *udc, struct nbu2ss_ep *ep)
{
	u32 num;
	u32 data;
	u32 begin_adrs;
	if (ep->epnum == 0)
		return -EINVAL;
	num = ep->epnum - 1;
	/*-------------------------------------------------------------*/
	/* RAM Transfer Address */
	begin_adrs = _nbu2ss_get_begin_ram_address(udc);
	data = (begin_adrs << 16) | ep->ep.maxpacket;
	_nbu2ss_writel(&udc->p_regs->EP_REGS[num].EP_PCKT_ADRS, data);
	/*-------------------------------------------------------------*/
	/* Interrupt Enable */
	data = 1 << (ep->epnum + 8);
	_nbu2ss_bitset(&udc->p_regs->USB_INT_ENA, data);
	/*-------------------------------------------------------------*/
	/* Endpoint Type(Mode) */
	/* Bulk, Interrupt, ISO */
	switch (ep->ep_type) {
	case USB_ENDPOINT_XFER_BULK:
		data = EPN_BULK;
		break;
	case USB_ENDPOINT_XFER_INT:
		data = EPN_BUF_SINGLE | EPN_INTERRUPT;
		break;
	case USB_ENDPOINT_XFER_ISOC:
		data = EPN_ISO;
		break;
	default:
		data = 0;
		break;
	}
	_nbu2ss_bitset(&udc->p_regs->EP_REGS[num].EP_CONTROL, data);
	_nbu2ss_endpoint_toggle_reset(udc, (ep->epnum | ep->direct));
	if (ep->direct == USB_DIR_OUT) {
		/*---------------------------------------------------------*/
		/* OUT: enable endpoint, clear buffer, set OUT direction */
		data = EPN_EN | EPN_BCLR | EPN_DIR0;
		_nbu2ss_bitset(&udc->p_regs->EP_REGS[num].EP_CONTROL, data);
		data = EPN_ONAK | EPN_OSTL_EN | EPN_OSTL;
		_nbu2ss_bitclr(&udc->p_regs->EP_REGS[num].EP_CONTROL, data);
		data = EPN_OUT_EN | EPN_OUT_END_EN;
		_nbu2ss_bitset(&udc->p_regs->EP_REGS[num].EP_INT_ENA, data);
	} else {
		/*---------------------------------------------------------*/
		/* IN: enable endpoint, clear buffer, auto-transfer mode */
		data = EPN_EN | EPN_BCLR | EPN_AUTO;
		_nbu2ss_bitset(&udc->p_regs->EP_REGS[num].EP_CONTROL, data);
		data = EPN_ISTL;
		_nbu2ss_bitclr(&udc->p_regs->EP_REGS[num].EP_CONTROL, data);
		data = EPN_IN_EN | EPN_IN_END_EN;
		_nbu2ss_bitset(&udc->p_regs->EP_REGS[num].EP_INT_ENA, data);
	}
	return 0;
}
/*-------------------------------------------------------------------------*/
/* Release of Endpoint */
/* Tear down a non-control endpoint: clear its packet RAM address,
 * mask its interrupt, then disable the direction-specific enables
 * (mirror of _nbu2ss_ep_init). Refuses ep0 or VBUS-off state.
 */
static int _nbu2ss_epn_exit(struct nbu2ss_udc *udc, struct nbu2ss_ep *ep)
{
	u32 num;
	u32 data;
	if ((ep->epnum == 0) || (udc->vbus_active == 0))
		return -EINVAL;
	num = ep->epnum - 1;
	/*-------------------------------------------------------------*/
	/* RAM Transfer Address */
	_nbu2ss_writel(&udc->p_regs->EP_REGS[num].EP_PCKT_ADRS, 0);
	/*-------------------------------------------------------------*/
	/* Interrupt Disable */
	data = 1 << (ep->epnum + 8);
	_nbu2ss_bitclr(&udc->p_regs->USB_INT_ENA, data);
	if (ep->direct == USB_DIR_OUT) {
		/*---------------------------------------------------------*/
		/* OUT: NAK further OUTs and clear the buffer first */
		data = EPN_ONAK | EPN_BCLR;
		_nbu2ss_bitset(&udc->p_regs->EP_REGS[num].EP_CONTROL, data);
		data = EPN_EN | EPN_DIR0;
		_nbu2ss_bitclr(&udc->p_regs->EP_REGS[num].EP_CONTROL, data);
		data = EPN_OUT_EN | EPN_OUT_END_EN;
		_nbu2ss_bitclr(&udc->p_regs->EP_REGS[num].EP_INT_ENA, data);
	} else {
		/*---------------------------------------------------------*/
		/* IN: clear the buffer, then drop enable and auto mode */
		data = EPN_BCLR;
		_nbu2ss_bitset(&udc->p_regs->EP_REGS[num].EP_CONTROL, data);
		data = EPN_EN | EPN_AUTO;
		_nbu2ss_bitclr(&udc->p_regs->EP_REGS[num].EP_CONTROL, data);
		data = EPN_IN_EN | EPN_IN_END_EN;
		_nbu2ss_bitclr(&udc->p_regs->EP_REGS[num].EP_INT_ENA, data);
	}
	return 0;
}
/*-------------------------------------------------------------------------*/
/* DMA setting (without Endpoint 0) */
/* Configure DMA for a non-control endpoint, if the controller's
 * USBSSCONF register reports DMA support for that endpoint number;
 * otherwise do nothing.
 */
static void _nbu2ss_ep_dma_init(struct nbu2ss_udc *udc, struct nbu2ss_ep *ep)
{
	u32 num;
	u32 data;
	data = _nbu2ss_readl(&udc->p_regs->USBSSCONF);
	/* per-endpoint DMA capability bit must be set */
	if (((ep->epnum == 0) || (data & (1 << ep->epnum)) == 0))
		return; /* Not Support DMA */
	num = ep->epnum - 1;
	if (ep->direct == USB_DIR_OUT) {
		/*---------------------------------------------------------*/
		/* OUT: DCR2 holds the max packet size */
		data = ep->ep.maxpacket;
		_nbu2ss_writel(&udc->p_regs->EP_DCR[num].EP_DCR2, data);
		/*---------------------------------------------------------*/
		/* Transfer Direct */
		data = DCR1_EPN_DIR0;
		_nbu2ss_bitset(&udc->p_regs->EP_DCR[num].EP_DCR1, data);
		/*---------------------------------------------------------*/
		/* DMA Mode etc. */
		data = EPN_STOP_MODE | EPN_STOP_SET | EPN_DMAMODE0;
		_nbu2ss_writel(&udc->p_regs->EP_REGS[num].EP_DMA_CTRL, data);
	} else {
		/*---------------------------------------------------------*/
		/* IN */
		_nbu2ss_bitset(&udc->p_regs->EP_REGS[num].EP_CONTROL, EPN_AUTO);
		/*---------------------------------------------------------*/
		/* DMA Mode etc. */
		data = EPN_BURST_SET | EPN_DMAMODE0;
		_nbu2ss_writel(&udc->p_regs->EP_REGS[num].EP_DMA_CTRL, data);
	}
}
/*-------------------------------------------------------------------------*/
/* DMA setting release */
/* Undo _nbu2ss_ep_dma_init: abort any in-flight DMA, then clear the
 * DMA control/direction registers. No-op when VBUS is off or the
 * endpoint has no DMA support.
 */
static void _nbu2ss_ep_dma_exit(struct nbu2ss_udc *udc, struct nbu2ss_ep *ep)
{
	u32 num;
	u32 data;
	struct fc_regs __iomem *preg = udc->p_regs;
	if (udc->vbus_active == 0)
		return; /* VBUS OFF */
	data = _nbu2ss_readl(&preg->USBSSCONF);
	if ((ep->epnum == 0) || ((data & (1 << ep->epnum)) == 0))
		return; /* Not Support DMA */
	num = ep->epnum - 1;
	_nbu2ss_ep_dma_abort(udc, ep);
	if (ep->direct == USB_DIR_OUT) {
		/*---------------------------------------------------------*/
		/* OUT */
		_nbu2ss_writel(&preg->EP_DCR[num].EP_DCR2, 0);
		_nbu2ss_bitclr(&preg->EP_DCR[num].EP_DCR1, DCR1_EPN_DIR0);
		_nbu2ss_writel(&preg->EP_REGS[num].EP_DMA_CTRL, 0);
	} else {
		/*---------------------------------------------------------*/
		/* IN */
		_nbu2ss_bitclr(&preg->EP_REGS[num].EP_CONTROL, EPN_AUTO);
		_nbu2ss_writel(&preg->EP_REGS[num].EP_DMA_CTRL, 0);
	}
}
/*-------------------------------------------------------------------------*/
/* Abort DMA */
/* Abort an in-flight DMA transfer: drop the request-enable bit, wait
 * for the controller to settle, then disable the DMA engine.
 */
static void _nbu2ss_ep_dma_abort(struct nbu2ss_udc *udc, struct nbu2ss_ep *ep)
{
	struct fc_regs __iomem *preg = udc->p_regs;
	_nbu2ss_bitclr(&preg->EP_DCR[ep->epnum - 1].EP_DCR1, DCR1_EPN_REQEN);
	mdelay(DMA_DISABLE_TIME);	/* DCR1_EPN_REQEN Clear */
	_nbu2ss_bitclr(&preg->EP_REGS[ep->epnum - 1].EP_DMA_CTRL, EPN_DMA_EN);
}
/*-------------------------------------------------------------------------*/
/* Start IN Transfer */
/* Finish an IN transfer by writing the final partial word (0-3 bytes)
 * and raising the data-end bit. Full words (length >= 4) are rejected:
 * they go through the normal PIO/DMA paths. AUTO mode is temporarily
 * dropped so the manual length/DEND write takes effect.
 */
static void _nbu2ss_ep_in_end(struct nbu2ss_udc *udc,
			      u32 epnum, u32 data32, u32 length)
{
	u32 data;
	u32 num;
	struct fc_regs __iomem *preg = udc->p_regs;
	if (length >= sizeof(u32))
		return;
	if (epnum == 0) {
		_nbu2ss_bitclr(&preg->EP0_CONTROL, EP0_AUTO);
		/* Writing of 1-4 bytes */
		if (length)
			_nbu2ss_writel(&preg->EP0_WRITE, data32);
		/* byte count goes into the DW field; DEND marks packet end */
		data = ((length << 5) & EP0_DW) | EP0_DEND;
		_nbu2ss_writel(&preg->EP0_CONTROL, data);
		_nbu2ss_bitset(&preg->EP0_CONTROL, EP0_AUTO);
	} else {
		num = epnum - 1;
		_nbu2ss_bitclr(&preg->EP_REGS[num].EP_CONTROL, EPN_AUTO);
		/* Writing of 1-4 bytes */
		if (length)
			_nbu2ss_writel(&preg->EP_REGS[num].EP_WRITE, data32);
		data = (((length) << 5) & EPN_DW) | EPN_DEND;
		_nbu2ss_bitset(&preg->EP_REGS[num].EP_CONTROL, data);
		_nbu2ss_bitset(&preg->EP_REGS[num].EP_CONTROL, EPN_AUTO);
	}
}
#ifdef USE_DMA
/*-------------------------------------------------------------------------*/
/* Map (or re-sync) the request buffer for DMA. Unaligned requests use
 * the endpoint's pre-allocated bounce buffer (ep->phys_buf) instead of
 * mapping the caller's buffer. req->mapped records whether this call
 * created the mapping (and so must unmap it later).
 */
static void _nbu2ss_dma_map_single(struct nbu2ss_udc *udc,
				   struct nbu2ss_ep *ep,
				   struct nbu2ss_req *req, u8 direct)
{
	if (req->req.dma == DMA_ADDR_INVALID) {
		if (req->unaligned) {
			/* bounce buffer: already DMA-able, no mapping needed */
			req->req.dma = ep->phys_buf;
		} else {
			req->req.dma = dma_map_single(udc->gadget.dev.parent,
						      req->req.buf,
						      req->req.length,
						      (direct == USB_DIR_IN)
						      ? DMA_TO_DEVICE
						      : DMA_FROM_DEVICE);
		}
		req->mapped = 1;
	} else {
		/* buffer was mapped by the caller; just sync it */
		if (!req->unaligned)
			dma_sync_single_for_device(udc->gadget.dev.parent,
						   req->req.dma,
						   req->req.length,
						   (direct == USB_DIR_IN)
						   ? DMA_TO_DEVICE
						   : DMA_FROM_DEVICE);
		req->mapped = 0;
	}
}
/*-------------------------------------------------------------------------*/
/* Unmap (or re-sync) the request buffer after DMA. For OUT transfers
 * the trailing 1-3 bytes are saved before the unmap/copy and restored
 * afterwards, since the DMA path only handles whole words; unaligned
 * OUT data is copied back from the bounce buffer (ep->virt_buf).
 */
static void _nbu2ss_dma_unmap_single(struct nbu2ss_udc *udc,
				     struct nbu2ss_ep *ep,
				     struct nbu2ss_req *req, u8 direct)
{
	u8 data[4];
	u8 *p;
	u32 count = 0;
	if (direct == USB_DIR_OUT) {
		/* preserve the non-word-aligned tail bytes */
		count = req->req.actual % 4;
		if (count) {
			p = req->req.buf;
			p += (req->req.actual - count);
			memcpy(data, p, count);
		}
	}
	if (req->mapped) {
		if (req->unaligned) {
			/* copy whole words back from the bounce buffer */
			if (direct == USB_DIR_OUT)
				memcpy(req->req.buf, ep->virt_buf,
				       req->req.actual & 0xfffffffc);
		} else {
			dma_unmap_single(udc->gadget.dev.parent,
					 req->req.dma, req->req.length,
					 (direct == USB_DIR_IN)
					 ? DMA_TO_DEVICE
					 : DMA_FROM_DEVICE);
		}
		req->req.dma = DMA_ADDR_INVALID;
		req->mapped = 0;
	} else {
		if (!req->unaligned)
			dma_sync_single_for_cpu(udc->gadget.dev.parent,
						req->req.dma, req->req.length,
						(direct == USB_DIR_IN)
						? DMA_TO_DEVICE
						: DMA_FROM_DEVICE);
	}
	if (count) {
		/* restore the tail bytes that the word copy clobbered */
		p = req->req.buf;
		p += (req->req.actual - count);
		memcpy(p, data, count);
	}
}
#endif
/*-------------------------------------------------------------------------*/
/* Endpoint 0 OUT Transfer (PIO) */
/* PIO-read whole 32-bit words from the ep0 read register into buf.
 * Returns the number of bytes read (length rounded down to a word
 * multiple); any trailing partial word is left for ep0_out_overbytes().
 */
static int ep0_out_pio(struct nbu2ss_udc *udc, u8 *buf, u32 length)
{
	union usb_reg_access *buf32 = (union usb_reg_access *)buf;
	u32 numreads = length / sizeof(u32);
	u32 i;

	if (!numreads)
		return 0;
	/* PIO Read */
	for (i = 0; i < numreads; i++, buf32++)
		buf32->dw = _nbu2ss_readl(&udc->p_regs->EP0_READ);
	return numreads * sizeof(u32);
}
/*-------------------------------------------------------------------------*/
/* Endpoint 0 OUT Transfer (PIO, OverBytes) */
/* PIO-read the final partial word (1-3 bytes) of an ep0 OUT transfer:
 * pull one full register read and copy only 'length' bytes out of it.
 * Returns the number of bytes stored (0 unless 0 < length < 4).
 */
static int ep0_out_overbytes(struct nbu2ss_udc *udc, u8 *p_buf, u32 length)
{
	union usb_reg_access *p_buf_32 = (union usb_reg_access *)p_buf;
	union usb_reg_access temp_32;
	u32 i;

	if (length == 0 || length >= sizeof(u32))
		return 0;
	temp_32.dw = _nbu2ss_readl(&udc->p_regs->EP0_READ);
	for (i = 0; i < length; i++)
		p_buf_32->byte.DATA[i] = temp_32.byte.DATA[i];
	return length;
}
/*-------------------------------------------------------------------------*/
/* Endpoint 0 IN Transfer (PIO) */
/* PIO-write whole 32-bit words to the ep0 write register, capped at
 * one control packet (EP0_PACKETSIZE) per call. Returns the number of
 * bytes written; a trailing partial word is left to ep0_in_overbytes().
 */
static int EP0_in_PIO(struct nbu2ss_udc *udc, u8 *p_buf, u32 length)
{
	union usb_reg_access *p_buf_32 = (union usb_reg_access *)p_buf;
	u32 i_word_length;
	u32 i;

	/*------------------------------------------------------------*/
	/* Transfer Length: at most one packet's worth of whole words */
	i_word_length = min(length, (u32)EP0_PACKETSIZE) / sizeof(u32);
	/*------------------------------------------------------------*/
	/* PIO */
	for (i = 0; i < i_word_length; i++, p_buf_32++)
		_nbu2ss_writel(&udc->p_regs->EP0_WRITE, p_buf_32->dw);
	return i_word_length * sizeof(u32);
}
/*-------------------------------------------------------------------------*/
/* Endpoint 0 IN Transfer (PIO, OverBytes) */
/* PIO-write the final partial word (1-3 bytes) of an ep0 IN transfer:
 * pack the bytes into one word and hand it to _nbu2ss_ep_in_end(),
 * which also flags the end of the packet. Returns the byte count
 * consumed (0 unless 0 < i_remain_size < 4).
 */
static int ep0_in_overbytes(struct nbu2ss_udc *udc,
			    u8 *p_buf,
			    u32 i_remain_size)
{
	union usb_reg_access *p_buf_32 = (union usb_reg_access *)p_buf;
	union usb_reg_access temp_32;
	u32 i;

	if (i_remain_size == 0 || i_remain_size >= sizeof(u32))
		return 0;
	for (i = 0; i < i_remain_size; i++)
		temp_32.byte.DATA[i] = p_buf_32->byte.DATA[i];
	_nbu2ss_ep_in_end(udc, 0, temp_32.dw, i_remain_size);
	return i_remain_size;
}
/*-------------------------------------------------------------------------*/
/* Transfer NULL Packet (Endpoint 0) */
/* Queue a zero-length IN packet on endpoint 0: release the IN NAK,
 * arm data-end; pid_flag additionally resets the data PID toggle.
 * Always returns 0.
 */
static int EP0_send_NULL(struct nbu2ss_udc *udc, bool pid_flag)
{
	u32 data = _nbu2ss_readl(&udc->p_regs->EP0_CONTROL);

	data &= ~(u32)EP0_INAK;
	data |= EP0_INAK_EN | EP0_DEND;
	if (pid_flag)
		data |= EP0_PIDCLR;
	_nbu2ss_writel(&udc->p_regs->EP0_CONTROL, data);
	return 0;
}
/*-------------------------------------------------------------------------*/
/* Receive NULL Packet (Endpoint 0) */
/* Accept a zero-length OUT packet on endpoint 0: release the OUT NAK
 * so the host can complete the status stage; pid_flag additionally
 * resets the data PID toggle. Always returns 0.
 */
static int EP0_receive_NULL(struct nbu2ss_udc *udc, bool pid_flag)
{
	u32 data = _nbu2ss_readl(&udc->p_regs->EP0_CONTROL);

	data &= ~(u32)EP0_ONAK;
	if (pid_flag)
		data |= EP0_PIDCLR;
	_nbu2ss_writel(&udc->p_regs->EP0_CONTROL, data);
	return 0;
}
/*-------------------------------------------------------------------------*/
/* Push the next chunk of an ep0 IN request. Returns 0 when the
 * transfer is complete, otherwise the number of bytes queued this
 * call (1 for a trailing zero-length packet). req->div_len records
 * what was queued so the IRQ path can advance req->req.actual.
 */
static int _nbu2ss_ep0_in_transfer(struct nbu2ss_udc *udc,
				   struct nbu2ss_req *req)
{
	u8 *p_buffer; /* IN Data Buffer */
	u32 data;
	u32 i_remain_size = 0;
	int result = 0;
	/*-------------------------------------------------------------*/
	/* End confirmation */
	if (req->req.actual == req->req.length) {
		/* exact multiple of the packet size may need a ZLP */
		if ((req->req.actual % EP0_PACKETSIZE) == 0) {
			if (req->zero) {
				req->zero = false;
				EP0_send_NULL(udc, false);
				return 1;
			}
		}
		return 0; /* Transfer End */
	}
	/*-------------------------------------------------------------*/
	/* NAK release */
	data = _nbu2ss_readl(&udc->p_regs->EP0_CONTROL);
	data |= EP0_INAK_EN;
	data &= ~(u32)EP0_INAK;
	_nbu2ss_writel(&udc->p_regs->EP0_CONTROL, data);
	i_remain_size = req->req.length - req->req.actual;
	p_buffer = (u8 *)req->req.buf;
	p_buffer += req->req.actual;
	/*-------------------------------------------------------------*/
	/* Data transfer */
	result = EP0_in_PIO(udc, p_buffer, i_remain_size);
	req->div_len = result;
	i_remain_size -= result;
	if (i_remain_size == 0) {
		EP0_send_NULL(udc, false);
		return result;
	}
	/* short final word fits in this packet: finish it now */
	if ((i_remain_size < sizeof(u32)) && (result != EP0_PACKETSIZE)) {
		p_buffer += result;
		result += ep0_in_overbytes(udc, p_buffer, i_remain_size);
		req->div_len = result;
	}
	return result;
}
/*-------------------------------------------------------------------------*/
/* Pull the next chunk of an ep0 OUT request from the fifo. Returns 0
 * when the transfer is complete (or ended by a short packet), 1 when
 * more data is expected, or a negative error code on overrun.
 */
static int _nbu2ss_ep0_out_transfer(struct nbu2ss_udc *udc,
				    struct nbu2ss_req *req)
{
	u8 *p_buffer;
	u32 i_remain_size;
	u32 i_recv_length;
	int result = 0;
	int f_rcv_zero;
	/*-------------------------------------------------------------*/
	/* Receive data confirmation */
	i_recv_length = _nbu2ss_readl(&udc->p_regs->EP0_LENGTH) & EP0_LDATA;
	if (i_recv_length != 0) {
		f_rcv_zero = 0;
		i_remain_size = req->req.length - req->req.actual;
		p_buffer = (u8 *)req->req.buf;
		p_buffer += req->req.actual;
		/* whole words first ... */
		result = ep0_out_pio(udc, p_buffer
					, min(i_remain_size, i_recv_length));
		if (result < 0)
			return result;
		req->req.actual += result;
		i_recv_length -= result;
		/* ... then the trailing 1-3 bytes, if any */
		if ((i_recv_length > 0) && (i_recv_length < sizeof(u32))) {
			p_buffer += result;
			i_remain_size -= result;
			result = ep0_out_overbytes(udc, p_buffer
					, min(i_remain_size, i_recv_length));
			req->req.actual += result;
		}
	} else {
		f_rcv_zero = 1;
	}
	/*-------------------------------------------------------------*/
	/* End confirmation */
	if (req->req.actual == req->req.length) {
		/* exact multiple of the packet size may need a ZLP */
		if ((req->req.actual % EP0_PACKETSIZE) == 0) {
			if (req->zero) {
				req->zero = false;
				EP0_receive_NULL(udc, false);
				return 1;
			}
		}
		return 0; /* Transfer End */
	}
	if ((req->req.actual % EP0_PACKETSIZE) != 0)
		return 0; /* Short Packet Transfer End */
	if (req->req.actual > req->req.length) {
		dev_err(udc->dev, " *** Overrun Error\n");
		return -EOVERFLOW;
	}
	if (f_rcv_zero != 0) {
		i_remain_size = _nbu2ss_readl(&udc->p_regs->EP0_CONTROL);
		if (i_remain_size & EP0_ONAK) {
			/*---------------------------------------------------*/
			/* NACK release */
			_nbu2ss_bitclr(&udc->p_regs->EP0_CONTROL, EP0_ONAK);
		}
		result = 1;
	}
	return result;
}
/*-------------------------------------------------------------------------*/
/*
 * Start an OUT (host->device) DMA transfer on endpoint index @num.
 * @length bytes (rounded down to a multiple of 4) are programmed;
 * the rounded length is also recorded in req->div_len and returned.
 * Returns 1 immediately if a DMA is already in flight for @req.
 */
static int _nbu2ss_out_dma(struct nbu2ss_udc *udc, struct nbu2ss_req *req,
			   u32 num, u32 length)
{
	dma_addr_t p_buffer;
	u32 mpkt;	/* endpoint max packet size from hardware */
	u32 lmpkt;	/* last (partial) packet size, word aligned */
	u32 dmacnt;	/* number of packets to transfer */
	u32 burst = 1;
	u32 data;
	int result;
	struct fc_regs __iomem *preg = udc->p_regs;

	if (req->dma_flag)
		return 1;		/* DMA is forwarded */
	req->dma_flag = true;
	p_buffer = req->req.dma;
	p_buffer += req->req.actual;
	/* DMA Address */
	_nbu2ss_writel(&preg->EP_DCR[num].EP_TADR, (u32)p_buffer);
	/* Number of transfer packets */
	mpkt = _nbu2ss_readl(&preg->EP_REGS[num].EP_PCKT_ADRS) & EPN_MPKT;
	dmacnt = length / mpkt;
	lmpkt = (length % mpkt) & ~(u32)0x03;	/* word-align the tail */
	if (dmacnt > DMA_MAX_COUNT) {
		/* clamp to hardware limit; tail handled on a later pass */
		dmacnt = DMA_MAX_COUNT;
		lmpkt = 0;
	} else if (lmpkt != 0) {
		if (dmacnt == 0)
			burst = 0;	/* Burst OFF */
		dmacnt++;
	}
	data = mpkt | (lmpkt << 16);
	_nbu2ss_writel(&preg->EP_DCR[num].EP_DCR2, data);
	data = ((dmacnt & 0xff) << 16) | DCR1_EPN_DIR0 | DCR1_EPN_REQEN;
	_nbu2ss_writel(&preg->EP_DCR[num].EP_DCR1, data);
	if (burst == 0) {
		_nbu2ss_writel(&preg->EP_REGS[num].EP_LEN_DCNT, 0);
		_nbu2ss_bitclr(&preg->EP_REGS[num].EP_DMA_CTRL, EPN_BURST_SET);
	} else {
		_nbu2ss_writel(&preg->EP_REGS[num].EP_LEN_DCNT
				, (dmacnt << 16));
		_nbu2ss_bitset(&preg->EP_REGS[num].EP_DMA_CTRL, EPN_BURST_SET);
	}
	/* kick the DMA engine */
	_nbu2ss_bitset(&preg->EP_REGS[num].EP_DMA_CTRL, EPN_DMA_EN);
	result = length & ~(u32)0x03;
	req->div_len = result;
	return result;
}
/*-------------------------------------------------------------------------*/
/*
 * PIO read of up to @length bytes from endpoint ep->epnum's OUT FIFO
 * into req->req.buf (word copies, then a byte-wise tail).
 * Advances req->req.actual.  Returns the byte count read, 0 when the
 * request is complete or a short packet ended it, 1 if DMA owns @req.
 */
static int _nbu2ss_epn_out_pio(struct nbu2ss_udc *udc, struct nbu2ss_ep *ep,
			       struct nbu2ss_req *req, u32 length)
{
	u8 *p_buffer;
	u32 i;
	u32 data;
	u32 i_word_length;
	union usb_reg_access temp_32;
	union usb_reg_access *p_buf_32;
	int result = 0;
	struct fc_regs __iomem *preg = udc->p_regs;

	if (req->dma_flag)
		return 1;		/* DMA is forwarded */
	if (length == 0)
		return 0;
	p_buffer = (u8 *)req->req.buf;
	p_buf_32 = (union usb_reg_access *)(p_buffer + req->req.actual);
	i_word_length = length / sizeof(u32);
	if (i_word_length > 0) {
		/*---------------------------------------------------------*/
		/* Copy of every four bytes */
		for (i = 0; i < i_word_length; i++) {
			p_buf_32->dw =
			_nbu2ss_readl(&preg->EP_REGS[ep->epnum - 1].EP_READ);
			p_buf_32++;
		}
		result = i_word_length * sizeof(u32);
	}
	data = length - result;
	if (data > 0) {
		/*---------------------------------------------------------*/
		/* Copy of fraction byte: one more FIFO word read, then
		 * only the valid leading bytes are stored.
		 */
		temp_32.dw =
			_nbu2ss_readl(&preg->EP_REGS[ep->epnum - 1].EP_READ);
		for (i = 0 ; i < data ; i++)
			p_buf_32->byte.DATA[i] = temp_32.byte.DATA[i];
		result += data;
	}
	req->req.actual += result;
	/* request done, or a short packet terminated the transfer */
	if ((req->req.actual == req->req.length) ||
	    ((req->req.actual % ep->ep.maxpacket) != 0)) {
		result = 0;
	}
	return result;
}
/*-------------------------------------------------------------------------*/
/*
 * Route an OUT transfer of up to @data_size bytes on @ep to DMA or PIO.
 * DMA is used for non-interrupt endpoints with a mapped buffer and at
 * least one full word to move; everything else goes through PIO, capped
 * at one max-packet.  Returns the sub-handler's result, -EINVAL for EP0.
 */
static int _nbu2ss_epn_out_data(struct nbu2ss_udc *udc, struct nbu2ss_ep *ep,
				struct nbu2ss_req *req, u32 data_size)
{
	u32 burst_size;
	bool use_dma;

	/* EP0 has its own dedicated transfer path */
	if (ep->epnum == 0)
		return -EINVAL;

	burst_size = min((req->req.length - req->req.actual), data_size);

	use_dma = (ep->ep_type != USB_ENDPOINT_XFER_INT) &&
		  (req->req.dma != 0) && (burst_size >= sizeof(u32));
	if (use_dma)
		return _nbu2ss_out_dma(udc, req, ep->epnum - 1, burst_size);

	return _nbu2ss_epn_out_pio(udc, ep, req,
				   min_t(u32, burst_size, ep->ep.maxpacket));
}
/*-------------------------------------------------------------------------*/
/*
 * Advance an OUT transfer on endpoint @ep for request @req.
 * Reads the received byte count from the endpoint and hands it to
 * _nbu2ss_epn_out_data().  Returns 0 when the request is finished,
 * 1 while more data is pending, or -EOVERFLOW on overrun.
 */
static int _nbu2ss_epn_out_transfer(struct nbu2ss_udc *udc,
				    struct nbu2ss_ep *ep,
				    struct nbu2ss_req *req)
{
	u32 num;
	u32 i_recv_length;
	int result = 1;
	struct fc_regs __iomem *preg = udc->p_regs;

	if (ep->epnum == 0)
		return -EINVAL;
	num = ep->epnum - 1;
	/*-------------------------------------------------------------*/
	/* Receive Length */
	i_recv_length =
		_nbu2ss_readl(&preg->EP_REGS[num].EP_LEN_DCNT) & EPN_LDATA;
	if (i_recv_length != 0) {
		result = _nbu2ss_epn_out_data(udc, ep, req, i_recv_length);
		if (i_recv_length < ep->ep.maxpacket) {
			/* short packet fully consumed -> transfer ends */
			if (i_recv_length == result) {
				req->req.actual += result;
				result = 0;
			}
		}
	} else {
		/* nothing received: done if length reached or short packet */
		if ((req->req.actual == req->req.length) ||
		    ((req->req.actual % ep->ep.maxpacket) != 0)) {
			result = 0;
		}
	}
	if (result == 0) {
		if ((req->req.actual % ep->ep.maxpacket) == 0) {
			if (req->zero) {
				/* one more zero-length packet expected */
				req->zero = false;
				return 1;
			}
		}
	}
	if (req->req.actual > req->req.length) {
		dev_err(udc->dev, " Overrun Error\n");
		dev_err(udc->dev, " actual = %d, length = %d\n",
			req->req.actual, req->req.length);
		result = -EOVERFLOW;
	}
	return result;
}
/*-------------------------------------------------------------------------*/
/*
 * Start an IN (device->host) DMA transfer on endpoint index @num.
 * Programs at most DMA_MAX_COUNT packets; the word-aligned byte count
 * actually queued is stored in req->div_len and returned.
 * Returns 1 immediately if a DMA is already in flight for @req.
 */
static int _nbu2ss_in_dma(struct nbu2ss_udc *udc, struct nbu2ss_ep *ep,
			  struct nbu2ss_req *req, u32 num, u32 length)
{
	dma_addr_t p_buffer;
	u32 mpkt;		/* MaxPacketSize */
	u32 lmpkt;		/* Last Packet Data Size */
	u32 dmacnt;		/* IN Data Size */
	u32 i_write_length;
	u32 data;
	int result = -EINVAL;
	struct fc_regs __iomem *preg = udc->p_regs;

	if (req->dma_flag)
		return 1;		/* DMA is forwarded */
#ifdef USE_DMA
	/* first chunk of this request: create the DMA mapping */
	if (req->req.actual == 0)
		_nbu2ss_dma_map_single(udc, ep, req, USB_DIR_IN);
#endif
	req->dma_flag = true;
	/* MAX Packet Size */
	mpkt = _nbu2ss_readl(&preg->EP_REGS[num].EP_PCKT_ADRS) & EPN_MPKT;
	i_write_length = min(DMA_MAX_COUNT * mpkt, length);
	/*------------------------------------------------------------*/
	/* Number of transmission packets */
	if (mpkt < i_write_length) {
		dmacnt = i_write_length / mpkt;
		lmpkt = (i_write_length % mpkt) & ~(u32)0x3;
		if (lmpkt != 0)
			dmacnt++;
		else
			lmpkt = mpkt & ~(u32)0x3;
	} else {
		dmacnt = 1;
		lmpkt = i_write_length & ~(u32)0x3;
	}
	/* Packet setting */
	data = mpkt | (lmpkt << 16);
	_nbu2ss_writel(&preg->EP_DCR[num].EP_DCR2, data);
	/* Address setting */
	p_buffer = req->req.dma;
	p_buffer += req->req.actual;
	_nbu2ss_writel(&preg->EP_DCR[num].EP_TADR, (u32)p_buffer);
	/* Packet and DMA setting */
	data = ((dmacnt & 0xff) << 16) | DCR1_EPN_REQEN;
	_nbu2ss_writel(&preg->EP_DCR[num].EP_DCR1, data);
	/* Packet setting of EPC */
	data = dmacnt << 16;
	_nbu2ss_writel(&preg->EP_REGS[num].EP_LEN_DCNT, data);
	/*DMA setting of EPC */
	_nbu2ss_bitset(&preg->EP_REGS[num].EP_DMA_CTRL, EPN_DMA_EN);
	result = i_write_length & ~(u32)0x3;
	req->div_len = result;
	return result;
}
/*-------------------------------------------------------------------------*/
/*
 * PIO write of @length bytes from req->req.buf into the endpoint IN
 * FIFO (word writes, then a byte-packed tail via _nbu2ss_ep_in_end()).
 * Records the byte count in req->div_len and returns it; returns 1 if
 * DMA owns @req.
 *
 * NOTE(review): when length == 0 the tail branch still runs with
 * p_buf_32 == NULL, but data is then 0 so the loop never dereferences
 * it — fragile, yet safe as written.
 */
static int _nbu2ss_epn_in_pio(struct nbu2ss_udc *udc, struct nbu2ss_ep *ep,
			      struct nbu2ss_req *req, u32 length)
{
	u8 *p_buffer;
	u32 i;
	u32 data;
	u32 i_word_length;
	union usb_reg_access temp_32;
	union usb_reg_access *p_buf_32 = NULL;
	int result = 0;
	struct fc_regs __iomem *preg = udc->p_regs;

	if (req->dma_flag)
		return 1;		/* DMA is forwarded */
	if (length > 0) {
		p_buffer = (u8 *)req->req.buf;
		p_buf_32 = (union usb_reg_access *)(p_buffer + req->req.actual);
		i_word_length = length / sizeof(u32);
		if (i_word_length > 0) {
			/* whole 32-bit words straight into the FIFO */
			for (i = 0; i < i_word_length; i++) {
				_nbu2ss_writel(&preg->EP_REGS[ep->epnum - 1].EP_WRITE,
					       p_buf_32->dw);
				p_buf_32++;
			}
			result = i_word_length * sizeof(u32);
		}
	}
	/* a full max-packet needs no explicit end; otherwise flush tail */
	if (result != ep->ep.maxpacket) {
		data = length - result;
		temp_32.dw = 0;
		for (i = 0 ; i < data ; i++)
			temp_32.byte.DATA[i] = p_buf_32->byte.DATA[i];
		_nbu2ss_ep_in_end(udc, ep->epnum, temp_32.dw, data);
		result += data;
	}
	req->div_len = result;
	return result;
}
/*-------------------------------------------------------------------------*/
/*
 * Route an IN transfer of @data_size bytes on @ep to DMA or PIO.
 * DMA requires a non-interrupt endpoint, a mapped buffer and at least
 * one full word; otherwise PIO is used, capped at one max-packet.
 * Returns the sub-handler's result, -EINVAL for EP0.
 */
static int _nbu2ss_epn_in_data(struct nbu2ss_udc *udc, struct nbu2ss_ep *ep,
			       struct nbu2ss_req *req, u32 data_size)
{
	bool use_dma;

	/* EP0 is handled by the dedicated EP0 path */
	if (ep->epnum == 0)
		return -EINVAL;

	use_dma = (ep->ep_type != USB_ENDPOINT_XFER_INT) &&
		  (req->req.dma != 0) && (data_size >= sizeof(u32));
	if (use_dma)
		return _nbu2ss_in_dma(udc, ep, req, ep->epnum - 1, data_size);

	return _nbu2ss_epn_in_pio(udc, ep, req,
				  min_t(u32, data_size, ep->ep.maxpacket));
}
/*-------------------------------------------------------------------------*/
/*
 * Advance an IN transfer on @ep for @req.
 * Checks FIFO occupancy first (empty for the first chunk, not-full for
 * follow-ups), then queues the remaining data or a zero-length packet.
 * Returns 1 when the FIFO is busy, otherwise the data handler's result.
 */
static int _nbu2ss_epn_in_transfer(struct nbu2ss_udc *udc,
				   struct nbu2ss_ep *ep, struct nbu2ss_req *req)
{
	u32 num;
	u32 i_buf_size;
	int result = 0;
	u32 status;

	if (ep->epnum == 0)
		return -EINVAL;
	num = ep->epnum - 1;
	status = _nbu2ss_readl(&udc->p_regs->EP_REGS[num].EP_STATUS);
	/*-------------------------------------------------------------*/
	/* State confirmation of FIFO */
	if (req->req.actual == 0) {
		if ((status & EPN_IN_EMPTY) == 0)
			return 1;	/* Not Empty */
	} else {
		if ((status & EPN_IN_FULL) != 0)
			return 1;	/* Not Empty */
	}
	/*-------------------------------------------------------------*/
	/* Start transfer */
	i_buf_size = req->req.length - req->req.actual;
	if (i_buf_size > 0)
		result = _nbu2ss_epn_in_data(udc, ep, req, i_buf_size);
	else if (req->req.length == 0)
		_nbu2ss_zero_len_pkt(udc, ep->epnum);
	return result;
}
/*-------------------------------------------------------------------------*/
/*
 * Kick off (or resume) a transfer for @req on @ep.
 * Resets per-request DMA bookkeeping, computes whether a trailing ZLP
 * is needed, then dispatches on EP0 state or endpoint direction.
 * @bflag: for EPN OUT, true suppresses the immediate transfer (a short
 *         packet is already pending; see _nbu2ss_restert_transfer()).
 * Returns the sub-handler's result or -EINVAL for unhandled states.
 */
static int _nbu2ss_start_transfer(struct nbu2ss_udc *udc,
				  struct nbu2ss_ep *ep,
				  struct nbu2ss_req *req,
				  bool bflag)
{
	int nret = -EINVAL;

	req->dma_flag = false;
	req->div_len = 0;
	if (req->req.length == 0) {
		req->zero = false;
	} else {
		/* ZLP only needed when length is an exact packet multiple */
		if ((req->req.length % ep->ep.maxpacket) == 0)
			req->zero = req->req.zero;
		else
			req->zero = false;
	}
	if (ep->epnum == 0) {
		/* EP0 */
		switch (udc->ep0state) {
		case EP0_IN_DATA_PHASE:
			nret = _nbu2ss_ep0_in_transfer(udc, req);
			break;
		case EP0_OUT_DATA_PHASE:
			nret = _nbu2ss_ep0_out_transfer(udc, req);
			break;
		case EP0_IN_STATUS_PHASE:
			nret = EP0_send_NULL(udc, true);
			break;
		default:
			break;
		}
	} else {
		/* EPN */
		if (ep->direct == USB_DIR_OUT) {
			/* OUT */
			if (!bflag)
				nret = _nbu2ss_epn_out_transfer(udc, ep, req);
		} else {
			/* IN */
			nret = _nbu2ss_epn_in_transfer(udc, ep, req);
		}
	}
	return nret;
}
/*-------------------------------------------------------------------------*/
/*
 * Restart the transfer at the head of @ep's queue, if any.
 * For EPN, bflag is raised when a short packet is already sitting in
 * the FIFO so the OUT path is not re-kicked.
 * (Name keeps the historical "restert" spelling — renaming would break
 * the existing callers of this long-standing identifier.)
 */
static void _nbu2ss_restert_transfer(struct nbu2ss_ep *ep)
{
	u32 length;
	bool bflag = false;
	struct nbu2ss_req *req;

	req = list_first_entry_or_null(&ep->queue, struct nbu2ss_req, queue);
	if (!req)
		return;
	if (ep->epnum > 0) {
		length = _nbu2ss_readl(&ep->udc->p_regs->EP_REGS[ep->epnum - 1].EP_LEN_DCNT);
		length &= EPN_LDATA;
		if (length < ep->ep.maxpacket)
			bflag = true;	/* short packet already received */
	}
	_nbu2ss_start_transfer(ep->udc, ep, req, bflag);
}
/*-------------------------------------------------------------------------*/
/* Endpoint Toggle Reset */
/*
 * Reset the data toggle (PID) of the endpoint addressed by @ep_adrs.
 * IN endpoints clear the IN PID; OUT endpoints additionally flush the
 * buffer.  EP0 (address 0x00/0x80) is ignored.
 */
static void _nbu2ss_endpoint_toggle_reset(struct nbu2ss_udc *udc, u8 ep_adrs)
{
	u32 clr_bits;
	u8 idx;

	/* nothing to do for EP0 in either direction */
	if (ep_adrs == 0 || ep_adrs == 0x80)
		return;

	idx = (ep_adrs & 0x7F) - 1;
	clr_bits = (ep_adrs & USB_DIR_IN) ? EPN_IPIDCLR
					  : (EPN_BCLR | EPN_OPIDCLR);
	_nbu2ss_bitset(&udc->p_regs->EP_REGS[idx].EP_CONTROL, clr_bits);
}
/*-------------------------------------------------------------------------*/
/* Endpoint STALL set */
/* Endpoint STALL set */
/*
 * Set or clear the STALL condition on the endpoint addressed by
 * @ep_adrs (0x00/0x80 = EP0).  Clearing a stall on a halted endpoint
 * also restarts any queued transfer.
 */
static void _nbu2ss_set_endpoint_stall(struct nbu2ss_udc *udc,
				       u8 ep_adrs, bool bstall)
{
	u8 num, epnum;
	u32 data;
	struct nbu2ss_ep *ep;
	struct fc_regs __iomem *preg = udc->p_regs;

	if ((ep_adrs == 0) || (ep_adrs == 0x80)) {
		if (bstall) {
			/* Set STALL */
			_nbu2ss_bitset(&preg->EP0_CONTROL, EP0_STL);
		} else {
			/* Clear STALL */
			_nbu2ss_bitclr(&preg->EP0_CONTROL, EP0_STL);
		}
	} else {
		epnum = ep_adrs & USB_ENDPOINT_NUMBER_MASK;
		num = epnum - 1;
		ep = &udc->ep[epnum];
		if (bstall) {
			/* Set STALL */
			ep->halted = true;
			if (ep_adrs & USB_DIR_IN)
				data = EPN_BCLR | EPN_ISTL;
			else
				data = EPN_OSTL_EN | EPN_OSTL;
			_nbu2ss_bitset(&preg->EP_REGS[num].EP_CONTROL, data);
		} else {
			if (ep_adrs & USB_DIR_IN) {
				_nbu2ss_bitclr(&preg->EP_REGS[num].EP_CONTROL
						, EPN_ISTL);
			} else {
				/* OUT: clear OSTL but keep OSTL_EN armed */
				data =
				_nbu2ss_readl(&preg->EP_REGS[num].EP_CONTROL);
				data &= ~EPN_OSTL;
				data |= EPN_OSTL_EN;
				_nbu2ss_writel(&preg->EP_REGS[num].EP_CONTROL
						, data);
			}
			/* Clear STALL */
			ep->stalled = false;
			if (ep->halted) {
				ep->halted = false;
				/* resume anything that queued while halted */
				_nbu2ss_restert_transfer(ep);
			}
		}
	}
}
/*-------------------------------------------------------------------------*/
/*
 * Enter the USB electrical test mode @mode (SET_FEATURE/TEST_MODE).
 * Writes the mode into USB_CONTROL and enables the test-mode gate;
 * out-of-range modes are silently ignored.
 */
static void _nbu2ss_set_test_mode(struct nbu2ss_udc *udc, u32 mode)
{
	u32 ctrl;

	if (mode > MAX_TEST_MODE_NUM)
		return;

	dev_info(udc->dev, "SET FEATURE : test mode = %d\n", mode);

	ctrl = _nbu2ss_readl(&udc->p_regs->USB_CONTROL);
	ctrl = (ctrl & ~TEST_FORCE_ENABLE) | (mode << TEST_MODE_SHIFT);
	_nbu2ss_writel(&udc->p_regs->USB_CONTROL, ctrl);

	_nbu2ss_bitset(&udc->p_regs->TEST_CONTROL, CS_TESTMODEEN);
}
/*-------------------------------------------------------------------------*/
/*
 * Handle SET_FEATURE with device recipient.
 * Supports REMOTE_WAKEUP (wIndex must be zero) and TEST_MODE (mode in
 * the high byte of wIndex, validated against MAX_TEST_MODE_NUM).
 * Returns 0 on success, -EOPNOTSUPP otherwise.
 */
static int _nbu2ss_set_feature_device(struct nbu2ss_udc *udc,
				      u16 selector, u16 wIndex)
{
	if (selector == USB_DEVICE_REMOTE_WAKEUP) {
		if (wIndex != 0x0000)
			return -EOPNOTSUPP;
		udc->remote_wakeup = U2F_ENABLE;
		return 0;
	}

	if (selector == USB_DEVICE_TEST_MODE) {
		/* test mode number lives in the upper byte of wIndex */
		if ((wIndex >> 8) <= MAX_TEST_MODE_NUM)
			return 0;
	}

	return -EOPNOTSUPP;
}
/*-------------------------------------------------------------------------*/
/*
 * Query the STALL state of the endpoint addressed by @ep_adrs.
 * Returns 1 if stalled, 0 if not, -1 if the endpoint is disabled.
 */
static int _nbu2ss_get_ep_stall(struct nbu2ss_udc *udc, u8 ep_adrs)
{
	struct fc_regs __iomem *preg = udc->p_regs;
	u32 ctrl, stall_bit;
	u8 epnum = ep_adrs & ~USB_ENDPOINT_DIR_MASK;

	if (epnum == 0) {
		ctrl = _nbu2ss_readl(&preg->EP0_CONTROL);
		stall_bit = EP0_STL;
	} else {
		ctrl = _nbu2ss_readl(&preg->EP_REGS[epnum - 1].EP_CONTROL);
		if (!(ctrl & EPN_EN))
			return -1;	/* endpoint not enabled */
		stall_bit = (ep_adrs & USB_ENDPOINT_DIR_MASK) ? EPN_ISTL
							      : EPN_OSTL;
	}

	return (ctrl & stall_bit) ? 1 : 0;
}
/*-------------------------------------------------------------------------*/
/*
 * Common handler for SET_FEATURE (@bset true) and CLEAR_FEATURE
 * (@bset false) control requests, dispatching on recipient.
 * On success, an empty status-stage packet is queued.
 * Returns 0 on success, -EINVAL for a malformed request, -EOPNOTSUPP
 * for unsupported selectors/recipients.
 */
static inline int _nbu2ss_req_feature(struct nbu2ss_udc *udc, bool bset)
{
	u8 recipient = (u8)(udc->ctrl.bRequestType & USB_RECIP_MASK);
	u8 direction = (u8)(udc->ctrl.bRequestType & USB_DIR_IN);
	u16 selector = le16_to_cpu(udc->ctrl.wValue);
	u16 wIndex = le16_to_cpu(udc->ctrl.wIndex);
	u8 ep_adrs;
	int result = -EOPNOTSUPP;

	/* feature requests carry no data and are host->device */
	if ((udc->ctrl.wLength != 0x0000) ||
	    (direction != USB_DIR_OUT)) {
		return -EINVAL;
	}
	switch (recipient) {
	case USB_RECIP_DEVICE:
		/* only SET_FEATURE is supported at device level */
		if (bset)
			result =
			_nbu2ss_set_feature_device(udc, selector, wIndex);
		break;
	case USB_RECIP_ENDPOINT:
		/* reject reserved bits in wIndex */
		if (0x0000 == (wIndex & 0xFF70)) {
			if (selector == USB_ENDPOINT_HALT) {
				ep_adrs = wIndex & 0xFF;
				if (!bset) {
					/* CLEAR_HALT also resets the toggle */
					_nbu2ss_endpoint_toggle_reset(udc,
								      ep_adrs);
				}
				_nbu2ss_set_endpoint_stall(udc, ep_adrs, bset);
				result = 0;
			}
		}
		break;
	default:
		break;
	}
	if (result >= 0)
		_nbu2ss_create_ep0_packet(udc, udc->ep0_buf, 0);
	return result;
}
/*-------------------------------------------------------------------------*/
/*
 * Report the negotiated bus speed from the USB_STATUS register:
 * USB_SPEED_HIGH if the HIGH_SPEED bit is set, else USB_SPEED_FULL.
 */
static inline enum usb_device_speed _nbu2ss_get_speed(struct nbu2ss_udc *udc)
{
	u32 status = _nbu2ss_readl(&udc->p_regs->USB_STATUS);

	return (status & HIGH_SPEED) ? USB_SPEED_HIGH : USB_SPEED_FULL;
}
/*-------------------------------------------------------------------------*/
/*
 * Stall endpoint @ep.  For IN endpoints, first wait (bounded, up to
 * IN_DATA_EMPTY_COUNT ms) for pending FIFO data to drain so queued
 * data is not lost when the stall takes effect.
 */
static void _nbu2ss_epn_set_stall(struct nbu2ss_udc *udc,
				  struct nbu2ss_ep *ep)
{
	u8 ep_adrs;
	u32 regdata;
	int limit_cnt = 0;
	struct fc_regs __iomem *preg = udc->p_regs;

	if (ep->direct == USB_DIR_IN) {
		/* busy-wait for the IN FIFO to empty, 1 ms per poll */
		for (limit_cnt = 0
			; limit_cnt < IN_DATA_EMPTY_COUNT
			; limit_cnt++) {
			regdata = _nbu2ss_readl(&preg->EP_REGS[ep->epnum - 1].EP_STATUS);
			if ((regdata & EPN_IN_DATA) == 0)
				break;
			mdelay(1);
		}
	}
	ep_adrs = ep->epnum | ep->direct;
	_nbu2ss_set_endpoint_stall(udc, ep_adrs, 1);
}
/*-------------------------------------------------------------------------*/
/*
 * Handle a standard GET_STATUS control request for the device or an
 * endpoint recipient, build the 2-byte status word and start the EP0
 * IN data stage.  Returns 0 on success, negative errno on a malformed
 * or unsupported request.
 */
static int std_req_get_status(struct nbu2ss_udc *udc)
{
	u32 length;
	u16 status_data = 0;
	u8 recipient = (u8)(udc->ctrl.bRequestType & USB_RECIP_MASK);
	u8 direction = (u8)(udc->ctrl.bRequestType & USB_DIR_IN);
	u8 ep_adrs;
	int result = -EINVAL;

	/* GET_STATUS must be device->host with wValue == 0 */
	if ((udc->ctrl.wValue != 0x0000) || (direction != USB_DIR_IN))
		return result;

	/* never copy more than the 2-byte status word */
	length =
		min_t(u16, le16_to_cpu(udc->ctrl.wLength), sizeof(status_data));
	switch (recipient) {
	case USB_RECIP_DEVICE:
		if (udc->ctrl.wIndex == 0x0000) {
			if (udc->gadget.is_selfpowered)
				status_data |= BIT(USB_DEVICE_SELF_POWERED);
			if (udc->remote_wakeup)
				status_data |= BIT(USB_DEVICE_REMOTE_WAKEUP);
			result = 0;
		}
		break;
	case USB_RECIP_ENDPOINT:
		/* reject reserved bits in wIndex */
		if (0x0000 == (le16_to_cpu(udc->ctrl.wIndex) & 0xFF70)) {
			ep_adrs = (u8)(le16_to_cpu(udc->ctrl.wIndex) & 0xFF);
			result = _nbu2ss_get_ep_stall(udc, ep_adrs);
			if (result > 0)
				status_data |= BIT(USB_ENDPOINT_HALT);
		}
		break;
	default:
		break;
	}
	if (result >= 0) {
		memcpy(udc->ep0_buf, &status_data, length);
		_nbu2ss_create_ep0_packet(udc, udc->ep0_buf, length);
		_nbu2ss_ep0_in_transfer(udc, &udc->ep0_req);
	} else {
		dev_err(udc->dev, " Error GET_STATUS\n");
	}
	return result;
}
/*-------------------------------------------------------------------------*/
/* Standard CLEAR_FEATURE request: delegate to the common handler. */
static int std_req_clear_feature(struct nbu2ss_udc *udc)
{
	return _nbu2ss_req_feature(udc, false);
}
/*-------------------------------------------------------------------------*/
/* Standard SET_FEATURE request: delegate to the common handler. */
static int std_req_set_feature(struct nbu2ss_udc *udc)
{
	return _nbu2ss_req_feature(udc, true);
}
/*-------------------------------------------------------------------------*/
static int std_req_set_address(struct nbu2ss_udc *udc)
{
int result = 0;
u32 wValue = le16_to_cpu(udc->ctrl.wValue);
if ((udc->ctrl.bRequestType != 0x00) ||
(udc->ctrl.wIndex != 0x0000) ||
(udc->ctrl.wLength != 0x0000)) {
return -EINVAL;
}
if (wValue != (wValue & 0x007F))
return -EINVAL;
wValue <<= USB_ADRS_SHIFT;
_nbu2ss_writel(&udc->p_regs->USB_ADDRESS, wValue);
_nbu2ss_create_ep0_packet(udc, udc->ep0_buf, 0);
return result;
}
/*-------------------------------------------------------------------------*/
/*
 * Handle SET_CONFIGURATION: record the selected configuration and
 * toggle the controller's CONF bit / device state accordingly
 * (configuration 0 returns the device to the ADDRESS state).
 */
static int std_req_set_configuration(struct nbu2ss_udc *udc)
{
	u32 config = le16_to_cpu(udc->ctrl.wValue) & 0x00ff;

	if (udc->ctrl.wIndex != 0x0000 ||
	    udc->ctrl.wLength != 0x0000 ||
	    udc->ctrl.bRequestType != 0x00)
		return -EINVAL;

	udc->curr_config = config;

	if (config) {
		_nbu2ss_bitset(&udc->p_regs->USB_CONTROL, CONF);
		udc->devstate = USB_STATE_CONFIGURED;
	} else {
		_nbu2ss_bitclr(&udc->p_regs->USB_CONTROL, CONF);
		udc->devstate = USB_STATE_ADDRESS;
	}

	return 0;
}
/*-------------------------------------------------------------------------*/
/*
 * Copy the 8-byte SETUP packet out of the hardware latch registers
 * into @pdata (two 32-bit words).
 */
static inline void _nbu2ss_read_request_data(struct nbu2ss_udc *udc, u32 *pdata)
{
	pdata[0] = _nbu2ss_readl(&udc->p_regs->SETUP_DATA0);
	pdata[1] = _nbu2ss_readl(&udc->p_regs->SETUP_DATA1);
}
/*-------------------------------------------------------------------------*/
/*
 * Read and decode a SETUP packet.
 * Sets the EP0 state machine for the coming stage, handles the
 * standard requests the controller implements locally, and forwards
 * everything else to the gadget driver's setup() callback (with the
 * udc lock dropped across the callback).  A negative return resets
 * ep0state to EP0_IDLE; the caller then stalls EP0.
 */
static inline int _nbu2ss_decode_request(struct nbu2ss_udc *udc)
{
	bool bcall_back = true;	/* forward to gadget driver by default */
	int nret = -EINVAL;
	struct usb_ctrlrequest *p_ctrl;

	p_ctrl = &udc->ctrl;
	_nbu2ss_read_request_data(udc, (u32 *)p_ctrl);
	/* ep0 state control */
	if (p_ctrl->wLength == 0) {
		udc->ep0state = EP0_IN_STATUS_PHASE;
	} else {
		if (p_ctrl->bRequestType & USB_DIR_IN)
			udc->ep0state = EP0_IN_DATA_PHASE;
		else
			udc->ep0state = EP0_OUT_DATA_PHASE;
	}
	if ((p_ctrl->bRequestType & USB_TYPE_MASK) == USB_TYPE_STANDARD) {
		switch (p_ctrl->bRequest) {
		case USB_REQ_GET_STATUS:
			nret = std_req_get_status(udc);
			bcall_back = false;
			break;
		case USB_REQ_CLEAR_FEATURE:
			nret = std_req_clear_feature(udc);
			bcall_back = false;
			break;
		case USB_REQ_SET_FEATURE:
			nret = std_req_set_feature(udc);
			bcall_back = false;
			break;
		case USB_REQ_SET_ADDRESS:
			nret = std_req_set_address(udc);
			bcall_back = false;
			break;
		case USB_REQ_SET_CONFIGURATION:
			/* handled locally AND forwarded to the gadget */
			nret = std_req_set_configuration(udc);
			break;
		default:
			break;
		}
	}
	if (!bcall_back) {
		if (udc->ep0state == EP0_IN_STATUS_PHASE) {
			if (nret >= 0) {
				/*--------------------------------------*/
				/* Status Stage */
				nret = EP0_send_NULL(udc, true);
			}
		}
	} else {
		/* gadget callbacks may sleep/re-enter: drop the lock */
		spin_unlock(&udc->lock);
		nret = udc->driver->setup(&udc->gadget, &udc->ctrl);
		spin_lock(&udc->lock);
	}
	if (nret < 0)
		udc->ep0state = EP0_IDLE;
	return nret;
}
/*-------------------------------------------------------------------------*/
/*
 * EP0 IN-data-stage interrupt work: fold the last chunk into
 * req.actual, push the next chunk, and when nothing remains move on
 * to the OUT status stage.  Always returns 0.
 */
static inline int _nbu2ss_ep0_in_data_stage(struct nbu2ss_udc *udc)
{
	int nret;
	struct nbu2ss_req *req;
	struct nbu2ss_ep *ep = &udc->ep[0];

	req = list_first_entry_or_null(&ep->queue, struct nbu2ss_req, queue);
	if (!req)
		req = &udc->ep0_req;	/* fall back to the internal request */
	req->req.actual += req->div_len;
	req->div_len = 0;
	nret = _nbu2ss_ep0_in_transfer(udc, req);
	if (nret == 0) {
		/* data stage finished: expect a zero-length OUT status */
		udc->ep0state = EP0_OUT_STATUS_PAHSE;
		EP0_receive_NULL(udc, true);
	}
	return 0;
}
/*-------------------------------------------------------------------------*/
/*
 * EP0 OUT-data-stage interrupt work: pull the received chunk; when the
 * transfer completes send the IN status packet, and on error flush the
 * EP0 buffer and record the failure in req.status.  Always returns 0.
 */
static inline int _nbu2ss_ep0_out_data_stage(struct nbu2ss_udc *udc)
{
	int nret;
	struct nbu2ss_req *req;
	struct nbu2ss_ep *ep = &udc->ep[0];

	req = list_first_entry_or_null(&ep->queue, struct nbu2ss_req, queue);
	if (!req)
		req = &udc->ep0_req;	/* fall back to the internal request */
	nret = _nbu2ss_ep0_out_transfer(udc, req);
	if (nret == 0) {
		udc->ep0state = EP0_IN_STATUS_PHASE;
		EP0_send_NULL(udc, true);
	} else if (nret < 0) {
		/* error: drop whatever is left in the EP0 FIFO */
		_nbu2ss_bitset(&udc->p_regs->EP0_CONTROL, EP0_BCLR);
		req->req.status = nret;
	}
	return 0;
}
/*-------------------------------------------------------------------------*/
/*
 * EP0 status-stage completion: complete the queued request (or the
 * internal ep0_req when the queue is empty) and return the EP0 state
 * machine to idle.  Always returns 0.
 */
static inline int _nbu2ss_ep0_status_stage(struct nbu2ss_udc *udc)
{
	struct nbu2ss_req *req;
	struct nbu2ss_ep *ep = &udc->ep[0];

	req = list_first_entry_or_null(&ep->queue, struct nbu2ss_req, queue);
	if (!req) {
		req = &udc->ep0_req;
		/* internal request is not queued: call completion directly */
		if (req->req.complete)
			req->req.complete(&ep->ep, &req->req);
	} else {
		if (req->req.complete)
			_nbu2ss_ep_done(ep, req, 0);
	}
	udc->ep0state = EP0_IDLE;
	return 0;
}
/*-------------------------------------------------------------------------*/
/*
 * EP0 interrupt handler: acknowledge the status bits, then run the
 * EP0 state machine (bounded loop) dispatching SETUP/IN/OUT/status
 * events.  If any stage handler failed, EP0 is stalled.
 */
static inline void _nbu2ss_ep0_int(struct nbu2ss_udc *udc)
{
	int i;
	u32 status;
	u32 intr;
	int nret = -1;

	status = _nbu2ss_readl(&udc->p_regs->EP0_STATUS);
	intr = status & EP0_STATUS_RW_BIT;
	/* write-1-to-clear style acknowledge of the handled bits */
	_nbu2ss_writel(&udc->p_regs->EP0_STATUS, ~intr);
	status &= (SETUP_INT | EP0_IN_INT | EP0_OUT_INT
			| STG_END_INT | EP0_OUT_NULL_INT);
	if (status == 0) {
		dev_info(udc->dev, "%s Not Decode Interrupt\n", __func__);
		dev_info(udc->dev, "EP0_STATUS = 0x%08x\n", intr);
		return;
	}
	/* first event after connect: latch the negotiated speed */
	if (udc->gadget.speed == USB_SPEED_UNKNOWN)
		udc->gadget.speed = _nbu2ss_get_speed(udc);
	for (i = 0; i < EP0_END_XFER; i++) {
		switch (udc->ep0state) {
		case EP0_IDLE:
			if (status & SETUP_INT) {
				status = 0;
				nret = _nbu2ss_decode_request(udc);
			}
			break;
		case EP0_IN_DATA_PHASE:
			if (status & EP0_IN_INT) {
				status &= ~EP0_IN_INT;
				nret = _nbu2ss_ep0_in_data_stage(udc);
			}
			break;
		case EP0_OUT_DATA_PHASE:
			if (status & EP0_OUT_INT) {
				status &= ~EP0_OUT_INT;
				nret = _nbu2ss_ep0_out_data_stage(udc);
			}
			break;
		case EP0_IN_STATUS_PHASE:
			if ((status & STG_END_INT) || (status & SETUP_INT)) {
				status &= ~(STG_END_INT | EP0_IN_INT);
				nret = _nbu2ss_ep0_status_stage(udc);
			}
			break;
		case EP0_OUT_STATUS_PAHSE:
			if ((status & STG_END_INT) || (status & SETUP_INT) ||
			    (status & EP0_OUT_NULL_INT)) {
				status &= ~(STG_END_INT
						| EP0_OUT_INT
						| EP0_OUT_NULL_INT);
				nret = _nbu2ss_ep0_status_stage(udc);
			}
			break;
		default:
			status = 0;
			break;
		}
		if (status == 0)
			break;
	}
	if (nret < 0) {
		/* Send Stall */
		_nbu2ss_set_endpoint_stall(udc, 0, true);
	}
}
/*-------------------------------------------------------------------------*/
/*
 * Complete request @req on @ep with @status: dequeue it, propagate
 * the status, stall the endpoint or restart the next queued transfer,
 * unmap DMA (OUT direction), then invoke the gadget completion
 * callback with udc->lock temporarily dropped.
 * Must be called with udc->lock held.
 */
static void _nbu2ss_ep_done(struct nbu2ss_ep *ep,
			    struct nbu2ss_req *req,
			    int status)
{
	struct nbu2ss_udc *udc = ep->udc;

	list_del_init(&req->queue);
	/* a dequeue-by-disconnect also flushes the FIFO */
	if (status == -ECONNRESET)
		_nbu2ss_fifo_flush(udc, ep);
	/* don't overwrite a status already set by an earlier error path */
	if (likely(req->req.status == -EINPROGRESS))
		req->req.status = status;
	if (ep->stalled) {
		_nbu2ss_epn_set_stall(udc, ep);
	} else {
		if (!list_empty(&ep->queue))
			_nbu2ss_restert_transfer(ep);
	}
#ifdef USE_DMA
	if ((ep->direct == USB_DIR_OUT) && (ep->epnum > 0) &&
	    (req->req.dma != 0))
		_nbu2ss_dma_unmap_single(udc, ep, req, USB_DIR_OUT);
#endif
	/* completion callback may re-queue; call it unlocked */
	spin_unlock(&udc->lock);
	req->req.complete(&ep->ep, &req->req);
	spin_lock(&udc->lock);
}
/*-------------------------------------------------------------------------*/
/*
 * EPN IN interrupt: account the chunk just sent, push the remainder
 * (or a trailing ZLP when requested), and complete the request once
 * nothing is left.  Ignored while DMA owns the request.
 */
static inline void _nbu2ss_epn_in_int(struct nbu2ss_udc *udc,
				      struct nbu2ss_ep *ep,
				      struct nbu2ss_req *req)
{
	int result = 0;
	u32 status;
	struct fc_regs __iomem *preg = udc->p_regs;

	if (req->dma_flag)
		return;		/* DMA is forwarded */
	req->req.actual += req->div_len;
	req->div_len = 0;
	if (req->req.actual != req->req.length) {
		/*---------------------------------------------------------*/
		/* remainder of data */
		result = _nbu2ss_epn_in_transfer(udc, ep, req);
	} else {
		if (req->zero && ((req->req.actual % ep->ep.maxpacket) == 0)) {
			status =
			_nbu2ss_readl(&preg->EP_REGS[ep->epnum - 1].EP_STATUS);
			if ((status & EPN_IN_FULL) == 0) {
				/*-----------------------------------------*/
				/* 0 Length Packet */
				req->zero = false;
				_nbu2ss_zero_len_pkt(udc, ep->epnum);
			}
			/* wait for the ZLP's own completion interrupt */
			return;
		}
	}
	if (result <= 0) {
		/*---------------------------------------------------------*/
		/* Complete */
		_nbu2ss_ep_done(ep, req, result);
	}
}
/*-------------------------------------------------------------------------*/
/*
 * EPN OUT interrupt: advance the transfer and complete the request
 * when it reports done (0) or failed (<0).
 */
static inline void _nbu2ss_epn_out_int(struct nbu2ss_udc *udc,
				       struct nbu2ss_ep *ep,
				       struct nbu2ss_req *req)
{
	int status = _nbu2ss_epn_out_transfer(udc, ep, req);

	if (status <= 0)
		_nbu2ss_ep_done(ep, req, status);
}
/*-------------------------------------------------------------------------*/
/*
 * EPN IN DMA-completion interrupt: account the DMA chunk, unmap the
 * buffer, then either queue the next chunk, flush a word-unaligned
 * tail, or fall through to the PIO completion path.
 */
static inline void _nbu2ss_epn_in_dma_int(struct nbu2ss_udc *udc,
					  struct nbu2ss_ep *ep,
					  struct nbu2ss_req *req)
{
	u32 mpkt;
	u32 size;
	struct usb_request *preq;

	preq = &req->req;
	if (!req->dma_flag)
		return;		/* not a DMA transfer: nothing to finish */
	preq->actual += req->div_len;
	req->div_len = 0;
	req->dma_flag = false;
#ifdef USE_DMA
	_nbu2ss_dma_unmap_single(udc, ep, req, USB_DIR_IN);
#endif
	if (preq->actual != preq->length) {
		/* more data pending: start the next chunk */
		_nbu2ss_epn_in_transfer(udc, ep, req);
	} else {
		mpkt = ep->ep.maxpacket;
		size = preq->actual % mpkt;
		if (size > 0) {
			/* word-aligned short tail: explicitly end the packet */
			if (((preq->actual & 0x03) == 0) && (size < mpkt))
				_nbu2ss_ep_in_end(udc, ep->epnum, 0, 0);
		} else {
			/* exact packet multiple: normal IN completion path */
			_nbu2ss_epn_in_int(udc, ep, req);
		}
	}
}
/*-------------------------------------------------------------------------*/
/*
 * EPN OUT DMA-completion interrupt: reconcile the hardware's packet
 * counters with the request bookkeeping, stop the DMA request line,
 * and either complete the request or continue via the PIO OUT path.
 */
static inline void _nbu2ss_epn_out_dma_int(struct nbu2ss_udc *udc,
					   struct nbu2ss_ep *ep,
					   struct nbu2ss_req *req)
{
	int i;
	u32 num;
	u32 dmacnt, ep_dmacnt;
	u32 mpkt;
	struct fc_regs __iomem *preg = udc->p_regs;

	num = ep->epnum - 1;
	if (req->req.actual == req->req.length) {
		/* already complete and no ZLP expected: finish now */
		if ((req->req.length % ep->ep.maxpacket) && !req->zero) {
			req->div_len = 0;
			req->dma_flag = false;
			_nbu2ss_ep_done(ep, req, 0);
			return;
		}
	}
	ep_dmacnt = _nbu2ss_readl(&preg->EP_REGS[num].EP_LEN_DCNT)
		& EPN_DMACNT;
	ep_dmacnt >>= 16;
	/* poll until EPC and DMA controller agree on remaining packets */
	for (i = 0; i < EPC_PLL_LOCK_COUNT; i++) {
		dmacnt = _nbu2ss_readl(&preg->EP_DCR[num].EP_DCR1)
			& DCR1_EPN_DMACNT;
		dmacnt >>= 16;
		if (ep_dmacnt == dmacnt)
			break;
	}
	_nbu2ss_bitclr(&preg->EP_DCR[num].EP_DCR1, DCR1_EPN_REQEN);
	if (dmacnt != 0) {
		/* packets still outstanding: back them out of div_len */
		mpkt = ep->ep.maxpacket;
		if ((req->div_len % mpkt) == 0)
			req->div_len -= mpkt * dmacnt;
	}
	if ((req->req.actual % ep->ep.maxpacket) > 0) {
		if (req->req.actual == req->div_len) {
			req->div_len = 0;
			req->dma_flag = false;
			_nbu2ss_ep_done(ep, req, 0);
			return;
		}
	}
	req->req.actual += req->div_len;
	req->div_len = 0;
	req->dma_flag = false;
	/* hand any remaining tail to the PIO OUT completion path */
	_nbu2ss_epn_out_int(udc, ep, req);
}
/*-------------------------------------------------------------------------*/
/*
 * EPN interrupt dispatcher: acknowledge the endpoint's status bits and
 * route DMA-end / PIO IN / PIO OUT events for the request at the head
 * of the queue.  A DMA-end event masks the matching PIO event so only
 * one handler runs per direction.
 */
static inline void _nbu2ss_epn_int(struct nbu2ss_udc *udc, u32 epnum)
{
	u32 num;
	u32 status;
	struct nbu2ss_req *req;
	struct nbu2ss_ep *ep = &udc->ep[epnum];

	num = epnum - 1;
	/* Interrupt Status */
	status = _nbu2ss_readl(&udc->p_regs->EP_REGS[num].EP_STATUS);
	/* Interrupt Clear */
	_nbu2ss_writel(&udc->p_regs->EP_REGS[num].EP_STATUS, ~status);
	req = list_first_entry_or_null(&ep->queue, struct nbu2ss_req, queue);
	if (!req) {
		/* pr_warn("=== %s(%d) req == NULL\n", __func__, epnum); */
		return;
	}
	if (status & EPN_OUT_END_INT) {
		status &= ~EPN_OUT_INT;
		_nbu2ss_epn_out_dma_int(udc, ep, req);
	}
	if (status & EPN_OUT_INT)
		_nbu2ss_epn_out_int(udc, ep, req);
	if (status & EPN_IN_END_INT) {
		status &= ~EPN_IN_INT;
		_nbu2ss_epn_in_dma_int(udc, ep, req);
	}
	if (status & EPN_IN_INT)
		_nbu2ss_epn_in_int(udc, ep, req);
}
/*-------------------------------------------------------------------------*/
/* Route an endpoint interrupt to the EP0 or EPN handler. */
static inline void _nbu2ss_ep_int(struct nbu2ss_udc *udc, u32 epnum)
{
	if (epnum)
		_nbu2ss_epn_int(udc, epnum);
	else
		_nbu2ss_ep0_int(udc);
}
/*-------------------------------------------------------------------------*/
/* Enable EP0: auto mode + buffer clear, then unmask its interrupts. */
static void _nbu2ss_ep0_enable(struct nbu2ss_udc *udc)
{
	_nbu2ss_bitset(&udc->p_regs->EP0_CONTROL, (EP0_AUTO | EP0_BCLR));
	_nbu2ss_writel(&udc->p_regs->EP0_INT_ENA, EP0_INT_EN_BIT);
}
/*-------------------------------------------------------------------------*/
/*
 * Abort everything on @ep: disable the endpoint and its DMA, then
 * complete every queued request with @status.
 * Called with interrupts blocked (udc->lock held).  Returns 0.
 */
static int _nbu2ss_nuke(struct nbu2ss_udc *udc,
			struct nbu2ss_ep *ep,
			int status)
{
	struct nbu2ss_req *req, *n;

	/* Endpoint Disable */
	_nbu2ss_epn_exit(udc, ep);
	/* DMA Disable */
	_nbu2ss_ep_dma_exit(udc, ep);
	if (list_empty(&ep->queue))
		return 0;
	/* called with irqs blocked */
	list_for_each_entry_safe(req, n, &ep->queue, queue) {
		_nbu2ss_ep_done(ep, req, status);
	}
	return 0;
}
/*-------------------------------------------------------------------------*/
/*
 * Tear down all activity: forget the negotiated speed and nuke EP0
 * plus every gadget endpoint with -ESHUTDOWN.
 */
static void _nbu2ss_quiesce(struct nbu2ss_udc *udc)
{
	struct nbu2ss_ep *ep;

	udc->gadget.speed = USB_SPEED_UNKNOWN;
	_nbu2ss_nuke(udc, &udc->ep[0], -ESHUTDOWN);
	/* Endpoint n */
	list_for_each_entry(ep, &udc->gadget.ep_list, ep.ep_list) {
		_nbu2ss_nuke(udc, ep, -ESHUTDOWN);
	}
}
/*-------------------------------------------------------------------------*/
/*
 * Control the D+ pull-up (soft connect/disconnect).
 * Connect only happens when a gadget driver is bound; disconnect also
 * forgets the negotiated speed.  Returns -ESHUTDOWN without VBUS.
 */
static int _nbu2ss_pullup(struct nbu2ss_udc *udc, int is_on)
{
	u32 reg_dt;

	if (udc->vbus_active == 0)
		return -ESHUTDOWN;
	if (is_on) {
		/* D+ Pullup */
		if (udc->driver) {
			reg_dt = (_nbu2ss_readl(&udc->p_regs->USB_CONTROL)
				| PUE2) & ~(u32)CONNECTB;
			_nbu2ss_writel(&udc->p_regs->USB_CONTROL, reg_dt);
		}
	} else {
		/* D+ Pulldown */
		reg_dt = (_nbu2ss_readl(&udc->p_regs->USB_CONTROL) | CONNECTB)
			& ~(u32)PUE2;
		_nbu2ss_writel(&udc->p_regs->USB_CONTROL, reg_dt);
		udc->gadget.speed = USB_SPEED_UNKNOWN;
	}
	return 0;
}
/*-------------------------------------------------------------------------*/
/*
 * Flush @ep's FIFO (and abort its DMA for EPN).  No-op without VBUS.
 */
static void _nbu2ss_fifo_flush(struct nbu2ss_udc *udc, struct nbu2ss_ep *ep)
{
	struct fc_regs __iomem *p = udc->p_regs;

	if (udc->vbus_active == 0)
		return;
	if (ep->epnum == 0) {
		/* EP0 */
		_nbu2ss_bitset(&p->EP0_CONTROL, EP0_BCLR);
	} else {
		/* EPN */
		_nbu2ss_ep_dma_abort(udc, ep);
		_nbu2ss_bitset(&p->EP_REGS[ep->epnum - 1].EP_CONTROL, EPN_BCLR);
	}
}
/*-------------------------------------------------------------------------*/
/*
 * Power up and initialize the controller: timed reset sequence, bus
 * configuration, PLL-lock wait (bounded), UTMI tuning, EP0 enable and
 * interrupt unmask.  Idempotent once udc_enabled is set.
 * Returns 0 on success, -EINVAL if the PLL never locks.
 */
static int _nbu2ss_enable_controller(struct nbu2ss_udc *udc)
{
	int waitcnt = 0;

	if (udc->udc_enabled)
		return 0;
	/* Reset */
	_nbu2ss_bitset(&udc->p_regs->EPCTR, (DIRPD | EPC_RST));
	udelay(EPC_RST_DISABLE_TIME);	/* 1us wait */
	_nbu2ss_bitclr(&udc->p_regs->EPCTR, DIRPD);
	mdelay(EPC_DIRPD_DISABLE_TIME);	/* 1ms wait */
	_nbu2ss_bitclr(&udc->p_regs->EPCTR, EPC_RST);
	_nbu2ss_writel(&udc->p_regs->AHBSCTR, WAIT_MODE);
	_nbu2ss_writel(&udc->p_regs->AHBMCTR,
		       HBUSREQ_MODE | HTRANS_MODE | WBURST_TYPE);
	/* bounded wait for the USB PLL to lock */
	while (!(_nbu2ss_readl(&udc->p_regs->EPCTR) & PLL_LOCK)) {
		waitcnt++;
		udelay(1);	/* 1us wait */
		if (waitcnt == EPC_PLL_LOCK_COUNT) {
			dev_err(udc->dev, "*** Reset Cancel failed\n");
			return -EINVAL;
		}
	}
	_nbu2ss_bitset(&udc->p_regs->UTMI_CHARACTER_1, USB_SQUSET);
	_nbu2ss_bitset(&udc->p_regs->USB_CONTROL, (INT_SEL | SOF_RCV));
	/* EP0 */
	_nbu2ss_ep0_enable(udc);
	/* USB Interrupt Enable */
	_nbu2ss_bitset(&udc->p_regs->USB_INT_ENA, USB_INT_EN_BIT);
	udc->udc_enabled = true;
	return 0;
}
/*-------------------------------------------------------------------------*/
/* Pulse the controller reset bit (assert then release EPC_RST). */
static void _nbu2ss_reset_controller(struct nbu2ss_udc *udc)
{
	_nbu2ss_bitset(&udc->p_regs->EPCTR, EPC_RST);
	_nbu2ss_bitclr(&udc->p_regs->EPCTR, EPC_RST);
}
/*-------------------------------------------------------------------------*/
/*
 * Power down the controller: reset it, then hold it in reset with
 * power-down (DIRPD) asserted.  No-op when already disabled.
 */
static void _nbu2ss_disable_controller(struct nbu2ss_udc *udc)
{
	if (udc->udc_enabled) {
		udc->udc_enabled = false;
		_nbu2ss_reset_controller(udc);
		_nbu2ss_bitset(&udc->p_regs->EPCTR, (DIRPD | EPC_RST));
	}
}
/*-------------------------------------------------------------------------*/
/*
 * Debounce and act on a VBUS level change.
 * VBUS off: reset the controller, quiesce endpoints, notify the gadget
 * driver's disconnect() (lock dropped) and power down.
 * VBUS on (confirmed after a 5 ms re-read): power up the controller
 * and connect the D+ pull-up.  Called with udc->lock held.
 */
static inline void _nbu2ss_check_vbus(struct nbu2ss_udc *udc)
{
	int nret;
	u32 reg_dt;

	/* chattering */
	mdelay(VBUS_CHATTERING_MDELAY);		/* wait (ms) */
	/* VBUS ON Check*/
	reg_dt = gpiod_get_value(vbus_gpio);
	if (reg_dt == 0) {
		udc->linux_suspended = 0;
		_nbu2ss_reset_controller(udc);
		dev_info(udc->dev, " ----- VBUS OFF\n");
		if (udc->vbus_active == 1) {
			/* VBUS OFF */
			udc->vbus_active = 0;
			if (udc->usb_suspended) {
				udc->usb_suspended = 0;
				/* _nbu2ss_reset_controller(udc); */
			}
			udc->devstate = USB_STATE_NOTATTACHED;
			_nbu2ss_quiesce(udc);
			if (udc->driver) {
				/* gadget callback may sleep: drop the lock */
				spin_unlock(&udc->lock);
				udc->driver->disconnect(&udc->gadget);
				spin_lock(&udc->lock);
			}
			_nbu2ss_disable_controller(udc);
		}
	} else {
		mdelay(5);		/* wait (5ms) */
		/* re-read to reject glitches */
		reg_dt = gpiod_get_value(vbus_gpio);
		if (reg_dt == 0)
			return;
		dev_info(udc->dev, " ----- VBUS ON\n");
		if (udc->linux_suspended)
			return;
		if (udc->vbus_active == 0) {
			/* VBUS ON */
			udc->vbus_active = 1;
			udc->devstate = USB_STATE_POWERED;
			nret = _nbu2ss_enable_controller(udc);
			if (nret < 0) {
				_nbu2ss_disable_controller(udc);
				udc->vbus_active = 0;
				return;
			}
			_nbu2ss_pullup(udc, 1);
#ifdef UDC_DEBUG_DUMP
			_nbu2ss_dump_register(udc);
#endif /* UDC_DEBUG_DUMP */
		} else {
			if (udc->devstate == USB_STATE_POWERED)
				_nbu2ss_pullup(udc, 1);
		}
	}
}
/*-------------------------------------------------------------------------*/
/*
 * USB bus-reset interrupt: return to the default device state, clear
 * remote wakeup, quiesce all endpoints and reset the EP0 state machine.
 */
static inline void _nbu2ss_int_bus_reset(struct nbu2ss_udc *udc)
{
	udc->devstate = USB_STATE_DEFAULT;
	udc->remote_wakeup = 0;
	_nbu2ss_quiesce(udc);
	udc->ep0state = EP0_IDLE;
}
/*-------------------------------------------------------------------------*/
/*
 * USB resume interrupt: leave the suspended state and notify the gadget
 * driver. The lock is dropped around the ->resume() callback.
 */
static inline void _nbu2ss_int_usb_resume(struct nbu2ss_udc *udc)
{
	if (udc->usb_suspended == 1) {
		udc->usb_suspended = 0;
		if (udc->driver && udc->driver->resume) {
			spin_unlock(&udc->lock);
			udc->driver->resume(&udc->gadget);
			spin_lock(&udc->lock);
		}
	}
}
/*-------------------------------------------------------------------------*/
/*
 * USB suspend interrupt: only honoured while VBUS is still present.
 * Notifies the gadget driver (lock dropped around ->suspend()), then
 * puts the controller into its low-power SUSPEND state.
 */
static inline void _nbu2ss_int_usb_suspend(struct nbu2ss_udc *udc)
{
	u32 reg_dt;

	if (udc->usb_suspended == 0) {
		/* a suspend with VBUS gone is really a disconnect; ignore */
		reg_dt = gpiod_get_value(vbus_gpio);
		if (reg_dt == 0)
			return;
		udc->usb_suspended = 1;
		if (udc->driver && udc->driver->suspend) {
			spin_unlock(&udc->lock);
			udc->driver->suspend(&udc->gadget);
			spin_lock(&udc->lock);
		}
		_nbu2ss_bitset(&udc->p_regs->USB_CONTROL, SUSPEND);
	}
}
/*-------------------------------------------------------------------------*/
/* VBUS (GPIO153) Interrupt */
/*
 * Edge-triggered VBUS interrupt handler: delegate to _nbu2ss_check_vbus()
 * under the udc lock to (dis)connect the controller.
 */
static irqreturn_t _nbu2ss_vbus_irq(int irq, void *_udc)
{
	struct nbu2ss_udc *udc = (struct nbu2ss_udc *)_udc;

	spin_lock(&udc->lock);
	_nbu2ss_check_vbus(udc);
	spin_unlock(&udc->lock);
	return IRQ_HANDLED;
}
/*-------------------------------------------------------------------------*/
/* Interrupt (udc) */
/*
 * Main USB function-controller interrupt handler. Drains the interrupt
 * status register in a loop, handling bus reset, resume and per-endpoint
 * interrupts; suspend is deferred until the loop finishes so endpoint
 * events are serviced first.
 */
static irqreturn_t _nbu2ss_udc_irq(int irq, void *_udc)
{
	u8 suspend_flag = 0;
	u32 status;
	u32 epnum, int_bit;
	struct nbu2ss_udc *udc = (struct nbu2ss_udc *)_udc;
	struct fc_regs __iomem *preg = udc->p_regs;

	/* VBUS gone: ack and mask all interrupts, nothing else to do */
	if (gpiod_get_value(vbus_gpio) == 0) {
		_nbu2ss_writel(&preg->USB_INT_STA, ~USB_INT_STA_RW);
		_nbu2ss_writel(&preg->USB_INT_ENA, 0);
		return IRQ_HANDLED;
	}
	spin_lock(&udc->lock);
	for (;;) {
		/* re-check VBUS each pass; it may drop mid-loop */
		if (gpiod_get_value(vbus_gpio) == 0) {
			_nbu2ss_writel(&preg->USB_INT_STA, ~USB_INT_STA_RW);
			_nbu2ss_writel(&preg->USB_INT_ENA, 0);
			status = 0;
		} else {
			status = _nbu2ss_readl(&preg->USB_INT_STA);
		}
		if (status == 0)
			break;
		/* ack only the writable bits we are about to handle */
		_nbu2ss_writel(&preg->USB_INT_STA, ~(status & USB_INT_STA_RW));
		if (status & USB_RST_INT) {
			/* USB Reset */
			_nbu2ss_int_bus_reset(udc);
		}
		if (status & RSUM_INT) {
			/* Resume */
			_nbu2ss_int_usb_resume(udc);
		}
		if (status & SPND_INT) {
			/* Suspend */
			suspend_flag = 1;
		}
		if (status & EPN_INT) {
			/* EP INT: bit 8 upwards maps to endpoint 0 upwards */
			int_bit = status >> 8;
			for (epnum = 0; epnum < NUM_ENDPOINTS; epnum++) {
				if (0x01 & int_bit)
					_nbu2ss_ep_int(udc, epnum);
				int_bit >>= 1;
				if (int_bit == 0)
					break;
			}
		}
	}
	if (suspend_flag)
		_nbu2ss_int_usb_suspend(udc);
	spin_unlock(&udc->lock);
	return IRQ_HANDLED;
}
/*-------------------------------------------------------------------------*/
/* usb_ep_ops */
/*
 * usb_ep_ops .enable: configure an endpoint from its descriptor.
 * Control and isochronous endpoints are rejected (hardware endpoints
 * here serve bulk/interrupt only via this path). Returns 0 on success,
 * -EINVAL on bad parameters, -ESHUTDOWN when VBUS is off or no gadget
 * driver is bound.
 */
static int nbu2ss_ep_enable(struct usb_ep *_ep,
			    const struct usb_endpoint_descriptor *desc)
{
	u8 ep_type;
	unsigned long flags;
	struct nbu2ss_ep *ep;
	struct nbu2ss_udc *udc;

	if (!_ep || !desc) {
		pr_err(" *** %s, bad param\n", __func__);
		return -EINVAL;
	}
	ep = container_of(_ep, struct nbu2ss_ep, ep);
	if (!ep->udc) {
		pr_err(" *** %s, ep == NULL !!\n", __func__);
		return -EINVAL;
	}
	ep_type = usb_endpoint_type(desc);
	if ((ep_type == USB_ENDPOINT_XFER_CONTROL) ||
	    (ep_type == USB_ENDPOINT_XFER_ISOC)) {
		pr_err(" *** %s, bat bmAttributes\n", __func__);
		return -EINVAL;
	}
	udc = ep->udc;
	if (udc->vbus_active == 0)
		return -ESHUTDOWN;
	if ((!udc->driver) || (udc->gadget.speed == USB_SPEED_UNKNOWN)) {
		dev_err(ep->udc->dev, " *** %s, udc !!\n", __func__);
		return -ESHUTDOWN;
	}
	spin_lock_irqsave(&udc->lock, flags);
	/* cache descriptor-derived state on the endpoint */
	ep->desc = desc;
	ep->epnum = usb_endpoint_num(desc);
	ep->direct = desc->bEndpointAddress & USB_ENDPOINT_DIR_MASK;
	ep->ep_type = ep_type;
	ep->wedged = 0;
	ep->halted = false;
	ep->stalled = false;
	ep->ep.maxpacket = le16_to_cpu(desc->wMaxPacketSize);
	/* DMA setting */
	_nbu2ss_ep_dma_init(udc, ep);
	/* Endpoint setting */
	_nbu2ss_ep_init(udc, ep);
	spin_unlock_irqrestore(&udc->lock, flags);
	return 0;
}
/*-------------------------------------------------------------------------*/
/*
 * usb_ep_ops .disable: tear down an endpoint by completing all queued
 * requests with -EINPROGRESS. Returns -ESHUTDOWN when VBUS is off.
 */
static int nbu2ss_ep_disable(struct usb_ep *_ep)
{
	struct nbu2ss_ep *ep;
	struct nbu2ss_udc *udc;
	unsigned long flags;

	if (!_ep) {
		pr_err(" *** %s, bad param\n", __func__);
		return -EINVAL;
	}
	ep = container_of(_ep, struct nbu2ss_ep, ep);
	if (!ep->udc) {
		pr_err("udc: *** %s, ep == NULL !!\n", __func__);
		return -EINVAL;
	}
	udc = ep->udc;
	if (udc->vbus_active == 0)
		return -ESHUTDOWN;
	spin_lock_irqsave(&udc->lock, flags);
	_nbu2ss_nuke(udc, ep, -EINPROGRESS);	/* dequeue request */
	spin_unlock_irqrestore(&udc->lock, flags);
	return 0;
}
/*-------------------------------------------------------------------------*/
/*
 * usb_ep_ops .alloc_request: allocate a zeroed driver request and hand
 * back its embedded usb_request. Returns NULL on allocation failure.
 */
static struct usb_request *nbu2ss_ep_alloc_request(struct usb_ep *ep,
						   gfp_t gfp_flags)
{
	struct nbu2ss_req *new_req = kzalloc(sizeof(*new_req), gfp_flags);

	if (!new_req)
		return NULL;

#ifdef USE_DMA
	/* no DMA mapping yet */
	new_req->req.dma = DMA_ADDR_INVALID;
#endif
	INIT_LIST_HEAD(&new_req->queue);

	return &new_req->req;
}
/*-------------------------------------------------------------------------*/
/*
 * usb_ep_ops .free_request: release a request previously obtained from
 * nbu2ss_ep_alloc_request(). A NULL request is silently ignored.
 */
static void nbu2ss_ep_free_request(struct usb_ep *_ep,
				   struct usb_request *_req)
{
	if (!_req)
		return;

	kfree(container_of(_req, struct nbu2ss_req, req));
}
/*-------------------------------------------------------------------------*/
/*
 * usb_ep_ops .queue: submit a request to an endpoint. Unaligned buffers
 * are bounced through a per-endpoint PAGE_SIZE coherent buffer (USE_DMA).
 * If the endpoint queue was idle and not stalled, the transfer is started
 * immediately. Returns 0 on acceptance, negative errno otherwise.
 */
static int nbu2ss_ep_queue(struct usb_ep *_ep,
			   struct usb_request *_req, gfp_t gfp_flags)
{
	struct nbu2ss_req *req;
	struct nbu2ss_ep *ep;
	struct nbu2ss_udc *udc;
	unsigned long flags;
	bool bflag;
	int result = -EINVAL;

	/* catch various bogus parameters */
	if (!_ep || !_req) {
		if (!_ep)
			pr_err("udc: %s --- _ep == NULL\n", __func__);
		if (!_req)
			pr_err("udc: %s --- _req == NULL\n", __func__);
		return -EINVAL;
	}
	req = container_of(_req, struct nbu2ss_req, req);
	if (unlikely(!_req->complete ||
		     !_req->buf ||
		     !list_empty(&req->queue))) {
		if (!_req->complete)
			pr_err("udc: %s --- !_req->complete\n", __func__);
		if (!_req->buf)
			pr_err("udc:%s --- !_req->buf\n", __func__);
		if (!list_empty(&req->queue))
			pr_err("%s --- !list_empty(&req->queue)\n", __func__);
		return -EINVAL;
	}
	ep = container_of(_ep, struct nbu2ss_ep, ep);
	udc = ep->udc;
	if (udc->vbus_active == 0) {
		dev_info(udc->dev, "Can't ep_queue (VBUS OFF)\n");
		return -ESHUTDOWN;
	}
	if (unlikely(!udc->driver)) {
		dev_err(udc->dev, "%s, bogus device state %p\n", __func__,
			udc->driver);
		return -ESHUTDOWN;
	}
	spin_lock_irqsave(&udc->lock, flags);
#ifdef USE_DMA
	/* buffers not 4-byte aligned must be bounced for DMA */
	if ((uintptr_t)req->req.buf & 0x3)
		req->unaligned = true;
	else
		req->unaligned = false;
	if (req->unaligned) {
		if (!ep->virt_buf) {
			/* GFP_ATOMIC: we are under the spinlock here */
			ep->virt_buf = dma_alloc_coherent(udc->dev, PAGE_SIZE,
							  &ep->phys_buf,
							  GFP_ATOMIC | GFP_DMA);
			if (!ep->virt_buf) {
				spin_unlock_irqrestore(&udc->lock, flags);
				return -ENOMEM;
			}
		}
		if (ep->epnum > 0) {
			/* IN: stage the payload into the bounce buffer */
			if (ep->direct == USB_DIR_IN)
				memcpy(ep->virt_buf, req->req.buf,
				       req->req.length);
		}
	}
	if ((ep->epnum > 0) && (ep->direct == USB_DIR_OUT) &&
	    (req->req.dma != 0))
		_nbu2ss_dma_map_single(udc, ep, req, USB_DIR_OUT);
#endif
	_req->status = -EINPROGRESS;
	_req->actual = 0;
	/* start the hardware only if the queue was empty before this add */
	bflag = list_empty(&ep->queue);
	list_add_tail(&req->queue, &ep->queue);
	if (bflag && !ep->stalled) {
		result = _nbu2ss_start_transfer(udc, ep, req, false);
		if (result < 0) {
			dev_err(udc->dev, " *** %s, result = %d\n", __func__,
				result);
			list_del(&req->queue);
		} else if ((ep->epnum > 0) && (ep->direct == USB_DIR_OUT)) {
#ifdef USE_DMA
			/* short non-DMA OUT may complete immediately */
			if (req->req.length < 4 &&
			    req->req.length == req->req.actual)
#else
			if (req->req.length == req->req.actual)
#endif
				_nbu2ss_ep_done(ep, req, result);
		}
	}
	spin_unlock_irqrestore(&udc->lock, flags);
	return 0;
}
/*-------------------------------------------------------------------------*/
/*
 * usb_ep_ops .dequeue: cancel a request pending on an endpoint. The
 * request is completed with -ECONNRESET if it is found on the endpoint's
 * queue; -EINVAL is returned if it is not queued here.
 */
static int nbu2ss_ep_dequeue(struct usb_ep *_ep, struct usb_request *_req)
{
	struct nbu2ss_req *req;
	struct nbu2ss_ep *ep;
	struct nbu2ss_udc *udc;
	unsigned long flags;

	/* catch various bogus parameters */
	if (!_ep || !_req) {
		/* pr_err("%s, bad param(1)\n", __func__); */
		return -EINVAL;
	}
	ep = container_of(_ep, struct nbu2ss_ep, ep);
	udc = ep->udc;
	if (!udc)
		return -EINVAL;
	spin_lock_irqsave(&udc->lock, flags);
	/* make sure it's actually queued on this endpoint */
	list_for_each_entry(req, &ep->queue, queue) {
		if (&req->req == _req) {
			_nbu2ss_ep_done(ep, req, -ECONNRESET);
			spin_unlock_irqrestore(&udc->lock, flags);
			return 0;
		}
	}
	spin_unlock_irqrestore(&udc->lock, flags);
	pr_debug("%s no queue(EINVAL)\n", __func__);
	return -EINVAL;
}
/*-------------------------------------------------------------------------*/
/*
 * usb_ep_ops .set_halt: stall (value != 0) or un-stall (value == 0) an
 * endpoint. Un-stalling also clears the wedged flag. When requests are
 * still queued, the stall is deferred by only marking the endpoint.
 *
 * Fixes: the "!udc" error path dereferenced ep->udc (i.e. the NULL udc)
 * through dev_err(); use pr_err() instead. Also merged the duplicated
 * "value == 0" test into the first branch (behavior unchanged).
 */
static int nbu2ss_ep_set_halt(struct usb_ep *_ep, int value)
{
	u8 ep_adrs;
	unsigned long flags;
	struct nbu2ss_ep *ep;
	struct nbu2ss_udc *udc;

	if (!_ep) {
		pr_err("%s, bad param\n", __func__);
		return -EINVAL;
	}
	ep = container_of(_ep, struct nbu2ss_ep, ep);
	udc = ep->udc;
	if (!udc) {
		pr_err(" *** %s, bad udc\n", __func__);
		return -EINVAL;
	}
	spin_lock_irqsave(&udc->lock, flags);
	ep_adrs = ep->epnum | ep->direct;
	if (value == 0) {
		_nbu2ss_set_endpoint_stall(udc, ep_adrs, value);
		ep->stalled = false;
		ep->wedged = 0;
	} else {
		/* defer the hardware stall while transfers are pending */
		if (list_empty(&ep->queue))
			_nbu2ss_epn_set_stall(udc, ep);
		else
			ep->stalled = true;
	}
	spin_unlock_irqrestore(&udc->lock, flags);
	return 0;
}
/*
 * usb_ep_ops .set_wedge: wedge the endpoint by halting it.
 * NOTE(review): ep->wedged is never set to 1 here, so a wedged endpoint
 * looks identical to a plain halt — confirm whether that is intended.
 */
static int nbu2ss_ep_set_wedge(struct usb_ep *_ep)
{
	return nbu2ss_ep_set_halt(_ep, 1);
}
/*-------------------------------------------------------------------------*/
/*
 * usb_ep_ops .fifo_status: report the number of bytes currently held in
 * the endpoint FIFO, or a negative errno.
 *
 * Fixes: the computed FIFO byte count was discarded and the function
 * always returned 0; return the count as the .fifo_status contract
 * expects. Also the "!udc" error path dereferenced ep->udc (the NULL
 * udc) through dev_err(); use pr_err() instead.
 */
static int nbu2ss_ep_fifo_status(struct usb_ep *_ep)
{
	u32 data;
	struct nbu2ss_ep *ep;
	struct nbu2ss_udc *udc;
	unsigned long flags;
	struct fc_regs __iomem *preg;

	if (!_ep) {
		pr_err("%s, bad param\n", __func__);
		return -EINVAL;
	}
	ep = container_of(_ep, struct nbu2ss_ep, ep);
	udc = ep->udc;
	if (!udc) {
		pr_err("%s, bad udc\n", __func__);
		return -EINVAL;
	}
	preg = udc->p_regs;
	/* no VBUS: the controller registers are not meaningful */
	data = gpiod_get_value(vbus_gpio);
	if (data == 0)
		return -EINVAL;
	spin_lock_irqsave(&udc->lock, flags);
	if (ep->epnum == 0) {
		data = _nbu2ss_readl(&preg->EP0_LENGTH) & EP0_LDATA;
	} else {
		data = _nbu2ss_readl(&preg->EP_REGS[ep->epnum - 1].EP_LEN_DCNT)
			& EPN_LDATA;
	}
	spin_unlock_irqrestore(&udc->lock, flags);
	return data;
}
/*-------------------------------------------------------------------------*/
/*
 * usb_ep_ops .fifo_flush: discard any data held in the endpoint FIFO.
 * No-op when VBUS is off.
 *
 * Fixes: the "!udc" error path dereferenced ep->udc (the NULL udc)
 * through dev_err(); use pr_err() instead.
 */
static void nbu2ss_ep_fifo_flush(struct usb_ep *_ep)
{
	u32 data;
	struct nbu2ss_ep *ep;
	struct nbu2ss_udc *udc;
	unsigned long flags;

	if (!_ep) {
		pr_err("udc: %s, bad param\n", __func__);
		return;
	}
	ep = container_of(_ep, struct nbu2ss_ep, ep);
	udc = ep->udc;
	if (!udc) {
		pr_err("%s, bad udc\n", __func__);
		return;
	}
	data = gpiod_get_value(vbus_gpio);
	if (data == 0)
		return;
	spin_lock_irqsave(&udc->lock, flags);
	_nbu2ss_fifo_flush(udc, ep);
	spin_unlock_irqrestore(&udc->lock, flags);
}
/*-------------------------------------------------------------------------*/
/* Endpoint operations exposed to the gadget core. */
static const struct usb_ep_ops nbu2ss_ep_ops = {
	.enable		= nbu2ss_ep_enable,
	.disable	= nbu2ss_ep_disable,
	.alloc_request	= nbu2ss_ep_alloc_request,
	.free_request	= nbu2ss_ep_free_request,
	.queue		= nbu2ss_ep_queue,
	.dequeue	= nbu2ss_ep_dequeue,
	.set_halt	= nbu2ss_ep_set_halt,
	.set_wedge	= nbu2ss_ep_set_wedge,
	.fifo_status	= nbu2ss_ep_fifo_status,
	.fifo_flush	= nbu2ss_ep_fifo_flush,
};
/*-------------------------------------------------------------------------*/
/* usb_gadget_ops */
/*-------------------------------------------------------------------------*/
/*
 * usb_gadget_ops .get_frame: return the current USB frame number from
 * the USB_ADDRESS register, or -EINVAL when VBUS is off.
 */
static int nbu2ss_gad_get_frame(struct usb_gadget *pgadget)
{
	struct nbu2ss_udc *udc;

	if (!pgadget) {
		pr_err("udc: %s, bad param\n", __func__);
		return -EINVAL;
	}

	udc = container_of(pgadget, struct nbu2ss_udc, gadget);
	if (gpiod_get_value(vbus_gpio) == 0)
		return -EINVAL;

	return _nbu2ss_readl(&udc->p_regs->USB_ADDRESS) & FRAME;
}
/*-------------------------------------------------------------------------*/
/*
 * usb_gadget_ops .wakeup: initiate remote wakeup by resuming the PLL
 * and busy-waiting (bounded by EPC_PLL_LOCK_COUNT) for PLL lock before
 * releasing PLL_RESUME. Fails with -EINVAL when VBUS is off.
 */
static int nbu2ss_gad_wakeup(struct usb_gadget *pgadget)
{
	int i;
	u32 data;
	struct nbu2ss_udc *udc;

	if (!pgadget) {
		pr_err("%s, bad param\n", __func__);
		return -EINVAL;
	}
	udc = container_of(pgadget, struct nbu2ss_udc, gadget);
	data = gpiod_get_value(vbus_gpio);
	if (data == 0) {
		dev_warn(&pgadget->dev, "VBUS LEVEL = %d\n", data);
		return -EINVAL;
	}
	_nbu2ss_bitset(&udc->p_regs->EPCTR, PLL_RESUME);
	/* bounded poll for PLL lock; falls through if lock never appears */
	for (i = 0; i < EPC_PLL_LOCK_COUNT; i++) {
		data = _nbu2ss_readl(&udc->p_regs->EPCTR);
		if (data & PLL_LOCK)
			break;
	}
	_nbu2ss_bitclr(&udc->p_regs->EPCTR, PLL_RESUME);
	return 0;
}
/*-------------------------------------------------------------------------*/
/*
 * usb_gadget_ops .set_selfpowered: record the self-powered flag on the
 * gadget under the udc lock.
 */
static int nbu2ss_gad_set_selfpowered(struct usb_gadget *pgadget,
				      int is_selfpowered)
{
	struct nbu2ss_udc *udc;
	unsigned long irqflags;

	if (!pgadget) {
		pr_err("%s, bad param\n", __func__);
		return -EINVAL;
	}

	udc = container_of(pgadget, struct nbu2ss_udc, gadget);

	spin_lock_irqsave(&udc->lock, irqflags);
	pgadget->is_selfpowered = (is_selfpowered != 0);
	spin_unlock_irqrestore(&udc->lock, irqflags);

	return 0;
}
/*-------------------------------------------------------------------------*/
/*
 * usb_gadget_ops .vbus_session: intentionally a no-op — VBUS state is
 * tracked by the GPIO interrupt handler instead.
 */
static int nbu2ss_gad_vbus_session(struct usb_gadget *pgadget, int is_active)
{
	return 0;
}
/*-------------------------------------------------------------------------*/
/*
 * usb_gadget_ops .vbus_draw: record the configured bus current draw
 * (milliamps) under the udc lock.
 */
static int nbu2ss_gad_vbus_draw(struct usb_gadget *pgadget, unsigned int mA)
{
	struct nbu2ss_udc *udc;
	unsigned long irqflags;

	if (!pgadget) {
		pr_err("%s, bad param\n", __func__);
		return -EINVAL;
	}

	udc = container_of(pgadget, struct nbu2ss_udc, gadget);

	spin_lock_irqsave(&udc->lock, irqflags);
	udc->mA = mA;
	spin_unlock_irqrestore(&udc->lock, irqflags);

	return 0;
}
/*-------------------------------------------------------------------------*/
/*
 * usb_gadget_ops .pullup: software-controlled D+ pullup. Requires a
 * bound gadget driver and active VBUS.
 */
static int nbu2ss_gad_pullup(struct usb_gadget *pgadget, int is_on)
{
	struct nbu2ss_udc *udc;
	unsigned long flags;

	if (!pgadget) {
		pr_err("%s, bad param\n", __func__);
		return -EINVAL;
	}
	udc = container_of(pgadget, struct nbu2ss_udc, gadget);
	if (!udc->driver) {
		pr_warn("%s, Not Regist Driver\n", __func__);
		return -EINVAL;
	}
	if (udc->vbus_active == 0)
		return -ESHUTDOWN;
	spin_lock_irqsave(&udc->lock, flags);
	_nbu2ss_pullup(udc, is_on);
	spin_unlock_irqrestore(&udc->lock, flags);
	return 0;
}
/*-------------------------------------------------------------------------*/
/* usb_gadget_ops .ioctl: no custom ioctls are supported; always succeeds. */
static int nbu2ss_gad_ioctl(struct usb_gadget *pgadget,
			    unsigned int code, unsigned long param)
{
	return 0;
}
/* Gadget operations exposed to the gadget core. */
static const struct usb_gadget_ops nbu2ss_gadget_ops = {
	.get_frame		= nbu2ss_gad_get_frame,
	.wakeup			= nbu2ss_gad_wakeup,
	.set_selfpowered	= nbu2ss_gad_set_selfpowered,
	.vbus_session		= nbu2ss_gad_vbus_session,
	.vbus_draw		= nbu2ss_gad_vbus_draw,
	.pullup			= nbu2ss_gad_pullup,
	.ioctl			= nbu2ss_gad_ioctl,
};
/* Per-endpoint name and capability table, indexed by endpoint number. */
static const struct {
	const char *name;
	const struct usb_ep_caps caps;
} ep_info[NUM_ENDPOINTS] = {
#define EP_INFO(_name, _caps) \
	{ \
		.name = _name, \
		.caps = _caps, \
	}
	EP_INFO("ep0",
		USB_EP_CAPS(USB_EP_CAPS_TYPE_CONTROL, USB_EP_CAPS_DIR_ALL)),
	EP_INFO("ep1-bulk",
		USB_EP_CAPS(USB_EP_CAPS_TYPE_BULK, USB_EP_CAPS_DIR_ALL)),
	EP_INFO("ep2-bulk",
		USB_EP_CAPS(USB_EP_CAPS_TYPE_BULK, USB_EP_CAPS_DIR_ALL)),
	EP_INFO("ep3in-int",
		USB_EP_CAPS(USB_EP_CAPS_TYPE_INT, USB_EP_CAPS_DIR_IN)),
	EP_INFO("ep4-iso",
		USB_EP_CAPS(USB_EP_CAPS_TYPE_ISO, USB_EP_CAPS_DIR_ALL)),
	EP_INFO("ep5-iso",
		USB_EP_CAPS(USB_EP_CAPS_TYPE_ISO, USB_EP_CAPS_DIR_ALL)),
	EP_INFO("ep6-bulk",
		USB_EP_CAPS(USB_EP_CAPS_TYPE_BULK, USB_EP_CAPS_DIR_ALL)),
	EP_INFO("ep7-bulk",
		USB_EP_CAPS(USB_EP_CAPS_TYPE_BULK, USB_EP_CAPS_DIR_ALL)),
	EP_INFO("ep8in-int",
		USB_EP_CAPS(USB_EP_CAPS_TYPE_INT, USB_EP_CAPS_DIR_IN)),
	EP_INFO("ep9-iso",
		USB_EP_CAPS(USB_EP_CAPS_TYPE_ISO, USB_EP_CAPS_DIR_ALL)),
	EP_INFO("epa-iso",
		USB_EP_CAPS(USB_EP_CAPS_TYPE_ISO, USB_EP_CAPS_DIR_ALL)),
	EP_INFO("epb-bulk",
		USB_EP_CAPS(USB_EP_CAPS_TYPE_BULK, USB_EP_CAPS_DIR_ALL)),
	EP_INFO("epc-bulk",
		USB_EP_CAPS(USB_EP_CAPS_TYPE_BULK, USB_EP_CAPS_DIR_ALL)),
	EP_INFO("epdin-int",
		USB_EP_CAPS(USB_EP_CAPS_TYPE_INT, USB_EP_CAPS_DIR_IN)),
#undef EP_INFO
};
/*-------------------------------------------------------------------------*/
/*
 * Initialize the gadget's endpoint list from the ep_info table. ep0 is
 * removed from the generic ep_list afterwards, as the gadget core
 * handles it via gadget.ep0.
 */
static void nbu2ss_drv_ep_init(struct nbu2ss_udc *udc)
{
	int i;

	INIT_LIST_HEAD(&udc->gadget.ep_list);
	udc->gadget.ep0 = &udc->ep[0].ep;
	for (i = 0; i < NUM_ENDPOINTS; i++) {
		struct nbu2ss_ep *ep = &udc->ep[i];

		ep->udc = udc;
		ep->desc = NULL;
		ep->ep.driver_data = NULL;
		ep->ep.name = ep_info[i].name;
		ep->ep.caps = ep_info[i].caps;
		ep->ep.ops = &nbu2ss_ep_ops;
		usb_ep_set_maxpacket_limit(&ep->ep,
					   i == 0 ? EP0_PACKETSIZE
					   : EP_PACKETSIZE);
		list_add_tail(&ep->ep.ep_list, &udc->gadget.ep_list);
		INIT_LIST_HEAD(&ep->queue);
	}
	/* ep0 is managed separately from the generic endpoint list */
	list_del_init(&udc->ep[0].ep.ep_list);
}
/*-------------------------------------------------------------------------*/
/* platform_driver */
/*
 * One-time context initialization for the UDC: lock, device pointers,
 * endpoint table and the embedded usb_gadget. Always returns 0.
 */
static int nbu2ss_drv_contest_init(struct platform_device *pdev,
				   struct nbu2ss_udc *udc)
{
	spin_lock_init(&udc->lock);
	udc->dev = &pdev->dev;
	udc->gadget.is_selfpowered = 1;
	udc->devstate = USB_STATE_NOTATTACHED;
	udc->pdev = pdev;
	udc->mA = 0;
	udc->pdev->dev.coherent_dma_mask = DMA_BIT_MASK(32);
	/* init Endpoint */
	nbu2ss_drv_ep_init(udc);
	/* init Gadget */
	udc->gadget.ops = &nbu2ss_gadget_ops;
	udc->gadget.ep0 = &udc->ep[0].ep;
	udc->gadget.speed = USB_SPEED_UNKNOWN;
	udc->gadget.name = driver_name;
	/* udc->gadget.is_dualspeed = 1; */
	device_initialize(&udc->gadget.dev);
	dev_set_name(&udc->gadget.dev, "gadget");
	udc->gadget.dev.parent = &pdev->dev;
	udc->gadget.dev.dma_mask = pdev->dev.dma_mask;
	return 0;
}
/*
 * probe - binds to the platform device
 */
/*
 * Maps the controller registers, wires up the UDC and VBUS interrupts
 * and initializes the driver context. Uses the single static
 * udc_controller instance.
 */
static int nbu2ss_drv_probe(struct platform_device *pdev)
{
	int status;
	struct nbu2ss_udc *udc;
	int irq;
	void __iomem *mmio_base;

	udc = &udc_controller;
	memset(udc, 0, sizeof(struct nbu2ss_udc));
	platform_set_drvdata(pdev, udc);
	/* require I/O memory and IRQ to be provided as resources */
	mmio_base = devm_platform_ioremap_resource(pdev, 0);
	if (IS_ERR(mmio_base))
		return PTR_ERR(mmio_base);
	irq = platform_get_irq(pdev, 0);
	if (irq < 0)
		return irq;
	status = devm_request_irq(&pdev->dev, irq, _nbu2ss_udc_irq,
				  0, driver_name, udc);
	/* IO Memory */
	udc->p_regs = (struct fc_regs __iomem *)mmio_base;
	/* USB Function Controller Interrupt */
	if (status != 0) {
		/* NOTE(review): udc->dev is still NULL here (set later in
		 * nbu2ss_drv_contest_init), so this dev_err logs with no
		 * device name — consider using &pdev->dev instead.
		 */
		dev_err(udc->dev, "request_irq(USB_UDC_IRQ_1) failed\n");
		return status;
	}
	/* Driver Initialization */
	status = nbu2ss_drv_contest_init(pdev, udc);
	if (status < 0) {
		/* Error */
		return status;
	}
	/* VBUS Interrupt */
	vbus_irq = gpiod_to_irq(vbus_gpio);
	irq_set_irq_type(vbus_irq, IRQ_TYPE_EDGE_BOTH);
	status = request_irq(vbus_irq,
			     _nbu2ss_vbus_irq, IRQF_SHARED, driver_name, udc);
	if (status != 0) {
		dev_err(udc->dev, "request_irq(vbus_irq) failed\n");
		return status;
	}
	return status;
}
/*-------------------------------------------------------------------------*/
/* Platform shutdown hook: power the controller down cleanly. */
static void nbu2ss_drv_shutdown(struct platform_device *pdev)
{
	struct nbu2ss_udc *udc;

	udc = platform_get_drvdata(pdev);
	if (!udc)
		return;
	_nbu2ss_disable_controller(udc);
}
/*-------------------------------------------------------------------------*/
/*
 * Platform remove hook: free the per-endpoint DMA bounce buffers that
 * nbu2ss_ep_queue() may have allocated, and release the VBUS IRQ (the
 * UDC IRQ is devm-managed).
 */
static void nbu2ss_drv_remove(struct platform_device *pdev)
{
	struct nbu2ss_udc *udc;
	struct nbu2ss_ep *ep;
	int i;

	udc = &udc_controller;
	for (i = 0; i < NUM_ENDPOINTS; i++) {
		ep = &udc->ep[i];
		if (ep->virt_buf)
			dma_free_coherent(udc->dev, PAGE_SIZE, (void *)ep->virt_buf,
					  ep->phys_buf);
	}
	/* Interrupt Handler - Release */
	free_irq(vbus_irq, udc);
}
/*-------------------------------------------------------------------------*/
/*
 * Platform suspend hook: detach from the bus, mark the system as
 * suspended (so a VBUS edge during sleep is ignored until resume) and
 * power down the controller.
 */
static int nbu2ss_drv_suspend(struct platform_device *pdev, pm_message_t state)
{
	struct nbu2ss_udc *udc;

	udc = platform_get_drvdata(pdev);
	if (!udc)
		return 0;
	if (udc->vbus_active) {
		udc->vbus_active = 0;
		udc->devstate = USB_STATE_NOTATTACHED;
		udc->linux_suspended = 1;
		if (udc->usb_suspended) {
			udc->usb_suspended = 0;
			_nbu2ss_reset_controller(udc);
		}
		_nbu2ss_quiesce(udc);
	}
	_nbu2ss_disable_controller(udc);
	return 0;
}
/*-------------------------------------------------------------------------*/
/*
 * Platform resume hook: if VBUS is present, re-enable the controller
 * and reconnect the pullup; always clears the linux_suspended flag.
 */
static int nbu2ss_drv_resume(struct platform_device *pdev)
{
	u32 data;
	struct nbu2ss_udc *udc;

	udc = platform_get_drvdata(pdev);
	if (!udc)
		return 0;
	data = gpiod_get_value(vbus_gpio);
	if (data) {
		udc->vbus_active = 1;
		udc->devstate = USB_STATE_POWERED;
		_nbu2ss_enable_controller(udc);
		_nbu2ss_pullup(udc, 1);
	}
	udc->linux_suspended = 0;
	return 0;
}
/* Platform driver glue and module metadata. */
static struct platform_driver udc_driver = {
	.probe		= nbu2ss_drv_probe,
	.shutdown	= nbu2ss_drv_shutdown,
	.remove_new	= nbu2ss_drv_remove,
	.suspend	= nbu2ss_drv_suspend,
	.resume		= nbu2ss_drv_resume,
	.driver		= {
		.name	= driver_name,
	},
};
module_platform_driver(udc_driver);
MODULE_DESCRIPTION(DRIVER_DESC);
MODULE_AUTHOR("Renesas Electronics Corporation");
MODULE_LICENSE("GPL");
| linux-master | drivers/staging/emxx_udc/emxx_udc.c |
// SPDX-License-Identifier: GPL-2.0
/* Copyright (c) 2012 GCT Semiconductor, Inc. All rights reserved. */
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
#include <linux/etherdevice.h>
#include <linux/ip.h>
#include <linux/ipv6.h>
#include <linux/udp.h>
#include <linux/in.h>
#include <linux/if_arp.h>
#include <linux/if_ether.h>
#include <linux/if_vlan.h>
#include <linux/in6.h>
#include <linux/tcp.h>
#include <linux/icmp.h>
#include <linux/icmpv6.h>
#include <linux/uaccess.h>
#include <linux/errno.h>
#include <net/ndisc.h>
#include "gdm_lte.h"
#include "netlink_k.h"
#include "hci.h"
#include "hci_packet.h"
#include "gdm_endian.h"
/*
* Netlink protocol number
*/
#define NETLINK_LTE 30
/*
* Default MTU Size
*/
#define DEFAULT_MTU_SIZE 1500
#define IP_VERSION_4 4
#define IP_VERSION_6 6
/* Shared netlink event socket with a reference count across net devices. */
static struct {
	int ref_cnt;
	struct sock *sock;
} lte_event;

/* Device type tag reported via sysfs for the WWAN net devices. */
static struct device_type wwan_type = {
	.name   = "wwan",
};
/* net_device_ops .ndo_open: allow the stack to start transmitting. */
static int gdm_lte_open(struct net_device *dev)
{
	netif_start_queue(dev);

	return 0;
}
/* net_device_ops .ndo_stop: stop the transmit queue. */
static int gdm_lte_close(struct net_device *dev)
{
	netif_stop_queue(dev);

	return 0;
}
/*
 * net_device_ops .ndo_set_config: reject reconfiguration while the
 * interface is up; otherwise accept (nothing is actually applied).
 */
static int gdm_lte_set_config(struct net_device *dev, struct ifmap *map)
{
	return (dev->flags & IFF_UP) ? -EBUSY : 0;
}
/* TX completion callback: restart the queue if it was flow-stopped. */
static void tx_complete(void *arg)
{
	struct nic *nic = arg;
	struct net_device *ndev = nic->netdev;

	if (netif_queue_stopped(ndev))
		netif_wake_queue(ndev);
}
/*
 * Deliver a received skb to the network stack and update RX statistics.
 * The wire length is captured before netif_rx(), which may consume the
 * skb. Always returns 0.
 */
static int gdm_lte_rx(struct sk_buff *skb, struct nic *nic, int nic_type)
{
	int wire_len = skb->len + ETH_HLEN;

	if (netif_rx(skb) == NET_RX_DROP) {
		nic->stats.rx_dropped++;
	} else {
		nic->stats.rx_packets++;
		nic->stats.rx_bytes += wire_len;
	}

	return 0;
}
/*
 * Locally answer an outgoing ARP request: build an ARP reply on behalf
 * of the target and feed it back up through gdm_lte_rx(), so ARP never
 * goes over the air. Handles both plain Ethernet and 802.1Q frames.
 * Returns 0 on success, negative errno otherwise.
 */
static int gdm_lte_emulate_arp(struct sk_buff *skb_in, u32 nic_type)
{
	struct nic *nic = netdev_priv(skb_in->dev);
	struct sk_buff *skb_out;
	struct ethhdr eth;
	struct vlan_ethhdr vlan_eth;
	struct arphdr *arp_in;
	struct arphdr *arp_out;
	struct arpdata {
		u8 ar_sha[ETH_ALEN];
		u8 ar_sip[4];
		u8 ar_tha[ETH_ALEN];
		u8 ar_tip[4];
	};
	struct arpdata *arp_data_in;
	struct arpdata *arp_data_out;
	u8 arp_temp[60];	/* scratch for the reply: arphdr + payload */
	void *mac_header_data;
	u32 mac_header_len;

	/* Check for skb->len, discard if empty */
	if (skb_in->len == 0)
		return -ENODATA;
	/* Format the mac header so that it can be put to skb */
	if (ntohs(((struct ethhdr *)skb_in->data)->h_proto) == ETH_P_8021Q) {
		memcpy(&vlan_eth, skb_in->data, sizeof(struct vlan_ethhdr));
		mac_header_data = &vlan_eth;
		mac_header_len = VLAN_ETH_HLEN;
	} else {
		memcpy(&eth, skb_in->data, sizeof(struct ethhdr));
		mac_header_data = &eth;
		mac_header_len = ETH_HLEN;
	}
	/* Get the pointer of the original request */
	arp_in = (struct arphdr *)(skb_in->data + mac_header_len);
	arp_data_in = (struct arpdata *)(skb_in->data + mac_header_len +
					sizeof(struct arphdr));
	/* Get the pointer of the outgoing response */
	arp_out = (struct arphdr *)arp_temp;
	arp_data_out = (struct arpdata *)(arp_temp + sizeof(struct arphdr));
	/* Copy the arp header */
	memcpy(arp_out, arp_in, sizeof(struct arphdr));
	arp_out->ar_op = htons(ARPOP_REPLY);
	/* Copy the arp payload: based on 2 bytes of mac and fill the IP */
	/* (synthesized sender MAC: 2 bytes of requester MAC + target IP) */
	arp_data_out->ar_sha[0] = arp_data_in->ar_sha[0];
	arp_data_out->ar_sha[1] = arp_data_in->ar_sha[1];
	memcpy(&arp_data_out->ar_sha[2], &arp_data_in->ar_tip[0], 4);
	memcpy(&arp_data_out->ar_sip[0], &arp_data_in->ar_tip[0], 4);
	memcpy(&arp_data_out->ar_tha[0], &arp_data_in->ar_sha[0], 6);
	memcpy(&arp_data_out->ar_tip[0], &arp_data_in->ar_sip[0], 4);
	/* Fill the destination mac with source mac of the received packet */
	memcpy(mac_header_data, mac_header_data + ETH_ALEN, ETH_ALEN);
	/* Fill the source mac with nic's source mac */
	memcpy(mac_header_data + ETH_ALEN, nic->src_mac_addr, ETH_ALEN);
	/* Alloc skb and reserve align */
	skb_out = dev_alloc_skb(skb_in->len);
	if (!skb_out)
		return -ENOMEM;
	skb_reserve(skb_out, NET_IP_ALIGN);
	skb_put_data(skb_out, mac_header_data, mac_header_len);
	skb_put_data(skb_out, arp_out, sizeof(struct arphdr));
	skb_put_data(skb_out, arp_data_out, sizeof(struct arpdata));
	skb_out->protocol = ((struct ethhdr *)mac_header_data)->h_proto;
	skb_out->dev = skb_in->dev;
	skb_reset_mac_header(skb_out);
	skb_pull(skb_out, ETH_HLEN);
	gdm_lte_rx(skb_out, nic, nic_type);
	return 0;
}
/*
 * Compute the ICMPv6 checksum over the IPv6 pseudo-header plus the
 * message at @ptr of @len bytes. The pseudo-header is built into a
 * union and summed as raw 16-bit words.
 * NOTE(review): ph_len is stored host-order via be16_to_cpu into a u32
 * field that is then summed as native u16 words — confirm this matches
 * the device's expectations on big-endian hosts.
 */
static __sum16 icmp6_checksum(struct ipv6hdr *ipv6, u16 *ptr, int len)
{
	unsigned short *w;
	__wsum sum = 0;
	int i;
	u16 pa;

	union {
		struct {
			u8 ph_src[16];
			u8 ph_dst[16];
			u32 ph_len;
			u8 ph_zero[3];
			u8 ph_nxt;
		} ph __packed;
		u16 pa[20];
	} pseudo_header;

	memset(&pseudo_header, 0, sizeof(pseudo_header));
	memcpy(&pseudo_header.ph.ph_src, &ipv6->saddr.in6_u.u6_addr8, 16);
	memcpy(&pseudo_header.ph.ph_dst, &ipv6->daddr.in6_u.u6_addr8, 16);
	pseudo_header.ph.ph_len = be16_to_cpu(ipv6->payload_len);
	pseudo_header.ph.ph_nxt = ipv6->nexthdr;
	/* fold in the pseudo-header words */
	for (i = 0; i < ARRAY_SIZE(pseudo_header.pa); i++) {
		pa = pseudo_header.pa[i];
		sum = csum_add(sum, csum_unfold((__force __sum16)pa));
	}
	/* fold in the ICMPv6 message itself, 16 bits at a time */
	w = ptr;
	while (len > 1) {
		sum = csum_add(sum, csum_unfold((__force __sum16)*w++));
		len -= 2;
	}
	return csum_fold(sum);
}
/*
 * Locally answer an outgoing IPv6 Neighbour Solicitation with a
 * synthesized Neighbour Advertisement (fixed link-layer address), fed
 * back up via gdm_lte_rx(), so NDP never goes over the air. Router
 * Solicitations and non-NDP traffic are rejected; Duplicate Address
 * Detection probes (all-zero source) are silently accepted.
 * Returns 0 on success, negative errno otherwise.
 */
static int gdm_lte_emulate_ndp(struct sk_buff *skb_in, u32 nic_type)
{
	struct nic *nic = netdev_priv(skb_in->dev);
	struct sk_buff *skb_out;
	struct ethhdr eth;
	struct vlan_ethhdr vlan_eth;
	struct neighbour_advertisement {
		u8 target_address[16];
		u8 type;
		u8 length;
		u8 link_layer_address[6];
	};
	struct neighbour_advertisement na;
	struct neighbour_solicitation {
		u8 target_address[16];
	};
	struct neighbour_solicitation *ns;
	struct ipv6hdr *ipv6_in;
	struct ipv6hdr ipv6_out;
	struct icmp6hdr *icmp6_in;
	struct icmp6hdr icmp6_out;
	void *mac_header_data;
	u32 mac_header_len;

	/* Format the mac header so that it can be put to skb */
	if (ntohs(((struct ethhdr *)skb_in->data)->h_proto) == ETH_P_8021Q) {
		memcpy(&vlan_eth, skb_in->data, sizeof(struct vlan_ethhdr));
		if (ntohs(vlan_eth.h_vlan_encapsulated_proto) != ETH_P_IPV6)
			return -EPROTONOSUPPORT;
		mac_header_data = &vlan_eth;
		mac_header_len = VLAN_ETH_HLEN;
	} else {
		memcpy(&eth, skb_in->data, sizeof(struct ethhdr));
		if (ntohs(eth.h_proto) != ETH_P_IPV6)
			return -EPROTONOSUPPORT;
		mac_header_data = &eth;
		mac_header_len = ETH_HLEN;
	}
	/* Check if this is IPv6 ICMP packet */
	ipv6_in = (struct ipv6hdr *)(skb_in->data + mac_header_len);
	if (ipv6_in->version != 6 || ipv6_in->nexthdr != IPPROTO_ICMPV6)
		return -EPROTONOSUPPORT;
	/* Check if this is NDP packet */
	icmp6_in = (struct icmp6hdr *)(skb_in->data + mac_header_len +
					sizeof(struct ipv6hdr));
	if (icmp6_in->icmp6_type == NDISC_ROUTER_SOLICITATION) { /* Check RS */
		return -EPROTONOSUPPORT;
	} else if (icmp6_in->icmp6_type == NDISC_NEIGHBOUR_SOLICITATION) {
		/* Check NS */
		u8 icmp_na[sizeof(struct icmp6hdr) +
			sizeof(struct neighbour_advertisement)];
		u8 zero_addr8[16] = {0,};

		if (memcmp(ipv6_in->saddr.in6_u.u6_addr8, zero_addr8, 16) == 0)
			/* Duplicate Address Detection: Source IP is all zero */
			return 0;
		icmp6_out.icmp6_type = NDISC_NEIGHBOUR_ADVERTISEMENT;
		icmp6_out.icmp6_code = 0;
		icmp6_out.icmp6_cksum = 0;
		/* R=0, S=1, O=1 */
		icmp6_out.icmp6_dataun.un_data32[0] = htonl(0x60000000);
		ns = (struct neighbour_solicitation *)
			(skb_in->data + mac_header_len +
			 sizeof(struct ipv6hdr) + sizeof(struct icmp6hdr));
		memcpy(&na.target_address, ns->target_address, 16);
		/* target link-layer address option, fixed MAC */
		na.type = 0x02;
		na.length = 1;
		na.link_layer_address[0] = 0x00;
		na.link_layer_address[1] = 0x0a;
		na.link_layer_address[2] = 0x3b;
		na.link_layer_address[3] = 0xaf;
		na.link_layer_address[4] = 0x63;
		na.link_layer_address[5] = 0xc7;
		/* reply from the solicited target back to the sender */
		memcpy(&ipv6_out, ipv6_in, sizeof(struct ipv6hdr));
		memcpy(ipv6_out.saddr.in6_u.u6_addr8, &na.target_address, 16);
		memcpy(ipv6_out.daddr.in6_u.u6_addr8,
		       ipv6_in->saddr.in6_u.u6_addr8, 16);
		ipv6_out.payload_len = htons(sizeof(struct icmp6hdr) +
				sizeof(struct neighbour_advertisement));
		/* checksum over the assembled ICMPv6 header + NA body */
		memcpy(icmp_na, &icmp6_out, sizeof(struct icmp6hdr));
		memcpy(icmp_na + sizeof(struct icmp6hdr), &na,
		       sizeof(struct neighbour_advertisement));
		icmp6_out.icmp6_cksum = icmp6_checksum(&ipv6_out,
						       (u16 *)icmp_na,
						       sizeof(icmp_na));
	} else {
		return -EINVAL;
	}
	/* Fill the destination mac with source mac of the received packet */
	memcpy(mac_header_data, mac_header_data + ETH_ALEN, ETH_ALEN);
	/* Fill the source mac with nic's source mac */
	memcpy(mac_header_data + ETH_ALEN, nic->src_mac_addr, ETH_ALEN);
	/* Alloc skb and reserve align */
	skb_out = dev_alloc_skb(skb_in->len);
	if (!skb_out)
		return -ENOMEM;
	skb_reserve(skb_out, NET_IP_ALIGN);
	skb_put_data(skb_out, mac_header_data, mac_header_len);
	skb_put_data(skb_out, &ipv6_out, sizeof(struct ipv6hdr));
	skb_put_data(skb_out, &icmp6_out, sizeof(struct icmp6hdr));
	skb_put_data(skb_out, &na, sizeof(struct neighbour_advertisement));
	skb_out->protocol = ((struct ethhdr *)mac_header_data)->h_proto;
	skb_out->dev = skb_in->dev;
	skb_reset_mac_header(skb_out);
	skb_pull(skb_out, ETH_HLEN);
	gdm_lte_rx(skb_out, nic, nic_type);
	return 0;
}
/*
 * Classify an outgoing skb into the firmware NIC-type word: base value
 * 0x10 | nic_id, plus flag bits for VLAN, ARP, IPv4/IPv6, DHCP and
 * ICMPv6 neighbour solicitation.
 */
static s32 gdm_lte_tx_nic_type(struct net_device *dev, struct sk_buff *skb)
{
	struct nic *nic = netdev_priv(dev);
	struct ethhdr *eth;
	struct vlan_ethhdr *vlan_eth;
	struct iphdr *ip;
	struct ipv6hdr *ipv6;
	int mac_proto;
	void *network_data;
	u32 nic_type;

	/* NIC TYPE is based on the nic_id of this net_device */
	nic_type = 0x00000010 | nic->nic_id;
	/* Get ethernet protocol */
	eth = (struct ethhdr *)skb->data;
	if (ntohs(eth->h_proto) == ETH_P_8021Q) {
		vlan_eth = skb_vlan_eth_hdr(skb);
		mac_proto = ntohs(vlan_eth->h_vlan_encapsulated_proto);
		network_data = skb->data + VLAN_ETH_HLEN;
		nic_type |= NIC_TYPE_F_VLAN;
	} else {
		mac_proto = ntohs(eth->h_proto);
		network_data = skb->data + ETH_HLEN;
	}
	/* Process packet for nic type */
	switch (mac_proto) {
	case ETH_P_ARP:
		nic_type |= NIC_TYPE_ARP;
		break;
	case ETH_P_IP:
		nic_type |= NIC_TYPE_F_IPV4;
		ip = network_data;
		/* Check DHCPv4 */
		if (ip->protocol == IPPROTO_UDP) {
			struct udphdr *udp =
				network_data + sizeof(struct iphdr);
			if (ntohs(udp->dest) == 67 || ntohs(udp->dest) == 68)
				nic_type |= NIC_TYPE_F_DHCP;
		}
		break;
	case ETH_P_IPV6:
		nic_type |= NIC_TYPE_F_IPV6;
		ipv6 = network_data;

		if (ipv6->nexthdr == IPPROTO_ICMPV6) /* Check NDP request */ {
			struct icmp6hdr *icmp6 =
				network_data + sizeof(struct ipv6hdr);
			if (icmp6->icmp6_type == NDISC_NEIGHBOUR_SOLICITATION)
				nic_type |= NIC_TYPE_ICMPV6;
		} else if (ipv6->nexthdr == IPPROTO_UDP) /* Check DHCPv6 */ {
			struct udphdr *udp =
				network_data + sizeof(struct ipv6hdr);
			if (ntohs(udp->dest) == 546 || ntohs(udp->dest) == 547)
				nic_type |= NIC_TYPE_F_DHCP;
		}
		break;
	default:
		break;
	}
	return nic_type;
}
/*
 * net_device_ops .ndo_start_xmit: answer ARP/NDP locally, strip any
 * VLAN tag, and hand everything else to the PHY layer's send_sdu_func.
 *
 * NOTE(review): the -EMEDIUMTYPE/-EINVAL returns are not netdev_tx_t
 * codes, and the -EMEDIUMTYPE path returns without freeing the skb —
 * confirm against the ndo_start_xmit contract.
 */
static netdev_tx_t gdm_lte_tx(struct sk_buff *skb, struct net_device *dev)
{
	struct nic *nic = netdev_priv(dev);
	u32 nic_type;
	void *data_buf;
	int data_len;
	int idx;
	int ret = 0;

	nic_type = gdm_lte_tx_nic_type(dev, skb);
	if (nic_type == 0) {
		netdev_err(dev, "tx - invalid nic_type\n");
		return -EMEDIUMTYPE;
	}
	/* ARP requests are answered locally, never transmitted */
	if (nic_type & NIC_TYPE_ARP) {
		if (gdm_lte_emulate_arp(skb, nic_type) == 0) {
			dev_kfree_skb(skb);
			return 0;
		}
	}
	/* Neighbour solicitations likewise */
	if (nic_type & NIC_TYPE_ICMPV6) {
		if (gdm_lte_emulate_ndp(skb, nic_type) == 0) {
			dev_kfree_skb(skb);
			return 0;
		}
	}
	/*
	 * Need byte shift (that is, remove VLAN tag) if there is one
	 * For the case of ARP, this breaks the offset as vlan_ethhdr+4
	 * is treated as ethhdr However, it shouldn't be a problem as
	 * the response starts from arp_hdr and ethhdr is created by this
	 * driver based on the NIC mac
	 */
	if (nic_type & NIC_TYPE_F_VLAN) {
		struct vlan_ethhdr *vlan_eth = skb_vlan_eth_hdr(skb);

		nic->vlan_id = ntohs(vlan_eth->h_vlan_TCI) & VLAN_VID_MASK;
		data_buf = skb->data + (VLAN_ETH_HLEN - ETH_HLEN);
		data_len = skb->len - (VLAN_ETH_HLEN - ETH_HLEN);
	} else {
		nic->vlan_id = 0;
		data_buf = skb->data;
		data_len = skb->len;
	}
	/* If it is a ICMPV6 packet, clear all the other bits :
	 * for backward compatibility with the firmware
	 */
	if (nic_type & NIC_TYPE_ICMPV6)
		nic_type = NIC_TYPE_ICMPV6;
	/* If it is not a dhcp packet, clear all the flag bits :
	 * original NIC, otherwise the special flag (IPVX | DHCP)
	 */
	if (!(nic_type & NIC_TYPE_F_DHCP))
		nic_type &= NIC_TYPE_MASK;
	/* the lte%d suffix selects the device index for the PHY layer */
	ret = sscanf(dev->name, "lte%d", &idx);
	if (ret != 1) {
		dev_kfree_skb(skb);
		return -EINVAL;
	}
	ret = nic->phy_dev->send_sdu_func(nic->phy_dev->priv_dev,
					  data_buf, data_len,
					  nic->pdn_table.dft_eps_id, 0,
					  tx_complete, nic, idx,
					  nic_type);
	if (ret == TX_NO_BUFFER || ret == TX_NO_SPC) {
		/* flow control: stop until tx_complete wakes us */
		netif_stop_queue(dev);
		if (ret == TX_NO_BUFFER)
			ret = 0;
		else
			ret = -ENOSPC;
	} else if (ret == TX_NO_DEV) {
		ret = -ENODEV;
	}
	/* Updates tx stats */
	if (ret) {
		nic->stats.tx_dropped++;
	} else {
		nic->stats.tx_packets++;
		nic->stats.tx_bytes += data_len;
	}
	dev_kfree_skb(skb);
	return 0;
}
/* .ndo_get_stats handler: expose this NIC's statistics block. */
static struct net_device_stats *gdm_lte_stats(struct net_device *dev)
{
	struct nic *priv = netdev_priv(dev);

	return &priv->stats;
}
/* Forward a device HCI event to user space over the shared netlink
 * socket.  The payload length is taken from the HCI header itself
 * (device endianness) plus the fixed header size.
 */
static int gdm_lte_event_send(struct net_device *dev, char *buf, int len)
{
	struct phy_dev *phy_dev = ((struct nic *)netdev_priv(dev))->phy_dev;
	struct hci_packet *hci = (struct hci_packet *)buf;
	int length;
	int idx;
	int ret;

	/* The netlink group index comes from the interface name. */
	ret = sscanf(dev->name, "lte%d", &idx);
	if (ret != 1)
		return -EINVAL;

	length = gdm_dev16_to_cpu(phy_dev->get_endian(phy_dev->priv_dev),
				  hci->len) + HCI_HEADER_SIZE;
	return netlink_send(lte_event.sock, idx, 0, buf, length, dev);
}
/* Netlink receive hook: pass the user-space message straight to the
 * device's HCI channel; no completion callback is needed.
 */
static void gdm_lte_event_rcv(struct net_device *dev, u16 type,
			      void *msg, int len)
{
	struct nic *priv = netdev_priv(dev);
	struct phy_dev *phy = priv->phy_dev;

	phy->send_hci_func(phy->priv_dev, msg, len, NULL, NULL);
}
/* Reference-counted setup of the shared netlink event socket:
 * the socket is created on first use only.
 */
int gdm_lte_event_init(void)
{
	if (!lte_event.ref_cnt)
		lte_event.sock = netlink_init(NETLINK_LTE, gdm_lte_event_rcv);

	if (!lte_event.sock) {
		pr_err("event init failed\n");
		return -ENODATA;
	}

	lte_event.ref_cnt++;
	return 0;
}
/* Drop one reference on the shared event socket and release it when
 * the last user goes away.
 */
void gdm_lte_event_exit(void)
{
	if (lte_event.sock && --lte_event.ref_cnt == 0) {
		sock_release(lte_event.sock->sk_socket);
		lte_event.sock = NULL;
	}
}
/* Map a nic_type to a net_device slot: the low nibble is the index.
 * Returns -EINVAL when the index is out of range.
 */
static int find_dev_index(u32 nic_type)
{
	u32 idx = nic_type & 0x0000000f;

	return idx < MAX_NIC_TYPE ? (int)idx : -EINVAL;
}
/* Rebuild an Ethernet (or 802.1Q) header for a received IP/ARP payload
 * and feed the resulting frame into the protocol handler.  The device
 * delivers bare network-layer payloads, so the MAC header is fabricated
 * from the NIC's cached source/destination addresses.
 */
static void gdm_lte_netif_rx(struct net_device *dev, char *buf,
			     int len, int flagged_nic_type)
{
	u32 nic_type;
	struct nic *nic;
	struct sk_buff *skb;
	struct ethhdr eth;
	struct vlan_ethhdr vlan_eth;
	void *mac_header_data;
	u32 mac_header_len;
	char ip_version = 0;

	nic_type = flagged_nic_type & NIC_TYPE_MASK;
	nic = netdev_priv(dev);

	if (flagged_nic_type & NIC_TYPE_F_DHCP) {
		/* Change the destination mac address
		 * with the one requested the IP
		 */
		if (flagged_nic_type & NIC_TYPE_F_IPV4) {
			/* Minimal BOOTP/DHCP layout; only chaddr is used. */
			struct dhcp_packet {
				u8 op;      /* BOOTREQUEST or BOOTREPLY */
				u8 htype;   /* hardware address type.
					     * 1 = 10mb ethernet
					     */
				u8 hlen;    /* hardware address length */
				u8 hops;    /* used by relay agents only */
				u32 xid;    /* unique id */
				u16 secs;   /* elapsed since client began
					     * acquisition/renewal
					     */
				u16 flags;  /* only one flag so far: */
#define BROADCAST_FLAG 0x8000
				/* "I need broadcast replies" */
				u32 ciaddr; /* client IP (if client is in
					     * BOUND, RENEW or REBINDING state)
					     */
				u32 yiaddr; /* 'your' (client) IP address */
				/* IP address of next server to use in
				 * bootstrap, returned in DHCPOFFER,
				 * DHCPACK by server
				 */
				u32 siaddr_nip;
				u32 gateway_nip; /* relay agent IP address */
				u8 chaddr[16];   /* link-layer client hardware
						  * address (MAC)
						  */
				u8 sname[64];    /* server host name (ASCIZ) */
				u8 file[128];    /* boot file name (ASCIZ) */
				u32 cookie;      /* fixed first four option
						  * bytes (99,130,83,99 dec)
						  */
			} __packed;
			int offset = sizeof(struct iphdr) +
				     sizeof(struct udphdr) +
				     offsetof(struct dhcp_packet, chaddr);
			/* Bounds check before reading chaddr from buf. */
			if (offset + ETH_ALEN > len)
				return;
			ether_addr_copy(nic->dest_mac_addr, buf + offset);
		}
	}

	/* Pick plain Ethernet or 802.1Q framing based on the cached VLAN. */
	if (nic->vlan_id > 0) {
		mac_header_data = (void *)&vlan_eth;
		mac_header_len = VLAN_ETH_HLEN;
	} else {
		mac_header_data = (void *)&eth;
		mac_header_len = ETH_HLEN;
	}

	/* Format the data so that it can be put to skb */
	ether_addr_copy(mac_header_data, nic->dest_mac_addr);
	memcpy(mac_header_data + ETH_ALEN, nic->src_mac_addr, ETH_ALEN);

	/* VLAN fields are filled unconditionally but only used when
	 * mac_header_data points at vlan_eth.
	 */
	vlan_eth.h_vlan_TCI = htons(nic->vlan_id);
	vlan_eth.h_vlan_proto = htons(ETH_P_8021Q);

	if (nic_type == NIC_TYPE_ARP) {
		/* Should be response: Only happens because
		 * there was a request from the host
		 */
		eth.h_proto = htons(ETH_P_ARP);
		vlan_eth.h_vlan_encapsulated_proto = htons(ETH_P_ARP);
	} else {
		/* Derive the ethertype from the IP version nibble. */
		ip_version = buf[0] >> 4;
		if (ip_version == IP_VERSION_4) {
			eth.h_proto = htons(ETH_P_IP);
			vlan_eth.h_vlan_encapsulated_proto = htons(ETH_P_IP);
		} else if (ip_version == IP_VERSION_6) {
			eth.h_proto = htons(ETH_P_IPV6);
			vlan_eth.h_vlan_encapsulated_proto = htons(ETH_P_IPV6);
		} else {
			netdev_err(dev, "Unknown IP version %d\n", ip_version);
			return;
		}
	}

	/* Alloc skb and reserve align */
	skb = dev_alloc_skb(len + mac_header_len + NET_IP_ALIGN);
	if (!skb)
		return;
	skb_reserve(skb, NET_IP_ALIGN);
	skb_put_data(skb, mac_header_data, mac_header_len);
	skb_put_data(skb, buf, len);

	skb->protocol = ((struct ethhdr *)mac_header_data)->h_proto;
	skb->dev = dev;
	skb_reset_mac_header(skb);
	skb_pull(skb, ETH_HLEN);

	gdm_lte_rx(skb, nic, nic_type);
}
/* Demultiplex an LTE_RX_MULTI_SDU aggregate: walk the packed SDUs,
 * validating every header and length against the remaining buffer
 * before dispatching each payload to the matching net_device.
 */
static void gdm_lte_multi_sdu_pkt(struct phy_dev *phy_dev, char *buf, int len)
{
	struct net_device *dev;
	struct multi_sdu *multi_sdu = (struct multi_sdu *)buf;
	struct sdu *sdu = NULL;
	u8 endian = phy_dev->get_endian(phy_dev->priv_dev);
	u8 *data = (u8 *)multi_sdu->data;
	int copied;
	u16 i = 0;
	u16 num_packet;
	u16 hci_len;
	u16 cmd_evt;
	u32 nic_type;
	int index;

	num_packet = gdm_dev16_to_cpu(endian, multi_sdu->num_packet);

	for (i = 0; i < num_packet; i++) {
		/* Make sure at least a full sdu header remains. */
		copied = data - multi_sdu->data;
		if (len < copied + sizeof(*sdu)) {
			pr_err("rx prevent buffer overflow");
			return;
		}

		sdu = (struct sdu *)data;
		cmd_evt = gdm_dev16_to_cpu(endian, sdu->cmd_evt);
		hci_len = gdm_dev16_to_cpu(endian, sdu->len);
		nic_type = gdm_dev32_to_cpu(endian, sdu->nic_type);

		if (cmd_evt != LTE_RX_SDU) {
			pr_err("rx sdu wrong hci %04x\n", cmd_evt);
			return;
		}
		/* hci_len includes a 12-byte SDU parameter block; the
		 * remainder must fit inside the aggregate buffer.
		 */
		if (hci_len < 12 ||
		    len < copied + sizeof(*sdu) + (hci_len - 12)) {
			pr_err("rx sdu invalid len %d\n", hci_len);
			return;
		}

		index = find_dev_index(nic_type);
		if (index < 0) {
			pr_err("rx sdu invalid nic_type :%x\n", nic_type);
			return;
		}
		dev = phy_dev->dev[index];
		gdm_lte_netif_rx(dev, (char *)sdu->data,
				 (int)(hci_len - 12), nic_type);

		/* Advance to the next SDU: payload rounded up to 4 bytes
		 * plus the HCI header.
		 */
		data += ((hci_len + 3) & 0xfffc) + HCI_HEADER_SIZE;
	}
}
/* Cache (or clear) the PDN table advertised by the device for this NIC. */
static void gdm_lte_pdn_table(struct net_device *dev, char *buf, int len)
{
	struct nic *nic = netdev_priv(dev);
	struct hci_pdn_table_ind *ind = (struct hci_pdn_table_ind *)buf;
	u8 endian = nic->phy_dev->get_endian(nic->phy_dev->priv_dev);

	if (!ind->activate) {
		/* Deactivation: drop the cached table entirely. */
		memset(&nic->pdn_table, 0x00, sizeof(struct pdn_table));
		netdev_info(dev, "pdn deactivated\n");
		return;
	}

	/* Store the activated PDN parameters in host byte order. */
	nic->pdn_table.activate = ind->activate;
	nic->pdn_table.dft_eps_id = gdm_dev32_to_cpu(endian, ind->dft_eps_id);
	nic->pdn_table.nic_type = gdm_dev32_to_cpu(endian, ind->nic_type);

	netdev_info(dev, "pdn activated, nic_type=0x%x\n",
		    nic->pdn_table.nic_type);
}
/* Top-level HCI receive dispatcher: route single SDUs, multi-SDU
 * aggregates, link indications and PDN table updates.  Anything not
 * consumed internally is forwarded to user space as a netlink event.
 */
static int gdm_lte_receive_pkt(struct phy_dev *phy_dev, char *buf, int len)
{
	struct hci_packet *hci = (struct hci_packet *)buf;
	struct hci_pdn_table_ind *pdn_table = (struct hci_pdn_table_ind *)buf;
	struct sdu *sdu;
	struct net_device *dev;
	u8 endian = phy_dev->get_endian(phy_dev->priv_dev);
	int ret = 0;
	u16 cmd_evt;
	u32 nic_type;
	int index;

	if (!len)
		return ret;

	cmd_evt = gdm_dev16_to_cpu(endian, hci->cmd_evt);

	/* Default device for events not tied to a specific PDN. */
	dev = phy_dev->dev[0];
	if (!dev)
		return 0;

	switch (cmd_evt) {
	case LTE_RX_SDU:
		sdu = (struct sdu *)hci->data;
		nic_type = gdm_dev32_to_cpu(endian, sdu->nic_type);
		index = find_dev_index(nic_type);
		if (index < 0)
			return index;
		dev = phy_dev->dev[index];
		gdm_lte_netif_rx(dev, hci->data, len, nic_type);
		break;
	case LTE_RX_MULTI_SDU:
		gdm_lte_multi_sdu_pkt(phy_dev, buf, len);
		break;
	case LTE_LINK_ON_OFF_INDICATION:
		netdev_info(dev, "link %s\n",
			    ((struct hci_connect_ind *)buf)->connect
			    ? "on" : "off");
		break;
	case LTE_PDN_TABLE_IND:
		pdn_table = (struct hci_pdn_table_ind *)buf;
		nic_type = gdm_dev32_to_cpu(endian, pdn_table->nic_type);
		index = find_dev_index(nic_type);
		if (index < 0)
			return index;
		dev = phy_dev->dev[index];
		gdm_lte_pdn_table(dev, buf, len);
		/* Intentional: PDN updates are also reported to user space. */
		fallthrough;
	default:
		ret = gdm_lte_event_send(dev, buf, len);
		break;
	}

	return ret;
}
/* USB receive completion callback: hand the buffer to the LTE layer. */
static int rx_complete(void *arg, void *data, int len, int context)
{
	struct phy_dev *phy = arg;

	return gdm_lte_receive_pkt(phy, data, len);
}
/* Prime the receive path with a full set of outstanding USB reads. */
void start_rx_proc(struct phy_dev *phy_dev)
{
	int i;

	for (i = 0; i < MAX_RX_SUBMIT_COUNT; i++)
		phy_dev->rcv_func(phy_dev->priv_dev, rx_complete, phy_dev,
				  USB_COMPLETE);
}
/* net_device callbacks shared by every lteXpdnY interface. */
static const struct net_device_ops gdm_netdev_ops = {
	.ndo_open = gdm_lte_open,
	.ndo_stop = gdm_lte_close,
	.ndo_set_config = gdm_lte_set_config,
	.ndo_start_xmit = gdm_lte_tx,
	.ndo_get_stats = gdm_lte_stats,
};
/* Default base MAC used when the device supplies none. */
static u8 gdm_lte_macaddr[ETH_ALEN] = {0x00, 0x0a, 0x3b, 0x00, 0x00, 0x00};

/* Derive the netdev address, a random per-device source MAC (sharing
 * the first three bytes) and the destination MAC from the base address;
 * @index personalises the last byte per PDN interface.
 */
static void form_mac_address(u8 *dev_addr, u8 *nic_src, u8 *nic_dest,
			     u8 *mac_address, u8 index)
{
	/* Form the dev_addr */
	if (!mac_address)
		ether_addr_copy(dev_addr, gdm_lte_macaddr);
	else
		ether_addr_copy(dev_addr, mac_address);

	/* The last byte of the mac address
	 * should be less than or equal to 0xFC
	 */
	dev_addr[ETH_ALEN - 1] += index;

	/* Create random nic src and copy the first
	 * 3 bytes to be the same as dev_addr
	 */
	eth_random_addr(nic_src);
	memcpy(nic_src, dev_addr, 3);

	/* Copy the nic_dest from dev_addr*/
	ether_addr_copy(nic_dest, dev_addr);
}
/* Sanity-check a device-supplied MAC address.
 *
 * An all-zero or multicast address cannot be used as a unicast station
 * address, so fall back to the driver default in those cases.
 * Fix: use ETH_ALEN instead of the magic number 6, and the standard
 * is_multicast_ether_addr() helper instead of open-coding the I/G bit
 * test (consistent with the is_zero_ether_addr() call next to it).
 */
static void validate_mac_address(u8 *mac_address)
{
	/* if zero address or multicast bit set, restore the default value */
	if (is_zero_ether_addr(mac_address) ||
	    is_multicast_ether_addr(mac_address)) {
		pr_err("MAC invalid, restoring default\n");
		memcpy(mac_address, gdm_lte_macaddr, ETH_ALEN);
	}
}
/* Create and register one net_device per supported NIC type
 * ("lte<n>pdn<index>").  On any failure, everything registered so far
 * is torn down via unregister_lte_device().
 */
int register_lte_device(struct phy_dev *phy_dev,
			struct device *dev, u8 *mac_address)
{
	struct nic *nic;
	struct net_device *net;
	char pdn_dev_name[16];
	u8 addr[ETH_ALEN];
	int ret = 0;
	u8 index;

	validate_mac_address(mac_address);

	for (index = 0; index < MAX_NIC_TYPE; index++) {
		/* Create device name lteXpdnX */
		sprintf(pdn_dev_name, "lte%%dpdn%d", index);

		/* Allocate netdev */
		net = alloc_netdev(sizeof(struct nic), pdn_dev_name,
				   NET_NAME_UNKNOWN, ether_setup);
		if (!net) {
			ret = -ENOMEM;
			goto err;
		}
		net->netdev_ops = &gdm_netdev_ops;
		net->flags &= ~IFF_MULTICAST;
		net->mtu = DEFAULT_MTU_SIZE;

		nic = netdev_priv(net);
		memset(nic, 0, sizeof(struct nic));
		nic->netdev = net;
		nic->phy_dev = phy_dev;
		nic->nic_id = index;

		form_mac_address(addr,
				 nic->src_mac_addr,
				 nic->dest_mac_addr,
				 mac_address,
				 index);
		eth_hw_addr_set(net, addr);

		SET_NETDEV_DEV(net, dev);
		SET_NETDEV_DEVTYPE(net, &wwan_type);

		ret = register_netdev(net);
		if (ret)
			goto err;

		netif_carrier_on(net);

		phy_dev->dev[index] = net;
	}

	return 0;

err:
	unregister_lte_device(phy_dev);
	return ret;
}
/* Tear down every PDN netdev that was successfully registered. */
void unregister_lte_device(struct phy_dev *phy_dev)
{
	int i;

	for (i = 0; i < MAX_NIC_TYPE; i++) {
		struct net_device *net = phy_dev->dev[i];

		if (net) {
			unregister_netdev(net);
			free_netdev(net);
		}
	}
}
| linux-master | drivers/staging/gdm724x/gdm_lte.c |
// SPDX-License-Identifier: GPL-2.0
/* Copyright (c) 2012 GCT Semiconductor, Inc. All rights reserved. */
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/usb.h>
#include <linux/sched.h>
#include <linux/kthread.h>
#include <linux/usb/cdc.h>
#include <linux/wait.h>
#include <linux/if_ether.h>
#include <linux/pm_runtime.h>
#include "gdm_usb.h"
#include "gdm_lte.h"
#include "hci.h"
#include "hci_packet.h"
#include "gdm_endian.h"
#define USB_DEVICE_CDC_DATA(vid, pid) \
.match_flags = USB_DEVICE_ID_MATCH_DEVICE | \
USB_DEVICE_ID_MATCH_INT_CLASS | \
USB_DEVICE_ID_MATCH_INT_SUBCLASS,\
.idVendor = vid,\
.idProduct = pid,\
.bInterfaceClass = USB_CLASS_COMM,\
.bInterfaceSubClass = USB_CDC_SUBCLASS_ETHERNET
#define USB_DEVICE_MASS_DATA(vid, pid) \
.match_flags = USB_DEVICE_ID_MATCH_DEVICE | \
USB_DEVICE_ID_MATCH_INT_INFO,\
.idVendor = vid,\
.idProduct = pid,\
.bInterfaceSubClass = USB_SC_SCSI, \
.bInterfaceClass = USB_CLASS_MASS_STORAGE,\
.bInterfaceProtocol = USB_PR_BULK
static const struct usb_device_id id_table[] = {
{ USB_DEVICE_CDC_DATA(VID_GCT, PID_GDM7240) }, /* GCT GDM7240 */
{ USB_DEVICE_CDC_DATA(VID_GCT, PID_GDM7243) }, /* GCT GDM7243 */
{ }
};
MODULE_DEVICE_TABLE(usb, id_table);
static void do_tx(struct work_struct *work);
static void do_rx(struct work_struct *work);
static int gdm_usb_recv(void *priv_dev,
int (*cb)(void *cb_data,
void *data, int len, int context),
void *cb_data,
int context);
/* Ask the device for its MAC address via a synchronous bulk-out
 * LTE_GET_INFORMATION(MAC_ADDRESS) command (4-byte HCI header plus one
 * data byte = 5 bytes on the wire).  The reply arrives asynchronously
 * and is consumed by set_mac_address().
 *
 * Fix: only set udev->request_mac_addr when the request was actually
 * sent — the original set the flag even when usb_bulk_msg() failed,
 * which could make set_mac_address() act on a reply that was never
 * requested.  Also drops the dead `ret = -1` initializer.
 */
static int request_mac_address(struct lte_udev *udev)
{
	struct hci_packet *hci;
	struct usb_device *usbdev = udev->usbdev;
	int actual;
	int ret;

	hci = kmalloc(struct_size(hci, data, 1), GFP_KERNEL);
	if (!hci)
		return -ENOMEM;

	hci->cmd_evt = gdm_cpu_to_dev16(udev->gdm_ed, LTE_GET_INFORMATION);
	hci->len = gdm_cpu_to_dev16(udev->gdm_ed, 1);
	hci->data[0] = MAC_ADDRESS;

	ret = usb_bulk_msg(usbdev, usb_sndbulkpipe(usbdev, 2), hci, 5,
			   &actual, 1000);
	if (!ret)
		udev->request_mac_addr = 1;

	kfree(hci);

	return ret;
}
/* Allocate a tx descriptor with an URB and a buffer of at least @len
 * bytes.  Returns NULL on any allocation failure (atomic context).
 */
static struct usb_tx *alloc_tx_struct(int len)
{
	struct usb_tx *t;

	t = kzalloc(sizeof(*t), GFP_ATOMIC);
	if (!t)
		return NULL;

	t->urb = usb_alloc_urb(0, GFP_ATOMIC);

	/* Pad exact multiples of 512 bytes (see send_tx_packet()). */
	if (!(len % 512))
		len++;

	t->buf = kmalloc(len, GFP_ATOMIC);
	if (!t->urb || !t->buf) {
		usb_free_urb(t->urb);
		kfree(t->buf);
		kfree(t);
		return NULL;
	}

	return t;
}
/* Allocate one SDU tx descriptor plus its fixed-size payload buffer. */
static struct usb_tx_sdu *alloc_tx_sdu_struct(void)
{
	struct usb_tx_sdu *s = kzalloc(sizeof(*s), GFP_KERNEL);

	if (!s)
		return NULL;

	s->buf = kmalloc(SDU_BUF_SIZE, GFP_KERNEL);
	if (!s->buf) {
		kfree(s);
		return NULL;
	}

	return s;
}
/* Release a tx descriptor, its URB and its buffer; NULL is a no-op. */
static void free_tx_struct(struct usb_tx *t)
{
	if (!t)
		return;

	usb_free_urb(t->urb);
	kfree(t->buf);
	kfree(t);
}
/* Release an SDU tx descriptor and its buffer; NULL is a no-op. */
static void free_tx_sdu_struct(struct usb_tx_sdu *t_sdu)
{
	if (!t_sdu)
		return;

	kfree(t_sdu->buf);
	kfree(t_sdu);
}
/* Pop one SDU descriptor from the free pool.  Caller holds tx->lock.
 * *no_spc reports whether this grab exhausted the pool.
 */
static struct usb_tx_sdu *get_tx_sdu_struct(struct tx_cxt *tx, int *no_spc)
{
	struct usb_tx_sdu *t_sdu;

	if (list_empty(&tx->free_list))
		return NULL;

	t_sdu = list_first_entry(&tx->free_list, struct usb_tx_sdu, list);
	list_del(&t_sdu->list);
	tx->avail_count--;

	*no_spc = list_empty(&tx->free_list);

	return t_sdu;
}
/* Return an SDU descriptor to the free pool.  Caller holds tx->lock. */
static void put_tx_struct(struct tx_cxt *tx, struct usb_tx_sdu *t_sdu)
{
	list_add_tail(&t_sdu->list, &tx->free_list);
	tx->avail_count++;
}
/* Allocate one rx descriptor with an URB and a fixed-size buffer. */
static struct usb_rx *alloc_rx_struct(void)
{
	struct usb_rx *r = kmalloc(sizeof(*r), GFP_KERNEL);

	if (!r)
		return NULL;

	r->urb = usb_alloc_urb(0, GFP_KERNEL);
	r->buf = kmalloc(RX_BUF_SIZE, GFP_KERNEL);
	if (!r->urb || !r->buf) {
		usb_free_urb(r->urb);
		kfree(r->buf);
		kfree(r);
		return NULL;
	}

	return r;
}
/* Release an rx descriptor, its URB and its buffer; NULL is a no-op. */
static void free_rx_struct(struct usb_rx *r)
{
	if (!r)
		return;

	usb_free_urb(r->urb);
	kfree(r->buf);
	kfree(r);
}
/* Pop one rx descriptor from the free pool under rx_lock.  When a
 * descriptor is returned, *no_spc reports whether the pool just ran
 * dry; when the pool was already empty, *no_spc is left untouched.
 */
static struct usb_rx *get_rx_struct(struct rx_cxt *rx, int *no_spc)
{
	struct usb_rx *r = NULL;
	unsigned long flags;

	spin_lock_irqsave(&rx->rx_lock, flags);
	if (!list_empty(&rx->free_list)) {
		r = list_first_entry(&rx->free_list, struct usb_rx,
				     free_list);
		list_del(&r->free_list);
		rx->avail_count--;
		*no_spc = list_empty(&rx->free_list);
	}
	spin_unlock_irqrestore(&rx->rx_lock, flags);

	return r;
}
/* Return an rx descriptor to the free pool under rx_lock. */
static void put_rx_struct(struct rx_cxt *rx, struct usb_rx *r)
{
	unsigned long flags;

	spin_lock_irqsave(&rx->rx_lock, flags);
	list_add_tail(&r->free_list, &rx->free_list);
	rx->avail_count++;
	spin_unlock_irqrestore(&rx->rx_lock, flags);
}
/* Free every queued tx/rx resource and cancel in-flight receives.
 * Called from probe error paths and disconnect.
 */
static void release_usb(struct lte_udev *udev)
{
	struct rx_cxt *rx = &udev->rx;
	struct tx_cxt *tx = &udev->tx;
	struct usb_tx *t, *t_next;
	struct usb_rx *r, *r_next;
	struct usb_tx_sdu *t_sdu, *t_sdu_next;
	unsigned long flags;

	/* Drain all three tx lists (pending SDUs, HCI commands, free pool)
	 * under a single lock hold.
	 */
	spin_lock_irqsave(&tx->lock, flags);
	list_for_each_entry_safe(t_sdu, t_sdu_next, &tx->sdu_list, list) {
		list_del(&t_sdu->list);
		free_tx_sdu_struct(t_sdu);
	}

	list_for_each_entry_safe(t, t_next, &tx->hci_list, list) {
		list_del(&t->list);
		free_tx_struct(t);
	}

	list_for_each_entry_safe(t_sdu, t_sdu_next, &tx->free_list, list) {
		list_del(&t_sdu->list);
		free_tx_sdu_struct(t_sdu);
	}
	spin_unlock_irqrestore(&tx->lock, flags);

	/* usb_kill_urb() can sleep and its completion handler takes the
	 * submit lock, so the lock is dropped around each kill.
	 */
	spin_lock_irqsave(&rx->submit_lock, flags);
	list_for_each_entry_safe(r, r_next, &rx->rx_submit_list,
				 rx_submit_list) {
		spin_unlock_irqrestore(&rx->submit_lock, flags);
		usb_kill_urb(r->urb);
		spin_lock_irqsave(&rx->submit_lock, flags);
	}
	spin_unlock_irqrestore(&rx->submit_lock, flags);

	spin_lock_irqsave(&rx->rx_lock, flags);
	list_for_each_entry_safe(r, r_next, &rx->free_list, free_list) {
		list_del(&r->free_list);
		free_rx_struct(r);
	}
	spin_unlock_irqrestore(&rx->rx_lock, flags);

	/* Only free to_host entries belonging to this udev. */
	spin_lock_irqsave(&rx->to_host_lock, flags);
	list_for_each_entry_safe(r, r_next, &rx->to_host_list, to_host_list) {
		if (r->index == (void *)udev) {
			list_del(&r->to_host_list);
			free_rx_struct(r);
		}
	}
	spin_unlock_irqrestore(&rx->to_host_lock, flags);
}
/* Initialise all tx/rx bookkeeping, pre-allocate the SDU and rx pools
 * and set up the deferred tx/rx workers.  On failure everything
 * allocated so far is released.
 */
static int init_usb(struct lte_udev *udev)
{
	int ret = 0;
	int i;
	struct tx_cxt *tx = &udev->tx;
	struct rx_cxt *rx = &udev->rx;
	struct usb_tx_sdu *t_sdu = NULL;
	struct usb_rx *r = NULL;

	udev->send_complete = 1;
	udev->tx_stop = 0;
	udev->request_mac_addr = 0;
	udev->usb_state = PM_NORMAL;

	INIT_LIST_HEAD(&tx->sdu_list);
	INIT_LIST_HEAD(&tx->hci_list);
	INIT_LIST_HEAD(&tx->free_list);
	INIT_LIST_HEAD(&rx->rx_submit_list);
	INIT_LIST_HEAD(&rx->free_list);
	INIT_LIST_HEAD(&rx->to_host_list);
	spin_lock_init(&tx->lock);
	spin_lock_init(&rx->rx_lock);
	spin_lock_init(&rx->submit_lock);
	spin_lock_init(&rx->to_host_lock);

	tx->avail_count = 0;
	rx->avail_count = 0;

	udev->rx_cb = NULL;

	/* Pre-fill the SDU tx pool. */
	for (i = 0; i < MAX_NUM_SDU_BUF; i++) {
		t_sdu = alloc_tx_sdu_struct();
		if (!t_sdu) {
			ret = -ENOMEM;
			goto fail;
		}

		list_add(&t_sdu->list, &tx->free_list);
		tx->avail_count++;
	}

	/* Twice the submit count so refills never starve the rx path. */
	for (i = 0; i < MAX_RX_SUBMIT_COUNT * 2; i++) {
		r = alloc_rx_struct();
		if (!r) {
			ret = -ENOMEM;
			goto fail;
		}

		list_add(&r->free_list, &rx->free_list);
		rx->avail_count++;
	}
	INIT_DELAYED_WORK(&udev->work_tx, do_tx);
	INIT_DELAYED_WORK(&udev->work_rx, do_rx);
	return 0;
fail:
	release_usb(udev);
	return ret;
}
/* Consume a MAC_ADDRESS TLV from an LTE_GET_INFORMATION_RESULT reply
 * and register the network devices with it.  Returns 1 when the TLV
 * was consumed, 0 otherwise.
 *
 * Fix: tlv->len is device-controlled; the original copied tlv->len
 * bytes unchecked into the six-byte stack buffer, allowing a buffer
 * overflow from a malicious or buggy device.  Clamp the copy to
 * ETH_ALEN.
 */
static int set_mac_address(u8 *data, void *arg)
{
	struct phy_dev *phy_dev = arg;
	struct lte_udev *udev = phy_dev->priv_dev;
	struct tlv *tlv = (struct tlv *)data;
	u8 mac_address[ETH_ALEN] = {0, };

	if (tlv->type == MAC_ADDRESS && udev->request_mac_addr) {
		memcpy(mac_address, tlv->data,
		       min_t(size_t, tlv->len, ETH_ALEN));
		if (register_lte_device(phy_dev,
					&udev->intf->dev, mac_address) < 0)
			pr_err("register lte device failed\n");

		udev->request_mac_addr = 0;

		return 1;
	}

	return 0;
}
/* Deferred rx worker: drain the to_host list, dispatch each completed
 * buffer to its callback (intercepting MAC-address replies first),
 * then recycle the descriptor and resubmit a fresh receive.
 */
static void do_rx(struct work_struct *work)
{
	struct lte_udev *udev =
		container_of(work, struct lte_udev, work_rx.work);
	struct rx_cxt *rx = &udev->rx;
	struct usb_rx *r;
	struct hci_packet *hci;
	struct phy_dev *phy_dev;
	u16 cmd_evt;
	int ret;
	unsigned long flags;

	while (1) {
		/* Pop one completed buffer; stop when the list is empty. */
		spin_lock_irqsave(&rx->to_host_lock, flags);
		if (list_empty(&rx->to_host_list)) {
			spin_unlock_irqrestore(&rx->to_host_lock, flags);
			break;
		}
		r = list_entry(rx->to_host_list.next,
			       struct usb_rx, to_host_list);
		list_del(&r->to_host_list);
		spin_unlock_irqrestore(&rx->to_host_lock, flags);

		phy_dev = r->cb_data;
		udev = phy_dev->priv_dev;
		hci = (struct hci_packet *)r->buf;
		cmd_evt = gdm_dev16_to_cpu(udev->gdm_ed, hci->cmd_evt);

		switch (cmd_evt) {
		case LTE_GET_INFORMATION_RESULT:
			/* MAC replies are handled here; anything else in
			 * the result is forwarded to the normal callback.
			 */
			if (set_mac_address(hci->data, r->cb_data) == 0) {
				r->callback(r->cb_data,
					    r->buf,
					    r->urb->actual_length,
					    KERNEL_THREAD);
			}
			break;

		default:
			if (r->callback) {
				ret = r->callback(r->cb_data,
						  r->buf,
						  r->urb->actual_length,
						  KERNEL_THREAD);

				if (ret == -EAGAIN)
					pr_err("failed to send received data\n");
			}
			break;
		}

		/* Recycle the descriptor, then keep a receive outstanding.
		 * NOTE(review): r->callback/r->cb_data are read after the
		 * descriptor returns to the pool — appears to rely on the
		 * fields staying valid until reallocation; confirm.
		 */
		put_rx_struct(rx, r);

		gdm_usb_recv(udev,
			     r->callback,
			     r->cb_data,
			     USB_COMPLETE);
	}
}
/* Unlink @r from the in-flight submit list if it is still queued. */
static void remove_rx_submit_list(struct usb_rx *r, struct rx_cxt *rx)
{
	struct usb_rx *cur, *next;
	unsigned long flags;

	spin_lock_irqsave(&rx->submit_lock, flags);
	list_for_each_entry_safe(cur, next, &rx->rx_submit_list,
				 rx_submit_list) {
		if (cur == r) {
			list_del(&cur->rx_submit_list);
			break;
		}
	}
	spin_unlock_irqrestore(&rx->submit_lock, flags);
}
/* Bulk-in URB completion: queue successful buffers for the rx worker,
 * recycle failed ones, and keep the autosuspend timer fresh.
 */
static void gdm_usb_rcv_complete(struct urb *urb)
{
	struct usb_rx *r = urb->context;
	struct rx_cxt *rx = r->rx;
	unsigned long flags;
	struct lte_udev *udev = container_of(r->rx, struct lte_udev, rx);
	struct usb_device *usbdev = udev->usbdev;

	remove_rx_submit_list(r, rx);

	if (!urb->status && r->callback) {
		/* Hand off to do_rx() in process context. */
		spin_lock_irqsave(&rx->to_host_lock, flags);
		list_add_tail(&r->to_host_list, &rx->to_host_list);
		schedule_work(&udev->work_rx.work);
		spin_unlock_irqrestore(&rx->to_host_lock, flags);
	} else {
		/* Suppress expected errors while suspending. */
		if (urb->status && udev->usb_state == PM_NORMAL)
			dev_err(&urb->dev->dev, "%s: urb status error %d\n",
				__func__, urb->status);

		put_rx_struct(rx, r);
	}

	usb_mark_last_busy(usbdev);
}
/* Submit one bulk-in read on endpoint 0x83.  @context selects the
 * allocation mode (KERNEL_THREAD may sleep; USB_COMPLETE runs in
 * completion context and must not).  On submit failure the descriptor
 * is taken back off the in-flight list and recycled.
 */
static int gdm_usb_recv(void *priv_dev,
			int (*cb)(void *cb_data,
				  void *data, int len, int context),
			void *cb_data,
			int context)
{
	struct lte_udev *udev = priv_dev;
	struct usb_device *usbdev = udev->usbdev;
	struct rx_cxt *rx = &udev->rx;
	struct usb_rx *r;
	int no_spc;
	int ret;
	unsigned long flags;

	if (!udev->usbdev) {
		pr_err("invalid device\n");
		return -ENODEV;
	}

	r = get_rx_struct(rx, &no_spc);
	if (!r) {
		pr_err("Out of Memory\n");
		return -ENOMEM;
	}

	/* Remember the callback so resume can resubmit with it. */
	udev->rx_cb = cb;
	r->callback = cb;
	r->cb_data = cb_data;
	r->index = (void *)udev;
	r->rx = rx;

	usb_fill_bulk_urb(r->urb,
			  usbdev,
			  usb_rcvbulkpipe(usbdev, 0x83),
			  r->buf,
			  RX_BUF_SIZE,
			  gdm_usb_rcv_complete,
			  r);

	spin_lock_irqsave(&rx->submit_lock, flags);
	list_add_tail(&r->rx_submit_list, &rx->rx_submit_list);
	spin_unlock_irqrestore(&rx->submit_lock, flags);

	if (context == KERNEL_THREAD)
		ret = usb_submit_urb(r->urb, GFP_KERNEL);
	else
		ret = usb_submit_urb(r->urb, GFP_ATOMIC);

	if (ret) {
		spin_lock_irqsave(&rx->submit_lock, flags);
		list_del(&r->rx_submit_list);
		spin_unlock_irqrestore(&rx->submit_lock, flags);

		pr_err("usb_submit_urb failed (%p)\n", r);
		put_rx_struct(rx, r);
	}

	return ret;
}
/* Bulk-out URB completion: run the per-packet callback, free the tx
 * descriptor, then mark the pipe idle and kick the tx worker for the
 * next queued packet.
 */
static void gdm_usb_send_complete(struct urb *urb)
{
	struct usb_tx *t = urb->context;
	struct tx_cxt *tx = t->tx;
	struct lte_udev *udev = container_of(tx, struct lte_udev, tx);
	unsigned long flags;

	/* URB was killed (e.g. on disconnect): descriptor cleanup is
	 * handled elsewhere, so just bail.
	 */
	if (urb->status == -ECONNRESET) {
		dev_info(&urb->dev->dev, "CONNRESET\n");
		return;
	}

	if (t->callback)
		t->callback(t->cb_data);

	free_tx_struct(t);

	spin_lock_irqsave(&tx->lock, flags);
	udev->send_complete = 1;
	schedule_work(&udev->work_tx.work);
	spin_unlock_irqrestore(&tx->lock, flags);
}
/* Fire one bulk-out transfer on endpoint 2.  Exact multiples of 512
 * bytes are padded by one so the device does not interpret the end of
 * the transfer as a zero-length-packet boundary.
 */
static int send_tx_packet(struct usb_device *usbdev, struct usb_tx *t, u32 len)
{
	int ret;

	if (!(len % 512))
		len++;

	usb_fill_bulk_urb(t->urb, usbdev, usb_sndbulkpipe(usbdev, 2),
			  t->buf, len, gdm_usb_send_complete, t);

	ret = usb_submit_urb(t->urb, GFP_ATOMIC);
	if (ret)
		dev_err(&usbdev->dev, "usb_submit_urb failed: %d\n",
			ret);

	usb_mark_last_busy(usbdev);

	return ret;
}
/* Pack queued SDUs into one LTE_TX_MULTI_SDU frame in @send_buf.
 * Stops at MAX_PACKET_IN_MULTI_SDU packets or when the next SDU would
 * exceed MAX_SDU_SIZE.  Returns the total frame length in bytes.
 */
static u32 packet_aggregation(struct lte_udev *udev, u8 *send_buf)
{
	struct tx_cxt *tx = &udev->tx;
	struct usb_tx_sdu *t_sdu = NULL;
	struct multi_sdu *multi_sdu = (struct multi_sdu *)send_buf;
	u16 send_len = 0;
	u16 num_packet = 0;
	unsigned long flags;

	multi_sdu->cmd_evt = gdm_cpu_to_dev16(udev->gdm_ed, LTE_TX_MULTI_SDU);

	while (num_packet < MAX_PACKET_IN_MULTI_SDU) {
		spin_lock_irqsave(&tx->lock, flags);
		if (list_empty(&tx->sdu_list)) {
			spin_unlock_irqrestore(&tx->lock, flags);
			break;
		}

		t_sdu = list_entry(tx->sdu_list.next, struct usb_tx_sdu, list);
		/* Peek first: leave the SDU queued if it would overflow. */
		if (send_len + t_sdu->len > MAX_SDU_SIZE) {
			spin_unlock_irqrestore(&tx->lock, flags);
			break;
		}

		list_del(&t_sdu->list);
		spin_unlock_irqrestore(&tx->lock, flags);

		memcpy(multi_sdu->data + send_len, t_sdu->buf, t_sdu->len);

		/* Each SDU is 4-byte aligned inside the aggregate. */
		send_len += (t_sdu->len + 3) & 0xfffc;
		num_packet++;

		/* Run completion callbacks early only while the pool is
		 * comfortable; otherwise defer back-pressure release.
		 */
		if (tx->avail_count > 10)
			t_sdu->callback(t_sdu->cb_data);

		spin_lock_irqsave(&tx->lock, flags);
		put_tx_struct(tx, t_sdu);
		spin_unlock_irqrestore(&tx->lock, flags);
	}

	multi_sdu->len = gdm_cpu_to_dev16(udev->gdm_ed, send_len);
	multi_sdu->num_packet = gdm_cpu_to_dev16(udev->gdm_ed, num_packet);

	return send_len + offsetof(struct multi_sdu, data);
}
/* Deferred tx worker: one transfer in flight at a time (send_complete
 * gates resubmission).  HCI commands take priority over SDU traffic;
 * SDUs are aggregated into a single multi-SDU frame per transfer.
 */
static void do_tx(struct work_struct *work)
{
	struct lte_udev *udev =
		container_of(work, struct lte_udev, work_tx.work);
	struct usb_device *usbdev = udev->usbdev;
	struct tx_cxt *tx = &udev->tx;
	struct usb_tx *t = NULL;
	int is_send = 0;
	u32 len = 0;
	unsigned long flags;

	/* Bump the autopm reference just to reset the idle timer. */
	if (!usb_autopm_get_interface(udev->intf))
		usb_autopm_put_interface(udev->intf);

	if (udev->usb_state == PM_SUSPEND)
		return;

	spin_lock_irqsave(&tx->lock, flags);
	if (!udev->send_complete) {
		/* A transfer is still in flight; its completion will
		 * reschedule this worker.
		 */
		spin_unlock_irqrestore(&tx->lock, flags);
		return;
	}
	udev->send_complete = 0;

	if (!list_empty(&tx->hci_list)) {
		/* HCI commands go out first, one per transfer. */
		t = list_entry(tx->hci_list.next, struct usb_tx, list);
		list_del(&t->list);
		len = t->len;
		t->is_sdu = 0;
		is_send = 1;
	} else if (!list_empty(&tx->sdu_list)) {
		if (udev->tx_stop) {
			udev->send_complete = 1;
			spin_unlock_irqrestore(&tx->lock, flags);
			return;
		}

		t = alloc_tx_struct(TX_BUF_SIZE);
		if (!t) {
			spin_unlock_irqrestore(&tx->lock, flags);
			return;
		}
		t->callback = NULL;
		t->tx = tx;
		t->is_sdu = 1;
		is_send = 1;
	}

	if (!is_send) {
		udev->send_complete = 1;
		spin_unlock_irqrestore(&tx->lock, flags);
		return;
	}
	spin_unlock_irqrestore(&tx->lock, flags);

	/* Aggregation happens outside the lock; it re-takes it per SDU. */
	if (t->is_sdu)
		len = packet_aggregation(udev, t->buf);

	if (send_tx_packet(usbdev, t, len)) {
		pr_err("send_tx_packet failed\n");
		t->callback = NULL;
		/* Reuse the completion path to free t and clear the gate. */
		gdm_usb_send_complete(t->urb);
	}
}
/* Per-SDU parameter block (dft_eps_ID + bearer_ID + nic_type). */
#define SDU_PARAM_LEN 12

/* Queue one network payload for transmission.  Ethernet frames are
 * stripped of their MAC header (the device works at the IP layer);
 * ARP payloads are sent as-is.  Returns 0, TX_NO_BUFFER when this grab
 * emptied the SDU pool (caller should throttle), or TX_NO_SPC/TX_NO_DEV.
 */
static int gdm_usb_sdu_send(void *priv_dev, void *data, int len,
			    unsigned int dft_eps_ID, unsigned int eps_ID,
			    void (*cb)(void *data), void *cb_data,
			    int dev_idx, int nic_type)
{
	struct lte_udev *udev = priv_dev;
	struct tx_cxt *tx = &udev->tx;
	struct usb_tx_sdu *t_sdu;
	struct sdu *sdu = NULL;
	unsigned long flags;
	int no_spc = 0;
	u16 send_len;

	if (!udev->usbdev) {
		pr_err("sdu send - invalid device\n");
		return TX_NO_DEV;
	}

	spin_lock_irqsave(&tx->lock, flags);
	t_sdu = get_tx_sdu_struct(tx, &no_spc);
	spin_unlock_irqrestore(&tx->lock, flags);

	if (!t_sdu) {
		pr_err("sdu send - free list empty\n");
		return TX_NO_SPC;
	}

	sdu = (struct sdu *)t_sdu->buf;
	sdu->cmd_evt = gdm_cpu_to_dev16(udev->gdm_ed, LTE_TX_SDU);
	if (nic_type == NIC_TYPE_ARP) {
		/* ARP is emulated locally and carries no MAC header here. */
		send_len = len + SDU_PARAM_LEN;
		memcpy(sdu->data, data, len);
	} else {
		/* Drop the Ethernet header; the device expects raw IP. */
		send_len = len - ETH_HLEN;
		send_len += SDU_PARAM_LEN;
		memcpy(sdu->data, data + ETH_HLEN, len - ETH_HLEN);
	}

	sdu->len = gdm_cpu_to_dev16(udev->gdm_ed, send_len);
	sdu->dft_eps_ID = gdm_cpu_to_dev32(udev->gdm_ed, dft_eps_ID);
	sdu->bearer_ID = gdm_cpu_to_dev32(udev->gdm_ed, eps_ID);
	sdu->nic_type = gdm_cpu_to_dev32(udev->gdm_ed, nic_type);

	t_sdu->len = send_len + HCI_HEADER_SIZE;
	t_sdu->callback = cb;
	t_sdu->cb_data = cb_data;

	spin_lock_irqsave(&tx->lock, flags);
	list_add_tail(&t_sdu->list, &tx->sdu_list);
	schedule_work(&udev->work_tx.work);
	spin_unlock_irqrestore(&tx->lock, flags);

	if (no_spc)
		return TX_NO_BUFFER;

	return 0;
}
/* Queue one HCI command buffer for transmission; the buffer already
 * carries its own framing and is sent verbatim by the tx worker.
 */
static int gdm_usb_hci_send(void *priv_dev, void *data, int len,
			    void (*cb)(void *data), void *cb_data)
{
	struct lte_udev *udev = priv_dev;
	struct tx_cxt *tx = &udev->tx;
	struct usb_tx *t;
	unsigned long flags;

	if (!udev->usbdev) {
		pr_err("hci send - invalid device\n");
		return -ENODEV;
	}

	t = alloc_tx_struct(len);
	if (!t) {
		pr_err("hci_send - out of memory\n");
		return -ENOMEM;
	}

	memcpy(t->buf, data, len);
	t->tx = tx;
	t->len = len;
	t->callback = cb;
	t->cb_data = cb_data;
	t->is_sdu = 0;

	/* Queue on the priority list and kick the tx worker. */
	spin_lock_irqsave(&tx->lock, flags);
	list_add_tail(&t->list, &tx->hci_list);
	schedule_work(&udev->work_tx.work);
	spin_unlock_irqrestore(&tx->lock, flags);

	return 0;
}
/* Report the device's wire endianness selected at probe time. */
static u8 gdm_usb_get_endian(void *priv_dev)
{
	return ((struct lte_udev *)priv_dev)->gdm_ed;
}
/* USB probe: allocate the phy/udev pair, wire the PHY callbacks, set
 * up tx/rx resources, enable autosuspend and kick off the asynchronous
 * MAC address request (netdevs are registered once the reply arrives).
 */
static int gdm_usb_probe(struct usb_interface *intf,
			 const struct usb_device_id *id)
{
	int ret = 0;
	struct phy_dev *phy_dev = NULL;
	struct lte_udev *udev = NULL;
	u16 idVendor, idProduct;
	int bInterfaceNumber;
	struct usb_device *usbdev = interface_to_usbdev(intf);

	bInterfaceNumber = intf->cur_altsetting->desc.bInterfaceNumber;
	idVendor = __le16_to_cpu(usbdev->descriptor.idVendor);
	idProduct = __le16_to_cpu(usbdev->descriptor.idProduct);

	pr_info("net vid = 0x%04x pid = 0x%04x\n", idVendor, idProduct);

	/* Only the network interface of the composite device is ours. */
	if (bInterfaceNumber > NETWORK_INTERFACE) {
		pr_info("not a network device\n");
		return -ENODEV;
	}

	phy_dev = kzalloc(sizeof(*phy_dev), GFP_KERNEL);
	if (!phy_dev)
		return -ENOMEM;

	udev = kzalloc(sizeof(*udev), GFP_KERNEL);
	if (!udev) {
		ret = -ENOMEM;
		goto err_udev;
	}

	phy_dev->priv_dev = (void *)udev;
	phy_dev->send_hci_func = gdm_usb_hci_send;
	phy_dev->send_sdu_func = gdm_usb_sdu_send;
	phy_dev->rcv_func = gdm_usb_recv;
	phy_dev->get_endian = gdm_usb_get_endian;

	udev->usbdev = usbdev;
	ret = init_usb(udev);
	if (ret < 0) {
		dev_err(intf->usb_dev, "init_usb func failed\n");
		goto err_init_usb;
	}
	udev->intf = intf;

	intf->needs_remote_wakeup = 1;
	usb_enable_autosuspend(usbdev);
	pm_runtime_set_autosuspend_delay(&usbdev->dev, AUTO_SUSPEND_TIMER);

	/* List up hosts with big endians, otherwise,
	 * defaults to little endian
	 */
	if (idProduct == PID_GDM7243)
		udev->gdm_ed = ENDIANNESS_BIG;
	else
		udev->gdm_ed = ENDIANNESS_LITTLE;

	ret = request_mac_address(udev);
	if (ret < 0) {
		dev_err(intf->usb_dev, "request Mac address failed\n");
		goto err_mac_address;
	}

	start_rx_proc(phy_dev);

	usb_get_dev(usbdev);
	usb_set_intfdata(intf, phy_dev);

	return 0;

err_mac_address:
	release_usb(udev);
err_init_usb:
	kfree(udev);
err_udev:
	kfree(phy_dev);

	return ret;
}
/* USB disconnect: tear everything down in reverse order of probe and
 * drop the device reference taken there.
 *
 * Fix: removed the dead `udev = NULL;` / `phy_dev = NULL;` stores —
 * they assigned to local variables immediately before the function
 * returned and had no effect.
 */
static void gdm_usb_disconnect(struct usb_interface *intf)
{
	struct phy_dev *phy_dev;
	struct lte_udev *udev;
	struct usb_device *usbdev;

	usbdev = interface_to_usbdev(intf);
	phy_dev = usb_get_intfdata(intf);
	udev = phy_dev->priv_dev;

	unregister_lte_device(phy_dev);
	release_usb(udev);

	kfree(udev);
	kfree(phy_dev);

	usb_put_dev(usbdev);
}
/* Runtime/system suspend: mark the device suspended, kill all
 * in-flight receive URBs (dropping the submit lock around each
 * usb_kill_urb, which can sleep) and flush the deferred workers.
 */
static int gdm_usb_suspend(struct usb_interface *intf, pm_message_t pm_msg)
{
	struct phy_dev *phy_dev;
	struct lte_udev *udev;
	struct rx_cxt *rx;
	struct usb_rx *r;
	struct usb_rx *r_next;
	unsigned long flags;

	phy_dev = usb_get_intfdata(intf);
	udev = phy_dev->priv_dev;
	rx = &udev->rx;
	if (udev->usb_state != PM_NORMAL) {
		dev_err(intf->usb_dev, "usb suspend - invalid state\n");
		return -1;
	}

	udev->usb_state = PM_SUSPEND;

	spin_lock_irqsave(&rx->submit_lock, flags);
	list_for_each_entry_safe(r, r_next, &rx->rx_submit_list,
				 rx_submit_list) {
		spin_unlock_irqrestore(&rx->submit_lock, flags);
		usb_kill_urb(r->urb);
		spin_lock_irqsave(&rx->submit_lock, flags);
	}
	spin_unlock_irqrestore(&rx->submit_lock, flags);

	cancel_work_sync(&udev->work_tx.work);
	cancel_work_sync(&udev->work_rx.work);

	return 0;
}
/* Runtime/system resume: re-arm the receive pipeline up to the target
 * number of outstanding URBs and restart pending transmissions.
 */
static int gdm_usb_resume(struct usb_interface *intf)
{
	struct phy_dev *phy_dev;
	struct lte_udev *udev;
	struct tx_cxt *tx;
	struct rx_cxt *rx;
	unsigned long flags;
	int issue_count;
	int i;

	phy_dev = usb_get_intfdata(intf);
	udev = phy_dev->priv_dev;
	rx = &udev->rx;

	if (udev->usb_state != PM_SUSPEND) {
		dev_err(intf->usb_dev, "usb resume - invalid state\n");
		return -1;
	}
	udev->usb_state = PM_NORMAL;

	/* Free-pool size minus the submit target tells how many reads
	 * must be resubmitted to get back to MAX_RX_SUBMIT_COUNT
	 * outstanding.
	 */
	spin_lock_irqsave(&rx->rx_lock, flags);
	issue_count = rx->avail_count - MAX_RX_SUBMIT_COUNT;
	spin_unlock_irqrestore(&rx->rx_lock, flags);

	if (issue_count >= 0) {
		for (i = 0; i < issue_count; i++)
			gdm_usb_recv(phy_dev->priv_dev,
				     udev->rx_cb,
				     phy_dev,
				     USB_COMPLETE);
	}

	tx = &udev->tx;
	spin_lock_irqsave(&tx->lock, flags);
	schedule_work(&udev->work_tx.work);
	spin_unlock_irqrestore(&tx->lock, flags);

	return 0;
}
/* USB driver glue; reset_resume reuses the normal resume path. */
static struct usb_driver gdm_usb_lte_driver = {
	.name = "gdm_lte",
	.probe = gdm_usb_probe,
	.disconnect = gdm_usb_disconnect,
	.id_table = id_table,
	.supports_autosuspend = 1,
	.suspend = gdm_usb_suspend,
	.resume = gdm_usb_resume,
	.reset_resume = gdm_usb_resume,
};
/* Module init: the netlink event channel must exist before any probe. */
static int __init gdm_usb_lte_init(void)
{
	int err = gdm_lte_event_init();

	if (err < 0) {
		pr_err("error creating event\n");
		return -1;
	}

	return usb_register(&gdm_usb_lte_driver);
}
/*
 * Module exit: tear down the netlink event channel, then unregister the
 * USB driver (which disconnects any still-bound devices).
 */
static void __exit gdm_usb_lte_exit(void)
{
	gdm_lte_event_exit();
	usb_deregister(&gdm_usb_lte_driver);
}
module_init(gdm_usb_lte_init);
module_exit(gdm_usb_lte_exit);
MODULE_VERSION(DRIVER_VERSION);
MODULE_DESCRIPTION("GCT LTE USB Device Driver");
MODULE_LICENSE("GPL");
| linux-master | drivers/staging/gdm724x/gdm_usb.c |
// SPDX-License-Identifier: GPL-2.0
/* Copyright (c) 2012 GCT Semiconductor, Inc. All rights reserved. */
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
#include <linux/kernel.h>
#include <linux/errno.h>
#include <linux/tty.h>
#include <linux/tty_driver.h>
#include <linux/tty_flip.h>
#include <linux/module.h>
#include <linux/slab.h>
#include <linux/usb/cdc.h>
#include <linux/serial.h>
#include "gdm_tty.h"
#define GDM_TTY_MAJOR 0
#define GDM_TTY_MINOR 32
#define WRITE_SIZE 2048
#define MUX_TX_MAX_SIZE 2048
static inline bool gdm_tty_ready(struct gdm *gdm)
{
return gdm && gdm->tty_dev && gdm->port.count;
}
static struct tty_driver *gdm_driver[TTY_MAX_COUNT];
static struct gdm *gdm_table[TTY_MAX_COUNT][GDM_TTY_MINOR];
static DEFINE_MUTEX(gdm_table_lock);
static const char *DRIVER_STRING[TTY_MAX_COUNT] = {"GCTATC", "GCTDM"};
static char *DEVICE_STRING[TTY_MAX_COUNT] = {"GCT-ATC", "GCT-DM"};
/*
 * Final reference drop on the tty_port: clear the gdm_table slot under
 * the table lock and free the per-port context.
 */
static void gdm_port_destruct(struct tty_port *port)
{
	struct gdm *gdm = container_of(port, struct gdm, port);

	mutex_lock(&gdm_table_lock);
	gdm_table[gdm->index][gdm->minor] = NULL;
	mutex_unlock(&gdm_table_lock);

	kfree(gdm);
}
static const struct tty_port_operations gdm_port_ops = {
.destruct = gdm_port_destruct,
};
/*
 * gdm_tty_install() - tty_operations .install hook.
 *
 * Maps the driver name ("GCTATC"/"GCTDM") to a row of gdm_table, takes
 * a port reference for the lifetime of this tty and performs the
 * standard install.  Returns -ENODEV when no device is bound to the
 * requested minor, otherwise tty_standard_install()'s result.
 */
static int gdm_tty_install(struct tty_driver *driver, struct tty_struct *tty)
{
	struct gdm *gdm = NULL;
	int ret;

	/* On success, ret doubles as the gdm_table row index. */
	ret = match_string(DRIVER_STRING, TTY_MAX_COUNT,
			   tty->driver->driver_name);
	if (ret < 0)
		return -ENODEV;

	mutex_lock(&gdm_table_lock);
	gdm = gdm_table[ret][tty->index];
	if (!gdm) {
		mutex_unlock(&gdm_table_lock);
		return -ENODEV;
	}

	/* Reference released in gdm_tty_cleanup(). */
	tty_port_get(&gdm->port);

	ret = tty_standard_install(driver, tty);
	if (ret) {
		tty_port_put(&gdm->port);
		mutex_unlock(&gdm_table_lock);
		return ret;
	}

	tty->driver_data = gdm;
	mutex_unlock(&gdm_table_lock);
	return 0;
}
/* .open hook: delegate open-count bookkeeping to the tty_port layer. */
static int gdm_tty_open(struct tty_struct *tty, struct file *filp)
{
	struct gdm *port_ctx = tty->driver_data;

	return tty_port_open(&port_ctx->port, tty, filp);
}
/* .cleanup hook: drop the port reference taken in gdm_tty_install(). */
static void gdm_tty_cleanup(struct tty_struct *tty)
{
	struct gdm *port_ctx = tty->driver_data;

	tty_port_put(&port_ctx->port);
}
/* .hangup hook: forward to the tty_port helper. */
static void gdm_tty_hangup(struct tty_struct *tty)
{
	struct gdm *port_ctx = tty->driver_data;

	tty_port_hangup(&port_ctx->port);
}
/* .close hook: forward to the tty_port helper. */
static void gdm_tty_close(struct tty_struct *tty, struct file *filp)
{
	struct gdm *port_ctx = tty->driver_data;

	tty_port_close(&port_ctx->port, tty, filp);
}
/*
 * gdm_tty_recv_complete() - mux RX callback pushing data to the tty.
 *
 * @data/@len: received payload (may be NULL/0 for a pure completion).
 * @index:     tty channel the packet belongs to.
 * @complete:  RECV_PACKET_PROCESS_COMPLETE when this is the last
 *             callback for the URB, in which case a new receive is
 *             re-armed via recv_func().
 *
 * Returns 0, TO_HOST_PORT_CLOSE when the port is not open, or
 * TO_HOST_BUFFER_REQUEST_FAIL when the flip buffer is full.
 *
 * Fix: the original dereferenced gdm->tty_dev even when gdm itself was
 * NULL (gdm_tty_ready() returns false for a NULL gdm), crashing if a
 * packet arrived for an unbound channel.  Bail out early instead; with
 * no gdm there is no recv_func to re-arm anyway.
 */
static int gdm_tty_recv_complete(void *data,
				 int len,
				 int index,
				 struct tty_dev *tty_dev,
				 int complete)
{
	struct gdm *gdm = tty_dev->gdm[index];

	if (!gdm)
		return TO_HOST_PORT_CLOSE;

	if (!gdm_tty_ready(gdm)) {
		/* Port closed: still re-arm the receive pipeline. */
		if (complete == RECV_PACKET_PROCESS_COMPLETE)
			gdm->tty_dev->recv_func(gdm->tty_dev->priv_dev,
						gdm_tty_recv_complete);
		return TO_HOST_PORT_CLOSE;
	}

	if (data && len) {
		if (tty_buffer_request_room(&gdm->port, len) == len) {
			tty_insert_flip_string(&gdm->port, data, len);
			tty_flip_buffer_push(&gdm->port);
		} else {
			/* Caller retries; do not re-arm yet. */
			return TO_HOST_BUFFER_REQUEST_FAIL;
		}
	}

	if (complete == RECV_PACKET_PROCESS_COMPLETE)
		gdm->tty_dev->recv_func(gdm->tty_dev->priv_dev,
					gdm_tty_recv_complete);

	return 0;
}
/* TX completion: wake up writers once the mux layer consumed the data. */
static void gdm_tty_send_complete(void *arg)
{
	struct gdm *port_ctx = arg;

	if (gdm_tty_ready(port_ctx))
		tty_port_tty_wakeup(&port_ctx->port);
}
/*
 * .write hook: hand the buffer to the mux layer in MUX_TX_MAX_SIZE
 * slices.  Always reports the full length as written; flow control is
 * handled via gdm_tty_send_complete() wakeups.
 */
static ssize_t gdm_tty_write(struct tty_struct *tty, const u8 *buf, size_t len)
{
	struct gdm *port_ctx = tty->driver_data;
	size_t sent = 0;

	if (!gdm_tty_ready(port_ctx))
		return -ENODEV;

	while (sent < len) {
		size_t chunk = min_t(size_t, MUX_TX_MAX_SIZE, len - sent);

		port_ctx->tty_dev->send_func(port_ctx->tty_dev->priv_dev,
					     (void *)(buf + sent),
					     chunk,
					     port_ctx->index,
					     gdm_tty_send_complete,
					     port_ctx);
		sent += chunk;
	}

	return len;
}
/* .write_room hook: advertise a fixed budget while the port is usable. */
static unsigned int gdm_tty_write_room(struct tty_struct *tty)
{
	struct gdm *port_ctx = tty->driver_data;

	return gdm_tty_ready(port_ctx) ? WRITE_SIZE : 0;
}
/*
 * register_lte_tty_device() - create one tty port per mux channel.
 *
 * For each of the TTY_MAX_COUNT channels, allocates a struct gdm,
 * claims the first free minor in gdm_table and registers the port
 * device.  Finally queues MAX_ISSUE_NUM receive requests to prime the
 * RX path.  Returns 0, -ENOMEM, or -EINVAL when no minor is free.
 *
 * NOTE(review): a mid-loop failure does not unwind ports registered in
 * earlier iterations; confirm callers recover via
 * unregister_lte_tty_device() on error.
 */
int register_lte_tty_device(struct tty_dev *tty_dev, struct device *device)
{
	struct gdm *gdm;
	int i;
	int j;

	for (i = 0; i < TTY_MAX_COUNT; i++) {
		gdm = kmalloc(sizeof(*gdm), GFP_KERNEL);
		if (!gdm)
			return -ENOMEM;

		/* Find the first unused minor for this channel. */
		mutex_lock(&gdm_table_lock);
		for (j = 0; j < GDM_TTY_MINOR; j++) {
			if (!gdm_table[i][j])
				break;
		}

		if (j == GDM_TTY_MINOR) {
			kfree(gdm);
			mutex_unlock(&gdm_table_lock);
			return -EINVAL;
		}

		gdm_table[i][j] = gdm;
		mutex_unlock(&gdm_table_lock);

		tty_dev->gdm[i] = gdm;
		tty_port_init(&gdm->port);

		gdm->port.ops = &gdm_port_ops;
		gdm->index = i;
		gdm->minor = j;
		gdm->tty_dev = tty_dev;

		tty_port_register_device(&gdm->port, gdm_driver[i],
					 gdm->minor, device);
	}

	/* Prime the receive pipeline (gdm still points at the last port;
	 * all ports share the same tty_dev).
	 */
	for (i = 0; i < MAX_ISSUE_NUM; i++)
		gdm->tty_dev->recv_func(gdm->tty_dev->priv_dev,
					gdm_tty_recv_complete);

	return 0;
}
/*
 * unregister_lte_tty_device() - tear down the ports created by
 * register_lte_tty_device().
 *
 * Clears each gdm_table slot, hangs up any attached tty, unregisters
 * the device and drops the port reference (freeing the struct gdm via
 * gdm_port_destruct() once the last user closes).
 */
void unregister_lte_tty_device(struct tty_dev *tty_dev)
{
	struct gdm *gdm;
	struct tty_struct *tty;
	int i;

	for (i = 0; i < TTY_MAX_COUNT; i++) {
		gdm = tty_dev->gdm[i];
		if (!gdm)
			continue;

		mutex_lock(&gdm_table_lock);
		gdm_table[gdm->index][gdm->minor] = NULL;
		mutex_unlock(&gdm_table_lock);

		tty = tty_port_tty_get(&gdm->port);
		if (tty) {
			tty_vhangup(tty);
			tty_kref_put(tty);
		}

		tty_unregister_device(gdm_driver[i], gdm->minor);
		tty_port_put(&gdm->port);
	}
}
static const struct tty_operations gdm_tty_ops = {
.install = gdm_tty_install,
.open = gdm_tty_open,
.close = gdm_tty_close,
.cleanup = gdm_tty_cleanup,
.hangup = gdm_tty_hangup,
.write = gdm_tty_write,
.write_room = gdm_tty_write_room,
};
int register_lte_tty_driver(void)
{
struct tty_driver *tty_driver;
int i;
int ret;
for (i = 0; i < TTY_MAX_COUNT; i++) {
tty_driver = tty_alloc_driver(GDM_TTY_MINOR,
TTY_DRIVER_REAL_RAW | TTY_DRIVER_DYNAMIC_DEV);
if (IS_ERR(tty_driver))
return PTR_ERR(tty_driver);
tty_driver->owner = THIS_MODULE;
tty_driver->driver_name = DRIVER_STRING[i];
tty_driver->name = DEVICE_STRING[i];
tty_driver->major = GDM_TTY_MAJOR;
tty_driver->type = TTY_DRIVER_TYPE_SERIAL;
tty_driver->subtype = SERIAL_TYPE_NORMAL;
tty_driver->init_termios = tty_std_termios;
tty_driver->init_termios.c_cflag = B9600 | CS8 | HUPCL | CLOCAL;
tty_driver->init_termios.c_lflag = ISIG | ICANON | IEXTEN;
tty_set_operations(tty_driver, &gdm_tty_ops);
ret = tty_register_driver(tty_driver);
if (ret) {
tty_driver_kref_put(tty_driver);
return ret;
}
gdm_driver[i] = tty_driver;
}
return ret;
}
/* Unregister and release every tty driver that was set up. */
void unregister_lte_tty_driver(void)
{
	int i;

	for (i = 0; i < TTY_MAX_COUNT; i++) {
		struct tty_driver *drv = gdm_driver[i];

		if (!drv)
			continue;

		tty_unregister_driver(drv);
		tty_driver_kref_put(drv);
	}
}
| linux-master | drivers/staging/gdm724x/gdm_tty.c |
// SPDX-License-Identifier: GPL-2.0
/* Copyright (c) 2012 GCT Semiconductor, Inc. All rights reserved. */
#include <linux/kernel.h>
#include "gdm_endian.h"
/* Convert a CPU-order u16 into the byte order the device negotiated. */
__dev16 gdm_cpu_to_dev16(u8 dev_ed, u16 x)
{
	return (dev_ed == ENDIANNESS_LITTLE) ?
		(__force __dev16)cpu_to_le16(x) :
		(__force __dev16)cpu_to_be16(x);
}
/* Convert a device-order 16-bit value into CPU byte order. */
u16 gdm_dev16_to_cpu(u8 dev_ed, __dev16 x)
{
	return (dev_ed == ENDIANNESS_LITTLE) ?
		le16_to_cpu((__force __le16)x) :
		be16_to_cpu((__force __be16)x);
}
/* Convert a CPU-order u32 into the byte order the device negotiated. */
__dev32 gdm_cpu_to_dev32(u8 dev_ed, u32 x)
{
	return (dev_ed == ENDIANNESS_LITTLE) ?
		(__force __dev32)cpu_to_le32(x) :
		(__force __dev32)cpu_to_be32(x);
}
/* Convert a device-order 32-bit value into CPU byte order. */
u32 gdm_dev32_to_cpu(u8 dev_ed, __dev32 x)
{
	return (dev_ed == ENDIANNESS_LITTLE) ?
		le32_to_cpu((__force __le32)x) :
		be32_to_cpu((__force __be32)x);
}
| linux-master | drivers/staging/gdm724x/gdm_endian.c |
// SPDX-License-Identifier: GPL-2.0
/* Copyright (c) 2012 GCT Semiconductor, Inc. All rights reserved. */
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/usb.h>
#include <linux/errno.h>
#include <linux/init.h>
#include <linux/tty.h>
#include <linux/tty_driver.h>
#include <linux/tty_flip.h>
#include <linux/slab.h>
#include <linux/usb/cdc.h>
#include "gdm_mux.h"
static u16 packet_type_for_tty_index[TTY_MAX_COUNT] = {0xF011, 0xF010};
#define USB_DEVICE_CDC_DATA(vid, pid) \
.match_flags = \
USB_DEVICE_ID_MATCH_DEVICE |\
USB_DEVICE_ID_MATCH_INT_CLASS |\
USB_DEVICE_ID_MATCH_INT_SUBCLASS,\
.idVendor = vid,\
.idProduct = pid,\
.bInterfaceClass = USB_CLASS_COMM,\
.bInterfaceSubClass = USB_CDC_SUBCLASS_ACM
static const struct usb_device_id id_table[] = {
{ USB_DEVICE_CDC_DATA(0x1076, 0x8000) }, /* GCT GDM7240 */
{ USB_DEVICE_CDC_DATA(0x1076, 0x8f00) }, /* GCT GDM7243 */
{ USB_DEVICE_CDC_DATA(0x1076, 0x9000) }, /* GCT GDM7243 */
{ USB_DEVICE_CDC_DATA(0x1d74, 0x2300) }, /* LGIT Phoenix */
{}
};
MODULE_DEVICE_TABLE(usb, id_table);
/* Map a MUX packet type onto its tty channel index, or -1 if unknown. */
static int packet_type_to_tty_index(u16 packet_type)
{
	int idx;

	for (idx = 0; idx < TTY_MAX_COUNT; idx++)
		if (packet_type_for_tty_index[idx] == packet_type)
			return idx;

	return -1;
}
/*
 * Allocate a TX descriptor with URB and buffer (GFP_ATOMIC: may run
 * under a spinlock).  Returns NULL on any allocation failure.
 *
 * NOTE(review): @len is unused — the buffer is always MUX_TX_MAX_SIZE.
 * Confirm callers never request more than that.
 */
static struct mux_tx *alloc_mux_tx(int len)
{
	struct mux_tx *t = kzalloc(sizeof(*t), GFP_ATOMIC);

	if (!t)
		return NULL;

	t->urb = usb_alloc_urb(0, GFP_ATOMIC);
	t->buf = kmalloc(MUX_TX_MAX_SIZE, GFP_ATOMIC);
	if (t->urb && t->buf)
		return t;

	usb_free_urb(t->urb);
	kfree(t->buf);
	kfree(t);
	return NULL;
}
/* Release a TX descriptor and its URB/buffer; NULL is a no-op. */
static void free_mux_tx(struct mux_tx *t)
{
	if (!t)
		return;

	usb_free_urb(t->urb);
	kfree(t->buf);
	kfree(t);
}
/* Allocate an RX descriptor with URB and MUX_RX_MAX_SIZE buffer.
 * Returns NULL on any allocation failure.
 */
static struct mux_rx *alloc_mux_rx(void)
{
	struct mux_rx *r = kzalloc(sizeof(*r), GFP_KERNEL);

	if (!r)
		return NULL;

	r->urb = usb_alloc_urb(0, GFP_KERNEL);
	r->buf = kmalloc(MUX_RX_MAX_SIZE, GFP_KERNEL);
	if (r->urb && r->buf)
		return r;

	usb_free_urb(r->urb);
	kfree(r->buf);
	kfree(r);
	return NULL;
}
/* Release an RX descriptor and its URB/buffer; NULL is a no-op. */
static void free_mux_rx(struct mux_rx *r)
{
	if (!r)
		return;

	usb_free_urb(r->urb);
	kfree(r->buf);
	kfree(r);
}
/* Pop the most recently returned RX descriptor off the free list, or
 * NULL when the pool is exhausted.
 */
static struct mux_rx *get_rx_struct(struct rx_cxt *rx)
{
	struct mux_rx *r = NULL;
	unsigned long flags;

	spin_lock_irqsave(&rx->free_list_lock, flags);
	if (!list_empty(&rx->rx_free_list)) {
		r = list_last_entry(&rx->rx_free_list, struct mux_rx,
				    free_list);
		list_del(&r->free_list);
	}
	spin_unlock_irqrestore(&rx->free_list_lock, flags);

	return r;
}
/* Return an RX descriptor to the tail of the free list. */
static void put_rx_struct(struct rx_cxt *rx, struct mux_rx *r)
{
	unsigned long flags;

	spin_lock_irqsave(&rx->free_list_lock, flags);
	list_add_tail(&r->free_list, &rx->rx_free_list);
	spin_unlock_irqrestore(&rx->free_list_lock, flags);
}
/*
 * up_to_host() - parse one received URB buffer into MUX packets and
 * deliver each payload to the tty layer via r->callback.
 *
 * Packets are 4-byte aligned; r->offset allows resuming from the point
 * where a previous delivery attempt hit TO_HOST_BUFFER_REQUEST_FAIL.
 * Returns the last callback result, or TO_HOST_INVALID_PACKET when the
 * buffer fails validation before any delivery.
 */
static int up_to_host(struct mux_rx *r)
{
	struct mux_dev *mux_dev = r->mux_dev;
	struct mux_pkt_header *mux_header;
	unsigned int start_flag;
	unsigned int payload_size;
	unsigned short packet_type;
	int total_len;
	u32 packet_size_sum = r->offset;
	int index;
	int ret = TO_HOST_INVALID_PACKET;
	int len = r->len;

	while (1) {
		mux_header = (struct mux_pkt_header *)(r->buf +
						       packet_size_sum);
		start_flag = __le32_to_cpu(mux_header->start_flag);
		payload_size = __le32_to_cpu(mux_header->payload_size);
		packet_type = __le16_to_cpu(mux_header->packet_type);

		if (start_flag != START_FLAG) {
			pr_err("invalid START_FLAG %x\n", start_flag);
			break;
		}

		/* Packets are padded to a 4-byte boundary on the wire. */
		total_len = ALIGN(MUX_HEADER_SIZE + payload_size, 4);

		if (len - packet_size_sum < total_len) {
			pr_err("invalid payload : %d %d %04x\n",
			       payload_size, len, packet_type);
			break;
		}

		index = packet_type_to_tty_index(packet_type);
		if (index < 0) {
			pr_err("invalid index %d\n", index);
			break;
		}

		ret = r->callback(mux_header->data,
				  payload_size,
				  index,
				  mux_dev->tty_dev,
				  RECV_PACKET_PROCESS_CONTINUE
				  );
		if (ret == TO_HOST_BUFFER_REQUEST_FAIL) {
			/* Remember where to resume on the retry. */
			r->offset += packet_size_sum;
			break;
		}

		packet_size_sum += total_len;
		/* No room for another full header: signal completion so
		 * the receive pipeline is re-armed.
		 */
		if (len - packet_size_sum <= MUX_HEADER_SIZE + 2) {
			ret = r->callback(NULL,
					  0,
					  index,
					  mux_dev->tty_dev,
					  RECV_PACKET_PROCESS_COMPLETE
					  );
			break;
		}
	}

	return ret;
}
/*
 * do_rx() - RX worker: drain the to_host list, pushing each completed
 * URB buffer up through up_to_host().
 *
 * On TO_HOST_BUFFER_REQUEST_FAIL the descriptor is NOT returned to the
 * free pool (it holds undelivered data); all other results recycle it.
 */
static void do_rx(struct work_struct *work)
{
	struct mux_dev *mux_dev =
		container_of(work, struct mux_dev, work_rx.work);
	struct mux_rx *r;
	struct rx_cxt *rx = &mux_dev->rx;
	unsigned long flags;
	int ret = 0;

	while (1) {
		spin_lock_irqsave(&rx->to_host_lock, flags);
		if (list_empty(&rx->to_host_list)) {
			spin_unlock_irqrestore(&rx->to_host_lock, flags);
			break;
		}
		r = list_entry(rx->to_host_list.next, struct mux_rx,
			       to_host_list);
		list_del(&r->to_host_list);
		spin_unlock_irqrestore(&rx->to_host_lock, flags);

		ret = up_to_host(r);
		if (ret == TO_HOST_BUFFER_REQUEST_FAIL)
			pr_err("failed to send mux data to host\n");
		else
			put_rx_struct(rx, r);
	}
}
/*
 * Unlink @r from the submit list if it is still queued.  The full walk
 * (rather than an unconditional list_del) guards against the entry
 * having already been removed by suspend/release paths.
 */
static void remove_rx_submit_list(struct mux_rx *r, struct rx_cxt *rx)
{
	unsigned long flags;
	struct mux_rx *r_remove, *r_remove_next;

	spin_lock_irqsave(&rx->submit_list_lock, flags);
	list_for_each_entry_safe(r_remove, r_remove_next, &rx->rx_submit_list,
				 rx_submit_list) {
		if (r == r_remove)
			list_del(&r->rx_submit_list);
	}
	spin_unlock_irqrestore(&rx->submit_list_lock, flags);
}
/*
 * URB completion handler (interrupt context): dequeue from the submit
 * list, then either recycle the descriptor on error or queue it for
 * the do_rx() worker on success.
 */
static void gdm_mux_rcv_complete(struct urb *urb)
{
	struct mux_rx *r = urb->context;
	struct mux_dev *mux_dev = r->mux_dev;
	struct rx_cxt *rx = &mux_dev->rx;
	unsigned long flags;

	remove_rx_submit_list(r, rx);

	if (urb->status) {
		/* Errors are expected while suspended; only log otherwise. */
		if (mux_dev->usb_state == PM_NORMAL)
			dev_err(&urb->dev->dev, "%s: urb status error %d\n",
				__func__, urb->status);
		put_rx_struct(rx, r);
	} else {
		r->len = r->urb->actual_length;
		spin_lock_irqsave(&rx->to_host_lock, flags);
		list_add_tail(&r->to_host_list, &rx->to_host_list);
		schedule_work(&mux_dev->work_rx.work);
		spin_unlock_irqrestore(&rx->to_host_lock, flags);
	}
}
/*
 * gdm_mux_recv() - queue one bulk-IN receive request.
 *
 * @cb is invoked from the RX worker for every MUX packet in the
 * completed buffer; it is also cached in mux_dev->rx_cb so resume can
 * re-arm the pipeline.  Returns 0 or a negative errno.
 */
static int gdm_mux_recv(void *priv_dev,
			int (*cb)(void *data, int len, int tty_index,
				  struct tty_dev *tty_dev, int complete))
{
	struct mux_dev *mux_dev = priv_dev;
	struct usb_device *usbdev = mux_dev->usbdev;
	struct mux_rx *r;
	struct rx_cxt *rx = &mux_dev->rx;
	unsigned long flags;
	int ret;

	if (!usbdev) {
		pr_err("device is disconnected\n");
		return -ENODEV;
	}

	r = get_rx_struct(rx);
	if (!r) {
		pr_err("get_rx_struct fail\n");
		return -ENOMEM;
	}

	r->offset = 0;
	r->mux_dev = (void *)mux_dev;
	r->callback = cb;
	mux_dev->rx_cb = cb;

	/* Bulk-IN endpoint 6 (0x86) carries the muxed tty data. */
	usb_fill_bulk_urb(r->urb,
			  usbdev,
			  usb_rcvbulkpipe(usbdev, 0x86),
			  r->buf,
			  MUX_RX_MAX_SIZE,
			  gdm_mux_rcv_complete,
			  r);

	/* Track the URB before submission so suspend can kill it. */
	spin_lock_irqsave(&rx->submit_list_lock, flags);
	list_add_tail(&r->rx_submit_list, &rx->rx_submit_list);
	spin_unlock_irqrestore(&rx->submit_list_lock, flags);

	ret = usb_submit_urb(r->urb, GFP_KERNEL);

	if (ret) {
		spin_lock_irqsave(&rx->submit_list_lock, flags);
		list_del(&r->rx_submit_list);
		spin_unlock_irqrestore(&rx->submit_list_lock, flags);

		put_rx_struct(rx, r);

		pr_err("usb_submit_urb ret=%d\n", ret);
	}

	usb_mark_last_busy(usbdev);

	return ret;
}
/* Bulk-OUT completion: notify the sender (unless the URB was unlinked)
 * and release the TX descriptor.
 */
static void gdm_mux_send_complete(struct urb *urb)
{
	struct mux_tx *t = urb->context;

	if (urb->status == -ECONNRESET) {
		dev_info(&urb->dev->dev, "CONNRESET\n");
		free_mux_tx(t);
		return;
	}

	if (t->callback)
		t->callback(t->cb_data);

	free_mux_tx(t);
}
/*
 * gdm_mux_send() - frame @data with a MUX header and submit it on the
 * bulk-OUT endpoint.
 *
 * @cb/@cb_data are invoked from URB completion.  Returns 0 or a
 * negative errno.  The write_lock serializes header construction and
 * submission, which is why allocation/submission use GFP_ATOMIC.
 *
 * NOTE(review): the static seq_num is only protected by write_lock
 * because every caller goes through this path — confirm.
 */
static int gdm_mux_send(void *priv_dev, void *data, int len, int tty_index,
			void (*cb)(void *data), void *cb_data)
{
	struct mux_dev *mux_dev = priv_dev;
	struct usb_device *usbdev = mux_dev->usbdev;
	struct mux_pkt_header *mux_header;
	struct mux_tx *t = NULL;
	static u32 seq_num = 1;
	int total_len;
	int ret;
	unsigned long flags;

	/* Wake an autosuspended device before touching the endpoint. */
	if (mux_dev->usb_state == PM_SUSPEND) {
		ret = usb_autopm_get_interface(mux_dev->intf);
		if (!ret)
			usb_autopm_put_interface(mux_dev->intf);
	}

	spin_lock_irqsave(&mux_dev->write_lock, flags);

	total_len = ALIGN(MUX_HEADER_SIZE + len, 4);

	t = alloc_mux_tx(total_len);
	if (!t) {
		pr_err("alloc_mux_tx fail\n");
		spin_unlock_irqrestore(&mux_dev->write_lock, flags);
		return -ENOMEM;
	}

	mux_header = (struct mux_pkt_header *)t->buf;
	mux_header->start_flag = __cpu_to_le32(START_FLAG);
	mux_header->seq_num = __cpu_to_le32(seq_num++);
	mux_header->payload_size = __cpu_to_le32((u32)len);
	mux_header->packet_type = __cpu_to_le16(packet_type_for_tty_index[tty_index]);

	memcpy(t->buf + MUX_HEADER_SIZE, data, len);
	/* Zero the alignment padding so no stale bytes hit the wire. */
	memset(t->buf + MUX_HEADER_SIZE + len, 0,
	       total_len - MUX_HEADER_SIZE - len);

	t->len = total_len;
	t->callback = cb;
	t->cb_data = cb_data;

	usb_fill_bulk_urb(t->urb,
			  usbdev,
			  usb_sndbulkpipe(usbdev, 5),
			  t->buf,
			  total_len,
			  gdm_mux_send_complete,
			  t);

	ret = usb_submit_urb(t->urb, GFP_ATOMIC);

	spin_unlock_irqrestore(&mux_dev->write_lock, flags);

	if (ret)
		pr_err("usb_submit_urb Error: %d\n", ret);

	usb_mark_last_busy(usbdev);

	return ret;
}
/*
 * gdm_mux_send_control() - issue an ACM class control request.
 *
 * wIndex is hard-coded to 2, the control interface number this driver
 * binds to (see gdm_mux_probe()).  Returns 0 on success or the negative
 * errno from usb_control_msg().
 */
static int gdm_mux_send_control(void *priv_dev, int request, int value,
				void *buf, int len)
{
	struct mux_dev *mux_dev = priv_dev;
	struct usb_device *usbdev = mux_dev->usbdev;
	int ret;

	ret = usb_control_msg(usbdev,
			      usb_sndctrlpipe(usbdev, 0),
			      request,
			      USB_RT_ACM,
			      value,
			      2,
			      buf,
			      len,
			      5000
			     );

	if (ret < 0)
		pr_err("usb_control_msg error: %d\n", ret);

	/* Collapse the positive transferred-byte count to 0 (success). */
	return min(ret, 0);
}
/*
 * release_usb() - stop RX activity and free every RX descriptor.
 *
 * Kills outstanding URBs (dropping submit_list_lock around the
 * sleeping usb_kill_urb()), then drains both the free pool and the
 * to_host backlog belonging to @mux_dev.
 */
static void release_usb(struct mux_dev *mux_dev)
{
	struct rx_cxt *rx = &mux_dev->rx;
	struct mux_rx *r, *r_next;
	unsigned long flags;

	cancel_delayed_work(&mux_dev->work_rx);

	spin_lock_irqsave(&rx->submit_list_lock, flags);
	list_for_each_entry_safe(r, r_next, &rx->rx_submit_list,
				 rx_submit_list) {
		spin_unlock_irqrestore(&rx->submit_list_lock, flags);
		usb_kill_urb(r->urb);
		spin_lock_irqsave(&rx->submit_list_lock, flags);
	}
	spin_unlock_irqrestore(&rx->submit_list_lock, flags);

	spin_lock_irqsave(&rx->free_list_lock, flags);
	list_for_each_entry_safe(r, r_next, &rx->rx_free_list, free_list) {
		list_del(&r->free_list);
		free_mux_rx(r);
	}
	spin_unlock_irqrestore(&rx->free_list_lock, flags);

	spin_lock_irqsave(&rx->to_host_lock, flags);
	list_for_each_entry_safe(r, r_next, &rx->to_host_list, to_host_list) {
		if (r->mux_dev == (void *)mux_dev) {
			list_del(&r->to_host_list);
			free_mux_rx(r);
		}
	}
	spin_unlock_irqrestore(&rx->to_host_lock, flags);
}
/*
 * init_usb() - initialize locks/lists and pre-allocate the RX pool
 * (twice MAX_ISSUE_NUM descriptors).
 *
 * Returns 0 or -ENOMEM.  On partial allocation failure the descriptors
 * already on the free list remain; the caller is expected to clean up
 * via release_usb().
 */
static int init_usb(struct mux_dev *mux_dev)
{
	struct mux_rx *r;
	struct rx_cxt *rx = &mux_dev->rx;
	int ret = 0;
	int i;

	spin_lock_init(&mux_dev->write_lock);
	INIT_LIST_HEAD(&rx->to_host_list);
	INIT_LIST_HEAD(&rx->rx_submit_list);
	INIT_LIST_HEAD(&rx->rx_free_list);
	spin_lock_init(&rx->to_host_lock);
	spin_lock_init(&rx->submit_list_lock);
	spin_lock_init(&rx->free_list_lock);

	for (i = 0; i < MAX_ISSUE_NUM * 2; i++) {
		r = alloc_mux_rx();
		if (!r) {
			ret = -ENOMEM;
			break;
		}

		list_add(&r->free_list, &rx->rx_free_list);
	}

	INIT_DELAYED_WORK(&mux_dev->work_rx, do_rx);

	return ret;
}
/*
 * gdm_mux_probe() - bind to the CDC-ACM control interface (number 2)
 * and register the muxed tty ports.
 *
 * NOTE(review): the TTY_MAX_COUNT loop assigning mux_dev->tty_dev is
 * redundant (same assignment each iteration) — likely a leftover from
 * a per-channel layout.  The err_unregister_tty label also handles the
 * partial registrations register_lte_tty_device() may leave behind.
 */
static int gdm_mux_probe(struct usb_interface *intf,
			 const struct usb_device_id *id)
{
	struct mux_dev *mux_dev;
	struct tty_dev *tty_dev;
	u16 idVendor, idProduct;
	int bInterfaceNumber;
	int ret;
	int i;
	struct usb_device *usbdev = interface_to_usbdev(intf);

	bInterfaceNumber = intf->cur_altsetting->desc.bInterfaceNumber;

	idVendor = __le16_to_cpu(usbdev->descriptor.idVendor);
	idProduct = __le16_to_cpu(usbdev->descriptor.idProduct);

	pr_info("mux vid = 0x%04x pid = 0x%04x\n", idVendor, idProduct);

	/* Only interface 2 carries the mux protocol. */
	if (bInterfaceNumber != 2)
		return -ENODEV;

	mux_dev = kzalloc(sizeof(*mux_dev), GFP_KERNEL);
	if (!mux_dev)
		return -ENOMEM;

	tty_dev = kzalloc(sizeof(*tty_dev), GFP_KERNEL);
	if (!tty_dev) {
		ret = -ENOMEM;
		goto err_free_mux;
	}

	mux_dev->usbdev = usbdev;
	mux_dev->control_intf = intf;

	ret = init_usb(mux_dev);
	if (ret)
		goto err_free_usb;

	tty_dev->priv_dev = (void *)mux_dev;
	tty_dev->send_func = gdm_mux_send;
	tty_dev->recv_func = gdm_mux_recv;
	tty_dev->send_control = gdm_mux_send_control;

	ret = register_lte_tty_device(tty_dev, &intf->dev);
	if (ret)
		goto err_unregister_tty;

	for (i = 0; i < TTY_MAX_COUNT; i++)
		mux_dev->tty_dev = tty_dev;

	mux_dev->intf = intf;
	mux_dev->usb_state = PM_NORMAL;

	usb_get_dev(usbdev);
	usb_set_intfdata(intf, tty_dev);

	return 0;

err_unregister_tty:
	unregister_lte_tty_device(tty_dev);
err_free_usb:
	release_usb(mux_dev);
	kfree(tty_dev);
err_free_mux:
	kfree(mux_dev);

	return ret;
}
/*
 * gdm_mux_disconnect() - unwind gdm_mux_probe(): stop RX, drop the tty
 * ports, free contexts and release the device reference.
 */
static void gdm_mux_disconnect(struct usb_interface *intf)
{
	struct tty_dev *tty_dev;
	struct mux_dev *mux_dev;
	struct usb_device *usbdev = interface_to_usbdev(intf);

	tty_dev = usb_get_intfdata(intf);

	mux_dev = tty_dev->priv_dev;

	release_usb(mux_dev);

	unregister_lte_tty_device(tty_dev);

	kfree(mux_dev);
	kfree(tty_dev);

	usb_put_dev(usbdev);
}
/*
 * gdm_mux_suspend() - USB .suspend callback: flush the RX worker, mark
 * the device PM_SUSPEND and kill all submitted RX URBs.
 *
 * Returns 0 or -1 when not in PM_NORMAL (non-zero fails the suspend).
 */
static int gdm_mux_suspend(struct usb_interface *intf, pm_message_t pm_msg)
{
	struct tty_dev *tty_dev;
	struct mux_dev *mux_dev;
	struct rx_cxt *rx;
	struct mux_rx *r, *r_next;
	unsigned long flags;

	tty_dev = usb_get_intfdata(intf);
	mux_dev = tty_dev->priv_dev;
	rx = &mux_dev->rx;

	cancel_work_sync(&mux_dev->work_rx.work);

	if (mux_dev->usb_state != PM_NORMAL) {
		dev_err(intf->usb_dev, "usb suspend - invalid state\n");
		return -1;
	}

	mux_dev->usb_state = PM_SUSPEND;

	/* usb_kill_urb() sleeps, so drop the lock around each kill. */
	spin_lock_irqsave(&rx->submit_list_lock, flags);
	list_for_each_entry_safe(r, r_next, &rx->rx_submit_list,
				 rx_submit_list) {
		spin_unlock_irqrestore(&rx->submit_list_lock, flags);
		usb_kill_urb(r->urb);
		spin_lock_irqsave(&rx->submit_list_lock, flags);
	}
	spin_unlock_irqrestore(&rx->submit_list_lock, flags);

	return 0;
}
/*
 * USB .resume/.reset_resume callback: flip back to PM_NORMAL and refill
 * the receive pipeline that suspend drained.  Returns 0 or -1 when the
 * device was not suspended.
 */
static int gdm_mux_resume(struct usb_interface *intf)
{
	struct tty_dev *tty_dev = usb_get_intfdata(intf);
	struct mux_dev *mux_dev = tty_dev->priv_dev;
	u8 i;

	if (mux_dev->usb_state != PM_SUSPEND) {
		dev_err(intf->usb_dev, "usb resume - invalid state\n");
		return -1;
	}

	mux_dev->usb_state = PM_NORMAL;

	for (i = 0; i < MAX_ISSUE_NUM; i++)
		gdm_mux_recv(mux_dev, mux_dev->rx_cb);

	return 0;
}
static struct usb_driver gdm_mux_driver = {
.name = "gdm_mux",
.probe = gdm_mux_probe,
.disconnect = gdm_mux_disconnect,
.id_table = id_table,
.supports_autosuspend = 1,
.suspend = gdm_mux_suspend,
.resume = gdm_mux_resume,
.reset_resume = gdm_mux_resume,
};
/* Module entry: register the tty driver first, then the USB driver. */
static int __init gdm_usb_mux_init(void)
{
	int err = register_lte_tty_driver();

	if (err)
		return err;

	return usb_register(&gdm_mux_driver);
}
/* Module exit: reverse of init — drop the USB driver, then the tty
 * driver.
 */
static void __exit gdm_usb_mux_exit(void)
{
	usb_deregister(&gdm_mux_driver);

	unregister_lte_tty_driver();
}
module_init(gdm_usb_mux_init);
module_exit(gdm_usb_mux_exit);
MODULE_DESCRIPTION("GCT LTE TTY Device Driver");
MODULE_LICENSE("GPL");
| linux-master | drivers/staging/gdm724x/gdm_mux.c |
// SPDX-License-Identifier: GPL-2.0
/* Copyright (c) 2012 GCT Semiconductor, Inc. All rights reserved. */
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
#include <linux/export.h>
#include <linux/mutex.h>
#include <linux/etherdevice.h>
#include <linux/netlink.h>
#include <asm/byteorder.h>
#include <net/sock.h>
#include "netlink_k.h"
static DEFINE_MUTEX(netlink_mutex);
#define ND_MAX_GROUP 30
#define ND_IFINDEX_LEN sizeof(int)
#define ND_NLMSG_SPACE(len) (NLMSG_SPACE(len) + ND_IFINDEX_LEN)
#define ND_NLMSG_DATA(nlh) ((void *)((char *)NLMSG_DATA(nlh) + \
ND_IFINDEX_LEN))
#define ND_NLMSG_S_LEN(len) ((len) + ND_IFINDEX_LEN)
#define ND_NLMSG_R_LEN(nlh) ((nlh)->nlmsg_len - ND_IFINDEX_LEN)
#define ND_NLMSG_IFIDX(nlh) NLMSG_DATA(nlh)
#define ND_MAX_MSG_LEN (1024 * 32)
static void (*rcv_cb)(struct net_device *dev, u16 type, void *msg, int len);
/*
 * netlink_rcv_cb() - validate one incoming netlink message and hand its
 * payload to the registered callback.
 *
 * The message layout is: nlmsghdr | ifindex (int) | payload.  Length is
 * checked against both the skb and ND_MAX_MSG_LEN before any field is
 * read, and the target net_device is looked up by the embedded ifindex.
 */
static void netlink_rcv_cb(struct sk_buff *skb)
{
	struct nlmsghdr *nlh;
	struct net_device *dev;
	u32 mlen;
	void *msg;
	int ifindex;

	if (!rcv_cb) {
		pr_err("nl cb - unregistered\n");
		return;
	}

	if (skb->len < NLMSG_HDRLEN) {
		pr_err("nl cb - invalid skb length\n");
		return;
	}

	nlh = (struct nlmsghdr *)skb->data;

	if (skb->len < nlh->nlmsg_len || nlh->nlmsg_len > ND_MAX_MSG_LEN) {
		pr_err("nl cb - invalid length (%d,%d)\n",
		       skb->len, nlh->nlmsg_len);
		return;
	}

	memcpy(&ifindex, ND_NLMSG_IFIDX(nlh), ND_IFINDEX_LEN);
	msg = ND_NLMSG_DATA(nlh);
	mlen = ND_NLMSG_R_LEN(nlh);

	dev = dev_get_by_index(&init_net, ifindex);
	if (dev) {
		rcv_cb(dev, nlh->nlmsg_type, msg, mlen);
		dev_put(dev);
	} else {
		pr_err("nl cb - dev (%d) not found\n", ifindex);
	}
}
/* Netlink input hook: serialize all message handling through one mutex. */
static void netlink_rcv(struct sk_buff *skb)
{
	mutex_lock(&netlink_mutex);
	netlink_rcv_cb(skb);
	mutex_unlock(&netlink_mutex);
}
/*
 * Create the kernel netlink socket for @unit and install @cb as the
 * receive handler.  Returns the socket or NULL on failure (in which
 * case the previous callback, if any, is left untouched).
 */
struct sock *netlink_init(int unit,
			  void (*cb)(struct net_device *dev, u16 type,
				     void *msg, int len))
{
	struct netlink_kernel_cfg cfg = {
		.input = netlink_rcv,
	};
	struct sock *nlsock = netlink_kernel_create(&init_net, unit, &cfg);

	if (nlsock)
		rcv_cb = cb;

	return nlsock;
}
/*
 * netlink_send() - broadcast @msg to netlink group @group + 1.
 *
 * Returns @len on success, -EAGAIN when a listener appeared but the
 * broadcast still failed with -ESRCH, or a negative errno.
 *
 * Fix: the return of nlmsg_put() was not checked; on failure the
 * subsequent memcpy(NLMSG_DATA(NULL), ...) would dereference a NULL
 * pointer and the skb would leak.  Check it and free the skb.
 */
int netlink_send(struct sock *sock, int group, u16 type, void *msg, int len,
		 struct net_device *dev)
{
	static u32 seq;
	struct sk_buff *skb = NULL;
	struct nlmsghdr *nlh;
	int ret = 0;

	if (group > ND_MAX_GROUP)
		return -EINVAL;

	if (!netlink_has_listeners(sock, group + 1))
		return -ESRCH;

	skb = alloc_skb(NLMSG_SPACE(len), GFP_ATOMIC);
	if (!skb)
		return -ENOMEM;

	seq++;
	nlh = nlmsg_put(skb, 0, seq, type, len, 0);
	if (!nlh) {
		/* Should not happen with NLMSG_SPACE(len) allocated. */
		kfree_skb(skb);
		return -EMSGSIZE;
	}
	memcpy(NLMSG_DATA(nlh), msg, len);
	NETLINK_CB(skb).portid = 0;
	NETLINK_CB(skb).dst_group = 0;

	ret = netlink_broadcast(sock, skb, 0, group + 1, GFP_ATOMIC);
	if (!ret)
		return len;

	if (ret != -ESRCH)
		netdev_err(dev, "nl broadcast g=%d, t=%d, l=%d, r=%d\n",
			   group, type, len, ret);
	else if (netlink_has_listeners(sock, group + 1))
		return -EAGAIN;

	return ret;
}
| linux-master | drivers/staging/gdm724x/netlink_k.c |
// SPDX-License-Identifier: GPL-2.0
/*
* Driver for KeyStream wireless LAN cards.
*
* Copyright (C) 2005-2008 KeyStream Corp.
* Copyright (C) 2009 Renesas Technology Corp.
*/
#include <crypto/hash.h>
#include <linux/circ_buf.h>
#include <linux/if_arp.h>
#include <net/iw_handler.h>
#include <uapi/linux/llc.h>
#include "eap_packet.h"
#include "ks_wlan.h"
#include "ks_hostif.h"
#define MICHAEL_MIC_KEY_LEN 8
#define MICHAEL_MIC_LEN 8
/* Advance the SME event queue head with wrap-around. */
static inline void inc_smeqhead(struct ks_wlan_private *priv)
{
	unsigned int next = priv->sme_i.qhead + 1;

	priv->sme_i.qhead = next % SME_EVENT_BUFF_SIZE;
}
/* Advance the SME event queue tail with wrap-around. */
static inline void inc_smeqtail(struct ks_wlan_private *priv)
{
	unsigned int next = priv->sme_i.qtail + 1;

	priv->sme_i.qtail = next % SME_EVENT_BUFF_SIZE;
}
/* Number of queued SME events up to the end of the ring buffer. */
static inline unsigned int cnt_smeqbody(struct ks_wlan_private *priv)
{
	unsigned int head = priv->sme_i.qhead;
	unsigned int tail = priv->sme_i.qtail;

	return CIRC_CNT_TO_END(head, tail, SME_EVENT_BUFF_SIZE);
}
/* Consume one byte from the RX stream; the caller must have verified
 * rx_size beforehand.
 */
static inline u8 get_byte(struct ks_wlan_private *priv)
{
	u8 value = *priv->rxp;

	priv->rxp++;
	priv->rx_size--;
	return value;
}
/* Consume a little-endian 16-bit field from the RX stream. */
static inline u16 get_word(struct ks_wlan_private *priv)
{
	u16 lo = get_byte(priv);
	u16 hi = get_byte(priv);

	return lo | (hi << 8);
}
/* Consume a little-endian 32-bit field from the RX stream. */
static inline u32 get_dword(struct ks_wlan_private *priv)
{
	u32 b0 = get_byte(priv);
	u32 b1 = get_byte(priv);
	u32 b2 = get_byte(priv);
	u32 b3 = get_byte(priv);

	return b0 | (b1 << 8) | (b2 << 16) | (b3 << 24);
}
/*
 * ks_wlan_hw_wakeup_task() - work item that wakes the hardware from
 * power-save.
 *
 * If the device is in PS_SNOOZE, issues a wakeup request and waits up
 * to 20 ms for the completion; on timeout/interrupt the work is
 * rescheduled to retry.
 */
static void ks_wlan_hw_wakeup_task(struct work_struct *work)
{
	struct ks_wlan_private *priv;
	int ps_status;
	long time_left;

	priv = container_of(work, struct ks_wlan_private, wakeup_work);
	ps_status = atomic_read(&priv->psstatus.status);

	if (ps_status == PS_SNOOZE) {
		ks_wlan_hw_wakeup_request(priv);
		time_left = wait_for_completion_interruptible_timeout(&priv->psstatus.wakeup_wait,
								      msecs_to_jiffies(20));
		if (time_left <= 0) {
			netdev_dbg(priv->net_dev, "wake up timeout or interrupted !!!\n");
			/* Retry the wakeup on the workqueue. */
			schedule_work(&priv->wakeup_work);
			return;
		}
	}
}
/* Request SME power management while associated; otherwise mark the
 * device ready.
 */
static void ks_wlan_do_power_save(struct ks_wlan_private *priv)
{
	if (!is_connect_status(priv->connect_status)) {
		priv->dev_state = DEVICE_STATE_READY;
		return;
	}

	hostif_sme_enqueue(priv, SME_POW_MNGMT_REQUEST);
}
/*
 * get_current_ap() - snapshot the associated AP's parameters from a
 * link notification into priv->current_ap.
 *
 * Copies BSSID, SSID (from the configured profile), rates, channel,
 * signal data and the RSN/WPA IE matching the negotiated WPA version,
 * then emits an SIOCGIWAP wireless event.  Returns 0, or -EPERM when
 * the driver believes it is disconnected (current_ap is cleared).
 */
static
int get_current_ap(struct ks_wlan_private *priv, struct link_ap_info *ap_info)
{
	struct local_ap *ap;
	union iwreq_data wrqu;
	struct net_device *netdev = priv->net_dev;
	u8 size;

	ap = &priv->current_ap;

	if (is_disconnect_status(priv->connect_status)) {
		memset(ap, 0, sizeof(struct local_ap));
		return -EPERM;
	}

	ether_addr_copy(ap->bssid, ap_info->bssid);
	/* SSID comes from the configured profile, not the frame. */
	memcpy(ap->ssid.body, priv->reg.ssid.body,
	       priv->reg.ssid.size);
	ap->ssid.size = priv->reg.ssid.size;
	memcpy(ap->rate_set.body, ap_info->rate_set.body,
	       ap_info->rate_set.size);
	ap->rate_set.size = ap_info->rate_set.size;
	/* Append the extended rates after the basic rate set. */
	if (ap_info->ext_rate_set.size != 0) {
		memcpy(&ap->rate_set.body[ap->rate_set.size],
		       ap_info->ext_rate_set.body,
		       ap_info->ext_rate_set.size);
		ap->rate_set.size += ap_info->ext_rate_set.size;
	}

	ap->channel = ap_info->ds_parameter.channel;
	ap->rssi = ap_info->rssi;
	ap->sq = ap_info->sq;
	ap->noise = ap_info->noise;
	ap->capability = le16_to_cpu(ap_info->capability);
	size = (ap_info->rsn.size <= RSN_IE_BODY_MAX) ?
		ap_info->rsn.size : RSN_IE_BODY_MAX;

	/* Store the IE that matches the negotiated WPA version only. */
	if ((ap_info->rsn_mode & RSN_MODE_WPA2) &&
	    (priv->wpa.version == IW_AUTH_WPA_VERSION_WPA2)) {
		ap->rsn_ie.id = RSN_INFO_ELEM_ID;
		ap->rsn_ie.size = size;
		memcpy(ap->rsn_ie.body, ap_info->rsn.body, size);
	} else if ((ap_info->rsn_mode & RSN_MODE_WPA) &&
		   (priv->wpa.version == IW_AUTH_WPA_VERSION_WPA)) {
		ap->wpa_ie.id = WPA_INFO_ELEM_ID;
		ap->wpa_ie.size = size;
		memcpy(ap->wpa_ie.body, ap_info->rsn.body, size);
	} else {
		ap->rsn_ie.id = 0;
		ap->rsn_ie.size = 0;
		ap->wpa_ie.id = 0;
		ap->wpa_ie.size = 0;
	}

	wrqu.data.length = 0;
	wrqu.data.flags = 0;
	wrqu.ap_addr.sa_family = ARPHRD_ETHER;
	if (is_connect_status(priv->connect_status)) {
		ether_addr_copy(wrqu.ap_addr.sa_data, priv->current_ap.bssid);
		netdev_dbg(priv->net_dev,
			   "IWEVENT: connect bssid=%pM\n",
			   wrqu.ap_addr.sa_data);
		wireless_send_event(netdev, SIOCGIWAP, &wrqu, NULL);
	}
	netdev_dbg(priv->net_dev, "Link AP\n"
		   "- bssid=%pM\n"
		   "- essid=%s\n"
		   "- rate_set=%02X,%02X,%02X,%02X,%02X,%02X,%02X,%02X\n"
		   "- channel=%d\n"
		   "- rssi=%d\n"
		   "- sq=%d\n"
		   "- capability=%04X\n"
		   "- rsn.mode=%d\n"
		   "- rsn.size=%d\n"
		   "- ext_rate_set_size=%d\n"
		   "- rate_set_size=%d\n",
		   ap->bssid,
		   &ap->ssid.body[0],
		   ap->rate_set.body[0], ap->rate_set.body[1],
		   ap->rate_set.body[2], ap->rate_set.body[3],
		   ap->rate_set.body[4], ap->rate_set.body[5],
		   ap->rate_set.body[6], ap->rate_set.body[7],
		   ap->channel, ap->rssi, ap->sq, ap->capability,
		   ap_info->rsn_mode, ap_info->rsn.size,
		   ap_info->ext_rate_set.size, ap_info->rate_set.size);

	return 0;
}
/* Copy an information element body out of @bp (id, len, data...),
 * clamping the advertised length to @max.  Returns the copied size.
 */
static u8 read_ie(unsigned char *bp, u8 max, u8 *body)
{
	u8 ie_len = bp[1];

	if (ie_len > max)
		ie_len = max;

	memcpy(body, &bp[2], ie_len);
	return ie_len;
}
/*
 * michael_mic() - compute the TKIP Michael MIC over an ethernet frame.
 *
 * @key:      8-byte Michael key (rx or tx direction).
 * @data:     frame starting with DA|SA (first 12 bytes) then payload.
 * @len:      total frame length; must be > 12.
 * @priority: QoS priority byte mixed into the MIC.
 * @result:   receives the 8-byte MIC.
 *
 * Returns 0 or a negative errno from the crypto API.
 */
static int
michael_mic(u8 *key, u8 *data, unsigned int len, u8 priority, u8 *result)
{
	u8 pad_data[4] = { priority, 0, 0, 0 };
	struct crypto_shash *tfm = NULL;
	struct shash_desc *desc = NULL;
	int ret;

	tfm = crypto_alloc_shash("michael_mic", 0, 0);
	if (IS_ERR(tfm)) {
		ret = PTR_ERR(tfm);
		goto err;
	}
	ret = crypto_shash_setkey(tfm, key, MICHAEL_MIC_KEY_LEN);
	if (ret < 0)
		goto err_free_tfm;
	desc = kmalloc(sizeof(*desc) + crypto_shash_descsize(tfm), GFP_KERNEL);
	if (!desc) {
		ret = -ENOMEM;
		goto err_free_tfm;
	}
	desc->tfm = tfm;

	ret = crypto_shash_init(desc);
	if (ret < 0)
		goto err_free_desc;

	// Compute the MIC value
	/*
	 * IEEE802.11i  page 47
	 * Figure 43g TKIP MIC processing format
	 * +--+--+--------+--+----+--+--+--+--+--+--+--+--+
	 * |6 |6 |1       |3 |M   |1 |1 |1 |1 |1 |1 |1 |1 | Octet
	 * +--+--+--------+--+----+--+--+--+--+--+--+--+--+
	 * |DA|SA|Priority|0 |Data|M0|M1|M2|M3|M4|M5|M6|M7|
	 * +--+--+--------+--+----+--+--+--+--+--+--+--+--+
	 */
	/* DA + SA (12 bytes), then priority + 3 zero pad bytes. */
	ret = crypto_shash_update(desc, data, 12);
	if (ret < 0)
		goto err_free_desc;

	ret = crypto_shash_update(desc, pad_data, 4);
	if (ret < 0)
		goto err_free_desc;

	ret = crypto_shash_finup(desc, data + 12, len - 12, result);

err_free_desc:
	/* Key schedule lives in the desc; wipe it. */
	kfree_sensitive(desc);

err_free_tfm:
	crypto_free_shash(tfm);

err:
	return ret;
}
/*
 * get_ap_information() - translate a firmware scan entry (@ap_info)
 * into the driver's struct local_ap representation.
 *
 * Walks the information-element blob, extracting SSID, (extended)
 * supported rates, RSN and vendor WPA IEs.  Always returns 0.
 *
 * NOTE(review): the IE walk trusts each element's length byte; a
 * malformed final element whose length extends past body_size would
 * advance bp beyond the buffer before the loop condition is rechecked
 * — confirm the firmware guarantees well-formed IEs.
 */
static
int get_ap_information(struct ks_wlan_private *priv, struct ap_info *ap_info,
		       struct local_ap *ap)
{
	unsigned char *bp;
	int bsize, offset;

	memset(ap, 0, sizeof(struct local_ap));
	ether_addr_copy(ap->bssid, ap_info->bssid);
	ap->rssi = ap_info->rssi;
	ap->sq = ap_info->sq;
	ap->noise = ap_info->noise;
	ap->capability = le16_to_cpu(ap_info->capability);
	ap->channel = ap_info->ch_info;

	bp = ap_info->body;
	bsize = le16_to_cpu(ap_info->body_size);
	offset = 0;

	while (bsize > offset) {
		switch (*bp) { /* Information Element ID */
		case WLAN_EID_SSID:
			ap->ssid.size = read_ie(bp, IEEE80211_MAX_SSID_LEN,
						ap->ssid.body);
			break;
		case WLAN_EID_SUPP_RATES:
		case WLAN_EID_EXT_SUPP_RATES:
			/* Accumulate both rate IEs, clamped to the array. */
			if ((*(bp + 1) + ap->rate_set.size) <=
			    RATE_SET_MAX_SIZE) {
				memcpy(&ap->rate_set.body[ap->rate_set.size],
				       bp + 2, *(bp + 1));
				ap->rate_set.size += *(bp + 1);
			} else {
				memcpy(&ap->rate_set.body[ap->rate_set.size],
				       bp + 2,
				       RATE_SET_MAX_SIZE - ap->rate_set.size);
				ap->rate_set.size +=
				    (RATE_SET_MAX_SIZE - ap->rate_set.size);
			}
			break;
		case WLAN_EID_RSN:
			ap->rsn_ie.id = *bp;
			ap->rsn_ie.size = read_ie(bp, RSN_IE_BODY_MAX,
						  ap->rsn_ie.body);
			break;
		case WLAN_EID_VENDOR_SPECIFIC: /* WPA */
			/* WPA OUI check */
			if (memcmp(bp + 2, CIPHER_ID_WPA_WEP40, 4) == 0) {
				ap->wpa_ie.id = *bp;
				ap->wpa_ie.size = read_ie(bp, RSN_IE_BODY_MAX,
							  ap->wpa_ie.body);
			}
			break;
		case WLAN_EID_DS_PARAMS:
		case WLAN_EID_FH_PARAMS:
		case WLAN_EID_CF_PARAMS:
		case WLAN_EID_TIM:
		case WLAN_EID_IBSS_PARAMS:
		case WLAN_EID_COUNTRY:
		case WLAN_EID_ERP_INFO:
			break;
		default:
			netdev_err(priv->net_dev,
				   "unknown Element ID=%d\n", *bp);
			break;
		}

		offset += 2;	/* id & size field */
		offset += *(bp + 1);	/* +size offset */
		bp += (*(bp + 1) + 2);	/* pointer update */
	}

	return 0;
}
/*
 * hostif_data_indication_wpa - TKIP Michael MIC check for a received frame.
 *
 * @auth_type selects the key slot: TYPE_PMK1/GMK1/GMK2 map to key index
 * auth_type - 1.  When the relevant cipher suite is TKIP and a key is
 * installed, the trailing MIC is stripped from the frame, recomputed over
 * the remaining payload with the RX MIC key, and compared.  On mismatch
 * the 802.11i MIC-failure countermeasure state is advanced and an
 * MLME-MICHAELMICFAILURE custom event is sent to user space.
 *
 * Returns 0 on success, -EINVAL on bad frame format or MIC failure,
 * or the error from michael_mic().
 */
static
int hostif_data_indication_wpa(struct ks_wlan_private *priv,
			       unsigned short auth_type)
{
	struct ether_hdr *eth_hdr;
	unsigned short eth_proto;
	unsigned char recv_mic[MICHAEL_MIC_LEN];
	char buf[128];
	unsigned long now;
	struct mic_failure *mic_failure;
	u8 mic[MICHAEL_MIC_LEN];
	union iwreq_data wrqu;
	unsigned int key_index = auth_type - 1;
	struct wpa_key *key = &priv->wpa.key[key_index];
	eth_hdr = (struct ether_hdr *)(priv->rxp);
	eth_proto = ntohs(eth_hdr->h_proto);
	/* SNAP header sanity check */
	if (eth_hdr->h_dest_snap != eth_hdr->h_source_snap) {
		netdev_err(priv->net_dev, "invalid data format\n");
		priv->nstats.rx_errors++;
		return -EINVAL;
	}
	if (((auth_type == TYPE_PMK1 &&
	      priv->wpa.pairwise_suite == IW_AUTH_CIPHER_TKIP) ||
	     (auth_type == TYPE_GMK1 &&
	      priv->wpa.group_suite == IW_AUTH_CIPHER_TKIP) ||
	     (auth_type == TYPE_GMK2 &&
	      priv->wpa.group_suite == IW_AUTH_CIPHER_TKIP)) &&
	    key->key_len) {
		int ret;
		netdev_dbg(priv->net_dev, "TKIP: protocol=%04X: size=%u\n",
			   eth_proto, priv->rx_size);
		/* MIC save */
		memcpy(&recv_mic[0],
		       (priv->rxp) + ((priv->rx_size) - sizeof(recv_mic)),
		       sizeof(recv_mic));
		priv->rx_size = priv->rx_size - sizeof(recv_mic);
		ret = michael_mic(key->rx_mic_key, priv->rxp, priv->rx_size,
				  0, mic);
		if (ret < 0)
			return ret;
		if (memcmp(mic, recv_mic, sizeof(mic)) != 0) {
			now = jiffies;
			mic_failure = &priv->wpa.mic_failure;
			/* MIC FAILURE */
			/* reset the counter if the last failure is >= 60s old */
			if (mic_failure->last_failure_time &&
			    (now - mic_failure->last_failure_time) / HZ >= 60) {
				mic_failure->failure = 0;
			}
			netdev_err(priv->net_dev, "MIC FAILURE\n");
			if (mic_failure->failure == 0) {
				mic_failure->failure = 1;
				mic_failure->counter = 0;
			} else if (mic_failure->failure == 1) {
				/* second failure within 60s: record interval */
				mic_failure->failure = 2;
				mic_failure->counter =
					(u16)((now - mic_failure->last_failure_time) / HZ);
				/* range 1-60 */
				if (!mic_failure->counter)
					mic_failure->counter = 1;
			}
			priv->wpa.mic_failure.last_failure_time = now;
			/* needed parameters: count, keyid, key type, TSC */
			sprintf(buf,
				"MLME-MICHAELMICFAILURE.indication(keyid=%d %scast addr=%pM)",
				key_index,
				eth_hdr->h_dest[0] & 0x01 ? "broad" : "uni",
				eth_hdr->h_source);
			memset(&wrqu, 0, sizeof(wrqu));
			wrqu.data.length = strlen(buf);
			wireless_send_event(priv->net_dev, IWEVCUSTOM, &wrqu,
					    buf);
			return -EINVAL;
		}
	}
	return 0;
}
static
void hostif_data_indication(struct ks_wlan_private *priv)
{
unsigned int rx_ind_size; /* indicate data size */
struct sk_buff *skb;
u16 auth_type;
unsigned char temp[256];
struct ether_hdr *eth_hdr;
struct ieee802_1x_hdr *aa1x_hdr;
size_t size;
int ret;
/* min length check */
if (priv->rx_size <= ETH_HLEN) {
priv->nstats.rx_errors++;
return;
}
auth_type = get_word(priv); /* AuthType */
get_word(priv); /* Reserve Area */
eth_hdr = (struct ether_hdr *)(priv->rxp);
/* source address check */
if (ether_addr_equal(&priv->eth_addr[0], eth_hdr->h_source)) {
netdev_err(priv->net_dev, "invalid : source is own mac address !!\n");
netdev_err(priv->net_dev, "eth_hdrernet->h_dest=%pM\n", eth_hdr->h_source);
priv->nstats.rx_errors++;
return;
}
/* for WPA */
if (auth_type != TYPE_DATA && priv->wpa.rsn_enabled) {
ret = hostif_data_indication_wpa(priv, auth_type);
if (ret)
return;
}
if ((priv->connect_status & FORCE_DISCONNECT) ||
priv->wpa.mic_failure.failure == 2) {
return;
}
/* check 13th byte at rx data */
switch (*(priv->rxp + 12)) {
case LLC_SAP_SNAP:
rx_ind_size = priv->rx_size - 6;
skb = dev_alloc_skb(rx_ind_size);
if (!skb) {
priv->nstats.rx_dropped++;
return;
}
netdev_dbg(priv->net_dev, "SNAP, rx_ind_size = %d\n",
rx_ind_size);
size = ETH_ALEN * 2;
skb_put_data(skb, priv->rxp, size);
/* (SNAP+UI..) skip */
size = rx_ind_size - (ETH_ALEN * 2);
skb_put_data(skb, ð_hdr->h_proto, size);
aa1x_hdr = (struct ieee802_1x_hdr *)(priv->rxp + ETHER_HDR_SIZE);
break;
case LLC_SAP_NETBEUI:
rx_ind_size = (priv->rx_size + 2);
skb = dev_alloc_skb(rx_ind_size);
if (!skb) {
priv->nstats.rx_dropped++;
return;
}
netdev_dbg(priv->net_dev, "NETBEUI/NetBIOS rx_ind_size=%d\n",
rx_ind_size);
/* 8802/FDDI MAC copy */
skb_put_data(skb, priv->rxp, 12);
/* NETBEUI size add */
temp[0] = (((rx_ind_size - 12) >> 8) & 0xff);
temp[1] = ((rx_ind_size - 12) & 0xff);
skb_put_data(skb, temp, 2);
/* copy after Type */
skb_put_data(skb, priv->rxp + 12, rx_ind_size - 14);
aa1x_hdr = (struct ieee802_1x_hdr *)(priv->rxp + 14);
break;
default: /* other rx data */
netdev_err(priv->net_dev, "invalid data format\n");
priv->nstats.rx_errors++;
return;
}
if (aa1x_hdr->type == IEEE802_1X_TYPE_EAPOL_KEY &&
priv->wpa.rsn_enabled)
atomic_set(&priv->psstatus.snooze_guard, 1);
/* rx indication */
skb->dev = priv->net_dev;
skb->protocol = eth_type_trans(skb, skb->dev);
priv->nstats.rx_packets++;
priv->nstats.rx_bytes += rx_ind_size;
netif_rx(skb);
}
/*
 * hostif_mib_get_confirm - handle a MIB-GET confirm from the firmware.
 *
 * On success, dispatches on the returned attribute: records the MAC
 * address, firmware version, gain settings or EEPROM checksum and
 * kicks the SME state machine / completion as appropriate.
 */
static
void hostif_mib_get_confirm(struct ks_wlan_private *priv)
{
	struct net_device *dev = priv->net_dev;
	u32 mib_status;
	u32 mib_attribute;
	mib_status = get_dword(priv);
	mib_attribute = get_dword(priv);
	get_word(priv);	/* mib_val_size */
	get_word(priv);	/* mib_val_type */
	if (mib_status) {
		netdev_err(priv->net_dev, "attribute=%08X, status=%08X\n",
			   mib_attribute, mib_status);
		return;
	}
	switch (mib_attribute) {
	case DOT11_MAC_ADDRESS:
		hostif_sme_enqueue(priv, SME_GET_MAC_ADDRESS);
		ether_addr_copy(priv->eth_addr, priv->rxp);
		priv->mac_address_valid = true;
		eth_hw_addr_set(dev, priv->eth_addr);
		netdev_info(dev, "MAC ADDRESS = %pM\n", priv->eth_addr);
		break;
	case DOT11_PRODUCT_VERSION:
		/*
		 * NOTE(review): rx_size is trusted to fit
		 * priv->firmware_version (plus NUL) - confirm buffer size.
		 */
		priv->version_size = priv->rx_size;
		memcpy(priv->firmware_version, priv->rxp, priv->rx_size);
		priv->firmware_version[priv->rx_size] = '\0';
		netdev_info(dev, "firmware ver. = %s\n",
			    priv->firmware_version);
		hostif_sme_enqueue(priv, SME_GET_PRODUCT_VERSION);
		/* wake_up_interruptible_all(&priv->confirm_wait); */
		complete(&priv->confirm_wait);
		break;
	case LOCAL_GAIN:
		memcpy(&priv->gain, priv->rxp, sizeof(priv->gain));
		netdev_dbg(priv->net_dev, "tx_mode=%d, rx_mode=%d, tx_gain=%d, rx_gain=%d\n",
			   priv->gain.tx_mode, priv->gain.rx_mode,
			   priv->gain.tx_gain, priv->gain.rx_gain);
		break;
	case LOCAL_EEPROM_SUM:
		memcpy(&priv->eeprom_sum, priv->rxp, sizeof(priv->eeprom_sum));
		if (priv->eeprom_sum.type != 0 &&
		    priv->eeprom_sum.type != 1) {
			netdev_err(dev, "LOCAL_EEPROM_SUM error!\n");
			return;
		}
		/* type 0 = no checksum; otherwise result 0 means bad sum */
		priv->eeprom_checksum = (priv->eeprom_sum.type == 0) ?
					 EEPROM_CHECKSUM_NONE :
					 (priv->eeprom_sum.result == 0) ?
					 EEPROM_NG : EEPROM_OK;
		break;
	default:
		netdev_err(priv->net_dev, "mib_attribute=%08x\n",
			   (unsigned int)mib_attribute);
		break;
	}
}
/*
 * hostif_mib_set_confirm - handle a MIB-SET confirm from the firmware.
 *
 * Logs errors, then maps the confirmed attribute onto the next SME
 * event so the configuration sequence can continue.  WEP-related
 * confirms are routed differently depending on whether WPA/RSN is
 * active, since the same key slots carry the TSC values in RSN mode.
 */
static
void hostif_mib_set_confirm(struct ks_wlan_private *priv)
{
	u32 mib_status;
	u32 mib_attribute;
	mib_status = get_dword(priv);
	mib_attribute = get_dword(priv);
	if (mib_status) {
		/* in case of error */
		netdev_err(priv->net_dev, "error :: attribute=%08X, status=%08X\n",
			   mib_attribute, mib_status);
	}
	switch (mib_attribute) {
	case DOT11_RTS_THRESHOLD:
		hostif_sme_enqueue(priv, SME_RTS_THRESHOLD_CONFIRM);
		break;
	case DOT11_FRAGMENTATION_THRESHOLD:
		hostif_sme_enqueue(priv, SME_FRAGMENTATION_THRESHOLD_CONFIRM);
		break;
	case DOT11_WEP_DEFAULT_KEY_ID:
		if (!priv->wpa.wpa_enabled)
			hostif_sme_enqueue(priv, SME_WEP_INDEX_CONFIRM);
		break;
	case DOT11_WEP_DEFAULT_KEY_VALUE1:
		if (priv->wpa.rsn_enabled)
			hostif_sme_enqueue(priv, SME_SET_PMK_TSC);
		else
			hostif_sme_enqueue(priv, SME_WEP_KEY1_CONFIRM);
		break;
	case DOT11_WEP_DEFAULT_KEY_VALUE2:
		if (priv->wpa.rsn_enabled)
			hostif_sme_enqueue(priv, SME_SET_GMK1_TSC);
		else
			hostif_sme_enqueue(priv, SME_WEP_KEY2_CONFIRM);
		break;
	case DOT11_WEP_DEFAULT_KEY_VALUE3:
		if (priv->wpa.rsn_enabled)
			hostif_sme_enqueue(priv, SME_SET_GMK2_TSC);
		else
			hostif_sme_enqueue(priv, SME_WEP_KEY3_CONFIRM);
		break;
	case DOT11_WEP_DEFAULT_KEY_VALUE4:
		if (!priv->wpa.rsn_enabled)
			hostif_sme_enqueue(priv, SME_WEP_KEY4_CONFIRM);
		break;
	case DOT11_PRIVACY_INVOKED:
		if (!priv->wpa.rsn_enabled)
			hostif_sme_enqueue(priv, SME_WEP_FLAG_CONFIRM);
		break;
	case DOT11_RSN_ENABLED:
		hostif_sme_enqueue(priv, SME_RSN_ENABLED_CONFIRM);
		break;
	case LOCAL_RSN_MODE:
		hostif_sme_enqueue(priv, SME_RSN_MODE_CONFIRM);
		break;
	case LOCAL_MULTICAST_ADDRESS:
		hostif_sme_enqueue(priv, SME_MULTICAST_REQUEST);
		break;
	case LOCAL_MULTICAST_FILTER:
		hostif_sme_enqueue(priv, SME_MULTICAST_CONFIRM);
		break;
	case LOCAL_CURRENTADDRESS:
		priv->mac_address_valid = true;
		break;
	case DOT11_RSN_CONFIG_MULTICAST_CIPHER:
		hostif_sme_enqueue(priv, SME_RSN_MCAST_CONFIRM);
		break;
	case DOT11_RSN_CONFIG_UNICAST_CIPHER:
		hostif_sme_enqueue(priv, SME_RSN_UCAST_CONFIRM);
		break;
	case DOT11_RSN_CONFIG_AUTH_SUITE:
		hostif_sme_enqueue(priv, SME_RSN_AUTH_CONFIRM);
		break;
	case DOT11_GMK1_TSC:
		/* group key TSC installed: power save may resume */
		if (atomic_read(&priv->psstatus.snooze_guard))
			atomic_set(&priv->psstatus.snooze_guard, 0);
		break;
	case DOT11_GMK2_TSC:
		if (atomic_read(&priv->psstatus.snooze_guard))
			atomic_set(&priv->psstatus.snooze_guard, 0);
		break;
	case DOT11_PMK_TSC:
	case LOCAL_PMK:
	case LOCAL_GAIN:
	case LOCAL_WPS_ENABLE:
	case LOCAL_WPS_PROBE_REQ:
	case LOCAL_REGION:
	default:
		/* no follow-up action required */
		break;
	}
}
static
void hostif_power_mgmt_confirm(struct ks_wlan_private *priv)
{
if (priv->reg.power_mgmt > POWER_MGMT_ACTIVE &&
priv->reg.operation_mode == MODE_INFRASTRUCTURE) {
atomic_set(&priv->psstatus.confirm_wait, 0);
priv->dev_state = DEVICE_STATE_SLEEP;
ks_wlan_hw_power_save(priv);
} else {
priv->dev_state = DEVICE_STATE_READY;
}
}
/*
 * hostif_sleep_confirm - firmware acknowledged the sleep request;
 * flag the doze transition and let the read/write worker perform it.
 */
static
void hostif_sleep_confirm(struct ks_wlan_private *priv)
{
	atomic_set(&priv->sleepstatus.doze_request, 1);
	queue_delayed_work(priv->wq, &priv->rw_dwork, 1);
}
/*
 * hostif_start_confirm - firmware acknowledged the start request.
 *
 * If we were previously associated, emit a SIOCGIWAP event with a
 * zeroed BSSID (wireless-extensions convention for "disassociated"),
 * then advance the SME state machine.
 */
static
void hostif_start_confirm(struct ks_wlan_private *priv)
{
	union iwreq_data wrqu;
	wrqu.data.length = 0;
	wrqu.data.flags = 0;
	wrqu.ap_addr.sa_family = ARPHRD_ETHER;
	if (is_connect_status(priv->connect_status)) {
		eth_zero_addr(wrqu.ap_addr.sa_data);
		wireless_send_event(priv->net_dev, SIOCGIWAP, &wrqu, NULL);
	}
	netdev_dbg(priv->net_dev, " scan_ind_count=%d\n", priv->scan_ind_count);
	hostif_sme_enqueue(priv, SME_START_CONFIRM);
}
/*
 * hostif_connect_indication - handle a connect/disconnect indication.
 *
 * Updates the carrier and connect_status (preserving the
 * FORCE_DISCONNECT bit across the state change), refreshes the current
 * AP information, adjusts power-save bookkeeping, and reports a
 * disconnect to user space via SIOCGIWAP when the association drops.
 */
static
void hostif_connect_indication(struct ks_wlan_private *priv)
{
	u16 connect_code;
	unsigned int tmp = 0;
	unsigned int old_status = priv->connect_status;
	struct net_device *netdev = priv->net_dev;
	union iwreq_data wrqu0;
	connect_code = get_word(priv);
	switch (connect_code) {
	case RESULT_CONNECT:
		if (!(priv->connect_status & FORCE_DISCONNECT))
			netif_carrier_on(netdev);
		tmp = FORCE_DISCONNECT & priv->connect_status;
		priv->connect_status = tmp + CONNECT_STATUS;
		break;
	case RESULT_DISCONNECT:
		netif_carrier_off(netdev);
		tmp = FORCE_DISCONNECT & priv->connect_status;
		priv->connect_status = tmp + DISCONNECT_STATUS;
		break;
	default:
		/* unknown codes are treated as disconnect */
		netdev_dbg(priv->net_dev, "unknown connect_code=%d :: scan_ind_count=%d\n",
			   connect_code, priv->scan_ind_count);
		netif_carrier_off(netdev);
		tmp = FORCE_DISCONNECT & priv->connect_status;
		priv->connect_status = tmp + DISCONNECT_STATUS;
		break;
	}
	get_current_ap(priv, (struct link_ap_info *)priv->rxp);
	if (is_connect_status(priv->connect_status) &&
	    is_disconnect_status(old_status)) {
		/* for power save */
		atomic_set(&priv->psstatus.snooze_guard, 0);
		atomic_set(&priv->psstatus.confirm_wait, 0);
	}
	ks_wlan_do_power_save(priv);
	wrqu0.data.length = 0;
	wrqu0.data.flags = 0;
	wrqu0.ap_addr.sa_family = ARPHRD_ETHER;
	if (is_disconnect_status(priv->connect_status) &&
	    is_connect_status(old_status)) {
		/* zeroed BSSID signals "disassociated" to user space */
		eth_zero_addr(wrqu0.ap_addr.sa_data);
		netdev_dbg(priv->net_dev, "disconnect :: scan_ind_count=%d\n",
			   priv->scan_ind_count);
		wireless_send_event(netdev, SIOCGIWAP, &wrqu0, NULL);
	}
	priv->scan_ind_count = 0;
}
/*
 * hostif_scan_indication - handle one scan result from the firmware.
 *
 * If the reported BSSID is already in the AP list, refresh that entry
 * (probe responses only) and return.  Otherwise append the result to
 * the list, up to LOCAL_APLIST_MAX entries.
 *
 * Fix: the duplicate-BSSID check was inverted - the loop skipped
 * matching entries and returned on the first non-matching one, so an
 * already-known AP was never updated and new APs after the first list
 * entry were never added.  The loop must skip NON-matching entries.
 */
static
void hostif_scan_indication(struct ks_wlan_private *priv)
{
	int i;
	struct ap_info *ap_info;

	netdev_dbg(priv->net_dev,
		   "scan_ind_count = %d\n", priv->scan_ind_count);
	ap_info = (struct ap_info *)(priv->rxp);

	if (priv->scan_ind_count) {
		/* bssid check */
		for (i = 0; i < priv->aplist.size; i++) {
			u8 *bssid = priv->aplist.ap[i].bssid;

			if (!ether_addr_equal(ap_info->bssid, bssid))
				continue;

			/* known AP: refresh from probe responses only */
			if (ap_info->frame_type == IEEE80211_STYPE_PROBE_RESP)
				get_ap_information(priv, ap_info,
						   &priv->aplist.ap[i]);
			return;
		}
	}
	priv->scan_ind_count++;
	if (priv->scan_ind_count < LOCAL_APLIST_MAX + 1) {
		netdev_dbg(priv->net_dev, " scan_ind_count=%d :: aplist.size=%d\n",
			   priv->scan_ind_count, priv->aplist.size);
		get_ap_information(priv, (struct ap_info *)(priv->rxp),
				   &priv->aplist.ap[priv->scan_ind_count - 1]);
		priv->aplist.size = priv->scan_ind_count;
	} else {
		netdev_dbg(priv->net_dev, " count over :: scan_ind_count=%d\n",
			   priv->scan_ind_count);
	}
}
/*
 * hostif_stop_confirm - firmware acknowledged the stop request.
 *
 * Wakes the device out of SLEEP bookkeeping, drops the carrier and
 * reports a disconnect to user space if we were associated, then
 * advances the SME state machine.
 */
static
void hostif_stop_confirm(struct ks_wlan_private *priv)
{
	unsigned int tmp = 0;
	unsigned int old_status = priv->connect_status;
	struct net_device *netdev = priv->net_dev;
	union iwreq_data wrqu0;
	if (priv->dev_state == DEVICE_STATE_SLEEP)
		priv->dev_state = DEVICE_STATE_READY;
	/* disconnect indication */
	if (is_connect_status(priv->connect_status)) {
		netif_carrier_off(netdev);
		/* keep the FORCE_DISCONNECT bit, switch to DISCONNECT */
		tmp = FORCE_DISCONNECT & priv->connect_status;
		priv->connect_status = tmp | DISCONNECT_STATUS;
		netdev_info(netdev, "IWEVENT: disconnect\n");
		wrqu0.data.length = 0;
		wrqu0.data.flags = 0;
		wrqu0.ap_addr.sa_family = ARPHRD_ETHER;
		if (is_disconnect_status(priv->connect_status) &&
		    is_connect_status(old_status)) {
			eth_zero_addr(wrqu0.ap_addr.sa_data);
			netdev_info(netdev, "IWEVENT: disconnect\n");
			wireless_send_event(netdev, SIOCGIWAP, &wrqu0, NULL);
		}
		priv->scan_ind_count = 0;
	}
	hostif_sme_enqueue(priv, SME_STOP_CONFIRM);
}
/*
 * hostif_ps_adhoc_set_confirm - pseudo-adhoc mode set acknowledged;
 * clear the infrastructure flag and advance the SME state machine.
 */
static
void hostif_ps_adhoc_set_confirm(struct ks_wlan_private *priv)
{
	priv->infra_status = 0;	/* infrastructure mode cancel */
	hostif_sme_enqueue(priv, SME_MODE_SET_CONFIRM);
}
/*
 * hostif_infrastructure_set_confirm - infrastructure mode set acknowledged;
 * consume the result code, mark infrastructure mode and notify the SME.
 */
static
void hostif_infrastructure_set_confirm(struct ks_wlan_private *priv)
{
	get_word(priv);	/* result_code */
	priv->infra_status = 1;	/* infrastructure mode set */
	hostif_sme_enqueue(priv, SME_MODE_SET_CONFIRM);
}
/*
 * hostif_adhoc_set_confirm - adhoc mode set acknowledged; mark the mode
 * flag and advance the SME state machine.
 */
static
void hostif_adhoc_set_confirm(struct ks_wlan_private *priv)
{
	priv->infra_status = 1;	/* infrastructure mode set */
	hostif_sme_enqueue(priv, SME_MODE_SET_CONFIRM);
}
/*
 * hostif_associate_indication - report association request/response IEs.
 *
 * The firmware delivers the association request and response back to
 * back in the RX buffer; their IEs are hex-expanded into an
 * "ASSOCINFO(ReqIEs=... RespIEs=...)" string and sent to user space as
 * an IWEVCUSTOM wireless event.
 *
 * NOTE(review): the IEs are expanded (two hex chars per byte) into
 * buf[IW_CUSTOM_MAX] without a bounds check; large req/resp IE sizes
 * could overflow buf - confirm the firmware's upper bound.
 */
static
void hostif_associate_indication(struct ks_wlan_private *priv)
{
	struct association_request *assoc_req;
	struct association_response *assoc_resp;
	unsigned char *pb;
	union iwreq_data wrqu;
	char buf[IW_CUSTOM_MAX];
	char *pbuf = &buf[0];
	int i;
	static const char associnfo_leader0[] = "ASSOCINFO(ReqIEs=";
	static const char associnfo_leader1[] = " RespIEs=";
	/* request header, response header, then the raw IE bytes */
	assoc_req = (struct association_request *)(priv->rxp);
	assoc_resp = (struct association_response *)(assoc_req + 1);
	pb = (unsigned char *)(assoc_resp + 1);
	memset(&wrqu, 0, sizeof(wrqu));
	memcpy(pbuf, associnfo_leader0, sizeof(associnfo_leader0) - 1);
	wrqu.data.length += sizeof(associnfo_leader0) - 1;
	pbuf += sizeof(associnfo_leader0) - 1;
	for (i = 0; i < le16_to_cpu(assoc_req->req_ies_size); i++)
		pbuf += sprintf(pbuf, "%02x", *(pb + i));
	wrqu.data.length += (le16_to_cpu(assoc_req->req_ies_size)) * 2;
	memcpy(pbuf, associnfo_leader1, sizeof(associnfo_leader1) - 1);
	wrqu.data.length += sizeof(associnfo_leader1) - 1;
	pbuf += sizeof(associnfo_leader1) - 1;
	pb += le16_to_cpu(assoc_req->req_ies_size);
	for (i = 0; i < le16_to_cpu(assoc_resp->resp_ies_size); i++)
		pbuf += sprintf(pbuf, "%02x", *(pb + i));
	wrqu.data.length += (le16_to_cpu(assoc_resp->resp_ies_size)) * 2;
	pbuf += sprintf(pbuf, ")");
	wrqu.data.length += 1;
	wireless_send_event(priv->net_dev, IWEVCUSTOM, &wrqu, buf);
}
/*
 * hostif_bss_scan_confirm - firmware finished a BSS scan.
 *
 * Clears the SME scan flag, advances the SME state machine, and emits
 * a SIOCGIWSCAN wireless event so user space can collect the results.
 */
static
void hostif_bss_scan_confirm(struct ks_wlan_private *priv)
{
	u32 result_code;
	struct net_device *dev = priv->net_dev;
	union iwreq_data wrqu;
	result_code = get_dword(priv);
	netdev_dbg(priv->net_dev, "result=%d :: scan_ind_count=%d\n",
		   result_code, priv->scan_ind_count);
	priv->sme_i.sme_flag &= ~SME_AP_SCAN;
	hostif_sme_enqueue(priv, SME_BSS_SCAN_CONFIRM);
	wrqu.data.length = 0;
	wrqu.data.flags = 0;
	wireless_send_event(dev, SIOCGIWSCAN, &wrqu, NULL);
	priv->scan_ind_count = 0;
}
/*
 * hostif_phy_information_confirm - parse a PHY information report.
 *
 * Reads RSSI/signal/noise/link-speed bytes and the frame counters from
 * the RX buffer, updates the wireless statistics and current rate, and
 * completes confirm_wait for any waiter.
 */
static
void hostif_phy_information_confirm(struct ks_wlan_private *priv)
{
	struct iw_statistics *wstats = &priv->wstats;
	u8 rssi, signal;
	u8 link_speed;
	u32 transmitted_frame_count, received_fragment_count;
	u32 failed_count, fcs_error_count;
	rssi = get_byte(priv);
	signal = get_byte(priv);
	get_byte(priv);	/* noise */
	link_speed = get_byte(priv);
	transmitted_frame_count = get_dword(priv);
	received_fragment_count = get_dword(priv);
	failed_count = get_dword(priv);
	fcs_error_count = get_dword(priv);
	netdev_dbg(priv->net_dev, "phyinfo confirm rssi=%d signal=%d\n",
		   rssi, signal);
	priv->current_rate = (link_speed & RATE_MASK);
	wstats->qual.qual = signal;
	/* level is reported as 256 - rssi (dBm convention for this chip) */
	wstats->qual.level = 256 - rssi;
	wstats->qual.noise = 0;	/* invalid noise value */
	wstats->qual.updated = IW_QUAL_ALL_UPDATED | IW_QUAL_DBM;
	netdev_dbg(priv->net_dev, "\n    rssi=%u\n"
		   "    signal=%u\n"
		   "    link_speed=%ux500Kbps\n"
		   "    transmitted_frame_count=%u\n"
		   "    received_fragment_count=%u\n"
		   "    failed_count=%u\n"
		   "    fcs_error_count=%u\n",
		   rssi, signal, link_speed, transmitted_frame_count,
		   received_fragment_count, failed_count, fcs_error_count);
	/* wake_up_interruptible_all(&priv->confirm_wait); */
	complete(&priv->confirm_wait);
}
/*
 * hostif_mic_failure_confirm - firmware acknowledged the MIC failure
 * report; advance the SME state machine.
 */
static
void hostif_mic_failure_confirm(struct ks_wlan_private *priv)
{
	netdev_dbg(priv->net_dev, "mic_failure=%u\n",
		   priv->wpa.mic_failure.failure);
	hostif_sme_enqueue(priv, SME_MIC_FAILURE_CONFIRM);
}
/*
 * hostif_event_check - dispatch one received host-interface event.
 *
 * Reads the event word from the RX buffer and routes it to the matching
 * indication/confirm handler.  Unknown events (and HIF_AP_SET_CONF) are
 * logged and complete confirm_wait so no waiter hangs.  Every event is
 * also recorded in the hostt ring buffer for debugging.
 */
static
void hostif_event_check(struct ks_wlan_private *priv)
{
	u16 event;
	event = get_word(priv);
	switch (event) {
	case HIF_DATA_IND:
		hostif_data_indication(priv);
		break;
	case HIF_MIB_GET_CONF:
		hostif_mib_get_confirm(priv);
		break;
	case HIF_MIB_SET_CONF:
		hostif_mib_set_confirm(priv);
		break;
	case HIF_POWER_MGMT_CONF:
		hostif_power_mgmt_confirm(priv);
		break;
	case HIF_SLEEP_CONF:
		hostif_sleep_confirm(priv);
		break;
	case HIF_START_CONF:
		hostif_start_confirm(priv);
		break;
	case HIF_CONNECT_IND:
		hostif_connect_indication(priv);
		break;
	case HIF_STOP_CONF:
		hostif_stop_confirm(priv);
		break;
	case HIF_PS_ADH_SET_CONF:
		hostif_ps_adhoc_set_confirm(priv);
		break;
	case HIF_INFRA_SET_CONF:
	case HIF_INFRA_SET2_CONF:
		hostif_infrastructure_set_confirm(priv);
		break;
	case HIF_ADH_SET_CONF:
	case HIF_ADH_SET2_CONF:
		hostif_adhoc_set_confirm(priv);
		break;
	case HIF_ASSOC_INFO_IND:
		hostif_associate_indication(priv);
		break;
	case HIF_MIC_FAILURE_CONF:
		hostif_mic_failure_confirm(priv);
		break;
	case HIF_SCAN_CONF:
		hostif_bss_scan_confirm(priv);
		break;
	case HIF_PHY_INFO_CONF:
	case HIF_PHY_INFO_IND:
		hostif_phy_information_confirm(priv);
		break;
	case HIF_SCAN_IND:
		hostif_scan_indication(priv);
		break;
	case HIF_AP_SET_CONF:
	default:
		netdev_err(priv->net_dev, "undefined event[%04X]\n", event);
		/* wake_up_all(&priv->confirm_wait); */
		complete(&priv->confirm_wait);
		break;
	}
	/* add event to hostt buffer */
	priv->hostt.buff[priv->hostt.qtail] = event;
	priv->hostt.qtail = (priv->hostt.qtail + 1) % SME_EVENT_BUFF_SIZE;
}
/*
 * hostif_generic_request - allocate a zeroed, alignment-padded request
 * frame and fill in the common header (size excludes the size field
 * itself; both fields are little-endian on the wire).
 *
 * Returns the frame or NULL on allocation failure.
 */
static void *hostif_generic_request(size_t size, int event)
{
	struct hostif_hdr *hdr;

	hdr = kzalloc(hif_align_size(size), GFP_ATOMIC);
	if (!hdr)
		return NULL;

	hdr->event = cpu_to_le16(event);
	hdr->size = cpu_to_le16(size - sizeof(hdr->size));
	return hdr;
}
/*
 * hostif_data_request - build and transmit one data frame.
 *
 * Converts the Ethernet frame in @skb into the firmware's frame format:
 * MAC addresses, then either an injected SNAP header (EtherType frames)
 * or the original payload with the 802.3 length field removed, then an
 * appended Michael MIC when TKIP is the pairwise suite.  Sets the
 * auth_type field (AUTH for EAPOL during key setup, DATA otherwise) and
 * hands the frame to ks_wlan_hw_tx().  Frames are silently dropped
 * (returning 0) while disconnected or under MIC countermeasures.
 *
 * Returns 0 or a negative errno; consumes @skb on every path.
 */
int hostif_data_request(struct ks_wlan_private *priv, struct sk_buff *skb)
{
	unsigned int skb_len = 0;
	unsigned char *buffer = NULL;
	unsigned int length = 0;
	struct hostif_data_request *pp;
	unsigned char *p;
	unsigned short eth_proto;
	struct ether_hdr *eth_hdr;
	unsigned short keyinfo = 0;
	struct ieee802_1x_hdr *aa1x_hdr;
	struct wpa_eapol_key *eap_key;
	struct ethhdr *eth;
	size_t size;
	int ret;
	skb_len = skb->len;
	if (skb_len > ETH_FRAME_LEN) {
		netdev_err(priv->net_dev, "bad length skb_len=%d\n", skb_len);
		ret = -EOVERFLOW;
		goto err_kfree_skb;
	}
	if (is_disconnect_status(priv->connect_status) ||
	    (priv->connect_status & FORCE_DISCONNECT) ||
	    priv->wpa.mic_failure.stop) {
		/* not associated: drop silently but keep the queue moving */
		if (netif_queue_stopped(priv->net_dev))
			netif_wake_queue(priv->net_dev);
		dev_kfree_skb(skb);
		return 0;
	}
	/* power save wakeup */
	if (atomic_read(&priv->psstatus.status) == PS_SNOOZE) {
		if (!netif_queue_stopped(priv->net_dev))
			netif_stop_queue(priv->net_dev);
	}
	/* worst case: header + 6-byte SNAP + payload + 8-byte MIC */
	size = sizeof(*pp) + 6 + skb_len + 8;
	pp = kmalloc(hif_align_size(size), GFP_ATOMIC);
	if (!pp) {
		ret = -ENOMEM;
		goto err_kfree_skb;
	}
	p = (unsigned char *)pp->data;
	buffer = skb->data;
	length = skb->len;
	/* skb check */
	eth = (struct ethhdr *)skb->data;
	if (!ether_addr_equal(&priv->eth_addr[0], eth->h_source)) {
		netdev_err(priv->net_dev,
			   "Invalid mac address: ethernet->h_source=%pM\n",
			   eth->h_source);
		ret = -ENXIO;
		goto err_kfree;
	}
	/* dest and src MAC address copy */
	size = ETH_ALEN * 2;
	memcpy(p, buffer, size);
	p += size;
	buffer += size;
	length -= size;
	/* EtherType/Length check: > 1500 means Ethernet II (EtherType) */
	if (*(buffer + 1) + (*buffer << 8) > 1500) {
		/* ProtocolEAP = *(buffer+1) + (*buffer << 8); */
		/* SAP/CTL/OUI(6 byte) add */
		*p++ = 0xAA;	/* DSAP */
		*p++ = 0xAA;	/* SSAP */
		*p++ = 0x03;	/* CTL */
		*p++ = 0x00;	/* OUI ("000000") */
		*p++ = 0x00;	/* OUI ("000000") */
		*p++ = 0x00;	/* OUI ("000000") */
		skb_len += 6;
	} else {
		/* Length(2 byte) delete */
		buffer += 2;
		length -= 2;
		skb_len -= 2;
	}
	/* pp->data copy */
	memcpy(p, buffer, length);
	p += length;
	/* for WPA */
	eth_hdr = (struct ether_hdr *)&pp->data[0];
	eth_proto = ntohs(eth_hdr->h_proto);
	/* for MIC FAILURE REPORT check */
	if (eth_proto == ETH_P_PAE &&
	    priv->wpa.mic_failure.failure > 0) {
		aa1x_hdr = (struct ieee802_1x_hdr *)(eth_hdr + 1);
		if (aa1x_hdr->type == IEEE802_1X_TYPE_EAPOL_KEY) {
			eap_key = (struct wpa_eapol_key *)(aa1x_hdr + 1);
			keyinfo = ntohs(eap_key->key_info);
		}
	}
	if (priv->wpa.rsn_enabled && priv->wpa.key[0].key_len) {
		/* no encryption */
		if (eth_proto == ETH_P_PAE &&
		    priv->wpa.key[1].key_len == 0 &&
		    priv->wpa.key[2].key_len == 0 &&
		    priv->wpa.key[3].key_len == 0) {
			pp->auth_type = cpu_to_le16(TYPE_AUTH);
		} else {
			if (priv->wpa.pairwise_suite == IW_AUTH_CIPHER_TKIP) {
				u8 mic[MICHAEL_MIC_LEN];
				/* append the Michael MIC over the frame body */
				ret = michael_mic(priv->wpa.key[0].tx_mic_key,
						  &pp->data[0], skb_len,
						  0, mic);
				if (ret < 0)
					goto err_kfree;
				memcpy(p, mic, sizeof(mic));
				length += sizeof(mic);
				skb_len += sizeof(mic);
				p += sizeof(mic);
				pp->auth_type =
				    cpu_to_le16(TYPE_DATA);
			} else if (priv->wpa.pairwise_suite ==
				   IW_AUTH_CIPHER_CCMP) {
				pp->auth_type =
				    cpu_to_le16(TYPE_DATA);
			}
		}
	} else {
		if (eth_proto == ETH_P_PAE)
			pp->auth_type = cpu_to_le16(TYPE_AUTH);
		else
			pp->auth_type = cpu_to_le16(TYPE_DATA);
	}
	/* header value set */
	pp->header.size =
	    cpu_to_le16((sizeof(*pp) - sizeof(pp->header.size) + skb_len));
	pp->header.event = cpu_to_le16(HIF_DATA_REQ);
	/* tx request */
	ret = ks_wlan_hw_tx(priv, pp, hif_align_size(sizeof(*pp) + skb_len),
			    send_packet_complete, skb);
	/* MIC FAILURE REPORT check */
	if (eth_proto == ETH_P_PAE &&
	    priv->wpa.mic_failure.failure > 0) {
		if (keyinfo & WPA_KEY_INFO_ERROR &&
		    keyinfo & WPA_KEY_INFO_REQUEST) {
			netdev_err(priv->net_dev,
				   "MIC ERROR Report SET : %04X\n", keyinfo);
			hostif_sme_enqueue(priv, SME_MIC_FAILURE_REQUEST);
		}
		if (priv->wpa.mic_failure.failure == 2)
			priv->wpa.mic_failure.stop = 1;
	}
	return ret;
err_kfree:
	kfree(pp);
err_kfree_skb:
	dev_kfree_skb(skb);
	return ret;
}
/* Count an outstanding confirm only while power save is engaged. */
static inline void ps_confirm_wait_inc(struct ks_wlan_private *priv)
{
	if (atomic_read(&priv->psstatus.status) <= PS_ACTIVE_SET)
		return;
	atomic_inc(&priv->psstatus.confirm_wait);
}
/*
 * send_request_to_device - bump the power-save confirm counter and hand
 * an already-built request frame to the hardware TX path (no completion
 * callback; the hardware layer owns @data from here).
 */
static inline void send_request_to_device(struct ks_wlan_private *priv,
					  void *data, size_t size)
{
	ps_confirm_wait_inc(priv);
	ks_wlan_hw_tx(priv, data, size, NULL, NULL);
}
/*
 * hostif_mib_get_request - build a MIB-GET frame for @mib_attribute and
 * queue it to the device.  Silently returns on allocation failure.
 */
static void hostif_mib_get_request(struct ks_wlan_private *priv,
				   u32 mib_attribute)
{
	struct hostif_mib_get_request *req;

	req = hostif_generic_request(sizeof(*req), HIF_MIB_GET_REQ);
	if (!req)
		return;

	req->mib_attribute = cpu_to_le32(mib_attribute);
	send_request_to_device(priv, req, hif_align_size(sizeof(*req)));
}
/*
 * hostif_mib_set_request - build and queue a MIB-SET frame.
 *
 * @attr: MIB attribute to set
 * @type: wire encoding of the value (int/bool/octet string)
 * @data/@size: value payload, copied into the frame body
 *
 * Dropped silently before the device has booted or on allocation
 * failure.
 */
static void hostif_mib_set_request(struct ks_wlan_private *priv,
				   enum mib_attribute attr,
				   enum mib_data_type type,
				   void *data, size_t size)
{
	struct hostif_mib_set_request_t *pp;
	if (priv->dev_state < DEVICE_STATE_BOOT)
		return;
	pp = hostif_generic_request(sizeof(*pp), HIF_MIB_SET_REQ);
	if (!pp)
		return;
	pp->mib_attribute = cpu_to_le32(attr);
	pp->mib_value.size = cpu_to_le16(size);
	pp->mib_value.type = cpu_to_le16(type);
	memcpy(&pp->mib_value.body, data, size);
	send_request_to_device(priv, pp, hif_align_size(sizeof(*pp) + size));
}
/* Set an integer-typed MIB attribute (encoded little-endian 32-bit). */
static inline void hostif_mib_set_request_int(struct ks_wlan_private *priv,
					      enum mib_attribute attr, int val)
{
	__le32 body = cpu_to_le32(val);

	hostif_mib_set_request(priv, attr, MIB_VALUE_TYPE_INT, &body,
			       sizeof(body));
}
/* Set a boolean-typed MIB attribute (encoded little-endian 32-bit). */
static inline void hostif_mib_set_request_bool(struct ks_wlan_private *priv,
					       enum mib_attribute attr,
					       bool val)
{
	__le32 body = cpu_to_le32(val);

	hostif_mib_set_request(priv, attr, MIB_VALUE_TYPE_BOOL, &body,
			       sizeof(body));
}
/* Set an octet-string-typed MIB attribute (raw bytes of @size). */
static inline void hostif_mib_set_request_ostring(struct ks_wlan_private *priv,
						  enum mib_attribute attr,
						  void *data, size_t size)
{
	hostif_mib_set_request(priv, attr, MIB_VALUE_TYPE_OSTRING, data, size);
}
/*
 * hostif_start_request - ask the firmware to start in @mode and reset
 * the scan bookkeeping for a fresh AP list.
 */
static
void hostif_start_request(struct ks_wlan_private *priv, unsigned char mode)
{
	struct hostif_start_request *pp;
	pp = hostif_generic_request(sizeof(*pp), HIF_START_REQ);
	if (!pp)
		return;
	pp->mode = cpu_to_le16(mode);
	send_request_to_device(priv, pp, hif_align_size(sizeof(*pp)));
	priv->aplist.size = 0;
	priv->scan_ind_count = 0;
}
/*
 * ks_wlan_cap - assemble the 802.11 capability field advertised to the
 * AP, little-endian as the firmware expects.
 */
static __le16 ks_wlan_cap(struct ks_wlan_private *priv)
{
	u16 cap = 0;

	if (priv->reg.preamble == SHORT_PREAMBLE)
		cap |= WLAN_CAPABILITY_SHORT_PREAMBLE;

	/* PBCC is never supported by this hardware. */
	cap &= ~(WLAN_CAPABILITY_PBCC);

	if (priv->reg.phy_type != D_11B_ONLY_MODE) {
		cap |= WLAN_CAPABILITY_SHORT_SLOT_TIME;
		cap &= ~(WLAN_CAPABILITY_DSSS_OFDM);
	}
	return cpu_to_le16(cap);
}
/*
 * init_request - fill the common fields (PHY type, CTS mode, scan type,
 * rate set, capability) shared by all mode-set request frames.
 */
static void init_request(struct ks_wlan_private *priv,
			 struct hostif_request *req)
{
	req->phy_type = cpu_to_le16(priv->reg.phy_type);
	req->cts_mode = cpu_to_le16(priv->reg.cts_mode);
	req->scan_type = cpu_to_le16(priv->reg.scan_type);
	req->rate_set.size = priv->reg.rate_set.size;
	req->capability = ks_wlan_cap(priv);
	memcpy(&req->rate_set.body[0], &priv->reg.rate_set.body[0],
	       priv->reg.rate_set.size);
}
/*
 * hostif_ps_adhoc_set_request - request pseudo-adhoc mode on the
 * configured channel.
 */
static
void hostif_ps_adhoc_set_request(struct ks_wlan_private *priv)
{
	struct hostif_ps_adhoc_set_request *pp;
	pp = hostif_generic_request(sizeof(*pp), HIF_PS_ADH_SET_REQ);
	if (!pp)
		return;
	init_request(priv, &pp->request);
	pp->channel = cpu_to_le16(priv->reg.channel);
	send_request_to_device(priv, pp, hif_align_size(sizeof(*pp)));
}
static
void hostif_infrastructure_set_request(struct ks_wlan_private *priv, int event)
{
struct hostif_infrastructure_set_request *pp;
pp = hostif_generic_request(sizeof(*pp), event);
if (!pp)
return;
init_request(priv, &pp->request);
pp->ssid.size = priv->reg.ssid.size;
memcpy(&pp->ssid.body[0], &priv->reg.ssid.body[0], priv->reg.ssid.size);
pp->beacon_lost_count =
cpu_to_le16(priv->reg.beacon_lost_count);
pp->auth_type = cpu_to_le16(priv->reg.authenticate_type);
pp->channel_list.body[0] = 1;
pp->channel_list.body[1] = 8;
pp->channel_list.body[2] = 2;
pp->channel_list.body[3] = 9;
pp->channel_list.body[4] = 3;
pp->channel_list.body[5] = 10;
pp->channel_list.body[6] = 4;
pp->channel_list.body[7] = 11;
pp->channel_list.body[8] = 5;
pp->channel_list.body[9] = 12;
pp->channel_list.body[10] = 6;
pp->channel_list.body[11] = 13;
pp->channel_list.body[12] = 7;
if (priv->reg.phy_type == D_11G_ONLY_MODE) {
pp->channel_list.size = 13;
} else {
pp->channel_list.body[13] = 14;
pp->channel_list.size = 14;
}
send_request_to_device(priv, pp, hif_align_size(sizeof(*pp)));
}
/*
 * hostif_adhoc_set_request - request adhoc (IBSS) mode on the configured
 * channel with the configured SSID.
 */
static
void hostif_adhoc_set_request(struct ks_wlan_private *priv)
{
	struct hostif_adhoc_set_request *pp;
	pp = hostif_generic_request(sizeof(*pp), HIF_ADH_SET_REQ);
	if (!pp)
		return;
	init_request(priv, &pp->request);
	pp->channel = cpu_to_le16(priv->reg.channel);
	pp->ssid.size = priv->reg.ssid.size;
	memcpy(&pp->ssid.body[0], &priv->reg.ssid.body[0], priv->reg.ssid.size);
	send_request_to_device(priv, pp, hif_align_size(sizeof(*pp)));
}
/*
 * hostif_adhoc_set2_request - join an existing adhoc network: like
 * hostif_adhoc_set_request but with an explicit single-channel list and
 * target BSSID.
 */
static
void hostif_adhoc_set2_request(struct ks_wlan_private *priv)
{
	struct hostif_adhoc_set2_request *pp;
	pp = hostif_generic_request(sizeof(*pp), HIF_ADH_SET_REQ);
	if (!pp)
		return;
	init_request(priv, &pp->request);
	pp->ssid.size = priv->reg.ssid.size;
	memcpy(&pp->ssid.body[0], &priv->reg.ssid.body[0], priv->reg.ssid.size);
	pp->channel_list.body[0] = priv->reg.channel;
	pp->channel_list.size = 1;
	memcpy(pp->bssid, priv->reg.bssid, ETH_ALEN);
	send_request_to_device(priv, pp, hif_align_size(sizeof(*pp)));
}
/* hostif_stop_request - ask the firmware to stop the MAC. */
static
void hostif_stop_request(struct ks_wlan_private *priv)
{
	struct hostif_stop_request *pp;
	pp = hostif_generic_request(sizeof(*pp), HIF_STOP_REQ);
	if (!pp)
		return;
	send_request_to_device(priv, pp, hif_align_size(sizeof(*pp)));
}
static
void hostif_phy_information_request(struct ks_wlan_private *priv)
{
struct hostif_phy_information_request *pp;
pp = hostif_generic_request(sizeof(*pp), HIF_PHY_INFO_REQ);
if (!pp)
return;
if (priv->reg.phy_info_timer) {
pp->type = cpu_to_le16(TIME_TYPE);
pp->time = cpu_to_le16(priv->reg.phy_info_timer);
} else {
pp->type = cpu_to_le16(NORMAL_TYPE);
pp->time = cpu_to_le16(0);
}
send_request_to_device(priv, pp, hif_align_size(sizeof(*pp)));
}
/*
 * hostif_power_mgmt_request - configure firmware power management.
 *
 * @mode: power-save mode, @wake_up: wake-up behaviour,
 * @receive_dtims: whether to wake for DTIM beacons.
 */
static
void hostif_power_mgmt_request(struct ks_wlan_private *priv,
			       u32 mode, u32 wake_up, u32 receive_dtims)
{
	struct hostif_power_mgmt_request *pp;
	pp = hostif_generic_request(sizeof(*pp), HIF_POWER_MGMT_REQ);
	if (!pp)
		return;
	pp->mode = cpu_to_le32(mode);
	pp->wake_up = cpu_to_le32(wake_up);
	pp->receive_dtims = cpu_to_le32(receive_dtims);
	send_request_to_device(priv, pp, hif_align_size(sizeof(*pp)));
}
/*
 * hostif_sleep_request - request a device sleep-state change.
 *
 * SLP_SLEEP sends a sleep request frame to the firmware; SLP_ACTIVE
 * flags a wakeup and lets the read/write worker carry it out.  Any
 * other mode is rejected with an error message.
 */
static
void hostif_sleep_request(struct ks_wlan_private *priv,
			  enum sleep_mode_type mode)
{
	struct hostif_sleep_request *pp;

	switch (mode) {
	case SLP_SLEEP:
		pp = hostif_generic_request(sizeof(*pp), HIF_SLEEP_REQ);
		if (!pp)
			return;
		send_request_to_device(priv, pp, hif_align_size(sizeof(*pp)));
		break;
	case SLP_ACTIVE:
		atomic_set(&priv->sleepstatus.wakeup_request, 1);
		queue_delayed_work(priv->wq, &priv->rw_dwork, 1);
		break;
	default:
		netdev_err(priv->net_dev, "invalid mode %ld\n", (long)mode);
		break;
	}
}
/*
 * hostif_bss_scan_request - start a BSS scan.
 *
 * Fills in default dwell times, the full b/g channel list (channel 14
 * excluded in 11g-only mode) and, when @scan_ssid_len is 1..32, a
 * directed-SSID scan target.  Resets the AP list afterwards.
 */
static
void hostif_bss_scan_request(struct ks_wlan_private *priv,
			     unsigned long scan_type, u8 *scan_ssid,
			     u8 scan_ssid_len)
{
	/* 802.11b/g channels in the firmware's interleaved scan order. */
	static const u8 chan_order[14] = {
		1, 8, 2, 9, 3, 10, 4, 11, 5, 12, 6, 13, 7, 14
	};
	struct hostif_bss_scan_request *pp;

	pp = hostif_generic_request(sizeof(*pp), HIF_SCAN_REQ);
	if (!pp)
		return;

	pp->scan_type = scan_type;

	pp->ch_time_min = cpu_to_le32(110);	/* default value */
	pp->ch_time_max = cpu_to_le32(130);	/* default value */

	pp->channel_list.size =
	    (priv->reg.phy_type == D_11G_ONLY_MODE) ? 13 : 14;
	memcpy(pp->channel_list.body, chan_order, pp->channel_list.size);

	pp->ssid.size = 0;
	/* specified SSID SCAN */
	if (scan_ssid_len > 0 && scan_ssid_len <= 32) {
		pp->ssid.size = scan_ssid_len;
		memcpy(&pp->ssid.body[0], scan_ssid, scan_ssid_len);
	}

	send_request_to_device(priv, pp, hif_align_size(sizeof(*pp)));

	priv->aplist.size = 0;
	priv->scan_ind_count = 0;
}
/*
 * hostif_mic_failure_request - report TKIP MIC failure state to the
 * firmware (@failure_count failures, countermeasure @timer seconds).
 */
static
void hostif_mic_failure_request(struct ks_wlan_private *priv,
				u16 failure_count, u16 timer)
{
	struct hostif_mic_failure_request *pp;
	pp = hostif_generic_request(sizeof(*pp), HIF_MIC_FAILURE_REQ);
	if (!pp)
		return;
	pp->failure_count = cpu_to_le16(failure_count);
	pp->timer = cpu_to_le16(timer);
	send_request_to_device(priv, pp, hif_align_size(sizeof(*pp)));
}
/* Device I/O Receive indicate */
/*
 * devio_rec_ind - stash a received frame for the debug character device.
 *
 * Records the buffer pointer and size into the fixed-size ring
 * (DEVICE_STOCK_COUNT slots, rec_count wraps) and wakes any reader.
 * No-op unless the device node is open.
 */
static void devio_rec_ind(struct ks_wlan_private *priv, unsigned char *p,
			  unsigned int size)
{
	if (!priv->is_device_open)
		return;
	spin_lock(&priv->dev_read_lock);
	priv->dev_data[atomic_read(&priv->rec_count)] = p;
	priv->dev_size[atomic_read(&priv->rec_count)] = size;
	if (atomic_read(&priv->event_count) != DEVICE_STOCK_COUNT) {
		/* rx event count inc */
		atomic_inc(&priv->event_count);
	}
	atomic_inc(&priv->rec_count);
	/* wrap the ring index */
	if (atomic_read(&priv->rec_count) == DEVICE_STOCK_COUNT)
		atomic_set(&priv->rec_count, 0);
	wake_up_interruptible_all(&priv->devread_wait);
	spin_unlock(&priv->dev_read_lock);
}
/*
 * hostif_receive - entry point for a frame received from the hardware.
 *
 * Mirrors the frame to the debug device, then dispatches it when the
 * leading length word matches the actual received size.
 */
void hostif_receive(struct ks_wlan_private *priv, unsigned char *p,
		    unsigned int size)
{
	devio_rec_ind(priv, p, size);
	priv->rxp = p;
	priv->rx_size = size;
	if (get_word(priv) == priv->rx_size)
		hostif_event_check(priv);
}
/*
 * Push one WEP-related SME request down to the firmware MIB.
 * Key material (KEY1..KEY4) is only written while WPA is disabled; the key
 * index and privacy flag are forwarded unconditionally.
 */
static void hostif_sme_set_wep(struct ks_wlan_private *priv, int type)
{
	switch (type) {
	case SME_WEP_INDEX_REQUEST:
		hostif_mib_set_request_int(priv, DOT11_WEP_DEFAULT_KEY_ID,
					   priv->reg.wep_index);
		break;
	case SME_WEP_KEY1_REQUEST:
		if (priv->wpa.wpa_enabled)	/* WPA supersedes static WEP */
			return;
		hostif_mib_set_request_ostring(priv,
					       DOT11_WEP_DEFAULT_KEY_VALUE1,
					       &priv->reg.wep_key[0].val[0],
					       priv->reg.wep_key[0].size);
		break;
	case SME_WEP_KEY2_REQUEST:
		if (priv->wpa.wpa_enabled)
			return;
		hostif_mib_set_request_ostring(priv,
					       DOT11_WEP_DEFAULT_KEY_VALUE2,
					       &priv->reg.wep_key[1].val[0],
					       priv->reg.wep_key[1].size);
		break;
	case SME_WEP_KEY3_REQUEST:
		if (priv->wpa.wpa_enabled)
			return;
		hostif_mib_set_request_ostring(priv,
					       DOT11_WEP_DEFAULT_KEY_VALUE3,
					       &priv->reg.wep_key[2].val[0],
					       priv->reg.wep_key[2].size);
		break;
	case SME_WEP_KEY4_REQUEST:
		if (priv->wpa.wpa_enabled)
			return;
		hostif_mib_set_request_ostring(priv,
					       DOT11_WEP_DEFAULT_KEY_VALUE4,
					       &priv->reg.wep_key[3].val[0],
					       priv->reg.wep_key[3].size);
		break;
	case SME_WEP_FLAG_REQUEST:
		hostif_mib_set_request_bool(priv, DOT11_PRIVACY_INVOKED,
					    priv->reg.privacy_invoked);
		break;
	}
}
/*
 * Suite list payload written to the RSN cipher/auth MIB attributes:
 * a little-endian count followed by up to four suite identifiers.
 */
struct wpa_suite {
	__le16 size;
	unsigned char suite[4][CIPHER_ID_LEN];
} __packed;

/* Payload for the LOCAL_RSN_MODE MIB attribute. */
struct rsn_mode {
	__le32 rsn_mode;
	__le16 rsn_capability;
} __packed;
static void hostif_sme_set_rsn(struct ks_wlan_private *priv, int type)
{
struct wpa_suite wpa_suite;
struct rsn_mode rsn_mode;
size_t size;
u32 mode;
const u8 *buf = NULL;
memset(&wpa_suite, 0, sizeof(wpa_suite));
switch (type) {
case SME_RSN_UCAST_REQUEST:
wpa_suite.size = cpu_to_le16(1);
switch (priv->wpa.pairwise_suite) {
case IW_AUTH_CIPHER_NONE:
buf = (priv->wpa.version == IW_AUTH_WPA_VERSION_WPA2) ?
CIPHER_ID_WPA2_NONE : CIPHER_ID_WPA_NONE;
break;
case IW_AUTH_CIPHER_WEP40:
buf = (priv->wpa.version == IW_AUTH_WPA_VERSION_WPA2) ?
CIPHER_ID_WPA2_WEP40 : CIPHER_ID_WPA_WEP40;
break;
case IW_AUTH_CIPHER_TKIP:
buf = (priv->wpa.version == IW_AUTH_WPA_VERSION_WPA2) ?
CIPHER_ID_WPA2_TKIP : CIPHER_ID_WPA_TKIP;
break;
case IW_AUTH_CIPHER_CCMP:
buf = (priv->wpa.version == IW_AUTH_WPA_VERSION_WPA2) ?
CIPHER_ID_WPA2_CCMP : CIPHER_ID_WPA_CCMP;
break;
case IW_AUTH_CIPHER_WEP104:
buf = (priv->wpa.version == IW_AUTH_WPA_VERSION_WPA2) ?
CIPHER_ID_WPA2_WEP104 : CIPHER_ID_WPA_WEP104;
break;
}
if (buf)
memcpy(&wpa_suite.suite[0][0], buf, CIPHER_ID_LEN);
size = sizeof(wpa_suite.size) +
(CIPHER_ID_LEN * le16_to_cpu(wpa_suite.size));
hostif_mib_set_request_ostring(priv,
DOT11_RSN_CONFIG_UNICAST_CIPHER,
&wpa_suite, size);
break;
case SME_RSN_MCAST_REQUEST:
switch (priv->wpa.group_suite) {
case IW_AUTH_CIPHER_NONE:
buf = (priv->wpa.version == IW_AUTH_WPA_VERSION_WPA2) ?
CIPHER_ID_WPA2_NONE : CIPHER_ID_WPA_NONE;
break;
case IW_AUTH_CIPHER_WEP40:
buf = (priv->wpa.version == IW_AUTH_WPA_VERSION_WPA2) ?
CIPHER_ID_WPA2_WEP40 : CIPHER_ID_WPA_WEP40;
break;
case IW_AUTH_CIPHER_TKIP:
buf = (priv->wpa.version == IW_AUTH_WPA_VERSION_WPA2) ?
CIPHER_ID_WPA2_TKIP : CIPHER_ID_WPA_TKIP;
break;
case IW_AUTH_CIPHER_CCMP:
buf = (priv->wpa.version == IW_AUTH_WPA_VERSION_WPA2) ?
CIPHER_ID_WPA2_CCMP : CIPHER_ID_WPA_CCMP;
break;
case IW_AUTH_CIPHER_WEP104:
buf = (priv->wpa.version == IW_AUTH_WPA_VERSION_WPA2) ?
CIPHER_ID_WPA2_WEP104 : CIPHER_ID_WPA_WEP104;
break;
}
if (buf)
memcpy(&wpa_suite.suite[0][0], buf, CIPHER_ID_LEN);
hostif_mib_set_request_ostring(priv,
DOT11_RSN_CONFIG_MULTICAST_CIPHER,
&wpa_suite.suite[0][0],
CIPHER_ID_LEN);
break;
case SME_RSN_AUTH_REQUEST:
wpa_suite.size = cpu_to_le16(1);
switch (priv->wpa.key_mgmt_suite) {
case IW_AUTH_KEY_MGMT_802_1X:
buf = (priv->wpa.version == IW_AUTH_WPA_VERSION_WPA2) ?
KEY_MGMT_ID_WPA2_1X : KEY_MGMT_ID_WPA_1X;
break;
case IW_AUTH_KEY_MGMT_PSK:
buf = (priv->wpa.version == IW_AUTH_WPA_VERSION_WPA2) ?
KEY_MGMT_ID_WPA2_PSK : KEY_MGMT_ID_WPA_PSK;
break;
case 0:
buf = (priv->wpa.version == IW_AUTH_WPA_VERSION_WPA2) ?
KEY_MGMT_ID_WPA2_NONE : KEY_MGMT_ID_WPA_NONE;
break;
case 4:
buf = (priv->wpa.version == IW_AUTH_WPA_VERSION_WPA2) ?
KEY_MGMT_ID_WPA2_WPANONE :
KEY_MGMT_ID_WPA_WPANONE;
break;
}
if (buf)
memcpy(&wpa_suite.suite[0][0], buf, KEY_MGMT_ID_LEN);
size = sizeof(wpa_suite.size) +
(KEY_MGMT_ID_LEN * le16_to_cpu(wpa_suite.size));
hostif_mib_set_request_ostring(priv,
DOT11_RSN_CONFIG_AUTH_SUITE,
&wpa_suite, size);
break;
case SME_RSN_ENABLED_REQUEST:
hostif_mib_set_request_bool(priv, DOT11_RSN_ENABLED,
priv->wpa.rsn_enabled);
break;
case SME_RSN_MODE_REQUEST:
mode = (priv->wpa.version == IW_AUTH_WPA_VERSION_WPA2) ?
RSN_MODE_WPA2 :
(priv->wpa.version == IW_AUTH_WPA_VERSION_WPA) ?
RSN_MODE_WPA : RSN_MODE_NONE;
rsn_mode.rsn_mode = cpu_to_le32(mode);
rsn_mode.rsn_capability = cpu_to_le16(0);
hostif_mib_set_request_ostring(priv, LOCAL_RSN_MODE,
&rsn_mode, sizeof(rsn_mode));
break;
}
}
/*
 * Build the active transmit rate set from the configured PHY type and push
 * the matching connect request (pseudo-adhoc / infrastructure / adhoc) to
 * the firmware.
 */
static
void hostif_sme_mode_setup(struct ks_wlan_private *priv)
{
	unsigned char rate_size;
	unsigned char rate_octet[RATE_SET_MAX_SIZE];
	int i = 0;

	/* rate setting if rate segging is auto for changing phy_type (#94) */
	if (priv->reg.tx_rate == TX_RATE_FULL_AUTO) {
		if (priv->reg.phy_type == D_11B_ONLY_MODE) {
			priv->reg.rate_set.body[3] = TX_RATE_11M;
			priv->reg.rate_set.body[2] = TX_RATE_5M;
			priv->reg.rate_set.body[1] = TX_RATE_2M | BASIC_RATE;
			priv->reg.rate_set.body[0] = TX_RATE_1M | BASIC_RATE;
			priv->reg.rate_set.size = 4;
		} else {	/* D_11G_ONLY_MODE or D_11BG_COMPATIBLE_MODE */
			priv->reg.rate_set.body[11] = TX_RATE_54M;
			priv->reg.rate_set.body[10] = TX_RATE_48M;
			priv->reg.rate_set.body[9] = TX_RATE_36M;
			priv->reg.rate_set.body[8] = TX_RATE_18M;
			priv->reg.rate_set.body[7] = TX_RATE_9M;
			priv->reg.rate_set.body[6] = TX_RATE_24M | BASIC_RATE;
			priv->reg.rate_set.body[5] = TX_RATE_12M | BASIC_RATE;
			priv->reg.rate_set.body[4] = TX_RATE_6M | BASIC_RATE;
			priv->reg.rate_set.body[3] = TX_RATE_11M | BASIC_RATE;
			priv->reg.rate_set.body[2] = TX_RATE_5M | BASIC_RATE;
			priv->reg.rate_set.body[1] = TX_RATE_2M | BASIC_RATE;
			priv->reg.rate_set.body[0] = TX_RATE_1M | BASIC_RATE;
			priv->reg.rate_set.size = 12;
		}
	}

	/* rate mask by phy setting */
	if (priv->reg.phy_type == D_11B_ONLY_MODE) {
		/* Copy rates until the first non-11b rate is hit. */
		for (i = 0; i < priv->reg.rate_set.size; i++) {
			if (!is_11b_rate(priv->reg.rate_set.body[i]))
				break;
			/*
			 * NOTE(review): the BASIC_RATE bit is stripped from
			 * 5.5M/11M here, presumably because they may not be
			 * basic rates in 11b mode — confirm against the
			 * firmware specification.
			 */
			if ((priv->reg.rate_set.body[i] & RATE_MASK) >= TX_RATE_5M) {
				rate_octet[i] = priv->reg.rate_set.body[i] &
						RATE_MASK;
			} else {
				rate_octet[i] = priv->reg.rate_set.body[i];
			}
		}
	} else {	/* D_11G_ONLY_MODE or D_11BG_COMPATIBLE_MODE */
		for (i = 0; i < priv->reg.rate_set.size; i++) {
			if (!is_11bg_rate(priv->reg.rate_set.body[i]))
				break;
			/* OFDM extended rates are never marked basic. */
			if (is_ofdm_ext_rate(priv->reg.rate_set.body[i])) {
				rate_octet[i] = priv->reg.rate_set.body[i] &
						RATE_MASK;
			} else {
				rate_octet[i] = priv->reg.rate_set.body[i];
			}
		}
	}
	rate_size = i;
	if (rate_size == 0) {
		/* No usable configured rate: fall back to one basic rate. */
		if (priv->reg.phy_type == D_11G_ONLY_MODE)
			rate_octet[0] = TX_RATE_6M | BASIC_RATE;
		else
			rate_octet[0] = TX_RATE_2M | BASIC_RATE;
		rate_size = 1;
	}

	/* rate set update */
	priv->reg.rate_set.size = rate_size;
	memcpy(&priv->reg.rate_set.body[0], &rate_octet[0], rate_size);

	switch (priv->reg.operation_mode) {
	case MODE_PSEUDO_ADHOC:
		hostif_ps_adhoc_set_request(priv);
		break;
	case MODE_INFRASTRUCTURE:
		/* A valid configured BSSID selects the SET2 variant. */
		if (!is_valid_ether_addr((u8 *)priv->reg.bssid)) {
			hostif_infrastructure_set_request(priv,
							  HIF_INFRA_SET_REQ);
		} else {
			hostif_infrastructure_set_request(priv,
							  HIF_INFRA_SET2_REQ);
			netdev_dbg(priv->net_dev,
				   "Infra bssid = %pM\n", priv->reg.bssid);
		}
		break;
	case MODE_ADHOC:
		if (!is_valid_ether_addr((u8 *)priv->reg.bssid)) {
			hostif_adhoc_set_request(priv);
		} else {
			hostif_adhoc_set2_request(priv);
			netdev_dbg(priv->net_dev,
				   "Adhoc bssid = %pM\n", priv->reg.bssid);
		}
		break;
	default:
		break;
	}
}
/*
 * Program the firmware multicast filter from the net_device state:
 * promiscuous and all-multi/overflow cases select a coarse filter mode,
 * otherwise the explicit address list is uploaded.
 */
static
void hostif_sme_multicast_set(struct ks_wlan_private *priv)
{
	struct net_device *dev = priv->net_dev;
	int mc_count;
	struct netdev_hw_addr *ha;
	char set_address[NIC_MAX_MCAST_LIST * ETH_ALEN];
	int i = 0;

	spin_lock(&priv->multicast_spin);

	memset(set_address, 0, NIC_MAX_MCAST_LIST * ETH_ALEN);

	/* Promiscuous mode overrides any multicast list. */
	if (dev->flags & IFF_PROMISC) {
		hostif_mib_set_request_int(priv, LOCAL_MULTICAST_FILTER,
					   MCAST_FILTER_PROMISC);
		goto spin_unlock;
	}

	/* Too many addresses, or all-multi requested: accept all multicast. */
	if ((netdev_mc_count(dev) > NIC_MAX_MCAST_LIST) ||
	    (dev->flags & IFF_ALLMULTI)) {
		hostif_mib_set_request_int(priv, LOCAL_MULTICAST_FILTER,
					   MCAST_FILTER_MCASTALL);
		goto spin_unlock;
	}

	if (priv->sme_i.sme_flag & SME_MULTICAST) {
		/*
		 * NOTE(review): SME_MULTICAST appears to alternate the two
		 * halves of the update (address upload vs. filter enable)
		 * across successive invocations — confirm against the SME
		 * state machine.
		 */
		mc_count = netdev_mc_count(dev);
		netdev_for_each_mc_addr(ha, dev) {
			ether_addr_copy(&set_address[i * ETH_ALEN], ha->addr);
			i++;
		}
		priv->sme_i.sme_flag &= ~SME_MULTICAST;
		hostif_mib_set_request_ostring(priv, LOCAL_MULTICAST_ADDRESS,
					       &set_address[0],
					       ETH_ALEN * mc_count);
	} else {
		priv->sme_i.sme_flag |= SME_MULTICAST;
		hostif_mib_set_request_int(priv, LOCAL_MULTICAST_FILTER,
					   MCAST_FILTER_MCAST);
	}

spin_unlock:
	spin_unlock(&priv->multicast_spin);
}
static void hostif_sme_power_mgmt_set(struct ks_wlan_private *priv)
{
u32 mode, wake_up, receive_dtims;
if (priv->reg.power_mgmt != POWER_MGMT_SAVE1 &&
priv->reg.power_mgmt != POWER_MGMT_SAVE2) {
mode = POWER_ACTIVE;
wake_up = 0;
receive_dtims = 0;
} else {
mode = (priv->reg.operation_mode == MODE_INFRASTRUCTURE) ?
POWER_SAVE : POWER_ACTIVE;
wake_up = 0;
receive_dtims = (priv->reg.operation_mode == MODE_INFRASTRUCTURE &&
priv->reg.power_mgmt == POWER_MGMT_SAVE2);
}
hostif_power_mgmt_request(priv, mode, wake_up, receive_dtims);
}
/* Forward the sleep mode to hardware, but only for the two valid states. */
static void hostif_sme_sleep_set(struct ks_wlan_private *priv)
{
	if (priv->sleep_mode == SLP_SLEEP || priv->sleep_mode == SLP_ACTIVE)
		hostif_sleep_request(priv, priv->sleep_mode);
}
/*
 * Push one WPA key-related SME request down to the firmware MIB:
 * the privacy flag, the active TX key index, one of the four key slots,
 * or a key's receive sequence counter (TSC).
 */
static
void hostif_sme_set_key(struct ks_wlan_private *priv, int type)
{
	switch (type) {
	case SME_SET_FLAG:
		hostif_mib_set_request_bool(priv, DOT11_PRIVACY_INVOKED,
					    priv->reg.privacy_invoked);
		break;
	case SME_SET_TXKEY:
		hostif_mib_set_request_int(priv, DOT11_WEP_DEFAULT_KEY_ID,
					   priv->wpa.txkey);
		break;
	case SME_SET_KEY1:
		hostif_mib_set_request_ostring(priv,
					       DOT11_WEP_DEFAULT_KEY_VALUE1,
					       &priv->wpa.key[0].key_val[0],
					       priv->wpa.key[0].key_len);
		break;
	case SME_SET_KEY2:
		hostif_mib_set_request_ostring(priv,
					       DOT11_WEP_DEFAULT_KEY_VALUE2,
					       &priv->wpa.key[1].key_val[0],
					       priv->wpa.key[1].key_len);
		break;
	case SME_SET_KEY3:
		hostif_mib_set_request_ostring(priv,
					       DOT11_WEP_DEFAULT_KEY_VALUE3,
					       &priv->wpa.key[2].key_val[0],
					       priv->wpa.key[2].key_len);
		break;
	case SME_SET_KEY4:
		hostif_mib_set_request_ostring(priv,
					       DOT11_WEP_DEFAULT_KEY_VALUE4,
					       &priv->wpa.key[3].key_val[0],
					       priv->wpa.key[3].key_len);
		break;
	case SME_SET_PMK_TSC:
		/* key[0] holds the pairwise key; key[1]/key[2] the group keys */
		hostif_mib_set_request_ostring(priv, DOT11_PMK_TSC,
					       &priv->wpa.key[0].rx_seq[0],
					       WPA_RX_SEQ_LEN);
		break;
	case SME_SET_GMK1_TSC:
		hostif_mib_set_request_ostring(priv, DOT11_GMK1_TSC,
					       &priv->wpa.key[1].rx_seq[0],
					       WPA_RX_SEQ_LEN);
		break;
	case SME_SET_GMK2_TSC:
		hostif_mib_set_request_ostring(priv, DOT11_GMK2_TSC,
					       &priv->wpa.key[2].rx_seq[0],
					       WPA_RX_SEQ_LEN);
		break;
	}
}
/*
 * Upload the PMKSA cache to the firmware (LOCAL_PMK MIB attribute).
 *
 * The cache image is zeroed before filling so unused trailing entries never
 * leak stack contents, and the reported length is derived from the number
 * of entries actually copied (clamped at PMK_LIST_MAX) rather than the
 * separate priv->pmklist.size counter, so it can never describe more data
 * than the local buffer holds.
 */
static
void hostif_sme_set_pmksa(struct ks_wlan_private *priv)
{
	struct pmk_cache {
		__le16 size;
		struct {
			u8 bssid[ETH_ALEN];
			u8 pmkid[IW_PMKID_LEN];
		} __packed list[PMK_LIST_MAX];
	} __packed pmkcache;
	struct pmk *pmk;
	size_t size;
	unsigned int entries = 0;

	memset(&pmkcache, 0, sizeof(pmkcache));

	list_for_each_entry(pmk, &priv->pmklist.head, list) {
		if (entries >= PMK_LIST_MAX)
			break;
		ether_addr_copy(pmkcache.list[entries].bssid, pmk->bssid);
		memcpy(pmkcache.list[entries].pmkid, pmk->pmkid, IW_PMKID_LEN);
		entries++;
	}

	pmkcache.size = cpu_to_le16(entries);
	/* Wire length: the count field plus the filled entries. */
	size = sizeof(pmkcache.size) +
	       ((ETH_ALEN + IW_PMKID_LEN) * entries);
	hostif_mib_set_request_ostring(priv, LOCAL_PMK, &pmkcache, size);
}
/* execute sme */
/*
 * Central SME event dispatcher: translates one queued SME event into the
 * corresponding firmware request or driver state change.  Called from the
 * sme_work handler with events dequeued in FIFO order.
 */
static void hostif_sme_execute(struct ks_wlan_private *priv, int event)
{
	u16 failure;

	switch (event) {
	case SME_START:
		if (priv->dev_state == DEVICE_STATE_BOOT)
			hostif_mib_get_request(priv, DOT11_MAC_ADDRESS);
		break;
	case SME_MULTICAST_REQUEST:
		hostif_sme_multicast_set(priv);
		break;
	case SME_MACADDRESS_SET_REQUEST:
		hostif_mib_set_request_ostring(priv, LOCAL_CURRENTADDRESS,
					       &priv->eth_addr[0], ETH_ALEN);
		break;
	case SME_BSS_SCAN_REQUEST:
		hostif_bss_scan_request(priv, priv->reg.scan_type,
					priv->scan_ssid, priv->scan_ssid_len);
		break;
	case SME_POW_MNGMT_REQUEST:
		hostif_sme_power_mgmt_set(priv);
		break;
	case SME_PHY_INFO_REQUEST:
		hostif_phy_information_request(priv);
		break;
	case SME_MIC_FAILURE_REQUEST:
		failure = priv->wpa.mic_failure.failure;
		/* Only the first and second failures are reportable. */
		if (failure != 1 && failure != 2) {
			netdev_err(priv->net_dev,
				   "SME_MIC_FAILURE_REQUEST: failure count=%u error?\n",
				   failure);
			return;
		}
		/* failure is reported 0-based; timer only for the 2nd one */
		hostif_mic_failure_request(priv, failure - 1, (failure == 1) ?
					   0 : priv->wpa.mic_failure.counter);
		break;
	case SME_MIC_FAILURE_CONFIRM:
		if (priv->wpa.mic_failure.failure == 2) {
			if (priv->wpa.mic_failure.stop)
				priv->wpa.mic_failure.stop = 0;
			priv->wpa.mic_failure.failure = 0;
			hostif_start_request(priv, priv->reg.operation_mode);
		}
		break;
	case SME_GET_MAC_ADDRESS:
		if (priv->dev_state == DEVICE_STATE_BOOT)
			hostif_mib_get_request(priv, DOT11_PRODUCT_VERSION);
		break;
	case SME_GET_PRODUCT_VERSION:
		if (priv->dev_state == DEVICE_STATE_BOOT)
			priv->dev_state = DEVICE_STATE_PREINIT;
		break;
	case SME_STOP_REQUEST:
		hostif_stop_request(priv);
		break;
	case SME_RTS_THRESHOLD_REQUEST:
		hostif_mib_set_request_int(priv, DOT11_RTS_THRESHOLD,
					   priv->reg.rts);
		break;
	case SME_FRAGMENTATION_THRESHOLD_REQUEST:
		hostif_mib_set_request_int(priv, DOT11_FRAGMENTATION_THRESHOLD,
					   priv->reg.fragment);
		break;
	case SME_WEP_INDEX_REQUEST:
	case SME_WEP_KEY1_REQUEST:
	case SME_WEP_KEY2_REQUEST:
	case SME_WEP_KEY3_REQUEST:
	case SME_WEP_KEY4_REQUEST:
	case SME_WEP_FLAG_REQUEST:
		hostif_sme_set_wep(priv, event);
		break;
	case SME_RSN_UCAST_REQUEST:
	case SME_RSN_MCAST_REQUEST:
	case SME_RSN_AUTH_REQUEST:
	case SME_RSN_ENABLED_REQUEST:
	case SME_RSN_MODE_REQUEST:
		hostif_sme_set_rsn(priv, event);
		break;
	case SME_SET_FLAG:
	case SME_SET_TXKEY:
	case SME_SET_KEY1:
	case SME_SET_KEY2:
	case SME_SET_KEY3:
	case SME_SET_KEY4:
	case SME_SET_PMK_TSC:
	case SME_SET_GMK1_TSC:
	case SME_SET_GMK2_TSC:
		hostif_sme_set_key(priv, event);
		break;
	case SME_SET_PMKSA:
		hostif_sme_set_pmksa(priv);
		break;
	case SME_WPS_ENABLE_REQUEST:
		hostif_mib_set_request_int(priv, LOCAL_WPS_ENABLE,
					   priv->wps.wps_enabled);
		break;
	case SME_WPS_PROBE_REQUEST:
		hostif_mib_set_request_ostring(priv, LOCAL_WPS_PROBE_REQ,
					       priv->wps.ie, priv->wps.ielen);
		break;
	case SME_MODE_SET_REQUEST:
		hostif_sme_mode_setup(priv);
		break;
	case SME_SET_GAIN:
		hostif_mib_set_request_ostring(priv, LOCAL_GAIN,
					       &priv->gain, sizeof(priv->gain));
		break;
	case SME_GET_GAIN:
		hostif_mib_get_request(priv, LOCAL_GAIN);
		break;
	case SME_GET_EEPROM_CKSUM:
		priv->eeprom_checksum = EEPROM_FW_NOT_SUPPORT;	/* initialize */
		hostif_mib_get_request(priv, LOCAL_EEPROM_SUM);
		break;
	case SME_START_REQUEST:
		hostif_start_request(priv, priv->reg.operation_mode);
		break;
	case SME_START_CONFIRM:
		/* for power save */
		atomic_set(&priv->psstatus.snooze_guard, 0);
		atomic_set(&priv->psstatus.confirm_wait, 0);
		if (priv->dev_state == DEVICE_STATE_PREINIT)
			priv->dev_state = DEVICE_STATE_INIT;
		/* wake_up_interruptible_all(&priv->confirm_wait); */
		complete(&priv->confirm_wait);
		break;
	case SME_SLEEP_REQUEST:
		hostif_sme_sleep_set(priv);
		break;
	case SME_SET_REGION:
		hostif_mib_set_request_int(priv, LOCAL_REGION, priv->region);
		break;
	/* Confirm events that require no action are absorbed here. */
	case SME_MULTICAST_CONFIRM:
	case SME_BSS_SCAN_CONFIRM:
	case SME_POW_MNGMT_CONFIRM:
	case SME_PHY_INFO_CONFIRM:
	case SME_STOP_CONFIRM:
	case SME_RTS_THRESHOLD_CONFIRM:
	case SME_FRAGMENTATION_THRESHOLD_CONFIRM:
	case SME_WEP_INDEX_CONFIRM:
	case SME_WEP_KEY1_CONFIRM:
	case SME_WEP_KEY2_CONFIRM:
	case SME_WEP_KEY3_CONFIRM:
	case SME_WEP_KEY4_CONFIRM:
	case SME_WEP_FLAG_CONFIRM:
	case SME_RSN_UCAST_CONFIRM:
	case SME_RSN_MCAST_CONFIRM:
	case SME_RSN_AUTH_CONFIRM:
	case SME_RSN_ENABLED_CONFIRM:
	case SME_RSN_MODE_CONFIRM:
	case SME_MODE_SET_CONFIRM:
	case SME_TERMINATE:
	default:
		break;
	}
}
/*
 * SME work handler: dispatch the event at the queue head and reschedule
 * while more events remain.
 */
static void hostif_sme_work(struct work_struct *work)
{
	struct ks_wlan_private *priv =
		container_of(work, struct ks_wlan_private, sme_work);

	if (priv->dev_state < DEVICE_STATE_BOOT || cnt_smeqbody(priv) <= 0)
		return;

	hostif_sme_execute(priv, priv->sme_i.event_buff[priv->sme_i.qhead]);
	inc_smeqhead(priv);

	if (cnt_smeqbody(priv) > 0)
		schedule_work(&priv->sme_work);
}
/* send to Station Management Entity module */
void hostif_sme_enqueue(struct ks_wlan_private *priv, u16 event)
{
	/* Queue the event unless the ring is already full. */
	if (cnt_smeqbody(priv) >= SME_EVENT_BUFF_SIZE - 1) {
		/* in case of buffer overflow */
		netdev_err(priv->net_dev, "sme queue buffer overflow\n");
	} else {
		priv->sme_i.event_buff[priv->sme_i.qtail] = event;
		inc_smeqtail(priv);
	}

	schedule_work(&priv->sme_work);
}
/* Empty the scan result list and clear all cached AP entries. */
static inline void hostif_aplist_init(struct ks_wlan_private *priv)
{
	priv->aplist.size = 0;
	memset(priv->aplist.ap, 0, LOCAL_APLIST_MAX * sizeof(struct local_ap));
}
/* Reset connection status to the disconnected defaults. */
static inline void hostif_status_init(struct ks_wlan_private *priv)
{
	priv->connect_status = DISCONNECT_STATUS;
	priv->infra_status = 0;
	priv->current_rate = 4;	/* driver's historical default rate value */
}
/* Reset the SME event queue and arm its work handler. */
static inline void hostif_sme_init(struct ks_wlan_private *priv)
{
	priv->sme_i.sme_status = SME_IDLE;
	priv->sme_i.qhead = 0;
	priv->sme_i.qtail = 0;
	priv->sme_i.sme_flag = 0;
	spin_lock_init(&priv->sme_i.sme_spin);
	INIT_WORK(&priv->sme_work, hostif_sme_work);
}
/*
 * Reset all WPA state.  Zeroing the whole structure already clears
 * rsn_enabled (false), the MIC failure counter/timestamp and the stop
 * flag, so the previous field-by-field re-initialization was redundant.
 */
static inline void hostif_wpa_init(struct ks_wlan_private *priv)
{
	memset(&priv->wpa, 0, sizeof(priv->wpa));
}
/* Reset power-save bookkeeping and arm the wakeup work handler. */
static inline void hostif_power_save_init(struct ks_wlan_private *priv)
{
	atomic_set(&priv->psstatus.status, PS_NONE);
	atomic_set(&priv->psstatus.snooze_guard, 0);
	atomic_set(&priv->psstatus.confirm_wait, 0);
	init_completion(&priv->psstatus.wakeup_wait);
	INIT_WORK(&priv->wakeup_work, ks_wlan_hw_wakeup_task);
}
/* Clear the PMKSA cache and initialize every entry's list head. */
static inline void hostif_pmklist_init(struct ks_wlan_private *priv)
{
	int idx;

	memset(&priv->pmklist, 0, sizeof(priv->pmklist));
	INIT_LIST_HEAD(&priv->pmklist.head);
	for (idx = 0; idx < PMK_LIST_MAX; idx++)
		INIT_LIST_HEAD(&priv->pmklist.pmk[idx].list);
}
/* Zero the device I/O event and receive counters. */
static inline void hostif_counters_init(struct ks_wlan_private *priv)
{
	atomic_set(&priv->event_count, 0);
	atomic_set(&priv->rec_count, 0);
	priv->dev_count = 0;
}
/*
 * Initialize all host-interface state (AP list, status, locks, counters,
 * power save, WPA, PMKSA cache and the SME queue).  Always returns 0.
 */
int hostif_init(struct ks_wlan_private *priv)
{
	hostif_aplist_init(priv);
	hostif_status_init(priv);

	spin_lock_init(&priv->multicast_spin);

	spin_lock_init(&priv->dev_read_lock);
	init_waitqueue_head(&priv->devread_wait);

	hostif_counters_init(priv);
	hostif_power_save_init(priv);
	hostif_wpa_init(priv);
	hostif_pmklist_init(priv);
	hostif_sme_init(priv);

	return 0;
}
/* Tear down host-interface state; waits for any pending SME work. */
void hostif_exit(struct ks_wlan_private *priv)
{
	cancel_work_sync(&priv->sme_work);
}
| linux-master | drivers/staging/ks7010/ks_hostif.c |
// SPDX-License-Identifier: GPL-2.0
/*
* Driver for KeyStream, KS7010 based SDIO cards.
*
* Copyright (C) 2006-2008 KeyStream Corp.
* Copyright (C) 2009 Renesas Technology Corp.
* Copyright (C) 2016 Sang Engineering, Wolfram Sang
*/
#include <linux/atomic.h>
#include <linux/firmware.h>
#include <linux/jiffies.h>
#include <linux/mmc/card.h>
#include <linux/mmc/sdio_func.h>
#include <linux/module.h>
#include <linux/workqueue.h>
#include "ks_wlan.h"
#include "ks_hostif.h"
#define ROM_FILE "ks7010sd.rom"
/* SDIO KeyStream vendor and device */
#define SDIO_VENDOR_ID_KS_CODE_A 0x005b
#define SDIO_VENDOR_ID_KS_CODE_B 0x0023
/* Older sources suggest earlier versions were named 7910 or 79xx */
#define SDIO_DEVICE_ID_KS_7010 0x7910
/* Read/Write Status Register */
#define READ_STATUS_REG 0x000000
#define WRITE_STATUS_REG 0x00000C
enum reg_status_type {
REG_STATUS_BUSY,
REG_STATUS_IDLE
};
/* Read Index Register */
#define READ_INDEX_REG 0x000004
/* Read Data Size Register */
#define READ_DATA_SIZE_REG 0x000008
/* Write Index Register */
#define WRITE_INDEX_REG 0x000010
/*
* Write Status/Read Data Size Register
* for network packet (less than 2048 bytes data)
*/
#define WSTATUS_RSIZE_REG 0x000014
/* Write Status Register value */
#define WSTATUS_MASK 0x80
/* Read Data Size Register value [10:4] */
#define RSIZE_MASK 0x7F
/* ARM to SD interrupt Enable */
#define INT_ENABLE_REG 0x000020
/* ARM to SD interrupt Pending */
#define INT_PENDING_REG 0x000024
#define INT_GCR_B BIT(7)
#define INT_GCR_A BIT(6)
#define INT_WRITE_STATUS BIT(5)
#define INT_WRITE_INDEX BIT(4)
#define INT_WRITE_SIZE BIT(3)
#define INT_READ_STATUS BIT(2)
#define INT_READ_INDEX BIT(1)
#define INT_READ_SIZE BIT(0)
/* General Communication Register A */
#define GCR_A_REG 0x000028
enum gen_com_reg_a {
GCR_A_INIT,
GCR_A_REMAP,
GCR_A_RUN
};
/* General Communication Register B */
#define GCR_B_REG 0x00002C
enum gen_com_reg_b {
GCR_B_ACTIVE,
GCR_B_DOZE
};
/* Wakeup Register */
#define WAKEUP_REG 0x008018
#define WAKEUP_REQ 0x5a
/* AHB Data Window 0x010000-0x01FFFF */
#define DATA_WINDOW 0x010000
#define WINDOW_SIZE (64 * 1024)
#define KS7010_IRAM_ADDRESS 0x06000000
#define KS7010_IO_BLOCK_SIZE 512
/**
* struct ks_sdio_card - SDIO device data.
*
* Structure is used as the &struct sdio_func private data.
*
* @func: Pointer to the SDIO function device.
* @priv: Pointer to the &struct net_device private data.
*/
struct ks_sdio_card {
struct sdio_func *func;
struct ks_wlan_private *priv;
};
/* Fetch the SDIO function backing this wlan device. */
static struct sdio_func *ks7010_to_func(struct ks_wlan_private *priv)
{
	return ((struct ks_sdio_card *)priv->if_hw)->func;
}
/* Read single byte from device address into byte (CMD52) */
static int ks7010_sdio_readb(struct ks_wlan_private *priv,
			     u32 address, u8 *byte)
{
	int err;

	*byte = sdio_readb(ks7010_to_func(priv), address, &err);

	return err;
}
/* Read length bytes from device address into buffer (CMD53) */
static int ks7010_sdio_read(struct ks_wlan_private *priv, u32 address,
			    u8 *buffer, unsigned int length)
{
	return sdio_memcpy_fromio(ks7010_to_func(priv), buffer, address,
				  length);
}
/* Write single byte to device address (CMD52) */
static int ks7010_sdio_writeb(struct ks_wlan_private *priv,
			      u32 address, u8 byte)
{
	int err;

	sdio_writeb(ks7010_to_func(priv), byte, address, &err);

	return err;
}
/* Write length bytes to device address from buffer (CMD53) */
static int ks7010_sdio_write(struct ks_wlan_private *priv, u32 address,
			     u8 *buffer, unsigned int length)
{
	return sdio_memcpy_toio(ks7010_to_func(priv), address, buffer, length);
}
/*
 * Handle a pending doze request: put the chip into DOZE (GCR-B) when it is
 * currently awake, then publish the resulting state in sleep_mode.
 */
static void ks_wlan_hw_sleep_doze_request(struct ks_wlan_private *priv)
{
	/* clear request */
	atomic_set(&priv->sleepstatus.doze_request, 0);

	if (atomic_read(&priv->sleepstatus.status) == 0) {
		if (ks7010_sdio_writeb(priv, GCR_B_REG, GCR_B_DOZE)) {
			netdev_err(priv->net_dev, "write GCR_B_REG\n");
		} else {
			atomic_set(&priv->sleepstatus.status, 1);
			priv->last_doze = jiffies;
		}
	}

	priv->sleep_mode = atomic_read(&priv->sleepstatus.status);
}
/*
 * Handle a pending wakeup request: pull the chip out of DOZE via the
 * WAKEUP register when it is currently sleeping, then publish the
 * resulting state in sleep_mode.
 */
static void ks_wlan_hw_sleep_wakeup_request(struct ks_wlan_private *priv)
{
	/* clear request */
	atomic_set(&priv->sleepstatus.wakeup_request, 0);

	if (atomic_read(&priv->sleepstatus.status) == 1) {
		if (ks7010_sdio_writeb(priv, WAKEUP_REG, WAKEUP_REQ)) {
			netdev_err(priv->net_dev, "write WAKEUP_REG\n");
		} else {
			atomic_set(&priv->sleepstatus.status, 0);
			priv->last_wakeup = jiffies;
			++priv->wakeup_count;
		}
	}

	priv->sleep_mode = atomic_read(&priv->sleepstatus.status);
}
/* Request a wakeup from power-save SNOOZE via the WAKEUP register. */
void ks_wlan_hw_wakeup_request(struct ks_wlan_private *priv)
{
	if (atomic_read(&priv->psstatus.status) != PS_SNOOZE)
		return;

	if (ks7010_sdio_writeb(priv, WAKEUP_REG, WAKEUP_REQ))
		netdev_err(priv->net_dev, "write WAKEUP_REG\n");

	priv->last_wakeup = jiffies;
	++priv->wakeup_count;
}
/*
 * Try to put the chip into SNOOZE power-save state.  Bails out unless the
 * driver is in an associated, idle infrastructure state; retries via the
 * rw worker while confirms, queued TX or pending interrupts remain.
 */
static void _ks_wlan_hw_power_save(struct ks_wlan_private *priv)
{
	u8 byte;
	int ret;

	if (priv->reg.power_mgmt == POWER_MGMT_ACTIVE)
		return;

	if (priv->reg.operation_mode != MODE_INFRASTRUCTURE)
		return;

	if (!is_connect_status(priv->connect_status))
		return;

	if (priv->dev_state != DEVICE_STATE_SLEEP)
		return;

	if (atomic_read(&priv->psstatus.status) == PS_SNOOZE)
		return;

	netdev_dbg(priv->net_dev,
		   "STATUS:\n"
		   "- psstatus.status = %d\n"
		   "- psstatus.confirm_wait = %d\n"
		   "- psstatus.snooze_guard = %d\n"
		   "- txq_count = %d\n",
		   atomic_read(&priv->psstatus.status),
		   atomic_read(&priv->psstatus.confirm_wait),
		   atomic_read(&priv->psstatus.snooze_guard),
		   txq_count(priv));

	/* Outstanding confirms or queued TX: try again from the worker. */
	if (atomic_read(&priv->psstatus.confirm_wait) ||
	    atomic_read(&priv->psstatus.snooze_guard) ||
	    txq_has_space(priv)) {
		queue_delayed_work(priv->wq, &priv->rw_dwork, 0);
		return;
	}

	ret = ks7010_sdio_readb(priv, INT_PENDING_REG, &byte);
	if (ret) {
		netdev_err(priv->net_dev, "read INT_PENDING_REG\n");
		goto queue_delayed_work;
	}
	/* Device interrupts still pending: not safe to doze yet. */
	if (byte)
		goto queue_delayed_work;

	ret = ks7010_sdio_writeb(priv, GCR_B_REG, GCR_B_DOZE);
	if (ret) {
		netdev_err(priv->net_dev, "write GCR_B_REG\n");
		goto queue_delayed_work;
	}
	atomic_set(&priv->psstatus.status, PS_SNOOZE);

	return;

queue_delayed_work:
	queue_delayed_work(priv->wq, &priv->rw_dwork, 1);
}
/*
 * Kick the read/write worker; the actual power-save decision is made by
 * _ks_wlan_hw_power_save() from that work context.  Always returns 0.
 */
int ks_wlan_hw_power_save(struct ks_wlan_private *priv)
{
	queue_delayed_work(priv->wq, &priv->rw_dwork, 1);
	return 0;
}
/*
 * Append one prepared frame to the TX ring.  On failure the frame buffer
 * is freed and the completion handler (if any) is still invoked so the
 * caller's skb accounting stays balanced.
 */
static int enqueue_txdev(struct ks_wlan_private *priv, unsigned char *p,
			 unsigned long size,
			 void (*complete_handler)(struct ks_wlan_private *priv,
						  struct sk_buff *skb),
			 struct sk_buff *skb)
{
	struct tx_device_buffer *slot;
	int ret = 0;

	if (priv->dev_state < DEVICE_STATE_BOOT) {
		ret = -EPERM;
	} else if (txq_count(priv) >= TX_DEVICE_BUFF_SIZE - 1) {
		netdev_err(priv->net_dev, "tx buffer overflow\n");
		ret = -EOVERFLOW;
	}

	if (ret) {
		kfree(p);
		if (complete_handler)
			complete_handler(priv, skb);
		return ret;
	}

	slot = &priv->tx_dev.tx_dev_buff[priv->tx_dev.qtail];
	slot->sendp = p;
	slot->size = size;
	slot->complete_handler = complete_handler;
	slot->skb = skb;
	inc_txqtail(priv);

	return 0;
}
/* write data */
/*
 * Copy one frame into the device's data window and flag it busy.  Frames
 * whose event code is outside the request range are dropped (returning 0,
 * as the original code did, so callers treat the drop as handled).
 *
 * Fix: the error log previously printed the raw little-endian hdr->event
 * field; it now prints the CPU-order value.
 */
static int write_to_device(struct ks_wlan_private *priv, u8 *buffer,
			   unsigned long size)
{
	struct hostif_hdr *hdr = (struct hostif_hdr *)buffer;
	u16 event = le16_to_cpu(hdr->event);
	int ret;

	if (event < HIF_DATA_REQ || event > HIF_REQ_MAX) {
		netdev_err(priv->net_dev, "unknown event=%04X\n", event);
		return 0;
	}

	ret = ks7010_sdio_write(priv, DATA_WINDOW, buffer, size);
	if (ret) {
		netdev_err(priv->net_dev, "write DATA_WINDOW\n");
		return ret;
	}

	ret = ks7010_sdio_writeb(priv, WRITE_STATUS_REG, REG_STATUS_BUSY);
	if (ret) {
		netdev_err(priv->net_dev, "write WRITE_STATUS_REG\n");
		return ret;
	}

	return 0;
}
/*
 * Send the frame at the head of the TX ring to the device, then release
 * its buffer and invoke the completion handler.  Reschedules the rw
 * worker while more frames remain queued.
 */
static void tx_device_task(struct ks_wlan_private *priv)
{
	struct tx_device_buffer *sp;
	int ret;

	/* Nothing to do while the queue is empty or the chip is snoozing. */
	if (!txq_has_space(priv) ||
	    atomic_read(&priv->psstatus.status) == PS_SNOOZE)
		return;

	sp = &priv->tx_dev.tx_dev_buff[priv->tx_dev.qhead];
	if (priv->dev_state >= DEVICE_STATE_BOOT) {
		ret = write_to_device(priv, sp->sendp, sp->size);
		if (ret) {
			netdev_err(priv->net_dev,
				   "write_to_device error !!(%d)\n", ret);
			/* Keep the frame queued and retry from the worker. */
			queue_delayed_work(priv->wq, &priv->rw_dwork, 1);
			return;
		}
	}
	kfree(sp->sendp);
	if (sp->complete_handler)	/* TX Complete */
		(*sp->complete_handler)(priv, sp->skb);
	inc_txqhead(priv);

	if (txq_has_space(priv))
		queue_delayed_work(priv->wq, &priv->rw_dwork, 0);
}
int ks_wlan_hw_tx(struct ks_wlan_private *priv, void *p, unsigned long size,
void (*complete_handler)(struct ks_wlan_private *priv,
struct sk_buff *skb),
struct sk_buff *skb)
{
int result;
struct hostif_hdr *hdr;
hdr = (struct hostif_hdr *)p;
if (le16_to_cpu(hdr->event) < HIF_DATA_REQ ||
le16_to_cpu(hdr->event) > HIF_REQ_MAX) {
netdev_err(priv->net_dev, "unknown event=%04X\n", hdr->event);
return 0;
}
/* add event to hostt buffer */
priv->hostt.buff[priv->hostt.qtail] = le16_to_cpu(hdr->event);
priv->hostt.qtail = (priv->hostt.qtail + 1) % SME_EVENT_BUFF_SIZE;
spin_lock(&priv->tx_dev.tx_dev_lock);
result = enqueue_txdev(priv, p, size, complete_handler, skb);
spin_unlock(&priv->tx_dev.tx_dev_lock);
if (txq_has_space(priv))
queue_delayed_work(priv->wq, &priv->rw_dwork, 0);
return result;
}
/*
 * RX bottom half: hand the frame at the ring head to the host interface
 * layer and reschedule while more frames remain.
 */
static void rx_event_task(struct tasklet_struct *t)
{
	struct ks_wlan_private *priv = from_tasklet(priv, t, rx_bh_task);
	struct rx_device_buffer *entry;

	if (!rxq_has_space(priv) || priv->dev_state < DEVICE_STATE_BOOT)
		return;

	entry = &priv->rx_dev.rx_dev_buff[priv->rx_dev.qhead];
	hostif_receive(priv, entry->data, entry->size);
	inc_rxqhead(priv);

	if (rxq_has_space(priv))
		tasklet_schedule(&priv->rx_bh_task);
}
/*
 * Pull one frame out of the device's data window into the RX ring, release
 * the device-side read status, and schedule the bottom-half tasklet.
 */
static void ks_wlan_hw_rx(struct ks_wlan_private *priv, size_t size)
{
	int ret;
	struct rx_device_buffer *rx_buffer;
	struct hostif_hdr *hdr;
	u16 event = 0;

	/* receive data */
	if (rxq_count(priv) >= (RX_DEVICE_BUFF_SIZE - 1)) {
		netdev_err(priv->net_dev, "rx buffer overflow\n");
		return;
	}
	rx_buffer = &priv->rx_dev.rx_dev_buff[priv->rx_dev.qtail];

	ret = ks7010_sdio_read(priv, DATA_WINDOW, &rx_buffer->data[0],
			       hif_align_size(size));
	if (ret)
		return;

	/* length check */
	/* NOTE(review): 2046 presumably derives from the 2048-byte network
	 * packet limit mentioned at WSTATUS_RSIZE_REG — confirm.
	 */
	if (size > 2046 || size == 0) {
#ifdef DEBUG
		print_hex_dump_bytes("INVALID DATA dump: ",
				     DUMP_PREFIX_OFFSET,
				     rx_buffer->data, 32);
#endif
		/* Release the device-side read status even on bad frames. */
		ret = ks7010_sdio_writeb(priv, READ_STATUS_REG,
					 REG_STATUS_IDLE);
		if (ret)
			netdev_err(priv->net_dev, "write READ_STATUS_REG\n");

		/* length check fail */
		return;
	}

	hdr = (struct hostif_hdr *)&rx_buffer->data[0];
	rx_buffer->size = le16_to_cpu(hdr->size) + sizeof(hdr->size);
	event = le16_to_cpu(hdr->event);
	inc_rxqtail(priv);

	ret = ks7010_sdio_writeb(priv, READ_STATUS_REG, REG_STATUS_IDLE);
	if (ret)
		netdev_err(priv->net_dev, "write READ_STATUS_REG\n");

	/* A confirm event releases one pending power-save confirm wait. */
	if (atomic_read(&priv->psstatus.confirm_wait) && is_hif_conf(event)) {
		netdev_dbg(priv->net_dev, "IS_HIF_CONF true !!\n");
		atomic_dec(&priv->psstatus.confirm_wait);
	}

	tasklet_schedule(&priv->rx_bh_task);
}
/*
 * Deferred read/write worker: enforces post-DOZE/WAKEUP settling delays,
 * handles power-state transitions first and, once the chip is awake,
 * drains pending RX data and TX frames.
 */
static void ks7010_rw_function(struct work_struct *work)
{
	struct ks_wlan_private *priv = container_of(work,
						    struct ks_wlan_private,
						    rw_dwork.work);
	struct sdio_func *func = ks7010_to_func(priv);
	u8 byte;
	int ret;

	/* wait after DOZE */
	if (time_after(priv->last_doze + msecs_to_jiffies(30), jiffies)) {
		netdev_dbg(priv->net_dev, "wait after DOZE\n");
		queue_delayed_work(priv->wq, &priv->rw_dwork, 1);
		return;
	}

	/* wait after WAKEUP */
	while (time_after(priv->last_wakeup + msecs_to_jiffies(30), jiffies)) {
		netdev_dbg(priv->net_dev, "wait after WAKEUP\n");
		dev_info(&func->dev, "wake: %lu %lu\n",
			 priv->last_wakeup + msecs_to_jiffies(30), jiffies);
		msleep(30);
	}

	sdio_claim_host(func);

	/* power save wakeup */
	if (atomic_read(&priv->psstatus.status) == PS_SNOOZE) {
		if (txq_has_space(priv)) {
			ks_wlan_hw_wakeup_request(priv);
			queue_delayed_work(priv->wq, &priv->rw_dwork, 1);
		}
		goto release_host;
	}

	/* sleep mode doze */
	if (atomic_read(&priv->sleepstatus.doze_request) == 1) {
		ks_wlan_hw_sleep_doze_request(priv);
		goto release_host;
	}
	/* sleep mode wakeup */
	if (atomic_read(&priv->sleepstatus.wakeup_request) == 1) {
		ks_wlan_hw_sleep_wakeup_request(priv);
		goto release_host;
	}

	/* read (WriteStatus/ReadDataSize FN1:00_0014) */
	ret = ks7010_sdio_readb(priv, WSTATUS_RSIZE_REG, &byte);
	if (ret) {
		netdev_err(priv->net_dev, "read WSTATUS_RSIZE_REG psstatus=%d\n",
			   atomic_read(&priv->psstatus.status));
		goto release_host;
	}

	if (byte & RSIZE_MASK) {	/* Read schedule */
		/* RSIZE is in 16-byte units (bits [10:4] of the size). */
		ks_wlan_hw_rx(priv, (size_t)((byte & RSIZE_MASK) << 4));
	}
	if ((byte & WSTATUS_MASK))
		tx_device_task(priv);

	_ks_wlan_hw_power_save(priv);

release_host:
	sdio_release_host(func);
}
/*
 * SDIO interrupt handler.  Acknowledges power-save wakeup transitions via
 * GCR-B, then drains RX data and services TX until the device reports no
 * more pending read data.  Always ends by kicking the rw worker, except
 * when TX is deferred to a wakeup request.
 */
static void ks_sdio_interrupt(struct sdio_func *func)
{
	int ret;
	struct ks_sdio_card *card;
	struct ks_wlan_private *priv;
	u8 status, rsize, byte;

	card = sdio_get_drvdata(func);
	priv = card->priv;

	if (priv->dev_state < DEVICE_STATE_BOOT)
		goto queue_delayed_work;

	ret = ks7010_sdio_readb(priv, INT_PENDING_REG, &status);
	if (ret) {
		netdev_err(priv->net_dev, "read INT_PENDING_REG\n");
		goto queue_delayed_work;
	}

	/* schedule task for interrupt status */
	/* bit7 -> Write General Communication B register */
	/* read (General Communication B register) */
	/* bit5 -> Write Status Idle */
	/* bit2 -> Read Status Busy */
	if (status & INT_GCR_B ||
	    atomic_read(&priv->psstatus.status) == PS_SNOOZE) {
		ret = ks7010_sdio_readb(priv, GCR_B_REG, &byte);
		if (ret) {
			netdev_err(priv->net_dev, "read GCR_B_REG\n");
			goto queue_delayed_work;
		}
		if (byte == GCR_B_ACTIVE) {
			if (atomic_read(&priv->psstatus.status) == PS_SNOOZE) {
				atomic_set(&priv->psstatus.status, PS_WAKEUP);
				priv->wakeup_count = 0;
			}
			/* Wake waiters blocked on the power-save transition. */
			complete(&priv->psstatus.wakeup_wait);
		}
	}

	do {
		/* read (WriteStatus/ReadDataSize FN1:00_0014) */
		ret = ks7010_sdio_readb(priv, WSTATUS_RSIZE_REG, &byte);
		if (ret) {
			netdev_err(priv->net_dev, "read WSTATUS_RSIZE_REG\n");
			goto queue_delayed_work;
		}
		rsize = byte & RSIZE_MASK;
		if (rsize != 0)	/* Read schedule */
			ks_wlan_hw_rx(priv, (size_t)(rsize << 4));

		if (byte & WSTATUS_MASK) {
			if (atomic_read(&priv->psstatus.status) == PS_SNOOZE) {
				if (txq_has_space(priv)) {
					ks_wlan_hw_wakeup_request(priv);
					queue_delayed_work(priv->wq,
							   &priv->rw_dwork, 1);
					/* rw worker resumes TX after wakeup */
					return;
				}
			} else {
				tx_device_task(priv);
			}
		}
	} while (rsize);

queue_delayed_work:
	queue_delayed_work(priv->wq, &priv->rw_dwork, 0);
}
/* Reset the TX/RX queue indices, initialize their locks and the RX tasklet. */
static int trx_device_init(struct ks_wlan_private *priv)
{
	/* empty queue: head == tail */
	priv->tx_dev.qhead = 0;
	priv->tx_dev.qtail = 0;
	spin_lock_init(&priv->tx_dev.tx_dev_lock);

	priv->rx_dev.qhead = 0;
	priv->rx_dev.qtail = 0;
	spin_lock_init(&priv->rx_dev.rx_dev_lock);

	tasklet_setup(&priv->rx_bh_task, rx_event_task);

	return 0;
}
/* Drain the TX queue (freeing each pending frame) and kill the RX tasklet. */
static void trx_device_exit(struct ks_wlan_private *priv)
{
	/* tx buffer clear */
	while (txq_has_space(priv)) {
		struct tx_device_buffer *entry =
			&priv->tx_dev.tx_dev_buff[priv->tx_dev.qhead];

		kfree(entry->sendp);
		if (entry->complete_handler)	/* TX Complete */
			(*entry->complete_handler)(priv, entry->skb);
		inc_txqhead(priv);
	}

	tasklet_kill(&priv->rx_bh_task);
}
/*
 * ks7010_sdio_update_index() - point the device read/write window at @index.
 * @priv: device private data.
 * @index: window index to program into WRITE_INDEX_REG and READ_INDEX_REG.
 *
 * Return: 0 on success, -ENOMEM on allocation failure, or the error from
 * ks7010_sdio_write().
 */
static int ks7010_sdio_update_index(struct ks_wlan_private *priv, u32 index)
{
	int ret;
	unsigned char *data_buf;

	/* DMA-safe bounce buffer holding the little chunk we write twice. */
	data_buf = kmemdup(&index, sizeof(u32), GFP_KERNEL);
	if (!data_buf)
		return -ENOMEM;

	ret = ks7010_sdio_write(priv, WRITE_INDEX_REG, data_buf, sizeof(index));
	if (ret)
		goto err_free_data_buf;

	ret = ks7010_sdio_write(priv, READ_INDEX_REG, data_buf, sizeof(index));

err_free_data_buf:
	/*
	 * Bug fix: the original returned 0 on success without freeing
	 * data_buf, leaking one allocation per call. Free on every path.
	 */
	kfree(data_buf);

	return ret;
}
#define ROM_BUFF_SIZE (64 * 1024)
/*
 * ks7010_sdio_data_compare() - read back @size bytes at @address and verify
 * they match @data.
 * @priv: device private data.
 * @address: device address to read from.
 * @data: expected contents.
 * @size: number of bytes to compare (must be <= ROM_BUFF_SIZE).
 *
 * Return: 0 when the contents match, -ENOMEM on allocation failure,
 * -EIO on mismatch, or the error from ks7010_sdio_read().
 */
static int ks7010_sdio_data_compare(struct ks_wlan_private *priv, u32 address,
				    u8 *data, unsigned int size)
{
	int ret;
	u8 *read_buf;

	read_buf = kmalloc(ROM_BUFF_SIZE, GFP_KERNEL);
	if (!read_buf)
		return -ENOMEM;

	ret = ks7010_sdio_read(priv, address, read_buf, size);
	if (ret)
		goto err_free_read_buf;

	if (memcmp(data, read_buf, size) != 0) {
		ret = -EIO;
		netdev_err(priv->net_dev, "data compare error (%d)\n", ret);
	}

err_free_read_buf:
	/*
	 * Bug fix: the original returned 0 on the match path without
	 * freeing read_buf, leaking 64KiB per successful call. Free on
	 * every path.
	 */
	kfree(read_buf);

	return ret;
}
/*
 * ks7010_copy_firmware() - copy the firmware image into device IRAM.
 * @priv: device private data.
 * @fw_entry: firmware blob obtained via request_firmware().
 *
 * Streams the image in ROM_BUFF_SIZE chunks through the data window,
 * verifying each chunk by reading it back, then writes GCR_A_REMAP so
 * the device starts executing from IRAM.
 *
 * Return: 0 on success, negative errno on allocation/transfer/verify
 * failure.
 */
static int ks7010_copy_firmware(struct ks_wlan_private *priv,
				const struct firmware *fw_entry)
{
	unsigned int length;
	unsigned int size;
	unsigned int offset;
	unsigned int n = 0;	/* bytes copied so far */
	u8 *rom_buf;
	int ret;

	rom_buf = kmalloc(ROM_BUFF_SIZE, GFP_KERNEL);
	if (!rom_buf)
		return -ENOMEM;

	length = fw_entry->size;

	do {
		/* next chunk: a full buffer or whatever remains */
		if (length >= ROM_BUFF_SIZE) {
			size = ROM_BUFF_SIZE;
			length = length - ROM_BUFF_SIZE;
		} else {
			size = length;
			length = 0;
		}
		if (size == 0)
			break;

		memcpy(rom_buf, fw_entry->data + n, size);

		/* aim the data window at the next IRAM destination */
		offset = n;
		ret = ks7010_sdio_update_index(priv,
					       KS7010_IRAM_ADDRESS + offset);
		if (ret)
			goto free_rom_buf;

		ret = ks7010_sdio_write(priv, DATA_WINDOW, rom_buf, size);
		if (ret)
			goto free_rom_buf;

		/* read back and verify the chunk just written */
		ret = ks7010_sdio_data_compare(priv,
					       DATA_WINDOW, rom_buf, size);
		if (ret)
			goto free_rom_buf;

		n += size;
	} while (size);

	/* image in place: tell the chip to remap and boot from IRAM */
	ret = ks7010_sdio_writeb(priv, GCR_A_REG, GCR_A_REMAP);

free_rom_buf:
	kfree(rom_buf);

	return ret;
}
/*
 * ks7010_upload_firmware() - load ROM_FILE into the card and wait for the
 * MAC firmware to start.
 * @card: SDIO card wrapper holding the private data.
 *
 * Return: 0 on success, -EBUSY when firmware is already running, -EIO
 * when the firmware fails to start within ~500ms (50 * 10ms polls), or
 * another negative errno from the transfer helpers.
 */
static int ks7010_upload_firmware(struct ks_sdio_card *card)
{
	struct ks_wlan_private *priv = card->priv;
	struct sdio_func *func = ks7010_to_func(priv);
	unsigned int n;
	u8 byte = 0;
	int ret;
	const struct firmware *fw_entry = NULL;

	sdio_claim_host(func);

	/* Firmware running ? */
	ret = ks7010_sdio_readb(priv, GCR_A_REG, &byte);
	if (ret)
		goto release_host;
	if (byte == GCR_A_RUN) {
		netdev_dbg(priv->net_dev, "MAC firmware running ...\n");
		ret = -EBUSY;
		goto release_host;
	}

	ret = request_firmware(&fw_entry, ROM_FILE,
			       &func->dev);
	if (ret)
		goto release_host;

	ret = ks7010_copy_firmware(priv, fw_entry);
	if (ret)
		goto release_firmware;

	/* Firmware running check: poll GCR_A up to 50 times, 10ms apart */
	for (n = 0; n < 50; ++n) {
		usleep_range(10000, 11000);	/* wait_ms(10); */
		ret = ks7010_sdio_readb(priv, GCR_A_REG, &byte);
		if (ret)
			goto release_firmware;
		if (byte == GCR_A_RUN)
			break;
	}
	/* n reached the poll limit without seeing GCR_A_RUN */
	if ((50) <= n) {
		netdev_err(priv->net_dev, "firmware can't start\n");
		ret = -EIO;
		goto release_firmware;
	}

	ret = 0;

release_firmware:
	release_firmware(fw_entry);
release_host:
	sdio_release_host(func);

	return ret;
}
/*
 * Queue the fixed sequence of SME requests that pushes the initial
 * wireless configuration (WEP keys, thresholds, mode) to the firmware.
 */
static void ks7010_sme_enqueue_events(struct ks_wlan_private *priv)
{
	static const u16 init_events[] = {
		SME_GET_EEPROM_CKSUM, SME_STOP_REQUEST,
		SME_RTS_THRESHOLD_REQUEST, SME_FRAGMENTATION_THRESHOLD_REQUEST,
		SME_WEP_INDEX_REQUEST, SME_WEP_KEY1_REQUEST,
		SME_WEP_KEY2_REQUEST, SME_WEP_KEY3_REQUEST,
		SME_WEP_KEY4_REQUEST, SME_WEP_FLAG_REQUEST,
		SME_RSN_ENABLED_REQUEST, SME_MODE_SET_REQUEST,
		SME_START_REQUEST
	};
	const u16 *event = init_events;
	const u16 *end = init_events + ARRAY_SIZE(init_events);

	while (event < end)
		hostif_sme_enqueue(priv, *event++);
}
/*
 * ks7010_card_init() - bring the card up to the READY state.
 * @priv: device private data.
 *
 * Queues SME_START to fetch the MAC address and firmware version, then
 * pushes the initial wireless parameters. Each phase waits up to five
 * seconds for the firmware's confirmation; a timeout is only logged.
 */
static void ks7010_card_init(struct ks_wlan_private *priv)
{
	init_completion(&priv->confirm_wait);

	/* get mac address & firmware version */
	hostif_sme_enqueue(priv, SME_START);

	if (!wait_for_completion_interruptible_timeout
	    (&priv->confirm_wait, 5 * HZ)) {
		netdev_dbg(priv->net_dev, "wait time out!! SME_START\n");
	}

	/* both the MAC address and the version reply must have arrived */
	if (priv->mac_address_valid && priv->version_size != 0)
		priv->dev_state = DEVICE_STATE_PREINIT;

	ks7010_sme_enqueue_events(priv);

	if (!wait_for_completion_interruptible_timeout
	    (&priv->confirm_wait, 5 * HZ)) {
		netdev_dbg(priv->net_dev, "wait time out!! wireless parameter set\n");
	}

	if (priv->dev_state >= DEVICE_STATE_PREINIT) {
		netdev_dbg(priv->net_dev, "DEVICE READY!!\n");
		priv->dev_state = DEVICE_STATE_READY;
	}
}
/*
 * ks7010_init_defaults() - load power-on defaults into priv->reg.
 * @priv: device private data.
 *
 * Configures 802.11b/g-compatible operation with the full automatic
 * rate set (1M..54M; the 802.11b rates are flagged as basic rates).
 */
static void ks7010_init_defaults(struct ks_wlan_private *priv)
{
	priv->reg.preamble = LONG_PREAMBLE;
	priv->reg.power_mgmt = POWER_MGMT_ACTIVE;
	priv->reg.scan_type = ACTIVE_SCAN;
	priv->reg.beacon_lost_count = 20;
	priv->reg.rts = 2347UL;		/* RTS/CTS disabled */
	priv->reg.fragment = 2346UL;	/* fragmentation disabled */
	priv->reg.phy_type = D_11BG_COMPATIBLE_MODE;
	priv->reg.cts_mode = CTS_MODE_FALSE;
	priv->reg.rate_set.body[11] = TX_RATE_54M;
	priv->reg.rate_set.body[10] = TX_RATE_48M;
	priv->reg.rate_set.body[9] = TX_RATE_36M;
	priv->reg.rate_set.body[8] = TX_RATE_18M;
	priv->reg.rate_set.body[7] = TX_RATE_9M;
	priv->reg.rate_set.body[6] = TX_RATE_24M | BASIC_RATE;
	priv->reg.rate_set.body[5] = TX_RATE_12M | BASIC_RATE;
	priv->reg.rate_set.body[4] = TX_RATE_6M | BASIC_RATE;
	priv->reg.rate_set.body[3] = TX_RATE_11M | BASIC_RATE;
	priv->reg.rate_set.body[2] = TX_RATE_5M | BASIC_RATE;
	priv->reg.rate_set.body[1] = TX_RATE_2M | BASIC_RATE;
	priv->reg.rate_set.body[0] = TX_RATE_1M | BASIC_RATE;
	/*
	 * Cleanup: the original first assigned TX_RATE_AUTO and then
	 * unconditionally overwrote it here; the dead store is dropped.
	 */
	priv->reg.tx_rate = TX_RATE_FULL_AUTO;
	priv->reg.rate_set.size = 12;
}
/*
 * Mask all device interrupts, clear stale pending bits, and install
 * the SDIO interrupt handler. Caller must hold the host claim.
 */
static int ks7010_sdio_setup_irqs(struct sdio_func *func)
{
	int ret;

	/* interrupt disable */
	sdio_writeb(func, 0, INT_ENABLE_REG, &ret);
	if (ret)
		return ret;

	sdio_writeb(func, 0xff, INT_PENDING_REG, &ret);
	if (ret)
		return ret;

	/* setup interrupt handler */
	return sdio_claim_irq(func, ks_sdio_interrupt);
}
/*
 * Acknowledge any stale interrupt status and unmask the interrupts the
 * driver handles (GCR-B plus the read/write status bits).
 */
static void ks7010_sdio_init_irqs(struct sdio_func *func,
				  struct ks_wlan_private *priv)
{
	int ret;

	/*
	 * interrupt setting
	 * clear Interrupt status write
	 * (ARMtoSD_InterruptPending FN1:00_0024)
	 */
	sdio_claim_host(func);
	ret = ks7010_sdio_writeb(priv, INT_PENDING_REG, 0xff);
	sdio_release_host(func);
	if (ret)
		netdev_err(priv->net_dev, "write INT_PENDING_REG\n");

	/* enable ks7010sdio interrupt */
	sdio_claim_host(func);
	ret = ks7010_sdio_writeb(priv, INT_ENABLE_REG,
				 INT_GCR_B | INT_READ_STATUS |
				 INT_WRITE_STATUS);
	sdio_release_host(func);
	if (ret)
		netdev_err(priv->net_dev, "write INT_ENABLE_REG\n");
}
/* Initialize the per-device private state and the queues/net stack glue. */
static void ks7010_private_init(struct ks_wlan_private *priv,
				struct ks_sdio_card *card,
				struct net_device *netdev)
{
	priv->if_hw = card;
	priv->net_dev = netdev;
	priv->dev_state = DEVICE_STATE_PREBOOT;

	priv->firmware_version[0] = '\0';
	priv->version_size = 0;

	priv->last_doze = jiffies;
	priv->last_wakeup = jiffies;

	memset(&priv->nstats, 0, sizeof(priv->nstats));
	memset(&priv->wstats, 0, sizeof(priv->wstats));

	/* sleep mode: everything idle */
	atomic_set(&priv->sleepstatus.status, 0);
	atomic_set(&priv->sleepstatus.doze_request, 0);
	atomic_set(&priv->sleepstatus.wakeup_request, 0);

	trx_device_init(priv);
	hostif_init(priv);
	ks_wlan_net_start(netdev);
	ks7010_init_defaults(priv);
}
/*
 * ks7010_sdio_probe() - probe callback for a KS7010 SDIO function.
 * @func: the SDIO function being probed.
 * @device: matched entry of ks7010_sdio_ids.
 *
 * Sets up the SDIO function, allocates the net device, uploads the
 * firmware, starts the worker and registers the interface.
 *
 * Return: 0 on success, -ENOMEM when the card wrapper cannot be
 * allocated, -ENODEV on any later failure (the specific errno in 'ret'
 * is not propagated on the unwind paths).
 */
static int ks7010_sdio_probe(struct sdio_func *func,
			     const struct sdio_device_id *device)
{
	struct ks_wlan_private *priv = NULL;
	struct net_device *netdev = NULL;
	struct ks_sdio_card *card;
	int ret;

	card = kzalloc(sizeof(*card), GFP_KERNEL);
	if (!card)
		return -ENOMEM;

	card->func = func;

	sdio_claim_host(func);

	ret = sdio_set_block_size(func, KS7010_IO_BLOCK_SIZE);
	if (ret)
		goto err_free_card;

	dev_dbg(&card->func->dev, "multi_block=%d sdio_set_block_size()=%d %d\n",
		func->card->cccr.multi_block, func->cur_blksize, ret);

	ret = sdio_enable_func(func);
	if (ret)
		goto err_free_card;

	ret = ks7010_sdio_setup_irqs(func);
	if (ret)
		goto err_disable_func;

	sdio_release_host(func);

	sdio_set_drvdata(func, card);

	dev_dbg(&card->func->dev, "class = 0x%X, vendor = 0x%X, device = 0x%X\n",
		func->class, func->vendor, func->device);

	/* private memory allocate */
	netdev = alloc_etherdev(sizeof(*priv));
	if (!netdev) {
		dev_err(&card->func->dev, "Unable to alloc new net device\n");
		goto err_release_irq;
	}
	ret = dev_alloc_name(netdev, "wlan%d");
	if (ret < 0) {
		dev_err(&card->func->dev, "Couldn't get name!\n");
		goto err_free_netdev;
	}

	priv = netdev_priv(netdev);

	card->priv = priv;
	SET_NETDEV_DEV(netdev, &card->func->dev);

	ks7010_private_init(priv, card, netdev);

	ret = ks7010_upload_firmware(card);
	if (ret) {
		netdev_err(priv->net_dev,
			   "firmware load failed !! ret = %d\n", ret);
		goto err_free_netdev;
	}

	ks7010_sdio_init_irqs(func, priv);

	priv->dev_state = DEVICE_STATE_BOOT;

	/* single-threaded worker driving the SDIO read/write path */
	priv->wq = alloc_workqueue("wq", WQ_MEM_RECLAIM, 1);
	if (!priv->wq) {
		netdev_err(priv->net_dev, "create_workqueue failed !!\n");
		goto err_free_netdev;
	}

	INIT_DELAYED_WORK(&priv->rw_dwork, ks7010_rw_function);
	ks7010_card_init(priv);

	ret = register_netdev(priv->net_dev);
	if (ret)
		goto err_destroy_wq;

	return 0;

	/*
	 * Unwind in reverse order of acquisition. Note the host is
	 * re-claimed on the err_release_irq path and released once at
	 * err_free_card, which all later error paths fall through to.
	 */
 err_destroy_wq:
	destroy_workqueue(priv->wq);
 err_free_netdev:
	free_netdev(netdev);
 err_release_irq:
	sdio_claim_host(func);
	sdio_release_irq(func);
 err_disable_func:
	sdio_disable_func(func);
 err_free_card:
	sdio_release_host(func);
	sdio_set_drvdata(func, NULL);
	kfree(card);

	return -ENODEV;
}
/*
 * Build a HIF_STOP_REQ frame and push it to the MAC.
 * Returns 0 on success or -ENOMEM if the frame cannot be allocated.
 * The write itself is fire-and-forget.
 */
static int send_stop_request(struct sdio_func *func)
{
	struct hostif_stop_request *pp;
	struct ks_sdio_card *card = sdio_get_drvdata(func);

	pp = kzalloc(hif_align_size(sizeof(*pp)), GFP_KERNEL);
	if (!pp)
		return -ENOMEM;

	/* the size field excludes itself */
	pp->header.size = cpu_to_le16(sizeof(*pp) - sizeof(pp->header.size));
	pp->header.event = cpu_to_le16(HIF_STOP_REQ);

	sdio_claim_host(func);
	write_to_device(card->priv, (u8 *)pp, hif_align_size(sizeof(*pp)));
	sdio_release_host(func);

	kfree(pp);
	return 0;
}
/*
 * ks7010_sdio_remove() - remove callback; tear everything down.
 * @func: the SDIO function being removed.
 *
 * Stops the interface, masks device interrupts, asks the MAC to stop,
 * then unwinds the workqueue, host interface, net device and SDIO
 * function state set up in probe.
 *
 * NOTE(review): when send_stop_request() fails (-ENOMEM) this jumps to
 * err_free_card and skips freeing the workqueue and net device —
 * presumably acceptable as a rare OOM path, but it does leak; confirm.
 */
static void ks7010_sdio_remove(struct sdio_func *func)
{
	int ret;
	struct ks_sdio_card *card;
	struct ks_wlan_private *priv;

	card = sdio_get_drvdata(func);
	if (!card)
		return;

	priv = card->priv;
	if (!priv)
		goto err_free_card;

	ks_wlan_net_stop(priv->net_dev);

	/* interrupt disable */
	sdio_claim_host(func);
	sdio_writeb(func, 0, INT_ENABLE_REG, &ret);
	sdio_writeb(func, 0xff, INT_PENDING_REG, &ret);
	sdio_release_host(func);

	ret = send_stop_request(func);
	if (ret)	/* memory allocation failure */
		goto err_free_card;

	if (priv->wq)
		destroy_workqueue(priv->wq);

	hostif_exit(priv);

	unregister_netdev(priv->net_dev);

	trx_device_exit(priv);
	free_netdev(priv->net_dev);
	card->priv = NULL;

	sdio_claim_host(func);
	sdio_release_irq(func);
	sdio_disable_func(func);
	sdio_release_host(func);
err_free_card:
	sdio_set_drvdata(func, NULL);
	kfree(card);
}
/* SDIO IDs the driver binds to: the KS7010 appears under two vendor codes. */
static const struct sdio_device_id ks7010_sdio_ids[] = {
	{SDIO_DEVICE(SDIO_VENDOR_ID_KS_CODE_A, SDIO_DEVICE_ID_KS_7010)},
	{SDIO_DEVICE(SDIO_VENDOR_ID_KS_CODE_B, SDIO_DEVICE_ID_KS_7010)},
	{ /* all zero */ }
};
MODULE_DEVICE_TABLE(sdio, ks7010_sdio_ids);

/* SDIO bus driver glue: probe/remove plus the ID table above. */
static struct sdio_driver ks7010_sdio_driver = {
	.name = "ks7010_sdio",
	.id_table = ks7010_sdio_ids,
	.probe = ks7010_sdio_probe,
	.remove = ks7010_sdio_remove,
};

module_driver(ks7010_sdio_driver, sdio_register_driver, sdio_unregister_driver);
MODULE_AUTHOR("Sang Engineering, Qi-Hardware, KeyStream");
MODULE_DESCRIPTION("Driver for KeyStream KS7010 based SDIO cards");
MODULE_LICENSE("GPL v2");
MODULE_FIRMWARE(ROM_FILE);
| linux-master | drivers/staging/ks7010/ks7010_sdio.c |
// SPDX-License-Identifier: GPL-2.0
/*
* Driver for KeyStream 11b/g wireless LAN
*
* Copyright (C) 2005-2008 KeyStream Corp.
* Copyright (C) 2009 Renesas Technology Corp.
*/
#include <linux/atomic.h>
#include <linux/completion.h>
#include <linux/if_arp.h>
#include <linux/netdevice.h>
#include <linux/timer.h>
#include <linux/uaccess.h>
static int wep_on_off;
#define WEP_OFF 0
#define WEP_ON_64BIT 1
#define WEP_ON_128BIT 2
#include "ks_wlan.h"
#include "ks_hostif.h"
#include "ks_wlan_ioctl.h"
/* Include Wireless Extension definition and check version */
#include <linux/wireless.h>
#define WIRELESS_SPY /* enable iwspy support */
#include <net/iw_handler.h> /* New driver API */
/* Frequency list (map channels to frequencies) */
static const long frequency_list[] = {
2412, 2417, 2422, 2427, 2432, 2437, 2442,
2447, 2452, 2457, 2462, 2467, 2472, 2484
};
/* A few details needed for WEP (Wireless Equivalent Privacy) */
#define MAX_KEY_SIZE 13 /* 128 (?) bits */
#define MIN_KEY_SIZE 5 /* 40 bits RC4 - WEP */
struct wep_key {
u16 len;
u8 key[16]; /* 40-bit and 104-bit keys */
};
/*
* function prototypes
*/
static int ks_wlan_open(struct net_device *dev);
static void ks_wlan_tx_timeout(struct net_device *dev, unsigned int txqueue);
static netdev_tx_t ks_wlan_start_xmit(struct sk_buff *skb, struct net_device *dev);
static int ks_wlan_close(struct net_device *dev);
static void ks_wlan_set_rx_mode(struct net_device *dev);
static struct net_device_stats *ks_wlan_get_stats(struct net_device *dev);
static int ks_wlan_set_mac_address(struct net_device *dev, void *addr);
static int ks_wlan_netdev_ioctl(struct net_device *dev, struct ifreq *rq,
int cmd);
static atomic_t update_phyinfo;
static struct timer_list update_phyinfo_timer;
/*
 * ks_wlan_update_phy_information() - request fresh PHY statistics from
 * the firmware, rate-limited to once per second.
 * @priv: device private data.
 *
 * Return: 0 on success, -EBUSY before initialization completes,
 * -EPERM while a previous update is still being throttled.
 */
static
int ks_wlan_update_phy_information(struct ks_wlan_private *priv)
{
	struct iw_statistics *wstats = &priv->wstats;

	netdev_dbg(priv->net_dev, "in_interrupt = %ld\n", in_interrupt());

	if (priv->dev_state < DEVICE_STATE_READY)
		return -EBUSY;	/* not finished initialize */

	/* still inside the one-second throttle window */
	if (atomic_read(&update_phyinfo))
		return -EPERM;

	/* The status */
	wstats->status = priv->reg.operation_mode;	/* Operation mode */

	/* Signal quality and co. But where is the noise level ??? */
	hostif_sme_enqueue(priv, SME_PHY_INFO_REQUEST);

	/* interruptible_sleep_on_timeout(&priv->confirm_wait, HZ/2); */
	if (!wait_for_completion_interruptible_timeout
	    (&priv->confirm_wait, HZ / 2)) {
		netdev_dbg(priv->net_dev, "wait time out!!\n");
	}

	/* arm the throttle; the timer callback clears update_phyinfo */
	atomic_inc(&update_phyinfo);
	update_phyinfo_timer.expires = jiffies + HZ;	/* 1sec */
	add_timer(&update_phyinfo_timer);

	return 0;
}
/* Timer callback: re-enable PHY info updates after the throttle window. */
static void ks_wlan_update_phyinfo_timeout(struct timer_list *unused)
{
	pr_debug("in_interrupt = %ld\n", in_interrupt());
	atomic_set(&update_phyinfo, 0);
}
int ks_wlan_setup_parameter(struct ks_wlan_private *priv,
unsigned int commit_flag)
{
hostif_sme_enqueue(priv, SME_STOP_REQUEST);
if (commit_flag & SME_RTS)
hostif_sme_enqueue(priv, SME_RTS_THRESHOLD_REQUEST);
if (commit_flag & SME_FRAG)
hostif_sme_enqueue(priv, SME_FRAGMENTATION_THRESHOLD_REQUEST);
if (commit_flag & SME_WEP_INDEX)
hostif_sme_enqueue(priv, SME_WEP_INDEX_REQUEST);
if (commit_flag & SME_WEP_VAL1)
hostif_sme_enqueue(priv, SME_WEP_KEY1_REQUEST);
if (commit_flag & SME_WEP_VAL2)
hostif_sme_enqueue(priv, SME_WEP_KEY2_REQUEST);
if (commit_flag & SME_WEP_VAL3)
hostif_sme_enqueue(priv, SME_WEP_KEY3_REQUEST);
if (commit_flag & SME_WEP_VAL4)
hostif_sme_enqueue(priv, SME_WEP_KEY4_REQUEST);
if (commit_flag & SME_WEP_FLAG)
hostif_sme_enqueue(priv, SME_WEP_FLAG_REQUEST);
if (commit_flag & SME_RSN) {
hostif_sme_enqueue(priv, SME_RSN_ENABLED_REQUEST);
hostif_sme_enqueue(priv, SME_RSN_MODE_REQUEST);
}
if (commit_flag & SME_RSN_MULTICAST)
hostif_sme_enqueue(priv, SME_RSN_MCAST_REQUEST);
if (commit_flag & SME_RSN_UNICAST)
hostif_sme_enqueue(priv, SME_RSN_UCAST_REQUEST);
if (commit_flag & SME_RSN_AUTH)
hostif_sme_enqueue(priv, SME_RSN_AUTH_REQUEST);
hostif_sme_enqueue(priv, SME_MODE_SET_REQUEST);
hostif_sme_enqueue(priv, SME_START_REQUEST);
return 0;
}
/*
* Initial Wireless Extension code for Ks_Wlannet driver by :
* Jean Tourrilhes <[email protected]> - HPL - 17 November 00
* Conversion to new driver API by :
* Jean Tourrilhes <[email protected]> - HPL - 26 March 02
* Javier also did a good amount of work here, adding some new extensions
* and fixing my code. Let's just say that without him this code just
* would not work at all... - Jean II
*/
/* SIOCGIWNAME handler: report the supported protocol family. */
static int ks_wlan_get_name(struct net_device *dev,
			    struct iw_request_info *info,
			    union iwreq_data *cwrq,
			    char *extra)
{
	struct ks_wlan_private *priv = netdev_priv(dev);
	const char *name;

	if (priv->sleep_mode == SLP_SLEEP)
		return -EPERM;

	/* for SLEEP MODE */
	if (priv->dev_state < DEVICE_STATE_READY)
		name = "NOT READY!";
	else if (priv->reg.phy_type == D_11B_ONLY_MODE)
		name = "IEEE 802.11b";
	else if (priv->reg.phy_type == D_11G_ONLY_MODE)
		name = "IEEE 802.11g";
	else
		name = "IEEE 802.11b/g";

	strscpy(cwrq->name, name, sizeof(cwrq->name));
	return 0;
}
static int ks_wlan_set_freq(struct net_device *dev,
struct iw_request_info *info,
union iwreq_data *fwrq, char *extra)
{
struct ks_wlan_private *priv = netdev_priv(dev);
int channel;
if (priv->sleep_mode == SLP_SLEEP)
return -EPERM;
/* for SLEEP MODE */
/* If setting by frequency, convert to a channel */
if ((fwrq->freq.e == 1) &&
(fwrq->freq.m >= 241200000) && (fwrq->freq.m <= 248700000)) {
int f = fwrq->freq.m / 100000;
int c = 0;
while ((c < 14) && (f != frequency_list[c]))
c++;
/* Hack to fall through... */
fwrq->freq.e = 0;
fwrq->freq.m = c + 1;
}
/* Setting by channel number */
if ((fwrq->freq.m > 1000) || (fwrq->freq.e > 0))
return -EOPNOTSUPP;
channel = fwrq->freq.m;
/* We should do a better check than that,
* based on the card capability !!!
*/
if ((channel < 1) || (channel > 14)) {
netdev_dbg(dev, "%s: New channel value of %d is invalid!\n",
dev->name, fwrq->freq.m);
return -EINVAL;
}
/* Yes ! We can set it !!! */
priv->reg.channel = (u8)(channel);
priv->need_commit |= SME_MODE_SET;
return -EINPROGRESS; /* Call commit handler */
}
/* SIOCGIWFREQ handler: report the current (or configured) frequency. */
static int ks_wlan_get_freq(struct net_device *dev,
			    struct iw_request_info *info,
			    union iwreq_data *fwrq, char *extra)
{
	struct ks_wlan_private *priv = netdev_priv(dev);
	int channel;

	if (priv->sleep_mode == SLP_SLEEP)
		return -EPERM;

	/* for SLEEP MODE */
	/* while associated, report the AP's channel instead of ours */
	channel = is_connect_status(priv->connect_status) ?
		  (int)priv->current_ap.channel : (int)priv->reg.channel;

	fwrq->freq.m = frequency_list[channel - 1] * 100000;
	fwrq->freq.e = 1;

	return 0;
}
/* SIOCSIWESSID handler: set or clear the configured SSID. */
static int ks_wlan_set_essid(struct net_device *dev,
			     struct iw_request_info *info,
			     union iwreq_data *dwrq, char *extra)
{
	struct ks_wlan_private *priv = netdev_priv(dev);

	if (priv->sleep_mode == SLP_SLEEP)
		return -EPERM;

	/* for SLEEP MODE */
	if (!dwrq->essid.flags) {
		/* "any" requested: clear the configured SSID */
		memset(priv->reg.ssid.body, 0, sizeof(priv->reg.ssid.body));
		priv->reg.ssid.size = 0;
	} else {
		size_t len = dwrq->essid.length;

		/* iwconfig uses nul termination in SSID.. */
		if (len > 0 && extra[len - 1] == '\0')
			len--;

		if (len > IW_ESSID_MAX_SIZE)
			return -EINVAL;

		memset(priv->reg.ssid.body, 0, sizeof(priv->reg.ssid.body));
		memcpy(priv->reg.ssid.body, extra, len);
		priv->reg.ssid.size = len;
	}

	/* Write it to the card */
	priv->need_commit |= SME_MODE_SET;
	ks_wlan_setup_parameter(priv, priv->need_commit);
	priv->need_commit = 0;
	return 0;
}
/* SIOCGIWESSID handler: report the configured SSID (or "any"). */
static int ks_wlan_get_essid(struct net_device *dev,
			     struct iw_request_info *info,
			     union iwreq_data *dwrq, char *extra)
{
	struct ks_wlan_private *priv = netdev_priv(dev);

	if (priv->sleep_mode == SLP_SLEEP)
		return -EPERM;

	/* for SLEEP MODE */
	/*
	 * Note : if dwrq->flags != 0, we should
	 * get the relevant SSID from the SSID list...
	 */
	if (!priv->reg.ssid.size) {
		dwrq->essid.length = 0;
		dwrq->essid.flags = 0;	/* ANY */
		return 0;
	}

	/* Get the current SSID and push it out. */
	memcpy(extra, priv->reg.ssid.body, priv->reg.ssid.size);
	dwrq->essid.length = priv->reg.ssid.size;
	dwrq->essid.flags = 1;	/* active */

	return 0;
}
/* SIOCSIWAP handler: pin the BSSID to associate with. */
static int ks_wlan_set_wap(struct net_device *dev, struct iw_request_info *info,
			   union iwreq_data *awrq, char *extra)
{
	struct ks_wlan_private *priv = netdev_priv(dev);

	if (priv->sleep_mode == SLP_SLEEP)
		return -EPERM;

	/* for SLEEP MODE */
	/* a fixed BSSID only makes sense in ad-hoc or infrastructure mode */
	if (priv->reg.operation_mode != MODE_ADHOC &&
	    priv->reg.operation_mode != MODE_INFRASTRUCTURE) {
		eth_zero_addr(priv->reg.bssid);
		return -EOPNOTSUPP;
	}

	ether_addr_copy(priv->reg.bssid, awrq->ap_addr.sa_data);
	if (is_valid_ether_addr((u8 *)priv->reg.bssid))
		priv->need_commit |= SME_MODE_SET;

	netdev_dbg(dev, "bssid = %pM\n", priv->reg.bssid);

	/* Write it to the card */
	if (priv->need_commit) {
		priv->need_commit |= SME_MODE_SET;
		return -EINPROGRESS;	/* Call commit handler */
	}
	return 0;
}
/* SIOCGIWAP handler: report the current BSSID (zeroes if unassociated). */
static int ks_wlan_get_wap(struct net_device *dev, struct iw_request_info *info,
			   union iwreq_data *awrq, char *extra)
{
	struct ks_wlan_private *priv = netdev_priv(dev);

	if (priv->sleep_mode == SLP_SLEEP)
		return -EPERM;

	/* for SLEEP MODE */
	if (is_connect_status(priv->connect_status))
		ether_addr_copy(awrq->ap_addr.sa_data, priv->current_ap.bssid);
	else
		eth_zero_addr(awrq->ap_addr.sa_data);

	awrq->ap_addr.sa_family = ARPHRD_ETHER;

	return 0;
}
/* SIOCSIWNICKN handler: store the station nickname. */
static int ks_wlan_set_nick(struct net_device *dev,
			    struct iw_request_info *info,
			    union iwreq_data *dwrq, char *extra)
{
	struct ks_wlan_private *priv = netdev_priv(dev);

	if (priv->sleep_mode == SLP_SLEEP)
		return -EPERM;

	/* for SLEEP MODE */
	/* at most 16 characters plus the terminating NUL */
	if (dwrq->data.length > 16 + 1)
		return -E2BIG;

	memset(priv->nick, 0, sizeof(priv->nick));
	memcpy(priv->nick, extra, dwrq->data.length);

	return -EINPROGRESS;	/* Call commit handler */
}
/* SIOCGIWNICKN handler: return the stored station nickname. */
static int ks_wlan_get_nick(struct net_device *dev,
			    struct iw_request_info *info,
			    union iwreq_data *dwrq, char *extra)
{
	struct ks_wlan_private *priv = netdev_priv(dev);

	if (priv->sleep_mode == SLP_SLEEP)
		return -EPERM;

	/* for SLEEP MODE */
	strscpy(extra, priv->nick, 17);
	dwrq->data.length = strlen(extra) + 1;

	return 0;
}
/*
 * ks_wlan_set_rate() - SIOCSIWRATE handler: configure the TX rate set.
 * @dev: network device.
 * @info: wireless request info (unused).
 * @vwrq: vwrq->bitrate.fixed selects a single fixed rate; otherwise
 *        vwrq->bitrate.value caps an automatic rate set (0 = full auto).
 * @extra: unused.
 *
 * Builds priv->reg.rate_set (rates in 500kbit/s units; 802.11b rates
 * carry BASIC_RATE) and sets priv->reg.tx_rate accordingly. The switch
 * statements intentionally fall through so each requested maximum also
 * enables every lower rate.
 *
 * Return: -EINPROGRESS to invoke the commit handler, -EPERM while
 * sleeping, -EINVAL for an unsupported rate value.
 */
static int ks_wlan_set_rate(struct net_device *dev,
			    struct iw_request_info *info,
			    union iwreq_data *vwrq, char *extra)
{
	struct ks_wlan_private *priv = netdev_priv(dev);
	int i = 0;	/* number of rates placed into the set */

	if (priv->sleep_mode == SLP_SLEEP)
		return -EPERM;

	/* for SLEEP MODE */
	if (priv->reg.phy_type == D_11B_ONLY_MODE) {
		if (vwrq->bitrate.fixed == 1) {
			/* single fixed 802.11b rate */
			switch (vwrq->bitrate.value) {
			case 11000000:
			case 5500000:
				priv->reg.rate_set.body[0] =
				    (u8)(vwrq->bitrate.value / 500000);
				break;
			case 2000000:
			case 1000000:
				priv->reg.rate_set.body[0] =
				    ((u8)(vwrq->bitrate.value / 500000)) |
				    BASIC_RATE;
				break;
			default:
				return -EINVAL;
			}
			priv->reg.tx_rate = TX_RATE_FIXED;
			priv->reg.rate_set.size = 1;
		} else {	/* vwrq->fixed == 0 */
			if (vwrq->bitrate.value > 0) {
				/* auto up to the requested maximum */
				switch (vwrq->bitrate.value) {
				case 11000000:
					priv->reg.rate_set.body[3] =
					    TX_RATE_11M;
					i++;
					fallthrough;
				case 5500000:
					priv->reg.rate_set.body[2] = TX_RATE_5M;
					i++;
					fallthrough;
				case 2000000:
					priv->reg.rate_set.body[1] =
					    TX_RATE_2M | BASIC_RATE;
					i++;
					fallthrough;
				case 1000000:
					priv->reg.rate_set.body[0] =
					    TX_RATE_1M | BASIC_RATE;
					i++;
					break;
				default:
					return -EINVAL;
				}
				priv->reg.tx_rate = TX_RATE_MANUAL_AUTO;
				priv->reg.rate_set.size = i;
			} else {
				/* value == 0: full automatic 802.11b set */
				priv->reg.rate_set.body[3] = TX_RATE_11M;
				priv->reg.rate_set.body[2] = TX_RATE_5M;
				priv->reg.rate_set.body[1] =
				    TX_RATE_2M | BASIC_RATE;
				priv->reg.rate_set.body[0] =
				    TX_RATE_1M | BASIC_RATE;
				priv->reg.tx_rate = TX_RATE_FULL_AUTO;
				priv->reg.rate_set.size = 4;
			}
		}
	} else {	/* D_11G_ONLY_MODE or D_11BG_COMPATIBLE_MODE */
		if (vwrq->bitrate.fixed == 1) {
			/* single fixed 802.11g rate */
			switch (vwrq->bitrate.value) {
			case 54000000:
			case 48000000:
			case 36000000:
			case 18000000:
			case 9000000:
				priv->reg.rate_set.body[0] =
				    (u8)(vwrq->bitrate.value / 500000);
				break;
			case 24000000:
			case 12000000:
			case 11000000:
			case 6000000:
			case 5500000:
			case 2000000:
			case 1000000:
				priv->reg.rate_set.body[0] =
				    ((u8)(vwrq->bitrate.value / 500000)) |
				    BASIC_RATE;
				break;
			default:
				return -EINVAL;
			}
			priv->reg.tx_rate = TX_RATE_FIXED;
			priv->reg.rate_set.size = 1;
		} else {	/* vwrq->fixed == 0 */
			if (vwrq->bitrate.value > 0) {
				/*
				 * Auto up to the requested maximum. The 6-24M
				 * maxima share one case and are distinguished
				 * by the if/else chain inside.
				 * NOTE(review): the 11M branch stores
				 * TX_RATE_9M in body[5] and the 9M branch
				 * pairs TX_RATE_6M with BASIC_RATE in
				 * body[3] — looks inconsistent with the
				 * neighbouring branches; confirm against the
				 * firmware's expected rate-set layout.
				 */
				switch (vwrq->bitrate.value) {
				case 54000000:
					priv->reg.rate_set.body[11] =
					    TX_RATE_54M;
					i++;
					fallthrough;
				case 48000000:
					priv->reg.rate_set.body[10] =
					    TX_RATE_48M;
					i++;
					fallthrough;
				case 36000000:
					priv->reg.rate_set.body[9] =
					    TX_RATE_36M;
					i++;
					fallthrough;
				case 24000000:
				case 18000000:
				case 12000000:
				case 11000000:
				case 9000000:
				case 6000000:
					if (vwrq->bitrate.value == 24000000) {
						priv->reg.rate_set.body[8] =
						    TX_RATE_18M;
						i++;
						priv->reg.rate_set.body[7] =
						    TX_RATE_9M;
						i++;
						priv->reg.rate_set.body[6] =
						    TX_RATE_24M | BASIC_RATE;
						i++;
						priv->reg.rate_set.body[5] =
						    TX_RATE_12M | BASIC_RATE;
						i++;
						priv->reg.rate_set.body[4] =
						    TX_RATE_6M | BASIC_RATE;
						i++;
						priv->reg.rate_set.body[3] =
						    TX_RATE_11M | BASIC_RATE;
						i++;
					} else if (vwrq->bitrate.value == 18000000) {
						priv->reg.rate_set.body[7] =
						    TX_RATE_18M;
						i++;
						priv->reg.rate_set.body[6] =
						    TX_RATE_9M;
						i++;
						priv->reg.rate_set.body[5] =
						    TX_RATE_12M | BASIC_RATE;
						i++;
						priv->reg.rate_set.body[4] =
						    TX_RATE_6M | BASIC_RATE;
						i++;
						priv->reg.rate_set.body[3] =
						    TX_RATE_11M | BASIC_RATE;
						i++;
					} else if (vwrq->bitrate.value == 12000000) {
						priv->reg.rate_set.body[6] =
						    TX_RATE_9M;
						i++;
						priv->reg.rate_set.body[5] =
						    TX_RATE_12M | BASIC_RATE;
						i++;
						priv->reg.rate_set.body[4] =
						    TX_RATE_6M | BASIC_RATE;
						i++;
						priv->reg.rate_set.body[3] =
						    TX_RATE_11M | BASIC_RATE;
						i++;
					} else if (vwrq->bitrate.value == 11000000) {
						priv->reg.rate_set.body[5] =
						    TX_RATE_9M;
						i++;
						priv->reg.rate_set.body[4] =
						    TX_RATE_6M | BASIC_RATE;
						i++;
						priv->reg.rate_set.body[3] =
						    TX_RATE_11M | BASIC_RATE;
						i++;
					} else if (vwrq->bitrate.value == 9000000) {
						priv->reg.rate_set.body[4] =
						    TX_RATE_9M;
						i++;
						priv->reg.rate_set.body[3] =
						    TX_RATE_6M | BASIC_RATE;
						i++;
					} else {	/* vwrq->value == 6000000 */
						priv->reg.rate_set.body[3] =
						    TX_RATE_6M | BASIC_RATE;
						i++;
					}
					fallthrough;
				case 5500000:
					priv->reg.rate_set.body[2] =
					    TX_RATE_5M | BASIC_RATE;
					i++;
					fallthrough;
				case 2000000:
					priv->reg.rate_set.body[1] =
					    TX_RATE_2M | BASIC_RATE;
					i++;
					fallthrough;
				case 1000000:
					priv->reg.rate_set.body[0] =
					    TX_RATE_1M | BASIC_RATE;
					i++;
					break;
				default:
					return -EINVAL;
				}
				priv->reg.tx_rate = TX_RATE_MANUAL_AUTO;
				priv->reg.rate_set.size = i;
			} else {
				/* value == 0: full automatic 802.11b/g set */
				priv->reg.rate_set.body[11] = TX_RATE_54M;
				priv->reg.rate_set.body[10] = TX_RATE_48M;
				priv->reg.rate_set.body[9] = TX_RATE_36M;
				priv->reg.rate_set.body[8] = TX_RATE_18M;
				priv->reg.rate_set.body[7] = TX_RATE_9M;
				priv->reg.rate_set.body[6] =
				    TX_RATE_24M | BASIC_RATE;
				priv->reg.rate_set.body[5] =
				    TX_RATE_12M | BASIC_RATE;
				priv->reg.rate_set.body[4] =
				    TX_RATE_6M | BASIC_RATE;
				priv->reg.rate_set.body[3] =
				    TX_RATE_11M | BASIC_RATE;
				priv->reg.rate_set.body[2] =
				    TX_RATE_5M | BASIC_RATE;
				priv->reg.rate_set.body[1] =
				    TX_RATE_2M | BASIC_RATE;
				priv->reg.rate_set.body[0] =
				    TX_RATE_1M | BASIC_RATE;
				priv->reg.tx_rate = TX_RATE_FULL_AUTO;
				priv->reg.rate_set.size = 12;
			}
		}
	}

	priv->need_commit |= SME_MODE_SET;

	return -EINPROGRESS;	/* Call commit handler */
}
/* SIOCGIWRATE handler: report the current bit rate. */
static int ks_wlan_get_rate(struct net_device *dev,
			    struct iw_request_info *info,
			    union iwreq_data *vwrq, char *extra)
{
	struct ks_wlan_private *priv = netdev_priv(dev);

	netdev_dbg(dev, "in_interrupt = %ld update_phyinfo = %d\n",
		   in_interrupt(), atomic_read(&update_phyinfo));

	if (priv->sleep_mode == SLP_SLEEP)
		return -EPERM;

	/* for SLEEP MODE */
	/* refresh PHY info unless a refresh is already throttled */
	if (!atomic_read(&update_phyinfo))
		ks_wlan_update_phy_information(priv);

	vwrq->bitrate.value = (priv->current_rate & RATE_MASK) * 500000;
	vwrq->bitrate.fixed = (priv->reg.tx_rate == TX_RATE_FIXED) ? 1 : 0;

	return 0;
}
/* SIOCSIWRTS handler: set the RTS threshold (2347 disables RTS/CTS). */
static int ks_wlan_set_rts(struct net_device *dev, struct iw_request_info *info,
			   union iwreq_data *vwrq, char *extra)
{
	struct ks_wlan_private *priv = netdev_priv(dev);
	int rthr = vwrq->rts.disabled ? 2347 : vwrq->rts.value;

	if (priv->sleep_mode == SLP_SLEEP)
		return -EPERM;

	/* for SLEEP MODE */
	if (rthr < 0 || rthr > 2347)
		return -EINVAL;

	priv->reg.rts = rthr;
	priv->need_commit |= SME_RTS;

	return -EINPROGRESS;	/* Call commit handler */
}
/* SIOCGIWRTS handler: report the RTS threshold. */
static int ks_wlan_get_rts(struct net_device *dev, struct iw_request_info *info,
			   union iwreq_data *vwrq, char *extra)
{
	struct ks_wlan_private *priv = netdev_priv(dev);

	if (priv->sleep_mode == SLP_SLEEP)
		return -EPERM;

	/* for SLEEP MODE */
	vwrq->rts.value = priv->reg.rts;
	/* 2347 is the "RTS/CTS disabled" sentinel */
	vwrq->rts.disabled = (vwrq->rts.value >= 2347);
	vwrq->rts.fixed = 1;

	return 0;
}
/* SIOCSIWFRAG handler: set the fragmentation threshold (2346 disables). */
static int ks_wlan_set_frag(struct net_device *dev,
			    struct iw_request_info *info,
			    union iwreq_data *vwrq, char *extra)
{
	struct ks_wlan_private *priv = netdev_priv(dev);
	int fthr = vwrq->frag.disabled ? 2346 : vwrq->frag.value;

	if (priv->sleep_mode == SLP_SLEEP)
		return -EPERM;

	/* for SLEEP MODE */
	if (fthr < 256 || fthr > 2346)
		return -EINVAL;

	fthr &= ~0x1;	/* Get an even value - is it really needed ??? */
	priv->reg.fragment = fthr;
	priv->need_commit |= SME_FRAG;

	return -EINPROGRESS;	/* Call commit handler */
}
/* SIOCGIWFRAG handler: report the fragmentation threshold. */
static int ks_wlan_get_frag(struct net_device *dev,
			    struct iw_request_info *info,
			    union iwreq_data *vwrq, char *extra)
{
	struct ks_wlan_private *priv = netdev_priv(dev);

	if (priv->sleep_mode == SLP_SLEEP)
		return -EPERM;

	/* for SLEEP MODE */
	vwrq->frag.value = priv->reg.fragment;
	/* 2346 is the "fragmentation disabled" sentinel */
	vwrq->frag.disabled = (vwrq->frag.value >= 2346);
	vwrq->frag.fixed = 1;

	return 0;
}
/* SIOCSIWMODE handler: select ad-hoc or infrastructure operation. */
static int ks_wlan_set_mode(struct net_device *dev,
			    struct iw_request_info *info,
			    union iwreq_data *uwrq, char *extra)
{
	struct ks_wlan_private *priv = netdev_priv(dev);

	if (priv->sleep_mode == SLP_SLEEP)
		return -EPERM;

	/* only ad-hoc and infrastructure modes are supported */
	switch (uwrq->mode) {
	case IW_MODE_ADHOC:
		priv->reg.operation_mode = MODE_ADHOC;
		break;
	case IW_MODE_INFRA:
		priv->reg.operation_mode = MODE_INFRASTRUCTURE;
		break;
	default:
		return -EINVAL;
	}
	priv->need_commit |= SME_MODE_SET;

	return -EINPROGRESS;	/* Call commit handler */
}
/* SIOCGIWMODE handler: report the current operation mode. */
static int ks_wlan_get_mode(struct net_device *dev,
			    struct iw_request_info *info,
			    union iwreq_data *uwrq, char *extra)
{
	struct ks_wlan_private *priv = netdev_priv(dev);

	if (priv->sleep_mode == SLP_SLEEP)
		return -EPERM;

	/* If not managed, assume it's ad-hoc */
	if (priv->reg.operation_mode == MODE_INFRASTRUCTURE)
		uwrq->mode = IW_MODE_INFRA;
	else
		uwrq->mode = IW_MODE_ADHOC;

	return 0;
}
/*
 * ks_wlan_set_encode() - SIOCSIWENCODE handler: configure WEP keys.
 * @dev: network device.
 * @info: wireless request info (unused).
 * @dwrq: dwrq->encoding describes the key index, length and mode flags.
 * @extra: key material, already copied from user space by the WEXT core.
 *
 * Return: 0 on success, -EPERM while sleeping, -EINVAL for a bad index,
 * key length or empty key slot.
 */
static int ks_wlan_set_encode(struct net_device *dev,
			      struct iw_request_info *info,
			      union iwreq_data *dwrq, char *extra)
{
	struct ks_wlan_private *priv = netdev_priv(dev);
	struct iw_point *enc = &dwrq->encoding;
	struct wep_key key;
	int index = (enc->flags & IW_ENCODE_INDEX);

	if (priv->sleep_mode == SLP_SLEEP)
		return -EPERM;

	if (enc->length > MAX_KEY_SIZE)
		return -EINVAL;

	/* for SLEEP MODE */
	if ((index < 0) || (index > 4))
		return -EINVAL;

	/* 0 means "current key"; otherwise the index is 1-based */
	index = (index == 0) ? priv->reg.wep_index : (index - 1);

	/* Is WEP supported ? */
	/* Basic checking: do we have a key to set ? */
	if (enc->length > 0) {
		key.len = (enc->length > MIN_KEY_SIZE) ?
			   MAX_KEY_SIZE : MIN_KEY_SIZE;
		priv->reg.privacy_invoked = 0x01;
		priv->need_commit |= SME_WEP_FLAG;
		wep_on_off = (enc->length > MIN_KEY_SIZE) ?
			      WEP_ON_128BIT : WEP_ON_64BIT;
		/* Check if the key is not marked as invalid */
		if (enc->flags & IW_ENCODE_NOKEY)
			return 0;

		/* Cleanup */
		memset(key.key, 0, MAX_KEY_SIZE);

		/*
		 * Bug fix: the wireless extensions core has already
		 * transferred the user buffer into 'extra', so reading
		 * through the raw user pointer again with
		 * copy_from_user(enc->pointer) was redundant and breaks
		 * for in-kernel callers. Use the kernel-side copy.
		 */
		memcpy(key.key, extra, enc->length);

		/* Send the key to the card */
		priv->reg.wep_key[index].size = key.len;
		memcpy(&priv->reg.wep_key[index].val[0], &key.key[0],
		       priv->reg.wep_key[index].size);
		priv->need_commit |= (SME_WEP_VAL1 << index);
		priv->reg.wep_index = index;
		priv->need_commit |= SME_WEP_INDEX;
	} else {
		if (enc->flags & IW_ENCODE_DISABLED) {
			/* wipe every key slot and fall back to open auth */
			priv->reg.wep_key[0].size = 0;
			priv->reg.wep_key[1].size = 0;
			priv->reg.wep_key[2].size = 0;
			priv->reg.wep_key[3].size = 0;
			priv->reg.privacy_invoked = 0x00;
			if (priv->reg.authenticate_type == AUTH_TYPE_SHARED_KEY)
				priv->need_commit |= SME_MODE_SET;
			priv->reg.authenticate_type = AUTH_TYPE_OPEN_SYSTEM;
			wep_on_off = WEP_OFF;
			priv->need_commit |= SME_WEP_FLAG;
		} else {
			/* set_wep_key(priv, index, 0, 0, 1); xxx */
			if (priv->reg.wep_key[index].size == 0)
				return -EINVAL;
			priv->reg.wep_index = index;
			priv->need_commit |= SME_WEP_INDEX;
		}
	}

	/* Commit the changes if needed */
	if (enc->flags & IW_ENCODE_MODE)
		priv->need_commit |= SME_WEP_FLAG;

	if (enc->flags & IW_ENCODE_OPEN) {
		if (priv->reg.authenticate_type == AUTH_TYPE_SHARED_KEY)
			priv->need_commit |= SME_MODE_SET;
		priv->reg.authenticate_type = AUTH_TYPE_OPEN_SYSTEM;
	} else if (enc->flags & IW_ENCODE_RESTRICTED) {
		if (priv->reg.authenticate_type == AUTH_TYPE_OPEN_SYSTEM)
			priv->need_commit |= SME_MODE_SET;
		priv->reg.authenticate_type = AUTH_TYPE_SHARED_KEY;
	}

	if (priv->need_commit) {
		ks_wlan_setup_parameter(priv, priv->need_commit);
		priv->need_commit = 0;
	}

	return 0;
}
static int ks_wlan_get_encode(struct net_device *dev,
			      struct iw_request_info *info,
			      union iwreq_data *dwrq, char *extra)
{
	struct ks_wlan_private *priv = netdev_priv(dev);
	struct iw_point *enc = &dwrq->encoding;
	int index = (enc->flags & IW_ENCODE_INDEX) - 1;

	/* No ioctl processing while the device is asleep. */
	if (priv->sleep_mode == SLP_SLEEP)
		return -EPERM;

	/* Report the configured authentication algorithm. */
	enc->flags = IW_ENCODE_DISABLED;
	if (priv->reg.authenticate_type == AUTH_TYPE_OPEN_SYSTEM)
		enc->flags = IW_ENCODE_OPEN;
	else if (priv->reg.authenticate_type == AUTH_TYPE_SHARED_KEY)
		enc->flags = IW_ENCODE_RESTRICTED;

	/* An out-of-range or unspecified index selects the tx key. */
	if (index < 0 || index >= 4)
		index = priv->reg.wep_index;
	if (priv->reg.privacy_invoked)
		enc->flags &= ~IW_ENCODE_DISABLED;
	enc->flags |= index + 1;

	/* Hand the selected key back to user space. */
	if (index >= 0 && index < 4) {
		enc->length = (priv->reg.wep_key[index].size <= 16) ?
			       priv->reg.wep_key[index].size : 0;
		memcpy(extra, priv->reg.wep_key[index].val, enc->length);
	}
	return 0;
}
/*
 * SIOCGIWRANGE handler: fill an iw_range structure describing the
 * capabilities of this interface (channels, bitrates, encoding sizes,
 * quality scales, event capabilities).
 */
static int ks_wlan_get_range(struct net_device *dev,
			     struct iw_request_info *info,
			     union iwreq_data *dwrq, char *extra)
{
	struct ks_wlan_private *priv = netdev_priv(dev);
	struct iw_range *range = (struct iw_range *)extra;
	int i, k;

	if (priv->sleep_mode == SLP_SLEEP)
		return -EPERM;
	/* for SLEEP MODE */
	dwrq->data.length = sizeof(struct iw_range);
	memset(range, 0, sizeof(*range));
	range->min_nwid = 0x0000;
	range->max_nwid = 0x0000;
	range->num_channels = 14;
	/* Should be based on cap_rid.country to give only
	 * what the current card support
	 */
	k = 0;
	for (i = 0; i < 13; i++) {	/* channel 1 -- 13 */
		range->freq[k].i = i + 1;	/* List index */
		range->freq[k].m = frequency_list[i] * 100000;
		range->freq[k++].e = 1;	/* Values in table in MHz -> * 10^5 * 10 */
	}
	range->num_frequency = k;
	/* Channel 14 is only legal for 11b operation. */
	if (priv->reg.phy_type == D_11B_ONLY_MODE ||
	    priv->reg.phy_type == D_11BG_COMPATIBLE_MODE) {	/* channel 14 */
		range->freq[13].i = 14;	/* List index */
		range->freq[13].m = frequency_list[13] * 100000;
		range->freq[13].e = 1;	/* Values in table in MHz -> * 10^5 * 10 */
		range->num_frequency = 14;
	}
	/* Hum... Should put the right values there */
	range->max_qual.qual = 100;
	range->max_qual.level = 256 - 128;	/* 0 dBm? */
	range->max_qual.noise = 256 - 128;
	range->sensitivity = 1;
	/* Advertise the supported rate set for the configured PHY. */
	if (priv->reg.phy_type == D_11B_ONLY_MODE) {
		range->bitrate[0] = 1e6;
		range->bitrate[1] = 2e6;
		range->bitrate[2] = 5.5e6;
		range->bitrate[3] = 11e6;
		range->num_bitrates = 4;
	} else {	/* D_11G_ONLY_MODE or D_11BG_COMPATIBLE_MODE */
		range->bitrate[0] = 1e6;
		range->bitrate[1] = 2e6;
		range->bitrate[2] = 5.5e6;
		range->bitrate[3] = 11e6;
		range->bitrate[4] = 6e6;
		range->bitrate[5] = 9e6;
		range->bitrate[6] = 12e6;
		/*
		 * NOTE(review): IW_MAX_BITRATES is a compile-time constant
		 * (32 in mainline wireless.h), so this condition appears to
		 * be always false and the first branch dead.  Confirm the
		 * intended meaning before simplifying.
		 */
		if (IW_MAX_BITRATES < 9) {
			range->bitrate[7] = 54e6;
			range->num_bitrates = 8;
		} else {
			range->bitrate[7] = 18e6;
			range->bitrate[8] = 24e6;
			range->bitrate[9] = 36e6;
			range->bitrate[10] = 48e6;
			range->bitrate[11] = 54e6;
			range->num_bitrates = 12;
		}
	}
	/* Set an indication of the max TCP throughput
	 * in bit/s that we can expect using this interface.
	 * May be use for QoS stuff... Jean II
	 */
	/*
	 * NOTE(review): `i` still holds its final value from the frequency
	 * loop above (always 13 here), so this test is always true; it
	 * looks like it was meant to examine the number of bitrates.
	 * Confirm the intent before changing.
	 */
	if (i > 2)
		range->throughput = 5000 * 1000;
	else
		range->throughput = 1500 * 1000;
	range->min_rts = 0;
	range->max_rts = 2347;
	range->min_frag = 256;
	range->max_frag = 2346;
	range->encoding_size[0] = 5;	/* WEP: RC4 40 bits */
	range->encoding_size[1] = 13;	/* WEP: RC4 ~128 bits */
	range->num_encoding_sizes = 2;
	range->max_encoding_tokens = 4;
	/* power management not support */
	range->pmp_flags = IW_POWER_ON;
	range->pmt_flags = IW_POWER_ON;
	range->pm_capa = 0;
	/* Transmit Power - values are in dBm( or mW) */
	range->txpower[0] = -256;
	range->num_txpower = 1;
	range->txpower_capa = IW_TXPOW_DBM;
	/* range->txpower_capa = IW_TXPOW_MWATT; */
	range->we_version_source = 21;
	range->we_version_compiled = WIRELESS_EXT;
	range->retry_capa = IW_RETRY_ON;
	range->retry_flags = IW_RETRY_ON;
	range->r_time_flags = IW_RETRY_ON;
	/* Experimental measurements - boundary 11/5.5 Mb/s
	 *
	 * Note : with or without the (local->rssi), results
	 * are somewhat different. - Jean II
	 */
	range->avg_qual.qual = 50;
	range->avg_qual.level = 186;	/* -70 dBm */
	range->avg_qual.noise = 0;
	/* Event capability (kernel + driver) */
	range->event_capa[0] = (IW_EVENT_CAPA_K_0 |
				IW_EVENT_CAPA_MASK(SIOCGIWAP) |
				IW_EVENT_CAPA_MASK(SIOCGIWSCAN));
	range->event_capa[1] = IW_EVENT_CAPA_K_1;
	range->event_capa[4] = (IW_EVENT_CAPA_MASK(IWEVCUSTOM) |
				IW_EVENT_CAPA_MASK(IWEVMICHAELMICFAILURE));
	/* encode extension (WPA) capability */
	range->enc_capa = (IW_ENC_CAPA_WPA |
			   IW_ENC_CAPA_WPA2 |
			   IW_ENC_CAPA_CIPHER_TKIP | IW_ENC_CAPA_CIPHER_CCMP);
	return 0;
}
static int ks_wlan_set_power(struct net_device *dev,
			     struct iw_request_info *info,
			     union iwreq_data *vwrq, char *extra)
{
	struct ks_wlan_private *priv = netdev_priv(dev);

	if (priv->sleep_mode == SLP_SLEEP)
		return -EPERM;

	if (!vwrq->power.disabled) {
		/* Power saving only applies when associated to an AP. */
		if (priv->reg.operation_mode != MODE_INFRASTRUCTURE)
			return -EINVAL;
		priv->reg.power_mgmt = POWER_MGMT_SAVE1;
	} else {
		priv->reg.power_mgmt = POWER_MGMT_ACTIVE;
	}

	hostif_sme_enqueue(priv, SME_POW_MNGMT_REQUEST);
	return 0;
}
static int ks_wlan_get_power(struct net_device *dev,
			     struct iw_request_info *info,
			     union iwreq_data *vwrq, char *extra)
{
	struct ks_wlan_private *priv = netdev_priv(dev);

	if (priv->sleep_mode == SLP_SLEEP)
		return -EPERM;

	/* Any positive power_mgmt value means power saving is on. */
	vwrq->power.disabled = (priv->reg.power_mgmt > 0) ? 0 : 1;
	return 0;
}
static int ks_wlan_get_iwstats(struct net_device *dev,
			       struct iw_request_info *info,
			       union iwreq_data *vwrq, char *extra)
{
	struct ks_wlan_private *priv = netdev_priv(dev);

	if (priv->sleep_mode == SLP_SLEEP)
		return -EPERM;

	/* Only the signal level is tracked; the rest is unsupported. */
	vwrq->qual.level = priv->wstats.qual.level;
	vwrq->qual.qual = 0;
	vwrq->qual.noise = 0;
	vwrq->qual.updated = 0;
	return 0;
}
/* Note : this is deprecated in favor of IWSCAN */
static int ks_wlan_get_aplist(struct net_device *dev,
			      struct iw_request_info *info,
			      union iwreq_data *dwrq, char *extra)
{
	struct ks_wlan_private *priv = netdev_priv(dev);
	struct sockaddr *address = (struct sockaddr *)extra;
	struct iw_quality qual[LOCAL_APLIST_MAX];
	int i;

	if (priv->sleep_mode == SLP_SLEEP)
		return -EPERM;

	/* One sockaddr per cached AP, quality records appended after. */
	for (i = 0; i < priv->aplist.size; i++) {
		address[i].sa_family = ARPHRD_ETHER;
		ether_addr_copy(address[i].sa_data, priv->aplist.ap[i].bssid);
		qual[i].level = 256 - priv->aplist.ap[i].rssi;
		qual[i].qual = priv->aplist.ap[i].sq;
		qual[i].noise = 0;	/* no noise measurement available */
		qual[i].updated = 7;
	}
	if (i) {
		dwrq->data.flags = 1;	/* signal quality data present */
		memcpy(extra + sizeof(struct sockaddr) * i,
		       &qual, sizeof(struct iw_quality) * i);
	}
	dwrq->data.length = i;
	return 0;
}
static int ks_wlan_set_scan(struct net_device *dev,
			    struct iw_request_info *info,
			    union iwreq_data *wrqu, char *extra)
{
	struct ks_wlan_private *priv = netdev_priv(dev);
	int len;

	if (priv->sleep_mode == SLP_SLEEP)
		return -EPERM;

	/* Default to a broadcast scan unless a target ESSID is supplied. */
	priv->scan_ssid_len = 0;
	if (wrqu->data.length == sizeof(struct iw_scan_req) &&
	    (wrqu->data.flags & IW_SCAN_THIS_ESSID)) {
		struct iw_scan_req *req = (struct iw_scan_req *)extra;

		len = min_t(int, req->essid_len, IW_ESSID_MAX_SIZE);
		priv->scan_ssid_len = len;
		memcpy(priv->scan_ssid, req->essid, len);
	}

	priv->sme_i.sme_flag |= SME_AP_SCAN;
	hostif_sme_enqueue(priv, SME_BSS_SCAN_REQUEST);
	/* The scan completes asynchronously; return immediately. */
	return 0;
}
/*
 * Emit one information element (RSN or WPA) as an IWEVCUSTOM event whose
 * payload is a "<leader><hex dump>" string, e.g. "rsn_ie=30140100...".
 * Returns the updated event-stream pointer from iwe_stream_add_point().
 */
static char *ks_wlan_add_leader_event(const char *rsn_leader, char *end_buf,
				      char *current_ev, struct rsn_ie *rsn,
				      struct iw_event *iwe,
				      struct iw_request_info *info)
{
	char buffer[RSN_IE_BODY_MAX * 2 + 30];
	char *pbuf = &buffer[0];
	size_t leader_len = strlen(rsn_leader);
	int i;

	memset(iwe, 0, sizeof(*iwe));
	iwe->cmd = IWEVCUSTOM;
	/*
	 * BUG FIX: the previous code used sizeof(rsn_leader) - 1, but
	 * rsn_leader is a pointer parameter, so that evaluates to
	 * sizeof(char *) - 1 rather than the prefix length.  It only
	 * worked by coincidence on 64-bit builds because both "rsn_ie="
	 * and "wpa_ie=" happen to be 7 characters; on 32-bit the prefix
	 * would be truncated.  Use strlen() on the actual string.
	 */
	memcpy(buffer, rsn_leader, leader_len);
	iwe->u.data.length += leader_len;
	pbuf += leader_len;
	/* Element ID and length, two hex digits each (4 chars total). */
	pbuf += sprintf(pbuf, "%02x", rsn->id);
	pbuf += sprintf(pbuf, "%02x", rsn->size);
	iwe->u.data.length += 4;
	/* IE body rendered as a hex string. */
	for (i = 0; i < rsn->size; i++)
		pbuf += sprintf(pbuf, "%02x", rsn->body[i]);
	iwe->u.data.length += rsn->size * 2;

	return iwe_stream_add_point(info, current_ev, end_buf, iwe, &buffer[0]);
}
/*
 * Translate scan data returned from the card to a card independent
 * format that the Wireless Tools will understand - Jean II
 */
static inline char *ks_wlan_translate_scan(struct net_device *dev,
					   struct iw_request_info *info,
					   char *current_ev, char *end_buf,
					   struct local_ap *ap)
{
	static const char rsn_leader[] = "rsn_ie=";
	static const char wpa_leader[] = "wpa_ie=";
	struct iw_event iwe;	/* Temporary buffer */
	u16 capabilities;
	char *current_val;	/* For rates */
	int i;

	/* First entry *MUST* be the AP MAC address */
	iwe.cmd = SIOCGIWAP;
	iwe.u.ap_addr.sa_family = ARPHRD_ETHER;
	ether_addr_copy(iwe.u.ap_addr.sa_data, ap->bssid);
	current_ev = iwe_stream_add_event(info, current_ev,
					  end_buf, &iwe, IW_EV_ADDR_LEN);
	/* Other entries will be displayed in the order we give them */
	/* Add the ESSID, clipped to the 32-byte 802.11 maximum */
	iwe.u.data.length = ap->ssid.size;
	if (iwe.u.data.length > 32)
		iwe.u.data.length = 32;
	iwe.cmd = SIOCGIWESSID;
	iwe.u.data.flags = 1;
	current_ev = iwe_stream_add_point(info, current_ev,
					  end_buf, &iwe, ap->ssid.body);
	/* Add mode: infrastructure if ESS capability, ad-hoc if IBSS */
	iwe.cmd = SIOCGIWMODE;
	capabilities = ap->capability;
	if (capabilities & (WLAN_CAPABILITY_ESS | WLAN_CAPABILITY_IBSS)) {
		iwe.u.mode = (capabilities & WLAN_CAPABILITY_ESS) ?
			      IW_MODE_INFRA : IW_MODE_ADHOC;
		current_ev = iwe_stream_add_event(info, current_ev,
						  end_buf, &iwe, IW_EV_UINT_LEN);
	}
	/* Add frequency: map channel number through frequency_list[] */
	iwe.cmd = SIOCGIWFREQ;
	iwe.u.freq.m = ap->channel;
	iwe.u.freq.m = frequency_list[iwe.u.freq.m - 1] * 100000;
	iwe.u.freq.e = 1;
	current_ev = iwe_stream_add_event(info, current_ev,
					  end_buf, &iwe, IW_EV_FREQ_LEN);
	/* Add quality statistics */
	iwe.cmd = IWEVQUAL;
	iwe.u.qual.level = 256 - ap->rssi;
	iwe.u.qual.qual = ap->sq;
	iwe.u.qual.noise = 0;	/* invalid noise value */
	current_ev = iwe_stream_add_event(info, current_ev, end_buf,
					  &iwe, IW_EV_QUAL_LEN);
	/* Add encryption capability, derived from the privacy bit */
	iwe.cmd = SIOCGIWENCODE;
	iwe.u.data.flags = (capabilities & WLAN_CAPABILITY_PRIVACY) ?
			    (IW_ENCODE_ENABLED | IW_ENCODE_NOKEY) :
			    IW_ENCODE_DISABLED;
	iwe.u.data.length = 0;
	current_ev = iwe_stream_add_point(info, current_ev, end_buf,
					  &iwe, ap->ssid.body);
	/*
	 * Rate : stuffing multiple values in a single event
	 * require a bit more of magic - Jean II
	 */
	current_val = current_ev + IW_EV_LCP_LEN;
	iwe.cmd = SIOCGIWRATE;
	/* These two flags are ignored... */
	iwe.u.bitrate.fixed = 0;
	iwe.u.bitrate.disabled = 0;
	/* Max 16 values */
	for (i = 0; i < 16; i++) {
		/* NULL terminated */
		if (i >= ap->rate_set.size)
			break;
		/* Bit rate given in 500 kb/s units (+ 0x80) */
		iwe.u.bitrate.value = ((ap->rate_set.body[i] & 0x7f) * 500000);
		/* Add new value to event */
		current_val = iwe_stream_add_value(info, current_ev,
						   current_val, end_buf, &iwe,
						   IW_EV_PARAM_LEN);
	}
	/* Check if we added any event */
	if ((current_val - current_ev) > IW_EV_LCP_LEN)
		current_ev = current_val;
	/* Append the RSN and WPA IEs, if present, as custom events */
	if (ap->rsn_ie.id == RSN_INFO_ELEM_ID && ap->rsn_ie.size != 0)
		current_ev = ks_wlan_add_leader_event(rsn_leader, end_buf,
						      current_ev, &ap->rsn_ie,
						      &iwe, info);
	if (ap->wpa_ie.id == WPA_INFO_ELEM_ID && ap->wpa_ie.size != 0)
		current_ev = ks_wlan_add_leader_event(wpa_leader, end_buf,
						      current_ev, &ap->wpa_ie,
						      &iwe, info);
	/*
	 * The other data in the scan result are not really
	 * interesting, so for now drop it - Jean II
	 */
	return current_ev;
}
static int ks_wlan_get_scan(struct net_device *dev,
			    struct iw_request_info *info,
			    union iwreq_data *dwrq, char *extra)
{
	struct ks_wlan_private *priv = netdev_priv(dev);
	char *current_ev = extra;
	char *end_buf;
	int i;

	if (priv->sleep_mode == SLP_SLEEP)
		return -EPERM;

	/* A scan is still in flight; ask user space to retry later. */
	if (priv->sme_i.sme_flag & SME_AP_SCAN)
		return -EAGAIN;
	/* Nothing cached; the caller must start a new scan first. */
	if (priv->aplist.size == 0)
		return -ENODATA;

	end_buf = extra + dwrq->data.length;
	for (i = 0; i < priv->aplist.size; i++) {
		/* Stop when there is no room left for another entry. */
		if (end_buf - current_ev <= IW_EV_ADDR_LEN) {
			dwrq->data.length = 0;
			return -E2BIG;
		}
		current_ev = ks_wlan_translate_scan(dev, info, current_ev,
						    end_buf,
						    &priv->aplist.ap[i]);
	}
	dwrq->data.length = current_ev - extra;
	dwrq->data.flags = 0;
	return 0;
}
/* called after a bunch of SET operations */
static int ks_wlan_config_commit(struct net_device *dev,
				 struct iw_request_info *info,
				 union iwreq_data *zwrq,
				 char *extra)
{
	struct ks_wlan_private *priv = netdev_priv(dev);

	/* Flush any pending parameter changes to the firmware. */
	if (priv->need_commit) {
		ks_wlan_setup_parameter(priv, priv->need_commit);
		priv->need_commit = 0;
	}
	return 0;
}
/* set association ie params */
static int ks_wlan_set_genie(struct net_device *dev,
			     struct iw_request_info *info,
			     union iwreq_data *dwrq, char *extra)
{
	struct ks_wlan_private *priv = netdev_priv(dev);

	if (priv->sleep_mode == SLP_SLEEP)
		return -EPERM;

	/* The IE is not consumed here; accept the request as a no-op. */
	return 0;
}
/*
 * SIOCSIWAUTH handler: apply one authentication/cipher parameter from
 * wpa_supplicant.  Changes are accumulated in priv->need_commit and
 * pushed to the firmware at the end via ks_wlan_setup_parameter().
 */
static int ks_wlan_set_auth_mode(struct net_device *dev,
				 struct iw_request_info *info,
				 union iwreq_data *vwrq, char *extra)
{
	struct ks_wlan_private *priv = netdev_priv(dev);
	struct iw_param *param = &vwrq->param;
	int index = (param->flags & IW_AUTH_INDEX);
	int value = param->value;

	if (priv->sleep_mode == SLP_SLEEP)
		return -EPERM;
	/* for SLEEP MODE */
	switch (index) {
	case IW_AUTH_WPA_VERSION: /* 0 */
		/* Enable/disable RSN along with the WPA version choice. */
		switch (value) {
		case IW_AUTH_WPA_VERSION_DISABLED:
			priv->wpa.version = value;
			if (priv->wpa.rsn_enabled)
				priv->wpa.rsn_enabled = false;
			priv->need_commit |= SME_RSN;
			break;
		case IW_AUTH_WPA_VERSION_WPA:
		case IW_AUTH_WPA_VERSION_WPA2:
			priv->wpa.version = value;
			if (!(priv->wpa.rsn_enabled))
				priv->wpa.rsn_enabled = true;
			priv->need_commit |= SME_RSN;
			break;
		default:
			return -EOPNOTSUPP;
		}
		break;
	case IW_AUTH_CIPHER_PAIRWISE: /* 1 */
		/* Any real cipher implies privacy_invoked; NONE clears it. */
		switch (value) {
		case IW_AUTH_CIPHER_NONE:
			if (priv->reg.privacy_invoked) {
				priv->reg.privacy_invoked = 0x00;
				priv->need_commit |= SME_WEP_FLAG;
			}
			break;
		case IW_AUTH_CIPHER_WEP40:
		case IW_AUTH_CIPHER_TKIP:
		case IW_AUTH_CIPHER_CCMP:
		case IW_AUTH_CIPHER_WEP104:
			if (!priv->reg.privacy_invoked) {
				priv->reg.privacy_invoked = 0x01;
				priv->need_commit |= SME_WEP_FLAG;
			}
			priv->wpa.pairwise_suite = value;
			priv->need_commit |= SME_RSN_UNICAST;
			break;
		default:
			return -EOPNOTSUPP;
		}
		break;
	case IW_AUTH_CIPHER_GROUP: /* 2 */
		/* Same pattern as the pairwise cipher, for the group suite. */
		switch (value) {
		case IW_AUTH_CIPHER_NONE:
			if (priv->reg.privacy_invoked) {
				priv->reg.privacy_invoked = 0x00;
				priv->need_commit |= SME_WEP_FLAG;
			}
			break;
		case IW_AUTH_CIPHER_WEP40:
		case IW_AUTH_CIPHER_TKIP:
		case IW_AUTH_CIPHER_CCMP:
		case IW_AUTH_CIPHER_WEP104:
			if (!priv->reg.privacy_invoked) {
				priv->reg.privacy_invoked = 0x01;
				priv->need_commit |= SME_WEP_FLAG;
			}
			priv->wpa.group_suite = value;
			priv->need_commit |= SME_RSN_MULTICAST;
			break;
		default:
			return -EOPNOTSUPP;
		}
		break;
	case IW_AUTH_KEY_MGMT: /* 3 */
		switch (value) {
		case IW_AUTH_KEY_MGMT_802_1X:
		case IW_AUTH_KEY_MGMT_PSK:
		case 0:	/* NONE or 802_1X_NO_WPA */
		case 4:	/* WPA_NONE */
			priv->wpa.key_mgmt_suite = value;
			priv->need_commit |= SME_RSN_AUTH;
			break;
		default:
			return -EOPNOTSUPP;
		}
		break;
	case IW_AUTH_80211_AUTH_ALG: /* 6 */
		switch (value) {
		case IW_AUTH_ALG_OPEN_SYSTEM:
			priv->wpa.auth_alg = value;
			priv->reg.authenticate_type = AUTH_TYPE_OPEN_SYSTEM;
			break;
		case IW_AUTH_ALG_SHARED_KEY:
			priv->wpa.auth_alg = value;
			priv->reg.authenticate_type = AUTH_TYPE_SHARED_KEY;
			break;
		case IW_AUTH_ALG_LEAP:
		default:
			return -EOPNOTSUPP;
		}
		priv->need_commit |= SME_MODE_SET;
		break;
	case IW_AUTH_WPA_ENABLED: /* 7 */
		priv->wpa.wpa_enabled = value;
		break;
	case IW_AUTH_PRIVACY_INVOKED: /* 10 */
		/* Only touch the flag (and commit) when it actually changes. */
		if ((value && !priv->reg.privacy_invoked) ||
		    (!value && priv->reg.privacy_invoked)) {
			priv->reg.privacy_invoked = value ? 0x01 : 0x00;
			priv->need_commit |= SME_WEP_FLAG;
		}
		break;
	case IW_AUTH_RX_UNENCRYPTED_EAPOL: /* 4 */
	case IW_AUTH_TKIP_COUNTERMEASURES: /* 5 */
	case IW_AUTH_DROP_UNENCRYPTED: /* 8 */
	case IW_AUTH_ROAMING_CONTROL: /* 9 */
	default:
		/* Silently accepted: not supported by this hardware. */
		break;
	}
	/* return -EINPROGRESS; */
	if (priv->need_commit) {
		ks_wlan_setup_parameter(priv, priv->need_commit);
		priv->need_commit = 0;
	}
	return 0;
}
static int ks_wlan_get_auth_mode(struct net_device *dev,
				 struct iw_request_info *info,
				 union iwreq_data *vwrq, char *extra)
{
	struct ks_wlan_private *priv = netdev_priv(dev);
	struct iw_param *param = &vwrq->param;
	int index = (param->flags & IW_AUTH_INDEX);

	if (priv->sleep_mode == SLP_SLEEP)
		return -EPERM;

	/* Report the cached WPA state; unknown indexes are left untouched. */
	if (index == IW_AUTH_WPA_VERSION)
		param->value = priv->wpa.version;
	else if (index == IW_AUTH_CIPHER_PAIRWISE)
		param->value = priv->wpa.pairwise_suite;
	else if (index == IW_AUTH_CIPHER_GROUP)
		param->value = priv->wpa.group_suite;
	else if (index == IW_AUTH_KEY_MGMT)
		param->value = priv->wpa.key_mgmt_suite;
	else if (index == IW_AUTH_80211_AUTH_ALG)
		param->value = priv->wpa.auth_alg;
	else if (index == IW_AUTH_WPA_ENABLED)
		param->value = priv->wpa.rsn_enabled;

	return 0;
}
/* set encoding token & mode (WPA)*/
/*
 * SIOCSIWENCODEEXT handler: install a WEP/TKIP/CCMP key into one of the
 * four key slots and queue the corresponding SME requests.
 */
static int ks_wlan_set_encode_ext(struct net_device *dev,
				  struct iw_request_info *info,
				  union iwreq_data *dwrq, char *extra)
{
	struct ks_wlan_private *priv = netdev_priv(dev);
	struct iw_encode_ext *enc;
	int index = dwrq->encoding.flags & IW_ENCODE_INDEX;
	unsigned int commit = 0;	/* accumulated SME_* flags to enqueue */
	struct wpa_key *key;

	enc = (struct iw_encode_ext *)extra;
	if (!enc)
		return -EINVAL;
	if (priv->sleep_mode == SLP_SLEEP)
		return -EPERM;
	/* for SLEEP MODE */
	/* User-visible key indexes are 1..4; convert to 0-based slot. */
	if (index < 1 || index > 4)
		return -EINVAL;
	index--;
	key = &priv->wpa.key[index];

	if (dwrq->encoding.flags & IW_ENCODE_DISABLED)
		key->key_len = 0;
	key->ext_flags = enc->ext_flags;
	/* Either mark this slot as the tx key or record the rx sequence. */
	if (enc->ext_flags & IW_ENCODE_EXT_SET_TX_KEY) {
		priv->wpa.txkey = index;
		commit |= SME_WEP_INDEX;
	} else if (enc->ext_flags & IW_ENCODE_EXT_RX_SEQ_VALID) {
		memcpy(&key->rx_seq[0], &enc->rx_seq[0], IW_ENCODE_SEQ_MAX_SIZE);
	}

	ether_addr_copy(&key->addr.sa_data[0], &enc->addr.sa_data[0]);

	switch (enc->alg) {
	case IW_ENCODE_ALG_NONE:
		/* Clear the key and drop the privacy flag if it was set. */
		if (priv->reg.privacy_invoked) {
			priv->reg.privacy_invoked = 0x00;
			commit |= SME_WEP_FLAG;
		}
		key->key_len = 0;
		break;
	case IW_ENCODE_ALG_WEP:
	case IW_ENCODE_ALG_CCMP:
		if (!priv->reg.privacy_invoked) {
			priv->reg.privacy_invoked = 0x01;
			commit |= SME_WEP_FLAG;
		}
		if (enc->key_len) {
			int key_len = clamp_val(enc->key_len, 0, IW_ENCODING_TOKEN_MAX);

			memcpy(&key->key_val[0], &enc->key[0], key_len);
			key->key_len = key_len;
			commit |= (SME_WEP_VAL1 << index);
		}
		break;
	case IW_ENCODE_ALG_TKIP:
		if (!priv->reg.privacy_invoked) {
			priv->reg.privacy_invoked = 0x01;
			commit |= SME_WEP_FLAG;
		}
		/*
		 * A 32-byte TKIP token is split as: 16 bytes of key,
		 * then the tx and rx MIC keys (8 bytes each).
		 */
		if (enc->key_len == 32) {
			memcpy(&key->key_val[0], &enc->key[0], enc->key_len - 16);
			key->key_len = enc->key_len - 16;
			if (priv->wpa.key_mgmt_suite == 4) { /* WPA_NONE */
				/* Single MIC key used for both directions. */
				memcpy(&key->tx_mic_key[0], &enc->key[16], 8);
				memcpy(&key->rx_mic_key[0], &enc->key[16], 8);
			} else {
				memcpy(&key->tx_mic_key[0], &enc->key[16], 8);
				memcpy(&key->rx_mic_key[0], &enc->key[24], 8);
			}
			commit |= (SME_WEP_VAL1 << index);
		}
		break;
	default:
		return -EINVAL;
	}
	key->alg = enc->alg;

	/* Push the accumulated changes to the SME work queue. */
	if (commit) {
		if (commit & SME_WEP_INDEX)
			hostif_sme_enqueue(priv, SME_SET_TXKEY);
		if (commit & SME_WEP_VAL_MASK)
			hostif_sme_enqueue(priv, SME_SET_KEY1 + index);
		if (commit & SME_WEP_FLAG)
			hostif_sme_enqueue(priv, SME_WEP_FLAG_REQUEST);
	}
	return 0;
}
/* get encoding token & mode (WPA)*/
static int ks_wlan_get_encode_ext(struct net_device *dev,
				  struct iw_request_info *info,
				  union iwreq_data *dwrq, char *extra)
{
	struct ks_wlan_private *priv = netdev_priv(dev);

	if (priv->sleep_mode == SLP_SLEEP)
		return -EPERM;

	/* Key material is never reported back to user space. */
	return 0;
}
/*
 * SIOCSIWPMKSA handler: maintain the PMKSA cache (add/remove/flush)
 * stored in priv->pmklist, an MRU-ordered list backed by a fixed array
 * of PMK_LIST_MAX entries, then notify the SME.
 */
static int ks_wlan_set_pmksa(struct net_device *dev,
			     struct iw_request_info *info,
			     union iwreq_data *dwrq, char *extra)
{
	struct ks_wlan_private *priv = netdev_priv(dev);
	struct iw_pmksa *pmksa;
	int i;
	struct pmk *pmk;
	struct list_head *ptr;

	if (priv->sleep_mode == SLP_SLEEP)
		return -EPERM;
	/* for SLEEP MODE */
	if (!extra)
		return -EINVAL;
	pmksa = (struct iw_pmksa *)extra;

	switch (pmksa->cmd) {
	case IW_PMKSA_ADD:
		/* Empty cache: take the first unused array slot. */
		if (list_empty(&priv->pmklist.head)) {
			for (i = 0; i < PMK_LIST_MAX; i++) {
				pmk = &priv->pmklist.pmk[i];
				if (is_zero_ether_addr(pmk->bssid))
					break;
			}
			ether_addr_copy(pmk->bssid, pmksa->bssid.sa_data);
			memcpy(pmk->pmkid, pmksa->pmkid, IW_PMKID_LEN);
			list_add(&pmk->list, &priv->pmklist.head);
			priv->pmklist.size++;
			break;
		}
		/* search cache data */
		list_for_each(ptr, &priv->pmklist.head) {
			pmk = list_entry(ptr, struct pmk, list);
			if (ether_addr_equal(pmksa->bssid.sa_data, pmk->bssid)) {
				/* Refresh the PMKID and move to MRU front. */
				memcpy(pmk->pmkid, pmksa->pmkid, IW_PMKID_LEN);
				list_move(&pmk->list, &priv->pmklist.head);
				break;
			}
		}
		/* Found and refreshed an existing entry: done. */
		if (ptr != &priv->pmklist.head)
			break;
		/* new cache data */
		if (priv->pmklist.size < PMK_LIST_MAX) {
			for (i = 0; i < PMK_LIST_MAX; i++) {
				pmk = &priv->pmklist.pmk[i];
				if (is_zero_ether_addr(pmk->bssid))
					break;
			}
			ether_addr_copy(pmk->bssid, pmksa->bssid.sa_data);
			memcpy(pmk->pmkid, pmksa->pmkid, IW_PMKID_LEN);
			list_add(&pmk->list, &priv->pmklist.head);
			priv->pmklist.size++;
		} else { /* overwrite old cache data */
			/* Cache full: recycle the least-recently-used tail. */
			pmk = list_entry(priv->pmklist.head.prev, struct pmk,
					 list);
			ether_addr_copy(pmk->bssid, pmksa->bssid.sa_data);
			memcpy(pmk->pmkid, pmksa->pmkid, IW_PMKID_LEN);
			list_move(&pmk->list, &priv->pmklist.head);
		}
		break;
	case IW_PMKSA_REMOVE:
		if (list_empty(&priv->pmklist.head))
			return -EINVAL;
		/* search cache data */
		list_for_each(ptr, &priv->pmklist.head) {
			pmk = list_entry(ptr, struct pmk, list);
			if (ether_addr_equal(pmksa->bssid.sa_data, pmk->bssid)) {
				eth_zero_addr(pmk->bssid);
				memset(pmk->pmkid, 0, IW_PMKID_LEN);
				list_del_init(&pmk->list);
				break;
			}
		}
		/* Address not cached: nothing to remove. */
		if (ptr == &priv->pmklist.head)
			return 0;
		break;
	case IW_PMKSA_FLUSH:
		/* Reset the whole cache and re-init every list node. */
		memset(&priv->pmklist, 0, sizeof(priv->pmklist));
		INIT_LIST_HEAD(&priv->pmklist.head);
		for (i = 0; i < PMK_LIST_MAX; i++)
			INIT_LIST_HEAD(&priv->pmklist.pmk[i].list);
		break;
	default:
		return -EINVAL;
	}

	hostif_sme_enqueue(priv, SME_SET_PMKSA);
	return 0;
}
static struct iw_statistics *ks_get_wireless_stats(struct net_device *dev)
{
	struct ks_wlan_private *priv = netdev_priv(dev);
	struct iw_statistics *wstats = &priv->wstats;

	if (!atomic_read(&update_phyinfo)) {
		if (priv->dev_state < DEVICE_STATE_READY)
			return NULL;
		return wstats;
	}

	/* None of the discard/miss counters are tracked; report zeros. */
	wstats->discard.nwid = 0;
	wstats->discard.code = 0;
	wstats->discard.fragment = 0;
	wstats->discard.retries = 0;
	wstats->discard.misc = 0;
	wstats->miss.beacon = 0;
	return wstats;
}
static int ks_wlan_set_stop_request(struct net_device *dev,
				    struct iw_request_info *info,
				    union iwreq_data *uwrq, char *extra)
{
	struct ks_wlan_private *priv = netdev_priv(dev);

	if (priv->sleep_mode == SLP_SLEEP)
		return -EPERM;

	/* A zero mode value is not a valid stop request. */
	if (uwrq->mode == 0)
		return -EINVAL;

	hostif_sme_enqueue(priv, SME_STOP_REQUEST);
	return 0;
}
#include <linux/ieee80211.h>
static int ks_wlan_set_mlme(struct net_device *dev,
			    struct iw_request_info *info,
			    union iwreq_data *dwrq, char *extra)
{
	struct ks_wlan_private *priv = netdev_priv(dev);
	struct iw_mlme *mlme = (struct iw_mlme *)extra;
	union iwreq_data uwrq;

	uwrq.mode = 1;
	if (priv->sleep_mode == SLP_SLEEP)
		return -EPERM;

	switch (mlme->cmd) {
	case IW_MLME_DEAUTH:
		/* MIC-failure deauth is intentionally ignored here. */
		if (mlme->reason_code == WLAN_REASON_MIC_FAILURE)
			return 0;
		return ks_wlan_set_stop_request(dev, NULL, &uwrq, NULL);
	case IW_MLME_DISASSOC:
		return ks_wlan_set_stop_request(dev, NULL, &uwrq, NULL);
	default:
		return -EOPNOTSUPP;
	}
}
static int ks_wlan_get_firmware_version(struct net_device *dev,
					struct iw_request_info *info,
					union iwreq_data *uwrq, char *extra)
{
	struct ks_wlan_private *priv = netdev_priv(dev);
	struct iw_point *dwrq = &uwrq->data;

	/* Length includes room for the terminating NUL. */
	dwrq->length = priv->version_size + 1;
	strscpy(extra, priv->firmware_version, dwrq->length);
	return 0;
}
static int ks_wlan_set_preamble(struct net_device *dev,
				struct iw_request_info *info,
				union iwreq_data *uwrq, char *extra)
{
	struct ks_wlan_private *priv = netdev_priv(dev);

	if (priv->sleep_mode == SLP_SLEEP)
		return -EPERM;

	/* Only the two defined preamble values are accepted. */
	if (!(uwrq->mode == LONG_PREAMBLE || uwrq->mode == SHORT_PREAMBLE))
		return -EINVAL;

	priv->reg.preamble = uwrq->mode;
	priv->need_commit |= SME_MODE_SET;
	return -EINPROGRESS;	/* Call commit handler */
}
static int ks_wlan_get_preamble(struct net_device *dev,
				struct iw_request_info *info,
				union iwreq_data *uwrq, char *extra)
{
	struct ks_wlan_private *priv = netdev_priv(dev);

	if (priv->sleep_mode == SLP_SLEEP)
		return -EPERM;

	/* Report the currently configured preamble type. */
	uwrq->mode = priv->reg.preamble;
	return 0;
}
static int ks_wlan_set_power_mgmt(struct net_device *dev,
				  struct iw_request_info *info,
				  union iwreq_data *uwrq, char *extra)
{
	struct ks_wlan_private *priv = netdev_priv(dev);

	if (priv->sleep_mode == SLP_SLEEP)
		return -EPERM;

	switch (uwrq->mode) {
	case POWER_MGMT_ACTIVE:
		break;
	case POWER_MGMT_SAVE1:
	case POWER_MGMT_SAVE2:
		/* Power saving modes require an infrastructure association. */
		if (priv->reg.operation_mode != MODE_INFRASTRUCTURE)
			return -EINVAL;
		break;
	default:
		return -EINVAL;
	}

	priv->reg.power_mgmt = uwrq->mode;
	hostif_sme_enqueue(priv, SME_POW_MNGMT_REQUEST);
	return 0;
}
static int ks_wlan_get_power_mgmt(struct net_device *dev,
				  struct iw_request_info *info,
				  union iwreq_data *uwrq, char *extra)
{
	struct ks_wlan_private *priv = netdev_priv(dev);

	if (priv->sleep_mode == SLP_SLEEP)
		return -EPERM;

	/* Report the currently configured power management mode. */
	uwrq->mode = priv->reg.power_mgmt;
	return 0;
}
static int ks_wlan_set_scan_type(struct net_device *dev,
				 struct iw_request_info *info,
				 union iwreq_data *uwrq, char *extra)
{
	struct ks_wlan_private *priv = netdev_priv(dev);

	if (priv->sleep_mode == SLP_SLEEP)
		return -EPERM;

	/* Only active and passive scanning are defined. */
	if (!(uwrq->mode == ACTIVE_SCAN || uwrq->mode == PASSIVE_SCAN))
		return -EINVAL;

	priv->reg.scan_type = uwrq->mode;
	return 0;
}
static int ks_wlan_get_scan_type(struct net_device *dev,
				 struct iw_request_info *info,
				 union iwreq_data *uwrq, char *extra)
{
	struct ks_wlan_private *priv = netdev_priv(dev);

	if (priv->sleep_mode == SLP_SLEEP)
		return -EPERM;

	/* Report the currently configured scan type. */
	uwrq->mode = priv->reg.scan_type;
	return 0;
}
static int ks_wlan_set_beacon_lost(struct net_device *dev,
				   struct iw_request_info *info,
				   union iwreq_data *uwrq, char *extra)
{
	struct ks_wlan_private *priv = netdev_priv(dev);

	if (priv->sleep_mode == SLP_SLEEP)
		return -EPERM;

	if (uwrq->mode > BEACON_LOST_COUNT_MAX)
		return -EINVAL;

	priv->reg.beacon_lost_count = uwrq->mode;

	/* Outside infrastructure mode the change needs no commit. */
	if (priv->reg.operation_mode != MODE_INFRASTRUCTURE)
		return 0;

	priv->need_commit |= SME_MODE_SET;
	return -EINPROGRESS;	/* Call commit handler */
}
static int ks_wlan_get_beacon_lost(struct net_device *dev,
				   struct iw_request_info *info,
				   union iwreq_data *uwrq, char *extra)
{
	struct ks_wlan_private *priv = netdev_priv(dev);

	if (priv->sleep_mode == SLP_SLEEP)
		return -EPERM;

	/* Report the configured beacon-lost threshold. */
	uwrq->mode = priv->reg.beacon_lost_count;
	return 0;
}
static int ks_wlan_set_phy_type(struct net_device *dev,
				struct iw_request_info *info,
				union iwreq_data *uwrq, char *extra)
{
	struct ks_wlan_private *priv = netdev_priv(dev);

	if (priv->sleep_mode == SLP_SLEEP)
		return -EPERM;

	switch (uwrq->mode) {
	case D_11B_ONLY_MODE:
	case D_11G_ONLY_MODE:
	case D_11BG_COMPATIBLE_MODE:
		break;
	default:
		return -EINVAL;
	}

	priv->reg.phy_type = uwrq->mode;
	priv->need_commit |= SME_MODE_SET;
	return -EINPROGRESS;	/* Call commit handler */
}
static int ks_wlan_get_phy_type(struct net_device *dev,
				struct iw_request_info *info,
				union iwreq_data *uwrq, char *extra)
{
	struct ks_wlan_private *priv = netdev_priv(dev);

	if (priv->sleep_mode == SLP_SLEEP)
		return -EPERM;

	/* Report the configured PHY type. */
	uwrq->mode = priv->reg.phy_type;
	return 0;
}
static int ks_wlan_set_cts_mode(struct net_device *dev,
				struct iw_request_info *info,
				union iwreq_data *uwrq, char *extra)
{
	struct ks_wlan_private *priv = netdev_priv(dev);

	if (priv->sleep_mode == SLP_SLEEP)
		return -EPERM;

	if (!(uwrq->mode == CTS_MODE_FALSE || uwrq->mode == CTS_MODE_TRUE))
		return -EINVAL;

	/* CTS protection is only honoured on 11g-capable PHY types. */
	if (uwrq->mode == CTS_MODE_FALSE ||
	    priv->reg.phy_type == D_11G_ONLY_MODE ||
	    priv->reg.phy_type == D_11BG_COMPATIBLE_MODE)
		priv->reg.cts_mode = uwrq->mode;
	else
		priv->reg.cts_mode = !uwrq->mode;

	priv->need_commit |= SME_MODE_SET;
	return -EINPROGRESS;	/* Call commit handler */
}
static int ks_wlan_get_cts_mode(struct net_device *dev,
				struct iw_request_info *info,
				union iwreq_data *uwrq, char *extra)
{
	struct ks_wlan_private *priv = netdev_priv(dev);

	if (priv->sleep_mode == SLP_SLEEP)
		return -EPERM;

	/* Report the configured CTS mode. */
	uwrq->mode = priv->reg.cts_mode;
	return 0;
}
static int ks_wlan_set_sleep_mode(struct net_device *dev,
				  struct iw_request_info *info,
				  union iwreq_data *uwrq, char *extra)
{
	struct ks_wlan_private *priv = netdev_priv(dev);

	if (!(uwrq->mode == SLP_SLEEP || uwrq->mode == SLP_ACTIVE)) {
		netdev_err(dev, "SET_SLEEP_MODE %d error\n", uwrq->mode);
		return -EINVAL;
	}

	priv->sleep_mode = uwrq->mode;
	netdev_info(dev, "SET_SLEEP_MODE %d\n", priv->sleep_mode);

	/* Entering sleep first stops the interface, then requests sleep. */
	if (priv->sleep_mode == SLP_SLEEP)
		hostif_sme_enqueue(priv, SME_STOP_REQUEST);
	hostif_sme_enqueue(priv, SME_SLEEP_REQUEST);
	return 0;
}
static int ks_wlan_get_sleep_mode(struct net_device *dev,
				  struct iw_request_info *info,
				  union iwreq_data *uwrq, char *extra)
{
	struct ks_wlan_private *priv = netdev_priv(dev);

	/* Report the current sleep state; allowed even while asleep. */
	uwrq->mode = priv->sleep_mode;
	return 0;
}
static int ks_wlan_set_wps_enable(struct net_device *dev,
				  struct iw_request_info *info,
				  union iwreq_data *uwrq, char *extra)
{
	struct ks_wlan_private *priv = netdev_priv(dev);

	if (priv->sleep_mode == SLP_SLEEP)
		return -EPERM;

	/* WPS enable is strictly boolean. */
	if (!(uwrq->mode == 0 || uwrq->mode == 1))
		return -EINVAL;

	priv->wps.wps_enabled = uwrq->mode;
	hostif_sme_enqueue(priv, SME_WPS_ENABLE_REQUEST);
	return 0;
}
static int ks_wlan_get_wps_enable(struct net_device *dev,
				  struct iw_request_info *info,
				  union iwreq_data *uwrq, char *extra)
{
	struct ks_wlan_private *priv = netdev_priv(dev);

	if (priv->sleep_mode == SLP_SLEEP)
		return -EPERM;

	/* Report the cached WPS enable flag. */
	uwrq->mode = priv->wps.wps_enabled;
	netdev_info(dev, "return=%d\n", uwrq->mode);
	return 0;
}
/*
 * Store a WPS information element, to be inserted into probe requests,
 * into priv->wps.ie.  The stored layout is: one length byte followed by
 * the complete IE (type, length, body).
 */
static int ks_wlan_set_wps_probe_req(struct net_device *dev,
				     struct iw_request_info *info,
				     union iwreq_data *uwrq, char *extra)
{
	struct iw_point *dwrq = &uwrq->data;
	u8 *p = extra;
	unsigned char len;
	struct ks_wlan_private *priv = netdev_priv(dev);

	if (priv->sleep_mode == SLP_SLEEP)
		return -EPERM;
	/* length check: the buffer must hold exactly one IE (p[1] is the
	 * IE length field, +2 for the type/length header)
	 */
	if (p[1] + 2 != dwrq->length || dwrq->length > 256)
		return -EINVAL;
	/* NOTE(review): the capacity of priv->wps.ie is not visible here;
	 * the <=256 bound above presumably matches it - confirm.
	 */
	priv->wps.ielen = p[1] + 2 + 1;	/* IE header + IE + sizeof(len) */
	len = p[1] + 2;	/* IE header + IE */
	/* First stored byte is the total IE length, then the IE itself. */
	memcpy(priv->wps.ie, &len, sizeof(len));
	p = memcpy(priv->wps.ie + 1, p, len);
	netdev_dbg(dev, "%d(%#x): %02X %02X %02X %02X ... %02X %02X %02X\n",
		   priv->wps.ielen, priv->wps.ielen, p[0], p[1], p[2], p[3],
		   p[priv->wps.ielen - 3], p[priv->wps.ielen - 2],
		   p[priv->wps.ielen - 1]);
	hostif_sme_enqueue(priv, SME_WPS_PROBE_REQUEST);
	return 0;
}
static int ks_wlan_set_tx_gain(struct net_device *dev,
			       struct iw_request_info *info,
			       union iwreq_data *uwrq, char *extra)
{
	struct ks_wlan_private *priv = netdev_priv(dev);

	if (priv->sleep_mode == SLP_SLEEP)
		return -EPERM;

	if (uwrq->mode > 0xFF)
		return -EINVAL;

	priv->gain.tx_gain = (u8)uwrq->mode;
	/* 0xFF means "no manual tx gain"; anything lower enables it. */
	priv->gain.tx_mode = (priv->gain.tx_gain == 0xFF) ? 0 : 1;
	hostif_sme_enqueue(priv, SME_SET_GAIN);
	return 0;
}
static int ks_wlan_get_tx_gain(struct net_device *dev,
			       struct iw_request_info *info,
			       union iwreq_data *uwrq, char *extra)
{
	struct ks_wlan_private *priv = netdev_priv(dev);

	if (priv->sleep_mode == SLP_SLEEP)
		return -EPERM;

	/* Report the cached value and queue a firmware query as well. */
	uwrq->mode = priv->gain.tx_gain;
	hostif_sme_enqueue(priv, SME_GET_GAIN);
	return 0;
}
static int ks_wlan_set_rx_gain(struct net_device *dev,
			       struct iw_request_info *info,
			       union iwreq_data *uwrq, char *extra)
{
	struct ks_wlan_private *priv = netdev_priv(dev);

	if (priv->sleep_mode == SLP_SLEEP)
		return -EPERM;

	if (uwrq->mode > 0xFF)
		return -EINVAL;

	priv->gain.rx_gain = (u8)uwrq->mode;
	/* 0xFF means "no manual rx gain"; anything lower enables it. */
	priv->gain.rx_mode = (priv->gain.rx_gain == 0xFF) ? 0 : 1;
	hostif_sme_enqueue(priv, SME_SET_GAIN);
	return 0;
}
/* Return the cached RX gain and refresh it from the firmware. */
static int ks_wlan_get_rx_gain(struct net_device *dev,
			       struct iw_request_info *info,
			       union iwreq_data *uwrq, char *extra)
{
	struct ks_wlan_private *priv = netdev_priv(dev);

	/* for SLEEP MODE */
	if (priv->sleep_mode == SLP_SLEEP)
		return -EPERM;

	uwrq->mode = priv->gain.rx_gain;
	/* ask the SME to re-read the gain for the next query */
	hostif_sme_enqueue(priv, SME_GET_GAIN);
	return 0;
}
static int ks_wlan_get_eeprom_cksum(struct net_device *dev,
struct iw_request_info *info,
union iwreq_data *uwrq, char *extra)
{
struct ks_wlan_private *priv = netdev_priv(dev);
uwrq->mode = priv->eeprom_checksum;
return 0;
}
/*
 * print_hif_event() - log the symbolic name of a host-interface event code.
 * @dev: net device used for the log prefix
 * @event: HIF_* event code taken from the host command history
 *
 * Table-driven replacement for a 36-arm switch that only mapped each
 * constant to its own name.  Unknown codes are silently ignored, exactly
 * as the switch (which had no default arm) behaved.
 */
static void print_hif_event(struct net_device *dev, int event)
{
	static const struct {
		int event;		/* HIF_* code */
		const char *name;	/* printable symbol */
	} hif_event_names[] = {
		{ HIF_DATA_REQ, "HIF_DATA_REQ" },
		{ HIF_DATA_IND, "HIF_DATA_IND" },
		{ HIF_MIB_GET_REQ, "HIF_MIB_GET_REQ" },
		{ HIF_MIB_GET_CONF, "HIF_MIB_GET_CONF" },
		{ HIF_MIB_SET_REQ, "HIF_MIB_SET_REQ" },
		{ HIF_MIB_SET_CONF, "HIF_MIB_SET_CONF" },
		{ HIF_POWER_MGMT_REQ, "HIF_POWER_MGMT_REQ" },
		{ HIF_POWER_MGMT_CONF, "HIF_POWER_MGMT_CONF" },
		{ HIF_START_REQ, "HIF_START_REQ" },
		{ HIF_START_CONF, "HIF_START_CONF" },
		{ HIF_CONNECT_IND, "HIF_CONNECT_IND" },
		{ HIF_STOP_REQ, "HIF_STOP_REQ" },
		{ HIF_STOP_CONF, "HIF_STOP_CONF" },
		{ HIF_PS_ADH_SET_REQ, "HIF_PS_ADH_SET_REQ" },
		{ HIF_PS_ADH_SET_CONF, "HIF_PS_ADH_SET_CONF" },
		{ HIF_INFRA_SET_REQ, "HIF_INFRA_SET_REQ" },
		{ HIF_INFRA_SET_CONF, "HIF_INFRA_SET_CONF" },
		{ HIF_ADH_SET_REQ, "HIF_ADH_SET_REQ" },
		{ HIF_ADH_SET_CONF, "HIF_ADH_SET_CONF" },
		{ HIF_AP_SET_REQ, "HIF_AP_SET_REQ" },
		{ HIF_AP_SET_CONF, "HIF_AP_SET_CONF" },
		{ HIF_ASSOC_INFO_IND, "HIF_ASSOC_INFO_IND" },
		{ HIF_MIC_FAILURE_REQ, "HIF_MIC_FAILURE_REQ" },
		{ HIF_MIC_FAILURE_CONF, "HIF_MIC_FAILURE_CONF" },
		{ HIF_SCAN_REQ, "HIF_SCAN_REQ" },
		{ HIF_SCAN_CONF, "HIF_SCAN_CONF" },
		{ HIF_PHY_INFO_REQ, "HIF_PHY_INFO_REQ" },
		{ HIF_PHY_INFO_CONF, "HIF_PHY_INFO_CONF" },
		{ HIF_SLEEP_REQ, "HIF_SLEEP_REQ" },
		{ HIF_SLEEP_CONF, "HIF_SLEEP_CONF" },
		{ HIF_PHY_INFO_IND, "HIF_PHY_INFO_IND" },
		{ HIF_SCAN_IND, "HIF_SCAN_IND" },
		{ HIF_INFRA_SET2_REQ, "HIF_INFRA_SET2_REQ" },
		{ HIF_INFRA_SET2_CONF, "HIF_INFRA_SET2_CONF" },
		{ HIF_ADH_SET2_REQ, "HIF_ADH_SET2_REQ" },
		{ HIF_ADH_SET2_CONF, "HIF_ADH_SET2_CONF" },
	};
	unsigned int i;

	for (i = 0; i < ARRAY_SIZE(hif_event_names); i++) {
		if (hif_event_names[i].event == event) {
			netdev_info(dev, "%s\n", hif_event_names[i].name);
			return;
		}
	}
}
/*
 * ks_wlan_hostt() - dump the 64 most recent host-interface events.
 *
 * The history lives in a circular buffer of SME_EVENT_BUFF_SIZE entries
 * with priv->hostt.qtail pointing one past the newest entry.  Events are
 * printed oldest first.
 *
 * Fix: in C, '%' applied to a negative left operand yields a negative
 * result, so the original (qtail - 1 - i) % SME_EVENT_BUFF_SIZE indexed
 * before the start of the buffer whenever qtail - 1 - i went negative.
 * Bias the remainder back into [0, SME_EVENT_BUFF_SIZE) before indexing.
 */
static int ks_wlan_hostt(struct net_device *dev, struct iw_request_info *info,
			 union iwreq_data *uwrq, char *extra)
{
	struct ks_wlan_private *priv = netdev_priv(dev);
	int i;

	for (i = 63; i >= 0; i--) {
		int idx = priv->hostt.qtail - 1 - i;

		idx = ((idx % SME_EVENT_BUFF_SIZE) + SME_EVENT_BUFF_SIZE) %
		      SME_EVENT_BUFF_SIZE;
		print_hif_event(dev, priv->hostt.buff[idx]);
	}
	return 0;
}
/* Structures to export the Wireless Handlers */
/*
 * Private ioctl descriptions shown to user space by iwpriv.  Each entry's
 * command number selects a slot in ks_wlan_private_handler[] below; the
 * type fields describe the set/get argument encodings.
 */
static const struct iw_priv_args ks_wlan_private_args[] = {
/*{ cmd, set_args, get_args, name[16] } */
	{KS_WLAN_GET_FIRM_VERSION, IW_PRIV_TYPE_NONE,
	 IW_PRIV_TYPE_CHAR | (128 + 1), "GetFirmwareVer"},
	{KS_WLAN_SET_WPS_ENABLE, IW_PRIV_TYPE_INT | IW_PRIV_SIZE_FIXED | 1,
	 IW_PRIV_TYPE_NONE, "SetWPSEnable"},
	{KS_WLAN_GET_WPS_ENABLE, IW_PRIV_TYPE_NONE,
	 IW_PRIV_TYPE_INT | IW_PRIV_SIZE_FIXED | 1, "GetW"},
	{KS_WLAN_SET_WPS_PROBE_REQ, IW_PRIV_TYPE_BYTE | 2047, IW_PRIV_TYPE_NONE,
	 "SetWPSProbeReq"},
	{KS_WLAN_SET_PREAMBLE, IW_PRIV_TYPE_INT | IW_PRIV_SIZE_FIXED | 1,
	 IW_PRIV_TYPE_NONE, "SetPreamble"},
	{KS_WLAN_GET_PREAMBLE, IW_PRIV_TYPE_NONE,
	 IW_PRIV_TYPE_INT | IW_PRIV_SIZE_FIXED | 1, "GetPreamble"},
	{KS_WLAN_SET_POWER_SAVE, IW_PRIV_TYPE_INT | IW_PRIV_SIZE_FIXED | 1,
	 IW_PRIV_TYPE_NONE, "SetPowerSave"},
	{KS_WLAN_GET_POWER_SAVE, IW_PRIV_TYPE_NONE,
	 IW_PRIV_TYPE_INT | IW_PRIV_SIZE_FIXED | 1, "GetPowerSave"},
	{KS_WLAN_SET_SCAN_TYPE, IW_PRIV_TYPE_INT | IW_PRIV_SIZE_FIXED | 1,
	 IW_PRIV_TYPE_NONE, "SetScanType"},
	{KS_WLAN_GET_SCAN_TYPE, IW_PRIV_TYPE_NONE,
	 IW_PRIV_TYPE_INT | IW_PRIV_SIZE_FIXED | 1, "GetScanType"},
	{KS_WLAN_SET_RX_GAIN, IW_PRIV_TYPE_INT | IW_PRIV_SIZE_FIXED | 1,
	 IW_PRIV_TYPE_NONE, "SetRxGain"},
	{KS_WLAN_GET_RX_GAIN, IW_PRIV_TYPE_NONE,
	 IW_PRIV_TYPE_INT | IW_PRIV_SIZE_FIXED | 1, "GetRxGain"},
	{KS_WLAN_HOSTT, IW_PRIV_TYPE_NONE, IW_PRIV_TYPE_CHAR | (128 + 1),
	 "hostt"},
	{KS_WLAN_SET_BEACON_LOST, IW_PRIV_TYPE_INT | IW_PRIV_SIZE_FIXED | 1,
	 IW_PRIV_TYPE_NONE, "SetBeaconLost"},
	{KS_WLAN_GET_BEACON_LOST, IW_PRIV_TYPE_NONE,
	 IW_PRIV_TYPE_INT | IW_PRIV_SIZE_FIXED | 1, "GetBeaconLost"},
	{KS_WLAN_SET_SLEEP_MODE, IW_PRIV_TYPE_INT | IW_PRIV_SIZE_FIXED | 1,
	 IW_PRIV_TYPE_NONE, "SetSleepMode"},
	{KS_WLAN_GET_SLEEP_MODE, IW_PRIV_TYPE_NONE,
	 IW_PRIV_TYPE_INT | IW_PRIV_SIZE_FIXED | 1, "GetSleepMode"},
	{KS_WLAN_SET_TX_GAIN, IW_PRIV_TYPE_INT | IW_PRIV_SIZE_FIXED | 1,
	 IW_PRIV_TYPE_NONE, "SetTxGain"},
	{KS_WLAN_GET_TX_GAIN, IW_PRIV_TYPE_NONE,
	 IW_PRIV_TYPE_INT | IW_PRIV_SIZE_FIXED | 1, "GetTxGain"},
	{KS_WLAN_SET_PHY_TYPE, IW_PRIV_TYPE_INT | IW_PRIV_SIZE_FIXED | 1,
	 IW_PRIV_TYPE_NONE, "SetPhyType"},
	{KS_WLAN_GET_PHY_TYPE, IW_PRIV_TYPE_NONE,
	 IW_PRIV_TYPE_INT | IW_PRIV_SIZE_FIXED | 1, "GetPhyType"},
	{KS_WLAN_SET_CTS_MODE, IW_PRIV_TYPE_INT | IW_PRIV_SIZE_FIXED | 1,
	 IW_PRIV_TYPE_NONE, "SetCtsMode"},
	{KS_WLAN_GET_CTS_MODE, IW_PRIV_TYPE_NONE,
	 IW_PRIV_TYPE_INT | IW_PRIV_SIZE_FIXED | 1, "GetCtsMode"},
	{KS_WLAN_GET_EEPROM_CKSUM, IW_PRIV_TYPE_NONE,
	 IW_PRIV_TYPE_INT | IW_PRIV_SIZE_FIXED | 1, "GetChecksum"},
};
/*
 * Standard wireless-extensions handlers, indexed by SIOC[SG]IW* ioctl
 * number via the IW_HANDLER() macro.
 */
static const iw_handler ks_wlan_handler[] = {
	IW_HANDLER(SIOCSIWCOMMIT, ks_wlan_config_commit),
	IW_HANDLER(SIOCGIWNAME, ks_wlan_get_name),
	IW_HANDLER(SIOCSIWFREQ, ks_wlan_set_freq),
	IW_HANDLER(SIOCGIWFREQ, ks_wlan_get_freq),
	IW_HANDLER(SIOCSIWMODE, ks_wlan_set_mode),
	IW_HANDLER(SIOCGIWMODE, ks_wlan_get_mode),
	IW_HANDLER(SIOCGIWRANGE, ks_wlan_get_range),
	IW_HANDLER(SIOCGIWSTATS, ks_wlan_get_iwstats),
	IW_HANDLER(SIOCSIWAP, ks_wlan_set_wap),
	IW_HANDLER(SIOCGIWAP, ks_wlan_get_wap),
	IW_HANDLER(SIOCSIWMLME, ks_wlan_set_mlme),
	IW_HANDLER(SIOCGIWAPLIST, ks_wlan_get_aplist),
	IW_HANDLER(SIOCSIWSCAN, ks_wlan_set_scan),
	IW_HANDLER(SIOCGIWSCAN, ks_wlan_get_scan),
	IW_HANDLER(SIOCSIWESSID, ks_wlan_set_essid),
	IW_HANDLER(SIOCGIWESSID, ks_wlan_get_essid),
	IW_HANDLER(SIOCSIWNICKN, ks_wlan_set_nick),
	IW_HANDLER(SIOCGIWNICKN, ks_wlan_get_nick),
	IW_HANDLER(SIOCSIWRATE, ks_wlan_set_rate),
	IW_HANDLER(SIOCGIWRATE, ks_wlan_get_rate),
	IW_HANDLER(SIOCSIWRTS, ks_wlan_set_rts),
	IW_HANDLER(SIOCGIWRTS, ks_wlan_get_rts),
	IW_HANDLER(SIOCSIWFRAG, ks_wlan_set_frag),
	IW_HANDLER(SIOCGIWFRAG, ks_wlan_get_frag),
	IW_HANDLER(SIOCSIWENCODE, ks_wlan_set_encode),
	IW_HANDLER(SIOCGIWENCODE, ks_wlan_get_encode),
	IW_HANDLER(SIOCSIWPOWER, ks_wlan_set_power),
	IW_HANDLER(SIOCGIWPOWER, ks_wlan_get_power),
	IW_HANDLER(SIOCSIWGENIE, ks_wlan_set_genie),
	IW_HANDLER(SIOCSIWAUTH, ks_wlan_set_auth_mode),
	IW_HANDLER(SIOCGIWAUTH, ks_wlan_get_auth_mode),
	IW_HANDLER(SIOCSIWENCODEEXT, ks_wlan_set_encode_ext),
	IW_HANDLER(SIOCGIWENCODEEXT, ks_wlan_get_encode_ext),
	IW_HANDLER(SIOCSIWPMKSA, ks_wlan_set_pmksa),
};
/* private_handler */
/*
 * Private ioctl handlers, indexed by command number relative to
 * SIOCIWFIRSTPRIV.  A NULL slot means the command is unassigned (or, for
 * slot 20 KS_WLAN_SET_STOP_REQ, dispatched via ks_wlan_netdev_ioctl()).
 */
static const iw_handler ks_wlan_private_handler[] = {
	NULL,	/* 0 */
	NULL,	/* 1, KS_WLAN_GET_DRIVER_VERSION */
	NULL,	/* 2 */
	ks_wlan_get_firmware_version,	/* 3 KS_WLAN_GET_FIRM_VERSION */
	ks_wlan_set_wps_enable,	/* 4 KS_WLAN_SET_WPS_ENABLE */
	ks_wlan_get_wps_enable,	/* 5 KS_WLAN_GET_WPS_ENABLE */
	ks_wlan_set_wps_probe_req,	/* 6 KS_WLAN_SET_WPS_PROBE_REQ */
	ks_wlan_get_eeprom_cksum,	/* 7 KS_WLAN_GET_CONNECT */
	ks_wlan_set_preamble,	/* 8 KS_WLAN_SET_PREAMBLE */
	ks_wlan_get_preamble,	/* 9 KS_WLAN_GET_PREAMBLE */
	ks_wlan_set_power_mgmt,	/* 10 KS_WLAN_SET_POWER_SAVE */
	ks_wlan_get_power_mgmt,	/* 11 KS_WLAN_GET_POWER_SAVE */
	ks_wlan_set_scan_type,	/* 12 KS_WLAN_SET_SCAN_TYPE */
	ks_wlan_get_scan_type,	/* 13 KS_WLAN_GET_SCAN_TYPE */
	ks_wlan_set_rx_gain,	/* 14 KS_WLAN_SET_RX_GAIN */
	ks_wlan_get_rx_gain,	/* 15 KS_WLAN_GET_RX_GAIN */
	ks_wlan_hostt,	/* 16 KS_WLAN_HOSTT */
	NULL,	/* 17 */
	ks_wlan_set_beacon_lost,	/* 18 KS_WLAN_SET_BECAN_LOST */
	ks_wlan_get_beacon_lost,	/* 19 KS_WLAN_GET_BECAN_LOST */
	ks_wlan_set_tx_gain,	/* 20 KS_WLAN_SET_TX_GAIN */
	ks_wlan_get_tx_gain,	/* 21 KS_WLAN_GET_TX_GAIN */
	ks_wlan_set_phy_type,	/* 22 KS_WLAN_SET_PHY_TYPE */
	ks_wlan_get_phy_type,	/* 23 KS_WLAN_GET_PHY_TYPE */
	ks_wlan_set_cts_mode,	/* 24 KS_WLAN_SET_CTS_MODE */
	ks_wlan_get_cts_mode,	/* 25 KS_WLAN_GET_CTS_MODE */
	NULL,	/* 26 */
	NULL,	/* 27 */
	ks_wlan_set_sleep_mode,	/* 28 KS_WLAN_SET_SLEEP_MODE */
	ks_wlan_get_sleep_mode,	/* 29 KS_WLAN_GET_SLEEP_MODE */
	NULL,	/* 30 */
	NULL,	/* 31 */
};
/* Wireless-extensions descriptor wired into net_device in ks_wlan_net_start(). */
static const struct iw_handler_def ks_wlan_handler_def = {
	.num_standard = ARRAY_SIZE(ks_wlan_handler),
	.num_private = ARRAY_SIZE(ks_wlan_private_handler),
	.num_private_args = ARRAY_SIZE(ks_wlan_private_args),
	.standard = ks_wlan_handler,
	.private = ks_wlan_private_handler,
	.private_args = ks_wlan_private_args,
	.get_wireless_stats = ks_get_wireless_stats,
};
/*
 * Device ioctl entry point.  Only the private KS_WLAN_SET_STOP_REQ
 * command (SIOCIWFIRSTPRIV + 20) is handled here.
 */
static int ks_wlan_netdev_ioctl(struct net_device *dev, struct ifreq *rq,
				int cmd)
{
	struct iwreq *wrq = (struct iwreq *)rq;

	if (cmd == SIOCIWFIRSTPRIV + 20)	/* KS_WLAN_SET_STOP_REQ */
		return ks_wlan_set_stop_request(dev, NULL, &wrq->u, NULL);

	/* All other calls are currently unsupported */
	return -EOPNOTSUPP;
}
/* Return the device statistics, or NULL before initialization finishes. */
static
struct net_device_stats *ks_wlan_get_stats(struct net_device *dev)
{
	struct ks_wlan_private *priv = netdev_priv(dev);

	if (priv->dev_state >= DEVICE_STATE_READY)
		return &priv->nstats;

	return NULL;	/* not finished initialize */
}
/*
 * Change the interface MAC address.  Only allowed while the interface is
 * down; the new address is pushed to the firmware through the SME queue.
 */
static
int ks_wlan_set_mac_address(struct net_device *dev, void *addr)
{
	struct ks_wlan_private *priv = netdev_priv(dev);
	struct sockaddr *sa = addr;

	if (netif_running(dev))
		return -EBUSY;

	eth_hw_addr_set(dev, sa->sa_data);
	ether_addr_copy(priv->eth_addr, sa->sa_data);

	/* stays invalid until the firmware acknowledges the new address */
	priv->mac_address_valid = false;
	hostif_sme_enqueue(priv, SME_MACADDRESS_SET_REQUEST);
	netdev_info(dev, "ks_wlan: MAC ADDRESS = %pM\n", priv->eth_addr);
	return 0;
}
/* Watchdog callback: count the stall, then restart the TX queue. */
static
void ks_wlan_tx_timeout(struct net_device *dev, unsigned int txqueue)
{
	struct ks_wlan_private *priv = netdev_priv(dev);

	priv->nstats.tx_errors++;

	netdev_dbg(dev, "head(%d) tail(%d)!!\n", priv->tx_dev.qhead,
		   priv->tx_dev.qtail);

	/* make sure the queue is stopped before waking it again */
	if (!netif_queue_stopped(dev))
		netif_stop_queue(dev);

	netif_wake_queue(dev);
}
static
netdev_tx_t ks_wlan_start_xmit(struct sk_buff *skb, struct net_device *dev)
{
struct ks_wlan_private *priv = netdev_priv(dev);
int ret;
netdev_dbg(dev, "in_interrupt()=%ld\n", in_interrupt());
if (!skb) {
netdev_err(dev, "ks_wlan: skb == NULL!!!\n");
return 0;
}
if (priv->dev_state < DEVICE_STATE_READY) {
dev_kfree_skb(skb);
return 0; /* not finished initialize */
}
if (netif_running(dev))
netif_stop_queue(dev);
ret = hostif_data_request(priv, skb);
netif_trans_update(dev);
if (ret)
netdev_err(dev, "hostif_data_request error: =%d\n", ret);
return 0;
}
/*
 * TX completion: update counters, restart the queue stopped in
 * ks_wlan_start_xmit(), and release the skb (may be NULL).
 */
void send_packet_complete(struct ks_wlan_private *priv, struct sk_buff *skb)
{
	priv->nstats.tx_packets++;

	if (netif_queue_stopped(priv->net_dev))
		netif_wake_queue(priv->net_dev);

	if (!skb)
		return;

	priv->nstats.tx_bytes += skb->len;
	dev_kfree_skb(skb);
}
/*
* Set or clear the multicast filter for this adaptor.
* This routine is not state sensitive and need not be SMP locked.
*/
static
void ks_wlan_set_rx_mode(struct net_device *dev)
{
	struct ks_wlan_private *priv = netdev_priv(dev);

	/* not finished initialize */
	if (priv->dev_state < DEVICE_STATE_READY)
		return;

	/* actual filter programming happens in the SME work context */
	hostif_sme_enqueue(priv, SME_MULTICAST_REQUEST);
}
/* Bring the interface up; refuses until the firmware MAC address is valid. */
static
int ks_wlan_open(struct net_device *dev)
{
	struct ks_wlan_private *priv = netdev_priv(dev);

	priv->cur_rx = 0;

	if (!priv->mac_address_valid) {
		netdev_err(dev, "ks_wlan : %s Not READY !!\n", dev->name);
		return -EBUSY;
	}

	netif_start_queue(dev);
	return 0;
}
/* Bring the interface down: just stop the TX queue. */
static
int ks_wlan_close(struct net_device *dev)
{
	netif_stop_queue(dev);

	return 0;
}
/* Operational parameters that usually are not changed. */
/* Time in jiffies before concluding the transmitter is hung. */
#define TX_TIMEOUT (3 * HZ)
/*
 * Placeholder MAC address installed by ks_wlan_net_start() until the
 * firmware reports the real one.
 */
static const unsigned char dummy_addr[] = {
	0x00, 0x0b, 0xe3, 0x00, 0x00, 0x00
};
/* Net-device callbacks registered in ks_wlan_net_start(). */
static const struct net_device_ops ks_wlan_netdev_ops = {
	.ndo_start_xmit = ks_wlan_start_xmit,
	.ndo_open = ks_wlan_open,
	.ndo_stop = ks_wlan_close,
	.ndo_do_ioctl = ks_wlan_netdev_ioctl,
	.ndo_set_mac_address = ks_wlan_set_mac_address,
	.ndo_get_stats = ks_wlan_get_stats,
	.ndo_tx_timeout = ks_wlan_tx_timeout,
	.ndo_set_rx_mode = ks_wlan_set_rx_mode,
};
/*
 * One-time net_device setup: reset driver state, arm the PHY-info timer,
 * install a placeholder MAC address and hook up the netdev/wireless ops.
 */
int ks_wlan_net_start(struct net_device *dev)
{
	struct ks_wlan_private *priv = netdev_priv(dev);

	priv->mac_address_valid = false;
	priv->is_device_open = true;
	priv->need_commit = 0;

	/* phy information update timer */
	atomic_set(&update_phyinfo, 0);
	timer_setup(&update_phyinfo_timer, ks_wlan_update_phyinfo_timeout, 0);

	/* dummy address set until the firmware supplies the real one */
	ether_addr_copy(priv->eth_addr, dummy_addr);
	eth_hw_addr_set(dev, priv->eth_addr);

	/* The ks_wlan-specific entries in the device structure. */
	dev->netdev_ops = &ks_wlan_netdev_ops;
	dev->wireless_handlers = &ks_wlan_handler_def;
	dev->watchdog_timeo = TX_TIMEOUT;

	netif_carrier_off(dev);

	return 0;
}
/* Tear down runtime state: mark the device closed and stop the timer/queue. */
int ks_wlan_net_stop(struct net_device *dev)
{
	struct ks_wlan_private *priv = netdev_priv(dev);

	priv->is_device_open = false;
	del_timer_sync(&update_phyinfo_timer);

	if (netif_running(dev))
		netif_stop_queue(dev);

	return 0;
}
/**
* is_connect_status() - return true if status is 'connected'
* @status: high bit is used as FORCE_DISCONNECT, low bits used for
* connect status.
*/
bool is_connect_status(u32 status)
{
	/* mask off the FORCE_DISCONNECT bit before comparing */
	u32 state = status & CONNECT_STATUS_MASK;

	return state == CONNECT_STATUS;
}
/**
* is_disconnect_status() - return true if status is 'disconnected'
* @status: high bit is used as FORCE_DISCONNECT, low bits used for
* disconnect status.
*/
bool is_disconnect_status(u32 status)
{
	/* mask off the FORCE_DISCONNECT bit before comparing */
	u32 state = status & CONNECT_STATUS_MASK;

	return state == DISCONNECT_STATUS;
}
/* end of drivers/staging/ks7010/ks_wlan_net.c (linux-master) */
// SPDX-License-Identifier: GPL-2.0
/******************************************************************************
*
* Copyright(c) 2007 - 2011 Realtek Corporation. All rights reserved.
*
******************************************************************************/
#include <linux/crc32.h>
#include <drv_types.h>
#include <rtw_debug.h>
#include <crypto/aes.h>
/* Printable names indexed by security type value; see security_type_str(). */
static const char * const _security_type_str[] = {
	"N/A",
	"WEP40",
	"TKIP",
	"TKIP_WM",
	"AES",
	"WEP104",
	"SMS4",
	"WEP_WPA",
	"BIP",
};
/* Map a security type value to its printable name; NULL if out of range. */
const char *security_type_str(u8 value)
{
	if (value > _BIP_)
		return NULL;

	return _security_type_str[value];
}
/* WEP related ===== */
/*
Need to consider the fragment situation
*/
/*
 * WEP-encrypt every fragment of an xmit frame in place.
 *
 * For each fragment: build the RC4 key as 3 IV bytes (taken from the
 * frame) followed by the default WEP key, append the CRC32 ICV and RC4
 * the payload plus ICV.  The last fragment uses last_txcmdsz for its
 * length; all others use the fixed frag_len, and the frame pointer is
 * advanced (4-byte aligned) to the next fragment.
 */
void rtw_wep_encrypt(struct adapter *padapter, u8 *pxmitframe)
{	/* exclude ICV */
	union {
		__le32 f0;
		unsigned char f1[4];
	} crc;
	signed int curfragnum, length;
	u32 keylength;
	u8 *pframe, *payload, *iv;	/* wepkey */
	u8 wepkey[16];
	u8 hw_hdr_offset = 0;
	struct pkt_attrib *pattrib = &((struct xmit_frame *)pxmitframe)->attrib;
	struct security_priv *psecuritypriv = &padapter->securitypriv;
	struct xmit_priv *pxmitpriv = &padapter->xmitpriv;
	struct arc4_ctx *ctx = &psecuritypriv->xmit_arc4_ctx;

	if (!((struct xmit_frame *)pxmitframe)->buf_addr)
		return;

	hw_hdr_offset = TXDESC_OFFSET;
	pframe = ((struct xmit_frame *)pxmitframe)->buf_addr + hw_hdr_offset;

	/* start to encrypt each fragment */
	if ((pattrib->encrypt == _WEP40_) || (pattrib->encrypt == _WEP104_)) {
		keylength = psecuritypriv->dot11DefKeylen[psecuritypriv->dot11PrivacyKeyIndex];

		for (curfragnum = 0; curfragnum < pattrib->nr_frags; curfragnum++) {
			iv = pframe+pattrib->hdrlen;
			/* RC4 seed = 3 IV bytes || default key */
			memcpy(&wepkey[0], iv, 3);
			memcpy(&wepkey[3], &psecuritypriv->dot11DefKey[psecuritypriv->dot11PrivacyKeyIndex].skey[0], keylength);
			payload = pframe+pattrib->iv_len+pattrib->hdrlen;

			if ((curfragnum+1) == pattrib->nr_frags) {	/* the last fragment */
				length = pattrib->last_txcmdsz-pattrib->hdrlen-pattrib->iv_len-pattrib->icv_len;
				/* ICV = one's-complement CRC32 of the plaintext */
				crc.f0 = cpu_to_le32(~crc32_le(~0, payload, length));

				arc4_setkey(ctx, wepkey, 3 + keylength);
				arc4_crypt(ctx, payload, payload, length);
				/* encrypt the ICV with the same RC4 stream */
				arc4_crypt(ctx, payload + length, crc.f1, 4);
			} else {
				length = pxmitpriv->frag_len-pattrib->hdrlen-pattrib->iv_len-pattrib->icv_len;
				crc.f0 = cpu_to_le32(~crc32_le(~0, payload, length));
				arc4_setkey(ctx, wepkey, 3 + keylength);
				arc4_crypt(ctx, payload, payload, length);
				arc4_crypt(ctx, payload + length, crc.f1, 4);

				/* step to the next fragment, 4-byte aligned */
				pframe += pxmitpriv->frag_len;
				pframe = (u8 *)round_up((SIZE_PTR)(pframe), 4);
			}
		}
	}
}
/*
 * WEP-decrypt a received frame in place.
 *
 * Rebuilds the RC4 key from the 3 IV bytes in the frame plus the default
 * key selected by the receive key index, then RC4s payload + ICV.
 */
void rtw_wep_decrypt(struct adapter *padapter, u8 *precvframe)
{
	/* exclude ICV */
	u8 crc[4];
	signed int length;
	u32 keylength;
	u8 *pframe, *payload, *iv, wepkey[16];
	u8 keyindex;
	struct rx_pkt_attrib *prxattrib = &(((union recv_frame *)precvframe)->u.hdr.attrib);
	struct security_priv *psecuritypriv = &padapter->securitypriv;
	struct arc4_ctx *ctx = &psecuritypriv->recv_arc4_ctx;

	pframe = (unsigned char *)((union recv_frame *)precvframe)->u.hdr.rx_data;

	/* start to decrypt recvframe */
	if ((prxattrib->encrypt == _WEP40_) || (prxattrib->encrypt == _WEP104_)) {
		iv = pframe+prxattrib->hdrlen;
		/* keyindex =(iv[3]&0x3); */
		keyindex = prxattrib->key_index;
		keylength = psecuritypriv->dot11DefKeylen[keyindex];
		/* RC4 seed = 3 IV bytes || default key for this index */
		memcpy(&wepkey[0], iv, 3);
		/* memcpy(&wepkey[3], &psecuritypriv->dot11DefKey[psecuritypriv->dot11PrivacyKeyIndex].skey[0], keylength); */
		memcpy(&wepkey[3], &psecuritypriv->dot11DefKey[keyindex].skey[0], keylength);
		length = ((union recv_frame *)precvframe)->u.hdr.len-prxattrib->hdrlen-prxattrib->iv_len;

		payload = pframe+prxattrib->iv_len+prxattrib->hdrlen;

		/* decrypt payload include icv */
		arc4_setkey(ctx, wepkey, 3 + keylength);
		arc4_crypt(ctx, payload, payload, length);

		/* calculate icv and compare the icv */
		/*
		 * NOTE(review): the CRC is computed into 'crc' but never
		 * compared against the received ICV, so a corrupted frame
		 * is not rejected here - confirm whether this is intended.
		 */
		*((u32 *)crc) = ~crc32_le(~0, payload, length - 4);
	}
}
/* 3 =====TKIP related ===== */
/* Assemble a u32 from four little-endian bytes, portably. */
static u32 secmicgetuint32(u8 *p)
{
	return ((u32)p[0]) |
	       ((u32)p[1] << 8) |
	       ((u32)p[2] << 16) |
	       ((u32)p[3] << 24);
}
/* Store a u32 as four little-endian bytes, portably. */
static void secmicputuint32(u8 *p, u32 val)
{
	p[0] = (u8)(val & 0xff);
	p[1] = (u8)((val >> 8) & 0xff);
	p[2] = (u8)((val >> 16) & 0xff);
	p[3] = (u8)((val >> 24) & 0xff);
}
/* Reset the running Michael MIC state to the empty message. */
static void secmicclear(struct mic_data *pmicdata)
{
	pmicdata->M = 0;
	pmicdata->nBytesInM = 0;
	/* (L, R) restart from the key halves */
	pmicdata->L = pmicdata->K0;
	pmicdata->R = pmicdata->K1;
}
/* Load an 8-byte Michael key and restart from an empty message. */
void rtw_secmicsetkey(struct mic_data *pmicdata, u8 *key)
{
	pmicdata->K0 = secmicgetuint32(&key[0]);
	pmicdata->K1 = secmicgetuint32(&key[4]);
	secmicclear(pmicdata);
}
/*
 * Feed one byte into the Michael MIC state.
 *
 * Bytes accumulate little-endian in M; once four have been gathered the
 * Michael block function mixes the word into (L, R).  The exact order of
 * the rotate/swap/add steps is part of the algorithm.
 */
void rtw_secmicappendbyte(struct mic_data *pmicdata, u8 b)
{
	/* Append the byte to our word-sized buffer */
	pmicdata->M |= ((unsigned long)b) << (8*pmicdata->nBytesInM);
	pmicdata->nBytesInM++;
	/* Process the word if it is full. */
	if (pmicdata->nBytesInM >= 4) {
		pmicdata->L ^= pmicdata->M;
		pmicdata->R ^= ROL32(pmicdata->L, 17);
		pmicdata->L += pmicdata->R;
		/* byte-swap each 16-bit half of L into R */
		pmicdata->R ^= ((pmicdata->L & 0xff00ff00) >> 8) | ((pmicdata->L & 0x00ff00ff) << 8);
		pmicdata->L += pmicdata->R;
		pmicdata->R ^= ROL32(pmicdata->L, 3);
		pmicdata->L += pmicdata->R;
		pmicdata->R ^= ROR32(pmicdata->L, 2);
		pmicdata->L += pmicdata->R;
		/* Clear the buffer */
		pmicdata->M = 0;
		pmicdata->nBytesInM = 0;
	}
}
/* Feed an arbitrary buffer into the MIC, one byte at a time. */
void rtw_secmicappend(struct mic_data *pmicdata, u8 *src, u32 nbytes)
{
	u32 i;

	for (i = 0; i < nbytes; i++)
		rtw_secmicappendbyte(pmicdata, src[i]);
}
/*
 * Finalize the Michael MIC: terminate the message with 0x5a and four
 * zero bytes, pad to a 32-bit boundary, then emit the 8-byte digest and
 * reset the state for the next message.
 */
void rtw_secgetmic(struct mic_data *pmicdata, u8 *dst)
{
	int i;

	rtw_secmicappendbyte(pmicdata, 0x5a);
	for (i = 0; i < 4; i++)
		rtw_secmicappendbyte(pmicdata, 0);

	/* zeroes until the length is a multiple of 4 */
	while (pmicdata->nBytesInM != 0)
		rtw_secmicappendbyte(pmicdata, 0);

	/* the block function has already produced the final (L, R) pair */
	secmicputuint32(&dst[0], pmicdata->L);
	secmicputuint32(&dst[4], pmicdata->R);

	secmicclear(pmicdata);
}
/*
 * Compute the TKIP Michael MIC over the pseudo-header and MSDU data.
 *
 * The pseudo-header is DA, SA, priority and three zero bytes; DA/SA are
 * picked from the 802.11 header address fields according to the
 * ToDS/FromDS bits in header[1] (addr1 @4, addr2 @10, addr3 @16,
 * addr4 @24).
 */
void rtw_seccalctkipmic(u8 *key, u8 *header, u8 *data, u32 data_len, u8 *mic_code, u8 pri)
{
	struct mic_data micdata;
	u8 priority[4] = {0x0, 0x0, 0x0, 0x0};

	rtw_secmicsetkey(&micdata, key);
	priority[0] = pri;

	/* Michael MIC pseudo header: DA, SA, 3 x 0, Priority */
	if (header[1] & 1) {	/* ToDS == 1 */
		rtw_secmicappend(&micdata, &header[16], 6);	/* DA */
		if (header[1] & 2)	/* From Ds == 1 */
			rtw_secmicappend(&micdata, &header[24], 6);
		else
			rtw_secmicappend(&micdata, &header[10], 6);
	} else {	/* ToDS == 0 */
		rtw_secmicappend(&micdata, &header[4], 6);	/* DA */
		if (header[1] & 2)	/* From Ds == 1 */
			rtw_secmicappend(&micdata, &header[16], 6);
		else
			rtw_secmicappend(&micdata, &header[10], 6);
	}
	/* priority byte plus three reserved zero bytes */
	rtw_secmicappend(&micdata, &priority[0], 4);

	rtw_secmicappend(&micdata, data, data_len);

	rtw_secgetmic(&micdata, mic_code);
}
/* macros for extraction/creation of unsigned char/unsigned short values */
#define RotR1(v16) ((((v16) >> 1) & 0x7FFF) ^ (((v16) & 1) << 15))
#define Lo8(v16) ((u8)((v16) & 0x00FF))
#define Hi8(v16) ((u8)(((v16) >> 8) & 0x00FF))
#define Lo16(v32) ((u16)((v32) & 0xFFFF))
#define Hi16(v32) ((u16)(((v32) >> 16) & 0xFFFF))
#define Mk16(hi, lo) ((lo) ^ (((u16)(hi)) << 8))
/* select the Nth 16-bit word of the temporal key unsigned char array TK[] */
#define TK16(N) Mk16(tk[2*(N)+1], tk[2*(N)])
/* S-box lookup: 16 bits --> 16 bits (split over the two Sbox1 halves) */
#define _S_(v16) (Sbox1[0][Lo8(v16)] ^ Sbox1[1][Hi8(v16)])
/* fixed algorithm "parameters" */
#define PHASE1_LOOP_CNT 8 /* this needs to be "big enough" */
/* 2-unsigned char by 2-unsigned char subset of the full AES S-box table */
/* Used only through the _S_() macro in the TKIP phase1/phase2 key mixing. */
static const unsigned short Sbox1[2][256] = { /* Sbox for hash (can be in ROM) */
	{
	0xC6A5, 0xF884, 0xEE99, 0xF68D, 0xFF0D, 0xD6BD, 0xDEB1, 0x9154,
	0x6050, 0x0203, 0xCEA9, 0x567D, 0xE719, 0xB562, 0x4DE6, 0xEC9A,
	0x8F45, 0x1F9D, 0x8940, 0xFA87, 0xEF15, 0xB2EB, 0x8EC9, 0xFB0B,
	0x41EC, 0xB367, 0x5FFD, 0x45EA, 0x23BF, 0x53F7, 0xE496, 0x9B5B,
	0x75C2, 0xE11C, 0x3DAE, 0x4C6A, 0x6C5A, 0x7E41, 0xF502, 0x834F,
	0x685C, 0x51F4, 0xD134, 0xF908, 0xE293, 0xAB73, 0x6253, 0x2A3F,
	0x080C, 0x9552, 0x4665, 0x9D5E, 0x3028, 0x37A1, 0x0A0F, 0x2FB5,
	0x0E09, 0x2436, 0x1B9B, 0xDF3D, 0xCD26, 0x4E69, 0x7FCD, 0xEA9F,
	0x121B, 0x1D9E, 0x5874, 0x342E, 0x362D, 0xDCB2, 0xB4EE, 0x5BFB,
	0xA4F6, 0x764D, 0xB761, 0x7DCE, 0x527B, 0xDD3E, 0x5E71, 0x1397,
	0xA6F5, 0xB968, 0x0000, 0xC12C, 0x4060, 0xE31F, 0x79C8, 0xB6ED,
	0xD4BE, 0x8D46, 0x67D9, 0x724B, 0x94DE, 0x98D4, 0xB0E8, 0x854A,
	0xBB6B, 0xC52A, 0x4FE5, 0xED16, 0x86C5, 0x9AD7, 0x6655, 0x1194,
	0x8ACF, 0xE910, 0x0406, 0xFE81, 0xA0F0, 0x7844, 0x25BA, 0x4BE3,
	0xA2F3, 0x5DFE, 0x80C0, 0x058A, 0x3FAD, 0x21BC, 0x7048, 0xF104,
	0x63DF, 0x77C1, 0xAF75, 0x4263, 0x2030, 0xE51A, 0xFD0E, 0xBF6D,
	0x814C, 0x1814, 0x2635, 0xC32F, 0xBEE1, 0x35A2, 0x88CC, 0x2E39,
	0x9357, 0x55F2, 0xFC82, 0x7A47, 0xC8AC, 0xBAE7, 0x322B, 0xE695,
	0xC0A0, 0x1998, 0x9ED1, 0xA37F, 0x4466, 0x547E, 0x3BAB, 0x0B83,
	0x8CCA, 0xC729, 0x6BD3, 0x283C, 0xA779, 0xBCE2, 0x161D, 0xAD76,
	0xDB3B, 0x6456, 0x744E, 0x141E, 0x92DB, 0x0C0A, 0x486C, 0xB8E4,
	0x9F5D, 0xBD6E, 0x43EF, 0xC4A6, 0x39A8, 0x31A4, 0xD337, 0xF28B,
	0xD532, 0x8B43, 0x6E59, 0xDAB7, 0x018C, 0xB164, 0x9CD2, 0x49E0,
	0xD8B4, 0xACFA, 0xF307, 0xCF25, 0xCAAF, 0xF48E, 0x47E9, 0x1018,
	0x6FD5, 0xF088, 0x4A6F, 0x5C72, 0x3824, 0x57F1, 0x73C7, 0x9751,
	0xCB23, 0xA17C, 0xE89C, 0x3E21, 0x96DD, 0x61DC, 0x0D86, 0x0F85,
	0xE090, 0x7C42, 0x71C4, 0xCCAA, 0x90D8, 0x0605, 0xF701, 0x1C12,
	0xC2A3, 0x6A5F, 0xAEF9, 0x69D0, 0x1791, 0x9958, 0x3A27, 0x27B9,
	0xD938, 0xEB13, 0x2BB3, 0x2233, 0xD2BB, 0xA970, 0x0789, 0x33A7,
	0x2DB6, 0x3C22, 0x1592, 0xC920, 0x8749, 0xAAFF, 0x5078, 0xA57A,
	0x038F, 0x59F8, 0x0980, 0x1A17, 0x65DA, 0xD731, 0x84C6, 0xD0B8,
	0x82C3, 0x29B0, 0x5A77, 0x1E11, 0x7BCB, 0xA8FC, 0x6DD6, 0x2C3A,
	},
	{ /* second half of table is unsigned char-reversed version of first! */
	0xA5C6, 0x84F8, 0x99EE, 0x8DF6, 0x0DFF, 0xBDD6, 0xB1DE, 0x5491,
	0x5060, 0x0302, 0xA9CE, 0x7D56, 0x19E7, 0x62B5, 0xE64D, 0x9AEC,
	0x458F, 0x9D1F, 0x4089, 0x87FA, 0x15EF, 0xEBB2, 0xC98E, 0x0BFB,
	0xEC41, 0x67B3, 0xFD5F, 0xEA45, 0xBF23, 0xF753, 0x96E4, 0x5B9B,
	0xC275, 0x1CE1, 0xAE3D, 0x6A4C, 0x5A6C, 0x417E, 0x02F5, 0x4F83,
	0x5C68, 0xF451, 0x34D1, 0x08F9, 0x93E2, 0x73AB, 0x5362, 0x3F2A,
	0x0C08, 0x5295, 0x6546, 0x5E9D, 0x2830, 0xA137, 0x0F0A, 0xB52F,
	0x090E, 0x3624, 0x9B1B, 0x3DDF, 0x26CD, 0x694E, 0xCD7F, 0x9FEA,
	0x1B12, 0x9E1D, 0x7458, 0x2E34, 0x2D36, 0xB2DC, 0xEEB4, 0xFB5B,
	0xF6A4, 0x4D76, 0x61B7, 0xCE7D, 0x7B52, 0x3EDD, 0x715E, 0x9713,
	0xF5A6, 0x68B9, 0x0000, 0x2CC1, 0x6040, 0x1FE3, 0xC879, 0xEDB6,
	0xBED4, 0x468D, 0xD967, 0x4B72, 0xDE94, 0xD498, 0xE8B0, 0x4A85,
	0x6BBB, 0x2AC5, 0xE54F, 0x16ED, 0xC586, 0xD79A, 0x5566, 0x9411,
	0xCF8A, 0x10E9, 0x0604, 0x81FE, 0xF0A0, 0x4478, 0xBA25, 0xE34B,
	0xF3A2, 0xFE5D, 0xC080, 0x8A05, 0xAD3F, 0xBC21, 0x4870, 0x04F1,
	0xDF63, 0xC177, 0x75AF, 0x6342, 0x3020, 0x1AE5, 0x0EFD, 0x6DBF,
	0x4C81, 0x1418, 0x3526, 0x2FC3, 0xE1BE, 0xA235, 0xCC88, 0x392E,
	0x5793, 0xF255, 0x82FC, 0x477A, 0xACC8, 0xE7BA, 0x2B32, 0x95E6,
	0xA0C0, 0x9819, 0xD19E, 0x7FA3, 0x6644, 0x7E54, 0xAB3B, 0x830B,
	0xCA8C, 0x29C7, 0xD36B, 0x3C28, 0x79A7, 0xE2BC, 0x1D16, 0x76AD,
	0x3BDB, 0x5664, 0x4E74, 0x1E14, 0xDB92, 0x0A0C, 0x6C48, 0xE4B8,
	0x5D9F, 0x6EBD, 0xEF43, 0xA6C4, 0xA839, 0xA431, 0x37D3, 0x8BF2,
	0x32D5, 0x438B, 0x596E, 0xB7DA, 0x8C01, 0x64B1, 0xD29C, 0xE049,
	0xB4D8, 0xFAAC, 0x07F3, 0x25CF, 0xAFCA, 0x8EF4, 0xE947, 0x1810,
	0xD56F, 0x88F0, 0x6F4A, 0x725C, 0x2438, 0xF157, 0xC773, 0x5197,
	0x23CB, 0x7CA1, 0x9CE8, 0x213E, 0xDD96, 0xDC61, 0x860D, 0x850F,
	0x90E0, 0x427C, 0xC471, 0xAACC, 0xD890, 0x0506, 0x01F7, 0x121C,
	0xA3C2, 0x5F6A, 0xF9AE, 0xD069, 0x9117, 0x5899, 0x273A, 0xB927,
	0x38D9, 0x13EB, 0xB32B, 0x3322, 0xBBD2, 0x70A9, 0x8907, 0xA733,
	0xB62D, 0x223C, 0x9215, 0x20C9, 0x4987, 0xFFAA, 0x7850, 0x7AA5,
	0x8F03, 0xF859, 0x8009, 0x171A, 0xDA65, 0x31D7, 0xC684, 0xB8D0,
	0xC382, 0xB029, 0x775A, 0x111E, 0xCB7B, 0xFCA8, 0xD66D, 0x3A2C,
	}
};
/*
**********************************************************************
* Routine: Phase 1 -- generate P1K, given TA, TK, IV32
*
* Inputs:
* tk[] = temporal key [128 bits]
* ta[] = transmitter's MAC address [ 48 bits]
* iv32 = upper 32 bits of IV [ 32 bits]
* Output:
* p1k[] = Phase 1 key [ 80 bits]
*
* Note:
* This function only needs to be called every 2**16 packets,
* although in theory it could be called every packet.
*
**********************************************************************
*/
/* TKIP phase 1 key mixing: derive the 80-bit P1K from TK, TA and IV32. */
static void phase1(u16 *p1k, const u8 *tk, const u8 *ta, u32 iv32)
{
	signed int i;

	/* Initialize the 80 bits of P1K[] from IV32 and TA[0..5] */
	p1k[0] = Lo16(iv32);
	p1k[1] = Hi16(iv32);
	p1k[2] = Mk16(ta[1], ta[0]);	/* use TA[] as little-endian */
	p1k[3] = Mk16(ta[3], ta[2]);
	p1k[4] = Mk16(ta[5], ta[4]);

	/* Now compute an unbalanced Feistel cipher with 80-bit block */
	/* size on the 80-bit block P1K[], using the 128-bit key TK[] */
	for (i = 0; i < PHASE1_LOOP_CNT; i++) {
		/* Each add operation here is mod 2**16 */
		p1k[0] += _S_(p1k[4] ^ TK16((i&1)+0));
		p1k[1] += _S_(p1k[0] ^ TK16((i&1)+2));
		p1k[2] += _S_(p1k[1] ^ TK16((i&1)+4));
		p1k[3] += _S_(p1k[2] ^ TK16((i&1)+6));
		p1k[4] += _S_(p1k[3] ^ TK16((i&1)+0));
		p1k[4] += (unsigned short)i;	/* avoid "slide attacks" */
	}
}
/*
**********************************************************************
* Routine: Phase 2 -- generate RC4KEY, given TK, P1K, IV16
*
* Inputs:
* tk[] = Temporal key [128 bits]
* p1k[] = Phase 1 output key [ 80 bits]
* iv16 = low 16 bits of IV counter [ 16 bits]
* Output:
* rc4key[] = the key used to encrypt the packet [128 bits]
*
* Note:
* The value {TA, IV32, IV16} for Phase1/Phase2 must be unique
* across all packets using the same key TK value. Then, for a
* given value of TK[], this TKIP48 construction guarantees that
* the final RC4KEY value is unique across all packets.
*
* Suggested implementation optimization: if PPK[] is "overlaid"
* appropriately on RC4KEY[], there is no need for the final
* for loop below that copies the PPK[] result into RC4KEY[].
*
**********************************************************************
*/
/* TKIP phase 2 key mixing: derive the 128-bit per-packet RC4 key. */
static void phase2(u8 *rc4key, const u8 *tk, const u16 *p1k, u16 iv16)
{
	signed int i;
	u16 PPK[6];	/* temporary key for mixing */

	/* Note: all adds in the PPK[] equations below are mod 2**16 */
	for (i = 0; i < 5; i++)
		PPK[i] = p1k[i];	/* first, copy P1K to PPK */
	PPK[5] = p1k[4]+iv16;	/* next, add in IV16 */

	/* Bijective non-linear mixing of the 96 bits of PPK[0..5] */
	PPK[0] += _S_(PPK[5] ^ TK16(0));	/* Mix key in each "round" */
	PPK[1] += _S_(PPK[0] ^ TK16(1));
	PPK[2] += _S_(PPK[1] ^ TK16(2));
	PPK[3] += _S_(PPK[2] ^ TK16(3));
	PPK[4] += _S_(PPK[3] ^ TK16(4));
	PPK[5] += _S_(PPK[4] ^ TK16(5));	/* Total # S-box lookups == 6 */

	/* Final sweep: bijective, "linear". Rotates kill LSB correlations */
	PPK[0] += RotR1(PPK[5] ^ TK16(6));
	PPK[1] += RotR1(PPK[0] ^ TK16(7));	/* Use all of TK[] in Phase2 */
	PPK[2] += RotR1(PPK[1]);
	PPK[3] += RotR1(PPK[2]);
	PPK[4] += RotR1(PPK[3]);
	PPK[5] += RotR1(PPK[4]);

	/* Note: At this point, for a given key TK[0..15], the 96-bit output */
	/* value PPK[0..5] is guaranteed to be unique, as a function */
	/* of the 96-bit "input" value {TA, IV32, IV16}. That is, P1K */
	/* is now a keyed permutation of {TA, IV32, IV16}. */
	/* Set RC4KEY[0..3], which includes "cleartext" portion of RC4 key */
	rc4key[0] = Hi8(iv16);	/* RC4KEY[0..2] is the WEP IV */
	rc4key[1] = (Hi8(iv16) | 0x20) & 0x7F;	/* Help avoid weak (FMS) keys */
	rc4key[2] = Lo8(iv16);
	rc4key[3] = Lo8((PPK[5] ^ TK16(0)) >> 1);

	/* Copy 96 bits of PPK[0..5] to RC4KEY[4..15] (little-endian) */
	for (i = 0; i < 6; i++) {
		rc4key[4+2*i] = Lo8(PPK[i]);
		rc4key[5+2*i] = Hi8(PPK[i]);
	}
}
/* The hlen isn't include the IV */
/*
 * TKIP-encrypt every fragment of an xmit frame in place.
 *
 * The per-packet RC4 key is derived from the pairwise or group temporal
 * key via phase1/phase2 using the TSC taken from the frame's IV; each
 * fragment then gets a CRC32 ICV and is RC4-encrypted (payload + ICV).
 * Returns _SUCCESS, or _FAIL when the frame has no buffer.
 */
u32 rtw_tkip_encrypt(struct adapter *padapter, u8 *pxmitframe)
{	/* exclude ICV */
	u16 pnl;
	u32 pnh;
	u8 rc4key[16];
	u8 ttkey[16];
	union {
		__le32 f0;
		u8 f1[4];
	} crc;
	u8 hw_hdr_offset = 0;
	signed int curfragnum, length;

	u8 *pframe, *payload, *iv, *prwskey;
	union pn48 dot11txpn;
	struct pkt_attrib *pattrib = &((struct xmit_frame *)pxmitframe)->attrib;
	struct security_priv *psecuritypriv = &padapter->securitypriv;
	struct xmit_priv *pxmitpriv = &padapter->xmitpriv;
	struct arc4_ctx *ctx = &psecuritypriv->xmit_arc4_ctx;
	u32 res = _SUCCESS;

	if (!((struct xmit_frame *)pxmitframe)->buf_addr)
		return _FAIL;

	hw_hdr_offset = TXDESC_OFFSET;
	pframe = ((struct xmit_frame *)pxmitframe)->buf_addr + hw_hdr_offset;

	/* 4 start to encrypt each fragment */
	if (pattrib->encrypt == _TKIP_) {
		{
			/* group key for multicast, pairwise key otherwise */
			if (is_multicast_ether_addr(pattrib->ra))
				prwskey = psecuritypriv->dot118021XGrpKey[psecuritypriv->dot118021XGrpKeyid].skey;
			else
				prwskey = pattrib->dot118021x_UncstKey.skey;

			for (curfragnum = 0; curfragnum < pattrib->nr_frags; curfragnum++) {
				iv = pframe+pattrib->hdrlen;
				payload = pframe+pattrib->iv_len+pattrib->hdrlen;

				/* split the 48-bit TSC from the IV into low 16 / high 32 bits */
				GET_TKIP_PN(iv, dot11txpn);

				pnl = (u16)(dot11txpn.val);
				pnh = (u32)(dot11txpn.val>>16);

				phase1((u16 *)&ttkey[0], prwskey, &pattrib->ta[0], pnh);

				phase2(&rc4key[0], prwskey, (u16 *)&ttkey[0], pnl);

				if ((curfragnum+1) == pattrib->nr_frags) {	/* 4 the last fragment */
					length = pattrib->last_txcmdsz-pattrib->hdrlen-pattrib->iv_len-pattrib->icv_len;
					/* ICV = one's-complement CRC32 of the plaintext */
					crc.f0 = cpu_to_le32(~crc32_le(~0, payload, length));

					arc4_setkey(ctx, rc4key, 16);
					arc4_crypt(ctx, payload, payload, length);
					arc4_crypt(ctx, payload + length, crc.f1, 4);
				} else {
					length = pxmitpriv->frag_len-pattrib->hdrlen-pattrib->iv_len-pattrib->icv_len;
					crc.f0 = cpu_to_le32(~crc32_le(~0, payload, length));

					arc4_setkey(ctx, rc4key, 16);
					arc4_crypt(ctx, payload, payload, length);
					arc4_crypt(ctx, payload + length, crc.f1, 4);

					/* step to the next fragment, 4-byte aligned */
					pframe += pxmitpriv->frag_len;
					pframe = (u8 *)round_up((SIZE_PTR)(pframe), 4);
				}
			}
		}
	}
	return res;
}
/* The hlen doesn't include the IV */
/*
 * rtw_tkip_decrypt - TKIP-decrypt a received frame in place and verify its ICV.
 * @padapter: owning adapter instance
 * @precvframe: actually a union recv_frame *; rx_data is decrypted in place
 *
 * Derives the per-packet RC4 key from the TKIP TSC (taken from the IV in the
 * frame) via phase1/phase2 mixing, decrypts payload+ICV with the shared
 * recv_arc4_ctx, then checks the trailing 4-byte CRC-32 ICV.
 *
 * Returns _SUCCESS, or _FAIL on missing sta/group key or ICV mismatch.
 */
u32 rtw_tkip_decrypt(struct adapter *padapter, u8 *precvframe)
{ /* exclude ICV */
u16 pnl;
u32 pnh;
u8 rc4key[16];
u8 ttkey[16];
u8 crc[4];
signed int length;
u8 *pframe, *payload, *iv, *prwskey;
union pn48 dot11txpn;
struct sta_info *stainfo;
struct rx_pkt_attrib *prxattrib = &((union recv_frame *)precvframe)->u.hdr.attrib;
struct security_priv *psecuritypriv = &padapter->securitypriv;
struct arc4_ctx *ctx = &psecuritypriv->recv_arc4_ctx;
u32 res = _SUCCESS;
pframe = (unsigned char *)((union recv_frame *)precvframe)->u.hdr.rx_data;
/* 4 start to decrypt recvframe */
if (prxattrib->encrypt == _TKIP_) {
stainfo = rtw_get_stainfo(&padapter->stapriv, &prxattrib->ta[0]);
if (stainfo) {
if (is_multicast_ether_addr(prxattrib->ra)) {
/* Rate-limited (1 s) accounting of group-addressed frames that
 * arrive before the group key is installed.  NOTE(review):
 * function-local statics are shared across adapters — presumably
 * acceptable for debug counters only.
 */
static unsigned long start;
static u32 no_gkey_bc_cnt;
static u32 no_gkey_mc_cnt;
if (!psecuritypriv->binstallGrpkey) {
res = _FAIL;
if (start == 0)
start = jiffies;
if (is_broadcast_mac_addr(prxattrib->ra))
no_gkey_bc_cnt++;
else
no_gkey_mc_cnt++;
if (jiffies_to_msecs(jiffies - start) > 1000) {
if (no_gkey_bc_cnt || no_gkey_mc_cnt) {
netdev_dbg(padapter->pnetdev,
FUNC_ADPT_FMT " no_gkey_bc_cnt:%u, no_gkey_mc_cnt:%u\n",
FUNC_ADPT_ARG(padapter),
no_gkey_bc_cnt,
no_gkey_mc_cnt);
}
start = jiffies;
no_gkey_bc_cnt = 0;
no_gkey_mc_cnt = 0;
}
goto exit;
}
/* Group key now installed: report and reset the counters once. */
if (no_gkey_bc_cnt || no_gkey_mc_cnt) {
netdev_dbg(padapter->pnetdev,
FUNC_ADPT_FMT " gkey installed. no_gkey_bc_cnt:%u, no_gkey_mc_cnt:%u\n",
FUNC_ADPT_ARG(padapter),
no_gkey_bc_cnt,
no_gkey_mc_cnt);
}
start = 0;
no_gkey_bc_cnt = 0;
no_gkey_mc_cnt = 0;
prwskey = psecuritypriv->dot118021XGrpKey[prxattrib->key_index].skey;
} else {
/* Unicast: use the pairwise key of the transmitting station. */
prwskey = &stainfo->dot118021x_UncstKey.skey[0];
}
iv = pframe+prxattrib->hdrlen;
payload = pframe+prxattrib->iv_len+prxattrib->hdrlen;
/* length covers payload + 4-byte ICV */
length = ((union recv_frame *)precvframe)->u.hdr.len-prxattrib->hdrlen-prxattrib->iv_len;
GET_TKIP_PN(iv, dot11txpn);
pnl = (u16)(dot11txpn.val);
pnh = (u32)(dot11txpn.val>>16);
/* TKIP key mixing: TTAK from TA + high PN, then per-packet RC4 key */
phase1((u16 *)&ttkey[0], prwskey, &prxattrib->ta[0], pnh);
phase2(&rc4key[0], prwskey, (unsigned short *)&ttkey[0], pnl);
/* 4 decrypt payload include icv */
arc4_setkey(ctx, rc4key, 16);
arc4_crypt(ctx, payload, payload, length);
/* Recompute CRC-32 over the plaintext and compare with the
 * decrypted ICV (little-endian, last 4 bytes).
 */
*((u32 *)crc) = ~crc32_le(~0, payload, length - 4);
if (crc[3] != payload[length - 1] || crc[2] != payload[length - 2] ||
crc[1] != payload[length - 3] || crc[0] != payload[length - 4])
res = _FAIL;
} else {
res = _FAIL;
}
}
exit:
return res;
}
/* 3 =====AES related ===== */
#define MAX_MSG_SIZE 2048
/*****************************/
/**** Function Prototypes ****/
/*****************************/
static void bitwise_xor(u8 *ina, u8 *inb, u8 *out);
static void construct_mic_iv(u8 *mic_header1,
signed int qc_exists,
signed int a4_exists,
u8 *mpdu,
uint payload_length,
u8 *pn_vector,
uint frtype); /* add for CONFIG_IEEE80211W, none 11w also can use */
static void construct_mic_header1(u8 *mic_header1,
signed int header_length,
u8 *mpdu,
uint frtype); /* for CONFIG_IEEE80211W, none 11w also can use */
static void construct_mic_header2(u8 *mic_header2,
u8 *mpdu,
signed int a4_exists,
signed int qc_exists);
static void construct_ctr_preload(u8 *ctr_preload,
signed int a4_exists,
signed int qc_exists,
u8 *mpdu,
u8 *pn_vector,
signed int c,
uint frtype); /* for CONFIG_IEEE80211W, none 11w also can use */
static void aes128k128d(u8 *key, u8 *data, u8 *ciphertext);
/****************************************/
/* aes128k128d() */
/* Performs a 128 bit AES encrypt with */
/* 128 bit data. */
/****************************************/
/*
 * One-block AES-128 encryption of @data into @ciphertext using the kernel
 * AES library.  Note the kernel API argument order: aes_encrypt(ctx, out, in),
 * so @ciphertext is the destination.  The expanded key schedule is wiped
 * before returning (memzero_explicit resists dead-store elimination).
 */
static void aes128k128d(u8 *key, u8 *data, u8 *ciphertext)
{
struct crypto_aes_ctx ctx;
aes_expandkey(&ctx, key, 16);
aes_encrypt(&ctx, ciphertext, data);
memzero_explicit(&ctx, sizeof(ctx));
}
/************************************************/
/* construct_mic_iv() */
/* Builds the MIC IV from header fields and PN */
/* Baron think the function is construct CCM */
/* nonce */
/************************************************/
/*
 * Builds the 16-byte CBC-MAC IV block (flags | nonce | payload length) used
 * as the first block of the CCMP MIC computation.  The exact byte layout is
 * mandated by CCM; do not reorder.
 */
static void construct_mic_iv(u8 *mic_iv,
signed int qc_exists,
signed int a4_exists,
u8 *mpdu,
uint payload_length,
u8 *pn_vector,
uint frtype) /* add for CONFIG_IEEE80211W, none 11w also can use */
{
signed int i;
/* CCM flags byte: Adata set, M' = 3 (8-byte MIC), L' = 1 */
mic_iv[0] = 0x59;
/* Nonce priority octet: QoS TID when a QC field is present */
if (qc_exists && a4_exists)
mic_iv[1] = mpdu[30] & 0x0f; /* QoS_TC */
if (qc_exists && !a4_exists)
mic_iv[1] = mpdu[24] & 0x0f; /* mute bits 7-4 */
if (!qc_exists)
mic_iv[1] = 0x00;
/* 802.11w management frame should set management bit(4) */
if (frtype == WIFI_MGT_TYPE)
mic_iv[1] |= BIT(4);
for (i = 2; i < 8; i++)
mic_iv[i] = mpdu[i + 8]; /* mic_iv[2:7] = A2[0:5] = mpdu[10:15] */
#ifdef CONSISTENT_PN_ORDER
for (i = 8; i < 14; i++)
mic_iv[i] = pn_vector[i - 8]; /* mic_iv[8:13] = PN[0:5] */
#else
for (i = 8; i < 14; i++)
mic_iv[i] = pn_vector[13 - i]; /* mic_iv[8:13] = PN[5:0] */
#endif
/* big-endian payload length */
mic_iv[14] = (unsigned char) (payload_length / 256);
mic_iv[15] = (unsigned char) (payload_length % 256);
}
/************************************************/
/* construct_mic_header1()                      */
/* Builds the first MIC header block (AAD part  */
/* one): length, masked Frame Control, A1, A2.  */
/************************************************/
static void construct_mic_header1(u8 *mic_header1,
signed int header_length,
u8 *mpdu,
uint frtype) /* for CONFIG_IEEE80211W, none 11w also can use */
{
	/* AAD length field, big-endian, excluding the length field itself */
	mic_header1[0] = (u8)((header_length - 2) / 256);
	mic_header1[1] = (u8)((header_length - 2) % 256);

	/*
	 * Frame Control octet 0: management frames (802.11w) keep all
	 * subtype bits; data frames mute the CF-Poll/CF-Ack subtype bits.
	 */
	if (frtype == WIFI_MGT_TYPE)
		mic_header1[2] = mpdu[0];
	else
		mic_header1[2] = mpdu[0] & 0xcf;

	/* Frame Control octet 1: mute retry, more-data and pwr-mgt bits */
	mic_header1[3] = mpdu[1] & 0xc7;

	/* A1 and A2 are contiguous in both buffers: mpdu[4..15] */
	memcpy(&mic_header1[4], &mpdu[4], 12);
}
/************************************************/
/* construct_mic_header2()                      */
/* Builds the last MIC header block (AAD part   */
/* two): A3, masked SC, optional A4 and QC.     */
/************************************************/
static void construct_mic_header2(u8 *mic_header2,
u8 *mpdu,
signed int a4_exists,
signed int qc_exists)
{
	memset(mic_header2, 0x00, 16);

	/* A3 = mpdu[16..21]; bytes 6-7 (Sequence Control) stay muted to 0 */
	memcpy(mic_header2, &mpdu[16], 6);

	/* A4 follows A3 in the AAD whenever the 4-address header is used */
	if (a4_exists)
		memcpy(&mic_header2[8], &mpdu[24], 6);

	if (qc_exists) {
		/* QC sits after A4 when present, else right after SC;
		 * only the TID bits survive, the high octet is muted.
		 */
		int qc_pos = a4_exists ? 14 : 8;

		mic_header2[qc_pos] = mpdu[qc_pos + 16] & 0x0f;
		mic_header2[qc_pos + 1] = 0x00;
	}
}
/************************************************/
/* construct_ctr_preload()                      */
/* Builds the 16-byte CCM counter block:        */
/* flags | nonce (priority, A2, PN) | counter.  */
/* (The old header wrongly referred to          */
/* construct_mic_header2().)                    */
/************************************************/
static void construct_ctr_preload(u8 *ctr_preload,
signed int a4_exists,
signed int qc_exists,
u8 *mpdu,
u8 *pn_vector,
signed int c,
uint frtype) /* for CONFIG_IEEE80211W, none 11w also can use */
{
	signed int i;

	for (i = 0; i < 16; i++)
		ctr_preload[i] = 0x00;

	/* CCM counter-block flags: L' = 1 */
	ctr_preload[0] = 0x01; /* flag */
	if (qc_exists && a4_exists)
		ctr_preload[1] = mpdu[30] & 0x0f; /* QoC_Control */
	if (qc_exists && !a4_exists)
		ctr_preload[1] = mpdu[24] & 0x0f;
	/* 802.11w management frame should set management bit(4) */
	if (frtype == WIFI_MGT_TYPE)
		ctr_preload[1] |= BIT(4);
	for (i = 2; i < 8; i++)
		ctr_preload[i] = mpdu[i + 8]; /* ctr_preload[2:7] = A2[0:5] = mpdu[10:15] */
#ifdef CONSISTENT_PN_ORDER
	for (i = 8; i < 14; i++)
		ctr_preload[i] = pn_vector[i - 8]; /* ctr_preload[8:13] = PN[0:5] */
#else
	for (i = 8; i < 14; i++)
		ctr_preload[i] = pn_vector[13 - i]; /* ctr_preload[8:13] = PN[5:0] */
#endif
	/* big-endian 16-bit block counter */
	ctr_preload[14] = (unsigned char) (c / 256); /* Ctr */
	ctr_preload[15] = (unsigned char) (c % 256);
}
/************************************/
/* bitwise_xor()                    */
/* out = ina XOR inb, 128 bits      */
/************************************/
static void bitwise_xor(u8 *ina, u8 *inb, u8 *out)
{
	const u8 *stop = out + 16;

	while (out < stop)
		*out++ = *ina++ ^ *inb++;
}
/*
 * aes_cipher - CCMP-encrypt one MPDU in place.
 * @key: 128-bit pairwise/group temporal key
 * @hdrlen: 802.11 header length (may be bumped +2 below for QoS frames)
 * @pframe: frame starting at the 802.11 header; payload follows hdr + 8-byte
 *          CCMP header; the 8-byte MIC is appended after the payload
 * @plen: plaintext payload length (excludes header, CCMP header and MIC)
 *
 * Computes the CBC-MAC MIC over IV block + two AAD blocks + payload, stores
 * it after the payload, then CTR-encrypts payload and MIC in place.
 * Always returns _SUCCESS.
 */
static signed int aes_cipher(u8 *key, uint hdrlen,
u8 *pframe, uint plen)
{
uint qc_exists, a4_exists, i, j, payload_remainder,
num_blocks, payload_index;
u8 pn_vector[6];
u8 mic_iv[16];
u8 mic_header1[16];
u8 mic_header2[16];
u8 ctr_preload[16];
/* Intermediate Buffers */
u8 chain_buffer[16];
u8 aes_out[16];
u8 padded_buffer[16];
u8 mic[8];
uint frtype = GetFrameType(pframe);
uint frsubtype = GetFrameSubType(pframe);
frsubtype = frsubtype>>4;
memset((void *)mic_iv, 0, 16);
memset((void *)mic_header1, 0, 16);
memset((void *)mic_header2, 0, 16);
memset((void *)ctr_preload, 0, 16);
memset((void *)chain_buffer, 0, 16);
memset((void *)aes_out, 0, 16);
memset((void *)padded_buffer, 0, 16);
/* A4 present only for headers longer than the 3-address forms */
if ((hdrlen == WLAN_HDR_A3_LEN) || (hdrlen == WLAN_HDR_A3_QOS_LEN))
a4_exists = 0;
else
a4_exists = 1;
/* Detect a QoS Control field; for QoS frames the effective header
 * grows by 2 so the CCMP header offset below is correct.
 */
if (((frtype|frsubtype) == WIFI_DATA_CFACK) ||
((frtype|frsubtype) == WIFI_DATA_CFPOLL) ||
((frtype|frsubtype) == WIFI_DATA_CFACKPOLL)) {
qc_exists = 1;
if (hdrlen != WLAN_HDR_A3_QOS_LEN)
hdrlen += 2;
} else if ((frtype == WIFI_DATA) && /* add for CONFIG_IEEE80211W, none 11w also can use */
((frsubtype == 0x08) ||
(frsubtype == 0x09) ||
(frsubtype == 0x0a) ||
(frsubtype == 0x0b))) {
if (hdrlen != WLAN_HDR_A3_QOS_LEN)
hdrlen += 2;
qc_exists = 1;
} else {
qc_exists = 0;
}
/* Extract PN0..PN5 from the CCMP header (PN2/PN3 skip the
 * keyid/rsvd octets at hdrlen+2/3).
 */
pn_vector[0] = pframe[hdrlen];
pn_vector[1] = pframe[hdrlen+1];
pn_vector[2] = pframe[hdrlen+4];
pn_vector[3] = pframe[hdrlen+5];
pn_vector[4] = pframe[hdrlen+6];
pn_vector[5] = pframe[hdrlen+7];
construct_mic_iv(mic_iv,
qc_exists,
a4_exists,
pframe, /* message, */
plen,
pn_vector,
frtype); /* add for CONFIG_IEEE80211W, none 11w also can use */
construct_mic_header1(mic_header1,
hdrlen,
pframe, /* message */
frtype); /* add for CONFIG_IEEE80211W, none 11w also can use */
construct_mic_header2(mic_header2,
pframe, /* message, */
a4_exists,
qc_exists);
payload_remainder = plen % 16;
num_blocks = plen / 16;
/* Find start of payload */
payload_index = (hdrlen + 8);
/* Calculate MIC: AES-CBC-MAC over IV block, both AAD blocks,
 * then the payload (zero-padded final partial block).
 */
aes128k128d(key, mic_iv, aes_out);
bitwise_xor(aes_out, mic_header1, chain_buffer);
aes128k128d(key, chain_buffer, aes_out);
bitwise_xor(aes_out, mic_header2, chain_buffer);
aes128k128d(key, chain_buffer, aes_out);
for (i = 0; i < num_blocks; i++) {
bitwise_xor(aes_out, &pframe[payload_index], chain_buffer);
payload_index += 16;
aes128k128d(key, chain_buffer, aes_out);
}
/* Add on the final payload block if it needs padding */
if (payload_remainder > 0) {
for (j = 0; j < 16; j++)
padded_buffer[j] = 0x00;
for (j = 0; j < payload_remainder; j++)
padded_buffer[j] = pframe[payload_index++];
bitwise_xor(aes_out, padded_buffer, chain_buffer);
aes128k128d(key, chain_buffer, aes_out);
}
/* MIC is the first 8 bytes of the final CBC-MAC state */
for (j = 0 ; j < 8; j++)
mic[j] = aes_out[j];
/* Insert MIC into payload */
for (j = 0; j < 8; j++)
pframe[payload_index+j] = mic[j];
/* CTR-mode encryption of the payload, counter starts at 1 */
payload_index = hdrlen + 8;
for (i = 0; i < num_blocks; i++) {
construct_ctr_preload(ctr_preload, a4_exists, qc_exists, pframe, /* message, */
pn_vector, i+1, frtype);
/* add for CONFIG_IEEE80211W, none 11w also can use */
aes128k128d(key, ctr_preload, aes_out);
bitwise_xor(aes_out, &pframe[payload_index], chain_buffer);
for (j = 0; j < 16; j++)
pframe[payload_index++] = chain_buffer[j];
}
if (payload_remainder > 0) {
/* If there is a short final block, then pad it,*/
/* encrypt it and copy the unpadded part back */
construct_ctr_preload(ctr_preload, a4_exists, qc_exists, pframe, /* message, */
pn_vector, num_blocks+1, frtype);
/* add for CONFIG_IEEE80211W, none 11w also can use */
for (j = 0; j < 16; j++)
padded_buffer[j] = 0x00;
for (j = 0; j < payload_remainder; j++)
padded_buffer[j] = pframe[payload_index+j];
aes128k128d(key, ctr_preload, aes_out);
bitwise_xor(aes_out, padded_buffer, chain_buffer);
for (j = 0; j < payload_remainder; j++)
pframe[payload_index++] = chain_buffer[j];
}
/* Encrypt the MIC with counter value 0 */
construct_ctr_preload(ctr_preload, a4_exists, qc_exists, pframe, /* message, */
pn_vector, 0, frtype);
/* add for CONFIG_IEEE80211W, none 11w also can use */
for (j = 0; j < 16; j++)
padded_buffer[j] = 0x00;
for (j = 0; j < 8; j++)
padded_buffer[j] = pframe[j+hdrlen+8+plen];
aes128k128d(key, ctr_preload, aes_out);
bitwise_xor(aes_out, padded_buffer, chain_buffer);
for (j = 0; j < 8; j++)
pframe[payload_index++] = chain_buffer[j];
return _SUCCESS;
}
/*
 * rtw_aes_encrypt - CCMP-encrypt every fragment of an xmit frame in place.
 * @padapter: owning adapter
 * @pxmitframe: actually a struct xmit_frame *
 *
 * Chooses the group key for multicast RA, otherwise the pairwise key, and
 * runs aes_cipher() on each fragment.  Fragments other than the last are
 * frag_len apart, rounded up to a 4-byte boundary.
 *
 * Returns _SUCCESS, or _FAIL when the frame has no buffer.
 */
u32 rtw_aes_encrypt(struct adapter *padapter, u8 *pxmitframe)
{ /* exclude ICV */
/*static*/
/* unsigned char message[MAX_MSG_SIZE]; */
/* Intermediate Buffers */
signed int curfragnum, length;
u8 *pframe, *prwskey; /* *payload,*iv */
u8 hw_hdr_offset = 0;
struct pkt_attrib *pattrib = &((struct xmit_frame *)pxmitframe)->attrib;
struct security_priv *psecuritypriv = &padapter->securitypriv;
struct xmit_priv *pxmitpriv = &padapter->xmitpriv;
u32 res = _SUCCESS;
if (!((struct xmit_frame *)pxmitframe)->buf_addr)
return _FAIL;
hw_hdr_offset = TXDESC_OFFSET;
pframe = ((struct xmit_frame *)pxmitframe)->buf_addr + hw_hdr_offset;
/* 4 start to encrypt each fragment */
if (pattrib->encrypt == _AES_) {
if (is_multicast_ether_addr(pattrib->ra))
prwskey = psecuritypriv->dot118021XGrpKey[psecuritypriv->dot118021XGrpKeyid].skey;
else
prwskey = pattrib->dot118021x_UncstKey.skey;
for (curfragnum = 0; curfragnum < pattrib->nr_frags; curfragnum++) {
if ((curfragnum+1) == pattrib->nr_frags) { /* 4 the last fragment */
/* last fragment: payload length derives from last_txcmdsz */
length = pattrib->last_txcmdsz-pattrib->hdrlen-pattrib->iv_len-pattrib->icv_len;
aes_cipher(prwskey, pattrib->hdrlen, pframe, length);
} else {
length = pxmitpriv->frag_len-pattrib->hdrlen-pattrib->iv_len-pattrib->icv_len;
aes_cipher(prwskey, pattrib->hdrlen, pframe, length);
/* advance to the next fragment, 4-byte aligned */
pframe += pxmitpriv->frag_len;
pframe = (u8 *)round_up((SIZE_PTR)(pframe), 4);
}
}
}
return res;
}
/*
 * aes_decipher - CCMP-decrypt one received MPDU in place and verify its MIC.
 * @key: 128-bit temporal key
 * @hdrlen: 802.11 header length (bumped +2 below for QoS frames)
 * @pframe: frame starting at the 802.11 header
 * @plen: payload length including the 8-byte MIC (excludes hdr and CCMP hdr)
 *
 * CTR-decrypts the payload in place, then recomputes the CBC-MAC MIC over a
 * copy of the frame ("message") and compares it with the received MIC.
 * NOTE(review): the static message[] buffer makes this non-reentrant —
 * presumably decrypt runs on a single recv path; confirm before adding
 * concurrency.  The MIC compare is also not constant-time.
 *
 * Returns _SUCCESS, or _FAIL on MIC mismatch.
 */
static signed int aes_decipher(u8 *key, uint hdrlen,
u8 *pframe, uint plen)
{
static u8 message[MAX_MSG_SIZE];
uint qc_exists, a4_exists, i, j, payload_remainder,
num_blocks, payload_index;
signed int res = _SUCCESS;
u8 pn_vector[6];
u8 mic_iv[16];
u8 mic_header1[16];
u8 mic_header2[16];
u8 ctr_preload[16];
/* Intermediate Buffers */
u8 chain_buffer[16];
u8 aes_out[16];
u8 padded_buffer[16];
u8 mic[8];
uint frtype = GetFrameType(pframe);
uint frsubtype = GetFrameSubType(pframe);
frsubtype = frsubtype>>4;
memset((void *)mic_iv, 0, 16);
memset((void *)mic_header1, 0, 16);
memset((void *)mic_header2, 0, 16);
memset((void *)ctr_preload, 0, 16);
memset((void *)chain_buffer, 0, 16);
memset((void *)aes_out, 0, 16);
memset((void *)padded_buffer, 0, 16);
/* start to decrypt the payload */
num_blocks = (plen-8) / 16; /* plen including LLC, payload_length and mic) */
payload_remainder = (plen-8) % 16;
/* PN0..PN5 from the CCMP header (PN2/PN3 skip keyid/rsvd octets) */
pn_vector[0] = pframe[hdrlen];
pn_vector[1] = pframe[hdrlen + 1];
pn_vector[2] = pframe[hdrlen + 4];
pn_vector[3] = pframe[hdrlen + 5];
pn_vector[4] = pframe[hdrlen + 6];
pn_vector[5] = pframe[hdrlen + 7];
/* A4 present only for headers longer than the 3-address forms */
if ((hdrlen == WLAN_HDR_A3_LEN) || (hdrlen == WLAN_HDR_A3_QOS_LEN))
a4_exists = 0;
else
a4_exists = 1;
/* Detect a QoS Control field (same logic as aes_cipher) */
if (((frtype|frsubtype) == WIFI_DATA_CFACK) ||
((frtype|frsubtype) == WIFI_DATA_CFPOLL) ||
((frtype|frsubtype) == WIFI_DATA_CFACKPOLL)) {
qc_exists = 1;
if (hdrlen != WLAN_HDR_A3_QOS_LEN)
hdrlen += 2;
} else if ((frtype == WIFI_DATA) && /* only for data packet . add for CONFIG_IEEE80211W, none 11w also can use */
((frsubtype == 0x08) ||
(frsubtype == 0x09) ||
(frsubtype == 0x0a) ||
(frsubtype == 0x0b))) {
if (hdrlen != WLAN_HDR_A3_QOS_LEN)
hdrlen += 2;
qc_exists = 1;
} else {
qc_exists = 0;
}
/* now, decrypt pframe with hdrlen offset and plen long */
payload_index = hdrlen + 8; /* 8 is for extiv */
for (i = 0; i < num_blocks; i++) {
construct_ctr_preload(ctr_preload, a4_exists,
qc_exists, pframe,
pn_vector, i + 1,
frtype); /* add for CONFIG_IEEE80211W, none 11w also can use */
aes128k128d(key, ctr_preload, aes_out);
bitwise_xor(aes_out, &pframe[payload_index], chain_buffer);
for (j = 0; j < 16; j++)
pframe[payload_index++] = chain_buffer[j];
}
if (payload_remainder > 0) {
/* If there is a short final block, then pad it,*/
/* encrypt it and copy the unpadded part back */
construct_ctr_preload(ctr_preload, a4_exists, qc_exists, pframe, pn_vector,
num_blocks+1, frtype);
/* add for CONFIG_IEEE80211W, none 11w also can use */
for (j = 0; j < 16; j++)
padded_buffer[j] = 0x00;
for (j = 0; j < payload_remainder; j++)
padded_buffer[j] = pframe[payload_index+j];
aes128k128d(key, ctr_preload, aes_out);
bitwise_xor(aes_out, padded_buffer, chain_buffer);
for (j = 0; j < payload_remainder; j++)
pframe[payload_index++] = chain_buffer[j];
}
/* start to calculate the mic over a plaintext copy of the frame */
if ((hdrlen + plen+8) <= MAX_MSG_SIZE)
memcpy((void *)message, pframe, (hdrlen + plen+8)); /* 8 is for ext iv len */
pn_vector[0] = pframe[hdrlen];
pn_vector[1] = pframe[hdrlen+1];
pn_vector[2] = pframe[hdrlen+4];
pn_vector[3] = pframe[hdrlen+5];
pn_vector[4] = pframe[hdrlen+6];
pn_vector[5] = pframe[hdrlen+7];
construct_mic_iv(mic_iv, qc_exists, a4_exists, message, plen-8, pn_vector, frtype);
/* add for CONFIG_IEEE80211W, none 11w also can use */
construct_mic_header1(mic_header1, hdrlen, message, frtype);
/* add for CONFIG_IEEE80211W, none 11w also can use */
construct_mic_header2(mic_header2, message, a4_exists, qc_exists);
payload_remainder = (plen-8) % 16;
num_blocks = (plen-8) / 16;
/* Find start of payload */
payload_index = (hdrlen + 8);
/* Calculate MIC: AES-CBC-MAC over IV block, both AAD blocks,
 * then the decrypted payload (zero-padded final partial block).
 */
aes128k128d(key, mic_iv, aes_out);
bitwise_xor(aes_out, mic_header1, chain_buffer);
aes128k128d(key, chain_buffer, aes_out);
bitwise_xor(aes_out, mic_header2, chain_buffer);
aes128k128d(key, chain_buffer, aes_out);
for (i = 0; i < num_blocks; i++) {
bitwise_xor(aes_out, &message[payload_index], chain_buffer);
payload_index += 16;
aes128k128d(key, chain_buffer, aes_out);
}
/* Add on the final payload block if it needs padding */
if (payload_remainder > 0) {
for (j = 0; j < 16; j++)
padded_buffer[j] = 0x00;
for (j = 0; j < payload_remainder; j++)
padded_buffer[j] = message[payload_index++];
bitwise_xor(aes_out, padded_buffer, chain_buffer);
aes128k128d(key, chain_buffer, aes_out);
}
for (j = 0; j < 8; j++)
mic[j] = aes_out[j];
/* Insert MIC into payload */
for (j = 0; j < 8; j++)
message[payload_index+j] = mic[j];
/* Re-run CTR over the copy so the computed MIC ends up in the same
 * (encrypted-then-decrypted) form as the received one.
 */
payload_index = hdrlen + 8;
for (i = 0; i < num_blocks; i++) {
construct_ctr_preload(ctr_preload, a4_exists, qc_exists, message, pn_vector, i+1,
frtype);
/* add for CONFIG_IEEE80211W, none 11w also can use */
aes128k128d(key, ctr_preload, aes_out);
bitwise_xor(aes_out, &message[payload_index], chain_buffer);
for (j = 0; j < 16; j++)
message[payload_index++] = chain_buffer[j];
}
if (payload_remainder > 0) {
/* If there is a short final block, then pad it,*/
/* encrypt it and copy the unpadded part back */
construct_ctr_preload(ctr_preload, a4_exists, qc_exists, message, pn_vector,
num_blocks+1, frtype);
/* add for CONFIG_IEEE80211W, none 11w also can use */
for (j = 0; j < 16; j++)
padded_buffer[j] = 0x00;
for (j = 0; j < payload_remainder; j++)
padded_buffer[j] = message[payload_index+j];
aes128k128d(key, ctr_preload, aes_out);
bitwise_xor(aes_out, padded_buffer, chain_buffer);
for (j = 0; j < payload_remainder; j++)
message[payload_index++] = chain_buffer[j];
}
/* Encrypt the MIC with counter value 0 */
construct_ctr_preload(ctr_preload, a4_exists, qc_exists, message, pn_vector, 0, frtype);
/* add for CONFIG_IEEE80211W, none 11w also can use */
for (j = 0; j < 16; j++)
padded_buffer[j] = 0x00;
for (j = 0; j < 8; j++)
padded_buffer[j] = message[j+hdrlen+8+plen-8];
aes128k128d(key, ctr_preload, aes_out);
bitwise_xor(aes_out, padded_buffer, chain_buffer);
for (j = 0; j < 8; j++)
message[payload_index++] = chain_buffer[j];
/* compare the mic */
for (i = 0; i < 8; i++) {
if (pframe[hdrlen + 8 + plen - 8 + i] != message[hdrlen + 8 + plen - 8 + i])
res = _FAIL;
}
return res;
}
/*
 * rtw_aes_decrypt - CCMP-decrypt a received frame in place.
 * @padapter: owning adapter
 * @precvframe: actually a union recv_frame *
 *
 * Selects group or pairwise key by RA, validates the group key index, then
 * delegates to aes_decipher().  Returns _SUCCESS, or _FAIL on missing
 * sta/group key, key-index mismatch, or MIC failure.
 */
u32 rtw_aes_decrypt(struct adapter *padapter, u8 *precvframe)
{ /* exclude ICV */
/*static*/
/* unsigned char message[MAX_MSG_SIZE]; */
/* Intermediate Buffers */
signed int length;
u8 *pframe, *prwskey; /* *payload,*iv */
struct sta_info *stainfo;
struct rx_pkt_attrib *prxattrib = &((union recv_frame *)precvframe)->u.hdr.attrib;
struct security_priv *psecuritypriv = &padapter->securitypriv;
u32 res = _SUCCESS;
pframe = (unsigned char *)((union recv_frame *)precvframe)->u.hdr.rx_data;
/* 4 start to decrypt each fragment */
if (prxattrib->encrypt == _AES_) {
stainfo = rtw_get_stainfo(&padapter->stapriv, &prxattrib->ta[0]);
if (stainfo) {
if (is_multicast_ether_addr(prxattrib->ra)) {
/* Rate-limited (1 s) accounting of group-addressed frames that
 * arrive before the group key is installed.  NOTE(review):
 * function-local statics are shared across adapters — presumably
 * acceptable for debug counters only.
 */
static unsigned long start;
static u32 no_gkey_bc_cnt;
static u32 no_gkey_mc_cnt;
if (!psecuritypriv->binstallGrpkey) {
res = _FAIL;
if (start == 0)
start = jiffies;
if (is_broadcast_mac_addr(prxattrib->ra))
no_gkey_bc_cnt++;
else
no_gkey_mc_cnt++;
if (jiffies_to_msecs(jiffies - start) > 1000) {
if (no_gkey_bc_cnt || no_gkey_mc_cnt) {
netdev_dbg(padapter->pnetdev,
FUNC_ADPT_FMT " no_gkey_bc_cnt:%u, no_gkey_mc_cnt:%u\n",
FUNC_ADPT_ARG(padapter),
no_gkey_bc_cnt,
no_gkey_mc_cnt);
}
start = jiffies;
no_gkey_bc_cnt = 0;
no_gkey_mc_cnt = 0;
}
goto exit;
}
/* Group key now installed: report and reset the counters once. */
if (no_gkey_bc_cnt || no_gkey_mc_cnt) {
netdev_dbg(padapter->pnetdev,
FUNC_ADPT_FMT " gkey installed. no_gkey_bc_cnt:%u, no_gkey_mc_cnt:%u\n",
FUNC_ADPT_ARG(padapter),
no_gkey_bc_cnt,
no_gkey_mc_cnt);
}
start = 0;
no_gkey_bc_cnt = 0;
no_gkey_mc_cnt = 0;
prwskey = psecuritypriv->dot118021XGrpKey[prxattrib->key_index].skey;
/* reject frames keyed with a stale/unexpected group key index */
if (psecuritypriv->dot118021XGrpKeyid != prxattrib->key_index) {
res = _FAIL;
goto exit;
}
} else {
/* Unicast: use the pairwise key of the transmitting station. */
prwskey = &stainfo->dot118021x_UncstKey.skey[0];
}
/* length covers payload + 8-byte MIC */
length = ((union recv_frame *)precvframe)->u.hdr.len-prxattrib->hdrlen-prxattrib->iv_len;
res = aes_decipher(prwskey, prxattrib->hdrlen, pframe, length);
} else {
res = _FAIL;
}
}
exit:
return res;
}
/*
 * rtw_BIP_verify - validate the BIP (AES-128-CMAC) MIC of a received
 * protected broadcast/multicast management frame (IEEE 802.11w).
 * @padapter: owning adapter
 * @precvframe: actually a union recv_frame *
 *
 * Rebuilds the BIP AAD (masked frame control + A1..A3) followed by the frame
 * body with the MME MIC field zeroed, runs AES-CMAC over it and compares the
 * result with the last 8 bytes of the packet.  Also enforces a monotonically
 * increasing IPN and a matching BIP key index.
 *
 * Returns _SUCCESS on a valid MIC, RTW_RX_HANDLED when no MME IE is present,
 * _FAIL otherwise.
 */
u32 rtw_BIP_verify(struct adapter *padapter, u8 *precvframe)
{
	struct rx_pkt_attrib *pattrib = &((union recv_frame *)precvframe)->u.hdr.attrib;
	u8 *pframe;
	u8 *BIP_AAD, *p;
	u32 res = _FAIL;
	uint len, ori_len;
	struct ieee80211_hdr *pwlanhdr;
	u8 mic[16];
	u16 keyid = 0;
	u64 temp_ipn = 0;
	struct mlme_ext_priv *pmlmeext = &padapter->mlmeextpriv;
	__le16 le_tmp;
	/* Zero-init: only 6 of the 8 bytes are filled by memcpy below, so the
	 * top bytes must not be stack garbage when converting to host order.
	 */
	__le64 le_tmp64 = 0;

	ori_len = pattrib->pkt_len-WLAN_HDR_A3_LEN+BIP_AAD_SIZE;
	BIP_AAD = rtw_zmalloc(ori_len);
	if (!BIP_AAD)
		return _FAIL;

	/* PKT start */
	pframe = (unsigned char *)((union recv_frame *)precvframe)->u.hdr.rx_data;
	/* mapping to wlan header */
	pwlanhdr = (struct ieee80211_hdr *)pframe;
	/* save the frame body + MME */
	memcpy(BIP_AAD+BIP_AAD_SIZE, pframe+WLAN_HDR_A3_LEN, pattrib->pkt_len-WLAN_HDR_A3_LEN);
	/* find MME IE pointer */
	p = rtw_get_ie(BIP_AAD+BIP_AAD_SIZE, WLAN_EID_MMIE, &len, pattrib->pkt_len-WLAN_HDR_A3_LEN);
	if (!p) {
		res = RTW_RX_HANDLED;
		goto BIP_exit;
	}

	/* save packet number (IPN, 48-bit little-endian at MME offset 4) */
	memcpy(&le_tmp64, p+4, 6);
	temp_ipn = le64_to_cpu(le_tmp64);
	/* BIP packet number should be bigger than the previous BIP packet */
	if (temp_ipn <= pmlmeext->mgnt_80211w_IPN_rx)
		goto BIP_exit;

	/* copy key index */
	memcpy(&le_tmp, p+2, 2);
	keyid = le16_to_cpu(le_tmp);
	if (keyid != padapter->securitypriv.dot11wBIPKeyid)
		goto BIP_exit;

	/* clear the MIC field of MME to zero */
	memset(p+2+len-8, 0, 8);

	/* construct AAD, copy frame control field */
	memcpy(BIP_AAD, &pwlanhdr->frame_control, 2);
	ClearRetry(BIP_AAD);
	ClearPwrMgt(BIP_AAD);
	ClearMData(BIP_AAD);
	/* construct AAD, copy address 1 to address 3 */
	memcpy(BIP_AAD+2, pwlanhdr->addr1, 18);

	if (omac1_aes_128(padapter->securitypriv.dot11wBIPKey[padapter->securitypriv.dot11wBIPKeyid].skey
		, BIP_AAD, ori_len, mic))
		goto BIP_exit;

	/* MIC field should be last 8 bytes of packet (packet without FCS) */
	if (!memcmp(mic, pframe+pattrib->pkt_len-8, 8)) {
		pmlmeext->mgnt_80211w_IPN_rx = temp_ipn;
		res = _SUCCESS;
	}

BIP_exit:
	kfree(BIP_AAD);
	return res;
}
/* Multiply a 128-bit value by x in GF(2^128) (CMAC subkey derivation):
 * left-shift the whole block by one bit and, if the top bit overflowed,
 * XOR in the field polynomial constant 0x87.
 */
static void gf_mulx(u8 *pad)
{
	const int overflow = pad[0] & 0x80;
	int i;

	for (i = 0; i < AES_BLOCK_SIZE; i++) {
		u8 next = (i + 1 < AES_BLOCK_SIZE) ? pad[i + 1] : 0;

		pad[i] = (pad[i] << 1) | (next >> 7);
	}
	if (overflow)
		pad[AES_BLOCK_SIZE - 1] ^= 0x87;
}
/**
* omac1_aes_128_vector - One-Key CBC MAC (OMAC1) hash with AES-128
* @key: 128-bit key for the hash operation
* @num_elem: Number of elements in the data vector
* @addr: Pointers to the data areas
* @len: Lengths of the data blocks
* @mac: Buffer for MAC (128 bits, i.e., 16 bytes)
* Returns: 0 on success, -1 on failure
*
* This is a mode for using block cipher (AES in this case) for authentication.
* OMAC1 was standardized with the name CMAC by NIST in a Special Publication
* (SP) 800-38B.
*/
static int omac1_aes_128_vector(u8 *key, size_t num_elem,
u8 *addr[], size_t *len, u8 *mac)
{
struct crypto_aes_ctx ctx;
u8 cbc[AES_BLOCK_SIZE], pad[AES_BLOCK_SIZE];
u8 *pos, *end;
size_t i, e, left, total_len;
int ret;
ret = aes_expandkey(&ctx, key, 16);
if (ret)
return -1;
memset(cbc, 0, AES_BLOCK_SIZE);
total_len = 0;
for (e = 0; e < num_elem; e++)
total_len += len[e];
left = total_len;
e = 0;
pos = addr[0];
end = pos + len[0];
/* CBC-MAC over all full blocks, walking byte-wise across the
 * scattered input elements; the final (possibly partial) block is
 * deliberately left un-encrypted here and finished below.
 */
while (left >= AES_BLOCK_SIZE) {
for (i = 0; i < AES_BLOCK_SIZE; i++) {
cbc[i] ^= *pos++;
if (pos >= end) {
/* advance to the next input element */
e++;
pos = addr[e];
end = pos + len[e];
}
}
if (left > AES_BLOCK_SIZE)
aes_encrypt(&ctx, cbc, cbc);
left -= AES_BLOCK_SIZE;
}
/* Derive the CMAC subkey: pad = L = E_K(0), then L*x (= K1).
 * A second gf_mulx below turns it into L*x^2 (= K2) when the
 * message needs 10* padding.
 */
memset(pad, 0, AES_BLOCK_SIZE);
aes_encrypt(&ctx, pad, pad);
gf_mulx(pad);
if (left || total_len == 0) {
/* partial (or empty) final block: absorb remainder, 0x80 pad,
 * and switch the subkey from K1 to K2
 */
for (i = 0; i < left; i++) {
cbc[i] ^= *pos++;
if (pos >= end) {
e++;
pos = addr[e];
end = pos + len[e];
}
}
cbc[left] ^= 0x80;
gf_mulx(pad);
}
/* final block: E_K(last_block XOR subkey) is the MAC */
for (i = 0; i < AES_BLOCK_SIZE; i++)
pad[i] ^= cbc[i];
aes_encrypt(&ctx, pad, mac);
/* wipe the expanded key schedule */
memzero_explicit(&ctx, sizeof(ctx));
return 0;
}
/**
* omac1_aes_128 - One-Key CBC MAC (OMAC1) hash with AES-128 (aka AES-CMAC)
* @key: 128-bit key for the hash operation
* @data: Data buffer for which a MAC is determined
* @data_len: Length of data buffer in bytes
* @mac: Buffer for MAC (128 bits, i.e., 16 bytes)
* Returns: 0 on success, -1 on failure
*
* This is a mode for using block cipher (AES in this case) for authentication.
* OMAC1 was standardized with the name CMAC by NIST in a Special Publication
* (SP) 800-38B.
* modify for CONFIG_IEEE80211W */
int omac1_aes_128(u8 *key, u8 *data, size_t data_len, u8 *mac)
{
/* single-buffer convenience wrapper around the vector variant */
return omac1_aes_128_vector(key, 1, &data, &data_len, mac);
}
/*
 * Restore the HW WEP key setting according to key_mask.  Only keys whose
 * bit is set in key_mask are re-programmed; the currently selected default
 * key (dot11PrivacyKeyIndex) is installed as the TX key.
 */
void rtw_sec_restore_wep_key(struct adapter *adapter)
{
	struct security_priv *securitypriv = &(adapter->securitypriv);
	signed int keyid;

	/* nothing to restore unless WEP is the active privacy algorithm */
	if ((securitypriv->dot11PrivacyAlgrthm != _WEP40_) &&
	    (securitypriv->dot11PrivacyAlgrthm != _WEP104_))
		return;

	for (keyid = 0; keyid < 4; keyid++) {
		if (!(securitypriv->key_mask & BIT(keyid)))
			continue;

		rtw_set_key(adapter, securitypriv, keyid,
			    keyid == securitypriv->dot11PrivacyKeyIndex, false);
	}
}
/*
 * rtw_handle_tkip_countermeasure - gate connection attempts while TKIP MIC
 * countermeasures are active.
 * @adapter: adapter to check
 * @caller: caller name, used only in the debug messages
 *
 * Per the TKIP countermeasure rule, associations are blocked for 60 s after
 * the second MIC failure.  Clears the flag once the 60 s window has elapsed.
 *
 * Returns _SUCCESS when it is OK to proceed, _FAIL while still blocked.
 */
u8 rtw_handle_tkip_countermeasure(struct adapter *adapter, const char *caller)
{
struct security_priv *securitypriv = &(adapter->securitypriv);
u8 status = _SUCCESS;
if (securitypriv->btkip_countermeasure) {
unsigned long passing_ms = jiffies_to_msecs(jiffies - securitypriv->btkip_countermeasure_time);
if (passing_ms > 60*1000) {
/* 60 s elapsed: lift the countermeasure */
netdev_dbg(adapter->pnetdev,
"%s(%s) countermeasure time:%lus > 60s\n",
caller, ADPT_ARG(adapter),
passing_ms / 1000);
securitypriv->btkip_countermeasure = false;
securitypriv->btkip_countermeasure_time = 0;
} else {
netdev_dbg(adapter->pnetdev,
"%s(%s) countermeasure time:%lus < 60s\n",
caller, ADPT_ARG(adapter),
passing_ms / 1000);
status = _FAIL;
}
}
return status;
}
| linux-master | drivers/staging/rtl8723bs/core/rtw_security.c |
// SPDX-License-Identifier: GPL-2.0
/******************************************************************************
*
* Copyright(c) 2007 - 2012 Realtek Corporation. All rights reserved.
*
******************************************************************************/
#include <drv_types.h>
#include <rtw_debug.h>
#include <hal_btcoex.h>
#include <linux/jiffies.h>
/*
 * Table of per-command-code completion callbacks, indexed by command code
 * (order must match the GEN_CMD_CODE enumeration).  A NULL entry means the
 * command completes without a callback.
 */
static struct _cmd_callback rtw_cmd_callback[] = {
{GEN_CMD_CODE(_Read_MACREG), NULL}, /*0*/
{GEN_CMD_CODE(_Write_MACREG), NULL},
{GEN_CMD_CODE(_Read_BBREG), &rtw_getbbrfreg_cmdrsp_callback},
{GEN_CMD_CODE(_Write_BBREG), NULL},
{GEN_CMD_CODE(_Read_RFREG), &rtw_getbbrfreg_cmdrsp_callback},
{GEN_CMD_CODE(_Write_RFREG), NULL}, /*5*/
{GEN_CMD_CODE(_Read_EEPROM), NULL},
{GEN_CMD_CODE(_Write_EEPROM), NULL},
{GEN_CMD_CODE(_Read_EFUSE), NULL},
{GEN_CMD_CODE(_Write_EFUSE), NULL},
{GEN_CMD_CODE(_Read_CAM), NULL}, /*10*/
{GEN_CMD_CODE(_Write_CAM), NULL},
{GEN_CMD_CODE(_setBCNITV), NULL},
{GEN_CMD_CODE(_setMBIDCFG), NULL},
{GEN_CMD_CODE(_JoinBss), &rtw_joinbss_cmd_callback}, /*14*/
{GEN_CMD_CODE(_DisConnect), &rtw_disassoc_cmd_callback}, /*15*/
{GEN_CMD_CODE(_CreateBss), &rtw_createbss_cmd_callback},
{GEN_CMD_CODE(_SetOpMode), NULL},
{GEN_CMD_CODE(_SiteSurvey), &rtw_survey_cmd_callback}, /*18*/
{GEN_CMD_CODE(_SetAuth), NULL},
{GEN_CMD_CODE(_SetKey), NULL}, /*20*/
{GEN_CMD_CODE(_SetStaKey), &rtw_setstaKey_cmdrsp_callback},
{GEN_CMD_CODE(_SetAssocSta), &rtw_setassocsta_cmdrsp_callback},
{GEN_CMD_CODE(_DelAssocSta), NULL},
{GEN_CMD_CODE(_SetStaPwrState), NULL},
{GEN_CMD_CODE(_SetBasicRate), NULL}, /*25*/
{GEN_CMD_CODE(_GetBasicRate), NULL},
{GEN_CMD_CODE(_SetDataRate), NULL},
{GEN_CMD_CODE(_GetDataRate), NULL},
{GEN_CMD_CODE(_SetPhyInfo), NULL},
{GEN_CMD_CODE(_GetPhyInfo), NULL}, /*30*/
{GEN_CMD_CODE(_SetPhy), NULL},
{GEN_CMD_CODE(_GetPhy), NULL},
{GEN_CMD_CODE(_readRssi), NULL},
{GEN_CMD_CODE(_readGain), NULL},
{GEN_CMD_CODE(_SetAtim), NULL}, /*35*/
{GEN_CMD_CODE(_SetPwrMode), NULL},
{GEN_CMD_CODE(_JoinbssRpt), NULL},
{GEN_CMD_CODE(_SetRaTable), NULL},
{GEN_CMD_CODE(_GetRaTable), NULL},
{GEN_CMD_CODE(_GetCCXReport), NULL}, /*40*/
{GEN_CMD_CODE(_GetDTMReport), NULL},
{GEN_CMD_CODE(_GetTXRateStatistics), NULL},
{GEN_CMD_CODE(_SetUsbSuspend), NULL},
{GEN_CMD_CODE(_SetH2cLbk), NULL},
{GEN_CMD_CODE(_AddBAReq), NULL}, /*45*/
{GEN_CMD_CODE(_SetChannel), NULL}, /*46*/
{GEN_CMD_CODE(_SetTxPower), NULL},
{GEN_CMD_CODE(_SwitchAntenna), NULL},
{GEN_CMD_CODE(_SetCrystalCap), NULL},
{GEN_CMD_CODE(_SetSingleCarrierTx), NULL}, /*50*/
{GEN_CMD_CODE(_SetSingleToneTx), NULL}, /*51*/
{GEN_CMD_CODE(_SetCarrierSuppressionTx), NULL},
{GEN_CMD_CODE(_SetContinuousTx), NULL},
{GEN_CMD_CODE(_SwitchBandwidth), NULL}, /*54*/
{GEN_CMD_CODE(_TX_Beacon), NULL},/*55*/
{GEN_CMD_CODE(_Set_MLME_EVT), NULL},/*56*/
{GEN_CMD_CODE(_Set_Drv_Extra), NULL},/*57*/
{GEN_CMD_CODE(_Set_H2C_MSG), NULL},/*58*/
{GEN_CMD_CODE(_SetChannelPlan), NULL},/*59*/
{GEN_CMD_CODE(_SetChannelSwitch), NULL},/*60*/
{GEN_CMD_CODE(_TDLS), NULL},/*61*/
{GEN_CMD_CODE(_ChkBMCSleepq), NULL}, /*62*/
{GEN_CMD_CODE(_RunInThreadCMD), NULL},/*63*/
};
/*
 * Command handler dispatch table, indexed by command code.
 *
 * Each entry pairs the expected parameter-structure size with the handler
 * invoked from rtw_cmd_thread(); a NULL handler means the command code is
 * reserved/unsupported.  The positional /*N*/ comments track the command
 * code of selected slots — entry order must match the GEN_CMD_CODE()
 * numbering, so never reorder entries.
 */
static struct cmd_hdl wlancmds[] = {
	GEN_DRV_CMD_HANDLER(0, NULL) /*0*/
	GEN_DRV_CMD_HANDLER(0, NULL)
	GEN_DRV_CMD_HANDLER(0, NULL)
	GEN_DRV_CMD_HANDLER(0, NULL)
	GEN_DRV_CMD_HANDLER(0, NULL)
	GEN_DRV_CMD_HANDLER(0, NULL)
	GEN_MLME_EXT_HANDLER(0, NULL)
	GEN_MLME_EXT_HANDLER(0, NULL)
	GEN_MLME_EXT_HANDLER(0, NULL)
	GEN_MLME_EXT_HANDLER(0, NULL)
	GEN_MLME_EXT_HANDLER(0, NULL) /*10*/
	GEN_MLME_EXT_HANDLER(0, NULL)
	GEN_MLME_EXT_HANDLER(0, NULL)
	GEN_MLME_EXT_HANDLER(0, NULL)
	GEN_MLME_EXT_HANDLER(sizeof(struct joinbss_parm), join_cmd_hdl) /*14*/
	GEN_MLME_EXT_HANDLER(sizeof(struct disconnect_parm), disconnect_hdl)
	GEN_MLME_EXT_HANDLER(sizeof(struct createbss_parm), createbss_hdl)
	GEN_MLME_EXT_HANDLER(sizeof(struct setopmode_parm), setopmode_hdl)
	GEN_MLME_EXT_HANDLER(sizeof(struct sitesurvey_parm), sitesurvey_cmd_hdl) /*18*/
	GEN_MLME_EXT_HANDLER(sizeof(struct setauth_parm), setauth_hdl)
	GEN_MLME_EXT_HANDLER(sizeof(struct setkey_parm), setkey_hdl) /*20*/
	GEN_MLME_EXT_HANDLER(sizeof(struct set_stakey_parm), set_stakey_hdl)
	GEN_MLME_EXT_HANDLER(sizeof(struct set_assocsta_parm), NULL)
	GEN_MLME_EXT_HANDLER(sizeof(struct del_assocsta_parm), NULL)
	GEN_MLME_EXT_HANDLER(sizeof(struct setstapwrstate_parm), NULL)
	GEN_MLME_EXT_HANDLER(sizeof(struct setbasicrate_parm), NULL)
	GEN_MLME_EXT_HANDLER(sizeof(struct getbasicrate_parm), NULL)
	GEN_MLME_EXT_HANDLER(sizeof(struct setdatarate_parm), NULL)
	GEN_MLME_EXT_HANDLER(sizeof(struct getdatarate_parm), NULL)
	GEN_MLME_EXT_HANDLER(sizeof(struct setphyinfo_parm), NULL)
	GEN_MLME_EXT_HANDLER(sizeof(struct getphyinfo_parm), NULL) /*30*/
	GEN_MLME_EXT_HANDLER(sizeof(struct setphy_parm), NULL)
	GEN_MLME_EXT_HANDLER(sizeof(struct getphy_parm), NULL)
	GEN_MLME_EXT_HANDLER(0, NULL)
	GEN_MLME_EXT_HANDLER(0, NULL)
	GEN_MLME_EXT_HANDLER(0, NULL)
	GEN_MLME_EXT_HANDLER(0, NULL)
	GEN_MLME_EXT_HANDLER(0, NULL)
	GEN_MLME_EXT_HANDLER(0, NULL)
	GEN_MLME_EXT_HANDLER(0, NULL)
	GEN_MLME_EXT_HANDLER(0, NULL) /*40*/
	GEN_MLME_EXT_HANDLER(0, NULL)
	GEN_MLME_EXT_HANDLER(0, NULL)
	GEN_MLME_EXT_HANDLER(0, NULL)
	GEN_MLME_EXT_HANDLER(0, NULL)
	GEN_MLME_EXT_HANDLER(sizeof(struct addBaReq_parm), add_ba_hdl)
	GEN_MLME_EXT_HANDLER(sizeof(struct set_ch_parm), set_ch_hdl) /* 46 */
	GEN_MLME_EXT_HANDLER(0, NULL)
	GEN_MLME_EXT_HANDLER(0, NULL)
	GEN_MLME_EXT_HANDLER(0, NULL)
	GEN_MLME_EXT_HANDLER(0, NULL) /*50*/
	GEN_MLME_EXT_HANDLER(0, NULL)
	GEN_MLME_EXT_HANDLER(0, NULL)
	GEN_MLME_EXT_HANDLER(0, NULL)
	GEN_MLME_EXT_HANDLER(0, NULL)
	GEN_MLME_EXT_HANDLER(sizeof(struct Tx_Beacon_param), tx_beacon_hdl) /*55*/
	GEN_MLME_EXT_HANDLER(0, mlme_evt_hdl) /*56*/
	GEN_MLME_EXT_HANDLER(0, rtw_drvextra_cmd_hdl) /*57*/
	GEN_MLME_EXT_HANDLER(0, h2c_msg_hdl) /*58*/
	GEN_MLME_EXT_HANDLER(sizeof(struct SetChannelPlan_param), set_chplan_hdl) /*59*/
	GEN_MLME_EXT_HANDLER(sizeof(struct SetChannelSwitch_param), set_csa_hdl) /*60*/
	GEN_MLME_EXT_HANDLER(sizeof(struct TDLSoption_param), tdls_hdl) /*61*/
	GEN_MLME_EXT_HANDLER(0, chk_bmc_sleepq_hdl) /*62*/
	GEN_MLME_EXT_HANDLER(sizeof(struct RunInThread_param), run_in_thread_hdl) /*63*/
};
/*
* Caller and the rtw_cmd_thread can protect cmd_q by spin_lock.
* No irqsave is necessary.
*/
int rtw_init_cmd_priv(struct cmd_priv *pcmdpriv)
{
init_completion(&pcmdpriv->cmd_queue_comp);
init_completion(&pcmdpriv->terminate_cmdthread_comp);
INIT_LIST_HEAD(&pcmdpriv->cmd_queue.queue);
spin_lock_init(&pcmdpriv->cmd_queue.lock);
/* allocate DMA-able/Non-Page memory for cmd_buf and rsp_buf */
pcmdpriv->cmd_seq = 1;
pcmdpriv->cmd_allocated_buf = rtw_zmalloc(MAX_CMDSZ + CMDBUFF_ALIGN_SZ);
if (!pcmdpriv->cmd_allocated_buf)
return -ENOMEM;
pcmdpriv->cmd_buf = pcmdpriv->cmd_allocated_buf + CMDBUFF_ALIGN_SZ - ((SIZE_PTR)(pcmdpriv->cmd_allocated_buf) & (CMDBUFF_ALIGN_SZ-1));
pcmdpriv->rsp_allocated_buf = rtw_zmalloc(MAX_RSPSZ + 4);
if (!pcmdpriv->rsp_allocated_buf) {
kfree(pcmdpriv->cmd_allocated_buf);
return -ENOMEM;
}
pcmdpriv->rsp_buf = pcmdpriv->rsp_allocated_buf + 4 - ((SIZE_PTR)(pcmdpriv->rsp_allocated_buf) & 3);
pcmdpriv->cmd_issued_cnt = 0;
pcmdpriv->cmd_done_cnt = 0;
pcmdpriv->rsp_cnt = 0;
mutex_init(&pcmdpriv->sctx_mutex);
return 0;
}
static void c2h_wk_callback(struct work_struct *work);
/*
 * rtw_init_evt_priv - initialize the C2H (chip-to-host) event subsystem.
 * @pevtpriv: event-private area to set up.
 *
 * Return: 0 on success, -ENOMEM if the C2H queue cannot be allocated.
 */
int rtw_init_evt_priv(struct evt_priv *pevtpriv)
{
	/* allocate DMA-able/Non-Page memory for cmd_buf and rsp_buf */
	atomic_set(&pevtpriv->event_seq, 0);
	pevtpriv->evt_done_cnt = 0;

	_init_workitem(&pevtpriv->c2h_wk, c2h_wk_callback, NULL);
	pevtpriv->c2h_wk_alive = false;
	/* +1: circular buffer implementation needs one spare slot */
	pevtpriv->c2h_queue = rtw_cbuf_alloc(C2H_QUEUE_MAX_LEN+1);
	if (!pevtpriv->c2h_queue)
		return -ENOMEM;

	return 0;
}
/*
 * _rtw_free_evt_priv - tear down the C2H event subsystem.
 * @pevtpriv: event-private area to release.
 *
 * Cancels the C2H work item, waits for any in-flight callback to finish,
 * then drains and frees all queued events and the queue itself.
 */
void _rtw_free_evt_priv(struct evt_priv *pevtpriv)
{
	_cancel_workitem_sync(&pevtpriv->c2h_wk);
	/* busy-wait until the work callback has cleared its alive flag */
	while (pevtpriv->c2h_wk_alive)
		msleep(10);

	while (!rtw_cbuf_empty(pevtpriv->c2h_queue)) {
		void *c2h = rtw_cbuf_pop(pevtpriv->c2h_queue);
		/* NOTE(review): pevtpriv itself appears to be used as a
		 * non-allocated sentinel entry — only free real payloads;
		 * confirm against the enqueue side.
		 */
		if (c2h && c2h != (void *)pevtpriv)
			kfree(c2h);
	}
	kfree(pevtpriv->c2h_queue);
}
/*
 * _rtw_free_cmd_priv - release the command subsystem's buffers and mutex.
 * @pcmdpriv: command-private area to release; NULL is tolerated.
 */
void _rtw_free_cmd_priv(struct cmd_priv *pcmdpriv)
{
	if (!pcmdpriv)
		return;

	kfree(pcmdpriv->cmd_allocated_buf);
	kfree(pcmdpriv->rsp_allocated_buf);
	mutex_destroy(&pcmdpriv->sctx_mutex);
}
/*
* Calling Context:
*
* rtw_enqueue_cmd can only be called between kernel thread,
* since only spin_lock is used.
*
* ISR/Call-Back functions can't call this sub-function.
*
*/
/*
 * _rtw_enqueue_cmd - append a command object to a command queue.
 * @queue: destination queue.
 * @obj:   command object; a NULL object is silently ignored.
 *
 * Uses the irqsave spinlock variant so it is safe from any context.
 *
 * Return: always _SUCCESS.
 */
int _rtw_enqueue_cmd(struct __queue *queue, struct cmd_obj *obj)
{
	unsigned long flags;

	if (obj) {
		spin_lock_irqsave(&queue->lock, flags);
		list_add_tail(&obj->list, &queue->queue);
		spin_unlock_irqrestore(&queue->lock, flags);
	}

	return _SUCCESS;
}
/*
 * _rtw_dequeue_cmd - pop the oldest command object off a command queue.
 * @queue: queue to pop from.
 *
 * Return: the dequeued object (unlinked from the list), or NULL when the
 * queue is empty.
 */
struct cmd_obj *_rtw_dequeue_cmd(struct __queue *queue)
{
	struct cmd_obj *obj = NULL;
	unsigned long flags;

	spin_lock_irqsave(&queue->lock, flags);
	if (!list_empty(&queue->queue)) {
		obj = container_of(get_next(&queue->queue), struct cmd_obj, list);
		list_del_init(&obj->list);
	}
	spin_unlock_irqrestore(&queue->lock, flags);

	return obj;
}
/* Public wrapper around _rtw_free_evt_priv(). */
void rtw_free_evt_priv(struct evt_priv *pevtpriv)
{
	_rtw_free_evt_priv(pevtpriv);
}
/* Public wrapper around _rtw_free_cmd_priv(). */
void rtw_free_cmd_priv(struct cmd_priv *pcmdpriv)
{
	_rtw_free_cmd_priv(pcmdpriv);
}
int rtw_cmd_filter(struct cmd_priv *pcmdpriv, struct cmd_obj *cmd_obj);
/*
 * rtw_cmd_filter - decide whether a command may be processed right now.
 * @pcmdpriv: command-private area of the adapter.
 * @cmd_obj:  candidate command.
 *
 * Return: _SUCCESS if the command may proceed; _FAIL when hardware init
 * has not completed (unless the command is exempt) or the command thread
 * is not running.
 */
int rtw_cmd_filter(struct cmd_priv *pcmdpriv, struct cmd_obj *cmd_obj)
{
	u8 bAllow = false; /* set to true to allow enqueuing cmd when hw_init_completed is false */

	/* _SetChannelPlan is allowed even before hw init completes */
	if (cmd_obj->cmdcode == GEN_CMD_CODE(_SetChannelPlan))
		bAllow = true;

	if ((!pcmdpriv->padapter->hw_init_completed && !bAllow) ||
	    !atomic_read(&pcmdpriv->cmdthd_running))	/* com_thread not running */
		return _FAIL;
	return _SUCCESS;
}
/*
 * rtw_enqueue_cmd - filter, queue, and signal a command for the cmd thread.
 * @pcmdpriv: command-private area of the adapter.
 * @cmd_obj:  command to submit; ownership transfers on success, and the
 *            object is freed here when the filter rejects it.
 *
 * Return: _SUCCESS when queued (the command thread is woken), _FAIL
 * otherwise.
 */
int rtw_enqueue_cmd(struct cmd_priv *pcmdpriv, struct cmd_obj *cmd_obj)
{
	struct adapter *padapter = pcmdpriv->padapter;
	int res;

	if (!cmd_obj)
		return _FAIL;

	cmd_obj->padapter = padapter;

	if (rtw_cmd_filter(pcmdpriv, cmd_obj) == _FAIL) {
		/* rejected: the object is ours to release */
		rtw_free_cmd_obj(cmd_obj);
		return _FAIL;
	}

	res = _rtw_enqueue_cmd(&pcmdpriv->cmd_queue, cmd_obj);
	if (res == _SUCCESS)
		complete(&pcmdpriv->cmd_queue_comp);

	return res;
}
/* Pop the next pending command from the adapter's command queue, or NULL. */
struct cmd_obj *rtw_dequeue_cmd(struct cmd_priv *pcmdpriv)
{
	return _rtw_dequeue_cmd(&pcmdpriv->cmd_queue);
}
/*
 * rtw_free_cmd_obj - release a command object and the buffers it owns.
 * @pcmd: command object to free; must be non-NULL.
 *
 * _JoinBss_CMD_ and _CreateBss_CMD_ point parmbuf at caller-owned storage
 * (see rtw_joinbss_cmd()/rtw_createbss_cmd()), so their parameter buffer
 * is deliberately not freed here.
 */
void rtw_free_cmd_obj(struct cmd_obj *pcmd)
{
	if (pcmd->cmdcode != _JoinBss_CMD_ &&
	    pcmd->cmdcode != _CreateBss_CMD_) {
		/* free parmbuf in cmd_obj */
		kfree(pcmd->parmbuf);
	}

	/* rsp is only an owned allocation when a response size was set;
	 * kfree(NULL) is a no-op, so the old nested NULL check is redundant
	 */
	if (pcmd->rspsz != 0)
		kfree(pcmd->rsp);

	/* free cmd_obj */
	kfree(pcmd);
}
/*
 * rtw_stop_cmd_thread - request the command thread to exit and wait for it.
 * @adapter: owning adapter.
 *
 * Only acts when the thread exists, is marked running, and no stop request
 * is already pending. The completion wakes the thread out of its queue
 * wait so it can observe stop_req.
 */
void rtw_stop_cmd_thread(struct adapter *adapter)
{
	if (adapter->cmdThread &&
	    atomic_read(&adapter->cmdpriv.cmdthd_running) &&
	    adapter->cmdpriv.stop_req == 0) {
		adapter->cmdpriv.stop_req = 1;
		/* kick the thread out of wait_for_completion_interruptible() */
		complete(&adapter->cmdpriv.cmd_queue_comp);
		/* block until rtw_cmd_thread() signals its termination */
		wait_for_completion(&adapter->cmdpriv.terminate_cmdthread_comp);
	}
}
/*
 * rtw_cmd_thread - kernel thread that drains and executes queued commands.
 * @context: the owning struct adapter.
 *
 * Main loop: sleep on cmd_queue_comp, then drain the queue, dispatching
 * each command through the wlancmds handler table and its completion
 * callback from rtw_cmd_callback. Exits when the driver stops, the device
 * is removed, stop_req is raised, or the wait is interrupted; on exit all
 * still-queued commands are freed and terminate_cmdthread_comp is signalled.
 *
 * Return: always 0.
 */
int rtw_cmd_thread(void *context)
{
	u8 ret;
	struct cmd_obj *pcmd;
	u8 *pcmdbuf;
	u8 (*cmd_hdl)(struct adapter *padapter, u8 *pbuf);
	void (*pcmd_callback)(struct adapter *dev, struct cmd_obj *pcmd);
	struct adapter *padapter = context;
	struct cmd_priv *pcmdpriv = &padapter->cmdpriv;
	struct drvextra_cmd_parm *extra_parm = NULL;

	thread_enter("RTW_CMD_THREAD");

	pcmdbuf = pcmdpriv->cmd_buf;

	pcmdpriv->stop_req = 0;
	atomic_set(&pcmdpriv->cmdthd_running, true);
	/* tell the starter we are up (paired with the wait in thread start) */
	complete(&pcmdpriv->terminate_cmdthread_comp);

	while (1) {
		if (wait_for_completion_interruptible(&pcmdpriv->cmd_queue_comp)) {
			netdev_dbg(padapter->pnetdev,
				   FUNC_ADPT_FMT " wait_for_completion_interruptible(&pcmdpriv->cmd_queue_comp) return != 0, break\n",
				   FUNC_ADPT_ARG(padapter));
			break;
		}

		if (padapter->bDriverStopped || padapter->bSurpriseRemoved) {
			netdev_dbg(padapter->pnetdev,
				   "%s: DriverStopped(%d) SurpriseRemoved(%d) break at line %d\n",
				   __func__, padapter->bDriverStopped,
				   padapter->bSurpriseRemoved, __LINE__);
			break;
		}

		if (pcmdpriv->stop_req) {
			netdev_dbg(padapter->pnetdev,
				   FUNC_ADPT_FMT " stop_req:%u, break\n",
				   FUNC_ADPT_ARG(padapter),
				   pcmdpriv->stop_req);
			break;
		}

		/* spurious wakeup with nothing queued: go back to sleep */
		if (list_empty(&pcmdpriv->cmd_queue.queue))
			continue;

		if (rtw_register_cmd_alive(padapter) != _SUCCESS)
			continue;

_next:
		if (padapter->bDriverStopped || padapter->bSurpriseRemoved) {
			netdev_dbg(padapter->pnetdev,
				   "%s: DriverStopped(%d) SurpriseRemoved(%d) break at line %d\n",
				   __func__, padapter->bDriverStopped,
				   padapter->bSurpriseRemoved, __LINE__);
			break;
		}

		pcmd = rtw_dequeue_cmd(pcmdpriv);
		if (!pcmd) {
			/* queue drained: drop the "cmd alive" hold and sleep */
			rtw_unregister_cmd_alive(padapter);
			continue;
		}

		if (rtw_cmd_filter(pcmdpriv, pcmd) == _FAIL) {
			pcmd->res = H2C_DROPPED;
			goto post_process;
		}

		pcmdpriv->cmd_issued_cnt++;

		/* stage the parameters into the 4-byte-aligned cmd buffer */
		pcmd->cmdsz = round_up((pcmd->cmdsz), 4);

		memcpy(pcmdbuf, pcmd->parmbuf, pcmd->cmdsz);

		if (pcmd->cmdcode < ARRAY_SIZE(wlancmds)) {
			cmd_hdl = wlancmds[pcmd->cmdcode].h2cfuns;

			if (cmd_hdl) {
				ret = cmd_hdl(pcmd->padapter, pcmdbuf);
				pcmd->res = ret;
			}

			pcmdpriv->cmd_seq++;
		} else {
			pcmd->res = H2C_PARAMETERS_ERROR;
		}

		cmd_hdl = NULL;

post_process:

		/* notify any synchronous submitter waiting on this command */
		if (mutex_lock_interruptible(&pcmd->padapter->cmdpriv.sctx_mutex) == 0) {
			if (pcmd->sctx) {
				netdev_dbg(padapter->pnetdev,
					   FUNC_ADPT_FMT " pcmd->sctx\n",
					   FUNC_ADPT_ARG(pcmd->padapter));

				if (pcmd->res == H2C_SUCCESS)
					rtw_sctx_done(&pcmd->sctx);
				else
					rtw_sctx_done_err(&pcmd->sctx, RTW_SCTX_DONE_CMD_ERROR);
			}
			mutex_unlock(&pcmd->padapter->cmdpriv.sctx_mutex);
		}

		/* call callback function for post-processed */
		if (pcmd->cmdcode < ARRAY_SIZE(rtw_cmd_callback)) {
			pcmd_callback = rtw_cmd_callback[pcmd->cmdcode].callback;
			if (!pcmd_callback) {
				rtw_free_cmd_obj(pcmd);
			} else {
				/* todo: !!! fill rsp_buf to pcmd->rsp if (pcmd->rsp!= NULL) */
				/* the callback owns pcmd and must free it */
				pcmd_callback(pcmd->padapter, pcmd);/* need consider that free cmd_obj in rtw_cmd_callback */
			}
		} else {
			rtw_free_cmd_obj(pcmd);
		}

		flush_signals_thread();

		goto _next;
	}

	/* free all cmd_obj resources */
	do {
		pcmd = rtw_dequeue_cmd(pcmdpriv);
		if (!pcmd) {
			rtw_unregister_cmd_alive(padapter);
			break;
		}

		/* _Set_Drv_Extra may carry an extra payload buffer to free */
		if (pcmd->cmdcode == GEN_CMD_CODE(_Set_Drv_Extra)) {
			extra_parm = (struct drvextra_cmd_parm *)pcmd->parmbuf;
			if (extra_parm->pbuf && extra_parm->size > 0)
				kfree(extra_parm->pbuf);
		}

		rtw_free_cmd_obj(pcmd);
	} while (1);

	complete(&pcmdpriv->terminate_cmdthread_comp);
	atomic_set(&pcmdpriv->cmdthd_running, false);

	return 0;
}
/*
* rtw_sitesurvey_cmd(~)
* ### NOTE:#### (!!!!)
* MUST TAKE CARE THAT BEFORE CALLING THIS FUNC, YOU SHOULD HAVE LOCKED pmlmepriv->lock
*/
/*
 * rtw_sitesurvey_cmd - enqueue a scan (site survey) command.
 * @padapter:  adapter to scan on.
 * @ssid:      optional array of SSIDs to probe for (may be NULL).
 * @ssid_num:  number of entries in @ssid.
 * @ch:        optional array of channels to restrict the scan to (may be NULL).
 * @ch_num:    number of entries in @ch.
 *
 * Caller must hold pmlmepriv->lock (see the note above in the original
 * source). On success the scan timeout timer is armed; on failure the
 * _FW_UNDER_SURVEY state is cleared again.
 *
 * Return: _SUCCESS or _FAIL.
 */
u8 rtw_sitesurvey_cmd(struct adapter *padapter, struct ndis_802_11_ssid *ssid, int ssid_num,
	struct rtw_ieee80211_channel *ch, int ch_num)
{
	u8 res = _FAIL;
	struct cmd_obj *ph2c;
	struct sitesurvey_parm *psurveyPara;
	struct cmd_priv *pcmdpriv = &padapter->cmdpriv;
	struct mlme_priv *pmlmepriv = &padapter->mlmepriv;

	/* leave power save before scanning while associated */
	if (check_fwstate(pmlmepriv, _FW_LINKED))
		rtw_lps_ctrl_wk_cmd(padapter, LPS_CTRL_SCAN, 1);

	ph2c = rtw_zmalloc(sizeof(struct cmd_obj));
	if (!ph2c)
		return _FAIL;

	psurveyPara = rtw_zmalloc(sizeof(struct sitesurvey_parm));
	if (!psurveyPara) {
		kfree(ph2c);
		return _FAIL;
	}

	rtw_free_network_queue(padapter, false);

	init_h2fwcmd_w_parm_no_rsp(ph2c, psurveyPara, GEN_CMD_CODE(_SiteSurvey));

	/* psurveyPara->bsslimit = 48; */
	psurveyPara->scan_mode = pmlmepriv->scan_mode;

	/* prepare ssid list */
	if (ssid) {
		int i;

		/* NOTE(review): entries with zero length are skipped but the
		 * destination index i still advances, so ssid[] may be sparse
		 * relative to ssid_num — confirm the consumer tolerates this.
		 */
		for (i = 0; i < ssid_num && i < RTW_SSID_SCAN_AMOUNT; i++) {
			if (ssid[i].ssid_length) {
				memcpy(&psurveyPara->ssid[i], &ssid[i], sizeof(struct ndis_802_11_ssid));
				psurveyPara->ssid_num++;
			}
		}
	}

	/* prepare channel list */
	if (ch) {
		int i;

		/* same sparse-index caveat as the SSID list above */
		for (i = 0; i < ch_num && i < RTW_CHANNEL_SCAN_AMOUNT; i++) {
			if (ch[i].hw_value && !(ch[i].flags & RTW_IEEE80211_CHAN_DISABLED)) {
				memcpy(&psurveyPara->ch[i], &ch[i], sizeof(struct rtw_ieee80211_channel));
				psurveyPara->ch_num++;
			}
		}
	}

	set_fwstate(pmlmepriv, _FW_UNDER_SURVEY);

	res = rtw_enqueue_cmd(pcmdpriv, ph2c);

	if (res == _SUCCESS) {
		pmlmepriv->scan_start_time = jiffies;
		_set_timer(&pmlmepriv->scan_to_timer, SCANNING_TIMEOUT);
	} else {
		/* scan was not queued: undo the survey state */
		_clr_fwstate_(pmlmepriv, _FW_UNDER_SURVEY);
	}

	return res;
}
/*
 * Completion callback for BB/RF register read commands: frees the command
 * object and its parameter buffer directly (the rsp buffer is deliberately
 * not freed here, unlike rtw_free_cmd_obj()).
 */
void rtw_getbbrfreg_cmdrsp_callback(struct adapter *padapter, struct cmd_obj *pcmd)
{
	/* rtw_free_cmd_obj(pcmd); */
	kfree(pcmd->parmbuf);
	kfree(pcmd);
}
/*
 * rtw_createbss_cmd - enqueue a _CreateBss_CMD_ for the command thread.
 * @padapter: adapter that will start the BSS.
 *
 * The command parameter buffer is the adapter-owned dev_network structure
 * (not an allocation), which is why rtw_free_cmd_obj() deliberately skips
 * freeing parmbuf for this command code.
 *
 * Return: _SUCCESS or _FAIL.
 */
u8 rtw_createbss_cmd(struct adapter *padapter)
{
	struct cmd_priv *pcmdpriv = &padapter->cmdpriv;
	struct wlan_bssid_ex *pdev_network = &padapter->registrypriv.dev_network;
	struct cmd_obj *pcmd;

	pcmd = rtw_zmalloc(sizeof(*pcmd));
	if (!pcmd)
		return _FAIL;

	INIT_LIST_HEAD(&pcmd->list);
	pcmd->cmdcode = _CreateBss_CMD_;
	pcmd->parmbuf = (unsigned char *)pdev_network;
	pcmd->cmdsz = get_wlan_bssid_ex_sz(pdev_network);
	pcmd->rsp = NULL;
	pcmd->rspsz = 0;

	pdev_network->length = pcmd->cmdsz;

	return rtw_enqueue_cmd(pcmdpriv, pcmd);
}
/*
 * rtw_startbss_cmd - start the BSS either synchronously or via the cmd thread.
 * @padapter: adapter to start the BSS on.
 * @flags:    RTW_CMDF_DIRECTLY to run inline, RTW_CMDF_WAIT_ACK to block
 *            until the command thread acknowledges the queued command.
 *
 * With WAIT_ACK the submit context lives on this stack frame; after the
 * wait, sctx is detached under sctx_mutex if it was never consumed so the
 * command thread cannot touch a dead stack object.
 *
 * Return: _SUCCESS or _FAIL.
 */
int rtw_startbss_cmd(struct adapter *padapter, int flags)
{
	struct cmd_obj *pcmd;
	struct cmd_priv *pcmdpriv = &padapter->cmdpriv;
	struct submit_ctx sctx;
	int res = _SUCCESS;

	if (flags & RTW_CMDF_DIRECTLY) {
		/* no need to enqueue, do the cmd hdl directly and free cmd parameter */
		start_bss_network(padapter);
	} else {
		/* need enqueue, prepare cmd_obj and enqueue */
		pcmd = rtw_zmalloc(sizeof(struct cmd_obj));
		if (!pcmd) {
			res = _FAIL;
			goto exit;
		}

		INIT_LIST_HEAD(&pcmd->list);
		pcmd->cmdcode = GEN_CMD_CODE(_CreateBss);
		pcmd->parmbuf = NULL;
		pcmd->cmdsz = 0;
		pcmd->rsp = NULL;
		pcmd->rspsz = 0;

		if (flags & RTW_CMDF_WAIT_ACK) {
			pcmd->sctx = &sctx;
			/* 2000 ms timeout for the ack wait */
			rtw_sctx_init(&sctx, 2000);
		}

		res = rtw_enqueue_cmd(pcmdpriv, pcmd);

		if (res == _SUCCESS && (flags & RTW_CMDF_WAIT_ACK)) {
			rtw_sctx_wait(&sctx);
			/* detach the stack sctx if the cmd thread has not
			 * completed it yet (status still SUBMITTED)
			 */
			if (mutex_lock_interruptible(&pcmdpriv->sctx_mutex) == 0) {
				if (sctx.status == RTW_SCTX_SUBMITTED)
					pcmd->sctx = NULL;
				mutex_unlock(&pcmdpriv->sctx_mutex);
			}
		}
	}

exit:
	return res;
}
/*
 * rtw_joinbss_cmd - build and enqueue a join-BSS command for @pnetwork.
 * @padapter: adapter performing the join.
 * @pnetwork: target network scanned earlier.
 *
 * Copies the target BSS into the adapter-owned security BSS buffer,
 * rebuilds its IEs (security IEs, optional WMM IE, optional HT IEs,
 * extended capabilities), then queues a _JoinBss_CMD_ whose parmbuf points
 * at that adapter-owned buffer — which is why rtw_free_cmd_obj() does not
 * free parmbuf for this command code. The IE-rebuilding steps are order
 * dependent; do not reorder.
 *
 * Return: _SUCCESS or _FAIL.
 */
u8 rtw_joinbss_cmd(struct adapter *padapter, struct wlan_network *pnetwork)
{
	u8 res = _SUCCESS;
	uint t_len = 0;
	struct wlan_bssid_ex *psecnetwork;
	struct cmd_obj *pcmd;
	struct cmd_priv *pcmdpriv = &padapter->cmdpriv;
	struct mlme_priv *pmlmepriv = &padapter->mlmepriv;
	struct qos_priv *pqospriv = &pmlmepriv->qospriv;
	struct security_priv *psecuritypriv = &padapter->securitypriv;
	struct registry_priv *pregistrypriv = &padapter->registrypriv;
	struct ht_priv *phtpriv = &pmlmepriv->htpriv;
	enum ndis_802_11_network_infrastructure ndis_network_mode = pnetwork->network.infrastructure_mode;
	struct mlme_ext_priv *pmlmeext = &padapter->mlmeextpriv;
	struct mlme_ext_info *pmlmeinfo = &pmlmeext->mlmext_info;
	u32 tmp_len;
	u8 *ptmp = NULL;

	pcmd = rtw_zmalloc(sizeof(struct cmd_obj));
	if (!pcmd) {
		res = _FAIL;
		goto exit;
	}

	/* for ies is fix buf size */
	t_len = sizeof(struct wlan_bssid_ex);

	/* for hidden ap to set fw_state here */
	if (check_fwstate(pmlmepriv, WIFI_STATION_STATE|WIFI_ADHOC_STATE) != true) {
		switch (ndis_network_mode) {
		case Ndis802_11IBSS:
			set_fwstate(pmlmepriv, WIFI_ADHOC_STATE);
			break;
		case Ndis802_11Infrastructure:
			set_fwstate(pmlmepriv, WIFI_STATION_STATE);
			break;
		case Ndis802_11APMode:
		case Ndis802_11AutoUnknown:
		case Ndis802_11InfrastructureMax:
			break;
		}
	}

	/* stage the target network in the adapter-owned sec_bss buffer */
	psecnetwork = (struct wlan_bssid_ex *)&psecuritypriv->sec_bss;

	memset(psecnetwork, 0, t_len);

	memcpy(psecnetwork, &pnetwork->network, get_wlan_bssid_ex_sz(&pnetwork->network));

	/* snapshot the original IEs (skipping the 12-byte fixed fields);
	 * authenticator_ie[0] holds the (truncated) original ie_length
	 */
	psecuritypriv->authenticator_ie[0] = (unsigned char)psecnetwork->ie_length;

	if ((psecnetwork->ie_length-12) < (256-1))
		memcpy(&psecuritypriv->authenticator_ie[1], &psecnetwork->ies[12], psecnetwork->ie_length-12);
	else
		memcpy(&psecuritypriv->authenticator_ie[1], &psecnetwork->ies[12], (256-1));

	/* IEs are rebuilt from scratch below */
	psecnetwork->ie_length = 0;
	/* Added by Albert 2009/02/18 */
	/* If the driver wants to use the bssid to create the connection. */
	/* If not, we have to copy the connecting AP's MAC address to it so that */
	/* the driver just has the bssid information for PMKIDList searching. */

	if (!pmlmepriv->assoc_by_bssid)
		memcpy(&pmlmepriv->assoc_bssid[0], &pnetwork->network.mac_address[0], ETH_ALEN);

	psecnetwork->ie_length = rtw_restruct_sec_ie(padapter, &pnetwork->network.ies[0], &psecnetwork->ies[0], pnetwork->network.ie_length);

	pqospriv->qos_option = 0;

	if (pregistrypriv->wmm_enable) {
		tmp_len = rtw_restruct_wmm_ie(padapter, &pnetwork->network.ies[0], &psecnetwork->ies[0], pnetwork->network.ie_length, psecnetwork->ie_length);

		/* a grown length means a WMM IE was appended */
		if (psecnetwork->ie_length != tmp_len) {
			psecnetwork->ie_length = tmp_len;
			pqospriv->qos_option = 1; /* There is WMM IE in this corresp. beacon */
		} else {
			pqospriv->qos_option = 0;/* There is no WMM IE in this corresp. beacon */
		}
	}

	phtpriv->ht_option = false;
	ptmp = rtw_get_ie(&pnetwork->network.ies[12], WLAN_EID_HT_CAPABILITY, &tmp_len, pnetwork->network.ie_length-12);
	if (pregistrypriv->ht_enable && ptmp && tmp_len > 0) {
		/* Added by Albert 2010/06/23 */
		/* For the WEP mode, we will use the bg mode to do the connection to avoid some IOT issue. */
		/* Especially for Realtek 8192u SoftAP. */
		if ((padapter->securitypriv.dot11PrivacyAlgrthm != _WEP40_) &&
		    (padapter->securitypriv.dot11PrivacyAlgrthm != _WEP104_) &&
		    (padapter->securitypriv.dot11PrivacyAlgrthm != _TKIP_)) {
			rtw_ht_use_default_setting(padapter);

			rtw_build_wmm_ie_ht(padapter, &psecnetwork->ies[12], &psecnetwork->ie_length);

			/* rtw_restructure_ht_ie */
			rtw_restructure_ht_ie(padapter, &pnetwork->network.ies[12], &psecnetwork->ies[0],
									pnetwork->network.ie_length-12, &psecnetwork->ie_length,
									pnetwork->network.configuration.ds_config);
		}
	}

	rtw_append_exented_cap(padapter, &psecnetwork->ies[0], &psecnetwork->ie_length);

	pmlmeinfo->assoc_AP_vendor = check_assoc_AP(pnetwork->network.ies, pnetwork->network.ie_length);

	pcmd->cmdsz = get_wlan_bssid_ex_sz(psecnetwork);/* get cmdsz before endian conversion */

	INIT_LIST_HEAD(&pcmd->list);
	pcmd->cmdcode = _JoinBss_CMD_;/* GEN_CMD_CODE(_JoinBss) */
	pcmd->parmbuf = (unsigned char *)psecnetwork;
	pcmd->rsp = NULL;
	pcmd->rspsz = 0;

	res = rtw_enqueue_cmd(pcmdpriv, pcmd);

exit:
	return res;
}
/*
 * rtw_disassoc_cmd - disconnect from the current AP (station mode).
 * @padapter:          adapter to disconnect.
 * @deauth_timeout_ms: timeout passed to the disconnect handler.
 * @enqueue:           true to run via the command thread, false to run
 *                     the handler synchronously in this context.
 *
 * Return: _SUCCESS or _FAIL.
 */
u8 rtw_disassoc_cmd(struct adapter *padapter, u32 deauth_timeout_ms, bool enqueue) /* for sta_mode */
{
	struct cmd_priv *cmdpriv = &padapter->cmdpriv;
	struct disconnect_parm *param;
	struct cmd_obj *cmdobj;
	u8 res = _SUCCESS;

	/* prepare cmd parameter */
	param = rtw_zmalloc(sizeof(*param));
	if (!param)
		return _FAIL;
	param->deauth_timeout_ms = deauth_timeout_ms;

	if (!enqueue) {
		/* no need to enqueue, do the cmd hdl directly and free cmd parameter */
		if (disconnect_hdl(padapter, (u8 *)param) != H2C_SUCCESS)
			res = _FAIL;
		kfree(param);
		return res;
	}

	/* need enqueue, prepare cmd_obj and enqueue */
	cmdobj = rtw_zmalloc(sizeof(*cmdobj));
	if (!cmdobj) {
		kfree(param);
		return _FAIL;
	}

	init_h2fwcmd_w_parm_no_rsp(cmdobj, param, _DisConnect_CMD_);
	return rtw_enqueue_cmd(cmdpriv, cmdobj);
}
/*
 * rtw_setopmode_cmd - set the operating mode (infra/IBSS/AP).
 * @padapter:    target adapter.
 * @networktype: desired operating mode.
 * @enqueue:     true to dispatch via the command thread, false to call the
 *               handler synchronously.
 *
 * Return: _SUCCESS or _FAIL.
 */
u8 rtw_setopmode_cmd(struct adapter *padapter, enum ndis_802_11_network_infrastructure networktype, bool enqueue)
{
	struct cmd_priv *pcmdpriv = &padapter->cmdpriv;
	struct setopmode_parm *psetop;
	struct cmd_obj *ph2c;

	psetop = rtw_zmalloc(sizeof(*psetop));
	if (!psetop)
		return _FAIL;
	psetop->mode = (u8)networktype;

	if (!enqueue) {
		/* synchronous path: parameter ownership stays here */
		setopmode_hdl(padapter, (u8 *)psetop);
		kfree(psetop);
		return _SUCCESS;
	}

	ph2c = rtw_zmalloc(sizeof(*ph2c));
	if (!ph2c) {
		kfree(psetop);
		return _FAIL;
	}

	init_h2fwcmd_w_parm_no_rsp(ph2c, psetop, _SetOpMode_CMD_);
	return rtw_enqueue_cmd(pcmdpriv, ph2c);
}
/*
 * rtw_setstakey_cmd - program a per-station encryption key.
 * @padapter:    target adapter.
 * @sta:         station whose key is being set.
 * @unicast_key: nonzero to install the pairwise key, zero for the group key.
 * @enqueue:     true to dispatch via the command thread (with a response
 *               buffer), false to call the handler synchronously.
 *
 * Return: _SUCCESS or _FAIL.
 */
u8 rtw_setstakey_cmd(struct adapter *padapter, struct sta_info *sta, u8 unicast_key, bool enqueue)
{
	struct cmd_obj *ph2c;
	struct set_stakey_parm *psetstakey_para;
	struct cmd_priv *pcmdpriv = &padapter->cmdpriv;
	struct set_stakey_rsp *psetstakey_rsp = NULL;

	struct mlme_priv *pmlmepriv = &padapter->mlmepriv;
	struct security_priv *psecuritypriv = &padapter->securitypriv;
	u8 res = _SUCCESS;

	psetstakey_para = rtw_zmalloc(sizeof(struct set_stakey_parm));
	if (!psetstakey_para) {
		res = _FAIL;
		goto exit;
	}

	memcpy(psetstakey_para->addr, sta->hwaddr, ETH_ALEN);

	/* station mode uses the adapter-wide privacy algorithm; otherwise
	 * derive it per-station
	 */
	if (check_fwstate(pmlmepriv, WIFI_STATION_STATE))
		psetstakey_para->algorithm = (unsigned char)psecuritypriv->dot11PrivacyAlgrthm;
	else
		GET_ENCRY_ALGO(psecuritypriv, sta, psetstakey_para->algorithm, false);

	if (unicast_key)
		memcpy(&psetstakey_para->key, &sta->dot118021x_UncstKey, 16);
	else
		memcpy(&psetstakey_para->key, &psecuritypriv->dot118021XGrpKey[psecuritypriv->dot118021XGrpKeyid].skey, 16);

	/* jeff: set this because at least sw key is ready */
	padapter->securitypriv.busetkipkey = true;

	if (enqueue) {
		ph2c = rtw_zmalloc(sizeof(struct cmd_obj));
		if (!ph2c) {
			kfree(psetstakey_para);
			res = _FAIL;
			goto exit;
		}

		psetstakey_rsp = rtw_zmalloc(sizeof(struct set_stakey_rsp));
		if (!psetstakey_rsp) {
			kfree(ph2c);
			kfree(psetstakey_para);
			res = _FAIL;
			goto exit;
		}

		init_h2fwcmd_w_parm_no_rsp(ph2c, psetstakey_para, _SetStaKey_CMD_);
		ph2c->rsp = (u8 *)psetstakey_rsp;
		ph2c->rspsz = sizeof(struct set_stakey_rsp);
		res = rtw_enqueue_cmd(pcmdpriv, ph2c);
	} else {
		/* synchronous path: run the handler and free the parameter */
		set_stakey_hdl(padapter, (u8 *)psetstakey_para);
		kfree(psetstakey_para);
	}
exit:
	return res;
}
/*
 * rtw_clearstakey_cmd - remove a station's hardware key(s).
 * @padapter: target adapter.
 * @sta:      station whose keys are cleared.
 * @enqueue:  nonzero to queue a _NO_PRIVACY_ key command for the command
 *            thread; zero to clear the CAM entries directly here.
 *
 * Return: _SUCCESS or _FAIL.
 */
u8 rtw_clearstakey_cmd(struct adapter *padapter, struct sta_info *sta, u8 enqueue)
{
	struct cmd_obj *ph2c;
	struct set_stakey_parm *psetstakey_para;
	struct cmd_priv *pcmdpriv = &padapter->cmdpriv;
	struct set_stakey_rsp *psetstakey_rsp = NULL;
	s16 cam_id = 0;
	u8 res = _SUCCESS;

	if (!enqueue) {
		/* direct path: free every CAM slot held for this MAC */
		while ((cam_id = rtw_camid_search(padapter, sta->hwaddr, -1)) >= 0) {
			netdev_dbg(padapter->pnetdev,
				   "clear key for addr:%pM, camid:%d\n",
				   MAC_ARG(sta->hwaddr), cam_id);
			clear_cam_entry(padapter, cam_id);
			rtw_camid_free(padapter, cam_id);
		}
	} else {
		ph2c = rtw_zmalloc(sizeof(struct cmd_obj));
		if (!ph2c) {
			res = _FAIL;
			goto exit;
		}

		psetstakey_para = rtw_zmalloc(sizeof(struct set_stakey_parm));
		if (!psetstakey_para) {
			kfree(ph2c);
			res = _FAIL;
			goto exit;
		}

		psetstakey_rsp = rtw_zmalloc(sizeof(struct set_stakey_rsp));
		if (!psetstakey_rsp) {
			kfree(ph2c);
			kfree(psetstakey_para);
			res = _FAIL;
			goto exit;
		}

		init_h2fwcmd_w_parm_no_rsp(ph2c, psetstakey_para, _SetStaKey_CMD_);
		ph2c->rsp = (u8 *)psetstakey_rsp;
		ph2c->rspsz = sizeof(struct set_stakey_rsp);

		memcpy(psetstakey_para->addr, sta->hwaddr, ETH_ALEN);

		/* _NO_PRIVACY_ effectively removes the key */
		psetstakey_para->algorithm = _NO_PRIVACY_;

		res = rtw_enqueue_cmd(pcmdpriv, ph2c);
	}
exit:
	return res;
}
/*
 * rtw_addbareq_cmd - queue an ADDBA request for @tid toward @addr.
 * @padapter: target adapter.
 * @tid:      traffic identifier to aggregate.
 * @addr:     peer MAC address (ETH_ALEN bytes).
 *
 * Return: _SUCCESS or _FAIL.
 */
u8 rtw_addbareq_cmd(struct adapter *padapter, u8 tid, u8 *addr)
{
	struct cmd_priv *pcmdpriv = &padapter->cmdpriv;
	struct addBaReq_parm *parm;
	struct cmd_obj *cmdobj;

	cmdobj = rtw_zmalloc(sizeof(*cmdobj));
	if (!cmdobj)
		return _FAIL;

	parm = rtw_zmalloc(sizeof(*parm));
	if (!parm) {
		kfree(cmdobj);
		return _FAIL;
	}

	parm->tid = tid;
	memcpy(parm->addr, addr, ETH_ALEN);

	init_h2fwcmd_w_parm_no_rsp(cmdobj, parm, GEN_CMD_CODE(_AddBAReq));

	return rtw_enqueue_cmd(pcmdpriv, cmdobj);
}
/* add for CONFIG_IEEE80211W, none 11w can use it */
/* add for CONFIG_IEEE80211W, none 11w can use it */
/*
 * rtw_reset_securitypriv_cmd - queue a driver-extra command that resets
 * the adapter's security private data on the command thread.
 *
 * Return: _SUCCESS or _FAIL.
 */
u8 rtw_reset_securitypriv_cmd(struct adapter *padapter)
{
	struct cmd_priv *pcmdpriv = &padapter->cmdpriv;
	struct drvextra_cmd_parm *parm;
	struct cmd_obj *cmdobj;

	cmdobj = rtw_zmalloc(sizeof(*cmdobj));
	if (!cmdobj)
		return _FAIL;

	parm = rtw_zmalloc(sizeof(*parm));
	if (!parm) {
		kfree(cmdobj);
		return _FAIL;
	}

	parm->ec_id = RESET_SECURITYPRIV;
	parm->type = 0;
	parm->size = 0;
	parm->pbuf = NULL;

	init_h2fwcmd_w_parm_no_rsp(cmdobj, parm, GEN_CMD_CODE(_Set_Drv_Extra));

	return rtw_enqueue_cmd(pcmdpriv, cmdobj);
}
/*
 * rtw_free_assoc_resources_cmd - queue a driver-extra command that frees
 * association resources on the command thread.
 *
 * Return: _SUCCESS or _FAIL.
 */
u8 rtw_free_assoc_resources_cmd(struct adapter *padapter)
{
	struct cmd_priv *pcmdpriv = &padapter->cmdpriv;
	struct drvextra_cmd_parm *parm;
	struct cmd_obj *cmdobj;

	cmdobj = rtw_zmalloc(sizeof(*cmdobj));
	if (!cmdobj)
		return _FAIL;

	parm = rtw_zmalloc(sizeof(*parm));
	if (!parm) {
		kfree(cmdobj);
		return _FAIL;
	}

	parm->ec_id = FREE_ASSOC_RESOURCES;
	parm->type = 0;
	parm->size = 0;
	parm->pbuf = NULL;

	init_h2fwcmd_w_parm_no_rsp(cmdobj, parm, GEN_CMD_CODE(_Set_Drv_Extra));

	return rtw_enqueue_cmd(pcmdpriv, cmdobj);
}
/*
 * rtw_dynamic_chk_wk_cmd - queue the periodic dynamic-check work as a
 * driver-extra command (only the primary adapter issues this).
 *
 * Return: _SUCCESS or _FAIL.
 */
u8 rtw_dynamic_chk_wk_cmd(struct adapter *padapter)
{
	struct cmd_priv *pcmdpriv = &padapter->cmdpriv;
	struct drvextra_cmd_parm *parm;
	struct cmd_obj *cmdobj;

	/* only primary padapter does this cmd */
	cmdobj = rtw_zmalloc(sizeof(*cmdobj));
	if (!cmdobj)
		return _FAIL;

	parm = rtw_zmalloc(sizeof(*parm));
	if (!parm) {
		kfree(cmdobj);
		return _FAIL;
	}

	parm->ec_id = DYNAMIC_CHK_WK_CID;
	parm->type = 0;
	parm->size = 0;
	parm->pbuf = NULL;

	init_h2fwcmd_w_parm_no_rsp(cmdobj, parm, GEN_CMD_CODE(_Set_Drv_Extra));

	return rtw_enqueue_cmd(pcmdpriv, cmdobj);
}
/*
 * collect_traffic_statistics - snapshot tx/rx counters and compute the
 * throughput over the last sampling interval.
 * @padapter: adapter whose xmit/recv counters are sampled.
 */
static void collect_traffic_statistics(struct adapter *padapter)
{
	struct dvobj_priv *pdvobjpriv = adapter_to_dvobj(padapter);

	/* Tx */
	pdvobjpriv->traffic_stat.tx_bytes = padapter->xmitpriv.tx_bytes;
	pdvobjpriv->traffic_stat.tx_pkts = padapter->xmitpriv.tx_pkts;
	pdvobjpriv->traffic_stat.tx_drop = padapter->xmitpriv.tx_drop;

	/* Rx */
	pdvobjpriv->traffic_stat.rx_bytes = padapter->recvpriv.rx_bytes;
	pdvobjpriv->traffic_stat.rx_pkts = padapter->recvpriv.rx_pkts;
	pdvobjpriv->traffic_stat.rx_drop = padapter->recvpriv.rx_drop;

	/* Calculate throughput in last interval */
	pdvobjpriv->traffic_stat.cur_tx_bytes = pdvobjpriv->traffic_stat.tx_bytes - pdvobjpriv->traffic_stat.last_tx_bytes;
	pdvobjpriv->traffic_stat.cur_rx_bytes = pdvobjpriv->traffic_stat.rx_bytes - pdvobjpriv->traffic_stat.last_rx_bytes;
	pdvobjpriv->traffic_stat.last_tx_bytes = pdvobjpriv->traffic_stat.tx_bytes;
	pdvobjpriv->traffic_stat.last_rx_bytes = pdvobjpriv->traffic_stat.rx_bytes;

	/* bytes -> Mbps, assuming a 2-second sampling interval (the /2) —
	 * NOTE(review): confirm the caller's period matches
	 */
	pdvobjpriv->traffic_stat.cur_tx_tp = (u32)(pdvobjpriv->traffic_stat.cur_tx_bytes * 8/2/1024/1024);
	pdvobjpriv->traffic_stat.cur_rx_tp = (u32)(pdvobjpriv->traffic_stat.cur_rx_bytes * 8/2/1024/1024);
}
/*
 * traffic_status_watchdog - classify current traffic load and drive LPS.
 * @padapter:   adapter being monitored.
 * @from_timer: nonzero when called from timer context (then LPS enter/leave
 *              must be deferred to the command thread instead of called
 *              directly).
 *
 * Uses hysteresis: the busy threshold drops from 25 to 10 once traffic was
 * already flagged busy, and TrafficTransitionCount ramps up/down to debounce
 * power-save entry. Resets the per-period counters at the end.
 *
 * Return: true when the link is idle enough to enter power save.
 */
u8 traffic_status_watchdog(struct adapter *padapter, u8 from_timer)
{
	u8 bEnterPS = false;
	u16 BusyThresholdHigh = 25;
	u16 BusyThresholdLow = 10;
	u16 BusyThreshold = BusyThresholdHigh;
	u8 bBusyTraffic = false, bTxBusyTraffic = false, bRxBusyTraffic = false;
	u8 bHigherBusyTraffic = false, bHigherBusyRxTraffic = false, bHigherBusyTxTraffic = false;

	struct mlme_priv *pmlmepriv = &padapter->mlmepriv;

	collect_traffic_statistics(padapter);

	/*  */
	/*  Determine if our traffic is busy now */
	/*  */
	if ((check_fwstate(pmlmepriv, _FW_LINKED))
		/*&& !MgntInitAdapterInProgress(pMgntInfo)*/) {
		/*  if we raise bBusyTraffic in last watchdog, using lower threshold. */
		if (pmlmepriv->LinkDetectInfo.bBusyTraffic)
			BusyThreshold = BusyThresholdLow;

		if (pmlmepriv->LinkDetectInfo.NumRxOkInPeriod > BusyThreshold ||
		    pmlmepriv->LinkDetectInfo.NumTxOkInPeriod > BusyThreshold) {
			bBusyTraffic = true;

			/* classify the dominant direction */
			if (pmlmepriv->LinkDetectInfo.NumRxOkInPeriod > pmlmepriv->LinkDetectInfo.NumTxOkInPeriod)
				bRxBusyTraffic = true;
			else
				bTxBusyTraffic = true;
		}

		/*  Higher Tx/Rx data. */
		if (pmlmepriv->LinkDetectInfo.NumRxOkInPeriod > 4000 ||
		    pmlmepriv->LinkDetectInfo.NumTxOkInPeriod > 4000) {
			bHigherBusyTraffic = true;

			if (pmlmepriv->LinkDetectInfo.NumRxOkInPeriod > pmlmepriv->LinkDetectInfo.NumTxOkInPeriod)
				bHigherBusyRxTraffic = true;
			else
				bHigherBusyTxTraffic = true;
		}

		/*  check traffic for  powersaving. */
		if (((pmlmepriv->LinkDetectInfo.NumRxUnicastOkInPeriod + pmlmepriv->LinkDetectInfo.NumTxOkInPeriod) > 8) ||
		    (pmlmepriv->LinkDetectInfo.NumRxUnicastOkInPeriod > 2)) {
			bEnterPS = false;

			/* active traffic: ramp the debounce counter up (4..30) */
			if (bBusyTraffic) {
				if (pmlmepriv->LinkDetectInfo.TrafficTransitionCount <= 4)
					pmlmepriv->LinkDetectInfo.TrafficTransitionCount = 4;

				pmlmepriv->LinkDetectInfo.TrafficTransitionCount++;

				if (pmlmepriv->LinkDetectInfo.TrafficTransitionCount > 30/*TrafficTransitionLevel*/)
					pmlmepriv->LinkDetectInfo.TrafficTransitionCount = 30;
			}
		} else {
			/* quiet period: ramp the counter down; enter PS at 0 */
			if (pmlmepriv->LinkDetectInfo.TrafficTransitionCount >= 2)
				pmlmepriv->LinkDetectInfo.TrafficTransitionCount -= 2;
			else
				pmlmepriv->LinkDetectInfo.TrafficTransitionCount = 0;

			if (pmlmepriv->LinkDetectInfo.TrafficTransitionCount == 0)
				bEnterPS = true;
		}

		/*  LeisurePS only work in infra mode. */
		if (bEnterPS) {
			if (!from_timer)
				LPS_Enter(padapter, "TRAFFIC_IDLE");
		} else {
			if (!from_timer)
				LPS_Leave(padapter, "TRAFFIC_BUSY");
			else
				rtw_lps_ctrl_wk_cmd(padapter, LPS_CTRL_TRAFFIC_BUSY, 1);
		}
	} else {
		struct dvobj_priv *dvobj = adapter_to_dvobj(padapter);
		int n_assoc_iface = 0;

		if (check_fwstate(&dvobj->padapters->mlmepriv, WIFI_ASOC_STATE))
			n_assoc_iface++;

		if (!from_timer && n_assoc_iface == 0)
			LPS_Leave(padapter, "NON_LINKED");
	}

	/* reset the per-period counters for the next watchdog interval */
	pmlmepriv->LinkDetectInfo.NumRxOkInPeriod = 0;
	pmlmepriv->LinkDetectInfo.NumTxOkInPeriod = 0;
	pmlmepriv->LinkDetectInfo.NumRxUnicastOkInPeriod = 0;
	pmlmepriv->LinkDetectInfo.bBusyTraffic = bBusyTraffic;
	pmlmepriv->LinkDetectInfo.bTxBusyTraffic = bTxBusyTraffic;
	pmlmepriv->LinkDetectInfo.bRxBusyTraffic = bRxBusyTraffic;
	pmlmepriv->LinkDetectInfo.bHigherBusyTraffic = bHigherBusyTraffic;
	pmlmepriv->LinkDetectInfo.bHigherBusyRxTraffic = bHigherBusyRxTraffic;
	pmlmepriv->LinkDetectInfo.bHigherBusyTxTraffic = bHigherBusyTxTraffic;

	return bEnterPS;
}
/*
 * dynamic_chk_wk_hdl - periodic housekeeping executed on the command thread.
 * @padapter: adapter to service.
 *
 * Runs AP station-expiry checks, link/traffic watchdogs, hal dynamic
 * mechanisms, BT-coexist handling, and finally the power-save processor.
 */
static void dynamic_chk_wk_hdl(struct adapter *padapter)
{
	struct mlme_priv *pmlmepriv;

	pmlmepriv = &padapter->mlmepriv;

	/* in AP mode, expire inactive associated stations */
	if (check_fwstate(pmlmepriv, WIFI_AP_STATE))
		expire_timeout_chk(padapter);

	/* for debug purpose */
	_linked_info_dump(padapter);

	/* if (check_fwstate(pmlmepriv, _FW_UNDER_LINKING|_FW_UNDER_SURVEY) ==false) */
	{
		linked_status_chk(padapter);
		traffic_status_watchdog(padapter, 0);
	}

	rtw_hal_dm_watchdog(padapter);

	/* check_hw_pbc(padapter, pdrvextra_cmd->pbuf, pdrvextra_cmd->type); */

	/*  */
	/*  BT-Coexist */
	/*  */
	hal_btcoex_Handler(padapter);

	/* always call rtw_ps_processor() at last one. */
	if (is_primary_adapter(padapter))
		rtw_ps_processor(padapter);
}
void lps_ctrl_wk_hdl(struct adapter *padapter, u8 lps_ctrl_type);
/*
 * lps_ctrl_wk_hdl - apply one Leisure-Power-Save control event.
 * @padapter:      adapter to act on.
 * @lps_ctrl_type: LPS_CTRL_* event selecting the action.
 *
 * Ad-hoc modes never use LPS, so the function is a no-op there. Most
 * events simply force the device out of LPS; CONNECT/DISCONNECT also
 * report the media status to firmware and BT-coexist.
 */
void lps_ctrl_wk_hdl(struct adapter *padapter, u8 lps_ctrl_type)
{
	struct pwrctrl_priv *pwrpriv = adapter_to_pwrctl(padapter);
	struct mlme_priv *pmlmepriv = &padapter->mlmepriv;
	u8 mstatus;

	/* LPS does not apply to ad-hoc operation */
	if (check_fwstate(pmlmepriv, WIFI_ADHOC_MASTER_STATE) ||
	    check_fwstate(pmlmepriv, WIFI_ADHOC_STATE)) {
		return;
	}

	switch (lps_ctrl_type) {
	case LPS_CTRL_SCAN:
		hal_btcoex_ScanNotify(padapter, true);

		if (check_fwstate(pmlmepriv, _FW_LINKED)) {
			/*  connect */
			LPS_Leave(padapter, "LPS_CTRL_SCAN");
		}
		break;
	case LPS_CTRL_JOINBSS:
		LPS_Leave(padapter, "LPS_CTRL_JOINBSS");
		break;
	case LPS_CTRL_CONNECT:
		mstatus = 1;/* connect */
		/*  Reset LPS Setting */
		pwrpriv->LpsIdleCount = 0;
		rtw_hal_set_hwreg(padapter, HW_VAR_H2C_FW_JOINBSSRPT, (u8 *)(&mstatus));
		rtw_btcoex_MediaStatusNotify(padapter, mstatus);
		break;
	case LPS_CTRL_DISCONNECT:
		mstatus = 0;/* disconnect */
		rtw_btcoex_MediaStatusNotify(padapter, mstatus);
		LPS_Leave(padapter, "LPS_CTRL_DISCONNECT");
		rtw_hal_set_hwreg(padapter, HW_VAR_H2C_FW_JOINBSSRPT, (u8 *)(&mstatus));
		break;
	case LPS_CTRL_SPECIAL_PACKET:
		/* delay LPS re-entry after e.g. DHCP/EAPOL traffic */
		pwrpriv->DelayLPSLastTimeStamp = jiffies;
		hal_btcoex_SpecialPacketNotify(padapter, PACKET_DHCP);
		LPS_Leave(padapter, "LPS_CTRL_SPECIAL_PACKET");
		break;
	case LPS_CTRL_LEAVE:
		LPS_Leave(padapter, "LPS_CTRL_LEAVE");
		break;
	case LPS_CTRL_TRAFFIC_BUSY:
		LPS_Leave(padapter, "LPS_CTRL_TRAFFIC_BUSY");
		break;
	default:
		break;
	}
}
/*
 * rtw_lps_ctrl_wk_cmd - dispatch an LPS control event.
 * @padapter:      target adapter.
 * @lps_ctrl_type: LPS_CTRL_* event to deliver.
 * @enqueue:       nonzero to route via the command thread (required from
 *                 atomic/timer context), zero to handle inline.
 *
 * Return: _SUCCESS or _FAIL.
 */
u8 rtw_lps_ctrl_wk_cmd(struct adapter *padapter, u8 lps_ctrl_type, u8 enqueue)
{
	struct cmd_priv *pcmdpriv = &padapter->cmdpriv;
	struct drvextra_cmd_parm *parm;
	struct cmd_obj *cmdobj;

	if (!enqueue) {
		lps_ctrl_wk_hdl(padapter, lps_ctrl_type);
		return _SUCCESS;
	}

	cmdobj = rtw_zmalloc(sizeof(*cmdobj));
	if (!cmdobj)
		return _FAIL;

	parm = rtw_zmalloc(sizeof(*parm));
	if (!parm) {
		kfree(cmdobj);
		return _FAIL;
	}

	parm->ec_id = LPS_CTRL_WK_CID;
	parm->type = lps_ctrl_type;
	parm->size = 0;
	parm->pbuf = NULL;

	init_h2fwcmd_w_parm_no_rsp(cmdobj, parm, GEN_CMD_CODE(_Set_Drv_Extra));

	return rtw_enqueue_cmd(pcmdpriv, cmdobj);
}
/* Kick the HAL's dynamic-mechanism processing while in LPS. */
static void rtw_dm_in_lps_hdl(struct adapter *padapter)
{
rtw_hal_set_hwreg(padapter, HW_VAR_DM_IN_LPS, NULL);
}
/*
 * Queue a DM_IN_LPS_WK_CID driver-extra command so that
 * rtw_dm_in_lps_hdl() runs from the command thread.
 * Returns _SUCCESS, or _FAIL on allocation failure.
 */
u8 rtw_dm_in_lps_wk_cmd(struct adapter *padapter)
{
struct cmd_obj *ph2c;
struct drvextra_cmd_parm *pdrvextra_cmd_parm;
struct cmd_priv *pcmdpriv = &padapter->cmdpriv;
u8 res = _SUCCESS;
ph2c = rtw_zmalloc(sizeof(struct cmd_obj));
if (!ph2c) {
res = _FAIL;
goto exit;
}
pdrvextra_cmd_parm = rtw_zmalloc(sizeof(struct drvextra_cmd_parm));
if (!pdrvextra_cmd_parm) {
/* parm allocation failed: release the cmd_obj allocated above */
kfree(ph2c);
res = _FAIL;
goto exit;
}
pdrvextra_cmd_parm->ec_id = DM_IN_LPS_WK_CID;
pdrvextra_cmd_parm->type = 0;
pdrvextra_cmd_parm->size = 0;
pdrvextra_cmd_parm->pbuf = NULL;
init_h2fwcmd_w_parm_no_rsp(ph2c, pdrvextra_cmd_parm, GEN_CMD_CODE(_Set_Drv_Extra));
res = rtw_enqueue_cmd(pcmdpriv, ph2c);
exit:
return res;
}
/*
 * Apply a new DTIM period while in leisure power save.
 * Re-issues the firmware power-mode H2C when the device is currently
 * in a PS mode so the firmware picks up the new value.
 */
static void rtw_lps_change_dtim_hdl(struct adapter *padapter, u8 dtim)
{
	struct pwrctrl_priv *pwrpriv = adapter_to_pwrctl(padapter);

	/*
	 * Valid DTIM range is 1..16.  dtim is unsigned, so the old
	 * "dtim <= 0" test could only ever mean "dtim == 0" (and drew
	 * a -Wtype-limits warning); spell it that way.
	 */
	if (dtim == 0 || dtim > 16)
		return;

	/* when BT-coex owns LPS control, don't touch the setting */
	if (hal_btcoex_IsBtControlLps(padapter))
		return;

	mutex_lock(&pwrpriv->lock);
	pwrpriv->dtim = dtim;
	if (pwrpriv->fw_current_in_ps_mode && (pwrpriv->pwr_mode > PS_MODE_ACTIVE)) {
		u8 ps_mode = pwrpriv->pwr_mode;

		/* resend the current power mode so FW reloads the DTIM */
		rtw_hal_set_hwreg(padapter, HW_VAR_H2C_FW_PWRMODE, (u8 *)(&ps_mode));
	}
	mutex_unlock(&pwrpriv->lock);
}
/* Refresh the rate-adaptive mask for @psta; no-op when psta is NULL. */
static void rtw_dm_ra_mask_hdl(struct adapter *padapter, struct sta_info *psta)
{
if (psta)
set_sta_rate(padapter, psta);
}
/*
 * Queue a DM_RA_MSK_WK_CID driver-extra command carrying @psta
 * (a struct sta_info pointer smuggled through pbuf).
 * size stays 0 on purpose: rtw_drvextra_cmd_hdl() only kfree()s pbuf
 * when size > 0, and psta is not owned by this command.
 * Returns _SUCCESS, or _FAIL on allocation failure.
 */
u8 rtw_dm_ra_mask_wk_cmd(struct adapter *padapter, u8 *psta)
{
struct cmd_obj *ph2c;
struct drvextra_cmd_parm *pdrvextra_cmd_parm;
struct cmd_priv *pcmdpriv = &padapter->cmdpriv;
u8 res = _SUCCESS;
ph2c = rtw_zmalloc(sizeof(struct cmd_obj));
if (!ph2c) {
res = _FAIL;
goto exit;
}
pdrvextra_cmd_parm = rtw_zmalloc(sizeof(struct drvextra_cmd_parm));
if (!pdrvextra_cmd_parm) {
kfree(ph2c);
res = _FAIL;
goto exit;
}
pdrvextra_cmd_parm->ec_id = DM_RA_MSK_WK_CID;
pdrvextra_cmd_parm->type = 0;
pdrvextra_cmd_parm->size = 0;
pdrvextra_cmd_parm->pbuf = psta;
init_h2fwcmd_w_parm_no_rsp(ph2c, pdrvextra_cmd_parm, GEN_CMD_CODE(_Set_Drv_Extra));
res = rtw_enqueue_cmd(pcmdpriv, ph2c);
exit:
return res;
}
/*
 * Queue a POWER_SAVING_CTRL_WK_CID driver-extra command so that
 * rtw_ps_processor() runs from the command thread.
 * Returns _SUCCESS, or _FAIL on allocation failure.
 */
u8 rtw_ps_cmd(struct adapter *padapter)
{
	struct cmd_priv *cmdpriv = &padapter->cmdpriv;
	struct cmd_obj *cmd;
	struct drvextra_cmd_parm *parm;

	cmd = rtw_zmalloc(sizeof(*cmd));
	if (!cmd)
		return _FAIL;

	parm = rtw_zmalloc(sizeof(*parm));
	if (!parm) {
		/* release the half-built command */
		kfree(cmd);
		return _FAIL;
	}

	parm->ec_id = POWER_SAVING_CTRL_WK_CID;
	parm->type = 0;
	parm->size = 0;
	parm->pbuf = NULL;
	init_h2fwcmd_w_parm_no_rsp(cmd, parm, GEN_CMD_CODE(_Set_Drv_Extra));

	return rtw_enqueue_cmd(cmdpriv, cmd);
}
u32 g_wait_hiq_empty;
/*
 * Poll the hardware HI (high-priority / multicast) queue until it is
 * empty or g_wait_hiq_empty ms elapse, then clear the broadcast TIM
 * bit (BIT(0)) so beacons stop advertising buffered group traffic.
 * Re-queues itself when the queue still is not empty.
 */
static void rtw_chk_hi_queue_hdl(struct adapter *padapter)
{
struct sta_info *psta_bmc;
struct sta_priv *pstapriv = &padapter->stapriv;
unsigned long start = jiffies;
u8 empty = false;
psta_bmc = rtw_get_bcmc_stainfo(padapter);
if (!psta_bmc)
return;
rtw_hal_get_hwreg(padapter, HW_VAR_CHK_HI_QUEUE_EMPTY, &empty);
/* poll in 100 ms steps until empty or the timeout budget is spent */
while (!empty && jiffies_to_msecs(jiffies - start) < g_wait_hiq_empty) {
msleep(100);
rtw_hal_get_hwreg(padapter, HW_VAR_CHK_HI_QUEUE_EMPTY, &empty);
}
if (psta_bmc->sleepq_len == 0) {
/* NOTE(review): empty is filled by the HAL as a flag but compared
 * against _SUCCESS here — works only if the HAL reports 1/0; confirm. */
if (empty == _SUCCESS) {
bool update_tim = false;
if (pstapriv->tim_bitmap & BIT(0))
update_tim = true;
/* BIT(0) is the bc/mc AID slot in both bitmaps */
pstapriv->tim_bitmap &= ~BIT(0);
pstapriv->sta_dz_bitmap &= ~BIT(0);
if (update_tim)
update_beacon(padapter, WLAN_EID_TIM, NULL, true);
} else {/* re check again */
rtw_chk_hi_queue_cmd(padapter);
}
}
}
/*
 * Queue a CHECK_HIQ_WK_CID driver-extra command so that
 * rtw_chk_hi_queue_hdl() runs from the command thread.
 * Returns _SUCCESS, or _FAIL on allocation failure.
 */
u8 rtw_chk_hi_queue_cmd(struct adapter *padapter)
{
struct cmd_obj *ph2c;
struct drvextra_cmd_parm *pdrvextra_cmd_parm;
struct cmd_priv *pcmdpriv = &padapter->cmdpriv;
u8 res = _SUCCESS;
ph2c = rtw_zmalloc(sizeof(struct cmd_obj));
if (!ph2c) {
res = _FAIL;
goto exit;
}
pdrvextra_cmd_parm = rtw_zmalloc(sizeof(struct drvextra_cmd_parm));
if (!pdrvextra_cmd_parm) {
kfree(ph2c);
res = _FAIL;
goto exit;
}
pdrvextra_cmd_parm->ec_id = CHECK_HIQ_WK_CID;
pdrvextra_cmd_parm->type = 0;
pdrvextra_cmd_parm->size = 0;
pdrvextra_cmd_parm->pbuf = NULL;
init_h2fwcmd_w_parm_no_rsp(ph2c, pdrvextra_cmd_parm, GEN_CMD_CODE(_Set_Drv_Extra));
res = rtw_enqueue_cmd(pcmdpriv, ph2c);
exit:
return res;
}
/*
 * Layout of a BT-firmware "btinfo" report as parsed by rtw_btinfo_hdl().
 * Bitfields mirror the on-the-wire byte layout (cid, len, then status
 * bytes); rsvd_XY fields name reserved bit Y of byte X.
 * NOTE(review): bitfield order within a byte is compiler-defined; this
 * matches the layout the rest of the driver assumes — verify on new ABIs.
 */
struct btinfo {
u8 cid;
u8 len;
u8 bConnection:1;
u8 bSCOeSCO:1;
u8 bInQPage:1;
u8 bACLBusy:1;
u8 bSCOBusy:1;
u8 bHID:1;
u8 bA2DP:1;
u8 bFTP:1;
u8 retry_cnt:4;
u8 rsvd_34:1;
u8 rsvd_35:1;
u8 rsvd_36:1;
u8 rsvd_37:1;
u8 rssi;
u8 rsvd_50:1;
u8 rsvd_51:1;
u8 rsvd_52:1;
u8 rsvd_53:1;
u8 rsvd_54:1;
u8 rsvd_55:1;
u8 eSCO_SCO:1;
u8 Master_Slave:1;
u8 rsvd_6;
u8 rsvd_7;
};
/*
 * Translate a BT-firmware btinfo report in @buf (length @buf_len) into
 * the WiFi-firmware C2H format and forward it to the BT-coex core.
 * buf[0] is the command id, buf[1] the payload length; the payload
 * (len bytes) follows, so len+1 bytes starting at buf[1] are notified.
 */
static void rtw_btinfo_hdl(struct adapter *adapter, u8 *buf, u16 buf_len)
{
#define BTINFO_WIFI_FETCH 0x23
#define BTINFO_BT_AUTO_RPT 0x27
struct btinfo *info = (struct btinfo *)buf;
u8 cmd_idx;
u8 len;
cmd_idx = info->cid;
/* clamp a bogus length claim to what the buffer actually holds */
if (info->len > buf_len-2) {
rtw_warn_on(1);
len = buf_len-2;
} else {
len = info->len;
}
/* transform BT-FW btinfo to WiFI-FW C2H format and notify */
if (cmd_idx == BTINFO_WIFI_FETCH)
buf[1] = 0;
else if (cmd_idx == BTINFO_BT_AUTO_RPT)
buf[1] = 2;
hal_btcoex_BtInfoNotify(adapter, len+1, &buf[1]);
}
/*
 * Queue a C2H_WK_CID driver-extra command carrying a C2H packet of
 * @length bytes in @pbuf.  Because size > 0, rtw_drvextra_cmd_hdl()
 * will kfree() pbuf after handling — the caller cedes ownership.
 * Returns _SUCCESS, or _FAIL on allocation failure.
 */
u8 rtw_c2h_packet_wk_cmd(struct adapter *padapter, u8 *pbuf, u16 length)
{
struct cmd_obj *ph2c;
struct drvextra_cmd_parm *pdrvextra_cmd_parm;
struct cmd_priv *pcmdpriv = &padapter->cmdpriv;
u8 res = _SUCCESS;
ph2c = rtw_zmalloc(sizeof(struct cmd_obj));
if (!ph2c) {
res = _FAIL;
goto exit;
}
pdrvextra_cmd_parm = rtw_zmalloc(sizeof(struct drvextra_cmd_parm));
if (!pdrvextra_cmd_parm) {
kfree(ph2c);
res = _FAIL;
goto exit;
}
pdrvextra_cmd_parm->ec_id = C2H_WK_CID;
pdrvextra_cmd_parm->type = 0;
pdrvextra_cmd_parm->size = length;
pdrvextra_cmd_parm->pbuf = pbuf;
init_h2fwcmd_w_parm_no_rsp(ph2c, pdrvextra_cmd_parm, GEN_CMD_CODE(_Set_Drv_Extra));
res = rtw_enqueue_cmd(pcmdpriv, ph2c);
exit:
return res;
}
/* dont call R/W in this function, beucase SDIO interrupt have claim host */
/* or deadlock will happen and cause special-systemserver-died in android */
/*
 * Queue a C2H_WK_CID driver-extra command for a raw 16-byte C2H event
 * buffer (@c2h_evt may be NULL, in which case size is 0 and the
 * handler will not free anything).  When non-NULL, ownership of the
 * 16-byte buffer passes to the command handler (size > 0 → kfree).
 */
u8 rtw_c2h_wk_cmd(struct adapter *padapter, u8 *c2h_evt)
{
struct cmd_obj *ph2c;
struct drvextra_cmd_parm *pdrvextra_cmd_parm;
struct cmd_priv *pcmdpriv = &padapter->cmdpriv;
u8 res = _SUCCESS;
ph2c = rtw_zmalloc(sizeof(struct cmd_obj));
if (!ph2c) {
res = _FAIL;
goto exit;
}
pdrvextra_cmd_parm = rtw_zmalloc(sizeof(struct drvextra_cmd_parm));
if (!pdrvextra_cmd_parm) {
kfree(ph2c);
res = _FAIL;
goto exit;
}
pdrvextra_cmd_parm->ec_id = C2H_WK_CID;
pdrvextra_cmd_parm->type = 0;
/* C2H events are fixed 16-byte records in this driver */
pdrvextra_cmd_parm->size = c2h_evt?16:0;
pdrvextra_cmd_parm->pbuf = c2h_evt;
init_h2fwcmd_w_parm_no_rsp(ph2c, pdrvextra_cmd_parm, GEN_CMD_CODE(_Set_Drv_Extra));
res = rtw_enqueue_cmd(pcmdpriv, ph2c);
exit:
return res;
}
/*
 * Workqueue callback that drains the C2H (chip-to-host) event queue.
 * Each popped entry is either an already-read 16-byte event (cleared
 * and dispatched) or a placeholder meaning "read it now".  CCX reports
 * are handled inline; everything else is re-queued to the command
 * thread via rtw_c2h_wk_cmd(), which takes ownership of the buffer.
 *
 * Fix: if rtw_malloc(16) failed, the old code fell through with
 * c2h_evt == NULL and handed it to rtw_hal_c2h_valid() — a NULL
 * dereference path.  Skip the entry on allocation failure instead.
 */
static void c2h_wk_callback(struct work_struct *work)
{
	struct evt_priv *evtpriv = container_of(work, struct evt_priv, c2h_wk);
	struct adapter *adapter = container_of(evtpriv, struct adapter, evtpriv);
	u8 *c2h_evt;
	c2h_id_filter ccx_id_filter = rtw_hal_c2h_id_filter_ccx(adapter);

	evtpriv->c2h_wk_alive = true;

	while (!rtw_cbuf_empty(evtpriv->c2h_queue)) {
		c2h_evt = (u8 *)rtw_cbuf_pop(evtpriv->c2h_queue);
		if (c2h_evt) {
			/* This C2H event is read, clear it */
			c2h_evt_clear(adapter);
		} else {
			c2h_evt = rtw_malloc(16);
			if (!c2h_evt)
				continue; /* OOM: nothing to hand down */
			/* This C2H event is not read, read & clear now */
			if (c2h_evt_read_88xx(adapter, c2h_evt) != _SUCCESS) {
				kfree(c2h_evt);
				continue;
			}
		}
		/* Special pointer to trigger c2h_evt_clear only */
		if ((void *)c2h_evt == (void *)evtpriv)
			continue;
		if (!rtw_hal_c2h_valid(adapter, c2h_evt)) {
			kfree(c2h_evt);
			continue;
		}
		if (ccx_id_filter(c2h_evt)) {
			/* Handle CCX report here */
			rtw_hal_c2h_handler(adapter, c2h_evt);
			kfree(c2h_evt);
		} else {
			/* Enqueue into cmd_thread for others; cmd owns buffer */
			rtw_c2h_wk_cmd(adapter, c2h_evt);
		}
	}

	evtpriv->c2h_wk_alive = false;
}
/*
 * Dispatcher for GEN_CMD_CODE(_Set_Drv_Extra) commands: routes each
 * driver-extra command id to its worker.  After dispatch, pbuf is
 * kfree()d whenever size > 0 — workers that keep their pbuf (e.g.
 * DM_RA_MSK) must queue it with size == 0.
 */
u8 rtw_drvextra_cmd_hdl(struct adapter *padapter, unsigned char *pbuf)
{
struct drvextra_cmd_parm *pdrvextra_cmd;
if (!pbuf)
return H2C_PARAMETERS_ERROR;
pdrvextra_cmd = (struct drvextra_cmd_parm *)pbuf;
switch (pdrvextra_cmd->ec_id) {
case DYNAMIC_CHK_WK_CID:/* only primary padapter go to this cmd, but execute dynamic_chk_wk_hdl() for two interfaces */
dynamic_chk_wk_hdl(padapter);
break;
case POWER_SAVING_CTRL_WK_CID:
rtw_ps_processor(padapter);
break;
case LPS_CTRL_WK_CID:
lps_ctrl_wk_hdl(padapter, (u8)pdrvextra_cmd->type);
break;
case DM_IN_LPS_WK_CID:
rtw_dm_in_lps_hdl(padapter);
break;
case LPS_CHANGE_DTIM_CID:
rtw_lps_change_dtim_hdl(padapter, (u8)pdrvextra_cmd->type);
break;
case CHECK_HIQ_WK_CID:
rtw_chk_hi_queue_hdl(padapter);
break;
/* add for CONFIG_IEEE80211W, none 11w can use it */
case RESET_SECURITYPRIV:
rtw_reset_securitypriv(padapter);
break;
case FREE_ASSOC_RESOURCES:
rtw_free_assoc_resources(padapter, 1);
break;
case C2H_WK_CID:
rtw_hal_set_hwreg_with_buf(padapter, HW_VAR_C2H_HANDLE, pdrvextra_cmd->pbuf, pdrvextra_cmd->size);
break;
case DM_RA_MSK_WK_CID:
rtw_dm_ra_mask_hdl(padapter, (struct sta_info *)pdrvextra_cmd->pbuf);
break;
case BTINFO_WK_CID:
rtw_btinfo_hdl(padapter, pdrvextra_cmd->pbuf, pdrvextra_cmd->size);
break;
default:
break;
}
/* commands that transferred buffer ownership (size > 0) are freed here */
if (pdrvextra_cmd->pbuf && pdrvextra_cmd->size > 0)
kfree(pdrvextra_cmd->pbuf);
return H2C_SUCCESS;
}
/*
 * Completion callback for the site-survey command.  On H2C failure the
 * scan-timeout timer is armed (1 ms) so the normal timeout path cleans
 * up.  The command object is always freed here.
 */
void rtw_survey_cmd_callback(struct adapter *padapter, struct cmd_obj *pcmd)
{
struct mlme_priv *pmlmepriv = &padapter->mlmepriv;
if (pcmd->res != H2C_SUCCESS) {
/* TODO: cancel timer and do timeout handler directly... */
_set_timer(&pmlmepriv->scan_to_timer, 1);
}
/* free cmd */
rtw_free_cmd_obj(pcmd);
}
/*
 * Completion callback for the disassoc command.  On H2C failure the
 * _FW_LINKED state is restored, since the disassociation did not go
 * through.
 *
 * Fix: the old code returned early on failure without calling
 * rtw_free_cmd_obj(), leaking the command object; every sibling
 * callback in this file frees pcmd on all paths.
 */
void rtw_disassoc_cmd_callback(struct adapter *padapter, struct cmd_obj *pcmd)
{
	struct mlme_priv *pmlmepriv = &padapter->mlmepriv;

	if (pcmd->res != H2C_SUCCESS) {
		spin_lock_bh(&pmlmepriv->lock);
		set_fwstate(pmlmepriv, _FW_LINKED);
		spin_unlock_bh(&pmlmepriv->lock);
	}

	/* free cmd */
	rtw_free_cmd_obj(pcmd);
}
/*
 * Completion callback for the joinbss command.  On H2C failure the
 * association timer is armed (1 ms) so the assoc-timeout handler
 * performs cleanup.  The command object is always freed.
 */
void rtw_joinbss_cmd_callback(struct adapter *padapter, struct cmd_obj *pcmd)
{
struct mlme_priv *pmlmepriv = &padapter->mlmepriv;
if (pcmd->res != H2C_SUCCESS) {
/* TODO: cancel timer and do timeout handler directly... */
_set_timer(&pmlmepriv->assoc_timer, 1);
}
rtw_free_cmd_obj(pcmd);
}
/*
 * Completion callback for the createbss command.
 * AP mode: make sure a sta_info entry exists for our own BSS and
 * signal connect.  Otherwise: record the new network in the scanned
 * queue and copy it into cur_network.  Locking: pmlmepriv->lock is
 * held around the whole state update, with scanned_queue.lock nested
 * inside for the non-AP branch.
 */
void rtw_createbss_cmd_callback(struct adapter *padapter, struct cmd_obj *pcmd)
{
struct sta_info *psta = NULL;
struct wlan_network *pwlan = NULL;
struct mlme_priv *pmlmepriv = &padapter->mlmepriv;
struct wlan_bssid_ex *pnetwork = (struct wlan_bssid_ex *)pcmd->parmbuf;
struct wlan_network *tgt_network = &pmlmepriv->cur_network;
if (!pcmd->parmbuf)
goto exit;
/* NOTE(review): the timer armed here is immediately cancelled by the
 * del_timer_sync() below — presumably only the timer-fn side effects on
 * a concurrent expiry matter; confirm before simplifying. */
if (pcmd->res != H2C_SUCCESS)
_set_timer(&pmlmepriv->assoc_timer, 1);
del_timer_sync(&pmlmepriv->assoc_timer);
spin_lock_bh(&pmlmepriv->lock);
if (check_fwstate(pmlmepriv, WIFI_AP_STATE)) {
psta = rtw_get_stainfo(&padapter->stapriv, pnetwork->mac_address);
if (!psta) {
psta = rtw_alloc_stainfo(&padapter->stapriv, pnetwork->mac_address);
if (!psta)
goto createbss_cmd_fail;
}
rtw_indicate_connect(padapter);
} else {
pwlan = rtw_alloc_network(pmlmepriv);
spin_lock_bh(&pmlmepriv->scanned_queue.lock);
if (!pwlan) {
/* allocation failed: recycle the oldest scanned entry instead */
pwlan = rtw_get_oldest_wlan_network(&pmlmepriv->scanned_queue);
if (!pwlan) {
spin_unlock_bh(&pmlmepriv->scanned_queue.lock);
goto createbss_cmd_fail;
}
pwlan->last_scanned = jiffies;
} else {
list_add_tail(&pwlan->list, &pmlmepriv->scanned_queue.queue);
}
pnetwork->length = get_wlan_bssid_ex_sz(pnetwork);
memcpy(&pwlan->network, pnetwork, pnetwork->length);
/* pwlan->fixed = true; */
/* list_add_tail(&(pwlan->list), &pmlmepriv->scanned_queue.queue); */
/* copy pdev_network information to pmlmepriv->cur_network */
memcpy(&tgt_network->network, pnetwork, (get_wlan_bssid_ex_sz(pnetwork)));
/* reset ds_config */
/* tgt_network->network.configuration.ds_config = (u32)rtw_ch2freq(pnetwork->configuration.ds_config); */
_clr_fwstate_(pmlmepriv, _FW_UNDER_LINKING);
spin_unlock_bh(&pmlmepriv->scanned_queue.lock);
/* we will set _FW_LINKED when there is one more sat to join us (rtw_stassoc_event_callback) */
}
createbss_cmd_fail:
spin_unlock_bh(&pmlmepriv->lock);
exit:
rtw_free_cmd_obj(pcmd);
}
/*
 * Response callback for the set-sta-key command.  The sta_info lookup
 * result is currently unused beyond the NULL check — the function only
 * frees the command object either way.
 */
void rtw_setstaKey_cmdrsp_callback(struct adapter *padapter, struct cmd_obj *pcmd)
{
struct sta_priv *pstapriv = &padapter->stapriv;
struct set_stakey_rsp *psetstakey_rsp = (struct set_stakey_rsp *)(pcmd->rsp);
struct sta_info *psta = rtw_get_stainfo(pstapriv, psetstakey_rsp->addr);
if (!psta)
goto exit;
exit:
rtw_free_cmd_obj(pcmd);
}
/*
 * Response callback for the set-assoc-sta command: records the CAM id
 * assigned by the firmware into the station entry (aid and mac_id),
 * then transitions fw state from UNDER_LINKING to LINKED.
 */
void rtw_setassocsta_cmdrsp_callback(struct adapter *padapter, struct cmd_obj *pcmd)
{
struct sta_priv *pstapriv = &padapter->stapriv;
struct mlme_priv *pmlmepriv = &padapter->mlmepriv;
struct set_assocsta_parm *passocsta_parm = (struct set_assocsta_parm *)(pcmd->parmbuf);
struct set_assocsta_rsp *passocsta_rsp = (struct set_assocsta_rsp *)(pcmd->rsp);
struct sta_info *psta = rtw_get_stainfo(pstapriv, passocsta_parm->addr);
if (!psta)
goto exit;
/* both aid and mac_id take the firmware-assigned CAM slot */
psta->aid = passocsta_rsp->cam_id;
psta->mac_id = passocsta_rsp->cam_id;
spin_lock_bh(&pmlmepriv->lock);
if (check_fwstate(pmlmepriv, WIFI_MP_STATE) && check_fwstate(pmlmepriv, _FW_UNDER_LINKING))
_clr_fwstate_(pmlmepriv, _FW_UNDER_LINKING);
set_fwstate(pmlmepriv, _FW_LINKED);
spin_unlock_bh(&pmlmepriv->lock);
exit:
rtw_free_cmd_obj(pcmd);
}
| linux-master | drivers/staging/rtl8723bs/core/rtw_cmd.c |
// SPDX-License-Identifier: GPL-2.0
/******************************************************************************
*
* Copyright(c) 2007 - 2011 Realtek Corporation. All rights reserved.
*
******************************************************************************/
#include <drv_types.h>
#include <rtw_debug.h>
#include <hal_data.h>
#include <linux/jiffies.h>
/* Define global variables */
/* "fake" efuse shadows backing pseudo-test mode (no HW access);
 * BTEfuse* hold the BT-side efuse image, fakeBT* its pseudo-test twin. */
u8 fakeEfuseBank;
u32 fakeEfuseUsedBytes;
u8 fakeEfuseContent[EFUSE_MAX_HW_SIZE] = {0};
u8 fakeEfuseInitMap[EFUSE_MAX_MAP_LEN] = {0};
u8 fakeEfuseModifiedMap[EFUSE_MAX_MAP_LEN] = {0};
u32 BTEfuseUsedBytes;
u8 BTEfuseContent[EFUSE_MAX_BT_BANK][EFUSE_MAX_HW_SIZE];
u8 BTEfuseInitMap[EFUSE_BT_MAX_MAP_LEN] = {0};
u8 BTEfuseModifiedMap[EFUSE_BT_MAX_MAP_LEN] = {0};
u32 fakeBTEfuseUsedBytes;
u8 fakeBTEfuseContent[EFUSE_MAX_BT_BANK][EFUSE_MAX_HW_SIZE];
u8 fakeBTEfuseInitMap[EFUSE_BT_MAX_MAP_LEN] = {0};
u8 fakeBTEfuseModifiedMap[EFUSE_BT_MAX_MAP_LEN] = {0};
#define REG_EFUSE_CTRL 0x0030
#define EFUSE_CTRL REG_EFUSE_CTRL /* E-Fuse Control. */
/*
 * Read one byte from the emulated ("fake") efuse used in pseudo-test
 * mode.  Bank 0 selects the WiFi shadow; banks >= 1 select the
 * corresponding BT shadow.  Returns false when Offset is out of range.
 */
static bool
Efuse_Read1ByteFromFakeContent(u16 Offset, u8 *Value)
{
	u8 *bank;

	if (Offset >= EFUSE_MAX_HW_SIZE)
		return false;

	bank = (fakeEfuseBank == 0) ? fakeEfuseContent
				    : fakeBTEfuseContent[fakeEfuseBank - 1];
	*Value = bank[Offset];
	return true;
}
/*
 * Write one byte into the emulated ("fake") efuse used in pseudo-test
 * mode.  Bank selection mirrors Efuse_Read1ByteFromFakeContent().
 * Returns false when Offset is out of range.
 */
static bool
Efuse_Write1ByteToFakeContent(u16 Offset, u8 Value)
{
	u8 *bank;

	if (Offset >= EFUSE_MAX_HW_SIZE)
		return false;

	bank = (fakeEfuseBank == 0) ? fakeEfuseContent
				    : fakeBTEfuseContent[fakeEfuseBank - 1];
	bank[Offset] = Value;
	return true;
}
/*-----------------------------------------------------------------------------
* Function: Efuse_PowerSwitch
*
* Overview: When we want to enable write operation, we should change to
* pwr on state. When we stop write, we should switch to 500k mode
* and disable LDO 2.5V.
*
* Input: NONE
*
* Output: NONE
*
* Return: NONE
*
* Revised History:
* When Who Remark
* 11/17/2008 MHC Create Version 0.
*
*---------------------------------------------------------------------------*/
/* Thin HAL dispatch: toggle efuse power state (see header comment above). */
void
Efuse_PowerSwitch(
struct adapter *padapter,
u8 bWrite,
u8 PwrState)
{
padapter->HalFunc.EfusePowerSwitch(padapter, bWrite, PwrState);
}
/*-----------------------------------------------------------------------------
* Function: Efuse_GetCurrentSize
*
* Overview: Get current efuse size!!!
*
* Input: NONE
*
* Output: NONE
*
* Return: NONE
*
* Revised History:
* When Who Remark
* 11/16/2008 MHC Create Version 0.
*
*---------------------------------------------------------------------------*/
/* Thin HAL dispatch: report the number of efuse bytes already used. */
u16
Efuse_GetCurrentSize(
struct adapter *padapter,
u8 efuseType,
bool bPseudoTest)
{
return padapter->HalFunc.EfuseGetCurrentSize(padapter, efuseType,
bPseudoTest);
}
/* 11/16/2008 MH Add description. Get current efuse area enabled word!!. */
u8
Efuse_CalculateWordCnts(u8 word_en)
{
u8 word_cnts = 0;
if (!(word_en & BIT(0)))
word_cnts++; /* 0 : write enable */
if (!(word_en & BIT(1)))
word_cnts++;
if (!(word_en & BIT(2)))
word_cnts++;
if (!(word_en & BIT(3)))
word_cnts++;
return word_cnts;
}
/* */
/* Description: */
/* 1. Execute E-Fuse read byte operation according as map offset and */
/* save to E-Fuse table. */
/* 2. Referred from SD1 Richard. */
/* */
/* Assumption: */
/* 1. Boot from E-Fuse and successfully auto-load. */
/* 2. PASSIVE_LEVEL (USB interface) */
/* */
/* Created by Roger, 2008.10.21. */
/* */
/* 2008/12/12 MH 1. Reorganize code flow and reserve bytes. and add description. */
/* 2. Add efuse utilization collect. */
/* 2008/12/22 MH Read Efuse must check if we write section 1 data again!!! Sec1 */
/* write addr must be after sec5. */
/* */
/* Thin HAL dispatch: read _size_byte bytes of the logical efuse map
 * starting at _offset into pbuf (see block comment above). */
void
efuse_ReadEFuse(
struct adapter *Adapter,
u8 efuseType,
u16 _offset,
u16 _size_byte,
u8 *pbuf,
bool bPseudoTest
);
void
efuse_ReadEFuse(
struct adapter *Adapter,
u8 efuseType,
u16 _offset,
u16 _size_byte,
u8 *pbuf,
bool bPseudoTest
)
{
Adapter->HalFunc.ReadEFuse(Adapter, efuseType, _offset, _size_byte, pbuf, bPseudoTest);
}
/* Thin HAL dispatch: query an efuse layout constant (map length, real
 * content length, ...) selected by @type into *pOut. */
void
EFUSE_GetEfuseDefinition(
struct adapter *padapter,
u8 efuseType,
u8 type,
void *pOut,
bool bPseudoTest
)
{
padapter->HalFunc.EFUSEGetEfuseDefinition(padapter, efuseType, type, pOut, bPseudoTest);
}
/*-----------------------------------------------------------------------------
* Function: EFUSE_Read1Byte
*
* Overview: Copy from WMAC fot EFUSE read 1 byte.
*
* Input: NONE
*
* Output: NONE
*
* Return: NONE
*
* Revised History:
* When Who Remark
* 09/23/2008 MHC Copy from WMAC.
*
*---------------------------------------------------------------------------*/
/*
 * Read one raw efuse byte at @Address through the EFUSE_CTRL register
 * (WMAC-style sequence, see header comment above).  Returns 0xFF when
 * the address is beyond the real content length.
 */
u8
EFUSE_Read1Byte(
struct adapter *Adapter,
u16 Address)
{
u8 Bytetemp = {0x00};
u8 temp = {0x00};
u32 k = 0;
u16 contentLen = 0;
EFUSE_GetEfuseDefinition(Adapter, EFUSE_WIFI, TYPE_EFUSE_REAL_CONTENT_LEN, (void *)&contentLen, false);
if (Address < contentLen) {/* E-fuse 512Byte */
/* Write E-fuse Register address bit0~7 */
temp = Address & 0xFF;
rtw_write8(Adapter, EFUSE_CTRL+1, temp);
Bytetemp = rtw_read8(Adapter, EFUSE_CTRL+2);
/* Write E-fuse Register address bit8~9 */
temp = ((Address >> 8) & 0x03) | (Bytetemp & 0xFC);
rtw_write8(Adapter, EFUSE_CTRL+2, temp);
/* Write 0x30[31]= 0 */
Bytetemp = rtw_read8(Adapter, EFUSE_CTRL+3);
temp = Bytetemp & 0x7F;
rtw_write8(Adapter, EFUSE_CTRL+3, temp);
/* Wait Write-ready (0x30[31]= 1) */
Bytetemp = rtw_read8(Adapter, EFUSE_CTRL+3);
/* busy-poll the ready bit; give up after 1000 iterations
 * (no delay between polls in this legacy path) */
while (!(Bytetemp & 0x80)) {
Bytetemp = rtw_read8(Adapter, EFUSE_CTRL+3);
k++;
if (k == 1000)
break;
}
return rtw_read8(Adapter, EFUSE_CTRL);
} else
return 0xFF;
} /* EFUSE_Read1Byte */
/* 11/16/2008 MH Read one byte from real Efuse. */
u8
efuse_OneByteRead(
struct adapter *padapter,
u16 addr,
u8 *data,
bool bPseudoTest)
{
u32 tmpidx = 0;
u8 bResult;
u8 readbyte;
if (bPseudoTest)
return Efuse_Read1ByteFromFakeContent(addr, data);
/* <20130121, Kordan> For SMIC EFUSE specificatoin. */
/* 0x34[11]: SW force PGMEN input of efuse to high. (for the bank selected by 0x34[9:8]) */
/* PHY_SetMacReg(padapter, 0x34, BIT11, 0); */
rtw_write16(padapter, 0x34, rtw_read16(padapter, 0x34) & (~BIT11));
/* -----------------e-fuse reg ctrl --------------------------------- */
/* address */
rtw_write8(padapter, EFUSE_CTRL+1, (u8)(addr&0xff));
rtw_write8(padapter, EFUSE_CTRL+2, ((u8)((addr>>8) & 0x03)) |
(rtw_read8(padapter, EFUSE_CTRL+2)&0xFC));
/* rtw_write8(padapter, EFUSE_CTRL+3, 0x72); read cmd */
/* Write bit 32 0 */
readbyte = rtw_read8(padapter, EFUSE_CTRL+3);
rtw_write8(padapter, EFUSE_CTRL+3, (readbyte & 0x7f));
while (!(0x80 & rtw_read8(padapter, EFUSE_CTRL+3)) && (tmpidx < 1000)) {
mdelay(1);
tmpidx++;
}
if (tmpidx < 100) {
*data = rtw_read8(padapter, EFUSE_CTRL);
bResult = true;
} else {
*data = 0xff;
bResult = false;
}
return bResult;
}
/* 11/16/2008 MH Write one byte to reald Efuse. */
u8 efuse_OneByteWrite(struct adapter *padapter, u16 addr, u8 data, bool bPseudoTest)
{
u8 tmpidx = 0;
u8 bResult = false;
u32 efuseValue;
if (bPseudoTest)
return Efuse_Write1ByteToFakeContent(addr, data);
/* -----------------e-fuse reg ctrl --------------------------------- */
/* address */
efuseValue = rtw_read32(padapter, EFUSE_CTRL);
efuseValue |= (BIT21|BIT31);
efuseValue &= ~(0x3FFFF);
efuseValue |= ((addr<<8 | data) & 0x3FFFF);
/* <20130227, Kordan> 8192E MP chip A-cut had better not set 0x34[11] until B-Cut. */
/* <20130121, Kordan> For SMIC EFUSE specificatoin. */
/* 0x34[11]: SW force PGMEN input of efuse to high. (for the bank selected by 0x34[9:8]) */
/* PHY_SetMacReg(padapter, 0x34, BIT11, 1); */
rtw_write16(padapter, 0x34, rtw_read16(padapter, 0x34) | (BIT11));
rtw_write32(padapter, EFUSE_CTRL, 0x90600000|((addr<<8 | data)));
while ((0x80 & rtw_read8(padapter, EFUSE_CTRL+3)) && (tmpidx < 100)) {
mdelay(1);
tmpidx++;
}
if (tmpidx < 100)
bResult = true;
else
bResult = false;
/* disable Efuse program enable */
PHY_SetMacReg(padapter, EFUSE_TEST, BIT(11), 0);
return bResult;
}
/* Thin HAL dispatch: read one PG packet (8 data bytes) at logical
 * @offset into @data. */
int
Efuse_PgPacketRead(struct adapter *padapter,
u8 offset,
u8 *data,
bool bPseudoTest)
{
return padapter->HalFunc.Efuse_PgPacketRead(padapter, offset, data,
bPseudoTest);
}
/* Thin HAL dispatch: program one PG packet at logical @offset; word_en
 * selects which 2-byte words of @data are written (active-low bits). */
int
Efuse_PgPacketWrite(struct adapter *padapter,
u8 offset,
u8 word_en,
u8 *data,
bool bPseudoTest)
{
return padapter->HalFunc.Efuse_PgPacketWrite(padapter, offset, word_en,
data, bPseudoTest);
}
/*-----------------------------------------------------------------------------
* Function: efuse_WordEnableDataRead
*
* Overview: Read allowed word in current efuse section data.
*
* Input: NONE
*
* Output: NONE
*
* Return: NONE
*
* Revised History:
* When Who Remark
* 11/16/2008 MHC Create Version 0.
* 11/21/2008 MHC Fix Write bug when we only enable late word.
*
*---------------------------------------------------------------------------*/
/*
 * Copy the enabled 2-byte words from sourdata to targetdata.
 * Bits 0-3 of word_en are active-low: a cleared bit enables the copy
 * of the corresponding word (bytes 2*i and 2*i+1).
 */
void
efuse_WordEnableDataRead(u8 word_en,
			 u8 *sourdata,
			 u8 *targetdata)
{
	u8 word;

	for (word = 0; word < 4; word++) {
		if (word_en & (1 << word))
			continue; /* word disabled */
		targetdata[word * 2] = sourdata[word * 2];
		targetdata[word * 2 + 1] = sourdata[word * 2 + 1];
	}
}
/* Thin HAL dispatch: write the word_en-selected words of @data at
 * @efuse_addr. */
u8
Efuse_WordEnableDataWrite(struct adapter *padapter,
u16 efuse_addr,
u8 word_en,
u8 *data,
bool bPseudoTest)
{
return padapter->HalFunc.Efuse_WordEnableDataWrite(padapter, efuse_addr,
word_en, data,
bPseudoTest);
}
/*-----------------------------------------------------------------------------
* Function: Efuse_ReadAllMap
*
* Overview: Read All Efuse content
*
* Input: NONE
*
* Output: NONE
*
* Return: NONE
*
* Revised History:
* When Who Remark
* 11/11/2008 MHC Create Version 0.
*
*---------------------------------------------------------------------------*/
/* Power the efuse block on, read the whole logical map into @Efuse,
 * then power it back off (see block comment above). */
void
Efuse_ReadAllMap(
struct adapter *padapter,
u8 efuseType,
u8 *Efuse,
bool bPseudoTest);
void Efuse_ReadAllMap(struct adapter *padapter, u8 efuseType, u8 *Efuse, bool bPseudoTest)
{
u16 mapLen = 0;
Efuse_PowerSwitch(padapter, false, true);
EFUSE_GetEfuseDefinition(padapter, efuseType, TYPE_EFUSE_MAP_LEN, (void *)&mapLen, bPseudoTest);
efuse_ReadEFuse(padapter, efuseType, 0, mapLen, Efuse, bPseudoTest);
Efuse_PowerSwitch(padapter, false, false);
}
/*-----------------------------------------------------------------------------
* Function: efuse_ShadowRead1Byte
* efuse_ShadowRead2Byte
* efuse_ShadowRead4Byte
*
* Overview: Read from efuse init map by one/two/four bytes !!!!!
*
* Input: NONE
*
* Output: NONE
*
* Return: NONE
*
* Revised History:
* When Who Remark
* 11/12/2008 MHC Create Version 0.
*
*---------------------------------------------------------------------------*/
/* Read one byte from the cached efuse shadow map. */
static void efuse_ShadowRead1Byte(struct adapter *padapter, u16 Offset, u8 *Value)
{
struct eeprom_priv *pEEPROM = GET_EEPROM_EFUSE_PRIV(padapter);
*Value = pEEPROM->efuse_eeprom_data[Offset];
} /* EFUSE_ShadowRead1Byte */
/* Read Two Bytes */
static void efuse_ShadowRead2Byte(struct adapter *padapter, u16 Offset, u16 *Value)
{
struct eeprom_priv *pEEPROM = GET_EEPROM_EFUSE_PRIV(padapter);
*Value = pEEPROM->efuse_eeprom_data[Offset];
*Value |= pEEPROM->efuse_eeprom_data[Offset+1]<<8;
} /* EFUSE_ShadowRead2Byte */
/* Read Four Bytes */
static void efuse_ShadowRead4Byte(struct adapter *padapter, u16 Offset, u32 *Value)
{
struct eeprom_priv *pEEPROM = GET_EEPROM_EFUSE_PRIV(padapter);
*Value = pEEPROM->efuse_eeprom_data[Offset];
*Value |= pEEPROM->efuse_eeprom_data[Offset+1]<<8;
*Value |= pEEPROM->efuse_eeprom_data[Offset+2]<<16;
*Value |= pEEPROM->efuse_eeprom_data[Offset+3]<<24;
} /* efuse_ShadowRead4Byte */
/*-----------------------------------------------------------------------------
* Function: EFUSE_ShadowMapUpdate
*
* Overview: Transfer current EFUSE content to shadow init and modify map.
*
* Input: NONE
*
* Output: NONE
*
* Return: NONE
*
* Revised History:
* When Who Remark
* 11/13/2008 MHC Create Version 0.
*
*---------------------------------------------------------------------------*/
/* Refresh the cached efuse shadow map from hardware; when autoload
 * failed, the shadow is filled with 0xFF instead (see comment above). */
void EFUSE_ShadowMapUpdate(struct adapter *padapter, u8 efuseType, bool bPseudoTest)
{
struct eeprom_priv *pEEPROM = GET_EEPROM_EFUSE_PRIV(padapter);
u16 mapLen = 0;
EFUSE_GetEfuseDefinition(padapter, efuseType, TYPE_EFUSE_MAP_LEN, (void *)&mapLen, bPseudoTest);
if (pEEPROM->bautoload_fail_flag)
memset(pEEPROM->efuse_eeprom_data, 0xFF, mapLen);
else
Efuse_ReadAllMap(padapter, efuseType, pEEPROM->efuse_eeprom_data, bPseudoTest);
/* PlatformMoveMemory((void *)&pHalData->EfuseMap[EFUSE_MODIFY_MAP][0], */
/* void *)&pHalData->EfuseMap[EFUSE_INIT_MAP][0], mapLen); */
} /* EFUSE_ShadowMapUpdate */
/*-----------------------------------------------------------------------------
* Function: EFUSE_ShadowRead
*
* Overview: Read from efuse init map !!!!!
*
* Input: NONE
*
* Output: NONE
*
* Return: NONE
*
* Revised History:
* When Who Remark
* 11/12/2008 MHC Create Version 0.
*
*---------------------------------------------------------------------------*/
/*
 * Read 1, 2 or 4 bytes (selected by Type) from the cached efuse shadow
 * map into *Value.  Any other Type leaves *Value untouched, exactly as
 * before.
 */
void EFUSE_ShadowRead(struct adapter *padapter, u8 Type, u16 Offset, u32 *Value)
{
	switch (Type) {
	case 1:
		efuse_ShadowRead1Byte(padapter, Offset, (u8 *)Value);
		break;
	case 2:
		efuse_ShadowRead2Byte(padapter, Offset, (u16 *)Value);
		break;
	case 4:
		efuse_ShadowRead4Byte(padapter, Offset, (u32 *)Value);
		break;
	}
} /* EFUSE_ShadowRead*/
| linux-master | drivers/staging/rtl8723bs/core/rtw_efuse.c |
// SPDX-License-Identifier: GPL-2.0
/******************************************************************************
*
* Copyright(c) 2007 - 2012 Realtek Corporation. All rights reserved.
*
******************************************************************************/
#include <drv_types.h>
#include <rtw_debug.h>
#include <rtw_wifi_regd.h>
#include <hal_btcoex.h>
#include <linux/kernel.h>
#include <asm/unaligned.h>
/* Dispatch table for received management frames, indexed by frame
 * subtype.  The two zero entries keep the table aligned with the
 * 802.11 subtype numbering (reserved subtypes 8 and 9 slots here). */
static struct mlme_handler mlme_sta_tbl[] = {
{WIFI_ASSOCREQ, "OnAssocReq", &OnAssocReq},
{WIFI_ASSOCRSP, "OnAssocRsp", &OnAssocRsp},
{WIFI_REASSOCREQ, "OnReAssocReq", &OnAssocReq},
{WIFI_REASSOCRSP, "OnReAssocRsp", &OnAssocRsp},
{WIFI_PROBEREQ, "OnProbeReq", &OnProbeReq},
{WIFI_PROBERSP, "OnProbeRsp", &OnProbeRsp},
/*----------------------------------------------------------
below 2 are reserved
-----------------------------------------------------------*/
{0, "DoReserved", &DoReserved},
{0, "DoReserved", &DoReserved},
{WIFI_BEACON, "OnBeacon", &OnBeacon},
{WIFI_ATIM, "OnATIM", &OnAtim},
{WIFI_DISASSOC, "OnDisassoc", &OnDisassoc},
{WIFI_AUTH, "OnAuth", &OnAuthClient},
{WIFI_DEAUTH, "OnDeAuth", &OnDeAuth},
{WIFI_ACTION, "OnAction", &OnAction},
{WIFI_ACTION_NOACK, "OnActionNoAck", &OnAction},
};
/* Dispatch table for Action frames by 802.11 category; unsupported
 * categories fall through to DoReserved. */
static struct action_handler OnAction_tbl[] = {
{RTW_WLAN_CATEGORY_SPECTRUM_MGMT, "ACTION_SPECTRUM_MGMT", on_action_spct},
{RTW_WLAN_CATEGORY_QOS, "ACTION_QOS", &DoReserved},
{RTW_WLAN_CATEGORY_DLS, "ACTION_DLS", &DoReserved},
{RTW_WLAN_CATEGORY_BACK, "ACTION_BACK", &OnAction_back},
{RTW_WLAN_CATEGORY_PUBLIC, "ACTION_PUBLIC", on_action_public},
{RTW_WLAN_CATEGORY_RADIO_MEASUREMENT, "ACTION_RADIO_MEASUREMENT", &DoReserved},
{RTW_WLAN_CATEGORY_FT, "ACTION_FT", &DoReserved},
{RTW_WLAN_CATEGORY_HT, "ACTION_HT", &OnAction_ht},
{RTW_WLAN_CATEGORY_SA_QUERY, "ACTION_SA_QUERY", &OnAction_sa_query},
{RTW_WLAN_CATEGORY_UNPROTECTED_WNM, "ACTION_UNPROTECTED_WNM", &DoReserved},
{RTW_WLAN_CATEGORY_SELF_PROTECTED, "ACTION_SELF_PROTECTED", &DoReserved},
{RTW_WLAN_CATEGORY_WMM, "ACTION_WMM", &DoReserved},
{RTW_WLAN_CATEGORY_P2P, "ACTION_P2P", &DoReserved},
};
/* All-zero MAC address used as a placeholder BSSID/peer address. */
static u8 null_addr[ETH_ALEN] = {0, 0, 0, 0, 0, 0};
/**************************************************
OUI definitions for the vendor specific IE
***************************************************/
unsigned char RTW_WPA_OUI[] = {0x00, 0x50, 0xf2, 0x01};
unsigned char WMM_OUI[] = {0x00, 0x50, 0xf2, 0x02};
unsigned char WPS_OUI[] = {0x00, 0x50, 0xf2, 0x04};
unsigned char P2P_OUI[] = {0x50, 0x6F, 0x9A, 0x09};
unsigned char WFD_OUI[] = {0x50, 0x6F, 0x9A, 0x0A};
/* WMM vendor IE subtypes: 0x00 = information element, 0x01 = parameter element */
unsigned char WMM_INFO_OUI[] = {0x00, 0x50, 0xf2, 0x02, 0x00, 0x01};
unsigned char WMM_PARA_OUI[] = {0x00, 0x50, 0xf2, 0x02, 0x01, 0x01};
unsigned char REALTEK_96B_IE[] = {0x00, 0xe0, 0x4c, 0x02, 0x01, 0x20};
/********************************************************
ChannelPlan definitions
*********************************************************/
/* 2.4 GHz channel lists per regulatory domain index: each entry is the
 * channel array plus its length (see per-row comments). */
static struct rt_channel_plan_2g RTW_ChannelPlan2G[RT_CHANNEL_DOMAIN_2G_MAX] = {
{{1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13}, 13}, /* 0x00, RT_CHANNEL_DOMAIN_2G_WORLD , Passive scan CH 12, 13 */
{{1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13}, 13}, /* 0x01, RT_CHANNEL_DOMAIN_2G_ETSI1 */
{{1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11}, 11}, /* 0x02, RT_CHANNEL_DOMAIN_2G_FCC1 */
{{1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14}, 14}, /* 0x03, RT_CHANNEL_DOMAIN_2G_MIKK1 */
{{10, 11, 12, 13}, 4}, /* 0x04, RT_CHANNEL_DOMAIN_2G_ETSI2 */
{{1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14}, 14}, /* 0x05, RT_CHANNEL_DOMAIN_2G_GLOBAL , Passive scan CH 12, 13, 14 */
{{}, 0}, /* 0x06, RT_CHANNEL_DOMAIN_2G_NULL */
};
/* Map from each channel-plan ID to a 2.4 GHz channel-set index
 * (Index2G) into RTW_ChannelPlan2G[] above.
 */
static struct rt_channel_plan_map RTW_ChannelPlanMap[RT_CHANNEL_DOMAIN_MAX] = {
	/* 0x00 ~ 0x1F , Old Define ===== */
	{0x02},	/* 0x00, RT_CHANNEL_DOMAIN_FCC */
	{0x02},	/* 0x01, RT_CHANNEL_DOMAIN_IC */
	{0x01},	/* 0x02, RT_CHANNEL_DOMAIN_ETSI */
	{0x01},	/* 0x03, RT_CHANNEL_DOMAIN_SPAIN */
	{0x01},	/* 0x04, RT_CHANNEL_DOMAIN_FRANCE */
	{0x03},	/* 0x05, RT_CHANNEL_DOMAIN_MKK */
	{0x03},	/* 0x06, RT_CHANNEL_DOMAIN_MKK1 */
	{0x01},	/* 0x07, RT_CHANNEL_DOMAIN_ISRAEL */
	{0x03},	/* 0x08, RT_CHANNEL_DOMAIN_TELEC */
	{0x03},	/* 0x09, RT_CHANNEL_DOMAIN_GLOBAL_DOAMIN */
	{0x00},	/* 0x0A, RT_CHANNEL_DOMAIN_WORLD_WIDE_13 */
	{0x02},	/* 0x0B, RT_CHANNEL_DOMAIN_TAIWAN */
	{0x01},	/* 0x0C, RT_CHANNEL_DOMAIN_CHINA */
	{0x02},	/* 0x0D, RT_CHANNEL_DOMAIN_SINGAPORE_INDIA_MEXICO */
	{0x02},	/* 0x0E, RT_CHANNEL_DOMAIN_KOREA */
	{0x02},	/* 0x0F, RT_CHANNEL_DOMAIN_TURKEY */
	{0x01},	/* 0x10, RT_CHANNEL_DOMAIN_JAPAN */
	{0x02},	/* 0x11, RT_CHANNEL_DOMAIN_FCC_NO_DFS */
	{0x01},	/* 0x12, RT_CHANNEL_DOMAIN_JAPAN_NO_DFS */
	{0x00},	/* 0x13, RT_CHANNEL_DOMAIN_WORLD_WIDE_5G */
	{0x02},	/* 0x14, RT_CHANNEL_DOMAIN_TAIWAN_NO_DFS */
	{0x00},	/* 0x15, RT_CHANNEL_DOMAIN_ETSI_NO_DFS */
	{0x00},	/* 0x16, RT_CHANNEL_DOMAIN_KOREA_NO_DFS */
	{0x03},	/* 0x17, RT_CHANNEL_DOMAIN_JAPAN_NO_DFS */
	{0x06},	/* 0x18, RT_CHANNEL_DOMAIN_PAKISTAN_NO_DFS */
	{0x02},	/* 0x19, RT_CHANNEL_DOMAIN_TAIWAN2_NO_DFS */
	{0x00},	/* 0x1A, */
	{0x00},	/* 0x1B, */
	{0x00},	/* 0x1C, */
	{0x00},	/* 0x1D, */
	{0x00},	/* 0x1E, */
	{0x06},	/* 0x1F, RT_CHANNEL_DOMAIN_WORLD_WIDE_ONLY_5G */
	/* 0x20 ~ 0x7F , New Define ===== */
	{0x00},	/* 0x20, RT_CHANNEL_DOMAIN_WORLD_NULL */
	{0x01},	/* 0x21, RT_CHANNEL_DOMAIN_ETSI1_NULL */
	{0x02},	/* 0x22, RT_CHANNEL_DOMAIN_FCC1_NULL */
	{0x03},	/* 0x23, RT_CHANNEL_DOMAIN_MKK1_NULL */
	{0x04},	/* 0x24, RT_CHANNEL_DOMAIN_ETSI2_NULL */
	{0x02},	/* 0x25, RT_CHANNEL_DOMAIN_FCC1_FCC1 */
	{0x00},	/* 0x26, RT_CHANNEL_DOMAIN_WORLD_ETSI1 */
	{0x03},	/* 0x27, RT_CHANNEL_DOMAIN_MKK1_MKK1 */
	{0x00},	/* 0x28, RT_CHANNEL_DOMAIN_WORLD_KCC1 */
	{0x00},	/* 0x29, RT_CHANNEL_DOMAIN_WORLD_FCC2 */
	{0x00},	/* 0x2A, */
	{0x00},	/* 0x2B, */
	{0x00},	/* 0x2C, */
	{0x00},	/* 0x2D, */
	{0x00},	/* 0x2E, */
	{0x00},	/* 0x2F, */
	{0x00},	/* 0x30, RT_CHANNEL_DOMAIN_WORLD_FCC3 */
	{0x00},	/* 0x31, RT_CHANNEL_DOMAIN_WORLD_FCC4 */
	{0x00},	/* 0x32, RT_CHANNEL_DOMAIN_WORLD_FCC5 */
	{0x00},	/* 0x33, RT_CHANNEL_DOMAIN_WORLD_FCC6 */
	{0x02},	/* 0x34, RT_CHANNEL_DOMAIN_FCC1_FCC7 */
	{0x00},	/* 0x35, RT_CHANNEL_DOMAIN_WORLD_ETSI2 */
	{0x00},	/* 0x36, RT_CHANNEL_DOMAIN_WORLD_ETSI3 */
	{0x03},	/* 0x37, RT_CHANNEL_DOMAIN_MKK1_MKK2 */
	{0x03},	/* 0x38, RT_CHANNEL_DOMAIN_MKK1_MKK3 */
	{0x02},	/* 0x39, RT_CHANNEL_DOMAIN_FCC1_NCC1 */
	{0x00},	/* 0x3A, */
	{0x00},	/* 0x3B, */
	{0x00},	/* 0x3C, */
	{0x00},	/* 0x3D, */
	{0x00},	/* 0x3E, */
	{0x00},	/* 0x3F, */
	{0x02},	/* 0x40, RT_CHANNEL_DOMAIN_FCC1_NCC2 */
	{0x05},	/* 0x41, RT_CHANNEL_DOMAIN_GLOBAL_NULL */
	{0x01},	/* 0x42, RT_CHANNEL_DOMAIN_ETSI1_ETSI4 */
	{0x02},	/* 0x43, RT_CHANNEL_DOMAIN_FCC1_FCC2 */
	{0x02},	/* 0x44, RT_CHANNEL_DOMAIN_FCC1_NCC3 */
	{0x00},	/* 0x45, RT_CHANNEL_DOMAIN_WORLD_ETSI5 */
	{0x02},	/* 0x46, RT_CHANNEL_DOMAIN_FCC1_FCC8 */
	{0x00},	/* 0x47, RT_CHANNEL_DOMAIN_WORLD_ETSI6 */
	{0x00},	/* 0x48, RT_CHANNEL_DOMAIN_WORLD_ETSI7 */
	{0x00},	/* 0x49, RT_CHANNEL_DOMAIN_WORLD_ETSI8 */
	/* NOTE(review): the labels below jump from 0x49 to 0x50, but the
	 * initializers are positional, so these eight entries actually
	 * occupy indices 0x4A-0x51 — confirm against the
	 * RT_CHANNEL_DOMAIN_* enum values in the header.
	 */
	{0x00},	/* 0x50, RT_CHANNEL_DOMAIN_WORLD_ETSI9 */
	{0x00},	/* 0x51, RT_CHANNEL_DOMAIN_WORLD_ETSI10 */
	{0x00},	/* 0x52, RT_CHANNEL_DOMAIN_WORLD_ETSI11 */
	{0x02},	/* 0x53, RT_CHANNEL_DOMAIN_FCC1_NCC4 */
	{0x00},	/* 0x54, RT_CHANNEL_DOMAIN_WORLD_ETSI12 */
	{0x02},	/* 0x55, RT_CHANNEL_DOMAIN_FCC1_FCC9 */
	{0x00},	/* 0x56, RT_CHANNEL_DOMAIN_WORLD_ETSI13 */
	{0x02},	/* 0x57, RT_CHANNEL_DOMAIN_FCC1_FCC10 */
};
/* use the combination for max channel numbers */
static struct rt_channel_plan_map RTW_CHANNEL_PLAN_MAP_REALTEK_DEFINE = {0x03};
/* Search the @param ch in given @param ch_set
 * @ch_set: the given channel set (terminated by a ChannelNum of 0)
 * @ch: the given channel number
 *
 * return the index of channel_num in channel_set, -1 if not found
 */
int rtw_ch_set_search_ch(struct rt_channel_info *ch_set, const u32 ch)
{
	int i;

	for (i = 0; ch_set[i].ChannelNum != 0; i++) {
		if (ch == ch_set[i].ChannelNum)
			return i;
	}

	/*
	 * Reached the 0-terminator without a match.  (The previous
	 * post-loop test "i >= ch_set[i].ChannelNum" compared the array
	 * index to the channel number; it only behaved correctly
	 * because 2.4 GHz channel numbers always exceed their index.)
	 */
	return -1;
}
/****************************************************************************
Following are the initialization functions for WiFi MLME
*****************************************************************************/
/*
 * Program the hardware with the MLME extension's current channel,
 * bandwidth mode and secondary-channel offset.
 *
 * Always returns _SUCCESS.
 */
int init_hw_mlme_ext(struct adapter *padapter)
{
	struct mlme_ext_priv *ext = &padapter->mlmeextpriv;

	set_channel_bwmode(padapter, ext->cur_channel,
			   ext->cur_ch_offset, ext->cur_bwmode);

	return _SUCCESS;
}
/*
 * Load the default mixed (CCK + OFDM) data-rate set, the basic-rate
 * set and the default HT MCS bitmap into the MLME extension.  The
 * rate lists are 0xff-terminated.
 */
void init_mlme_default_rate_set(struct adapter *padapter)
{
	struct mlme_ext_priv *ext = &padapter->mlmeextpriv;
	unsigned char rates[NumRates] = {
		_1M_RATE_, _2M_RATE_, _5M_RATE_, _11M_RATE_,
		_6M_RATE_, _9M_RATE_, _12M_RATE_, _18M_RATE_,
		_24M_RATE_, _36M_RATE_, _48M_RATE_, _54M_RATE_, 0xff
	};
	unsigned char basic[NumRates] = {
		_1M_RATE_, _2M_RATE_, _5M_RATE_, _11M_RATE_,
		_6M_RATE_, _12M_RATE_, _24M_RATE_, 0xff,
	};
	/* 0xff,0xff: MCS 0-15 supported; byte 4 = 0x01 presumably
	 * enables MCS 32 — TODO confirm against the MCS set layout.
	 */
	unsigned char mcs[16] = {
		0xff, 0xff, 0x00, 0x00, 0x01, 0x0, 0x0, 0x0,
		0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0
	};

	memcpy(ext->datarate, rates, NumRates);
	memcpy(ext->basicrate, basic, NumRates);
	memcpy(ext->default_supported_mcs_set, mcs,
	       sizeof(ext->default_supported_mcs_set));
}
/*
 * Reset every runtime field of the MLME extension and its embedded
 * mlme_ext_info to its power-on default: sequence counters, current
 * channel/bandwidth, rate sets, scan state and the FW state machine.
 */
static void init_mlme_ext_priv_value(struct adapter *padapter)
{
	struct mlme_ext_priv *pmlmeext = &padapter->mlmeextpriv;
	struct mlme_ext_info *pmlmeinfo = &pmlmeext->mlmext_info;

	atomic_set(&pmlmeext->event_seq, 0);
	pmlmeext->mgnt_seq = 0;/* reset to zero when disconnect at client mode */
	pmlmeext->sa_query_seq = 0;
	/* 802.11w (PMF) packet numbers for tx and rx management frames */
	pmlmeext->mgnt_80211w_IPN = 0;
	pmlmeext->mgnt_80211w_IPN_rx = 0;
	/* channel defaults come from the registry (module parameters) */
	pmlmeext->cur_channel = padapter->registrypriv.channel;
	pmlmeext->cur_bwmode = CHANNEL_WIDTH_20;
	pmlmeext->cur_ch_offset = HAL_PRIME_CHNL_OFFSET_DONT_CARE;
	pmlmeext->retry = 0;

	pmlmeext->cur_wireless_mode = padapter->registrypriv.wireless_mode;

	init_mlme_default_rate_set(padapter);

	pmlmeext->tx_rate = IEEE80211_CCK_RATE_1MB;

	/* site-survey (scan) bookkeeping starts disabled and empty */
	pmlmeext->sitesurvey_res.state = SCAN_DISABLE;
	pmlmeext->sitesurvey_res.channel_idx = 0;
	pmlmeext->sitesurvey_res.bss_cnt = 0;
	pmlmeext->scan_abort = false;

	/* firmware-side association state machine back to idle */
	pmlmeinfo->state = WIFI_FW_NULL_STATE;
	pmlmeinfo->reauth_count = 0;
	pmlmeinfo->reassoc_count = 0;
	pmlmeinfo->link_count = 0;
	pmlmeinfo->auth_seq = 0;
	pmlmeinfo->auth_algo = dot11AuthAlgrthm_Open;
	pmlmeinfo->key_index = 0;
	pmlmeinfo->iv = 0;

	pmlmeinfo->enc_algo = _NO_PRIVACY_;
	pmlmeinfo->authModeToggle = 0;

	/* clear the shared-key challenge text buffer (128 bytes) */
	memset(pmlmeinfo->chg_txt, 0, 128);

	pmlmeinfo->slotTime = SHORT_SLOT_TIME;
	pmlmeinfo->preamble_mode = PREAMBLE_AUTO;

	pmlmeinfo->dialogToken = 0;

	/* sentinels: no public action frame seen yet */
	pmlmeext->action_public_rxseq = 0xffff;
	pmlmeext->action_public_dialog_token = 0xff;
}
/*
 * Return 1 if @chan appears within the first @chanset_size entries of
 * @channel_set, 0 otherwise.
 */
static int has_channel(struct rt_channel_info *channel_set,
		       u8 chanset_size,
		       u8 chan)
{
	int idx = 0;

	while (idx < chanset_size) {
		if (channel_set[idx].ChannelNum == chan)
			return 1;
		idx++;
	}

	return 0;
}
/*
 * Build the P2P channel list (@channel_list) by intersecting the
 * driver's channel set with the P2P operating-class table: for every
 * operating class, collect the channels of that class that exist in
 * @channel_set, subject to the adapter's HT/bandwidth capabilities.
 */
static void init_channel_list(struct adapter *padapter, struct rt_channel_info *channel_set,
			      u8 chanset_size,
			      struct p2p_channels *channel_list)
{
	/* {band, op_class, min_chan, max_chan, channel increment, bw};
	 * terminated by an op_class of 0.
	 */
	static const struct p2p_oper_class_map op_class[] = {
		{ IEEE80211G, 81, 1, 13, 1, BW20 },
		{ IEEE80211G, 82, 14, 14, 1, BW20 },
		{ IEEE80211A, 115, 36, 48, 4, BW20 },
		{ IEEE80211A, 116, 36, 44, 8, BW40PLUS },
		{ IEEE80211A, 117, 40, 48, 8, BW40MINUS },
		{ IEEE80211A, 124, 149, 161, 4, BW20 },
		{ IEEE80211A, 125, 149, 169, 4, BW20 },
		{ IEEE80211A, 126, 149, 157, 8, BW40PLUS },
		{ IEEE80211A, 127, 153, 161, 8, BW40MINUS },
		{ -1, 0, 0, 0, 0, BW20 }
	};

	int cla, op;

	cla = 0;

	for (op = 0; op_class[op].op_class; op++) {
		u8 ch;
		const struct p2p_oper_class_map *o = &op_class[op];
		struct p2p_reg_class *reg = NULL;

		for (ch = o->min_chan; ch <= o->max_chan; ch += o->inc) {
			/* only advertise channels we actually support */
			if (!has_channel(channel_set, chanset_size, ch))
				continue;

			/* 40 MHz classes step by 8; require HT support */
			if ((padapter->registrypriv.ht_enable == 0) && (o->inc == 8))
				continue;

			/* NOTE(review): this skips the 40 MHz operating
			 * classes when bw_mode's high nibble is NON-zero,
			 * which looks inverted — confirm the intended
			 * meaning of registrypriv.bw_mode here.
			 */
			if ((0 < (padapter->registrypriv.bw_mode & 0xf0)) &&
			    ((o->bw == BW40MINUS) || (o->bw == BW40PLUS)))
				continue;

			/* lazily open a reg_class entry on first hit */
			if (!reg) {
				reg = &channel_list->reg_class[cla];
				cla++;
				reg->reg_class = o->op_class;
				reg->channels = 0;
			}
			reg->channel[reg->channels] = ch;
			reg->channels++;
		}
	}
	channel_list->reg_classes = cla;
}
/*
 * Fill @channel_set with the 2.4 GHz channels allowed by @ChannelPlan
 * and tag each one for active or passive scanning.
 *
 * Returns the number of channels written (0 for an unknown plan or
 * when 2.4 GHz is not supported by the configured wireless mode).
 */
static u8 init_channel_set(struct adapter *padapter, u8 ChannelPlan, struct rt_channel_info *channel_set)
{
	u8 index, chanset_size = 0;
	u8 b2_4GBand = false;
	u8 Index2G = 0;

	memset(channel_set, 0, sizeof(struct rt_channel_info)*MAX_CHANNEL_NUM);

	/* reject plans outside the table, except the Realtek default */
	if (ChannelPlan >= RT_CHANNEL_DOMAIN_MAX && ChannelPlan != RT_CHANNEL_DOMAIN_REALTEK_DEFINE)
		return chanset_size;

	if (is_supported_24g(padapter->registrypriv.wireless_mode)) {
		b2_4GBand = true;
		if (ChannelPlan == RT_CHANNEL_DOMAIN_REALTEK_DEFINE)
			Index2G = RTW_CHANNEL_PLAN_MAP_REALTEK_DEFINE.Index2G;
		else
			Index2G = RTW_ChannelPlanMap[ChannelPlan].Index2G;
	}

	if (b2_4GBand) {
		for (index = 0; index < RTW_ChannelPlan2G[Index2G].Len; index++) {
			channel_set[chanset_size].ChannelNum = RTW_ChannelPlan2G[Index2G].Channel[index];

			if ((ChannelPlan == RT_CHANNEL_DOMAIN_GLOBAL_DOAMIN) ||/* Channel 1~11 is active, and 12~14 is passive */
			    (ChannelPlan == RT_CHANNEL_DOMAIN_GLOBAL_NULL)) {
				if (channel_set[chanset_size].ChannelNum >= 1 && channel_set[chanset_size].ChannelNum <= 11)
					channel_set[chanset_size].ScanType = SCAN_ACTIVE;
				else if ((channel_set[chanset_size].ChannelNum >= 12 && channel_set[chanset_size].ChannelNum <= 14))
					channel_set[chanset_size].ScanType = SCAN_PASSIVE;
			} else if (ChannelPlan == RT_CHANNEL_DOMAIN_WORLD_WIDE_13 ||
				   Index2G == RT_CHANNEL_DOMAIN_2G_WORLD) { /* channel 12~13, passive scan */
				if (channel_set[chanset_size].ChannelNum <= 11)
					channel_set[chanset_size].ScanType = SCAN_ACTIVE;
				else
					channel_set[chanset_size].ScanType = SCAN_PASSIVE;
			} else
				/* all other plans: everything actively scannable */
				channel_set[chanset_size].ScanType = SCAN_ACTIVE;

			chanset_size++;
		}
	}
	return chanset_size;
}
/*
 * One-time initialization of the MLME extension: default values,
 * timers, AP info, channel set/list derived from the channel plan,
 * and scan timing.  Marks the extension initialized when done.
 */
void init_mlme_ext_priv(struct adapter *padapter)
{
	struct registry_priv *pregistrypriv = &padapter->registrypriv;
	struct mlme_ext_priv *pmlmeext = &padapter->mlmeextpriv;
	struct mlme_priv *pmlmepriv = &padapter->mlmepriv;
	struct mlme_ext_info *pmlmeinfo = &pmlmeext->mlmext_info;

	pmlmeext->padapter = padapter;

	/* fill_fwpriv(padapter, &(pmlmeext->fwpriv)); */

	init_mlme_ext_priv_value(padapter);
	pmlmeinfo->accept_addba_req = pregistrypriv->accept_addba_req;

	init_mlme_ext_timer(padapter);

	init_mlme_ap_info(padapter);

	/* derive the usable channels from the configured channel plan,
	 * then build the P2P channel list from them
	 */
	pmlmeext->max_chan_nums = init_channel_set(padapter, pmlmepriv->ChannelPlan, pmlmeext->channel_set);
	init_channel_list(padapter, pmlmeext->channel_set, pmlmeext->max_chan_nums, &pmlmeext->channel_list);

	pmlmeext->last_scan_time = 0;
	pmlmeext->chan_scan_time = SURVEY_TO;
	pmlmeext->mlmeext_init = true;

	pmlmeext->active_keep_alive_check = true;

#ifdef DBG_FIXED_CHAN
	/* 0xFF means "no fixed debug channel configured" */
	pmlmeext->fixed_chan = 0xFF;
#endif
}
/*
 * Tear down MLME extension state.  When the driver is being stopped,
 * make sure the survey and link timers cannot fire again.
 */
void free_mlme_ext_priv(struct mlme_ext_priv *pmlmeext)
{
	struct adapter *adapt = pmlmeext->padapter;

	if (!adapt || !adapt->bDriverStopped)
		return;

	del_timer_sync(&pmlmeext->survey_timer);
	del_timer_sync(&pmlmeext->link_timer);
	/* del_timer_sync(&pmlmeext->ADDBA_timer); */
}
/*
 * Invoke @ptable->func for @precv_frame, but only when a handler is
 * installed and the frame's receiver address (addr1) is either our
 * own MAC or the broadcast address.
 */
static void _mgt_dispatcher(struct adapter *padapter, struct mlme_handler *ptable, union recv_frame *precv_frame)
{
	u8 *frame = precv_frame->u.hdr.rx_data;

	if (!ptable->func)
		return;

	/* receive the frames that ra(a1) is my address or ra(a1) is bc address. */
	if (memcmp(GetAddr1Ptr(frame), myid(&padapter->eeprompriv), ETH_ALEN) &&
	    !is_broadcast_ether_addr(GetAddr1Ptr(frame)))
		return;

	ptable->func(padapter, precv_frame);
}
/*
 * Top-level dispatcher for received 802.11 management frames.
 *
 * Filters out frames not addressed to us (unless broadcast), drops
 * retransmitted duplicates already handled for a known station,
 * selects the AP- or client-side handler for authentication frames,
 * and hands the frame to the per-subtype handler from mlme_sta_tbl.
 */
void mgt_dispatcher(struct adapter *padapter, union recv_frame *precv_frame)
{
	int index;
	struct mlme_handler *ptable;
	struct mlme_priv *pmlmepriv = &padapter->mlmepriv;
	u8 *pframe = precv_frame->u.hdr.rx_data;
	struct sta_info *psta = rtw_get_stainfo(&padapter->stapriv, GetAddr2Ptr(pframe));
	struct dvobj_priv *psdpriv = padapter->dvobj;
	struct debug_priv *pdbgpriv = &psdpriv->drv_dbg;

	if (GetFrameType(pframe) != WIFI_MGT_TYPE)
		return;

	/* receive the frames that ra(a1) is my address or ra(a1) is bc address. */
	if (memcmp(GetAddr1Ptr(pframe), myid(&padapter->eeprompriv), ETH_ALEN) &&
	    !is_broadcast_ether_addr(GetAddr1Ptr(pframe)))
		return;

	ptable = mlme_sta_tbl;

	index = GetFrameSubType(pframe) >> 4;
	if (index >= ARRAY_SIZE(mlme_sta_tbl))
		return;

	ptable += index;

	if (psta) {
		if (GetRetry(pframe) &&
		    precv_frame->u.hdr.attrib.seq_num == psta->RxMgmtFrameSeqNum) {
			/* drop the duplicate management frame */
			pdbgpriv->dbg_rx_dup_mgt_frame_drop_count++;
			return;
		}
		psta->RxMgmtFrameSeqNum = precv_frame->u.hdr.attrib.seq_num;
	}

	/*
	 * Authentication is the only subtype whose handler depends on
	 * our role: the AP side answers with OnAuth, the client side
	 * with OnAuthClient.  Every subtype then goes through the same
	 * dispatch, so the former per-subtype switch — whose arms all
	 * performed the identical _mgt_dispatcher() call — is collapsed
	 * into a single call here.  Behavior is unchanged.
	 */
	if (GetFrameSubType(pframe) == WIFI_AUTH) {
		if (check_fwstate(pmlmepriv, WIFI_AP_STATE))
			ptable->func = &OnAuth;
		else
			ptable->func = &OnAuthClient;
	}

	_mgt_dispatcher(padapter, ptable, precv_frame);
}
/****************************************************************************
Following are the callback functions for each subtype of the management frames
*****************************************************************************/
/*
 * Handle a received probe request.  Only meaningful while we are an
 * AP or ad-hoc master: check the requested SSID against our network
 * and answer with a probe response when it matches, or when it is the
 * wildcard SSID and hidden-SSID mode is off.
 *
 * Always returns _SUCCESS (the return value is not used for errors).
 */
unsigned int OnProbeReq(struct adapter *padapter, union recv_frame *precv_frame)
{
	unsigned int ielen;
	unsigned char *p;
	struct mlme_priv *pmlmepriv = &padapter->mlmepriv;
	struct mlme_ext_priv *pmlmeext = &padapter->mlmeextpriv;
	struct mlme_ext_info *pmlmeinfo = &pmlmeext->mlmext_info;
	struct wlan_bssid_ex *cur = &pmlmeinfo->network;
	u8 *pframe = precv_frame->u.hdr.rx_data;
	uint len = precv_frame->u.hdr.len;
	u8 is_valid_p2p_probereq = false;

	/* pure stations never answer probe requests */
	if (check_fwstate(pmlmepriv, WIFI_STATION_STATE))
		return _SUCCESS;

	if (check_fwstate(pmlmepriv, _FW_LINKED) == false &&
	    check_fwstate(pmlmepriv, WIFI_ADHOC_MASTER_STATE|WIFI_AP_STATE) == false)
		return _SUCCESS;

	p = rtw_get_ie(pframe + WLAN_HDR_A3_LEN + _PROBEREQ_IE_OFFSET_, WLAN_EID_SSID, (int *)&ielen,
		       len - WLAN_HDR_A3_LEN - _PROBEREQ_IE_OFFSET_);
	if (!p)
		return _SUCCESS;

	/*
	 * Ignore the request when the SSID does not match ours, or when
	 * it is the wildcard (zero-length) SSID while hidden-SSID mode
	 * is on.  (The original gated this check on is_valid_p2p_probereq
	 * — which is always false here, making that branch dead — and
	 * phrased the mismatch as the double negative
	 * "false == !memcmp(...)".)
	 */
	if ((ielen != 0 && memcmp((void *)(p+2), (void *)cur->ssid.ssid, cur->ssid.ssid_length)) ||
	    (ielen == 0 && pmlmeinfo->hidden_ssid_mode))
		return _SUCCESS;

	if ((check_fwstate(pmlmepriv, _FW_LINKED) &&
	     pmlmepriv->cur_network.join_res) ||
	    check_fwstate(pmlmepriv, WIFI_ADHOC_MASTER_STATE))
		issue_probersp(padapter, get_sa(pframe), is_valid_p2p_probereq);

	return _SUCCESS;
}
/*
 * Handle a received probe response: while a site survey is running,
 * feed the frame into the scan-result machinery; otherwise ignore it.
 * Always returns _SUCCESS.
 */
unsigned int OnProbeRsp(struct adapter *padapter, union recv_frame *precv_frame)
{
	struct mlme_ext_priv *ext = &padapter->mlmeextpriv;

	if (ext->sitesurvey_res.state == SCAN_PROCESS)
		report_survey_event(padapter, precv_frame);

	return _SUCCESS;
}
/*
 * Handle a received beacon frame.
 *
 * During a scan, beacons are just reported as survey results.  For
 * beacons from our own BSSID the behavior depends on the FW state:
 *  - WIFI_FW_AUTH_NULL: refresh the target network info/TSF and kick
 *    off client authentication;
 *  - associated station: validate the beacon against the stored AP
 *    info (disconnect if the AP changed), and periodically refresh
 *    WMM/ERP parameters and TSF tracking;
 *  - ad-hoc: refresh a known peer, or allocate a CAM entry and report
 *    a new IBSS station.
 *
 * Always returns _SUCCESS.
 */
unsigned int OnBeacon(struct adapter *padapter, union recv_frame *precv_frame)
{
	int cam_idx;
	struct sta_info *psta;
	struct mlme_ext_priv *pmlmeext = &padapter->mlmeextpriv;
	struct mlme_ext_info *pmlmeinfo = &(pmlmeext->mlmext_info);
	struct mlme_priv *pmlmepriv = &padapter->mlmepriv;
	struct sta_priv *pstapriv = &padapter->stapriv;
	u8 *pframe = precv_frame->u.hdr.rx_data;
	uint len = precv_frame->u.hdr.len;
	struct wlan_bssid_ex *pbss;
	int ret = _SUCCESS;
	u8 *p = NULL;
	u32 ielen = 0;

	/* Workaround: some beacons carry a malformed Extended Supported
	 * Rates IE whose declared length swallows the following HT IE
	 * (0x2D).  Shrink the ESR length by one so parsing can proceed.
	 */
	p = rtw_get_ie(pframe + sizeof(struct ieee80211_hdr_3addr) + _BEACON_IE_OFFSET_, WLAN_EID_EXT_SUPP_RATES, &ielen, precv_frame->u.hdr.len - sizeof(struct ieee80211_hdr_3addr) - _BEACON_IE_OFFSET_);
	if (p && ielen > 0) {
		if ((*(p + 1 + ielen) == 0x2D) && (*(p + 2 + ielen) != 0x2D))
			/* Invalid value 0x2D is detected in Extended Supported Rates (ESR) IE. Try to fix the IE length to avoid failed Beacon parsing. */
			*(p + 1) = ielen - 1;
	}

	if (pmlmeext->sitesurvey_res.state == SCAN_PROCESS) {
		report_survey_event(padapter, precv_frame);
		return _SUCCESS;
	}

	/* only beacons from the BSS we are joined to / joining matter below */
	if (!memcmp(GetAddr3Ptr(pframe), get_my_bssid(&pmlmeinfo->network), ETH_ALEN)) {
		if (pmlmeinfo->state & WIFI_FW_AUTH_NULL) {
			/* we should update current network before auth, or some IE is wrong */
			pbss = rtw_malloc(sizeof(struct wlan_bssid_ex));
			if (pbss) {
				if (collect_bss_info(padapter, precv_frame, pbss) == _SUCCESS) {
					update_network(&(pmlmepriv->cur_network.network), pbss, padapter, true);
					rtw_get_bcn_info(&(pmlmepriv->cur_network));
				}
				kfree(pbss);
			}

			/* check the vendor of the assoc AP */
			pmlmeinfo->assoc_AP_vendor = check_assoc_AP(pframe+sizeof(struct ieee80211_hdr_3addr), len-sizeof(struct ieee80211_hdr_3addr));

			/* update TSF Value */
			update_TSF(pmlmeext, pframe, len);

			/* reset for adaptive_early_32k */
			pmlmeext->adaptive_tsf_done = false;
			pmlmeext->DrvBcnEarly = 0xff;
			pmlmeext->DrvBcnTimeOut = 0xff;
			pmlmeext->bcn_cnt = 0;
			memset(pmlmeext->bcn_delay_cnt, 0, sizeof(pmlmeext->bcn_delay_cnt));
			memset(pmlmeext->bcn_delay_ratio, 0, sizeof(pmlmeext->bcn_delay_ratio));

			/* start auth */
			start_clnt_auth(padapter);

			return _SUCCESS;
		}

		if (((pmlmeinfo->state&0x03) == WIFI_FW_STATION_STATE) && (pmlmeinfo->state & WIFI_FW_ASSOC_SUCCESS)) {
			psta = rtw_get_stainfo(pstapriv, GetAddr2Ptr(pframe));
			if (psta) {
				/* disconnect if the AP's capabilities/IEs changed */
				ret = rtw_check_bcn_info(padapter, pframe, len);
				if (!ret) {
					netdev_dbg(padapter->pnetdev,
						   "ap has changed, disconnect now\n ");
					receive_disconnect(padapter,
							   pmlmeinfo->network.mac_address, 0);
					return _SUCCESS;
				}
				/* update WMM, ERP in the beacon */
				/* todo: the timer is used instead of the number of the beacon received */
				if ((sta_rx_pkts(psta) & 0xf) == 0)
					update_beacon_info(padapter, pframe, len, psta);
				adaptive_early_32k(pmlmeext, pframe, len);
			}
		} else if ((pmlmeinfo->state&0x03) == WIFI_FW_ADHOC_STATE) {
			psta = rtw_get_stainfo(pstapriv, GetAddr2Ptr(pframe));
			if (psta) {
				/* update WMM, ERP in the beacon */
				/* todo: the timer is used instead of the number of the beacon received */
				if ((sta_rx_pkts(psta) & 0xf) == 0)
					update_beacon_info(padapter, pframe, len, psta);
			} else {
				/* allocate a new CAM entry for IBSS station */
				cam_idx = allocate_fw_sta_entry(padapter);
				if (cam_idx == NUM_STA)
					goto _END_ONBEACON_;

				/* get supported rate */
				if (update_sta_support_rate(padapter, (pframe + WLAN_HDR_A3_LEN + _BEACON_IE_OFFSET_), (len - WLAN_HDR_A3_LEN - _BEACON_IE_OFFSET_), cam_idx) == _FAIL) {
					pmlmeinfo->FW_sta_info[cam_idx].status = 0;
					goto _END_ONBEACON_;
				}

				/* update TSF Value */
				update_TSF(pmlmeext, pframe, len);

				/* report sta add event */
				report_add_sta_event(padapter, GetAddr2Ptr(pframe), cam_idx);
			}
		}
	}

_END_ONBEACON_:

	return _SUCCESS;
}
/*
 * AP-side handler for received authentication frames.
 *
 * Decrypts WEP-protected auth frames in place, validates the
 * algorithm/sequence against our configured auth mode, manages the
 * per-station auth state (open system: seq 1 succeeds immediately;
 * shared key: seq 1 issues a challenge, seq 3 verifies it) and sends
 * the corresponding auth response.  On any failure a response with
 * the error status is sent from a scratch sta_info.
 *
 * Returns _SUCCESS on an accepted frame, _FAIL otherwise.
 */
unsigned int OnAuth(struct adapter *padapter, union recv_frame *precv_frame)
{
	unsigned int auth_mode, seq, ie_len;
	unsigned char *sa, *p;
	u16 algorithm;
	int status;
	/* NOTE(review): function-local static scratch buffer used to
	 * build the failure response — shared across all callers, so
	 * presumably this path is serialized; confirm.
	 */
	static struct sta_info stat;
	struct sta_info *pstat = NULL;
	struct sta_priv *pstapriv = &padapter->stapriv;
	struct security_priv *psecuritypriv = &padapter->securitypriv;
	struct mlme_ext_priv *pmlmeext = &padapter->mlmeextpriv;
	struct mlme_ext_info *pmlmeinfo = &(pmlmeext->mlmext_info);
	u8 *pframe = precv_frame->u.hdr.rx_data;
	uint len = precv_frame->u.hdr.len;
	u8 offset = 0;

	/* only meaningful while operating as an AP */
	if ((pmlmeinfo->state&0x03) != WIFI_FW_AP_STATE)
		return _FAIL;

	sa = GetAddr2Ptr(pframe);

	auth_mode = psecuritypriv->dot11AuthAlgrthm;

	if (GetPrivacy(pframe)) {
		/* shared-key seq-3 frame is WEP encrypted: set up the rx
		 * attributes and decrypt it in place; the 4-byte IV then
		 * offsets the auth fields.
		 */
		u8 *iv;
		struct rx_pkt_attrib *prxattrib = &(precv_frame->u.hdr.attrib);

		prxattrib->hdrlen = WLAN_HDR_A3_LEN;
		prxattrib->encrypt = _WEP40_;

		iv = pframe+prxattrib->hdrlen;
		prxattrib->key_index = ((iv[3]>>6)&0x3);

		prxattrib->iv_len = 4;
		prxattrib->icv_len = 4;

		rtw_wep_decrypt(padapter, (u8 *)precv_frame);

		offset = 4;
	}

	/* auth algorithm and transaction sequence number (little endian) */
	algorithm = le16_to_cpu(*(__le16 *)((SIZE_PTR)pframe + WLAN_HDR_A3_LEN + offset));
	seq = le16_to_cpu(*(__le16 *)((SIZE_PTR)pframe + WLAN_HDR_A3_LEN + offset + 2));

	/* auto mode without WEP configured degrades to open system */
	if (auth_mode == 2 &&
	    psecuritypriv->dot11PrivacyAlgrthm != _WEP40_ &&
	    psecuritypriv->dot11PrivacyAlgrthm != _WEP104_)
		auth_mode = 0;

	if ((algorithm > 0 && auth_mode == 0) ||	/* rx a shared-key auth but shared not enabled */
	    (algorithm == 0 && auth_mode == 1)) {	/* rx a open-system auth but shared-key is enabled */
		status = WLAN_STATUS_NOT_SUPPORTED_AUTH_ALG;
		goto auth_fail;
	}

	/* MAC-based access control */
	if (rtw_access_ctrl(padapter, sa) == false) {
		status = WLAN_STATUS_AP_UNABLE_TO_HANDLE_NEW_STA;
		goto auth_fail;
	}

	pstat = rtw_get_stainfo(pstapriv, sa);
	if (!pstat) {
		/* allocate a new one */
		pstat = rtw_alloc_stainfo(pstapriv, sa);
		if (!pstat) {
			status = WLAN_STATUS_AP_UNABLE_TO_HANDLE_NEW_STA;
			goto auth_fail;
		}

		pstat->state = WIFI_FW_AUTH_NULL;
		pstat->auth_seq = 0;

		/* pstat->flags = 0; */
		/* pstat->capability = 0; */
	} else {
		/* known station re-authenticating: drop it from the
		 * association list first
		 */
		spin_lock_bh(&pstapriv->asoc_list_lock);
		if (list_empty(&pstat->asoc_list) == false) {
			list_del_init(&pstat->asoc_list);
			pstapriv->asoc_list_cnt--;
			if (pstat->expire_to > 0) {
				/* TODO: STA re_auth within expire_to */
			}
		}
		spin_unlock_bh(&pstapriv->asoc_list_lock);

		if (seq == 1) {
			/* TODO: STA re_auth and auth timeout */
		}
	}

	/* track the station on the pending-auth list */
	spin_lock_bh(&pstapriv->auth_list_lock);
	if (list_empty(&pstat->auth_list)) {
		list_add_tail(&pstat->auth_list, &pstapriv->auth_list);
		pstapriv->auth_list_cnt++;
	}
	spin_unlock_bh(&pstapriv->auth_list_lock);

	if (pstat->auth_seq == 0)
		pstat->expire_to = pstapriv->auth_to;

	/* received seq must be exactly one ahead of our stored seq */
	if ((pstat->auth_seq + 1) != seq) {
		status = WLAN_STATUS_UNKNOWN_AUTH_TRANSACTION;
		goto auth_fail;
	}

	if (algorithm == 0 && (auth_mode == 0 || auth_mode == 2 || auth_mode == 3)) {
		/* open-system: a single seq-1 frame completes auth */
		if (seq == 1) {
			pstat->state &= ~WIFI_FW_AUTH_NULL;
			pstat->state |= WIFI_FW_AUTH_SUCCESS;
			pstat->expire_to = pstapriv->assoc_to;
			pstat->authalg = algorithm;
		} else {
			status = WLAN_STATUS_UNKNOWN_AUTH_TRANSACTION;
			goto auth_fail;
		}
	} else { /* shared system or auto authentication */
		if (seq == 1) {
			/* prepare for the challenging txt... */
			memset((void *)pstat->chg_txt, 78, 128);

			pstat->state &= ~WIFI_FW_AUTH_NULL;
			pstat->state |= WIFI_FW_AUTH_STATE;
			pstat->authalg = algorithm;
		} else if (seq == 3) {
			/* seq-3: verify the returned challenge text */
			p = rtw_get_ie(pframe + WLAN_HDR_A3_LEN + 4 + _AUTH_IE_OFFSET_, WLAN_EID_CHALLENGE, (int *)&ie_len,
				       len - WLAN_HDR_A3_LEN - _AUTH_IE_OFFSET_ - 4);

			if (!p || ie_len <= 0) {
				status = WLAN_STATUS_CHALLENGE_FAIL;
				goto auth_fail;
			}

			if (!memcmp((void *)(p + 2), pstat->chg_txt, 128)) {
				pstat->state &= (~WIFI_FW_AUTH_STATE);
				pstat->state |= WIFI_FW_AUTH_SUCCESS;
				/* challenging txt is correct... */
				pstat->expire_to = pstapriv->assoc_to;
			} else {
				status = WLAN_STATUS_CHALLENGE_FAIL;
				goto auth_fail;
			}
		} else {
			status = WLAN_STATUS_UNKNOWN_AUTH_TRANSACTION;
			goto auth_fail;
		}
	}

	/* Now, we are going to issue_auth... */
	pstat->auth_seq = seq + 1;

	issue_auth(padapter, pstat, (unsigned short)(WLAN_STATUS_SUCCESS));

	if (pstat->state & WIFI_FW_AUTH_SUCCESS)
		pstat->auth_seq = 0;

	return _SUCCESS;

auth_fail:

	if (pstat)
		rtw_free_stainfo(padapter, pstat);

	/* build the failure response from the static scratch entry */
	pstat = &stat;
	memset((char *)pstat, '\0', sizeof(stat));
	pstat->auth_seq = 2;
	memcpy(pstat->hwaddr, sa, 6);	/* 6 == ETH_ALEN */

	issue_auth(padapter, pstat, (unsigned short)status);

	return _FAIL;
}
/*
 * Client-side handler for authentication responses from the AP.
 *
 * On a non-zero status the auth attempt failed: for status 13
 * ("unsupported algorithm") toggle between shared and open auth (for
 * auto mode) and rearm the link timer to retry.  Otherwise:
 *  - seq 2 with open auth -> proceed to association;
 *  - seq 2 with shared auth -> store the challenge text and send the
 *    seq-3 frame;
 *  - seq 4 with shared auth -> proceed to association.
 *
 * Returns _SUCCESS when the frame was consumed, _FAIL on an auth
 * failure.
 */
unsigned int OnAuthClient(struct adapter *padapter, union recv_frame *precv_frame)
{
	unsigned int seq, len, status, offset;
	unsigned char *p;
	unsigned int go2asoc = 0;
	struct mlme_ext_priv *pmlmeext = &padapter->mlmeextpriv;
	struct mlme_ext_info *pmlmeinfo = &(pmlmeext->mlmext_info);
	u8 *pframe = precv_frame->u.hdr.rx_data;
	uint pkt_len = precv_frame->u.hdr.len;

	/* check A1 matches or not */
	if (memcmp(myid(&(padapter->eeprompriv)), get_da(pframe), ETH_ALEN))
		return _SUCCESS;

	if (!(pmlmeinfo->state & WIFI_FW_AUTH_STATE))
		return _SUCCESS;

	/* a WEP-protected frame carries a 4-byte IV before the fields */
	offset = (GetPrivacy(pframe)) ? 4 : 0;

	seq = le16_to_cpu(*(__le16 *)((SIZE_PTR)pframe + WLAN_HDR_A3_LEN + offset + 2));
	status = le16_to_cpu(*(__le16 *)((SIZE_PTR)pframe + WLAN_HDR_A3_LEN + offset + 4));

	if (status != 0) {
		if (status == 13) { /* pmlmeinfo->auth_algo == dot11AuthAlgrthm_Auto) */
			/* flip shared <-> open and let the timer retry */
			if (pmlmeinfo->auth_algo == dot11AuthAlgrthm_Shared)
				pmlmeinfo->auth_algo = dot11AuthAlgrthm_Open;
			else
				pmlmeinfo->auth_algo = dot11AuthAlgrthm_Shared;
			/* pmlmeinfo->reauth_count = 0; */
		}

		set_link_timer(pmlmeext, 1);
		goto authclnt_fail;
	}

	if (seq == 2) {
		if (pmlmeinfo->auth_algo == dot11AuthAlgrthm_Shared) {
			/* legendary shared system */
			p = rtw_get_ie(pframe + WLAN_HDR_A3_LEN + _AUTH_IE_OFFSET_, WLAN_EID_CHALLENGE, (int *)&len,
				       pkt_len - WLAN_HDR_A3_LEN - _AUTH_IE_OFFSET_);

			if (!p)
				goto authclnt_fail;

			/* copy the AP's challenge text and answer with seq 3 */
			memcpy((void *)(pmlmeinfo->chg_txt), (void *)(p + 2), len);
			pmlmeinfo->auth_seq = 3;
			issue_auth(padapter, NULL, 0);
			set_link_timer(pmlmeext, REAUTH_TO);

			return _SUCCESS;
		}

		/* open system */
		go2asoc = 1;
	} else if (seq == 4) {
		if (pmlmeinfo->auth_algo == dot11AuthAlgrthm_Shared)
			go2asoc = 1;
		else
			goto authclnt_fail;
	} else {
		/* this is also illegal */
		goto authclnt_fail;
	}

	if (go2asoc) {
		netdev_dbg(padapter->pnetdev, "auth success, start assoc\n");
		start_clnt_assoc(padapter);
		return _SUCCESS;
	}

authclnt_fail:

	/* pmlmeinfo->state &= ~(WIFI_FW_AUTH_STATE); */

	return _FAIL;
}
unsigned int OnAssocReq(struct adapter *padapter, union recv_frame *precv_frame)
{
u16 capab_info;
struct rtw_ieee802_11_elems elems;
struct sta_info *pstat;
unsigned char *p, *pos, *wpa_ie;
unsigned char WMM_IE[] = {0x00, 0x50, 0xf2, 0x02, 0x00, 0x01};
int i, ie_len, wpa_ie_len, left;
unsigned char supportRate[16];
int supportRateNum;
unsigned short status = WLAN_STATUS_SUCCESS;
unsigned short frame_type, ie_offset = 0;
struct mlme_priv *pmlmepriv = &padapter->mlmepriv;
struct security_priv *psecuritypriv = &padapter->securitypriv;
struct mlme_ext_priv *pmlmeext = &padapter->mlmeextpriv;
struct mlme_ext_info *pmlmeinfo = &(pmlmeext->mlmext_info);
struct wlan_bssid_ex *cur = &(pmlmeinfo->network);
struct sta_priv *pstapriv = &padapter->stapriv;
u8 *pframe = precv_frame->u.hdr.rx_data;
uint pkt_len = precv_frame->u.hdr.len;
if ((pmlmeinfo->state&0x03) != WIFI_FW_AP_STATE)
return _FAIL;
frame_type = GetFrameSubType(pframe);
if (frame_type == WIFI_ASSOCREQ)
ie_offset = _ASOCREQ_IE_OFFSET_;
else /* WIFI_REASSOCREQ */
ie_offset = _REASOCREQ_IE_OFFSET_;
if (pkt_len < sizeof(struct ieee80211_hdr_3addr) + ie_offset)
return _FAIL;
pstat = rtw_get_stainfo(pstapriv, GetAddr2Ptr(pframe));
if (!pstat) {
status = WLAN_REASON_CLASS2_FRAME_FROM_NONAUTH_STA;
goto asoc_class2_error;
}
capab_info = get_unaligned_le16(pframe + WLAN_HDR_A3_LEN);
/* capab_info = le16_to_cpu(*(unsigned short *)(pframe + WLAN_HDR_A3_LEN)); */
left = pkt_len - (sizeof(struct ieee80211_hdr_3addr) + ie_offset);
pos = pframe + (sizeof(struct ieee80211_hdr_3addr) + ie_offset);
/* check if this stat has been successfully authenticated/assocated */
if (!((pstat->state) & WIFI_FW_AUTH_SUCCESS)) {
if (!((pstat->state) & WIFI_FW_ASSOC_SUCCESS)) {
status = WLAN_REASON_CLASS2_FRAME_FROM_NONAUTH_STA;
goto asoc_class2_error;
} else {
pstat->state &= (~WIFI_FW_ASSOC_SUCCESS);
pstat->state |= WIFI_FW_ASSOC_STATE;
}
} else {
pstat->state &= (~WIFI_FW_AUTH_SUCCESS);
pstat->state |= WIFI_FW_ASSOC_STATE;
}
pstat->capability = capab_info;
/* now parse all ieee802_11 ie to point to elems */
if (rtw_ieee802_11_parse_elems(pos, left, &elems, 1) == ParseFailed ||
!elems.ssid) {
status = WLAN_STATUS_CHALLENGE_FAIL;
goto OnAssocReqFail;
}
/* now we should check all the fields... */
/* checking SSID */
p = rtw_get_ie(pframe + WLAN_HDR_A3_LEN + ie_offset, WLAN_EID_SSID, &ie_len,
pkt_len - WLAN_HDR_A3_LEN - ie_offset);
if (!p || ie_len == 0) {
/* broadcast ssid, however it is not allowed in assocreq */
status = WLAN_STATUS_CHALLENGE_FAIL;
goto OnAssocReqFail;
} else {
/* check if ssid match */
if (memcmp((void *)(p+2), cur->ssid.ssid, cur->ssid.ssid_length))
status = WLAN_STATUS_CHALLENGE_FAIL;
if (ie_len != cur->ssid.ssid_length)
status = WLAN_STATUS_CHALLENGE_FAIL;
}
if (status != WLAN_STATUS_SUCCESS)
goto OnAssocReqFail;
/* check if the supported rate is ok */
p = rtw_get_ie(pframe + WLAN_HDR_A3_LEN + ie_offset, WLAN_EID_SUPP_RATES, &ie_len, pkt_len - WLAN_HDR_A3_LEN - ie_offset);
if (!p) {
/* use our own rate set as statoin used */
/* memcpy(supportRate, AP_BSSRATE, AP_BSSRATE_LEN); */
/* supportRateNum = AP_BSSRATE_LEN; */
status = WLAN_STATUS_CHALLENGE_FAIL;
goto OnAssocReqFail;
} else {
memcpy(supportRate, p+2, ie_len);
supportRateNum = ie_len;
p = rtw_get_ie(pframe + WLAN_HDR_A3_LEN + ie_offset, WLAN_EID_EXT_SUPP_RATES, &ie_len,
pkt_len - WLAN_HDR_A3_LEN - ie_offset);
if (p) {
if (supportRateNum <= sizeof(supportRate)) {
memcpy(supportRate+supportRateNum, p+2, ie_len);
supportRateNum += ie_len;
}
}
}
/* todo: mask supportRate between AP & STA -> move to update raid */
/* get_matched_rate(pmlmeext, supportRate, &supportRateNum, 0); */
/* update station supportRate */
pstat->bssratelen = supportRateNum;
memcpy(pstat->bssrateset, supportRate, supportRateNum);
UpdateBrateTblForSoftAP(pstat->bssrateset, pstat->bssratelen);
/* check RSN/WPA/WPS */
pstat->dot8021xalg = 0;
pstat->wpa_psk = 0;
pstat->wpa_group_cipher = 0;
pstat->wpa2_group_cipher = 0;
pstat->wpa_pairwise_cipher = 0;
pstat->wpa2_pairwise_cipher = 0;
memset(pstat->wpa_ie, 0, sizeof(pstat->wpa_ie));
if ((psecuritypriv->wpa_psk & BIT(1)) && elems.rsn_ie) {
int group_cipher = 0, pairwise_cipher = 0;
wpa_ie = elems.rsn_ie;
wpa_ie_len = elems.rsn_ie_len;
if (rtw_parse_wpa2_ie(wpa_ie-2, wpa_ie_len+2, &group_cipher, &pairwise_cipher, NULL) == _SUCCESS) {
pstat->dot8021xalg = 1;/* psk, todo:802.1x */
pstat->wpa_psk |= BIT(1);
pstat->wpa2_group_cipher = group_cipher&psecuritypriv->wpa2_group_cipher;
pstat->wpa2_pairwise_cipher = pairwise_cipher&psecuritypriv->wpa2_pairwise_cipher;
if (!pstat->wpa2_group_cipher)
status = WLAN_STATUS_INVALID_GROUP_CIPHER;
if (!pstat->wpa2_pairwise_cipher)
status = WLAN_STATUS_INVALID_PAIRWISE_CIPHER;
} else {
status = WLAN_STATUS_INVALID_IE;
}
} else if ((psecuritypriv->wpa_psk & BIT(0)) && elems.wpa_ie) {
int group_cipher = 0, pairwise_cipher = 0;
wpa_ie = elems.wpa_ie;
wpa_ie_len = elems.wpa_ie_len;
if (rtw_parse_wpa_ie(wpa_ie-2, wpa_ie_len+2, &group_cipher, &pairwise_cipher, NULL) == _SUCCESS) {
pstat->dot8021xalg = 1;/* psk, todo:802.1x */
pstat->wpa_psk |= BIT(0);
pstat->wpa_group_cipher = group_cipher&psecuritypriv->wpa_group_cipher;
pstat->wpa_pairwise_cipher = pairwise_cipher&psecuritypriv->wpa_pairwise_cipher;
if (!pstat->wpa_group_cipher)
status = WLAN_STATUS_INVALID_GROUP_CIPHER;
if (!pstat->wpa_pairwise_cipher)
status = WLAN_STATUS_INVALID_PAIRWISE_CIPHER;
} else {
status = WLAN_STATUS_INVALID_IE;
}
} else {
wpa_ie = NULL;
wpa_ie_len = 0;
}
if (status != WLAN_STATUS_SUCCESS)
goto OnAssocReqFail;
pstat->flags &= ~(WLAN_STA_WPS | WLAN_STA_MAYBE_WPS);
if (!wpa_ie) {
if (elems.wps_ie) {
pstat->flags |= WLAN_STA_WPS;
/* wpabuf_free(sta->wps_ie); */
/* sta->wps_ie = wpabuf_alloc_copy(elems.wps_ie + 4, */
/* elems.wps_ie_len - 4); */
} else {
pstat->flags |= WLAN_STA_MAYBE_WPS;
}
/* AP support WPA/RSN, and sta is going to do WPS, but AP is not ready */
/* that the selected registrar of AP is _FLASE */
if ((psecuritypriv->wpa_psk > 0)
&& (pstat->flags & (WLAN_STA_WPS|WLAN_STA_MAYBE_WPS))) {
if (pmlmepriv->wps_beacon_ie) {
u8 selected_registrar = 0;
rtw_get_wps_attr_content(pmlmepriv->wps_beacon_ie, pmlmepriv->wps_beacon_ie_len, WPS_ATTR_SELECTED_REGISTRAR, &selected_registrar, NULL);
if (!selected_registrar) {
status = WLAN_STATUS_AP_UNABLE_TO_HANDLE_NEW_STA;
goto OnAssocReqFail;
}
}
}
} else {
int copy_len;
if (psecuritypriv->wpa_psk == 0) {
status = WLAN_STATUS_INVALID_IE;
goto OnAssocReqFail;
}
if (elems.wps_ie) {
pstat->flags |= WLAN_STA_WPS;
copy_len = 0;
} else {
copy_len = ((wpa_ie_len+2) > sizeof(pstat->wpa_ie)) ? (sizeof(pstat->wpa_ie)):(wpa_ie_len+2);
}
if (copy_len > 0)
memcpy(pstat->wpa_ie, wpa_ie-2, copy_len);
}
/* check if there is WMM IE & support WWM-PS */
pstat->flags &= ~WLAN_STA_WME;
pstat->qos_option = 0;
pstat->qos_info = 0;
pstat->has_legacy_ac = true;
pstat->uapsd_vo = 0;
pstat->uapsd_vi = 0;
pstat->uapsd_be = 0;
pstat->uapsd_bk = 0;
if (pmlmepriv->qospriv.qos_option) {
p = pframe + WLAN_HDR_A3_LEN + ie_offset; ie_len = 0;
for (;;) {
p = rtw_get_ie(p, WLAN_EID_VENDOR_SPECIFIC, &ie_len, pkt_len - WLAN_HDR_A3_LEN - ie_offset);
if (p) {
if (!memcmp(p+2, WMM_IE, 6)) {
pstat->flags |= WLAN_STA_WME;
pstat->qos_option = 1;
pstat->qos_info = *(p+8);
pstat->max_sp_len = (pstat->qos_info>>5)&0x3;
if ((pstat->qos_info&0xf) != 0xf)
pstat->has_legacy_ac = true;
else
pstat->has_legacy_ac = false;
if (pstat->qos_info&0xf) {
if (pstat->qos_info&BIT(0))
pstat->uapsd_vo = BIT(0)|BIT(1);
else
pstat->uapsd_vo = 0;
if (pstat->qos_info&BIT(1))
pstat->uapsd_vi = BIT(0)|BIT(1);
else
pstat->uapsd_vi = 0;
if (pstat->qos_info&BIT(2))
pstat->uapsd_bk = BIT(0)|BIT(1);
else
pstat->uapsd_bk = 0;
if (pstat->qos_info&BIT(3))
pstat->uapsd_be = BIT(0)|BIT(1);
else
pstat->uapsd_be = 0;
}
break;
}
} else {
break;
}
p = p + ie_len + 2;
}
}
/* save HT capabilities in the sta object */
memset(&pstat->htpriv.ht_cap, 0, sizeof(struct ieee80211_ht_cap));
if (elems.ht_capabilities && elems.ht_capabilities_len >= sizeof(struct ieee80211_ht_cap)) {
pstat->flags |= WLAN_STA_HT;
pstat->flags |= WLAN_STA_WME;
memcpy(&pstat->htpriv.ht_cap, elems.ht_capabilities, sizeof(struct ieee80211_ht_cap));
} else
pstat->flags &= ~WLAN_STA_HT;
if ((pmlmepriv->htpriv.ht_option == false) && (pstat->flags&WLAN_STA_HT)) {
status = WLAN_STATUS_CHALLENGE_FAIL;
goto OnAssocReqFail;
}
if ((pstat->flags & WLAN_STA_HT) &&
((pstat->wpa2_pairwise_cipher&WPA_CIPHER_TKIP) ||
(pstat->wpa_pairwise_cipher&WPA_CIPHER_TKIP))) {
/* status = WLAN_STATUS_CIPHER_SUITE_REJECTED; */
/* goto OnAssocReqFail; */
}
pstat->flags |= WLAN_STA_NONERP;
for (i = 0; i < pstat->bssratelen; i++) {
if ((pstat->bssrateset[i] & 0x7f) > 22) {
pstat->flags &= ~WLAN_STA_NONERP;
break;
}
}
if (pstat->capability & WLAN_CAPABILITY_SHORT_PREAMBLE)
pstat->flags |= WLAN_STA_SHORT_PREAMBLE;
else
pstat->flags &= ~WLAN_STA_SHORT_PREAMBLE;
if (status != WLAN_STATUS_SUCCESS)
goto OnAssocReqFail;
/* TODO: identify_proprietary_vendor_ie(); */
/* Realtek proprietary IE */
/* identify if this is Broadcom sta */
/* identify if this is ralink sta */
/* Customer proprietary IE */
/* get a unique AID */
if (pstat->aid == 0) {
for (pstat->aid = 1; pstat->aid <= NUM_STA; pstat->aid++)
if (!pstapriv->sta_aid[pstat->aid - 1])
break;
/* if (pstat->aid > NUM_STA) { */
if (pstat->aid > pstapriv->max_num_sta) {
pstat->aid = 0;
status = WLAN_STATUS_AP_UNABLE_TO_HANDLE_NEW_STA;
goto OnAssocReqFail;
} else {
pstapriv->sta_aid[pstat->aid - 1] = pstat;
}
}
pstat->state &= (~WIFI_FW_ASSOC_STATE);
pstat->state |= WIFI_FW_ASSOC_SUCCESS;
spin_lock_bh(&pstapriv->auth_list_lock);
if (!list_empty(&pstat->auth_list)) {
list_del_init(&pstat->auth_list);
pstapriv->auth_list_cnt--;
}
spin_unlock_bh(&pstapriv->auth_list_lock);
spin_lock_bh(&pstapriv->asoc_list_lock);
if (list_empty(&pstat->asoc_list)) {
pstat->expire_to = pstapriv->expire_to;
list_add_tail(&pstat->asoc_list, &pstapriv->asoc_list);
pstapriv->asoc_list_cnt++;
}
spin_unlock_bh(&pstapriv->asoc_list_lock);
/* now the station is qualified to join our BSS... */
if (pstat && (pstat->state & WIFI_FW_ASSOC_SUCCESS) && (status == WLAN_STATUS_SUCCESS)) {
/* 1 bss_cap_update & sta_info_update */
bss_cap_update_on_sta_join(padapter, pstat);
sta_info_update(padapter, pstat);
/* 2 issue assoc rsp before notify station join event. */
if (frame_type == WIFI_ASSOCREQ)
issue_asocrsp(padapter, status, pstat, WIFI_ASSOCRSP);
else
issue_asocrsp(padapter, status, pstat, WIFI_REASSOCRSP);
spin_lock_bh(&pstat->lock);
kfree(pstat->passoc_req);
pstat->assoc_req_len = 0;
pstat->passoc_req = rtw_zmalloc(pkt_len);
if (pstat->passoc_req) {
memcpy(pstat->passoc_req, pframe, pkt_len);
pstat->assoc_req_len = pkt_len;
}
spin_unlock_bh(&pstat->lock);
/* 3-(1) report sta add event */
report_add_sta_event(padapter, pstat->hwaddr, pstat->aid);
}
return _SUCCESS;
asoc_class2_error:
issue_deauth(padapter, (void *)GetAddr2Ptr(pframe), status);
return _FAIL;
OnAssocReqFail:
pstat->aid = 0;
if (frame_type == WIFI_ASSOCREQ)
issue_asocrsp(padapter, status, pstat, WIFI_ASSOCRSP);
else
issue_asocrsp(padapter, status, pstat, WIFI_REASSOCRSP);
return _FAIL;
}
/*
 * OnAssocRsp - handle a received (re)association response (STA mode).
 *
 * Verifies the frame is addressed to us and that an association exchange
 * is in progress, then parses status, capability, AID and the tagged IEs
 * (WMM / HT caps / HT info / ERP) and reports the join result.
 * Always returns _SUCCESS (the frame is consumed either way).
 */
unsigned int OnAssocRsp(struct adapter *padapter, union recv_frame *precv_frame)
{
	uint i;
	int res;
	unsigned short status;
	struct ndis_80211_var_ie *pIE;
	struct mlme_priv *pmlmepriv = &padapter->mlmepriv;
	struct mlme_ext_priv *pmlmeext = &padapter->mlmeextpriv;
	struct mlme_ext_info *pmlmeinfo = &(pmlmeext->mlmext_info);
	/* struct wlan_bssid_ex *cur_network = &(pmlmeinfo->network); */
	u8 *pframe = precv_frame->u.hdr.rx_data;
	uint pkt_len = precv_frame->u.hdr.len;

	/* check A1 matches or not */
	if (memcmp(myid(&(padapter->eeprompriv)), get_da(pframe), ETH_ALEN))
		return _SUCCESS;

	/* only meaningful while authenticated and associating */
	if (!(pmlmeinfo->state & (WIFI_FW_AUTH_SUCCESS | WIFI_FW_ASSOC_STATE)))
		return _SUCCESS;

	/* already associated: ignore duplicate responses */
	if (pmlmeinfo->state & WIFI_FW_ASSOC_SUCCESS)
		return _SUCCESS;

	del_timer_sync(&pmlmeext->link_timer);

	/* status */
	status = le16_to_cpu(*(__le16 *)(pframe + WLAN_HDR_A3_LEN + 2));
	if (status > 0) {
		/* AP refused the association */
		pmlmeinfo->state = WIFI_FW_NULL_STATE;
		res = -4;
		goto report_assoc_result;
	}

	/* get capabilities */
	pmlmeinfo->capability = le16_to_cpu(*(__le16 *)(pframe + WLAN_HDR_A3_LEN));

	/* set slot time */
	pmlmeinfo->slotTime = (pmlmeinfo->capability & BIT(10)) ? 9 : 20;

	/* AID (low 14 bits of the AID field); also used as join result */
	res = pmlmeinfo->aid = (int)(le16_to_cpu(*(__le16 *)(pframe + WLAN_HDR_A3_LEN + 4))&0x3fff);

	/* following are moved to join event callback function */
	/* to handle HT, WMM, rate adaptive, update MAC reg */
	/* for not to handle the synchronous IO in the tasklet */
	for (i = (6 + WLAN_HDR_A3_LEN); i < pkt_len;) {
		pIE = (struct ndis_80211_var_ie *)(pframe + i);

		switch (pIE->element_id) {
		case WLAN_EID_VENDOR_SPECIFIC:
			if (!memcmp(pIE->data, WMM_PARA_OUI, 6)) /* WMM */
				WMM_param_handler(padapter, pIE);
			break;
		case WLAN_EID_HT_CAPABILITY: /* HT caps */
			HT_caps_handler(padapter, pIE);
			break;
		case WLAN_EID_HT_OPERATION: /* HT info */
			HT_info_handler(padapter, pIE);
			break;
		case WLAN_EID_ERP_INFO:
			ERP_IE_handler(padapter, pIE);
			break;
		default:
			break;
		}

		i += (pIE->length + 2);
	}

	pmlmeinfo->state &= (~WIFI_FW_ASSOC_STATE);
	pmlmeinfo->state |= WIFI_FW_ASSOC_SUCCESS;

	/* Update Basic Rate Table for spec, 2010-12-28 , by thomas */
	UpdateBrateTbl(padapter, pmlmeinfo->network.supported_rates);

report_assoc_result:
	/* res > 0 means a valid AID was obtained: keep a copy of the
	 * response; otherwise drop any stale copy
	 */
	if (res > 0)
		rtw_buf_update(&pmlmepriv->assoc_rsp, &pmlmepriv->assoc_rsp_len, pframe, pkt_len);
	else
		rtw_buf_free(&pmlmepriv->assoc_rsp, &pmlmepriv->assoc_rsp_len);

	report_join_res(padapter, res);

	return _SUCCESS;
}
/*
 * OnDeAuth - handle a received deauthentication frame.
 *
 * AP mode: removes the sending station from the association list and
 * frees its resources. STA mode: tears the link down unless the deauth
 * matches a known spurious pattern (see the Win8.1/BRCM note below).
 * Always returns _SUCCESS (frame consumed).
 */
unsigned int OnDeAuth(struct adapter *padapter, union recv_frame *precv_frame)
{
	unsigned short reason;
	struct mlme_priv *pmlmepriv = &padapter->mlmepriv;
	struct mlme_ext_priv *pmlmeext = &padapter->mlmeextpriv;
	struct mlme_ext_info *pmlmeinfo = &(pmlmeext->mlmext_info);
	u8 *pframe = precv_frame->u.hdr.rx_data;
	int ignore_received_deauth = 0;

	/* check A3 */
	if (memcmp(GetAddr3Ptr(pframe), get_my_bssid(&pmlmeinfo->network), ETH_ALEN))
		return _SUCCESS;

	reason = le16_to_cpu(*(__le16 *)(pframe + WLAN_HDR_A3_LEN));

	if (check_fwstate(pmlmepriv, WIFI_AP_STATE)) {
		struct sta_info *psta;
		struct sta_priv *pstapriv = &padapter->stapriv;

		/* rtw_free_stainfo(padapter, psta); */

		netdev_dbg(padapter->pnetdev,
			   "ap recv deauth reason code(%d) sta:%pM\n", reason,
			   GetAddr2Ptr(pframe));

		psta = rtw_get_stainfo(pstapriv, GetAddr2Ptr(pframe));
		if (psta) {
			u8 updated = false;

			/* unlink the sender from the association list and
			 * free its station entry under the asoc list lock
			 */
			spin_lock_bh(&pstapriv->asoc_list_lock);
			if (list_empty(&psta->asoc_list) == false) {
				list_del_init(&psta->asoc_list);
				pstapriv->asoc_list_cnt--;
				updated = ap_free_sta(padapter, psta, false, reason);
			}
			spin_unlock_bh(&pstapriv->asoc_list_lock);

			associated_clients_update(padapter, updated);
		}

		return _SUCCESS;
	}

	/* Commented by Albert 20130604 */
	/* Before sending the auth frame to start the STA/GC mode connection with AP/GO, */
	/* we will send the deauth first. */
	/* However, the Win8.1 with BRCM Wi-Fi will send the deauth with reason code 6 to us after receieving our deauth. */
	/* Added the following code to avoid this case. */
	if ((pmlmeinfo->state & WIFI_FW_AUTH_STATE) ||
	    (pmlmeinfo->state & WIFI_FW_ASSOC_STATE)) {
		if (reason == WLAN_REASON_CLASS2_FRAME_FROM_NONAUTH_STA) {
			ignore_received_deauth = 1;
		} else if (reason == WLAN_REASON_PREV_AUTH_NOT_VALID) {
			/* TODO: 802.11r */
			ignore_received_deauth = 1;
		}
	}

	netdev_dbg(padapter->pnetdev,
		   "sta recv deauth reason code(%d) sta:%pM, ignore = %d\n",
		   reason, GetAddr3Ptr(pframe),
		   ignore_received_deauth);

	if (ignore_received_deauth == 0)
		receive_disconnect(padapter, GetAddr3Ptr(pframe), reason);

	pmlmepriv->LinkDetectInfo.bBusyTraffic = false;

	return _SUCCESS;
}
/*
 * OnDisassoc - handle a received disassociation frame.
 *
 * AP mode: removes the sending station from the association list and
 * frees its resources. STA mode: reports the disconnect unconditionally.
 * Always returns _SUCCESS (frame consumed).
 */
unsigned int OnDisassoc(struct adapter *padapter, union recv_frame *precv_frame)
{
	unsigned short reason;
	struct mlme_priv *pmlmepriv = &padapter->mlmepriv;
	struct mlme_ext_priv *pmlmeext = &padapter->mlmeextpriv;
	struct mlme_ext_info *pmlmeinfo = &(pmlmeext->mlmext_info);
	u8 *pframe = precv_frame->u.hdr.rx_data;

	/* check A3 */
	if (memcmp(GetAddr3Ptr(pframe), get_my_bssid(&pmlmeinfo->network), ETH_ALEN))
		return _SUCCESS;

	reason = le16_to_cpu(*(__le16 *)(pframe + WLAN_HDR_A3_LEN));

	if (check_fwstate(pmlmepriv, WIFI_AP_STATE)) {
		struct sta_info *psta;
		struct sta_priv *pstapriv = &padapter->stapriv;

		/* rtw_free_stainfo(padapter, psta); */

		netdev_dbg(padapter->pnetdev,
			   "ap recv disassoc reason code(%d) sta:%pM\n",
			   reason, GetAddr2Ptr(pframe));

		psta = rtw_get_stainfo(pstapriv, GetAddr2Ptr(pframe));
		if (psta) {
			u8 updated = false;

			/* unlink the sender from the association list and
			 * free its station entry under the asoc list lock
			 */
			spin_lock_bh(&pstapriv->asoc_list_lock);
			if (list_empty(&psta->asoc_list) == false) {
				list_del_init(&psta->asoc_list);
				pstapriv->asoc_list_cnt--;
				updated = ap_free_sta(padapter, psta, false, reason);
			}
			spin_unlock_bh(&pstapriv->asoc_list_lock);

			associated_clients_update(padapter, updated);
		}

		return _SUCCESS;
	}

	netdev_dbg(padapter->pnetdev,
		   "sta recv disassoc reason code(%d) sta:%pM\n",
		   reason, GetAddr3Ptr(pframe));

	receive_disconnect(padapter, GetAddr3Ptr(pframe), reason);

	pmlmepriv->LinkDetectInfo.bBusyTraffic = false;

	return _SUCCESS;
}
/* ATIM frames require no processing here; accept and discard. */
unsigned int OnAtim(struct adapter *padapter, union recv_frame *precv_frame)
{
	return _SUCCESS;
}
unsigned int on_action_spct(struct adapter *padapter, union recv_frame *precv_frame)
{
struct sta_info *psta = NULL;
struct sta_priv *pstapriv = &padapter->stapriv;
u8 *pframe = precv_frame->u.hdr.rx_data;
u8 *frame_body = (u8 *)(pframe + sizeof(struct ieee80211_hdr_3addr));
u8 category;
u8 action;
psta = rtw_get_stainfo(pstapriv, GetAddr2Ptr(pframe));
if (!psta)
goto exit;
category = frame_body[0];
if (category != RTW_WLAN_CATEGORY_SPECTRUM_MGMT)
goto exit;
action = frame_body[1];
switch (action) {
case WLAN_ACTION_SPCT_MSR_REQ:
case WLAN_ACTION_SPCT_MSR_RPRT:
case WLAN_ACTION_SPCT_TPC_REQ:
case WLAN_ACTION_SPCT_TPC_RPRT:
case WLAN_ACTION_SPCT_CHL_SWITCH:
break;
default:
break;
}
exit:
return _FAIL;
}
/*
 * OnAction_back - handle Block Ack category action frames
 * (ADDBA request, ADDBA response, DELBA) from an associated peer.
 * Always returns _SUCCESS (frame consumed).
 */
unsigned int OnAction_back(struct adapter *padapter, union recv_frame *precv_frame)
{
	u8 *addr;
	struct sta_info *psta = NULL;
	struct recv_reorder_ctrl *preorder_ctrl;
	unsigned char *frame_body;
	unsigned char category, action;
	unsigned short tid, status;
	struct mlme_ext_priv *pmlmeext = &padapter->mlmeextpriv;
	struct mlme_ext_info *pmlmeinfo = &(pmlmeext->mlmext_info);
	u8 *pframe = precv_frame->u.hdr.rx_data;
	struct sta_priv *pstapriv = &padapter->stapriv;

	/* check RA matches or not */
	if (memcmp(myid(&(padapter->eeprompriv)), GetAddr1Ptr(pframe), ETH_ALEN))/* for if1, sta/ap mode */
		return _SUCCESS;

	/* in non-AP modes only accept BA frames once associated */
	if ((pmlmeinfo->state&0x03) != WIFI_FW_AP_STATE)
		if (!(pmlmeinfo->state & WIFI_FW_ASSOC_SUCCESS))
			return _SUCCESS;

	addr = GetAddr2Ptr(pframe);
	psta = rtw_get_stainfo(pstapriv, addr);

	if (!psta)
		return _SUCCESS;

	frame_body = (unsigned char *)(pframe + sizeof(struct ieee80211_hdr_3addr));

	category = frame_body[0];
	if (category == RTW_WLAN_CATEGORY_BACK) {/* representing Block Ack */
		if (!pmlmeinfo->HT_enable)
			return _SUCCESS;

		action = frame_body[1];
		switch (action) {
		case WLAN_ACTION_ADDBA_REQ: /* ADDBA request */
			memcpy(&(pmlmeinfo->ADDBA_req), &(frame_body[2]), sizeof(struct ADDBA_request));
			/* process_addba_req(padapter, (u8 *)&(pmlmeinfo->ADDBA_req), GetAddr3Ptr(pframe)); */
			process_addba_req(padapter, (u8 *)&(pmlmeinfo->ADDBA_req), addr);

			/* answer with status 0 (accept) or 37 (request declined) */
			if (pmlmeinfo->accept_addba_req)
				issue_action_BA(padapter, addr, WLAN_ACTION_ADDBA_RESP, 0);
			else
				issue_action_BA(padapter, addr, WLAN_ACTION_ADDBA_RESP, 37);/* reject ADDBA Req */
			break;
		case WLAN_ACTION_ADDBA_RESP: /* ADDBA response */
			status = get_unaligned_le16(&frame_body[3]);
			tid = ((frame_body[5] >> 2) & 0x7);

			if (status == 0) {
				/* successful: enable aggregation for this TID */
				psta->htpriv.agg_enable_bitmap |= BIT(tid);
				psta->htpriv.candidate_tid_bitmap &= ~BIT(tid);
			} else {
				psta->htpriv.agg_enable_bitmap &= ~BIT(tid);
			}

			if (psta->state & WIFI_STA_ALIVE_CHK_STATE) {
				/* response proves the peer is alive: reset
				 * expiry and clear the alive-check flag
				 */
				psta->htpriv.agg_enable_bitmap &= ~BIT(tid);
				psta->expire_to = pstapriv->expire_to;
				psta->state ^= WIFI_STA_ALIVE_CHK_STATE;
			}
			break;
		case WLAN_ACTION_DELBA: /* DELBA */
			/* frame_body[3]: BIT(3) selects which direction is torn
			 * down, bits 7:4 carry the TID (presumably the 802.11
			 * DELBA parameter-set initiator bit - TODO confirm)
			 */
			if ((frame_body[3] & BIT(3)) == 0) {
				psta->htpriv.agg_enable_bitmap &=
					~BIT((frame_body[3] >> 4) & 0xf);
				psta->htpriv.candidate_tid_bitmap &=
					~BIT((frame_body[3] >> 4) & 0xf);
			} else if ((frame_body[3] & BIT(3)) == BIT(3)) {
				tid = (frame_body[3] >> 4) & 0x0F;

				preorder_ctrl = &psta->recvreorder_ctrl[tid];
				preorder_ctrl->enable = false;
				preorder_ctrl->indicate_seq = 0xffff;
			}
			/* todo: how to notify the host while receiving DELETE BA */
			break;
		default:
			break;
		}
	}
	return _SUCCESS;
}
/*
 * rtw_action_public_decache - filter duplicate public action frames.
 *
 * A retried frame whose sequence control (and, when @token >= 0, dialog
 * token) matches the last accepted one is reported as a duplicate
 * (_FAIL). Otherwise the cache is refreshed and _SUCCESS is returned.
 */
static s32 rtw_action_public_decache(union recv_frame *recv_frame, s32 token)
{
	struct adapter *adapter = recv_frame->u.hdr.adapter;
	struct mlme_ext_priv *mlmeext = &(adapter->mlmeextpriv);
	u8 *frame = recv_frame->u.hdr.rx_data;
	u16 seq_ctrl = ((recv_frame->u.hdr.attrib.seq_num & 0xffff) << 4) |
		(recv_frame->u.hdr.attrib.frag_num & 0xf);

	/* a retry matching the cached seq (and token, when provided) is a dup */
	if (GetRetry(frame) &&
	    seq_ctrl == mlmeext->action_public_rxseq &&
	    (token < 0 || token == mlmeext->action_public_dialog_token))
		return _FAIL;

	mlmeext->action_public_rxseq = seq_ctrl;
	if (token >= 0)
		mlmeext->action_public_dialog_token = token;

	return _SUCCESS;
}
/*
 * on_action_public_p2p - de-duplicate a P2P public action frame by its
 * dialog token (byte 7 of the action frame body).
 */
static unsigned int on_action_public_p2p(union recv_frame *precv_frame)
{
	u8 *body = precv_frame->u.hdr.rx_data + sizeof(struct ieee80211_hdr_3addr);
	u8 dialog_token = body[7];

	if (rtw_action_public_decache(precv_frame, dialog_token) == _FAIL)
		return _FAIL;

	return _SUCCESS;
}
/*
 * on_action_public_vendor - dispatch vendor-specific public action
 * frames; only the P2P OUI is recognized.
 */
static unsigned int on_action_public_vendor(union recv_frame *precv_frame)
{
	u8 *body = precv_frame->u.hdr.rx_data + sizeof(struct ieee80211_hdr_3addr);

	if (memcmp(body + 2, P2P_OUI, 4) != 0)
		return _FAIL;

	return on_action_public_p2p(precv_frame);
}
/*
 * on_action_public_default - handle any public action frame without a
 * dedicated handler: de-duplicate it by dialog token, then forward it
 * to cfg80211 with a short descriptive message.
 */
static unsigned int on_action_public_default(union recv_frame *precv_frame, u8 action)
{
	struct adapter *adapter = precv_frame->u.hdr.adapter;
	u8 *frame = precv_frame->u.hdr.rx_data;
	uint frame_len = precv_frame->u.hdr.len;
	u8 *body = frame + sizeof(struct ieee80211_hdr_3addr);
	u8 dialog_token = body[2];
	char msg[64];

	if (rtw_action_public_decache(precv_frame, dialog_token) == _FAIL)
		return _FAIL;

	scnprintf(msg, sizeof(msg), "%s(token:%u)", action_public_str(action), dialog_token);
	rtw_cfg80211_rx_action(adapter, frame, frame_len, msg);

	return _SUCCESS;
}
/*
 * on_action_public - entry point for public-category action frames.
 *
 * Rejects frames not addressed to us or not in the public category,
 * then routes vendor-specific actions to the vendor handler and
 * everything else to the default handler.
 */
unsigned int on_action_public(struct adapter *padapter, union recv_frame *precv_frame)
{
	u8 *frame = precv_frame->u.hdr.rx_data;
	u8 *body = frame + sizeof(struct ieee80211_hdr_3addr);

	/* check RA matches or not */
	if (memcmp(myid(&(padapter->eeprompriv)), GetAddr1Ptr(frame), ETH_ALEN))
		return _FAIL;

	if (body[0] != RTW_WLAN_CATEGORY_PUBLIC)
		return _FAIL;

	if (body[1] == ACT_PUBLIC_VENDOR)
		return on_action_public_vendor(precv_frame);

	return on_action_public_default(precv_frame, body[1]);
}
/*
 * OnAction_ht - receive handler for HT category action frames.
 *
 * Frames not addressed to us or outside the HT category are ignored;
 * no HT action (including WLAN_HT_ACTION_COMPRESSED_BF) is currently
 * acted upon. Always returns _SUCCESS.
 */
unsigned int OnAction_ht(struct adapter *padapter, union recv_frame *precv_frame)
{
	u8 *frame = precv_frame->u.hdr.rx_data;
	u8 *body = frame + sizeof(struct ieee80211_hdr_3addr);

	/* RA must be ours and the category must be HT */
	if (memcmp(myid(&(padapter->eeprompriv)), GetAddr1Ptr(frame), ETH_ALEN) == 0 &&
	    body[0] == RTW_WLAN_CATEGORY_HT) {
		/* WLAN_HT_ACTION_COMPRESSED_BF and all other HT actions
		 * are recognized but intentionally unhandled
		 */
	}

	return _SUCCESS;
}
/*
 * OnAction_sa_query - handle a received SA Query action frame.
 *
 * On an SA Query request (action 0) the transaction identifier is
 * echoed back in an SA Query response; on a response (action 1) the
 * pending sa_query timer is cancelled. Always returns _SUCCESS.
 *
 * The previous dead `if (0)` hexdump debug block (and the rx attrib
 * local it referenced) has been removed - it was unreachable code.
 */
unsigned int OnAction_sa_query(struct adapter *padapter, union recv_frame *precv_frame)
{
	u8 *pframe = precv_frame->u.hdr.rx_data;
	struct mlme_ext_priv *pmlmeext = &(padapter->mlmeextpriv);
	unsigned short tid;

	switch (pframe[WLAN_HDR_A3_LEN + 1]) {
	case 0: /* SA Query req: echo the transaction id in a response */
		/* memcpy avoids an unaligned 16-bit load from the frame */
		memcpy(&tid, &pframe[WLAN_HDR_A3_LEN + 2], sizeof(unsigned short));
		issue_action_SA_Query(padapter, GetAddr2Ptr(pframe), 1, tid);
		break;
	case 1: /* SA Query rsp: peer answered, stop the timeout timer */
		del_timer_sync(&pmlmeext->sa_query_timer);
		break;
	default:
		break;
	}

	return _SUCCESS;
}
/*
 * OnAction - top-level dispatcher for received action frames.
 *
 * Walks OnAction_tbl and invokes every registered handler whose
 * category number matches the frame's category byte.
 */
unsigned int OnAction(struct adapter *padapter, union recv_frame *precv_frame)
{
	u8 *body = precv_frame->u.hdr.rx_data + sizeof(struct ieee80211_hdr_3addr);
	unsigned char category = body[0];
	struct action_handler *handler;
	int idx;

	for (idx = 0; idx < ARRAY_SIZE(OnAction_tbl); idx++) {
		handler = &OnAction_tbl[idx];
		if (handler->num == category)
			handler->func(padapter, precv_frame);
	}

	return _SUCCESS;
}
/* Catch-all handler for reserved/unsupported management subtypes: drop. */
unsigned int DoReserved(struct adapter *padapter, union recv_frame *precv_frame)
{
	return _SUCCESS;
}
/*
 * _alloc_mgtxmitframe - allocate an xmit frame + buffer pair for a
 * management frame.
 * @once: true selects the "once" frame pool, false the ext pool.
 *
 * Returns the initialized frame, or NULL if either allocation fails
 * (a frame obtained without a buffer is released again).
 */
static struct xmit_frame *_alloc_mgtxmitframe(struct xmit_priv *pxmitpriv, bool once)
{
	struct xmit_frame *frame;
	struct xmit_buf *buf;

	frame = once ? rtw_alloc_xmitframe_once(pxmitpriv)
		     : rtw_alloc_xmitframe_ext(pxmitpriv);
	if (!frame)
		return NULL;

	buf = rtw_alloc_xmitbuf_ext(pxmitpriv);
	if (!buf) {
		/* no buffer available: give the frame back */
		rtw_free_xmitframe(pxmitpriv, frame);
		return NULL;
	}

	/* tie frame and buffer together */
	frame->frame_tag = MGNT_FRAMETAG;
	frame->pxmitbuf = buf;
	frame->buf_addr = buf->pbuf;
	buf->priv_data = frame;

	return frame;
}
/* Allocate a management tx frame from the normal (ext) pool. */
inline struct xmit_frame *alloc_mgtxmitframe(struct xmit_priv *pxmitpriv)
{
	return _alloc_mgtxmitframe(pxmitpriv, false);
}
/****************************************************************************
Following are some TX functions for WiFi MLME
*****************************************************************************/
/* Record the data rate to use for subsequent management frames. */
void update_mgnt_tx_rate(struct adapter *padapter, u8 rate)
{
	struct mlme_ext_priv *pmlmeext = &(padapter->mlmeextpriv);

	pmlmeext->tx_rate = rate;
}
/*
 * update_mgntframe_attrib - initialize the tx attributes for a
 * management frame: fixed header/queue settings, the current
 * management rate, and no encryption/QoS/HT features.
 */
void update_mgntframe_attrib(struct adapter *padapter, struct pkt_attrib *pattrib)
{
	struct mlme_ext_priv *pmlmeext = &(padapter->mlmeextpriv);
	u8 wireless_mode = (pmlmeext->tx_rate == IEEE80211_CCK_RATE_1MB) ?
			   WIRELESS_11B : WIRELESS_11G;

	/* memset((u8 *)(pattrib), 0, sizeof(struct pkt_attrib)); */

	/* fixed frame layout / queueing parameters */
	pattrib->hdrlen = 24;
	pattrib->nr_frags = 1;
	pattrib->priority = 7;
	pattrib->mac_id = 0;
	pattrib->qsel = 0x12;
	pattrib->pktlen = 0;

	/* rate selection derived from the current management tx rate */
	pattrib->raid = rtw_get_mgntframe_raid(padapter, wireless_mode);
	pattrib->rate = pmlmeext->tx_rate;

	/* management frames go out unencrypted, non-QoS, non-HT, 20 MHz */
	pattrib->encrypt = _NO_PRIVACY_;
	pattrib->bswenc = false;
	pattrib->qos_en = false;
	pattrib->ht_en = false;
	pattrib->bwmode = CHANNEL_WIDTH_20;
	pattrib->ch_offset = HAL_PRIME_CHNL_OFFSET_DONT_CARE;
	pattrib->sgi = false;

	pattrib->seqnum = pmlmeext->mgnt_seq;
	pattrib->retry_ctrl = true;
	pattrib->mbssid = 0;
}
/*
 * update_mgntframe_attrib_addr - copy the RA/TA addresses out of the
 * already-built 802.11 header into the frame's tx attributes.
 */
void update_mgntframe_attrib_addr(struct adapter *padapter, struct xmit_frame *pmgntframe)
{
	struct pkt_attrib *attrib = &pmgntframe->attrib;
	u8 *hdr = (u8 *)(pmgntframe->buf_addr) + TXDESC_OFFSET;

	memcpy(attrib->ra, GetAddr1Ptr(hdr), ETH_ALEN);
	memcpy(attrib->ta, GetAddr2Ptr(hdr), ETH_ALEN);
}
void dump_mgntframe(struct adapter *padapter, struct xmit_frame *pmgntframe)
{
if (padapter->bSurpriseRemoved ||
padapter->bDriverStopped) {
rtw_free_xmitbuf(&padapter->xmitpriv, pmgntframe->pxmitbuf);
rtw_free_xmitframe(&padapter->xmitpriv, pmgntframe);
return;
}
rtw_hal_mgnt_xmit(padapter, pmgntframe);
}
/*
 * dump_mgntframe_and_wait - transmit a management frame and wait (up to
 * @timeout_ms) on a submit context for the completion indication.
 *
 * Returns _FAIL when the device is gone, otherwise the result of the
 * hal xmit / rtw_sctx_wait() chain.
 */
s32 dump_mgntframe_and_wait(struct adapter *padapter, struct xmit_frame *pmgntframe, int timeout_ms)
{
	s32 ret = _FAIL;
	unsigned long irqL;
	struct xmit_priv *pxmitpriv = &padapter->xmitpriv;
	struct xmit_buf *pxmitbuf = pmgntframe->pxmitbuf;
	struct submit_ctx sctx;

	if (padapter->bSurpriseRemoved ||
	    padapter->bDriverStopped) {
		rtw_free_xmitbuf(&padapter->xmitpriv, pmgntframe->pxmitbuf);
		rtw_free_xmitframe(&padapter->xmitpriv, pmgntframe);
		return ret;
	}

	rtw_sctx_init(&sctx, timeout_ms);
	pxmitbuf->sctx = &sctx;

	ret = rtw_hal_mgnt_xmit(padapter, pmgntframe);

	if (ret == _SUCCESS)
		ret = rtw_sctx_wait(&sctx);

	/* sctx lives on this stack frame: clear the buffer's pointer to
	 * it under lock_sctx so no completion path can dereference it
	 * after this function returns
	 */
	spin_lock_irqsave(&pxmitpriv->lock_sctx, irqL);
	pxmitbuf->sctx = NULL;
	spin_unlock_irqrestore(&pxmitpriv->lock_sctx, irqL);

	return ret;
}
/*
 * dump_mgntframe_and_wait_ack - transmit a management frame with the
 * ack-report flag set and wait (up to 500 ms) for the tx-ack
 * indication via rtw_ack_tx_wait().
 *
 * Returns -1 when the device is gone; otherwise _FAIL (mutex wait
 * interrupted or xmit failed) or the rtw_ack_tx_wait() result.
 */
s32 dump_mgntframe_and_wait_ack(struct adapter *padapter, struct xmit_frame *pmgntframe)
{
	/* sequence tag shared by all callers of this function */
	static u8 seq_no;
	s32 ret = _FAIL;
	u32 timeout_ms = 500;/* 500ms */
	struct xmit_priv *pxmitpriv = &padapter->xmitpriv;

	if (padapter->bSurpriseRemoved ||
	    padapter->bDriverStopped) {
		rtw_free_xmitbuf(&padapter->xmitpriv, pmgntframe->pxmitbuf);
		rtw_free_xmitframe(&padapter->xmitpriv, pmgntframe);
		return -1;
	}

	/* ack_tx_mutex serializes concurrent ack-waited transmissions */
	if (mutex_lock_interruptible(&pxmitpriv->ack_tx_mutex) == 0) {
		pxmitpriv->ack_tx = true;
		pxmitpriv->seq_no = seq_no++;
		pmgntframe->ack_report = 1;
		if (rtw_hal_mgnt_xmit(padapter, pmgntframe) == _SUCCESS)
			ret = rtw_ack_tx_wait(pxmitpriv, timeout_ms);

		pxmitpriv->ack_tx = false;
		mutex_unlock(&pxmitpriv->ack_tx_mutex);
	}

	return ret;
}
/*
 * update_hidden_ssid - apply the hidden-SSID policy to an IE buffer.
 * @ies: start of the (tagged) IE area
 * @ies_len: total length of @ies
 * @hidden_ssid_mode: 1 = zero-length SSID IE, 2 = blanked SSID bytes
 *
 * Returns the change in total IE length (0 or negative), so the caller
 * can adjust the packet length.
 */
static int update_hidden_ssid(u8 *ies, u32 ies_len, u8 hidden_ssid_mode)
{
	u8 *ssid_ie;
	signed int ssid_len_ori;
	int len_diff = 0;

	ssid_ie = rtw_get_ie(ies, WLAN_EID_SSID, &ssid_len_ori, ies_len);

	if (ssid_ie && ssid_len_ori > 0) {
		switch (hidden_ssid_mode) {
		case 1:
		{
			/* strip the SSID bytes, keeping a zero-length IE,
			 * and pull the remaining IEs forward
			 */
			u8 *next_ie = ssid_ie + 2 + ssid_len_ori;
			u32 remain_len = 0;

			remain_len = ies_len - (next_ie - ies);

			ssid_ie[1] = 0;
			/* source and destination overlap inside the same IE
			 * buffer, so memmove() is required (memcpy() on
			 * overlapping regions is undefined behavior)
			 */
			memmove(ssid_ie + 2, next_ie, remain_len);
			len_diff -= ssid_len_ori;
			break;
		}
		case 2:
			/* keep the IE length but blank the SSID contents */
			memset(&ssid_ie[2], 0, ssid_len_ori);
			break;
		default:
			break;
		}
	}

	return len_diff;
}
void issue_beacon(struct adapter *padapter, int timeout_ms)
{
struct xmit_frame *pmgntframe;
struct pkt_attrib *pattrib;
unsigned char *pframe;
struct ieee80211_hdr *pwlanhdr;
__le16 *fctrl;
unsigned int rate_len;
struct xmit_priv *pxmitpriv = &(padapter->xmitpriv);
struct mlme_priv *pmlmepriv = &(padapter->mlmepriv);
struct mlme_ext_priv *pmlmeext = &(padapter->mlmeextpriv);
struct mlme_ext_info *pmlmeinfo = &(pmlmeext->mlmext_info);
struct wlan_bssid_ex *cur_network = &(pmlmeinfo->network);
pmgntframe = alloc_mgtxmitframe(pxmitpriv);
if (!pmgntframe)
return;
spin_lock_bh(&pmlmepriv->bcn_update_lock);
/* update attribute */
pattrib = &pmgntframe->attrib;
update_mgntframe_attrib(padapter, pattrib);
pattrib->qsel = 0x10;
memset(pmgntframe->buf_addr, 0, WLANHDR_OFFSET + TXDESC_OFFSET);
pframe = (u8 *)(pmgntframe->buf_addr) + TXDESC_OFFSET;
pwlanhdr = (struct ieee80211_hdr *)pframe;
fctrl = &(pwlanhdr->frame_control);
*(fctrl) = 0;
eth_broadcast_addr(pwlanhdr->addr1);
memcpy(pwlanhdr->addr2, myid(&(padapter->eeprompriv)), ETH_ALEN);
memcpy(pwlanhdr->addr3, get_my_bssid(cur_network), ETH_ALEN);
SetSeqNum(pwlanhdr, 0/*pmlmeext->mgnt_seq*/);
/* pmlmeext->mgnt_seq++; */
SetFrameSubType(pframe, WIFI_BEACON);
pframe += sizeof(struct ieee80211_hdr_3addr);
pattrib->pktlen = sizeof(struct ieee80211_hdr_3addr);
if ((pmlmeinfo->state&0x03) == WIFI_FW_AP_STATE) {
{
int len_diff;
memcpy(pframe, cur_network->ies, cur_network->ie_length);
len_diff = update_hidden_ssid(pframe+_BEACON_IE_OFFSET_,
cur_network->ie_length-_BEACON_IE_OFFSET_,
pmlmeinfo->hidden_ssid_mode);
pframe += (cur_network->ie_length+len_diff);
pattrib->pktlen += (cur_network->ie_length+len_diff);
}
{
u8 *wps_ie;
uint wps_ielen;
u8 sr = 0;
wps_ie = rtw_get_wps_ie(pmgntframe->buf_addr+TXDESC_OFFSET+sizeof(struct ieee80211_hdr_3addr)+_BEACON_IE_OFFSET_,
pattrib->pktlen-sizeof(struct ieee80211_hdr_3addr)-_BEACON_IE_OFFSET_, NULL, &wps_ielen);
if (wps_ie && wps_ielen > 0)
rtw_get_wps_attr_content(wps_ie, wps_ielen, WPS_ATTR_SELECTED_REGISTRAR, (u8 *)(&sr), NULL);
if (sr != 0)
set_fwstate(pmlmepriv, WIFI_UNDER_WPS);
else
_clr_fwstate_(pmlmepriv, WIFI_UNDER_WPS);
}
goto _issue_bcn;
}
/* below for ad-hoc mode */
/* timestamp will be inserted by hardware */
pframe += 8;
pattrib->pktlen += 8;
/* beacon interval: 2 bytes */
memcpy(pframe, (unsigned char *)(rtw_get_beacon_interval_from_ie(cur_network->ies)), 2);
pframe += 2;
pattrib->pktlen += 2;
/* capability info: 2 bytes */
memcpy(pframe, (unsigned char *)(rtw_get_capability_from_ie(cur_network->ies)), 2);
pframe += 2;
pattrib->pktlen += 2;
/* SSID */
pframe = rtw_set_ie(pframe, WLAN_EID_SSID, cur_network->ssid.ssid_length, cur_network->ssid.ssid, &pattrib->pktlen);
/* supported rates... */
rate_len = rtw_get_rateset_len(cur_network->supported_rates);
pframe = rtw_set_ie(pframe, WLAN_EID_SUPP_RATES, ((rate_len > 8) ? 8 : rate_len), cur_network->supported_rates, &pattrib->pktlen);
/* DS parameter set */
pframe = rtw_set_ie(pframe, WLAN_EID_DS_PARAMS, 1, (unsigned char *)&(cur_network->configuration.ds_config), &pattrib->pktlen);
/* if ((pmlmeinfo->state&0x03) == WIFI_FW_ADHOC_STATE) */
{
u8 erpinfo = 0;
u32 ATIMWindow;
/* IBSS Parameter Set... */
/* ATIMWindow = cur->configuration.ATIMWindow; */
ATIMWindow = 0;
pframe = rtw_set_ie(pframe, WLAN_EID_IBSS_PARAMS, 2, (unsigned char *)(&ATIMWindow), &pattrib->pktlen);
/* ERP IE */
pframe = rtw_set_ie(pframe, WLAN_EID_ERP_INFO, 1, &erpinfo, &pattrib->pktlen);
}
/* EXTERNDED SUPPORTED RATE */
if (rate_len > 8)
pframe = rtw_set_ie(pframe, WLAN_EID_EXT_SUPP_RATES, (rate_len - 8), (cur_network->supported_rates + 8), &pattrib->pktlen);
/* todo:HT for adhoc */
_issue_bcn:
pmlmepriv->update_bcn = false;
spin_unlock_bh(&pmlmepriv->bcn_update_lock);
if ((pattrib->pktlen + TXDESC_SIZE) > 512)
return;
pattrib->last_txcmdsz = pattrib->pktlen;
if (timeout_ms > 0)
dump_mgntframe_and_wait(padapter, pmgntframe, timeout_ms);
else
dump_mgntframe(padapter, pmgntframe);
}
/*
 * issue_probersp - build and transmit a probe response to @da.
 * @padapter: adapter context
 * @da: destination (requesting station) address; NULL is rejected
 * @is_valid_p2p_probereq: unused here; kept for the caller interface
 *
 * AP mode: the stored network IEs are copied (splicing in the
 * configured WPS probe-response IE when present) and the real SSID is
 * restored into the SSID IE from cur_network->ssid. Ad-hoc mode: the
 * fixed fields and basic IEs are built directly.
 *
 * Fix: the two error paths (stored IEs longer than MAX_IE_SZ, and
 * rtw_zmalloc() failure) now free the allocated xmit frame/buffer
 * instead of leaking them.
 */
void issue_probersp(struct adapter *padapter, unsigned char *da, u8 is_valid_p2p_probereq)
{
	struct xmit_frame *pmgntframe;
	struct pkt_attrib *pattrib;
	unsigned char *pframe;
	struct ieee80211_hdr *pwlanhdr;
	__le16 *fctrl;
	unsigned char *mac, *bssid;
	struct xmit_priv *pxmitpriv = &(padapter->xmitpriv);
	u8 *pwps_ie;
	uint wps_ielen;
	struct mlme_priv *pmlmepriv = &padapter->mlmepriv;
	struct mlme_ext_priv *pmlmeext = &(padapter->mlmeextpriv);
	struct mlme_ext_info *pmlmeinfo = &(pmlmeext->mlmext_info);
	struct wlan_bssid_ex *cur_network = &(pmlmeinfo->network);
	unsigned int rate_len;

	if (!da)
		return;

	pmgntframe = alloc_mgtxmitframe(pxmitpriv);
	if (!pmgntframe)
		return;

	/* update attribute */
	pattrib = &pmgntframe->attrib;
	update_mgntframe_attrib(padapter, pattrib);

	memset(pmgntframe->buf_addr, 0, WLANHDR_OFFSET + TXDESC_OFFSET);

	pframe = (u8 *)(pmgntframe->buf_addr) + TXDESC_OFFSET;
	pwlanhdr = (struct ieee80211_hdr *)pframe;

	mac = myid(&(padapter->eeprompriv));
	bssid = cur_network->mac_address;

	fctrl = &(pwlanhdr->frame_control);
	*(fctrl) = 0;
	memcpy(pwlanhdr->addr1, da, ETH_ALEN);
	memcpy(pwlanhdr->addr2, mac, ETH_ALEN);
	memcpy(pwlanhdr->addr3, bssid, ETH_ALEN);

	SetSeqNum(pwlanhdr, pmlmeext->mgnt_seq);
	pmlmeext->mgnt_seq++;
	SetFrameSubType(fctrl, WIFI_PROBERSP);

	pattrib->hdrlen = sizeof(struct ieee80211_hdr_3addr);
	pattrib->pktlen = pattrib->hdrlen;
	pframe += pattrib->hdrlen;

	if (cur_network->ie_length > MAX_IE_SZ)
		goto free_mgntframe;	/* don't leak the allocated frame */

	if ((pmlmeinfo->state&0x03) == WIFI_FW_AP_STATE) {
		pwps_ie = rtw_get_wps_ie(cur_network->ies+_FIXED_IE_LENGTH_, cur_network->ie_length-_FIXED_IE_LENGTH_, NULL, &wps_ielen);

		/* inerset & update wps_probe_resp_ie */
		if (pmlmepriv->wps_probe_resp_ie && pwps_ie && wps_ielen > 0) {
			uint wps_offset, remainder_ielen;
			u8 *premainder_ie;

			wps_offset = (uint)(pwps_ie - cur_network->ies);

			premainder_ie = pwps_ie + wps_ielen;

			remainder_ielen = cur_network->ie_length - wps_offset - wps_ielen;

			/* IEs before the WPS IE, then the configured WPS IE,
			 * then the IEs that followed the original WPS IE
			 */
			memcpy(pframe, cur_network->ies, wps_offset);
			pframe += wps_offset;
			pattrib->pktlen += wps_offset;

			wps_ielen = (uint)pmlmepriv->wps_probe_resp_ie[1];/* to get ie data len */
			if ((wps_offset+wps_ielen+2) <= MAX_IE_SZ) {
				memcpy(pframe, pmlmepriv->wps_probe_resp_ie, wps_ielen+2);
				pframe += wps_ielen+2;
				pattrib->pktlen += wps_ielen+2;
			}

			if ((wps_offset+wps_ielen+2+remainder_ielen) <= MAX_IE_SZ) {
				memcpy(pframe, premainder_ie, remainder_ielen);
				pframe += remainder_ielen;
				pattrib->pktlen += remainder_ielen;
			}
		} else {
			memcpy(pframe, cur_network->ies, cur_network->ie_length);
			pframe += cur_network->ie_length;
			pattrib->pktlen += cur_network->ie_length;
		}

		/* retrieve SSID IE from cur_network->ssid */
		{
			u8 *ssid_ie;
			signed int ssid_ielen;
			signed int ssid_ielen_diff;
			u8 *buf;
			u8 *ies = pmgntframe->buf_addr+TXDESC_OFFSET+sizeof(struct ieee80211_hdr_3addr);

			buf = rtw_zmalloc(MAX_IE_SZ);
			if (!buf)
				goto free_mgntframe;	/* don't leak the allocated frame */

			ssid_ie = rtw_get_ie(ies+_FIXED_IE_LENGTH_, WLAN_EID_SSID, &ssid_ielen,
					     (pframe-ies)-_FIXED_IE_LENGTH_);

			ssid_ielen_diff = cur_network->ssid.ssid_length - ssid_ielen;

			if (ssid_ie && cur_network->ssid.ssid_length) {
				uint remainder_ielen;
				u8 *remainder_ie;

				remainder_ie = ssid_ie+2;

				remainder_ielen = (pframe-remainder_ie);

				if (remainder_ielen > MAX_IE_SZ) {
					netdev_warn(padapter->pnetdev,
						    FUNC_ADPT_FMT " remainder_ielen > MAX_IE_SZ\n",
						    FUNC_ADPT_ARG(padapter));
					remainder_ielen = MAX_IE_SZ;
				}

				/* shift the trailing IEs through a bounce
				 * buffer, then write the real SSID bytes
				 */
				memcpy(buf, remainder_ie, remainder_ielen);
				memcpy(remainder_ie+ssid_ielen_diff, buf, remainder_ielen);
				*(ssid_ie+1) = cur_network->ssid.ssid_length;
				memcpy(ssid_ie+2, cur_network->ssid.ssid, cur_network->ssid.ssid_length);
				pframe += ssid_ielen_diff;
				pattrib->pktlen += ssid_ielen_diff;
			}
			kfree(buf);
		}
	} else {
		/* timestamp will be inserted by hardware */
		pframe += 8;
		pattrib->pktlen += 8;

		/* beacon interval: 2 bytes */
		memcpy(pframe, (unsigned char *)(rtw_get_beacon_interval_from_ie(cur_network->ies)), 2);
		pframe += 2;
		pattrib->pktlen += 2;

		/* capability info: 2 bytes */
		memcpy(pframe, (unsigned char *)(rtw_get_capability_from_ie(cur_network->ies)), 2);
		pframe += 2;
		pattrib->pktlen += 2;

		/* below for ad-hoc mode */

		/* SSID */
		pframe = rtw_set_ie(pframe, WLAN_EID_SSID, cur_network->ssid.ssid_length, cur_network->ssid.ssid, &pattrib->pktlen);

		/* supported rates... */
		rate_len = rtw_get_rateset_len(cur_network->supported_rates);
		pframe = rtw_set_ie(pframe, WLAN_EID_SUPP_RATES, ((rate_len > 8) ? 8 : rate_len), cur_network->supported_rates, &pattrib->pktlen);

		/* DS parameter set */
		pframe = rtw_set_ie(pframe, WLAN_EID_DS_PARAMS, 1, (unsigned char *)&(cur_network->configuration.ds_config), &pattrib->pktlen);

		if ((pmlmeinfo->state&0x03) == WIFI_FW_ADHOC_STATE) {
			u8 erpinfo = 0;
			u32 ATIMWindow;
			/* IBSS Parameter Set... */
			/* ATIMWindow = cur->configuration.ATIMWindow; */
			ATIMWindow = 0;
			pframe = rtw_set_ie(pframe, WLAN_EID_IBSS_PARAMS, 2, (unsigned char *)(&ATIMWindow), &pattrib->pktlen);

			/* ERP IE */
			pframe = rtw_set_ie(pframe, WLAN_EID_ERP_INFO, 1, &erpinfo, &pattrib->pktlen);
		}

		/* EXTERNDED SUPPORTED RATE */
		if (rate_len > 8)
			pframe = rtw_set_ie(pframe, WLAN_EID_EXT_SUPP_RATES, (rate_len - 8), (cur_network->supported_rates + 8), &pattrib->pktlen);

		/* todo:HT for adhoc */
	}

	pattrib->last_txcmdsz = pattrib->pktlen;

	dump_mgntframe(padapter, pmgntframe);

	return;

free_mgntframe:
	/* error path: release the xmit buffer and frame acquired above */
	rtw_free_xmitbuf(pxmitpriv, pmgntframe->pxmitbuf);
	rtw_free_xmitframe(pxmitpriv, pmgntframe);
}
/*
 * Build and transmit one probe request frame.
 *
 * @pssid:      if non-NULL, a directed SSID IE is built from it; otherwise a
 *              zero-length (wildcard) SSID IE is emitted.
 * @da:         destination address; NULL means broadcast.
 * @ch:         if non-zero, a DS Parameter Set IE carrying this channel is added.
 * @append_wps: if true, append the configured WPS probe-request IE (WPS 2.0).
 * @wait_ack:   if true, transmit synchronously and return the TX ack result.
 *
 * Returns _SUCCESS or _FAIL.
 */
static int _issue_probereq(struct adapter *padapter,
			   struct ndis_802_11_ssid *pssid,
			   u8 *da, u8 ch, bool append_wps, bool wait_ack)
{
	int ret = _FAIL;
	struct xmit_frame *pmgntframe;
	struct pkt_attrib *pattrib;
	unsigned char *pframe;
	struct ieee80211_hdr *pwlanhdr;
	__le16 *fctrl;
	unsigned char *mac;
	unsigned char bssrate[NumRates];
	struct xmit_priv *pxmitpriv = &(padapter->xmitpriv);
	struct mlme_priv *pmlmepriv = &(padapter->mlmepriv);
	struct mlme_ext_priv *pmlmeext = &(padapter->mlmeextpriv);
	int bssrate_len = 0;

	pmgntframe = alloc_mgtxmitframe(pxmitpriv);
	if (!pmgntframe)
		goto exit;

	/* update attribute */
	pattrib = &pmgntframe->attrib;
	update_mgntframe_attrib(padapter, pattrib);

	memset(pmgntframe->buf_addr, 0, WLANHDR_OFFSET + TXDESC_OFFSET);

	pframe = (u8 *)(pmgntframe->buf_addr) + TXDESC_OFFSET;
	pwlanhdr = (struct ieee80211_hdr *)pframe;

	mac = myid(&(padapter->eeprompriv));

	fctrl = &(pwlanhdr->frame_control);
	*(fctrl) = 0;

	if (da) {
		/* unicast probe request frame */
		memcpy(pwlanhdr->addr1, da, ETH_ALEN);
		memcpy(pwlanhdr->addr3, da, ETH_ALEN);
	} else {
		/* broadcast probe request frame */
		eth_broadcast_addr(pwlanhdr->addr1);
		eth_broadcast_addr(pwlanhdr->addr3);
	}

	memcpy(pwlanhdr->addr2, mac, ETH_ALEN);

	SetSeqNum(pwlanhdr, pmlmeext->mgnt_seq);
	pmlmeext->mgnt_seq++;
	SetFrameSubType(pframe, WIFI_PROBEREQ);

	/* fixed header done; IEs are appended after the 3-address header */
	pframe += sizeof(struct ieee80211_hdr_3addr);
	pattrib->pktlen = sizeof(struct ieee80211_hdr_3addr);

	/* SSID IE: directed when a target SSID was given, wildcard otherwise */
	if (pssid)
		pframe = rtw_set_ie(pframe, WLAN_EID_SSID, pssid->ssid_length, pssid->ssid, &(pattrib->pktlen));
	else
		pframe = rtw_set_ie(pframe, WLAN_EID_SSID, 0, NULL, &(pattrib->pktlen));

	/* supported rates, split into SUPP_RATES (max 8) + EXT_SUPP_RATES */
	get_rate_set(padapter, bssrate, &bssrate_len);

	if (bssrate_len > 8) {
		pframe = rtw_set_ie(pframe, WLAN_EID_SUPP_RATES, 8, bssrate, &(pattrib->pktlen));
		pframe = rtw_set_ie(pframe, WLAN_EID_EXT_SUPP_RATES, (bssrate_len - 8), (bssrate + 8), &(pattrib->pktlen));
	} else {
		pframe = rtw_set_ie(pframe, WLAN_EID_SUPP_RATES, bssrate_len, bssrate, &(pattrib->pktlen));
	}

	if (ch)
		pframe = rtw_set_ie(pframe, WLAN_EID_DS_PARAMS, 1, &ch, &pattrib->pktlen);

	if (append_wps) {
		/* add wps_ie for wps2.0 */
		if (pmlmepriv->wps_probe_req_ie_len > 0 && pmlmepriv->wps_probe_req_ie) {
			memcpy(pframe, pmlmepriv->wps_probe_req_ie, pmlmepriv->wps_probe_req_ie_len);
			pframe += pmlmepriv->wps_probe_req_ie_len;
			pattrib->pktlen += pmlmepriv->wps_probe_req_ie_len;
		}
	}

	pattrib->last_txcmdsz = pattrib->pktlen;

	if (wait_ack) {
		ret = dump_mgntframe_and_wait_ack(padapter, pmgntframe);
	} else {
		dump_mgntframe(padapter, pmgntframe);
		ret = _SUCCESS;
	}

exit:
	return ret;
}
/*
 * Send a single probe request and do not wait for an ack.
 * Wrapper around _issue_probereq(): ch == 0 (no DS Parameter Set IE),
 * append_wps == 1 (WPS IE appended when configured), wait_ack == false.
 */
inline void issue_probereq(struct adapter *padapter, struct ndis_802_11_ssid *pssid, u8 *da)
{
	_issue_probereq(padapter, pssid, da, 0, 1, false);
}
/*
 * Issue probe requests with retry.
 *
 * Sends up to @try_cnt probe requests.  When @wait_ms > 0 each attempt waits
 * for the TX ack report and sleeps @wait_ms between failed attempts; when
 * @wait_ms == 0 all @try_cnt frames are sent back to back without waiting.
 * Returns _SUCCESS once an attempt did not fail, _FAIL otherwise.
 */
int issue_probereq_ex(struct adapter *padapter, struct ndis_802_11_ssid *pssid, u8 *da, u8 ch, bool append_wps,
	int try_cnt, int wait_ms)
{
	int attempts = 0;
	int ret;

	for (;;) {
		ret = _issue_probereq(padapter, pssid, da, ch, append_wps,
				      wait_ms > 0);
		attempts++;

		/* abort immediately when the interface is going away */
		if (padapter->bDriverStopped || padapter->bSurpriseRemoved)
			break;

		/* out of attempts */
		if (attempts >= try_cnt)
			break;

		/* an attempt succeeded and we were actually waiting for acks */
		if (ret != _FAIL && wait_ms != 0)
			break;

		if (wait_ms > 0 && ret == _FAIL)
			msleep(wait_ms);
	}

	if (ret != _FAIL)
		ret = _SUCCESS;

	return ret;
}
/* if psta == NULL, indicate we are station(client) now... */
/*
 * Build and transmit an 802.11 authentication frame.
 *
 * @psta:   when non-NULL we act as AP and answer station @psta (its
 *          authalg/auth_seq/state drive the frame contents); when NULL we
 *          act as station and authenticate towards the current BSSID using
 *          pmlmeinfo state.
 * @status: 802.11 status code placed in the frame.
 */
void issue_auth(struct adapter *padapter, struct sta_info *psta, unsigned short status)
{
	struct xmit_frame *pmgntframe;
	struct pkt_attrib *pattrib;
	unsigned char *pframe;
	struct ieee80211_hdr *pwlanhdr;
	__le16 *fctrl;
	unsigned int val32;
	unsigned short val16;
	int use_shared_key = 0;
	struct xmit_priv *pxmitpriv = &(padapter->xmitpriv);
	struct mlme_ext_priv *pmlmeext = &(padapter->mlmeextpriv);
	struct mlme_ext_info *pmlmeinfo = &(pmlmeext->mlmext_info);
	__le16 le_tmp;

	pmgntframe = alloc_mgtxmitframe(pxmitpriv);
	if (!pmgntframe)
		return;

	/* update attribute */
	pattrib = &pmgntframe->attrib;
	update_mgntframe_attrib(padapter, pattrib);

	memset(pmgntframe->buf_addr, 0, WLANHDR_OFFSET + TXDESC_OFFSET);

	pframe = (u8 *)(pmgntframe->buf_addr) + TXDESC_OFFSET;
	pwlanhdr = (struct ieee80211_hdr *)pframe;

	fctrl = &(pwlanhdr->frame_control);
	*(fctrl) = 0;

	SetSeqNum(pwlanhdr, pmlmeext->mgnt_seq);
	pmlmeext->mgnt_seq++;
	SetFrameSubType(pframe, WIFI_AUTH);

	pframe += sizeof(struct ieee80211_hdr_3addr);
	pattrib->pktlen = sizeof(struct ieee80211_hdr_3addr);

	if (psta) { /* for AP mode */
		memcpy(pwlanhdr->addr1, psta->hwaddr, ETH_ALEN);
		memcpy(pwlanhdr->addr2, myid(&(padapter->eeprompriv)), ETH_ALEN);
		memcpy(pwlanhdr->addr3, myid(&(padapter->eeprompriv)), ETH_ALEN);

		/* setting auth algo number */
		val16 = (u16)psta->authalg;

		/* a failed exchange is reported with algorithm 0 (open system) */
		if (status != WLAN_STATUS_SUCCESS)
			val16 = 0;

		/* non-zero algorithm number means shared-key handling */
		if (val16)
			use_shared_key = 1;

		le_tmp = cpu_to_le16(val16);
		pframe = rtw_set_fixed_ie(pframe, _AUTH_ALGM_NUM_, (unsigned char *)&le_tmp, &(pattrib->pktlen));

		/* setting auth seq number */
		val16 = (u16)psta->auth_seq;
		le_tmp = cpu_to_le16(val16);
		pframe = rtw_set_fixed_ie(pframe, _AUTH_SEQ_NUM_, (unsigned char *)&le_tmp, &(pattrib->pktlen));

		/* setting status code... */
		val16 = status;
		le_tmp = cpu_to_le16(val16);
		pframe = rtw_set_fixed_ie(pframe, _STATUS_CODE_, (unsigned char *)&le_tmp, &(pattrib->pktlen));

		/* added challenging text (shared key, sequence 2 only)... */
		if ((psta->auth_seq == 2) && (psta->state & WIFI_FW_AUTH_STATE) && (use_shared_key == 1))
			pframe = rtw_set_ie(pframe, WLAN_EID_CHALLENGE, 128, psta->chg_txt, &(pattrib->pktlen));
	} else {
		/* station mode: authenticate towards the joined BSSID */
		memcpy(pwlanhdr->addr1, get_my_bssid(&pmlmeinfo->network), ETH_ALEN);
		memcpy(pwlanhdr->addr2, myid(&padapter->eeprompriv), ETH_ALEN);
		memcpy(pwlanhdr->addr3, get_my_bssid(&pmlmeinfo->network), ETH_ALEN);

		/* setting auth algo number */
		val16 = (pmlmeinfo->auth_algo == dot11AuthAlgrthm_Shared) ? 1 : 0;/* 0:OPEN System, 1:Shared key */
		if (val16)
			use_shared_key = 1;

		le_tmp = cpu_to_le16(val16);

		/* setting IV for auth seq #3 */
		if ((pmlmeinfo->auth_seq == 3) && (pmlmeinfo->state & WIFI_FW_AUTH_STATE) && (use_shared_key == 1)) {
			__le32 le_tmp32;

			/* IV word: running packet counter plus key index in bits 30..31 */
			val32 = ((pmlmeinfo->iv++) | (pmlmeinfo->key_index << 30));
			le_tmp32 = cpu_to_le32(val32);
			pframe = rtw_set_fixed_ie(pframe, 4, (unsigned char *)&le_tmp32, &(pattrib->pktlen));

			pattrib->iv_len = 4;
		}

		pframe = rtw_set_fixed_ie(pframe, _AUTH_ALGM_NUM_, (unsigned char *)&le_tmp, &(pattrib->pktlen));

		/* setting auth seq number */
		le_tmp = cpu_to_le16(pmlmeinfo->auth_seq);
		pframe = rtw_set_fixed_ie(pframe, _AUTH_SEQ_NUM_, (unsigned char *)&le_tmp, &(pattrib->pktlen));

		/* setting status code... */
		le_tmp = cpu_to_le16(status);
		pframe = rtw_set_fixed_ie(pframe, _STATUS_CODE_, (unsigned char *)&le_tmp, &(pattrib->pktlen));

		/* then checking to see if sending challenging text... */
		if ((pmlmeinfo->auth_seq == 3) && (pmlmeinfo->state & WIFI_FW_AUTH_STATE) && (use_shared_key == 1)) {
			pframe = rtw_set_ie(pframe, WLAN_EID_CHALLENGE, 128, pmlmeinfo->chg_txt, &(pattrib->pktlen));

			/* sequence 3 of shared-key auth goes out WEP-encrypted */
			SetPrivacy(fctrl);

			pattrib->hdrlen = sizeof(struct ieee80211_hdr_3addr);

			pattrib->encrypt = _WEP40_;

			pattrib->icv_len = 4;

			pattrib->pktlen += pattrib->icv_len;
		}
	}

	pattrib->last_txcmdsz = pattrib->pktlen;

	/* NOTE(review): called unconditionally — presumably a no-op unless
	 * pattrib->encrypt was set above; confirm against rtw_wep_encrypt().
	 */
	rtw_wep_encrypt(padapter, (u8 *)pmgntframe);

	dump_mgntframe(padapter, pmgntframe);
}
void issue_asocrsp(struct adapter *padapter, unsigned short status, struct sta_info *pstat, int pkt_type)
{
struct xmit_frame *pmgntframe;
struct ieee80211_hdr *pwlanhdr;
struct pkt_attrib *pattrib;
unsigned char *pbuf, *pframe;
unsigned short val;
__le16 *fctrl;
struct xmit_priv *pxmitpriv = &(padapter->xmitpriv);
struct mlme_priv *pmlmepriv = &(padapter->mlmepriv);
struct mlme_ext_priv *pmlmeext = &padapter->mlmeextpriv;
struct mlme_ext_info *pmlmeinfo = &(pmlmeext->mlmext_info);
struct wlan_bssid_ex *pnetwork = &(pmlmeinfo->network);
u8 *ie = pnetwork->ies;
__le16 lestatus, le_tmp;
pmgntframe = alloc_mgtxmitframe(pxmitpriv);
if (!pmgntframe)
return;
/* update attribute */
pattrib = &pmgntframe->attrib;
update_mgntframe_attrib(padapter, pattrib);
memset(pmgntframe->buf_addr, 0, WLANHDR_OFFSET + TXDESC_OFFSET);
pframe = (u8 *)(pmgntframe->buf_addr) + TXDESC_OFFSET;
pwlanhdr = (struct ieee80211_hdr *)pframe;
fctrl = &(pwlanhdr->frame_control);
*(fctrl) = 0;
memcpy((void *)GetAddr1Ptr(pwlanhdr), pstat->hwaddr, ETH_ALEN);
memcpy((void *)GetAddr2Ptr(pwlanhdr), myid(&(padapter->eeprompriv)), ETH_ALEN);
memcpy((void *)GetAddr3Ptr(pwlanhdr), get_my_bssid(&(pmlmeinfo->network)), ETH_ALEN);
SetSeqNum(pwlanhdr, pmlmeext->mgnt_seq);
pmlmeext->mgnt_seq++;
if ((pkt_type == WIFI_ASSOCRSP) || (pkt_type == WIFI_REASSOCRSP))
SetFrameSubType(pwlanhdr, pkt_type);
else
return;
pattrib->hdrlen = sizeof(struct ieee80211_hdr_3addr);
pattrib->pktlen += pattrib->hdrlen;
pframe += pattrib->hdrlen;
/* capability */
val = *(unsigned short *)rtw_get_capability_from_ie(ie);
pframe = rtw_set_fixed_ie(pframe, _CAPABILITY_, (unsigned char *)&val, &(pattrib->pktlen));
lestatus = cpu_to_le16(status);
pframe = rtw_set_fixed_ie(pframe, _STATUS_CODE_, (unsigned char *)&lestatus, &(pattrib->pktlen));
le_tmp = cpu_to_le16(pstat->aid | BIT(14) | BIT(15));
pframe = rtw_set_fixed_ie(pframe, _ASOC_ID_, (unsigned char *)&le_tmp, &(pattrib->pktlen));
if (pstat->bssratelen <= 8) {
pframe = rtw_set_ie(pframe, WLAN_EID_SUPP_RATES, pstat->bssratelen, pstat->bssrateset, &(pattrib->pktlen));
} else {
pframe = rtw_set_ie(pframe, WLAN_EID_SUPP_RATES, 8, pstat->bssrateset, &(pattrib->pktlen));
pframe = rtw_set_ie(pframe, WLAN_EID_EXT_SUPP_RATES, (pstat->bssratelen-8), pstat->bssrateset+8, &(pattrib->pktlen));
}
if ((pstat->flags & WLAN_STA_HT) && (pmlmepriv->htpriv.ht_option)) {
uint ie_len = 0;
/* FILL HT CAP INFO IE */
/* p = hostapd_eid_ht_capabilities_info(hapd, p); */
pbuf = rtw_get_ie(ie + _BEACON_IE_OFFSET_, WLAN_EID_HT_CAPABILITY, &ie_len, (pnetwork->ie_length - _BEACON_IE_OFFSET_));
if (pbuf && ie_len > 0) {
memcpy(pframe, pbuf, ie_len+2);
pframe += (ie_len+2);
pattrib->pktlen += (ie_len+2);
}
/* FILL HT ADD INFO IE */
/* p = hostapd_eid_ht_operation(hapd, p); */
pbuf = rtw_get_ie(ie + _BEACON_IE_OFFSET_, WLAN_EID_HT_OPERATION, &ie_len, (pnetwork->ie_length - _BEACON_IE_OFFSET_));
if (pbuf && ie_len > 0) {
memcpy(pframe, pbuf, ie_len+2);
pframe += (ie_len+2);
pattrib->pktlen += (ie_len+2);
}
}
/* FILL WMM IE */
if ((pstat->flags & WLAN_STA_WME) && (pmlmepriv->qospriv.qos_option)) {
uint ie_len = 0;
unsigned char WMM_PARA_IE[] = {0x00, 0x50, 0xf2, 0x02, 0x01, 0x01};
for (pbuf = ie + _BEACON_IE_OFFSET_; ; pbuf += (ie_len + 2)) {
pbuf = rtw_get_ie(pbuf, WLAN_EID_VENDOR_SPECIFIC, &ie_len, (pnetwork->ie_length - _BEACON_IE_OFFSET_ - (ie_len + 2)));
if (pbuf && !memcmp(pbuf+2, WMM_PARA_IE, 6)) {
memcpy(pframe, pbuf, ie_len+2);
pframe += (ie_len+2);
pattrib->pktlen += (ie_len+2);
break;
}
if (!pbuf || ie_len == 0)
break;
}
}
if (pmlmeinfo->assoc_AP_vendor == HT_IOT_PEER_REALTEK)
pframe = rtw_set_ie(pframe, WLAN_EID_VENDOR_SPECIFIC, 6, REALTEK_96B_IE, &(pattrib->pktlen));
/* add WPS IE ie for wps 2.0 */
if (pmlmepriv->wps_assoc_resp_ie && pmlmepriv->wps_assoc_resp_ie_len > 0) {
memcpy(pframe, pmlmepriv->wps_assoc_resp_ie, pmlmepriv->wps_assoc_resp_ie_len);
pframe += pmlmepriv->wps_assoc_resp_ie_len;
pattrib->pktlen += pmlmepriv->wps_assoc_resp_ie_len;
}
pattrib->last_txcmdsz = pattrib->pktlen;
dump_mgntframe(padapter, pmgntframe);
}
void issue_assocreq(struct adapter *padapter)
{
int ret = _FAIL;
struct xmit_frame *pmgntframe;
struct pkt_attrib *pattrib;
unsigned char *pframe;
struct ieee80211_hdr *pwlanhdr;
__le16 *fctrl;
__le16 val16;
unsigned int i, j, index = 0;
unsigned char bssrate[NumRates], sta_bssrate[NumRates];
struct ndis_80211_var_ie *pIE;
struct xmit_priv *pxmitpriv = &(padapter->xmitpriv);
struct mlme_priv *pmlmepriv = &(padapter->mlmepriv);
struct mlme_ext_priv *pmlmeext = &(padapter->mlmeextpriv);
struct mlme_ext_info *pmlmeinfo = &(pmlmeext->mlmext_info);
int bssrate_len = 0, sta_bssrate_len = 0;
u8 vs_ie_length = 0;
pmgntframe = alloc_mgtxmitframe(pxmitpriv);
if (!pmgntframe)
goto exit;
/* update attribute */
pattrib = &pmgntframe->attrib;
update_mgntframe_attrib(padapter, pattrib);
memset(pmgntframe->buf_addr, 0, WLANHDR_OFFSET + TXDESC_OFFSET);
pframe = (u8 *)(pmgntframe->buf_addr) + TXDESC_OFFSET;
pwlanhdr = (struct ieee80211_hdr *)pframe;
fctrl = &(pwlanhdr->frame_control);
*(fctrl) = 0;
memcpy(pwlanhdr->addr1, get_my_bssid(&(pmlmeinfo->network)), ETH_ALEN);
memcpy(pwlanhdr->addr2, myid(&(padapter->eeprompriv)), ETH_ALEN);
memcpy(pwlanhdr->addr3, get_my_bssid(&(pmlmeinfo->network)), ETH_ALEN);
SetSeqNum(pwlanhdr, pmlmeext->mgnt_seq);
pmlmeext->mgnt_seq++;
SetFrameSubType(pframe, WIFI_ASSOCREQ);
pframe += sizeof(struct ieee80211_hdr_3addr);
pattrib->pktlen = sizeof(struct ieee80211_hdr_3addr);
/* caps */
memcpy(pframe, rtw_get_capability_from_ie(pmlmeinfo->network.ies), 2);
pframe += 2;
pattrib->pktlen += 2;
/* listen interval */
/* todo: listen interval for power saving */
val16 = cpu_to_le16(3);
memcpy(pframe, (unsigned char *)&val16, 2);
pframe += 2;
pattrib->pktlen += 2;
/* SSID */
pframe = rtw_set_ie(pframe, WLAN_EID_SSID, pmlmeinfo->network.ssid.ssid_length, pmlmeinfo->network.ssid.ssid, &(pattrib->pktlen));
/* supported rate & extended supported rate */
/* Check if the AP's supported rates are also supported by STA. */
get_rate_set(padapter, sta_bssrate, &sta_bssrate_len);
if (pmlmeext->cur_channel == 14) /* for JAPAN, channel 14 can only uses B Mode(CCK) */
sta_bssrate_len = 4;
/* for (i = 0; i < sta_bssrate_len; i++) { */
/* */
for (i = 0; i < NDIS_802_11_LENGTH_RATES_EX; i++) {
if (pmlmeinfo->network.supported_rates[i] == 0)
break;
}
for (i = 0; i < NDIS_802_11_LENGTH_RATES_EX; i++) {
if (pmlmeinfo->network.supported_rates[i] == 0)
break;
/* Check if the AP's supported rates are also supported by STA. */
for (j = 0; j < sta_bssrate_len; j++) {
/* Avoid the proprietary data rate (22Mbps) of Handlink WSG-4000 AP */
if ((pmlmeinfo->network.supported_rates[i] | IEEE80211_BASIC_RATE_MASK)
== (sta_bssrate[j] | IEEE80211_BASIC_RATE_MASK))
break;
}
if (j != sta_bssrate_len)
/* the rate is supported by STA */
bssrate[index++] = pmlmeinfo->network.supported_rates[i];
}
bssrate_len = index;
if (bssrate_len == 0) {
rtw_free_xmitbuf(pxmitpriv, pmgntframe->pxmitbuf);
rtw_free_xmitframe(pxmitpriv, pmgntframe);
goto exit; /* don't connect to AP if no joint supported rate */
}
if (bssrate_len > 8) {
pframe = rtw_set_ie(pframe, WLAN_EID_SUPP_RATES, 8, bssrate, &(pattrib->pktlen));
pframe = rtw_set_ie(pframe, WLAN_EID_EXT_SUPP_RATES, (bssrate_len - 8), (bssrate + 8), &(pattrib->pktlen));
} else
pframe = rtw_set_ie(pframe, WLAN_EID_SUPP_RATES, bssrate_len, bssrate, &(pattrib->pktlen));
/* vendor specific IE, such as WPA, WMM, WPS */
for (i = sizeof(struct ndis_802_11_fix_ie); i < pmlmeinfo->network.ie_length;) {
pIE = (struct ndis_80211_var_ie *)(pmlmeinfo->network.ies + i);
switch (pIE->element_id) {
case WLAN_EID_VENDOR_SPECIFIC:
if ((!memcmp(pIE->data, RTW_WPA_OUI, 4)) ||
(!memcmp(pIE->data, WMM_OUI, 4)) ||
(!memcmp(pIE->data, WPS_OUI, 4))) {
vs_ie_length = pIE->length;
if ((!padapter->registrypriv.wifi_spec) && (!memcmp(pIE->data, WPS_OUI, 4))) {
/* Commented by Kurt 20110629
* In some older APs, WPS handshake
* would be fail if we append vendor
* extensions information to AP
*/
vs_ie_length = 14;
}
pframe = rtw_set_ie(pframe, WLAN_EID_VENDOR_SPECIFIC, vs_ie_length, pIE->data, &(pattrib->pktlen));
}
break;
case WLAN_EID_RSN:
pframe = rtw_set_ie(pframe, WLAN_EID_RSN, pIE->length, pIE->data, &(pattrib->pktlen));
break;
case WLAN_EID_HT_CAPABILITY:
if (padapter->mlmepriv.htpriv.ht_option) {
if (!(is_ap_in_tkip(padapter))) {
memcpy(&(pmlmeinfo->HT_caps), pIE->data, sizeof(struct HT_caps_element));
pframe = rtw_set_ie(pframe, WLAN_EID_HT_CAPABILITY, pIE->length, (u8 *)(&(pmlmeinfo->HT_caps)), &(pattrib->pktlen));
}
}
break;
case WLAN_EID_EXT_CAPABILITY:
if (padapter->mlmepriv.htpriv.ht_option)
pframe = rtw_set_ie(pframe, WLAN_EID_EXT_CAPABILITY, pIE->length, pIE->data, &(pattrib->pktlen));
break;
default:
break;
}
i += (pIE->length + 2);
}
if (pmlmeinfo->assoc_AP_vendor == HT_IOT_PEER_REALTEK)
pframe = rtw_set_ie(pframe, WLAN_EID_VENDOR_SPECIFIC, 6, REALTEK_96B_IE, &(pattrib->pktlen));
pattrib->last_txcmdsz = pattrib->pktlen;
dump_mgntframe(padapter, pmgntframe);
ret = _SUCCESS;
exit:
if (ret == _SUCCESS)
rtw_buf_update(&pmlmepriv->assoc_req, &pmlmepriv->assoc_req_len, (u8 *)pwlanhdr, pattrib->pktlen);
else
rtw_buf_free(&pmlmepriv->assoc_req, &pmlmepriv->assoc_req_len);
}
/* when wait_ack is true, this function should be called at process context */
/*
 * Build and transmit a NULL data frame to @da.
 *
 * @power_mode: non-zero sets the Power Management bit in frame control.
 * @wait_ack:   true transmits synchronously and returns the TX ack result.
 *
 * To/From DS is derived from the current MLME role (AP vs station).
 * Returns _SUCCESS or _FAIL.
 */
static int _issue_nulldata(struct adapter *padapter, unsigned char *da,
			   unsigned int power_mode, bool wait_ack)
{
	struct mlme_ext_info *mlme_info;
	struct mlme_ext_priv *mlme_ext;
	struct xmit_priv *xmit_priv;
	struct xmit_frame *mgnt_frame;
	struct pkt_attrib *attrib;
	struct ieee80211_hdr *hdr;
	unsigned char *pos;
	__le16 *frame_ctrl;
	int ret = _FAIL;

	if (!padapter)
		goto exit;

	xmit_priv = &padapter->xmitpriv;
	mlme_ext = &padapter->mlmeextpriv;
	mlme_info = &mlme_ext->mlmext_info;

	mgnt_frame = alloc_mgtxmitframe(xmit_priv);
	if (!mgnt_frame)
		goto exit;

	/* default management-frame attributes, with retry control disabled */
	attrib = &mgnt_frame->attrib;
	update_mgntframe_attrib(padapter, attrib);
	attrib->retry_ctrl = false;

	memset(mgnt_frame->buf_addr, 0, WLANHDR_OFFSET + TXDESC_OFFSET);

	pos = (u8 *)mgnt_frame->buf_addr + TXDESC_OFFSET;
	hdr = (struct ieee80211_hdr *)pos;

	frame_ctrl = &hdr->frame_control;
	*frame_ctrl = 0;

	/* direction bit follows our current role */
	if ((mlme_info->state & 0x03) == WIFI_FW_AP_STATE)
		SetFrDs(frame_ctrl);
	else if ((mlme_info->state & 0x03) == WIFI_FW_STATION_STATE)
		SetToDs(frame_ctrl);

	if (power_mode)
		SetPwrMgt(frame_ctrl);

	memcpy(hdr->addr1, da, ETH_ALEN);
	memcpy(hdr->addr2, myid(&padapter->eeprompriv), ETH_ALEN);
	memcpy(hdr->addr3, get_my_bssid(&mlme_info->network), ETH_ALEN);

	SetSeqNum(hdr, mlme_ext->mgnt_seq);
	mlme_ext->mgnt_seq++;
	SetFrameSubType(pos, WIFI_DATA_NULL);

	/* a NULL frame is header-only */
	pos += sizeof(struct ieee80211_hdr_3addr);
	attrib->pktlen = sizeof(struct ieee80211_hdr_3addr);
	attrib->last_txcmdsz = attrib->pktlen;

	if (!wait_ack) {
		dump_mgntframe(padapter, mgnt_frame);
		ret = _SUCCESS;
	} else {
		ret = dump_mgntframe_and_wait_ack(padapter, mgnt_frame);
	}

exit:
	return ret;
}
/*
* [IMPORTANT] Don't call this function in interrupt context
*
* When wait_ms > 0, this function should be called at process context
* da == NULL for station mode
*/
int issue_nulldata(struct adapter *padapter, unsigned char *da, unsigned int power_mode, int try_cnt, int wait_ms)
{
int ret;
int i = 0;
struct mlme_ext_priv *pmlmeext = &(padapter->mlmeextpriv);
struct mlme_ext_info *pmlmeinfo = &(pmlmeext->mlmext_info);
struct sta_info *psta;
/* da == NULL, assume it's null data for sta to ap*/
if (!da)
da = get_my_bssid(&(pmlmeinfo->network));
psta = rtw_get_stainfo(&padapter->stapriv, da);
if (psta) {
if (power_mode)
rtw_hal_macid_sleep(padapter, psta->mac_id);
else
rtw_hal_macid_wakeup(padapter, psta->mac_id);
} else {
rtw_warn_on(1);
}
do {
ret = _issue_nulldata(padapter, da, power_mode, wait_ms > 0);
i++;
if (padapter->bDriverStopped || padapter->bSurpriseRemoved)
break;
if (i < try_cnt && wait_ms > 0 && ret == _FAIL)
msleep(wait_ms);
} while ((i < try_cnt) && ((ret == _FAIL) || (wait_ms == 0)));
if (ret != _FAIL) {
ret = _SUCCESS;
#ifndef DBG_XMIT_ACK
goto exit;
#endif
}
exit:
return ret;
}
/*
* [IMPORTANT] This function run in interrupt context
*
* The null data packet would be sent without power bit,
* and not guarantee success.
*/
/*
 * Fire off a single NULL data frame from interrupt context.
 * No power-management bit is set and no ack is awaited, so the call never
 * blocks; delivery is not guaranteed.  Returns _SUCCESS or _FAIL.
 */
s32 issue_nulldata_in_interrupt(struct adapter *padapter, u8 *da)
{
	struct mlme_ext_priv *pmlmeext = &padapter->mlmeextpriv;
	struct mlme_ext_info *pmlmeinfo = &pmlmeext->mlmext_info;

	/* a NULL destination means "to our AP" in station mode */
	if (!da)
		da = get_my_bssid(&pmlmeinfo->network);

	return _issue_nulldata(padapter, da, 0, false);
}
/* when wait_ack is true, this function should be called at process context */
/*
 * Build and transmit a QoS NULL data frame for traffic class @tid.
 *
 * The QoS control field is filled with @tid, EOSP = 1 and ack policy 0;
 * To/From DS is set from the current MLME role.  When @wait_ack is true
 * the call blocks for the TX ack report.  Returns _SUCCESS or _FAIL.
 */
static int _issue_qos_nulldata(struct adapter *padapter, unsigned char *da,
			       u16 tid, bool wait_ack)
{
	int ret = _FAIL;
	struct xmit_frame *pmgntframe;
	struct pkt_attrib *pattrib;
	unsigned char *pframe;
	struct ieee80211_hdr *pwlanhdr;
	__le16 *fctrl;
	u16 *qc;
	struct xmit_priv *pxmitpriv = &(padapter->xmitpriv);
	struct mlme_ext_priv *pmlmeext = &(padapter->mlmeextpriv);
	struct mlme_ext_info *pmlmeinfo = &(pmlmeext->mlmext_info);

	pmgntframe = alloc_mgtxmitframe(pxmitpriv);
	if (!pmgntframe)
		goto exit;

	/* update attribute */
	pattrib = &pmgntframe->attrib;
	update_mgntframe_attrib(padapter, pattrib);

	/* two extra header bytes for the QoS control field */
	pattrib->hdrlen += 2;
	pattrib->qos_en = true;
	pattrib->eosp = 1;
	pattrib->ack_policy = 0;
	pattrib->mdata = 0;

	memset(pmgntframe->buf_addr, 0, WLANHDR_OFFSET + TXDESC_OFFSET);

	pframe = (u8 *)(pmgntframe->buf_addr) + TXDESC_OFFSET;
	pwlanhdr = (struct ieee80211_hdr *)pframe;

	fctrl = &(pwlanhdr->frame_control);
	*(fctrl) = 0;

	/* direction bit follows our current role */
	if ((pmlmeinfo->state&0x03) == WIFI_FW_AP_STATE)
		SetFrDs(fctrl);
	else if ((pmlmeinfo->state&0x03) == WIFI_FW_STATION_STATE)
		SetToDs(fctrl);

	/* QoS control: the last two bytes of the extended header */
	qc = (unsigned short *)(pframe + pattrib->hdrlen - 2);

	SetPriority(qc, tid);

	SetEOSP(qc, pattrib->eosp);

	SetAckpolicy(qc, pattrib->ack_policy);

	memcpy(pwlanhdr->addr1, da, ETH_ALEN);
	memcpy(pwlanhdr->addr2, myid(&(padapter->eeprompriv)), ETH_ALEN);
	memcpy(pwlanhdr->addr3, get_my_bssid(&(pmlmeinfo->network)), ETH_ALEN);

	SetSeqNum(pwlanhdr, pmlmeext->mgnt_seq);
	pmlmeext->mgnt_seq++;
	SetFrameSubType(pframe, WIFI_QOS_DATA_NULL);

	/* QoS NULL is header-only */
	pframe += sizeof(struct ieee80211_qos_hdr);
	pattrib->pktlen = sizeof(struct ieee80211_qos_hdr);

	pattrib->last_txcmdsz = pattrib->pktlen;

	if (wait_ack) {
		ret = dump_mgntframe_and_wait_ack(padapter, pmgntframe);
	} else {
		dump_mgntframe(padapter, pmgntframe);
		ret = _SUCCESS;
	}

exit:
	return ret;
}
/* when wait_ms >0 , this function should be called at process context */
/* da == NULL for station mode */
/*
 * Issue QoS NULL data frames for @tid with retry.
 *
 * Up to @try_cnt attempts are made.  When @wait_ms > 0 each attempt waits
 * for the TX ack report and failed attempts are separated by @wait_ms
 * sleeps; when @wait_ms == 0 all attempts are fired back to back.
 * A NULL @da targets the current BSSID (station-to-AP).
 * Returns _SUCCESS once an attempt did not fail, _FAIL otherwise.
 */
int issue_qos_nulldata(struct adapter *padapter, unsigned char *da, u16 tid, int try_cnt, int wait_ms)
{
	struct mlme_ext_priv *pmlmeext = &padapter->mlmeextpriv;
	struct mlme_ext_info *pmlmeinfo = &pmlmeext->mlmext_info;
	int attempts = 0;
	int ret;

	/* da == NULL, assume it's null data for sta to ap */
	if (!da)
		da = get_my_bssid(&pmlmeinfo->network);

	for (;;) {
		ret = _issue_qos_nulldata(padapter, da, tid, wait_ms > 0);
		attempts++;

		/* abort immediately when the interface is going away */
		if (padapter->bDriverStopped || padapter->bSurpriseRemoved)
			break;

		/* out of attempts */
		if (attempts >= try_cnt)
			break;

		/* an attempt succeeded and we were actually waiting for acks */
		if (ret != _FAIL && wait_ms != 0)
			break;

		if (wait_ms > 0 && ret == _FAIL)
			msleep(wait_ms);
	}

	if (ret != _FAIL)
		ret = _SUCCESS;

	return ret;
}
/*
 * Build and send one deauthentication frame to @da carrying @reason.
 * When @wait_ack is set, transmission is synchronous and the TX ack status
 * is returned; otherwise the frame is queued and _SUCCESS is returned.
 */
static int _issue_deauth(struct adapter *padapter, unsigned char *da,
			 unsigned short reason, bool wait_ack)
{
	struct xmit_priv *xmitpriv = &padapter->xmitpriv;
	struct mlme_ext_priv *mlmeext = &padapter->mlmeextpriv;
	struct mlme_ext_info *mlmeinfo = &mlmeext->mlmext_info;
	struct xmit_frame *mgnt_frame;
	struct pkt_attrib *attrib;
	struct ieee80211_hdr *hdr;
	unsigned char *pos;
	__le16 *frame_ctrl;
	__le16 reason_le;
	int ret = _FAIL;

	mgnt_frame = alloc_mgtxmitframe(xmitpriv);
	if (!mgnt_frame)
		goto exit;

	/* default management-frame attributes, with retry control disabled */
	attrib = &mgnt_frame->attrib;
	update_mgntframe_attrib(padapter, attrib);
	attrib->retry_ctrl = false;

	memset(mgnt_frame->buf_addr, 0, WLANHDR_OFFSET + TXDESC_OFFSET);

	pos = (u8 *)mgnt_frame->buf_addr + TXDESC_OFFSET;
	hdr = (struct ieee80211_hdr *)pos;

	frame_ctrl = &hdr->frame_control;
	*frame_ctrl = 0;

	memcpy(hdr->addr1, da, ETH_ALEN);
	memcpy(hdr->addr2, myid(&padapter->eeprompriv), ETH_ALEN);
	memcpy(hdr->addr3, get_my_bssid(&mlmeinfo->network), ETH_ALEN);

	SetSeqNum(hdr, mlmeext->mgnt_seq);
	mlmeext->mgnt_seq++;
	SetFrameSubType(pos, WIFI_DEAUTH);

	pos += sizeof(struct ieee80211_hdr_3addr);
	attrib->pktlen = sizeof(struct ieee80211_hdr_3addr);

	/* fixed field: reason code, little endian on the air */
	reason_le = cpu_to_le16(reason);
	pos = rtw_set_fixed_ie(pos, _RSON_CODE_, (unsigned char *)&reason_le, &attrib->pktlen);

	attrib->last_txcmdsz = attrib->pktlen;

	if (!wait_ack) {
		dump_mgntframe(padapter, mgnt_frame);
		ret = _SUCCESS;
	} else {
		ret = dump_mgntframe_and_wait_ack(padapter, mgnt_frame);
	}

exit:
	return ret;
}
int issue_deauth(struct adapter *padapter, unsigned char *da, unsigned short reason)
{
return _issue_deauth(padapter, da, reason, false);
}
int issue_deauth_ex(struct adapter *padapter, u8 *da, unsigned short reason, int try_cnt,
int wait_ms)
{
int ret;
int i = 0;
do {
ret = _issue_deauth(padapter, da, reason, wait_ms > 0);
i++;
if (padapter->bDriverStopped || padapter->bSurpriseRemoved)
break;
if (i < try_cnt && wait_ms > 0 && ret == _FAIL)
mdelay(wait_ms);
} while ((i < try_cnt) && ((ret == _FAIL) || (wait_ms == 0)));
if (ret != _FAIL) {
ret = _SUCCESS;
#ifndef DBG_XMIT_ACK
goto exit;
#endif
}
exit:
return ret;
}
/*
 * Build and transmit an SA Query action frame (802.11w).
 *
 * @raddr:  receiver address; NULL targets the current BSSID.
 * @action: 0 = SA Query request (uses and advances pmlmeext->sa_query_seq,
 *          then arms a 1 s response timer), 1 = SA Query response (echoes
 *          the transaction identifier @tid).
 * @tid:    transaction identifier echoed in a response.
 */
void issue_action_SA_Query(struct adapter *padapter, unsigned char *raddr, unsigned char action, unsigned short tid)
{
	u8 category = RTW_WLAN_CATEGORY_SA_QUERY;
	struct xmit_frame *pmgntframe;
	struct pkt_attrib *pattrib;
	u8 *pframe;
	struct ieee80211_hdr *pwlanhdr;
	__le16 *fctrl;
	struct xmit_priv *pxmitpriv = &(padapter->xmitpriv);
	struct mlme_ext_priv *pmlmeext = &(padapter->mlmeextpriv);
	struct mlme_ext_info *pmlmeinfo = &(pmlmeext->mlmext_info);
	__le16 le_tmp;

	pmgntframe = alloc_mgtxmitframe(pxmitpriv);
	if (!pmgntframe)
		return;

	/* update attribute */
	pattrib = &pmgntframe->attrib;
	update_mgntframe_attrib(padapter, pattrib);

	memset(pmgntframe->buf_addr, 0, WLANHDR_OFFSET + TXDESC_OFFSET);

	pframe = (u8 *)(pmgntframe->buf_addr) + TXDESC_OFFSET;
	pwlanhdr = (struct ieee80211_hdr *)pframe;

	fctrl = &(pwlanhdr->frame_control);
	*(fctrl) = 0;

	if (raddr)
		memcpy(pwlanhdr->addr1, raddr, ETH_ALEN);
	else
		memcpy(pwlanhdr->addr1, get_my_bssid(&(pmlmeinfo->network)), ETH_ALEN);
	memcpy(pwlanhdr->addr2, myid(&(padapter->eeprompriv)), ETH_ALEN);
	memcpy(pwlanhdr->addr3, get_my_bssid(&(pmlmeinfo->network)), ETH_ALEN);

	SetSeqNum(pwlanhdr, pmlmeext->mgnt_seq);
	pmlmeext->mgnt_seq++;
	SetFrameSubType(pframe, WIFI_ACTION);

	pframe += sizeof(struct ieee80211_hdr_3addr);
	pattrib->pktlen = sizeof(struct ieee80211_hdr_3addr);

	/* action frame body: category, action, then per-action fields */
	pframe = rtw_set_fixed_ie(pframe, 1, &category, &pattrib->pktlen);
	pframe = rtw_set_fixed_ie(pframe, 1, &action, &pattrib->pktlen);

	switch (action) {
	case 0: /* SA Query req */
		/* NOTE(review): sa_query_seq is copied host-endian here while
		 * the response path below converts with cpu_to_le16() — verify
		 * behavior on big-endian targets. */
		pframe = rtw_set_fixed_ie(pframe, 2, (unsigned char *)&pmlmeext->sa_query_seq, &pattrib->pktlen);
		pmlmeext->sa_query_seq++;
		/* send sa query request to AP, AP should reply sa query response in 1 second */
		set_sa_query_timer(pmlmeext, 1000);
		break;

	case 1: /* SA Query rsp */
		le_tmp = cpu_to_le16(tid);
		pframe = rtw_set_fixed_ie(pframe, 2, (unsigned char *)&le_tmp, &pattrib->pktlen);
		break;
	default:
		break;
	}

	pattrib->last_txcmdsz = pattrib->pktlen;

	dump_mgntframe(padapter, pmgntframe);
}
/*
 * Build and transmit a Block Ack action frame to @raddr.
 *
 * @action: 0 = ADDBA request, 1 = ADDBA response, 2 = DELBA.
 * @status: multiplexed by @action — the TID for an ADDBA request, the
 *          16-bit status code for an ADDBA response, and the TID for a
 *          DELBA (packed into its parameter set).
 */
void issue_action_BA(struct adapter *padapter, unsigned char *raddr, unsigned char action, unsigned short status)
{
	u8 category = RTW_WLAN_CATEGORY_BACK;
	u16 start_seq;
	u16 BA_para_set;
	u16 reason_code;
	u16 BA_timeout_value;
	u16 BA_starting_seqctrl = 0;
	enum ieee80211_max_ampdu_length_exp max_rx_ampdu_factor;
	struct xmit_frame *pmgntframe;
	struct pkt_attrib *pattrib;
	u8 *pframe;
	struct ieee80211_hdr *pwlanhdr;
	__le16 *fctrl;
	struct xmit_priv *pxmitpriv = &(padapter->xmitpriv);
	struct mlme_ext_priv *pmlmeext = &(padapter->mlmeextpriv);
	struct mlme_ext_info *pmlmeinfo = &(pmlmeext->mlmext_info);
	struct sta_info *psta;
	struct sta_priv *pstapriv = &padapter->stapriv;
	struct registry_priv *pregpriv = &padapter->registrypriv;
	__le16 le_tmp;

	pmgntframe = alloc_mgtxmitframe(pxmitpriv);
	if (!pmgntframe)
		return;

	/* update attribute */
	pattrib = &pmgntframe->attrib;
	update_mgntframe_attrib(padapter, pattrib);

	memset(pmgntframe->buf_addr, 0, WLANHDR_OFFSET + TXDESC_OFFSET);

	pframe = (u8 *)(pmgntframe->buf_addr) + TXDESC_OFFSET;
	pwlanhdr = (struct ieee80211_hdr *)pframe;

	fctrl = &(pwlanhdr->frame_control);
	*(fctrl) = 0;

	/* memcpy(pwlanhdr->addr1, get_my_bssid(&(pmlmeinfo->network)), ETH_ALEN); */
	memcpy(pwlanhdr->addr1, raddr, ETH_ALEN);
	memcpy(pwlanhdr->addr2, myid(&(padapter->eeprompriv)), ETH_ALEN);
	memcpy(pwlanhdr->addr3, get_my_bssid(&(pmlmeinfo->network)), ETH_ALEN);

	SetSeqNum(pwlanhdr, pmlmeext->mgnt_seq);
	pmlmeext->mgnt_seq++;
	SetFrameSubType(pframe, WIFI_ACTION);

	pframe += sizeof(struct ieee80211_hdr_3addr);
	pattrib->pktlen = sizeof(struct ieee80211_hdr_3addr);

	pframe = rtw_set_fixed_ie(pframe, 1, &(category), &(pattrib->pktlen));
	pframe = rtw_set_fixed_ie(pframe, 1, &(action), &(pattrib->pktlen));

	if (category == 3) {
		switch (action) {
		case 0: /* ADDBA req */
			/* dialog token must never be zero */
			do {
				pmlmeinfo->dialogToken++;
			} while (pmlmeinfo->dialogToken == 0);
			pframe = rtw_set_fixed_ie(pframe, 1, &(pmlmeinfo->dialogToken), &(pattrib->pktlen));

			/* build the ADDBA parameter set field */
			if (hal_btcoex_IsBTCoexCtrlAMPDUSize(padapter)) {
				/* A-MSDU NOT Supported */
				BA_para_set = 0;
				/* immediate Block Ack */
				BA_para_set |= BIT(1) & IEEE80211_ADDBA_PARAM_POLICY_MASK;
				/* TID */
				BA_para_set |= (status << 2) & IEEE80211_ADDBA_PARAM_TID_MASK;
				/* max buffer size is 8 MSDU */
				BA_para_set |= (8 << 6) & IEEE80211_ADDBA_PARAM_BUF_SIZE_MASK;
			} else {
				BA_para_set = (0x1002 | ((status & 0xf) << 2)); /* immediate ack & 64 buffer size */
			}
			le_tmp = cpu_to_le16(BA_para_set);
			pframe = rtw_set_fixed_ie(pframe, 2, (unsigned char *)(&(le_tmp)), &(pattrib->pktlen));

			BA_timeout_value = 5000;/* 5ms */
			le_tmp = cpu_to_le16(BA_timeout_value);
			pframe = rtw_set_fixed_ie(pframe, 2, (unsigned char *)(&(le_tmp)), &(pattrib->pktlen));

			/* starting sequence: next TX seq for this TID, shifted
			 * into the SSN position of the sequence control field */
			/* if ((psta = rtw_get_stainfo(pstapriv, pmlmeinfo->network.mac_address)) != NULL) */
			psta = rtw_get_stainfo(pstapriv, raddr);
			if (psta) {
				start_seq = (psta->sta_xmitpriv.txseq_tid[status & 0x07]&0xfff) + 1;

				psta->BA_starting_seqctrl[status & 0x07] = start_seq;

				BA_starting_seqctrl = start_seq << 4;
			}
			le_tmp = cpu_to_le16(BA_starting_seqctrl);
			pframe = rtw_set_fixed_ie(pframe, 2, (unsigned char *)(&(le_tmp)), &(pattrib->pktlen));
			break;

		case 1: /* ADDBA rsp */
			pframe = rtw_set_fixed_ie(pframe, 1, &(pmlmeinfo->ADDBA_req.dialog_token), &(pattrib->pktlen));
			/* NOTE(review): status is copied host-endian here while
			 * the other 16-bit fields use cpu_to_le16() — verify on
			 * big-endian targets. */
			pframe = rtw_set_fixed_ie(pframe, 2, (unsigned char *)(&status), &(pattrib->pktlen));

			/* advertised RX A-MPDU factor: driver override if set,
			 * otherwise the HAL default */
			if (padapter->driver_rx_ampdu_factor != 0xFF)
				max_rx_ampdu_factor =
					(enum ieee80211_max_ampdu_length_exp)padapter->driver_rx_ampdu_factor;
			else
				rtw_hal_get_def_var(padapter,
						    HW_VAR_MAX_RX_AMPDU_FACTOR, &max_rx_ampdu_factor);

			/* clamp the requested parameter set to our buffer size */
			if (max_rx_ampdu_factor == IEEE80211_HT_MAX_AMPDU_64K)
				BA_para_set = ((le16_to_cpu(pmlmeinfo->ADDBA_req.BA_para_set) & 0x3f) | 0x1000); /* 64 buffer size */
			else if (max_rx_ampdu_factor == IEEE80211_HT_MAX_AMPDU_32K)
				BA_para_set = ((le16_to_cpu(pmlmeinfo->ADDBA_req.BA_para_set) & 0x3f) | 0x0800); /* 32 buffer size */
			else if (max_rx_ampdu_factor == IEEE80211_HT_MAX_AMPDU_16K)
				BA_para_set = ((le16_to_cpu(pmlmeinfo->ADDBA_req.BA_para_set) & 0x3f) | 0x0400); /* 16 buffer size */
			else if (max_rx_ampdu_factor == IEEE80211_HT_MAX_AMPDU_8K)
				BA_para_set = ((le16_to_cpu(pmlmeinfo->ADDBA_req.BA_para_set) & 0x3f) | 0x0200); /* 8 buffer size */
			else
				BA_para_set = ((le16_to_cpu(pmlmeinfo->ADDBA_req.BA_para_set) & 0x3f) | 0x1000); /* 64 buffer size */

			if (hal_btcoex_IsBTCoexCtrlAMPDUSize(padapter) &&
			    padapter->driver_rx_ampdu_factor == 0xFF) {
				/* max buffer size is 8 MSDU */
				BA_para_set &= ~IEEE80211_ADDBA_PARAM_BUF_SIZE_MASK;
				BA_para_set |= (8 << 6) & IEEE80211_ADDBA_PARAM_BUF_SIZE_MASK;
			}

			/* A-MSDU-in-A-MPDU bit from module configuration */
			if (pregpriv->ampdu_amsdu == 0)/* disabled */
				le_tmp = cpu_to_le16(BA_para_set & ~BIT(0));
			else if (pregpriv->ampdu_amsdu == 1)/* enabled */
				le_tmp = cpu_to_le16(BA_para_set | BIT(0));
			else /* auto */
				le_tmp = cpu_to_le16(BA_para_set);
			pframe = rtw_set_fixed_ie(pframe, 2, (unsigned char *)(&(le_tmp)), &(pattrib->pktlen));

			/* echo the requested timeout back unchanged */
			pframe = rtw_set_fixed_ie(pframe, 2, (unsigned char *)(&(pmlmeinfo->ADDBA_req.BA_timeout_value)), &(pattrib->pktlen));
			break;

		case 2:/* DELBA */
			BA_para_set = (status & 0x1F) << 3;
			le_tmp = cpu_to_le16(BA_para_set);
			pframe = rtw_set_fixed_ie(pframe, 2, (unsigned char *)(&(le_tmp)), &(pattrib->pktlen));

			/* reason code 37 — presumably "peer does not want to
			 * use the mechanism"; confirm against the 802.11
			 * reason-code table */
			reason_code = 37;
			le_tmp = cpu_to_le16(reason_code);
			pframe = rtw_set_fixed_ie(pframe, 2, (unsigned char *)(&(le_tmp)), &(pattrib->pktlen));
			break;
		default:
			break;
		}
	}

	pattrib->last_txcmdsz = pattrib->pktlen;

	dump_mgntframe(padapter, pmgntframe);
}
/*
 * issue_action_BSSCoexistPacket() - send a 20/40 BSS Coexistence public
 * action frame to our AP.
 *
 * Only sent when both 40 MHz-intolerant STAs and non-HT (legacy) BSSes
 * were counted during the last scan, and the BSS bandwidth has not
 * already been updated.  The frame carries a "20 MHz BSS Width Request"
 * element plus Intolerant Channel Report element(s) listing the 2.4 GHz
 * channels on which legacy BSSes were heard.
 */
static void issue_action_BSSCoexistPacket(struct adapter *padapter)
{
	struct list_head *plist, *phead;
	unsigned char category, action;
	struct xmit_frame *pmgntframe;
	struct pkt_attrib *pattrib;
	unsigned char *pframe;
	struct ieee80211_hdr *pwlanhdr;
	__le16 *fctrl;
	struct wlan_network *pnetwork = NULL;
	struct xmit_priv *pxmitpriv = &(padapter->xmitpriv);
	struct mlme_priv *pmlmepriv = &padapter->mlmepriv;
	struct mlme_ext_priv *pmlmeext = &(padapter->mlmeextpriv);
	struct mlme_ext_info *pmlmeinfo = &(pmlmeext->mlmext_info);
	struct __queue *queue = &(pmlmepriv->scanned_queue);
	u8 InfoContent[16] = {0};
	u8 ICS[8][15];	/* intolerant-channel map: ICS[class][channel] */

	/* need at least one intolerant STA AND one legacy BSS observed */
	if ((pmlmepriv->num_FortyMHzIntolerant == 0) || (pmlmepriv->num_sta_no_ht == 0))
		return;

	/* bandwidth already adjusted; nothing left to request */
	if (true == pmlmeinfo->bwmode_updated)
		return;

	category = RTW_WLAN_CATEGORY_PUBLIC;
	action = ACT_PUBLIC_BSSCOEXIST;

	pmgntframe = alloc_mgtxmitframe(pxmitpriv);
	if (!pmgntframe)
		return;

	/* update attribute */
	pattrib = &pmgntframe->attrib;
	update_mgntframe_attrib(padapter, pattrib);

	memset(pmgntframe->buf_addr, 0, WLANHDR_OFFSET + TXDESC_OFFSET);

	pframe = (u8 *)(pmgntframe->buf_addr) + TXDESC_OFFSET;
	pwlanhdr = (struct ieee80211_hdr *)pframe;

	fctrl = &(pwlanhdr->frame_control);
	*(fctrl) = 0;

	/* unicast action frame to the AP (addr1 == addr3 == BSSID) */
	memcpy(pwlanhdr->addr1, get_my_bssid(&(pmlmeinfo->network)), ETH_ALEN);
	memcpy(pwlanhdr->addr2, myid(&(padapter->eeprompriv)), ETH_ALEN);
	memcpy(pwlanhdr->addr3, get_my_bssid(&(pmlmeinfo->network)), ETH_ALEN);

	SetSeqNum(pwlanhdr, pmlmeext->mgnt_seq);
	pmlmeext->mgnt_seq++;
	SetFrameSubType(pframe, WIFI_ACTION);

	pframe += sizeof(struct ieee80211_hdr_3addr);
	pattrib->pktlen = sizeof(struct ieee80211_hdr_3addr);

	/* action frame body starts with category and action code */
	pframe = rtw_set_fixed_ie(pframe, 1, &(category), &(pattrib->pktlen));
	pframe = rtw_set_fixed_ie(pframe, 1, &(action), &(pattrib->pktlen));

	/* 20/40 BSS Coexistence element asking the AP to fall back to 20 MHz */
	if (pmlmepriv->num_FortyMHzIntolerant > 0) {
		u8 iedata = 0;
		iedata |= BIT(2);/* 20 MHz BSS Width Request */
		pframe = rtw_set_ie(pframe, WLAN_EID_BSS_COEX_2040, 1, &iedata, &(pattrib->pktlen));
	}

	/* build the intolerant-channel map from the scan result list */
	memset(ICS, 0, sizeof(ICS));
	if (pmlmepriv->num_sta_no_ht > 0) {
		int i;

		spin_lock_bh(&(pmlmepriv->scanned_queue.lock));
		phead = get_list_head(queue);
		plist = get_next(phead);
		while (1) {
			int len;
			u8 *p;
			struct wlan_bssid_ex *pbss_network;

			if (phead == plist)
				break;
			pnetwork = container_of(plist, struct wlan_network, list);
			plist = get_next(plist);
			pbss_network = (struct wlan_bssid_ex *)&pnetwork->network;
			p = rtw_get_ie(pbss_network->ies + _FIXED_IE_LENGTH_, WLAN_EID_HT_CAPABILITY, &len, pbss_network->ie_length - _FIXED_IE_LENGTH_);
			if (!p || len == 0) {/* non-HT */
				/* NOTE(review): ds_config is used as an index into
				 * ICS[0][15]; presumably it is always a 2.4 GHz
				 * channel (<= 14) here - worth confirming
				 */
				if (pbss_network->configuration.ds_config <= 0)
					continue;
				ICS[0][pbss_network->configuration.ds_config] = 1;
				/* ICS[class][0] marks the class as populated */
				if (ICS[0][0] == 0)
					ICS[0][0] = 1;
			}
		}
		spin_unlock_bh(&(pmlmepriv->scanned_queue.lock));

		/* emit one Intolerant Channel Report element per marked class */
		for (i = 0; i < 8; i++) {
			if (ICS[i][0] == 1) {
				int j, k = 0;

				InfoContent[k] = i;
				/* SET_BSS_INTOLERANT_ELE_REG_CLASS(InfoContent, i); */
				k++;
				for (j = 1; j <= 14; j++) {
					if (ICS[i][j] == 1) {
						if (k < 16) {
							InfoContent[k] = j; /* channel number */
							/* SET_BSS_INTOLERANT_ELE_CHANNEL(InfoContent+k, j); */
							k++;
						}
					}
				}
				pframe = rtw_set_ie(pframe, WLAN_EID_BSS_INTOLERANT_CHL_REPORT, k, InfoContent, &(pattrib->pktlen));
			}
		}
	}
	pattrib->last_txcmdsz = pattrib->pktlen;
	dump_mgntframe(padapter, pmgntframe);
}
/*
 * send_delba() - tear down every active block-ack session with a peer.
 * @initiator: 0 = tear down recipient-side sessions, 1 = originator-side.
 *
 * Issues one DELBA action frame per active TID and clears the matching
 * local session state.  Always returns _SUCCESS.
 */
unsigned int send_delba(struct adapter *padapter, u8 initiator, u8 *addr)
{
	struct sta_priv *stapriv = &padapter->stapriv;
	struct mlme_ext_priv *mlmeext = &padapter->mlmeextpriv;
	struct mlme_ext_info *mlmeinfo = &mlmeext->mlmext_info;
	struct sta_info *sta;
	u16 tid;

	/* outside AP mode we must be associated before sending DELBA */
	if ((mlmeinfo->state & 0x03) != WIFI_FW_AP_STATE &&
	    !(mlmeinfo->state & WIFI_FW_ASSOC_SUCCESS))
		return _SUCCESS;

	sta = rtw_get_stainfo(stapriv, addr);
	if (!sta)
		return _SUCCESS;

	switch (initiator) {
	case 0: /* recipient */
		for (tid = 0; tid < MAXTID; tid++) {
			if (!sta->recvreorder_ctrl[tid].enable)
				continue;
			issue_action_BA(padapter, addr, WLAN_ACTION_DELBA,
					((tid << 1) | initiator) & 0x1F);
			sta->recvreorder_ctrl[tid].enable = false;
			sta->recvreorder_ctrl[tid].indicate_seq = 0xffff;
		}
		break;
	case 1: /* originator */
		for (tid = 0; tid < MAXTID; tid++) {
			if (!(sta->htpriv.agg_enable_bitmap & BIT(tid)))
				continue;
			issue_action_BA(padapter, addr, WLAN_ACTION_DELBA,
					((tid << 1) | initiator) & 0x1F);
			sta->htpriv.agg_enable_bitmap &= ~BIT(tid);
			sta->htpriv.candidate_tid_bitmap &= ~BIT(tid);
		}
		break;
	}

	return _SUCCESS;
}
/*
 * send_beacon() - download a beacon to the hardware and poll for the
 * "beacon valid" indication.
 *
 * Retries the download up to 100 times, polling the valid flag in
 * bursts of 10 between retries, and aborts early on surprise removal
 * or driver stop.  Returns _SUCCESS once the chip accepts the beacon,
 * _FAIL otherwise.
 */
unsigned int send_beacon(struct adapter *padapter)
{
	u8 accepted = false;
	int tries = 0;
	int polls = 0;

	rtw_hal_set_hwreg(padapter, HW_VAR_BCN_VALID, NULL);
	rtw_hal_set_hwreg(padapter, HW_VAR_DL_BCN_SEL, NULL);

	do {
		issue_beacon(padapter, 100);
		tries++;
		do {
			cond_resched();
			rtw_hal_get_hwreg(padapter, HW_VAR_BCN_VALID, (u8 *)&accepted);
			polls++;
		} while (polls % 10 != 0 && !accepted &&
			 !padapter->bSurpriseRemoved && !padapter->bDriverStopped);
	} while (!accepted && tries < 100 &&
		 !padapter->bSurpriseRemoved && !padapter->bDriverStopped);

	if (padapter->bSurpriseRemoved || padapter->bDriverStopped)
		return _FAIL;

	return accepted ? _SUCCESS : _FAIL;
}
/****************************************************************************
Following are some utility functions for WiFi MLME
*****************************************************************************/
/*
 * site_survey() - scan one channel of the pending site survey, or
 * finish the survey when the channel list is exhausted.
 *
 * Called repeatedly (via the survey timer) while a scan is in progress:
 * each invocation switches to the next requested channel, optionally
 * transmits probe requests (active scan) and re-arms the timer.  Once
 * no channel remains it restores the operating channel, RX gain and
 * dynamic mechanisms, reports the survey-done event and sends 20/40
 * coexistence notifications.
 */
void site_survey(struct adapter *padapter)
{
	unsigned char survey_channel = 0, val8;
	enum rt_scan_type ScanType = SCAN_PASSIVE;
	struct mlme_ext_priv *pmlmeext = &padapter->mlmeextpriv;
	struct mlme_ext_info *pmlmeinfo = &(pmlmeext->mlmext_info);
	u32 initialgain = 0;
	u32 channel_scan_time_ms = 0;

	{
		struct rtw_ieee80211_channel *ch;

		/* pick the next channel from the survey request, if any left;
		 * survey_channel stays 0 when the list is exhausted
		 */
		if (pmlmeext->sitesurvey_res.channel_idx < pmlmeext->sitesurvey_res.ch_num) {
			ch = &pmlmeext->sitesurvey_res.ch[pmlmeext->sitesurvey_res.channel_idx];
			survey_channel = ch->hw_value;
			ScanType = (ch->flags & RTW_IEEE80211_CHAN_PASSIVE_SCAN) ? SCAN_PASSIVE : SCAN_ACTIVE;
		}
	}
	if (survey_channel != 0) {
		/* PAUSE 4-AC Queue when site_survey */
		/* rtw_hal_get_hwreg(padapter, HW_VAR_TXPAUSE, (u8 *)(&val8)); */
		/* val8 |= 0x0f; */
		/* rtw_hal_set_hwreg(padapter, HW_VAR_TXPAUSE, (u8 *)(&val8)); */

		/* first channel of the survey uses a full channel+bandwidth
		 * switch; subsequent channels only need a channel change
		 */
		if (pmlmeext->sitesurvey_res.channel_idx == 0) {
#ifdef DBG_FIXED_CHAN
			if (pmlmeext->fixed_chan != 0xff)
				set_channel_bwmode(padapter, pmlmeext->fixed_chan, HAL_PRIME_CHNL_OFFSET_DONT_CARE, CHANNEL_WIDTH_20);
			else
#endif
				set_channel_bwmode(padapter, survey_channel, HAL_PRIME_CHNL_OFFSET_DONT_CARE, CHANNEL_WIDTH_20);
		} else {
#ifdef DBG_FIXED_CHAN
			if (pmlmeext->fixed_chan != 0xff)
				SelectChannel(padapter, pmlmeext->fixed_chan);
			else
#endif
				SelectChannel(padapter, survey_channel);
		}
		if (ScanType == SCAN_ACTIVE) { /* obey the channel plan setting... */
			{
				int i;

				/* directed probe requests for each requested SSID */
				for (i = 0; i < RTW_SSID_SCAN_AMOUNT; i++) {
					if (pmlmeext->sitesurvey_res.ssid[i].ssid_length) {
						/* IOT issue, When wifi_spec is not set, send one probe req without WPS IE. */
						if (padapter->registrypriv.wifi_spec)
							issue_probereq(padapter, &(pmlmeext->sitesurvey_res.ssid[i]), NULL);
						else
							issue_probereq_ex(padapter, &(pmlmeext->sitesurvey_res.ssid[i]), NULL, 0, 0, 0, 0);
						/* second probe req on purpose (see IOT note above)
						 * NOTE(review): confirm duplication is intended
						 */
						issue_probereq(padapter, &(pmlmeext->sitesurvey_res.ssid[i]), NULL);
					}
				}
				/* broadcast (wildcard SSID) probe requests */
				if (pmlmeext->sitesurvey_res.scan_mode == SCAN_ACTIVE) {
					/* IOT issue, When wifi_spec is not set, send one probe req without WPS IE. */
					if (padapter->registrypriv.wifi_spec)
						issue_probereq(padapter, NULL, NULL);
					else
						issue_probereq_ex(padapter, NULL, NULL, 0, 0, 0, 0);
					/* second probe req on purpose (see IOT note above) */
					issue_probereq(padapter, NULL, NULL);
				}
			}
		}
		/* dwell on this channel until the survey timer fires again */
		channel_scan_time_ms = pmlmeext->chan_scan_time;
		set_survey_timer(pmlmeext, channel_scan_time_ms);
	} else {
		/* channel number is 0 or this channel is not valid. */
		{
			pmlmeext->sitesurvey_res.state = SCAN_COMPLETE;
			/* switch back to the original channel */
			/* SelectChannel(padapter, pmlmeext->cur_channel, pmlmeext->cur_ch_offset); */
			set_channel_bwmode(padapter, pmlmeext->cur_channel, pmlmeext->cur_ch_offset, pmlmeext->cur_bwmode);
			/* flush 4-AC Queue after site_survey */
			/* val8 = 0; */
			/* rtw_hal_set_hwreg(padapter, HW_VAR_TXPAUSE, (u8 *)(&val8)); */
			/* config MSR */
			Set_MSR(padapter, (pmlmeinfo->state & 0x3));
			initialgain = 0xff; /* restore RX GAIN */
			rtw_hal_set_hwreg(padapter, HW_VAR_INITIAL_GAIN, (u8 *)(&initialgain));
			/* turn on dynamic functions */
			Restore_DM_Func_Flag(padapter);
			/* Switch_DM_Func(padapter, DYNAMIC_ALL_FUNC_ENABLE, true); */

			/* notify the AP we are back on channel (null data frame) */
			if (is_client_associated_to_ap(padapter))
				issue_nulldata(padapter, NULL, 0, 3, 500);
			val8 = 0; /* survey done */
			rtw_hal_set_hwreg(padapter, HW_VAR_MLME_SITESURVEY, (u8 *)(&val8));
			report_surveydone_event(padapter);
			pmlmeext->chan_scan_time = SURVEY_TO;
			pmlmeext->sitesurvey_res.state = SCAN_DISABLE;
			/* sent three times, apparently to improve delivery odds
			 * NOTE(review): confirm the repetition is intentional
			 */
			issue_action_BSSCoexistPacket(padapter);
			issue_action_BSSCoexistPacket(padapter);
			issue_action_BSSCoexistPacket(padapter);
		}
	}
	return;
}
/* collect bss info from Beacon and Probe request/response frames. */
u8 collect_bss_info(struct adapter *padapter, union recv_frame *precv_frame, struct wlan_bssid_ex *bssid)
{
int i;
u32 len;
u8 *p;
u16 val16, subtype;
u8 *pframe = precv_frame->u.hdr.rx_data;
u32 packet_len = precv_frame->u.hdr.len;
u8 ie_offset;
struct registry_priv *pregistrypriv = &padapter->registrypriv;
struct mlme_ext_priv *pmlmeext = &padapter->mlmeextpriv;
struct mlme_ext_info *pmlmeinfo = &(pmlmeext->mlmext_info);
__le32 le32_tmp;
len = packet_len - sizeof(struct ieee80211_hdr_3addr);
if (len > MAX_IE_SZ)
return _FAIL;
memset(bssid, 0, sizeof(struct wlan_bssid_ex));
subtype = GetFrameSubType(pframe);
if (subtype == WIFI_BEACON) {
bssid->reserved[0] = 1;
ie_offset = _BEACON_IE_OFFSET_;
} else {
/* FIXME : more type */
if (subtype == WIFI_PROBERSP) {
ie_offset = _PROBERSP_IE_OFFSET_;
bssid->reserved[0] = 3;
} else if (subtype == WIFI_PROBEREQ) {
ie_offset = _PROBEREQ_IE_OFFSET_;
bssid->reserved[0] = 2;
} else {
bssid->reserved[0] = 0;
ie_offset = _FIXED_IE_LENGTH_;
}
}
bssid->length = sizeof(struct wlan_bssid_ex) - MAX_IE_SZ + len;
/* below is to copy the information element */
bssid->ie_length = len;
memcpy(bssid->ies, (pframe + sizeof(struct ieee80211_hdr_3addr)), bssid->ie_length);
/* get the signal strength */
bssid->rssi = precv_frame->u.hdr.attrib.phy_info.RecvSignalPower; /* in dBM.raw data */
bssid->phy_info.signal_quality = precv_frame->u.hdr.attrib.phy_info.SignalQuality;/* in percentage */
bssid->phy_info.signal_strength = precv_frame->u.hdr.attrib.phy_info.SignalStrength;/* in percentage */
/* checking SSID */
p = rtw_get_ie(bssid->ies + ie_offset, WLAN_EID_SSID, &len, bssid->ie_length - ie_offset);
if (!p)
return _FAIL;
if (*(p + 1)) {
if (len > NDIS_802_11_LENGTH_SSID)
return _FAIL;
memcpy(bssid->ssid.ssid, (p + 2), *(p + 1));
bssid->ssid.ssid_length = *(p + 1);
} else
bssid->ssid.ssid_length = 0;
memset(bssid->supported_rates, 0, NDIS_802_11_LENGTH_RATES_EX);
/* checking rate info... */
i = 0;
p = rtw_get_ie(bssid->ies + ie_offset, WLAN_EID_SUPP_RATES, &len, bssid->ie_length - ie_offset);
if (p) {
if (len > NDIS_802_11_LENGTH_RATES_EX)
return _FAIL;
memcpy(bssid->supported_rates, (p + 2), len);
i = len;
}
p = rtw_get_ie(bssid->ies + ie_offset, WLAN_EID_EXT_SUPP_RATES, &len, bssid->ie_length - ie_offset);
if (p) {
if (len > (NDIS_802_11_LENGTH_RATES_EX-i))
return _FAIL;
memcpy(bssid->supported_rates + i, (p + 2), len);
}
bssid->network_type_in_use = Ndis802_11OFDM24;
if (bssid->ie_length < 12)
return _FAIL;
/* Checking for ds_config */
p = rtw_get_ie(bssid->ies + ie_offset, WLAN_EID_DS_PARAMS, &len, bssid->ie_length - ie_offset);
bssid->configuration.ds_config = 0;
bssid->configuration.length = 0;
if (p) {
bssid->configuration.ds_config = *(p + 2);
} else {
/* In 5G, some ap do not have DSSET IE */
/* checking HT info for channel */
p = rtw_get_ie(bssid->ies + ie_offset, WLAN_EID_HT_OPERATION, &len, bssid->ie_length - ie_offset);
if (p) {
struct HT_info_element *HT_info = (struct HT_info_element *)(p + 2);
bssid->configuration.ds_config = HT_info->primary_channel;
} else { /* use current channel */
bssid->configuration.ds_config = rtw_get_oper_ch(padapter);
}
}
memcpy(&le32_tmp, rtw_get_beacon_interval_from_ie(bssid->ies), 2);
bssid->configuration.beacon_period = le32_to_cpu(le32_tmp);
val16 = rtw_get_capability((struct wlan_bssid_ex *)bssid);
if (val16 & BIT(0)) {
bssid->infrastructure_mode = Ndis802_11Infrastructure;
memcpy(bssid->mac_address, GetAddr2Ptr(pframe), ETH_ALEN);
} else {
bssid->infrastructure_mode = Ndis802_11IBSS;
memcpy(bssid->mac_address, GetAddr3Ptr(pframe), ETH_ALEN);
}
if (val16 & BIT(4))
bssid->privacy = 1;
else
bssid->privacy = 0;
bssid->configuration.atim_window = 0;
/* 20/40 BSS Coexistence check */
if ((pregistrypriv->wifi_spec == 1) && (false == pmlmeinfo->bwmode_updated)) {
struct mlme_priv *pmlmepriv = &padapter->mlmepriv;
p = rtw_get_ie(bssid->ies + ie_offset, WLAN_EID_HT_CAPABILITY, &len, bssid->ie_length - ie_offset);
if (p && len > 0) {
struct HT_caps_element *pHT_caps;
pHT_caps = (struct HT_caps_element *)(p + 2);
if (le16_to_cpu(pHT_caps->u.HT_cap_element.HT_caps_info) & BIT(14))
pmlmepriv->num_FortyMHzIntolerant++;
} else
pmlmepriv->num_sta_no_ht++;
}
/* mark bss info receiving from nearby channel as signal_quality 101 */
if (bssid->configuration.ds_config != rtw_get_oper_ch(padapter))
bssid->phy_info.signal_quality = 101;
return _SUCCESS;
}
/*
 * start_create_ibss() - bring the interface up as an ad-hoc (IBSS)
 * master.
 *
 * Programs channel, security config, MSR and beacon timing from the
 * network stored in pmlmeinfo, then starts beaconing.  On beacon
 * download failure the join is reported as failed (-1) and the FW
 * state is cleared; on success the join is reported and the connect
 * indication is raised.
 */
void start_create_ibss(struct adapter *padapter)
{
	unsigned short caps;
	u8 val8;
	u8 join_type;
	struct mlme_ext_priv *pmlmeext = &padapter->mlmeextpriv;
	struct mlme_ext_info *pmlmeinfo = &(pmlmeext->mlmext_info);
	struct wlan_bssid_ex *pnetwork = (struct wlan_bssid_ex *)(&(pmlmeinfo->network));

	pmlmeext->cur_channel = (u8)pnetwork->configuration.ds_config;
	pmlmeinfo->bcn_interval = get_beacon_interval(pnetwork);
	/* update wireless mode */
	update_wireless_mode(padapter);
	/* update capability */
	caps = rtw_get_capability((struct wlan_bssid_ex *)pnetwork);
	update_capinfo(padapter, caps);
	if (caps&WLAN_CAPABILITY_IBSS) {/* adhoc master */
		/* hardware security config (magic register value) */
		val8 = 0xcf;
		rtw_hal_set_hwreg(padapter, HW_VAR_SEC_CFG, (u8 *)(&val8));
		rtw_hal_set_hwreg(padapter, HW_VAR_DO_IQK, NULL);
		/* switch channel */
		/* SelectChannel(padapter, pmlmeext->cur_channel, HAL_PRIME_CHNL_OFFSET_DONT_CARE); */
		set_channel_bwmode(padapter, pmlmeext->cur_channel, HAL_PRIME_CHNL_OFFSET_DONT_CARE, CHANNEL_WIDTH_20);
		beacon_timing_control(padapter);
		/* set msr to WIFI_FW_ADHOC_STATE */
		pmlmeinfo->state = WIFI_FW_ADHOC_STATE;
		Set_MSR(padapter, (pmlmeinfo->state & 0x3));
		/* issue beacon */
		if (send_beacon(padapter) == _FAIL) {
			report_join_res(padapter, -1);
			pmlmeinfo->state = WIFI_FW_NULL_STATE;
		} else {
			rtw_hal_set_hwreg(padapter, HW_VAR_BSSID, padapter->registrypriv.dev_network.mac_address);
			join_type = 0;
			rtw_hal_set_hwreg(padapter, HW_VAR_MLME_JOIN, (u8 *)(&join_type));
			report_join_res(padapter, 1);
			pmlmeinfo->state |= WIFI_FW_ASSOC_SUCCESS;
			rtw_indicate_connect(padapter);
		}
	} else {
		return;
	}
	/* update bc/mc sta_info */
	/* NOTE(review): reached even when send_beacon() failed above -
	 * confirm this is intended
	 */
	update_bmc_sta(padapter);
}
/*
 * start_clnt_join() - begin joining the selected BSS as a client.
 *
 * For an ESS (infrastructure AP): set station MSR and security config,
 * send a pre-emptive deauth, then wait for a beacon before starting
 * authentication (link timer + assoc timer armed).  For an IBSS:
 * configure ad-hoc state and report the join immediately.  Any other
 * capability combination is ignored.
 */
void start_clnt_join(struct adapter *padapter)
{
	unsigned short caps;
	u8 val8;
	struct mlme_ext_priv *pmlmeext = &padapter->mlmeextpriv;
	struct mlme_ext_info *pmlmeinfo = &(pmlmeext->mlmext_info);
	struct wlan_bssid_ex *pnetwork = (struct wlan_bssid_ex *)(&(pmlmeinfo->network));
	int beacon_timeout;

	/* update wireless mode */
	update_wireless_mode(padapter);
	/* update capability */
	caps = rtw_get_capability((struct wlan_bssid_ex *)pnetwork);
	update_capinfo(padapter, caps);
	if (caps&WLAN_CAPABILITY_ESS) {
		Set_MSR(padapter, WIFI_FW_STATION_STATE);
		/* security config: 0xcc when using 802.1X, else 0xcf
		 * (hardware register magic values)
		 */
		val8 = (pmlmeinfo->auth_algo == dot11AuthAlgrthm_8021X) ? 0xcc : 0xcf;
		rtw_hal_set_hwreg(padapter, HW_VAR_SEC_CFG, (u8 *)(&val8));
		/* Because of AP's not receiving deauth before */
		/* AP may: 1)not response auth or 2)deauth us after link is complete */
		/* issue deauth before issuing auth to deal with the situation */
		/* Commented by Albert 2012/07/21 */
		/* For the Win8 P2P connection, it will be hard to have a successful connection if this Wi-Fi doesn't connect to it. */
		{
			/* To avoid connecting to AP fail during resume process, change retry count from 5 to 1 */
			issue_deauth_ex(padapter, pnetwork->mac_address, WLAN_REASON_DEAUTH_LEAVING, 1, 100);
		}
		/* here wait for receiving the beacon to start auth */
		/* and enable a timer */
		beacon_timeout = decide_wait_for_beacon_timeout(pmlmeinfo->bcn_interval);
		set_link_timer(pmlmeext, beacon_timeout);
		/* overall association deadline: auth + assoc retries + beacon wait */
		_set_timer(&padapter->mlmepriv.assoc_timer,
			   (REAUTH_TO * REAUTH_LIMIT) + (REASSOC_TO*REASSOC_LIMIT) + beacon_timeout);
		pmlmeinfo->state = WIFI_FW_AUTH_NULL | WIFI_FW_STATION_STATE;
	} else if (caps&WLAN_CAPABILITY_IBSS) { /* adhoc client */
		Set_MSR(padapter, WIFI_FW_ADHOC_STATE);
		val8 = 0xcf;
		rtw_hal_set_hwreg(padapter, HW_VAR_SEC_CFG, (u8 *)(&val8));
		beacon_timing_control(padapter);
		pmlmeinfo->state = WIFI_FW_ADHOC_STATE;
		report_join_res(padapter, 1);
	} else {
		return;
	}
}
/*
 * start_clnt_auth() - transition from waiting-for-beacon to the
 * authentication exchange.
 *
 * Resets the auth/assoc retry counters, sends the first authentication
 * frame and arms the reauth link timer.
 */
void start_clnt_auth(struct adapter *padapter)
{
	struct mlme_ext_priv *ext = &padapter->mlmeextpriv;
	struct mlme_ext_info *info = &ext->mlmext_info;

	del_timer_sync(&ext->link_timer);

	/* leave AUTH_NULL, enter AUTH_STATE */
	info->state &= ~WIFI_FW_AUTH_NULL;
	info->state |= WIFI_FW_AUTH_STATE;

	info->auth_seq = 1;
	info->reauth_count = 0;
	info->reassoc_count = 0;
	info->link_count = 0;
	ext->retry = 0;

	netdev_dbg(padapter->pnetdev, "start auth\n");
	issue_auth(padapter, NULL, 0);

	set_link_timer(ext, REAUTH_TO);
}
/*
 * start_clnt_assoc() - move from authenticated to associating.
 *
 * Marks the FW state as auth-success/associating, sends the
 * association request and arms the reassoc link timer.
 */
void start_clnt_assoc(struct adapter *padapter)
{
	struct mlme_ext_priv *ext = &padapter->mlmeextpriv;
	struct mlme_ext_info *info = &ext->mlmext_info;

	del_timer_sync(&ext->link_timer);

	info->state &= ~(WIFI_FW_AUTH_NULL | WIFI_FW_AUTH_STATE);
	info->state |= WIFI_FW_AUTH_SUCCESS | WIFI_FW_ASSOC_STATE;

	issue_assocreq(padapter);

	set_link_timer(ext, REASSOC_TO);
}
/*
 * receive_disconnect() - handle an incoming deauth/disassoc indication.
 *
 * Ignores frames whose BSSID (A3) does not match ours.  In station
 * mode, a disconnect after association produces a del-sta event, and
 * one during linking reports the join as failed (-2).  Always returns
 * _SUCCESS.
 */
unsigned int receive_disconnect(struct adapter *padapter, unsigned char *MacAddr, unsigned short reason)
{
	struct mlme_ext_priv *ext = &padapter->mlmeextpriv;
	struct mlme_ext_info *info = &ext->mlmext_info;

	/* check A3: only react to our own BSS */
	if (memcmp(MacAddr, get_my_bssid(&info->network), ETH_ALEN) != 0)
		return _SUCCESS;

	if ((info->state & 0x03) == WIFI_FW_STATION_STATE) {
		if (info->state & WIFI_FW_ASSOC_SUCCESS) {
			info->state = WIFI_FW_NULL_STATE;
			report_del_sta_event(padapter, MacAddr, reason);
		} else if (info->state & WIFI_FW_LINKING_STATE) {
			info->state = WIFI_FW_NULL_STATE;
			report_join_res(padapter, -2);
		}
	}

	return _SUCCESS;
}
/*
 * process_80211d() - adjust the scan channel plan from the AP's
 * Country IE (802.11d) and mark the AP's own channel active-scannable.
 *
 * On the first qualifying frame (enable80211d set, not yet done), the
 * STA's channel set is merged with the AP-advertised channel list:
 * channels known to both sides become active-scan, channels only the
 * STA knows become passive-scan, and channels only the AP advertises
 * are added as active-scan.  Afterwards (on every call) the channel
 * the sending BSS operates on is promoted from passive to active scan.
 */
static void process_80211d(struct adapter *padapter, struct wlan_bssid_ex *bssid)
{
	struct registry_priv *pregistrypriv;
	struct mlme_ext_priv *pmlmeext;
	struct rt_channel_info *chplan_new;
	u8 channel;
	u8 i;

	pregistrypriv = &padapter->registrypriv;
	pmlmeext = &padapter->mlmeextpriv;
	/* Adjust channel plan by AP Country IE */
	if (pregistrypriv->enable80211d &&
	    (!pmlmeext->update_channel_plan_by_ap_done)) {
		u8 *ie, *p;
		u32 len;
		struct rt_channel_plan chplan_ap;
		struct rt_channel_info chplan_sta[MAX_CHANNEL_NUM];
		u8 country[4];
		u8 fcn; /* first channel number */
		u8 noc; /* number of channel */
		u8 j, k;

		ie = rtw_get_ie(bssid->ies + _FIXED_IE_LENGTH_, WLAN_EID_COUNTRY, &len, bssid->ie_length - _FIXED_IE_LENGTH_);
		if (!ie)
			return;
		/* need country string (3 bytes) plus at least one 3-byte triplet */
		if (len < 6)
			return;
		ie += 2;
		p = ie;
		ie += len;	/* ie now points one past the end of the element body */
		memset(country, 0, 4);
		memcpy(country, p, 3);
		p += 3;
		/* expand each (first channel, channel count, ...) triplet
		 * into a flat AP channel list
		 */
		i = 0;
		while ((ie - p) >= 3) {
			fcn = *(p++);
			noc = *(p++);
			p++;	/* skip the third triplet byte */
			for (j = 0; j < noc; j++) {
				if (fcn <= 14)
					channel = fcn + j; /* 2.4 GHz */
				else
					channel = fcn + j*4; /* 5 GHz */
				chplan_ap.Channel[i++] = channel;
			}
		}
		chplan_ap.Len = i;
		/* snapshot the current STA plan, then rebuild it in place */
		memcpy(chplan_sta, pmlmeext->channel_set, sizeof(chplan_sta));
		memset(pmlmeext->channel_set, 0, sizeof(pmlmeext->channel_set));
		chplan_new = pmlmeext->channel_set;
		i = j = k = 0;
		if (pregistrypriv->wireless_mode & WIRELESS_11G) {
			/* merge the two (assumed sorted) 2.4 GHz lists:
			 * i walks chplan_sta, j walks chplan_ap, k writes chplan_new
			 */
			do {
				if ((i == MAX_CHANNEL_NUM) ||
				    (chplan_sta[i].ChannelNum == 0) ||
				    (chplan_sta[i].ChannelNum > 14))
					break;
				if ((j == chplan_ap.Len) || (chplan_ap.Channel[j] > 14))
					break;
				if (chplan_sta[i].ChannelNum == chplan_ap.Channel[j]) {
					/* known to both sides: active scan */
					chplan_new[k].ChannelNum = chplan_ap.Channel[j];
					chplan_new[k].ScanType = SCAN_ACTIVE;
					i++;
					j++;
					k++;
				} else if (chplan_sta[i].ChannelNum < chplan_ap.Channel[j]) {
					/* STA-only channel: passive scan */
					chplan_new[k].ChannelNum = chplan_sta[i].ChannelNum;
					/* chplan_new[k].ScanType = chplan_sta[i].ScanType; */
					chplan_new[k].ScanType = SCAN_PASSIVE;
					i++;
					k++;
				} else if (chplan_sta[i].ChannelNum > chplan_ap.Channel[j]) {
					/* AP-only channel: active scan */
					chplan_new[k].ChannelNum = chplan_ap.Channel[j];
					chplan_new[k].ScanType = SCAN_ACTIVE;
					j++;
					k++;
				}
			} while (1);
			/* change AP not support channel to Passive scan */
			while ((i < MAX_CHANNEL_NUM) &&
			       (chplan_sta[i].ChannelNum != 0) &&
			       (chplan_sta[i].ChannelNum <= 14)) {
				chplan_new[k].ChannelNum = chplan_sta[i].ChannelNum;
				/* chplan_new[k].ScanType = chplan_sta[i].ScanType; */
				chplan_new[k].ScanType = SCAN_PASSIVE;
				i++;
				k++;
			}
			/* add channel AP supported */
			while ((j < chplan_ap.Len) && (chplan_ap.Channel[j] <= 14)) {
				chplan_new[k].ChannelNum = chplan_ap.Channel[j];
				chplan_new[k].ScanType = SCAN_ACTIVE;
				j++;
				k++;
			}
		} else {
			/* keep original STA 2.4G channel plan */
			while ((i < MAX_CHANNEL_NUM) &&
			       (chplan_sta[i].ChannelNum != 0) &&
			       (chplan_sta[i].ChannelNum <= 14)) {
				chplan_new[k].ChannelNum = chplan_sta[i].ChannelNum;
				chplan_new[k].ScanType = chplan_sta[i].ScanType;
				i++;
				k++;
			}
			/* skip AP 2.4G channel plan */
			while ((j < chplan_ap.Len) && (chplan_ap.Channel[j] <= 14))
				j++;
		}
		pmlmeext->update_channel_plan_by_ap_done = 1;
	}
	/* If channel is used by AP, set channel scan type to active */
	channel = bssid->configuration.ds_config;
	chplan_new = pmlmeext->channel_set;
	i = 0;
	while ((i < MAX_CHANNEL_NUM) && (chplan_new[i].ChannelNum != 0)) {
		if (chplan_new[i].ChannelNum == channel) {
			if (chplan_new[i].ScanType == SCAN_PASSIVE)
				chplan_new[i].ScanType = SCAN_ACTIVE;
			break;
		}
		i++;
	}
}
/****************************************************************************
Following are the functions to report events
*****************************************************************************/
/*
 * report_survey_event() - package a received beacon/probe frame as a
 * _Survey MLME event and queue it to the command thread.
 *
 * Allocates a cmd_obj plus event buffer, parses the frame via
 * collect_bss_info(), applies 802.11d processing and enqueues the
 * command.  All allocations are released on any failure path.
 */
void report_survey_event(struct adapter *padapter, union recv_frame *precv_frame)
{
	struct mlme_ext_priv *mlmeext;
	struct cmd_priv *cmdpriv;
	struct survey_event *evt;
	struct C2HEvent_Header *hdr;
	struct cmd_obj *cmd;
	u32 sz;
	u8 *buf;

	if (!padapter)
		return;

	mlmeext = &padapter->mlmeextpriv;
	cmdpriv = &padapter->cmdpriv;

	cmd = rtw_zmalloc(sizeof(struct cmd_obj));
	if (!cmd)
		return;

	sz = sizeof(struct survey_event) + sizeof(struct C2HEvent_Header);
	buf = rtw_zmalloc(sz);
	if (!buf) {
		kfree(cmd);
		return;
	}

	INIT_LIST_HEAD(&cmd->list);
	cmd->cmdcode = GEN_CMD_CODE(_Set_MLME_EVT);
	cmd->cmdsz = sz;
	cmd->parmbuf = buf;
	cmd->rsp = NULL;
	cmd->rspsz = 0;

	/* event header precedes the payload in the buffer */
	hdr = (struct C2HEvent_Header *)buf;
	hdr->len = sizeof(struct survey_event);
	hdr->ID = GEN_EVT_CODE(_Survey);
	hdr->seq = atomic_inc_return(&mlmeext->event_seq);

	evt = (struct survey_event *)(buf + sizeof(struct C2HEvent_Header));
	if (collect_bss_info(padapter, precv_frame, (struct wlan_bssid_ex *)&evt->bss) == _FAIL) {
		kfree(cmd);
		kfree(buf);
		return;
	}

	process_80211d(padapter, &evt->bss);

	rtw_enqueue_cmd(cmdpriv, cmd);

	mlmeext->sitesurvey_res.bss_cnt++;
}
/*
 * report_surveydone_event() - queue a _SurveyDone MLME event carrying
 * the number of BSSes found during the completed site survey.
 */
void report_surveydone_event(struct adapter *padapter)
{
	struct mlme_ext_priv *mlmeext = &padapter->mlmeextpriv;
	struct cmd_priv *cmdpriv = &padapter->cmdpriv;
	struct surveydone_event *evt;
	struct C2HEvent_Header *hdr;
	struct cmd_obj *cmd;
	u32 sz;
	u8 *buf;

	cmd = rtw_zmalloc(sizeof(struct cmd_obj));
	if (!cmd)
		return;

	sz = sizeof(struct surveydone_event) + sizeof(struct C2HEvent_Header);
	buf = rtw_zmalloc(sz);
	if (!buf) {
		kfree(cmd);
		return;
	}

	INIT_LIST_HEAD(&cmd->list);
	cmd->cmdcode = GEN_CMD_CODE(_Set_MLME_EVT);
	cmd->cmdsz = sz;
	cmd->parmbuf = buf;
	cmd->rsp = NULL;
	cmd->rspsz = 0;

	hdr = (struct C2HEvent_Header *)buf;
	hdr->len = sizeof(struct surveydone_event);
	hdr->ID = GEN_EVT_CODE(_SurveyDone);
	hdr->seq = atomic_inc_return(&mlmeext->event_seq);

	evt = (struct surveydone_event *)(buf + sizeof(struct C2HEvent_Header));
	evt->bss_cnt = mlmeext->sitesurvey_res.bss_cnt;

	rtw_enqueue_cmd(cmdpriv, cmd);
}
void report_join_res(struct adapter *padapter, int res)
{
struct cmd_obj *pcmd_obj;
u8 *pevtcmd;
u32 cmdsz;
struct joinbss_event *pjoinbss_evt;
struct C2HEvent_Header *pc2h_evt_hdr;
struct mlme_ext_priv *pmlmeext = &padapter->mlmeextpriv;
struct mlme_ext_info *pmlmeinfo = &(pmlmeext->mlmext_info);
struct cmd_priv *pcmdpriv = &padapter->cmdpriv;
pcmd_obj = rtw_zmalloc(sizeof(struct cmd_obj));
if (!pcmd_obj)
return;
cmdsz = (sizeof(struct joinbss_event) + sizeof(struct C2HEvent_Header));
pevtcmd = rtw_zmalloc(cmdsz);
if (!pevtcmd) {
kfree(pcmd_obj);
return;
}
INIT_LIST_HEAD(&pcmd_obj->list);
pcmd_obj->cmdcode = GEN_CMD_CODE(_Set_MLME_EVT);
pcmd_obj->cmdsz = cmdsz;
pcmd_obj->parmbuf = pevtcmd;
pcmd_obj->rsp = NULL;
pcmd_obj->rspsz = 0;
pc2h_evt_hdr = (struct C2HEvent_Header *)(pevtcmd);
pc2h_evt_hdr->len = sizeof(struct joinbss_event);
pc2h_evt_hdr->ID = GEN_EVT_CODE(_JoinBss);
pc2h_evt_hdr->seq = atomic_inc_return(&pmlmeext->event_seq);
pjoinbss_evt = (struct joinbss_event *)(pevtcmd + sizeof(struct C2HEvent_Header));
memcpy((unsigned char *)(&(pjoinbss_evt->network.network)), &(pmlmeinfo->network), sizeof(struct wlan_bssid_ex));
pjoinbss_evt->network.join_res = pjoinbss_evt->network.aid = res;
rtw_joinbss_event_prehandle(padapter, (u8 *)&pjoinbss_evt->network);
rtw_enqueue_cmd(pcmdpriv, pcmd_obj);
return;
}
/*
 * report_wmm_edca_update() - queue a _WMM MLME event signalling that
 * the EDCA parameters should be refreshed (payload field is always 0).
 */
void report_wmm_edca_update(struct adapter *padapter)
{
	struct mlme_ext_priv *mlmeext = &padapter->mlmeextpriv;
	struct cmd_priv *cmdpriv = &padapter->cmdpriv;
	struct wmm_event *evt;
	struct C2HEvent_Header *hdr;
	struct cmd_obj *cmd;
	u32 sz;
	u8 *buf;

	cmd = rtw_zmalloc(sizeof(struct cmd_obj));
	if (!cmd)
		return;

	sz = sizeof(struct wmm_event) + sizeof(struct C2HEvent_Header);
	buf = rtw_zmalloc(sz);
	if (!buf) {
		kfree(cmd);
		return;
	}

	INIT_LIST_HEAD(&cmd->list);
	cmd->cmdcode = GEN_CMD_CODE(_Set_MLME_EVT);
	cmd->cmdsz = sz;
	cmd->parmbuf = buf;
	cmd->rsp = NULL;
	cmd->rspsz = 0;

	hdr = (struct C2HEvent_Header *)buf;
	hdr->len = sizeof(struct wmm_event);
	hdr->ID = GEN_EVT_CODE(_WMM);
	hdr->seq = atomic_inc_return(&mlmeext->event_seq);

	evt = (struct wmm_event *)(buf + sizeof(struct C2HEvent_Header));
	evt->wmm = 0;

	rtw_enqueue_cmd(cmdpriv, cmd);
}
/*
 * report_del_sta_event() - queue a _DelSTA MLME event for a departed
 * peer.
 * @MacAddr: peer MAC address
 * @reason:  802.11 reason code, stored in the event's rsvd bytes
 *
 * The peer's mac_id is included when a sta_info entry still exists,
 * otherwise -1.
 */
void report_del_sta_event(struct adapter *padapter, unsigned char *MacAddr, unsigned short reason)
{
	struct mlme_ext_priv *mlmeext = &padapter->mlmeextpriv;
	struct cmd_priv *cmdpriv = &padapter->cmdpriv;
	struct stadel_event *evt;
	struct C2HEvent_Header *hdr;
	struct sta_info *sta;
	struct cmd_obj *cmd;
	int mac_id;
	u32 sz;
	u8 *buf;

	cmd = rtw_zmalloc(sizeof(struct cmd_obj));
	if (!cmd)
		return;

	sz = sizeof(struct stadel_event) + sizeof(struct C2HEvent_Header);
	buf = rtw_zmalloc(sz);
	if (!buf) {
		kfree(cmd);
		return;
	}

	INIT_LIST_HEAD(&cmd->list);
	cmd->cmdcode = GEN_CMD_CODE(_Set_MLME_EVT);
	cmd->cmdsz = sz;
	cmd->parmbuf = buf;
	cmd->rsp = NULL;
	cmd->rspsz = 0;

	hdr = (struct C2HEvent_Header *)buf;
	hdr->len = sizeof(struct stadel_event);
	hdr->ID = GEN_EVT_CODE(_DelSTA);
	hdr->seq = atomic_inc_return(&mlmeext->event_seq);

	evt = (struct stadel_event *)(buf + sizeof(struct C2HEvent_Header));
	memcpy((unsigned char *)&evt->macaddr, MacAddr, ETH_ALEN);
	memcpy((unsigned char *)evt->rsvd, (unsigned char *)&reason, 2);

	sta = rtw_get_stainfo(&padapter->stapriv, MacAddr);
	if (sta)
		mac_id = (int)sta->mac_id;
	else
		mac_id = -1;
	evt->mac_id = mac_id;

	rtw_enqueue_cmd(cmdpriv, cmd);
}
/*
 * report_add_sta_event() - queue an _AddSTA MLME event for a newly
 * associated peer.
 * @MacAddr: peer MAC address
 * @cam_idx: CAM slot assigned to the peer
 */
void report_add_sta_event(struct adapter *padapter, unsigned char *MacAddr, int cam_idx)
{
	struct mlme_ext_priv *mlmeext = &padapter->mlmeextpriv;
	struct cmd_priv *cmdpriv = &padapter->cmdpriv;
	struct stassoc_event *evt;
	struct C2HEvent_Header *hdr;
	struct cmd_obj *cmd;
	u32 sz;
	u8 *buf;

	cmd = rtw_zmalloc(sizeof(struct cmd_obj));
	if (!cmd)
		return;

	sz = sizeof(struct stassoc_event) + sizeof(struct C2HEvent_Header);
	buf = rtw_zmalloc(sz);
	if (!buf) {
		kfree(cmd);
		return;
	}

	INIT_LIST_HEAD(&cmd->list);
	cmd->cmdcode = GEN_CMD_CODE(_Set_MLME_EVT);
	cmd->cmdsz = sz;
	cmd->parmbuf = buf;
	cmd->rsp = NULL;
	cmd->rspsz = 0;

	hdr = (struct C2HEvent_Header *)buf;
	hdr->len = sizeof(struct stassoc_event);
	hdr->ID = GEN_EVT_CODE(_AddSTA);
	hdr->seq = atomic_inc_return(&mlmeext->event_seq);

	evt = (struct stassoc_event *)(buf + sizeof(struct C2HEvent_Header));
	memcpy((unsigned char *)&evt->macaddr, MacAddr, ETH_ALEN);
	evt->cam_id = cam_idx;

	rtw_enqueue_cmd(cmdpriv, cmd);
}
/****************************************************************************
Following are the event callback functions
*****************************************************************************/
/* for sta/adhoc mode */
/*
 * update_sta_info() - populate a newly associated peer's sta_info from
 * our mlme HT/QoS configuration (station/ad-hoc mode).
 *
 * Copies HT settings (A-MPDU, short GI, LDPC/STBC/beamforming, cached
 * HT caps), bandwidth and QoS options into @psta, then marks the
 * station as linked (_FW_LINKED) under its lock.
 */
void update_sta_info(struct adapter *padapter, struct sta_info *psta)
{
	struct mlme_priv *pmlmepriv = &(padapter->mlmepriv);
	struct mlme_ext_priv *pmlmeext = &padapter->mlmeextpriv;
	struct mlme_ext_info *pmlmeinfo = &(pmlmeext->mlmext_info);

	/* ERP */
	VCS_update(padapter, psta);
	/* HT */
	if (pmlmepriv->htpriv.ht_option) {
		psta->htpriv.ht_option = true;
		psta->htpriv.ampdu_enable = pmlmepriv->htpriv.ampdu_enable;
		/* A-MPDU minimum RX spacing, extracted from the density
		 * field of the A-MPDU parameter byte
		 */
		psta->htpriv.rx_ampdu_min_spacing = (pmlmeinfo->HT_caps.u.HT_cap_element.AMPDU_para&IEEE80211_HT_CAP_AMPDU_DENSITY)>>2;
		if (support_short_GI(padapter, &(pmlmeinfo->HT_caps), CHANNEL_WIDTH_20))
			psta->htpriv.sgi_20m = true;
		if (support_short_GI(padapter, &(pmlmeinfo->HT_caps), CHANNEL_WIDTH_40))
			psta->htpriv.sgi_40m = true;
		/* HT peers always get QoS */
		psta->qos_option = true;
		psta->htpriv.ldpc_cap = pmlmepriv->htpriv.ldpc_cap;
		psta->htpriv.stbc_cap = pmlmepriv->htpriv.stbc_cap;
		psta->htpriv.beamform_cap = pmlmepriv->htpriv.beamform_cap;
		memcpy(&psta->htpriv.ht_cap, &pmlmeinfo->HT_caps, sizeof(struct ieee80211_ht_cap));
	} else {
		psta->htpriv.ht_option = false;
		psta->htpriv.ampdu_enable = false;
		psta->htpriv.sgi_20m = false;
		psta->htpriv.sgi_40m = false;
		psta->qos_option = false;
	}
	psta->htpriv.ch_offset = pmlmeext->cur_ch_offset;
	psta->htpriv.agg_enable_bitmap = 0x0;/* reset */
	psta->htpriv.candidate_tid_bitmap = 0x0;/* reset */
	psta->bw_mode = pmlmeext->cur_bwmode;
	/* QoS */
	if (pmlmepriv->qospriv.qos_option)
		psta->qos_option = true;
	update_ldpc_stbc_cap(psta);
	/* mark the peer linked; lock guards concurrent state readers */
	spin_lock_bh(&psta->lock);
	psta->state = _FW_LINKED;
	spin_unlock_bh(&psta->lock);
}
/*
 * rtw_mlmeext_disconnect() - reset mlme-ext and hardware state after a
 * disassociation or link loss.
 *
 * Wakes the (possibly sleeping) macid of the old BSS, clears the HW
 * BSSID/link state, drops back to 20 MHz on the current channel,
 * flushes the CAM, stops the link timer and resets link-detect
 * counters.
 */
static void rtw_mlmeext_disconnect(struct adapter *padapter)
{
	struct mlme_priv *pmlmepriv = &padapter->mlmepriv;
	struct mlme_ext_priv *pmlmeext = &padapter->mlmeextpriv;
	struct mlme_ext_info *pmlmeinfo = &(pmlmeext->mlmext_info);
	struct wlan_bssid_ex *pnetwork = (struct wlan_bssid_ex *)(&(pmlmeinfo->network));

	/* set_opmode_cmd(padapter, infra_client_with_mlme); */
	/* For safety, prevent from keeping macid sleep.
	 * If we can sure all power mode enter/leave are paired,
	 * this check can be removed.
	 * Lucas@20131113
	 */
	/* wakeup macid after disconnect. */
	{
		struct sta_info *psta;

		psta = rtw_get_stainfo(&padapter->stapriv, get_my_bssid(pnetwork));
		if (psta)
			rtw_hal_macid_wakeup(padapter, psta->mac_id);
	}
	rtw_hal_set_hwreg(padapter, HW_VAR_MLME_DISCONNECT, NULL);
	rtw_hal_set_hwreg(padapter, HW_VAR_BSSID, null_addr);
	/* set MSR to no link state -> infra. mode */
	Set_MSR(padapter, _HW_STATE_STATION_);
	pmlmeinfo->state = WIFI_FW_NULL_STATE;
	/* switch to the 20M Hz mode after disconnect */
	pmlmeext->cur_bwmode = CHANNEL_WIDTH_20;
	pmlmeext->cur_ch_offset = HAL_PRIME_CHNL_OFFSET_DONT_CARE;
	set_channel_bwmode(padapter, pmlmeext->cur_channel, pmlmeext->cur_ch_offset, pmlmeext->cur_bwmode);
	flush_all_cam_entry(padapter);
	del_timer_sync(&pmlmeext->link_timer);
	/* pmlmepriv->LinkDetectInfo.TrafficBusyState = false; */
	pmlmepriv->LinkDetectInfo.TrafficTransitionCount = 0;
	pmlmepriv->LinkDetectInfo.LowPowerTransitionCount = 0;
}
/*
 * Finalize (or abort) a join at the mlme_ext level.
 *
 * @join_res: negative on join failure; on failure the HW join state and
 *            BSSID filter are cleared and nothing else is touched.
 *
 * On success: programs basic rates, beacon interval, capabilities,
 * WMM/HT parameters and channel/bandwidth, then brings the AP's
 * sta_info entry online and (for port 0) kicks LPS control.
 */
void mlmeext_joinbss_event_callback(struct adapter *padapter, int join_res)
{
	struct mlme_ext_priv *pmlmeext = &padapter->mlmeextpriv;
	struct mlme_ext_info *pmlmeinfo = &(pmlmeext->mlmext_info);
	struct wlan_bssid_ex *cur_network = &(pmlmeinfo->network);
	struct sta_priv *pstapriv = &padapter->stapriv;
	u8 join_type;
	struct sta_info *psta;

	if (join_res < 0) {
		/* join_type 1: tell HW the join failed, then clear the BSSID filter */
		join_type = 1;
		rtw_hal_set_hwreg(padapter, HW_VAR_MLME_JOIN, (u8 *)(&join_type));
		rtw_hal_set_hwreg(padapter, HW_VAR_BSSID, null_addr);
		return;
	}

	if ((pmlmeinfo->state&0x03) == WIFI_FW_ADHOC_STATE)
		/* update bc/mc sta_info */
		update_bmc_sta(padapter);

	/* turn on dynamic functions */
	Switch_DM_Func(padapter, DYNAMIC_ALL_FUNC_ENABLE, true);

	/* update IOT-related issue */
	update_IOT_info(padapter);

	rtw_hal_set_hwreg(padapter, HW_VAR_BASIC_RATE, cur_network->supported_rates);

	/* BCN interval */
	rtw_hal_set_hwreg(padapter, HW_VAR_BEACON_INTERVAL, (u8 *)(&pmlmeinfo->bcn_interval));

	/* update capability */
	update_capinfo(padapter, pmlmeinfo->capability);

	/* WMM, Update EDCA param */
	WMMOnAssocRsp(padapter);

	/* HT */
	HTOnAssocRsp(padapter);

	/* Set cur_channel&cur_bwmode&cur_ch_offset */
	set_channel_bwmode(padapter, pmlmeext->cur_channel, pmlmeext->cur_ch_offset, pmlmeext->cur_bwmode);

	psta = rtw_get_stainfo(pstapriv, cur_network->mac_address);
	if (psta) { /* only for infra. mode */
		pmlmeinfo->FW_sta_info[psta->mac_id].psta = psta;

		psta->wireless_mode = pmlmeext->cur_wireless_mode;

		/* set per sta rate after updating HT cap. */
		set_sta_rate(padapter, psta);

		rtw_sta_media_status_rpt(padapter, psta, 1);

		/* wakeup macid after join bss successfully to ensure
		 * the subsequent data frames can be sent out normally
		 */
		rtw_hal_macid_wakeup(padapter, psta->mac_id);
	}

	/* join_type 2: commit the successful join to HW */
	join_type = 2;
	rtw_hal_set_hwreg(padapter, HW_VAR_MLME_JOIN, (u8 *)(&join_type));

	if ((pmlmeinfo->state&0x03) == WIFI_FW_STATION_STATE) {
		/* correcting TSF */
		correct_TSF(padapter, pmlmeext);

		/* set_link_timer(pmlmeext, DISCONNECT_TO); */
	}

	if (get_iface_type(padapter) == IFACE_PORT0)
		rtw_lps_ctrl_wk_cmd(padapter, LPS_CTRL_CONNECT, 0);
}
/* currently only adhoc mode will go here */
/* currently only adhoc mode will go here */
/*
 * Bring a newly added (adhoc) station online: for an adhoc client that
 * has not yet associated, correct the TSF and start beaconing, then
 * copy the FW-reported rate set into @psta and program the rate mask.
 */
void mlmeext_sta_add_event_callback(struct adapter *padapter, struct sta_info *psta)
{
	struct mlme_ext_priv *pmlmeext = &(padapter->mlmeextpriv);
	struct mlme_ext_info *pmlmeinfo = &(pmlmeext->mlmext_info);
	u8 join_type;

	if ((pmlmeinfo->state&0x03) == WIFI_FW_ADHOC_STATE) {
		if (pmlmeinfo->state & WIFI_FW_ASSOC_SUCCESS) { /* adhoc master or sta_count>1 */
			/* nothing to do */
		} else { /* adhoc client */
			/* update TSF Value */
			/* update_TSF(pmlmeext, pframe, len); */

			/* correcting TSF */
			correct_TSF(padapter, pmlmeext);

			/* start beacon */
			if (send_beacon(padapter) == _FAIL) {
				/* beaconing failed: mark this FW slot dead and
				 * drop out of adhoc state
				 */
				pmlmeinfo->FW_sta_info[psta->mac_id].status = 0;
				pmlmeinfo->state ^= WIFI_FW_ADHOC_STATE;
				return;
			}

			pmlmeinfo->state |= WIFI_FW_ASSOC_SUCCESS;
		}

		join_type = 2;
		rtw_hal_set_hwreg(padapter, HW_VAR_MLME_JOIN, (u8 *)(&join_type));
	}

	pmlmeinfo->FW_sta_info[psta->mac_id].psta = psta;

	/* rate radaptive */
	psta->bssratelen = rtw_get_rateset_len(pmlmeinfo->FW_sta_info[psta->mac_id].SupportedRates);
	memcpy(psta->bssrateset, pmlmeinfo->FW_sta_info[psta->mac_id].SupportedRates, psta->bssratelen);

	/* update adhoc sta_info */
	update_sta_info(padapter, psta);

	rtw_hal_update_sta_rate_mask(padapter, psta);

	/* ToDo: HT for Ad-hoc */
	psta->wireless_mode = rtw_check_network_type(psta->bssrateset, psta->bssratelen, pmlmeext->cur_channel);
	psta->raid = networktype_to_raid_ex(padapter, psta);

	/* rate radaptive */
	Update_RA_Entry(padapter, psta);
}
/*
 * Station-deleted event: drop the link when we are an infrastructure
 * client or when the IBSS has become empty.
 */
void mlmeext_sta_del_event_callback(struct adapter *padapter)
{
	if (!is_client_associated_to_ap(padapter) && !is_IBSS_empty(padapter))
		return;

	rtw_mlmeext_disconnect(padapter);
}
/****************************************************************************
Following are the functions for the timer handlers
*****************************************************************************/
/*
 * Debug helper: when bLinkInfoDump is enabled, dump per-macid TX rate
 * adaptation info and the RX info for the whole adapter.
 */
void _linked_info_dump(struct adapter *padapter)
{
	int i;
	struct mlme_ext_priv *pmlmeext = &padapter->mlmeextpriv;
	struct mlme_ext_info *pmlmeinfo = &(pmlmeext->mlmext_info);
	/* NOTE(review): fetched below for station mode but never read
	 * afterwards in this function — confirm whether the HAL get has
	 * a needed side effect or the value is simply unused.
	 */
	int UndecoratedSmoothedPWDB;
	struct dvobj_priv *pdvobj = adapter_to_dvobj(padapter);

	if (padapter->bLinkInfoDump) {
		if ((pmlmeinfo->state&0x03) == WIFI_FW_STATION_STATE)
			rtw_hal_get_def_var(padapter, HAL_DEF_UNDERCORATEDSMOOTHEDPWDB, &UndecoratedSmoothedPWDB);

		for (i = 0; i < NUM_STA; i++) {
			if (pdvobj->macid[i]) {
				/* macid 1 is presumably the bc/mc entry — confirm */
				if (i != 1) /* skip bc/mc sta */
					/* tx info ============ */
					rtw_hal_get_def_var(padapter, HW_DEF_RA_INFO_DUMP, &i);
			}
		}
		rtw_hal_set_def_var(padapter, HAL_DEF_DBG_RX_INFO_DUMP, NULL);
	}
}
/*
 * Return true when the AP has shown any sign of life (data, beacon or
 * probe-response frames received) since the last check; always refresh
 * the "last seen" counters before returning.
 */
static u8 chk_ap_is_alive(struct adapter *padapter, struct sta_info *psta)
{
	u8 alive = sta_rx_data_pkts(psta) != sta_last_rx_data_pkts(psta) ||
		   sta_rx_beacon_pkts(psta) != sta_last_rx_beacon_pkts(psta) ||
		   sta_rx_probersp_pkts(psta) != sta_last_rx_probersp_pkts(psta);

	sta_update_last_rx_pkts(psta);

	return alive;
}
/*
 * Periodic link keepalive.
 *
 * Infrastructure client: probe the AP (probe requests on RX silence,
 * null-data on TX silence) and report a disconnect once the RX retry
 * budget is exhausted.  IBSS: age out peers that have stopped sending.
 */
void linked_status_chk(struct adapter *padapter)
{
	u32 i;
	struct sta_info *psta;
	struct xmit_priv *pxmitpriv = &(padapter->xmitpriv);
	struct mlme_ext_priv *pmlmeext = &padapter->mlmeextpriv;
	struct mlme_ext_info *pmlmeinfo = &(pmlmeext->mlmext_info);
	struct sta_priv *pstapriv = &padapter->stapriv;

	if (is_client_associated_to_ap(padapter)) {
		/* linked infrastructure client mode */

		int tx_chk = _SUCCESS, rx_chk = _SUCCESS;
		int rx_chk_limit;
		int link_count_limit;

		#if defined(DBG_ROAMING_TEST)
		rx_chk_limit = 1;
		#else
		rx_chk_limit = 8;
		#endif
		link_count_limit = 7; /* 16 sec */

		/* Marked by Kurt 20130715 */
		/* For WiDi 3.5 and latered on, they don't ask WiDi sink to do roaming, so we could not check rx limit that strictly. */
		/* todo: To check why we under miracast session, rx_chk would be false */

		psta = rtw_get_stainfo(pstapriv, pmlmeinfo->network.mac_address);
		if (psta) {
			/* silence in both directions is the trigger condition */
			if (chk_ap_is_alive(padapter, psta) == false)
				rx_chk = _FAIL;

			if (pxmitpriv->last_tx_pkts == pxmitpriv->tx_pkts)
				tx_chk = _FAIL;

			{
				/* on first RX silence, burst three directed probe
				 * requests to provoke a response from the AP
				 */
				if (rx_chk != _SUCCESS) {
					if (pmlmeext->retry == 0) {
						issue_probereq_ex(padapter, &pmlmeinfo->network.ssid, pmlmeinfo->network.mac_address, 0, 0, 0, 0);
						issue_probereq_ex(padapter, &pmlmeinfo->network.ssid, pmlmeinfo->network.mac_address, 0, 0, 0, 0);
						issue_probereq_ex(padapter, &pmlmeinfo->network.ssid, pmlmeinfo->network.mac_address, 0, 0, 0, 0);
					}
				}

				/* on prolonged TX silence, send a null-data frame
				 * to verify the link is still up
				 */
				if (tx_chk != _SUCCESS &&
				    pmlmeinfo->link_count++ == link_count_limit)
					tx_chk = issue_nulldata_in_interrupt(padapter, NULL);
			}

			if (rx_chk == _FAIL) {
				pmlmeext->retry++;
				if (pmlmeext->retry > rx_chk_limit) {
					/* retry budget exhausted: treat as lost link */
					netdev_dbg(padapter->pnetdev,
						   FUNC_ADPT_FMT " disconnect or roaming\n",
						   FUNC_ADPT_ARG(padapter));
					receive_disconnect(padapter, pmlmeinfo->network.mac_address
						, WLAN_REASON_EXPIRATION_CHK);
					return;
				}
			} else {
				pmlmeext->retry = 0;
			}

			if (tx_chk == _FAIL) {
				pmlmeinfo->link_count %= (link_count_limit+1);
			} else {
				pxmitpriv->last_tx_pkts = pxmitpriv->tx_pkts;
				pmlmeinfo->link_count = 0;
			}
		} /* end of if ((psta = rtw_get_stainfo(pstapriv, passoc_res->network.mac_address)) != NULL) */
	} else if (is_client_associated_to_ibss(padapter)) {
		/* linked IBSS mode */
		/* for each assoc list entry to check the rx pkt counter */
		for (i = IBSS_START_MAC_ID; i < NUM_STA; i++) {
			if (pmlmeinfo->FW_sta_info[i].status == 1) {
				psta = pmlmeinfo->FW_sta_info[i].psta;

				if (psta == NULL)
					continue;

				if (pmlmeinfo->FW_sta_info[i].rx_pkt == sta_rx_pkts(psta)) {
					/* no new frames from this peer: allow three
					 * silent intervals before declaring it gone
					 */
					if (pmlmeinfo->FW_sta_info[i].retry < 3) {
						pmlmeinfo->FW_sta_info[i].retry++;
					} else {
						pmlmeinfo->FW_sta_info[i].retry = 0;
						pmlmeinfo->FW_sta_info[i].status = 0;
						report_del_sta_event(padapter, psta->hwaddr
							, 65535/* indicate disconnect caused by no rx */
						);
					}
				} else {
					pmlmeinfo->FW_sta_info[i].retry = 0;
					pmlmeinfo->FW_sta_info[i].rx_pkt = (u32)sta_rx_pkts(psta);
				}
			}
		}

		/* set_link_timer(pmlmeext, DISCONNECT_TO); */
	}
}
/*
 * Survey timer: advance the site-survey to the next channel by queueing
 * another _SiteSurvey H2C command.  A pending scan abort fast-forwards
 * the channel index to the end so the scan terminates.
 */
void survey_timer_hdl(struct timer_list *t)
{
	struct adapter *padapter =
		from_timer(padapter, t, mlmeextpriv.survey_timer);
	struct cmd_obj *ph2c;
	struct sitesurvey_parm *psurveyPara;
	struct cmd_priv *pcmdpriv = &padapter->cmdpriv;
	struct mlme_ext_priv *pmlmeext = &padapter->mlmeextpriv;

	/* issue rtw_sitesurvey_cmd */
	if (pmlmeext->sitesurvey_res.state > SCAN_START) {
		if (pmlmeext->sitesurvey_res.state == SCAN_PROCESS)
			pmlmeext->sitesurvey_res.channel_idx++;

		if (pmlmeext->scan_abort) {
			/* jump past the last channel so the scan winds down */
			pmlmeext->sitesurvey_res.channel_idx = pmlmeext->sitesurvey_res.ch_num;

			pmlmeext->scan_abort = false;/* reset */
		}

		/* allocation failure silently drops this tick; the scan
		 * simply stops making progress
		 */
		ph2c = rtw_zmalloc(sizeof(struct cmd_obj));
		if (!ph2c)
			return;

		psurveyPara = rtw_zmalloc(sizeof(struct sitesurvey_parm));
		if (!psurveyPara) {
			kfree(ph2c);
			return;
		}

		init_h2fwcmd_w_parm_no_rsp(ph2c, psurveyPara, GEN_CMD_CODE(_SiteSurvey));
		rtw_enqueue_cmd(pcmdpriv, ph2c);
	}
}
/*
 * Link timer: drives auth/assoc retries during a join.
 *
 * - WIFI_FW_AUTH_NULL:  no auth response at all -> report join_res -3.
 * - WIFI_FW_AUTH_STATE: retry authentication up to REAUTH_LIMIT, then
 *                       report join_res -1.
 * - WIFI_FW_ASSOC_STATE: retry association up to REASSOC_LIMIT, then
 *                       report join_res -2.
 */
void link_timer_hdl(struct timer_list *t)
{
	struct adapter *padapter =
		from_timer(padapter, t, mlmeextpriv.link_timer);
	/* static unsigned int rx_pkt = 0; */
	/* static u64 tx_cnt = 0; */
	/* struct xmit_priv *pxmitpriv = &(padapter->xmitpriv); */
	struct mlme_ext_priv *pmlmeext = &padapter->mlmeextpriv;
	struct mlme_ext_info *pmlmeinfo = &(pmlmeext->mlmext_info);
	/* struct sta_priv *pstapriv = &padapter->stapriv; */

	if (pmlmeinfo->state & WIFI_FW_AUTH_NULL) {
		pmlmeinfo->state = WIFI_FW_NULL_STATE;
		report_join_res(padapter, -3);
	} else if (pmlmeinfo->state & WIFI_FW_AUTH_STATE) {
		/* re-auth timer */
		if (++pmlmeinfo->reauth_count > REAUTH_LIMIT) {
			pmlmeinfo->state = 0;
			report_join_res(padapter, -1);
			return;
		}

		/* restart the auth handshake from sequence 1 */
		pmlmeinfo->auth_seq = 1;
		issue_auth(padapter, NULL, 0);
		set_link_timer(pmlmeext, REAUTH_TO);
	} else if (pmlmeinfo->state & WIFI_FW_ASSOC_STATE) {
		/* re-assoc timer */
		if (++pmlmeinfo->reassoc_count > REASSOC_LIMIT) {
			pmlmeinfo->state = WIFI_FW_NULL_STATE;
			report_join_res(padapter, -2);
			return;
		}

		issue_assocreq(padapter);
		set_link_timer(pmlmeext, REASSOC_TO);
	}
}
/*
 * ADDBA retry timer: the ADDBA request we sent got no response, so drop
 * the pending candidate TIDs and let a later frame re-trigger the
 * block-ack setup.
 */
void addba_timer_hdl(struct timer_list *t)
{
	struct sta_info *psta = from_timer(psta, t, addba_retry_timer);
	struct ht_priv *ht;

	if (!psta)
		return;

	ht = &psta->htpriv;

	if (ht->ht_option && ht->ampdu_enable && ht->candidate_tid_bitmap)
		ht->candidate_tid_bitmap = 0x0;
}
/*
 * SA-Query timer (802.11w): the peer failed to answer our SA Query in
 * time, so the association is presumed stale — disconnect and free the
 * association resources.
 */
void sa_query_timer_hdl(struct timer_list *t)
{
	struct adapter *padapter =
		from_timer(padapter, t, mlmeextpriv.sa_query_timer);
	struct mlme_priv *pmlmepriv = &padapter->mlmepriv;
	/* disconnect */
	spin_lock_bh(&pmlmepriv->lock);

	if (check_fwstate(pmlmepriv, _FW_LINKED)) {
		rtw_disassoc_cmd(padapter, 0, true);
		rtw_indicate_disconnect(padapter);
		rtw_free_assoc_resources(padapter, 1);
	}

	spin_unlock_bh(&pmlmepriv->lock);
}
/* No-op H2C handler; placeholder entry for unimplemented command codes. */
u8 NULL_hdl(struct adapter *padapter, u8 *pbuf)
{
	return H2C_SUCCESS;
}
/*
 * H2C handler: switch the operating mode (AP / station / adhoc / none).
 * Updates the FW state for AP and station modes, programs the HW opmode
 * register, and notifies BT-coex when entering AP mode.
 */
u8 setopmode_hdl(struct adapter *padapter, u8 *pbuf)
{
	struct mlme_ext_priv *pmlmeext = &padapter->mlmeextpriv;
	struct mlme_ext_info *pmlmeinfo = &(pmlmeext->mlmext_info);
	struct setopmode_parm *psetop = (struct setopmode_parm *)pbuf;
	u8 hw_state;

	switch (psetop->mode) {
	case Ndis802_11APMode:
		pmlmeinfo->state = WIFI_FW_AP_STATE;
		hw_state = _HW_STATE_AP_;
		break;
	case Ndis802_11Infrastructure:
		/* clear the role bits, then mark station mode */
		pmlmeinfo->state &= ~(BIT(0) | BIT(1));
		pmlmeinfo->state |= WIFI_FW_STATION_STATE;
		hw_state = _HW_STATE_STATION_;
		break;
	case Ndis802_11IBSS:
		hw_state = _HW_STATE_ADHOC_;
		break;
	default:
		hw_state = _HW_STATE_NOLINK_;
		break;
	}

	rtw_hal_set_hwreg(padapter, HW_VAR_SET_OPMODE, (u8 *)(&hw_state));

	if (psetop->mode == Ndis802_11APMode) {
		/* Do this after port switch to */
		/* prevent from downloading rsvd page to wrong port */
		rtw_btcoex_MediaStatusNotify(padapter, 1); /* connect */
	}

	return H2C_SUCCESS;
}
/*
 * H2C handler: create a BSS.
 *
 * AP mode simply (re)starts the BSS network.  For ad-hoc master, the
 * join state is reset, the requested network description is copied in
 * (with the IE length validated against MAX_IE_SZ) and IBSS creation is
 * started.
 *
 * Returns H2C_SUCCESS, or H2C_PARAMETERS_ERROR when the supplied IE
 * buffer is oversized.
 */
u8 createbss_hdl(struct adapter *padapter, u8 *pbuf)
{
	struct mlme_ext_priv *pmlmeext = &padapter->mlmeextpriv;
	struct mlme_ext_info *pmlmeinfo = &(pmlmeext->mlmext_info);
	struct wlan_bssid_ex *pnetwork = (struct wlan_bssid_ex *)(&(pmlmeinfo->network));
	struct joinbss_parm *pparm = (struct joinbss_parm *)pbuf;
	/* u32 initialgain; */

	if (pmlmeinfo->state == WIFI_FW_AP_STATE) {
		start_bss_network(padapter);
		return H2C_SUCCESS;
	}

	/* below is for ad-hoc master */
	if (pparm->network.infrastructure_mode == Ndis802_11IBSS) {
		rtw_joinbss_reset(padapter);

		/* reset all join-time bookkeeping to a clean 20MHz state */
		pmlmeext->cur_bwmode = CHANNEL_WIDTH_20;
		pmlmeext->cur_ch_offset = HAL_PRIME_CHNL_OFFSET_DONT_CARE;
		pmlmeinfo->ERP_enable = 0;
		pmlmeinfo->WMM_enable = 0;
		pmlmeinfo->HT_enable = 0;
		pmlmeinfo->HT_caps_enable = 0;
		pmlmeinfo->HT_info_enable = 0;
		pmlmeinfo->agg_enable_bitmap = 0;
		pmlmeinfo->candidate_tid_bitmap = 0;

		/* disable dynamic functions, such as high power, DIG */
		Save_DM_Func_Flag(padapter);
		Switch_DM_Func(padapter, DYNAMIC_FUNC_DISABLE, false);

		/* config the initial gain under linking, need to write the BB registers */
		/* initialgain = 0x1E; */
		/* rtw_hal_set_hwreg(padapter, HW_VAR_INITIAL_GAIN, (u8 *)(&initialgain)); */

		/* cancel link timer */
		del_timer_sync(&pmlmeext->link_timer);

		/* clear CAM */
		flush_all_cam_entry(padapter);

		/* copy the fixed-size head first; the variable-length IEs
		 * are only copied after the length check below
		 */
		memcpy(pnetwork, pbuf, FIELD_OFFSET(struct wlan_bssid_ex, ie_length));
		pnetwork->ie_length = ((struct wlan_bssid_ex *)pbuf)->ie_length;

		if (pnetwork->ie_length > MAX_IE_SZ)/* Check pbuf->ie_length */
			return H2C_PARAMETERS_ERROR;

		memcpy(pnetwork->ies, ((struct wlan_bssid_ex *)pbuf)->ies, pnetwork->ie_length);

		start_create_ibss(padapter);
	}

	return H2C_SUCCESS;
}
/*
 * H2C handler: join the BSS described in @pbuf (a struct joinbss_parm
 * carrying a wlan_bssid_ex).
 *
 * Leaves any current association, resets the join state, copies the
 * target network (validating IE length), parses its WMM/HT IEs to
 * derive bandwidth and channel offset, programs the HW BSSID/join
 * state, switches channel and starts the client join state machine.
 *
 * Returns H2C_SUCCESS (including the join-refused case, reported via
 * report_join_res(-4)), or H2C_PARAMETERS_ERROR on an oversized IE
 * buffer.
 */
u8 join_cmd_hdl(struct adapter *padapter, u8 *pbuf)
{
	u8 join_type;
	struct ndis_80211_var_ie *pIE;
	struct registry_priv *pregpriv = &padapter->registrypriv;
	struct mlme_ext_priv *pmlmeext = &padapter->mlmeextpriv;
	struct mlme_ext_info *pmlmeinfo = &(pmlmeext->mlmext_info);
	struct wlan_bssid_ex *pnetwork = (struct wlan_bssid_ex *)(&(pmlmeinfo->network));
	u32 i;
	u8 cbw40_enable = 0;
	/* u32 initialgain; */
	/* u32 acparm; */
	u8 ch, bw, offset;

	/* check already connecting to AP or not */
	if (pmlmeinfo->state & WIFI_FW_ASSOC_SUCCESS) {
		if (pmlmeinfo->state & WIFI_FW_STATION_STATE)
			issue_deauth_ex(padapter, pnetwork->mac_address, WLAN_REASON_DEAUTH_LEAVING, 1, 100);

		pmlmeinfo->state = WIFI_FW_NULL_STATE;

		/* clear CAM */
		flush_all_cam_entry(padapter);

		del_timer_sync(&pmlmeext->link_timer);

		/* set MSR to nolink -> infra. mode */
		/* Set_MSR(padapter, _HW_STATE_NOLINK_); */
		Set_MSR(padapter, _HW_STATE_STATION_);

		rtw_hal_set_hwreg(padapter, HW_VAR_MLME_DISCONNECT, NULL);
	}

	rtw_joinbss_reset(padapter);

	/* start every join from a clean 20MHz / no-HT baseline */
	pmlmeext->cur_bwmode = CHANNEL_WIDTH_20;
	pmlmeext->cur_ch_offset = HAL_PRIME_CHNL_OFFSET_DONT_CARE;
	pmlmeinfo->ERP_enable = 0;
	pmlmeinfo->WMM_enable = 0;
	pmlmeinfo->HT_enable = 0;
	pmlmeinfo->HT_caps_enable = 0;
	pmlmeinfo->HT_info_enable = 0;
	pmlmeinfo->agg_enable_bitmap = 0;
	pmlmeinfo->candidate_tid_bitmap = 0;
	pmlmeinfo->bwmode_updated = false;
	/* pmlmeinfo->assoc_AP_vendor = HT_IOT_PEER_MAX; */
	pmlmeinfo->VHT_enable = 0;

	/* copy the fixed-size head first; IEs follow after the length check */
	memcpy(pnetwork, pbuf, FIELD_OFFSET(struct wlan_bssid_ex, ie_length));
	pnetwork->ie_length = ((struct wlan_bssid_ex *)pbuf)->ie_length;

	if (pnetwork->ie_length > MAX_IE_SZ)/* Check pbuf->ie_length */
		return H2C_PARAMETERS_ERROR;

	memcpy(pnetwork->ies, ((struct wlan_bssid_ex *)pbuf)->ies, pnetwork->ie_length);

	pmlmeext->cur_channel = (u8)pnetwork->configuration.ds_config;
	pmlmeinfo->bcn_interval = get_beacon_interval(pnetwork);

	/* Check AP vendor to move rtw_joinbss_cmd() */
	/* pmlmeinfo->assoc_AP_vendor = check_assoc_AP(pnetwork->ies, pnetwork->ie_length); */

	/* sizeof(struct ndis_802_11_fix_ie) */
	/* walk the variable IEs of the target network */
	for (i = _FIXED_IE_LENGTH_; i < pnetwork->ie_length;) {
		pIE = (struct ndis_80211_var_ie *)(pnetwork->ies + i);

		switch (pIE->element_id) {
		case WLAN_EID_VENDOR_SPECIFIC:/* Get WMM IE. */
			if (!memcmp(pIE->data, WMM_OUI, 4))
				WMM_param_handler(padapter, pIE);
			break;

		case WLAN_EID_HT_CAPABILITY: /* Get HT Cap IE. */
			pmlmeinfo->HT_caps_enable = 1;
			break;

		case WLAN_EID_HT_OPERATION: /* Get HT Info IE. */
			pmlmeinfo->HT_info_enable = 1;

			/* spec case only for cisco's ap because cisco's ap issue assoc rsp using mcs rate @40MHz or @20MHz */
			{
				struct HT_info_element *pht_info = (struct HT_info_element *)(pIE->data);

				/* 40MHz only considered on 2.4GHz channels and
				 * when the local registry allows >20MHz
				 */
				if (pnetwork->configuration.ds_config <= 14) {
					if ((pregpriv->bw_mode & 0x0f) > CHANNEL_WIDTH_20)
						cbw40_enable = 1;
				}

				/* infos[0] BIT(2): AP advertises 40MHz;
				 * infos[0] & 0x3: secondary channel offset
				 */
				if ((cbw40_enable) && (pht_info->infos[0] & BIT(2))) {
					/* switch to the 40M Hz mode according to the AP */
					pmlmeext->cur_bwmode = CHANNEL_WIDTH_40;
					switch (pht_info->infos[0] & 0x3) {
					case 1:
						pmlmeext->cur_ch_offset = HAL_PRIME_CHNL_OFFSET_LOWER;
						break;

					case 3:
						pmlmeext->cur_ch_offset = HAL_PRIME_CHNL_OFFSET_UPPER;
						break;

					default:
						pmlmeext->cur_ch_offset = HAL_PRIME_CHNL_OFFSET_DONT_CARE;
						pmlmeext->cur_bwmode = CHANNEL_WIDTH_20;
						break;
					}
				}
			}
			break;

		default:
			break;
		}

		i += (pIE->length + 2);
	}

	/* check channel, bandwidth, offset and switch */
	if (rtw_chk_start_clnt_join(padapter, &ch, &bw, &offset) == _FAIL) {
		report_join_res(padapter, (-4));
		return H2C_SUCCESS;
	}

	/* disable dynamic functions, such as high power, DIG */
	/* Switch_DM_Func(padapter, DYNAMIC_FUNC_DISABLE, false); */

	/* config the initial gain under linking, need to write the BB registers */
	/* initialgain = 0x1E; */
	/* rtw_hal_set_hwreg(padapter, HW_VAR_INITIAL_GAIN, (u8 *)(&initialgain)); */

	rtw_hal_set_hwreg(padapter, HW_VAR_BSSID, pmlmeinfo->network.mac_address);
	join_type = 0;
	rtw_hal_set_hwreg(padapter, HW_VAR_MLME_JOIN, (u8 *)(&join_type));
	rtw_hal_set_hwreg(padapter, HW_VAR_DO_IQK, NULL);

	set_channel_bwmode(padapter, ch, offset, bw);

	/* cancel link timer */
	del_timer_sync(&pmlmeext->link_timer);

	start_clnt_join(padapter);

	return H2C_SUCCESS;
}
/*
 * H2C handler: disconnect from the current network.  Sends a deauth to
 * the AP when associated, stops beaconing in adhoc/AP roles, then tears
 * down the mlme_ext link state and drops pending undecrypted frames.
 */
u8 disconnect_hdl(struct adapter *padapter, unsigned char *pbuf)
{
	struct disconnect_parm *param = (struct disconnect_parm *)pbuf;
	struct mlme_ext_priv *pmlmeext = &padapter->mlmeextpriv;
	struct mlme_ext_info *pmlmeinfo = &pmlmeext->mlmext_info;
	struct wlan_bssid_ex *pnetwork = (struct wlan_bssid_ex *)&pmlmeinfo->network;
	u8 bcn_off = 0;

	if (is_client_associated_to_ap(padapter))
		issue_deauth_ex(padapter, pnetwork->mac_address,
				WLAN_REASON_DEAUTH_LEAVING,
				param->deauth_timeout_ms / 100, 100);

	if (((pmlmeinfo->state & 0x03) == WIFI_FW_ADHOC_STATE) ||
	    ((pmlmeinfo->state & 0x03) == WIFI_FW_AP_STATE)) {
		/* Stop BCN */
		rtw_hal_set_hwreg(padapter, HW_VAR_BCN_FUNC, (u8 *)(&bcn_off));
	}

	rtw_mlmeext_disconnect(padapter);

	rtw_free_uc_swdec_pending_queue(padapter);

	return H2C_SUCCESS;
}
/*
 * Build the list of channels to scan.
 *
 * @out/@out_num: destination array and its capacity.
 * @in/@in_num:   caller-requested channels; entries that are disabled or
 *                not present in the adapter's channel set are dropped.
 *
 * When none of the requested channels survive filtering (or none were
 * given), the adapter's full channel set is used instead.  Channels
 * marked SCAN_PASSIVE in the channel set get the passive-scan flag.
 *
 * Returns the number of entries written to @out.
 */
static int rtw_scan_ch_decision(struct adapter *padapter, struct rtw_ieee80211_channel *out,
	u32 out_num, struct rtw_ieee80211_channel *in, u32 in_num)
{
	int i, j;
	int set_idx;
	struct mlme_ext_priv *pmlmeext = &padapter->mlmeextpriv;

	/* clear first */
	memset(out, 0, sizeof(struct rtw_ieee80211_channel)*out_num);

	/* acquire channels from in */
	j = 0;
	for (i = 0; i < in_num; i++) {
		set_idx = rtw_ch_set_search_ch(pmlmeext->channel_set, in[i].hw_value);
		if (in[i].hw_value && !(in[i].flags & RTW_IEEE80211_CHAN_DISABLED)
			&& set_idx >= 0
		) {
			if (j >= out_num) {
				netdev_dbg(padapter->pnetdev,
					   FUNC_ADPT_FMT " out_num:%u not enough\n",
					   FUNC_ADPT_ARG(padapter), out_num);
				break;
			}

			memcpy(&out[j], &in[i], sizeof(struct rtw_ieee80211_channel));

			if (pmlmeext->channel_set[set_idx].ScanType == SCAN_PASSIVE)
				out[j].flags |= RTW_IEEE80211_CHAN_PASSIVE_SCAN;

			j++;
		}
		if (j >= out_num)
			break;
	}

	/* if out is empty, use channel_set as default */
	if (j == 0) {
		for (i = 0; i < pmlmeext->max_chan_nums; i++) {
			if (j >= out_num) {
				netdev_dbg(padapter->pnetdev,
					   FUNC_ADPT_FMT " out_num:%u not enough\n",
					   FUNC_ADPT_ARG(padapter),
					   out_num);
				break;
			}

			out[j].hw_value = pmlmeext->channel_set[i].ChannelNum;

			if (pmlmeext->channel_set[i].ScanType == SCAN_PASSIVE)
				out[j].flags |= RTW_IEEE80211_CHAN_PASSIVE_SCAN;

			j++;
		}
	}

	return j;
}
/*
 * H2C handler: run one step of a site survey.
 *
 * On the first invocation (state SCAN_DISABLE) it records the requested
 * SSIDs and channel list and, when currently associated, sends a
 * null-data (power-save) frame to the AP and defers the scan by 50ms.
 * Subsequent invocations put the HW into scan configuration and call
 * site_survey() to process the current channel.
 */
u8 sitesurvey_cmd_hdl(struct adapter *padapter, u8 *pbuf)
{
	struct mlme_ext_priv *pmlmeext = &padapter->mlmeextpriv;
	struct sitesurvey_parm *pparm = (struct sitesurvey_parm *)pbuf;
	u8 bdelayscan = false;
	u8 val8;
	u32 initialgain;
	u32 i;

	if (pmlmeext->sitesurvey_res.state == SCAN_DISABLE) {
		pmlmeext->sitesurvey_res.state = SCAN_START;
		pmlmeext->sitesurvey_res.bss_cnt = 0;
		pmlmeext->sitesurvey_res.channel_idx = 0;

		/* latch the requested SSIDs for directed probing */
		for (i = 0; i < RTW_SSID_SCAN_AMOUNT; i++) {
			if (pparm->ssid[i].ssid_length) {
				memcpy(pmlmeext->sitesurvey_res.ssid[i].ssid, pparm->ssid[i].ssid, IW_ESSID_MAX_SIZE);
				pmlmeext->sitesurvey_res.ssid[i].ssid_length = pparm->ssid[i].ssid_length;
			} else {
				pmlmeext->sitesurvey_res.ssid[i].ssid_length = 0;
			}
		}

		pmlmeext->sitesurvey_res.ch_num = rtw_scan_ch_decision(padapter
			, pmlmeext->sitesurvey_res.ch, RTW_CHANNEL_SCAN_AMOUNT
			, pparm->ch, pparm->ch_num
		);

		pmlmeext->sitesurvey_res.scan_mode = pparm->scan_mode;

		/* issue null data if associating to the AP */
		if (is_client_associated_to_ap(padapter)) {
			pmlmeext->sitesurvey_res.state = SCAN_TXNULL;

			issue_nulldata(padapter, NULL, 1, 3, 500);

			bdelayscan = true;
		}
		if (bdelayscan) {
			/* delay 50ms to protect nulldata(1). */
			set_survey_timer(pmlmeext, 50);
			return H2C_SUCCESS;
		}
	}

	if ((pmlmeext->sitesurvey_res.state == SCAN_START) || (pmlmeext->sitesurvey_res.state == SCAN_TXNULL)) {
		/* disable dynamic functions, such as high power, DIG */
		Save_DM_Func_Flag(padapter);
		Switch_DM_Func(padapter, DYNAMIC_FUNC_DISABLE, false);

		/* config the initial gain under scanning, need to write the BB
		 * registers
		 */
		initialgain = 0x1e;
		rtw_hal_set_hwreg(padapter, HW_VAR_INITIAL_GAIN, (u8 *)(&initialgain));

		/* set MSR to no link state */
		Set_MSR(padapter, _HW_STATE_NOLINK_);

		val8 = 1; /* under site survey */
		rtw_hal_set_hwreg(padapter, HW_VAR_MLME_SITESURVEY, (u8 *)(&val8));

		pmlmeext->sitesurvey_res.state = SCAN_PROCESS;
	}

	site_survey(padapter);

	return H2C_SUCCESS;
}
/*
 * H2C handler: record the authentication algorithm to use for joins.
 * Values >= 4 are silently ignored as out of range.
 */
u8 setauth_hdl(struct adapter *padapter, unsigned char *pbuf)
{
	struct setauth_parm *pparm = (struct setauth_parm *)pbuf;
	struct mlme_ext_info *pmlmeinfo = &padapter->mlmeextpriv.mlmext_info;

	if (pparm->mode < 4)
		pmlmeinfo->auth_algo = pparm->mode;

	return H2C_SUCCESS;
}
/*
 * H2C handler: install a group (default) key into the HW CAM.
 *
 * Allocates a CAM slot for @pparm->keyid and writes the key; slots 0-3
 * are default-key slots addressed by key id (all-zero MAC), higher
 * slots are matched by the BSSID.  When a default-key slot was used,
 * the HW default-key config is refreshed.  Always re-enables multicast
 * reception before returning.
 *
 * Returns H2C_SUCCESS (CAM allocation failure is silently skipped, as
 * before).
 */
u8 setkey_hdl(struct adapter *padapter, u8 *pbuf)
{
	u16 ctrl = 0;
	s16 cam_id = 0;
	struct setkey_parm *pparm = (struct setkey_parm *)pbuf;
	struct mlme_ext_priv *pmlmeext = &padapter->mlmeextpriv;
	struct mlme_ext_info *pmlmeinfo = &(pmlmeext->mlmext_info);
	unsigned char null_addr[] = {0x00, 0x00, 0x00, 0x00, 0x00, 0x00};
	u8 *addr;

	/* main tx key for wep. */
	if (pparm->set_tx)
		pmlmeinfo->key_index = pparm->keyid;

	cam_id = rtw_camid_alloc(padapter, NULL, pparm->keyid);
	/* original code used an empty "if (cam_id < 0) { } else" branch;
	 * test the positive condition directly instead
	 */
	if (cam_id >= 0) {
		if (cam_id > 3) /* not default key, searched by A2 */
			addr = get_bssid(&padapter->mlmepriv);
		else
			addr = null_addr;

		ctrl = BIT(15) | BIT6 | ((pparm->algorithm) << 2) | pparm->keyid;
		write_cam(padapter, cam_id, ctrl, addr, pparm->key);

		netdev_dbg(padapter->pnetdev,
			   "set group key camid:%d, addr:%pM, kid:%d, type:%s\n",
			   cam_id, MAC_ARG(addr), pparm->keyid,
			   security_type_str(pparm->algorithm));
	}

	if (cam_id >= 0 && cam_id <= 3)
		rtw_hal_set_hwreg(padapter, HW_VAR_SEC_DK_CFG, (u8 *)true);

	/* allow multicast packets to driver */
	padapter->HalFunc.SetHwRegHandler(padapter, HW_VAR_ON_RCR_AM, null_addr);

	return H2C_SUCCESS;
}
/*
 * H2C handler: install or clear a pairwise key for the station at
 * @pparm->addr.
 *
 * _NO_PRIVACY_ means "clear": every CAM slot matching the address is
 * wiped and freed.  Otherwise a CAM slot is allocated for the station
 * and the key is written.
 *
 * Returns H2C_SUCCESS_RSP on a completed write/clear, H2C_REJECTED when
 * the station is unknown, H2C_SUCCESS when CAM allocation failed.
 */
u8 set_stakey_hdl(struct adapter *padapter, u8 *pbuf)
{
	u16 ctrl = 0;
	s16 cam_id = 0;
	u8 ret = H2C_SUCCESS;
	struct mlme_ext_priv *pmlmeext = &padapter->mlmeextpriv;
	struct mlme_ext_info *pmlmeinfo = &(pmlmeext->mlmext_info);
	struct set_stakey_parm *pparm = (struct set_stakey_parm *)pbuf;
	struct sta_priv *pstapriv = &padapter->stapriv;
	struct sta_info *psta;

	/* the clear path below re-searches by address, so the sta lookup
	 * and CAM allocation are skipped entirely
	 */
	if (pparm->algorithm == _NO_PRIVACY_)
		goto write_to_cam;

	psta = rtw_get_stainfo(pstapriv, pparm->addr);
	if (!psta) {
		netdev_dbg(padapter->pnetdev, "%s sta:%pM not found\n",
			   __func__, MAC_ARG(pparm->addr));
		ret = H2C_REJECTED;
		goto exit;
	}

	pmlmeinfo->enc_algo = pparm->algorithm;
	cam_id = rtw_camid_alloc(padapter, psta, 0);
	if (cam_id < 0)
		goto exit;

write_to_cam:
	if (pparm->algorithm == _NO_PRIVACY_) {
		/* remove every key the CAM still holds for this address */
		while ((cam_id = rtw_camid_search(padapter, pparm->addr, -1)) >= 0) {
			netdev_dbg(padapter->pnetdev,
				   "clear key for addr:%pM, camid:%d\n",
				   MAC_ARG(pparm->addr), cam_id);
			clear_cam_entry(padapter, cam_id);
			rtw_camid_free(padapter, cam_id);
		}
	} else {
		netdev_dbg(padapter->pnetdev,
			   "set pairwise key camid:%d, addr:%pM, kid:%d, type:%s\n",
			   cam_id, MAC_ARG(pparm->addr), pparm->keyid,
			   security_type_str(pparm->algorithm));
		ctrl = BIT(15) | ((pparm->algorithm) << 2) | pparm->keyid;
		write_cam(padapter, cam_id, ctrl, pparm->addr, pparm->key);
	}
	ret = H2C_SUCCESS_RSP;

exit:
	return ret;
}
u8 add_ba_hdl(struct adapter *padapter, unsigned char *pbuf)
{
struct addBaReq_parm *pparm = (struct addBaReq_parm *)pbuf;
struct mlme_ext_priv *pmlmeext = &padapter->mlmeextpriv;
struct mlme_ext_info *pmlmeinfo = &(pmlmeext->mlmext_info);
struct sta_info *psta = rtw_get_stainfo(&padapter->stapriv, pparm->addr);
if (!psta)
return H2C_SUCCESS;
if (((pmlmeinfo->state & WIFI_FW_ASSOC_SUCCESS) && (pmlmeinfo->HT_enable)) ||
((pmlmeinfo->state&0x03) == WIFI_FW_AP_STATE)) {
/* pmlmeinfo->ADDBA_retry_count = 0; */
/* pmlmeinfo->candidate_tid_bitmap |= (0x1 << pparm->tid); */
/* psta->htpriv.candidate_tid_bitmap |= BIT(pparm->tid); */
issue_action_BA(padapter, pparm->addr, WLAN_ACTION_ADDBA_REQ, (u16)pparm->tid);
/* _set_timer(&pmlmeext->ADDBA_timer, ADDBA_TO); */
_set_timer(&psta->addba_retry_timer, ADDBA_TO);
} else {
psta->htpriv.candidate_tid_bitmap &= ~BIT(pparm->tid);
}
return H2C_SUCCESS;
}
/*
 * Queue a _ChkBMCSleepq H2C command (parameterless).
 * Returns _SUCCESS when enqueued, _FAIL on allocation failure.
 */
u8 chk_bmc_sleepq_cmd(struct adapter *padapter)
{
	struct cmd_priv *pcmdpriv = &padapter->cmdpriv;
	struct cmd_obj *cmd;

	cmd = rtw_zmalloc(sizeof(*cmd));
	if (!cmd)
		return _FAIL;

	init_h2fwcmd_w_parm_no_parm_rsp(cmd, GEN_CMD_CODE(_ChkBMCSleepq));

	return rtw_enqueue_cmd(pcmdpriv, cmd);
}
/*
 * Queue a _TX_Beacon H2C command carrying a copy of the current network
 * description, with the SSID hidden according to hidden_ssid_mode.
 * Returns _SUCCESS when enqueued, _FAIL on allocation failure.
 */
u8 set_tx_beacon_cmd(struct adapter *padapter)
{
	struct cmd_obj *ph2c;
	struct Tx_Beacon_param *ptxBeacon_parm;
	struct cmd_priv *pcmdpriv = &(padapter->cmdpriv);
	struct mlme_ext_priv *pmlmeext = &padapter->mlmeextpriv;
	struct mlme_ext_info *pmlmeinfo = &(pmlmeext->mlmext_info);
	u8 res = _SUCCESS;
	int len_diff = 0;

	ph2c = rtw_zmalloc(sizeof(struct cmd_obj));
	if (!ph2c) {
		res = _FAIL;
		goto exit;
	}

	ptxBeacon_parm = rtw_zmalloc(sizeof(struct Tx_Beacon_param));
	if (!ptxBeacon_parm) {
		kfree(ph2c);
		res = _FAIL;
		goto exit;
	}

	memcpy(&(ptxBeacon_parm->network), &(pmlmeinfo->network), sizeof(struct wlan_bssid_ex));

	/* blank out the SSID IE in the copy; update_hidden_ssid() returns
	 * the resulting change in IE length
	 */
	len_diff = update_hidden_ssid(ptxBeacon_parm->network.ies+_BEACON_IE_OFFSET_,
				      ptxBeacon_parm->network.ie_length-_BEACON_IE_OFFSET_,
				      pmlmeinfo->hidden_ssid_mode);
	ptxBeacon_parm->network.ie_length += len_diff;

	init_h2fwcmd_w_parm_no_rsp(ph2c, ptxBeacon_parm, GEN_CMD_CODE(_TX_Beacon));

	res = rtw_enqueue_cmd(pcmdpriv, ph2c);

exit:
	return res;
}
/*
 * C2H firmware event dispatch table, indexed by event code.
 * Each entry gives the expected parameter size (0 = unchecked) and the
 * callback; NULL callbacks mark unused event codes (see mlme_evt_hdl()).
 */
static struct fwevent wlanevents[] = {
	{0, rtw_dummy_event_callback}, /*0*/
	{0, NULL},
	{0, NULL},
	{0, NULL},
	{0, NULL},
	{0, NULL},
	{0, NULL},
	{0, NULL},
	{0, &rtw_survey_event_callback}, /*8*/
	{sizeof(struct surveydone_event), &rtw_surveydone_event_callback}, /*9*/
	{0, &rtw_joinbss_event_callback}, /*10*/
	{sizeof(struct stassoc_event), &rtw_stassoc_event_callback}, /*11*/
	{sizeof(struct stadel_event), &rtw_stadel_event_callback}, /*12*/
	{0, &rtw_atimdone_event_callback}, /*13*/
	{0, rtw_dummy_event_callback}, /*14*/
	{0, NULL}, /*15*/
	{0, NULL},
	{0, NULL},
	{0, NULL},
	{0, rtw_fwdbg_event_callback}, /*19*/
	{0, NULL}, /*20*/
	{0, NULL},
	{0, NULL},
	{0, &rtw_cpwm_event_callback}, /*23*/
	{0, NULL},
	{0, &rtw_wmm_event_callback}, /*25*/
};
/*
 * H2C handler: dispatch a C2H firmware event.
 *
 * The first 32-bit word of @pbuf encodes the event size (low 16 bits)
 * and event code (bits 16-23); the payload follows after two words.
 * The event is dropped when the code is out of range, the size does not
 * match the table entry, or the table entry has no callback.
 *
 * Always returns H2C_SUCCESS.
 */
u8 mlme_evt_hdl(struct adapter *padapter, unsigned char *pbuf)
{
	u8 evt_code;
	u16 evt_sz;
	uint *peventbuf;
	void (*event_callback)(struct adapter *dev, u8 *pbuf);
	struct evt_priv *pevt_priv = &(padapter->evtpriv);

	if (!pbuf)
		goto _abort_event_;

	peventbuf = (uint *)pbuf;
	evt_sz = (u16)(*peventbuf & 0xffff);
	evt_code = (u8)((*peventbuf >> 16) & 0xff);

	/* checking if event code is valid */
	if (evt_code >= MAX_C2HEVT)
		goto _abort_event_;

	/* checking if event size match the event parm size */
	if ((wlanevents[evt_code].parmsize != 0) &&
	    (wlanevents[evt_code].parmsize != evt_sz))
		goto _abort_event_;

	atomic_inc(&pevt_priv->event_seq);

	peventbuf += 2;

	/* Many wlanevents[] entries have a NULL callback; the original
	 * code only tested "if (peventbuf)" — always true after the
	 * increment above — and would dereference a NULL callback here.
	 */
	event_callback = wlanevents[evt_code].event_callback;
	if (event_callback) {
		event_callback(padapter, (u8 *)peventbuf);
		pevt_priv->evt_done_cnt++;
	}

_abort_event_:
	return H2C_SUCCESS;
}
/* H2C message handler: only validates that a buffer was supplied. */
u8 h2c_msg_hdl(struct adapter *padapter, unsigned char *pbuf)
{
	return pbuf ? H2C_SUCCESS : H2C_PARAMETERS_ERROR;
}
/*
 * H2C handler: flush the broadcast/multicast sleep queue.
 *
 * When the TIM bit for the bc/mc station (bit 0) is set and frames are
 * queued, each queued frame is tagged with more-data while the queue is
 * non-empty and handed to the HAL for transmission on the high queue.
 */
u8 chk_bmc_sleepq_hdl(struct adapter *padapter, unsigned char *pbuf)
{
	struct sta_info *psta_bmc;
	struct list_head *xmitframe_plist, *xmitframe_phead, *tmp;
	struct xmit_frame *pxmitframe = NULL;
	struct xmit_priv *pxmitpriv = &padapter->xmitpriv;
	struct sta_priv *pstapriv = &padapter->stapriv;

	/* for BC/MC Frames */
	psta_bmc = rtw_get_bcmc_stainfo(padapter);
	if (!psta_bmc)
		return H2C_SUCCESS;

	if ((pstapriv->tim_bitmap&BIT(0)) && (psta_bmc->sleepq_len > 0)) {
		msleep(10);/* 10ms, ATIM(HIQ) Windows */

		/* spin_lock_bh(&psta_bmc->sleep_q.lock); */
		spin_lock_bh(&pxmitpriv->lock);

		xmitframe_phead = get_list_head(&psta_bmc->sleep_q);
		list_for_each_safe(xmitframe_plist, tmp, xmitframe_phead) {
			pxmitframe = list_entry(xmitframe_plist,
						struct xmit_frame, list);

			list_del_init(&pxmitframe->list);

			psta_bmc->sleepq_len--;

			/* more-data bit set while frames remain queued */
			if (psta_bmc->sleepq_len > 0)
				pxmitframe->attrib.mdata = 1;
			else
				pxmitframe->attrib.mdata = 0;

			pxmitframe->attrib.triggered = 1;

			if (xmitframe_hiq_filter(pxmitframe))
				pxmitframe->attrib.qsel = 0x11;/* HIQ */

			rtw_hal_xmitframe_enqueue(padapter, pxmitframe);
		}

		/* spin_unlock_bh(&psta_bmc->sleep_q.lock); */
		spin_unlock_bh(&pxmitpriv->lock);

		/* check hi queue and bmc_sleepq */
		rtw_chk_hi_queue_cmd(padapter);
	}

	return H2C_SUCCESS;
}
/*
 * H2C handler: transmit a beacon, then flush queued bc/mc frames (the
 * beacon carries the updated TIM).  Returns H2C_PARAMETERS_ERROR if the
 * beacon could not be sent.
 */
u8 tx_beacon_hdl(struct adapter *padapter, unsigned char *pbuf)
{
	if (send_beacon(padapter) == _FAIL)
		return H2C_PARAMETERS_ERROR;

	/* tx bc/mc frames after update TIM */
	chk_bmc_sleepq_hdl(padapter, NULL);

	return H2C_SUCCESS;
}
/*
 * Fill in the channel, bandwidth and channel offset a client join
 * should use (currently always the adapter's current settings).
 * Returns _SUCCESS, or _FAIL when any output pointer is NULL.
 */
int rtw_chk_start_clnt_join(struct adapter *padapter, u8 *ch, u8 *bw, u8 *offset)
{
	struct mlme_ext_priv *pmlmeext = &padapter->mlmeextpriv;

	if (!ch || !bw || !offset) {
		rtw_warn_on(1);
		return _FAIL;
	}

	*ch = pmlmeext->cur_channel;
	*bw = pmlmeext->cur_bwmode;
	*offset = pmlmeext->cur_ch_offset;

	return _SUCCESS;
}
/*
 * H2C handler: switch to the channel/bandwidth/offset carried in the
 * set_ch_parm and mirror them into the mlme_ext state.
 */
u8 set_ch_hdl(struct adapter *padapter, u8 *pbuf)
{
	struct mlme_ext_priv *pmlmeext = &padapter->mlmeextpriv;
	struct set_ch_parm *parm;

	if (!pbuf)
		return H2C_PARAMETERS_ERROR;

	parm = (struct set_ch_parm *)pbuf;

	pmlmeext->cur_channel = parm->ch;
	pmlmeext->cur_ch_offset = parm->ch_offset;
	pmlmeext->cur_bwmode = parm->bw;

	set_channel_bwmode(padapter, parm->ch, parm->ch_offset, parm->bw);

	return H2C_SUCCESS;
}
/*
 * H2C handler: install a new channel plan, rebuild the channel set and
 * list, and notify the cfg80211 regulatory core.
 */
u8 set_chplan_hdl(struct adapter *padapter, unsigned char *pbuf)
{
	struct SetChannelPlan_param *setChannelPlan_param;
	struct mlme_ext_priv *pmlmeext = &padapter->mlmeextpriv;

	if (!pbuf)
		return H2C_PARAMETERS_ERROR;

	setChannelPlan_param = (struct SetChannelPlan_param *)pbuf;
	pmlmeext->max_chan_nums = init_channel_set(padapter, setChannelPlan_param->channel_plan, pmlmeext->channel_set);
	init_channel_list(padapter, pmlmeext->channel_set, pmlmeext->max_chan_nums, &pmlmeext->channel_list);

	if (padapter->rtw_wdev && padapter->rtw_wdev->wiphy) {
		/* NOTE(review): only .initiator is set; the rest of this
		 * stack-allocated struct is uninitialized — confirm
		 * rtw_reg_notifier() reads nothing else.
		 */
		struct regulatory_request request;

		request.initiator = NL80211_REGDOM_SET_BY_DRIVER;
		rtw_reg_notifier(padapter->rtw_wdev->wiphy, &request);
	}

	return H2C_SUCCESS;
}
/* Channel-switch-announcement handler: not supported, always rejected. */
u8 set_csa_hdl(struct adapter *padapter, unsigned char *pbuf)
{
	return H2C_REJECTED;
}
/* TDLS_ESTABLISHED : write RCR DATA BIT */
/* TDLS_CS_OFF : go back to the channel linked with AP, terminating channel switch procedure */
/* TDLS_INIT_CH_SEN : init channel sensing, receive all data and mgnt frame */
/* TDLS_DONE_CH_SEN: channel sensing and report candidate channel */
/* TDLS_OFF_CH : first time set channel to off channel */
/* TDLS_BASE_CH : go back tp the channel linked with AP when set base channel as target channel */
/* TDLS_P_OFF_CH : periodically go to off channel */
/* TDLS_P_BASE_CH : periodically go back to base channel */
/* TDLS_RS_RCR : restore RCR */
/* TDLS_TEAR_STA : free tdls sta */
/* TDLS handler: TDLS is not supported by this driver, always rejected. */
u8 tdls_hdl(struct adapter *padapter, unsigned char *pbuf)
{
	return H2C_REJECTED;
}
/*
 * H2C handler: invoke the caller-supplied function with its context in
 * the command thread.
 */
u8 run_in_thread_hdl(struct adapter *padapter, u8 *pbuf)
{
	struct RunInThread_param *parm;

	if (!pbuf)
		return H2C_PARAMETERS_ERROR;

	parm = (struct RunInThread_param *)pbuf;

	if (parm->func)
		parm->func(parm->context);

	return H2C_SUCCESS;
}
| linux-master | drivers/staging/rtl8723bs/core/rtw_mlme_ext.c |
// SPDX-License-Identifier: GPL-2.0
/******************************************************************************
*
* Copyright(c) 2007 - 2011 Realtek Corporation. All rights reserved.
*
******************************************************************************/
#include <drv_types.h>
#include <rtw_debug.h>
#include <linux/of.h>
#include <asm/unaligned.h>
u8 RTW_WPA_OUI_TYPE[] = { 0x00, 0x50, 0xf2, 1 };
u16 RTW_WPA_VERSION = 1;
u8 WPA_AUTH_KEY_MGMT_NONE[] = { 0x00, 0x50, 0xf2, 0 };
u8 WPA_AUTH_KEY_MGMT_UNSPEC_802_1X[] = { 0x00, 0x50, 0xf2, 1 };
u8 WPA_AUTH_KEY_MGMT_PSK_OVER_802_1X[] = { 0x00, 0x50, 0xf2, 2 };
u8 WPA_CIPHER_SUITE_NONE[] = { 0x00, 0x50, 0xf2, 0 };
u8 WPA_CIPHER_SUITE_WEP40[] = { 0x00, 0x50, 0xf2, 1 };
u8 WPA_CIPHER_SUITE_TKIP[] = { 0x00, 0x50, 0xf2, 2 };
u8 WPA_CIPHER_SUITE_WRAP[] = { 0x00, 0x50, 0xf2, 3 };
u8 WPA_CIPHER_SUITE_CCMP[] = { 0x00, 0x50, 0xf2, 4 };
u8 WPA_CIPHER_SUITE_WEP104[] = { 0x00, 0x50, 0xf2, 5 };
u16 RSN_VERSION_BSD = 1;
u8 RSN_AUTH_KEY_MGMT_UNSPEC_802_1X[] = { 0x00, 0x0f, 0xac, 1 };
u8 RSN_AUTH_KEY_MGMT_PSK_OVER_802_1X[] = { 0x00, 0x0f, 0xac, 2 };
u8 RSN_CIPHER_SUITE_NONE[] = { 0x00, 0x0f, 0xac, 0 };
u8 RSN_CIPHER_SUITE_WEP40[] = { 0x00, 0x0f, 0xac, 1 };
u8 RSN_CIPHER_SUITE_TKIP[] = { 0x00, 0x0f, 0xac, 2 };
u8 RSN_CIPHER_SUITE_WRAP[] = { 0x00, 0x0f, 0xac, 3 };
u8 RSN_CIPHER_SUITE_CCMP[] = { 0x00, 0x0f, 0xac, 4 };
u8 RSN_CIPHER_SUITE_WEP104[] = { 0x00, 0x0f, 0xac, 5 };
/* */
/* for adhoc-master to generate ie and provide supported-rate to fw */
/* */
static u8 WIFI_CCKRATES[] = {
(IEEE80211_CCK_RATE_1MB | IEEE80211_BASIC_RATE_MASK),
(IEEE80211_CCK_RATE_2MB | IEEE80211_BASIC_RATE_MASK),
(IEEE80211_CCK_RATE_5MB | IEEE80211_BASIC_RATE_MASK),
(IEEE80211_CCK_RATE_11MB | IEEE80211_BASIC_RATE_MASK)
};
static u8 WIFI_OFDMRATES[] = {
(IEEE80211_OFDM_RATE_6MB),
(IEEE80211_OFDM_RATE_9MB),
(IEEE80211_OFDM_RATE_12MB),
(IEEE80211_OFDM_RATE_18MB),
(IEEE80211_OFDM_RATE_24MB),
IEEE80211_OFDM_RATE_36MB,
IEEE80211_OFDM_RATE_48MB,
IEEE80211_OFDM_RATE_54MB
};
/*
 * Map an IEEE 802.11 rate value (units of 500kb/s) to the driver's one-hot
 * rate bitmask.  Unknown rate values yield 0.
 */
int rtw_get_bit_value_from_ieee_value(u8 val)
{
	/* zero-terminated; the index of a match defines the BIT() position */
	static const unsigned char dot11_rate_table[] = {
		2, 4, 11, 22, 12, 18, 24, 36, 48, 72, 96, 108, 0
	};
	int i;

	for (i = 0; dot11_rate_table[i] != 0; i++) {
		if (dot11_rate_table[i] == val)
			return BIT(i);
	}

	return 0;
}
/* Return true when the zero-terminated rate set contains any CCK rate. */
bool rtw_is_cckrates_included(u8 *rate)
{
	for (; *rate; rate++) {
		u8 r = *rate & 0x7f;	/* strip the basic-rate flag */

		if (r == 2 || r == 4 || r == 11 || r == 22)
			return true;
	}

	return false;
}
/* Return true when every rate in the zero-terminated set is a CCK rate. */
bool rtw_is_cckratesonly_included(u8 *rate)
{
	for (; *rate; rate++) {
		u8 r = *rate & 0x7f;	/* strip the basic-rate flag */

		if (r != 2 && r != 4 && r != 11 && r != 22)
			return false;
	}

	return true;
}
/*
 * Classify a 2.4GHz network as pure-B, pure-G or mixed B/G from its rate set.
 * @ratelen is unused; the rate array is expected to be zero-terminated.
 * Channels above 14 are invalid for this 2.4GHz-only driver.
 */
int rtw_check_network_type(unsigned char *rate, int ratelen, int channel)
{
	if (channel > 14)
		return WIRELESS_INVALID;
	/* could be pure B, pure G, or B/G */
	if (rtw_is_cckratesonly_included(rate))
		return WIRELESS_11B;
	if (rtw_is_cckrates_included(rate))
		return WIRELESS_11BG;
	return WIRELESS_11G;
}
/*
 * Append @len bytes of fixed (untagged) IE data from @source at @pbuf,
 * bump *@frlen accordingly and return the next write position.
 */
u8 *rtw_set_fixed_ie(unsigned char *pbuf, unsigned int len, unsigned char *source,
		     unsigned int *frlen)
{
	memcpy(pbuf, source, len);
	*frlen += len;
	return pbuf + len;
}
/* rtw_set_ie will update frame length */
/*
 * Write one tagged information element (ID, length, payload) at @pbuf,
 * add the element's total size to *@frlen, and return the position just
 * past the element.
 */
u8 *rtw_set_ie(u8 *pbuf,
	       signed int index,
	       uint len,
	       u8 *source,
	       uint *frlen) /* frame length */
{
	/* element header: ID octet then length octet */
	pbuf[0] = (u8)index;
	pbuf[1] = (u8)len;

	if (len > 0)
		memcpy(pbuf + 2, source, len);

	/* account for payload plus the two header octets */
	*frlen += len + 2;

	return pbuf + len + 2;
}
/*----------------------------------------------------------------------------
index: the information element id index, limit is the limit for search
-----------------------------------------------------------------------------*/
/*
 * Linear scan of a tagged-IE buffer for the first element with ID @index.
 * On a match, stores the element's payload length in *@len and returns a
 * pointer to the element header (the ID octet).  Returns NULL when
 * @limit < 1 or no match is found within @limit octets.
 * NOTE(review): the ID and length octets at @p are read before the bounds
 * check, so a malformed buffer can be over-read by up to two bytes —
 * confirm callers always pass well-formed IE data.
 */
u8 *rtw_get_ie(u8 *pbuf, signed int index, signed int *len, signed int limit)
{
	signed int tmp, i;
	u8 *p;
	if (limit < 1)
		return NULL;
	p = pbuf;
	i = 0;
	*len = 0;
	while (1) {
		if (*p == index) {
			*len = *(p + 1);
			return p;
		}
		tmp = *(p + 1);		/* element payload length */
		p += (tmp + 2);		/* skip header (2) + payload */
		i += (tmp + 2);
		if (i >= limit)
			break;		/* search window exhausted */
	}
	return NULL;
}
/**
* rtw_get_ie_ex - Search specific IE from a series of IEs
* @in_ie: Address of IEs to search
* @in_len: Length limit from in_ie
* @eid: Element ID to match
* @oui: OUI to match
* @oui_len: OUI length
* @ie: If not NULL and the specific IE is found, the IE will be copied to the buf starting from the specific IE
* @ielen: If not NULL and the specific IE is found, will set to the length of the entire IE
*
* Returns: The address of the specific IE found, or NULL
*/
u8 *rtw_get_ie_ex(u8 *in_ie, uint in_len, u8 eid, u8 *oui, u8 oui_len, u8 *ie, uint *ielen)
{
	uint cnt;
	u8 *target_ie = NULL;
	if (ielen)
		*ielen = 0;
	if (!in_ie || in_len <= 0)	/* in_len is unsigned: this is a zero-length check */
		return target_ie;
	cnt = 0;
	while (cnt < in_len) {
		/* match element ID, and the OUI prefix when one was supplied */
		if (eid == in_ie[cnt]
			&& (!oui || !memcmp(&in_ie[cnt+2], oui, oui_len))) {
			target_ie = &in_ie[cnt];
			if (ie)
				memcpy(ie, &in_ie[cnt], in_ie[cnt+1]+2);
			if (ielen)
				*ielen = in_ie[cnt+1]+2;	/* whole IE: 2-byte header + payload */
			break;
		}
		cnt += in_ie[cnt+1]+2; /* goto next */
	}
	return target_ie;
}
/**
* rtw_ies_remove_ie - Find matching IEs and remove
* @ies: Address of IEs to search
* @ies_len: Pointer of length of ies, will update to new length
* @offset: The offset to start search
* @eid: Element ID to match
* @oui: OUI to match
* @oui_len: OUI length
*
* Returns: _SUCCESS: ies is updated, _FAIL: not updated
*/
int rtw_ies_remove_ie(u8 *ies, uint *ies_len, uint offset, u8 eid, u8 *oui, u8 oui_len)
{
	int ret = _FAIL;
	u8 *target_ie;
	u32 target_ielen;
	u8 *start;
	uint search_len;

	if (!ies || !ies_len || *ies_len <= offset)
		goto exit;

	start = ies + offset;
	search_len = *ies_len - offset;

	while (1) {
		target_ie = rtw_get_ie_ex(start, search_len, eid, oui, oui_len, NULL, &target_ielen);
		if (target_ie && target_ielen) {
			u8 *remain_ies = target_ie + target_ielen;
			uint remain_len = search_len - (remain_ies - start);

			/*
			 * Source and destination overlap whenever the tail
			 * being shifted down is longer than the removed IE,
			 * so memmove() is required here: memcpy() on
			 * overlapping regions is undefined behavior.
			 */
			memmove(target_ie, remain_ies, remain_len);
			*ies_len = *ies_len - target_ielen;
			ret = _SUCCESS;

			/* rescan from the same position for further matches */
			start = target_ie;
			search_len = remain_len;
		} else {
			break;
		}
	}
exit:
	return ret;
}
/*
 * Fill @supported_rates (NDIS_802_11_LENGTH_RATES_EX bytes, zeroed first)
 * with the CCK and/or OFDM rate sets matching the wireless @mode.
 * Unrecognized modes leave the buffer all-zero.
 */
void rtw_set_supported_rate(u8 *supported_rates, uint mode)
{
	memset(supported_rates, 0, NDIS_802_11_LENGTH_RATES_EX);
	switch (mode) {
	case WIRELESS_11B:
		memcpy(supported_rates, WIFI_CCKRATES, IEEE80211_CCK_RATE_LEN);
		break;
	case WIRELESS_11G:
		memcpy(supported_rates, WIFI_OFDMRATES, IEEE80211_NUM_OFDM_RATESLEN);
		break;
	case WIRELESS_11BG:
	case WIRELESS_11G_24N:
	case WIRELESS_11_24N:
	case WIRELESS_11BG_24N:
		/* mixed modes: CCK rates first, then the OFDM rates */
		memcpy(supported_rates, WIFI_CCKRATES, IEEE80211_CCK_RATE_LEN);
		memcpy(supported_rates + IEEE80211_CCK_RATE_LEN, WIFI_OFDMRATES, IEEE80211_NUM_OFDM_RATESLEN);
		break;
	}
}
/* Count the rates in @rateset up to the zero terminator (13 entries max). */
uint rtw_get_rateset_len(u8 *rateset)
{
	uint len = 0;

	while (len < 13 && rateset[len] != 0)
		len++;

	return len;
}
/*
 * Build the fixed fields and tagged IEs of an IBSS beacon into
 * pregistrypriv->dev_network.ies and return the total length in bytes.
 * The leading 8-byte timestamp slot is skipped (filled in by hardware).
 */
int rtw_generate_ie(struct registry_priv *pregistrypriv)
{
	u8 wireless_mode;
	int sz = 0, rateLen;
	struct wlan_bssid_ex *pdev_network = &pregistrypriv->dev_network;
	u8 *ie = pdev_network->ies;
	/* timestamp will be inserted by hardware */
	sz += 8;
	ie += sz;
	/* beacon interval : 2bytes */
	*(__le16 *)ie = cpu_to_le16((u16)pdev_network->configuration.beacon_period);/* BCN_INTERVAL; */
	sz += 2;
	ie += 2;
	/* capability info */
	*(u16 *)ie = 0;
	*(__le16 *)ie |= cpu_to_le16(WLAN_CAPABILITY_IBSS);
	if (pregistrypriv->preamble == PREAMBLE_SHORT)
		*(__le16 *)ie |= cpu_to_le16(WLAN_CAPABILITY_SHORT_PREAMBLE);
	if (pdev_network->privacy)
		*(__le16 *)ie |= cpu_to_le16(WLAN_CAPABILITY_PRIVACY);
	sz += 2;
	ie += 2;
	/* SSID */
	ie = rtw_set_ie(ie, WLAN_EID_SSID, pdev_network->ssid.ssid_length, pdev_network->ssid.ssid, &sz);
	/* supported rates */
	wireless_mode = pregistrypriv->wireless_mode;
	rtw_set_supported_rate(pdev_network->supported_rates, wireless_mode);
	rateLen = rtw_get_rateset_len(pdev_network->supported_rates);
	/* more than 8 rates: first 8 here, the rest in an Extended Supported Rates IE below */
	if (rateLen > 8) {
		ie = rtw_set_ie(ie, WLAN_EID_SUPP_RATES, 8, pdev_network->supported_rates, &sz);
		/* ie = rtw_set_ie(ie, WLAN_EID_EXT_SUPP_RATES, (rateLen - 8), (pdev_network->supported_rates + 8), &sz); */
	} else {
		ie = rtw_set_ie(ie, WLAN_EID_SUPP_RATES, rateLen, pdev_network->supported_rates, &sz);
	}
	/* DS parameter set */
	ie = rtw_set_ie(ie, WLAN_EID_DS_PARAMS, 1, (u8 *)&(pdev_network->configuration.ds_config), &sz);
	/* IBSS Parameter Set */
	ie = rtw_set_ie(ie, WLAN_EID_IBSS_PARAMS, 2, (u8 *)&(pdev_network->configuration.atim_window), &sz);
	if (rateLen > 8)
		ie = rtw_set_ie(ie, WLAN_EID_EXT_SUPP_RATES, (rateLen - 8), (pdev_network->supported_rates + 8), &sz);
	/* HT Cap. */
	if ((pregistrypriv->wireless_mode & WIRELESS_11_24N) &&
		(pregistrypriv->ht_enable == true)) {
		/* todo: */
	}
	/* pdev_network->ie_length = sz; update ie_length */
	/* return _SUCCESS; */
	return sz;
}
/*
 * Scan the vendor-specific IEs for the Microsoft WPA IE (OUI 00:50:F2,
 * type 1, version 1).  Returns a pointer to the IE header and stores the
 * IE payload length in *@wpa_ie_len; on failure returns NULL with
 * *@wpa_ie_len set to 0.
 */
unsigned char *rtw_get_wpa_ie(unsigned char *pie, int *wpa_ie_len, int limit)
{
	int len;
	u16 val16;
	unsigned char wpa_oui_type[] = {0x00, 0x50, 0xf2, 0x01};
	u8 *pbuf = pie;
	int limit_new = limit;
	__le16 le_tmp;
	while (1) {
		pbuf = rtw_get_ie(pbuf, WLAN_EID_VENDOR_SPECIFIC, &len, limit_new);
		if (pbuf) {
			/* check if oui matches... */
			if (memcmp((pbuf + 2), wpa_oui_type, sizeof(wpa_oui_type)))
				goto check_next_ie;
			/* check version... */
			memcpy((u8 *)&le_tmp, (pbuf + 6), sizeof(val16));
			val16 = le16_to_cpu(le_tmp);
			if (val16 != 0x0001)
				goto check_next_ie;
			*wpa_ie_len = *(pbuf + 1);
			return pbuf;
		} else {
			*wpa_ie_len = 0;
			return NULL;
		}
check_next_ie:
		/* shrink the window by what we already consumed before retrying */
		limit_new = limit - (pbuf - pie) - 2 - len;
		if (limit_new <= 0)
			break;
		pbuf += (2 + len);
	}
	*wpa_ie_len = 0;
	return NULL;
}
/* The RSN (WPA2) IE has a dedicated element ID, so a plain IE search suffices. */
unsigned char *rtw_get_wpa2_ie(unsigned char *pie, int *rsn_ie_len, int limit)
{
	unsigned char *rsn_ie = rtw_get_ie(pie, WLAN_EID_RSN, rsn_ie_len, limit);

	return rsn_ie;
}
/* Translate a WPA (OUI 00:50:F2) cipher-suite selector to a WPA_CIPHER_* id. */
int rtw_get_wpa_cipher_suite(u8 *s)
{
	static const struct {
		const u8 *selector;
		int cipher;
	} wpa_suites[] = {
		{ WPA_CIPHER_SUITE_NONE,   WPA_CIPHER_NONE },
		{ WPA_CIPHER_SUITE_WEP40,  WPA_CIPHER_WEP40 },
		{ WPA_CIPHER_SUITE_TKIP,   WPA_CIPHER_TKIP },
		{ WPA_CIPHER_SUITE_CCMP,   WPA_CIPHER_CCMP },
		{ WPA_CIPHER_SUITE_WEP104, WPA_CIPHER_WEP104 },
	};
	unsigned int i;

	for (i = 0; i < ARRAY_SIZE(wpa_suites); i++)
		if (!memcmp(s, wpa_suites[i].selector, WPA_SELECTOR_LEN))
			return wpa_suites[i].cipher;

	return 0;	/* unknown selector */
}
/* Translate an RSN (OUI 00:0F:AC) cipher-suite selector to a WPA_CIPHER_* id. */
int rtw_get_wpa2_cipher_suite(u8 *s)
{
	static const struct {
		const u8 *selector;
		int cipher;
	} rsn_suites[] = {
		{ RSN_CIPHER_SUITE_NONE,   WPA_CIPHER_NONE },
		{ RSN_CIPHER_SUITE_WEP40,  WPA_CIPHER_WEP40 },
		{ RSN_CIPHER_SUITE_TKIP,   WPA_CIPHER_TKIP },
		{ RSN_CIPHER_SUITE_CCMP,   WPA_CIPHER_CCMP },
		{ RSN_CIPHER_SUITE_WEP104, WPA_CIPHER_WEP104 },
	};
	unsigned int i;

	for (i = 0; i < ARRAY_SIZE(rsn_suites); i++)
		if (!memcmp(s, rsn_suites[i].selector, RSN_SELECTOR_LEN))
			return rsn_suites[i].cipher;

	return 0;	/* unknown selector */
}
/*
 * Parse a WPA (vendor) IE: extract the group cipher, OR together all
 * advertised pairwise ciphers, and flag 802.1X key management when present.
 * Returns _SUCCESS, or _FAIL on any malformed or truncated field.
 */
int rtw_parse_wpa_ie(u8 *wpa_ie, int wpa_ie_len, int *group_cipher, int *pairwise_cipher, int *is_8021x)
{
	int i, ret = _SUCCESS;
	int left, count;
	u8 *pos;
	u8 SUITE_1X[4] = {0x00, 0x50, 0xf2, 1};
	if (wpa_ie_len <= 0) {
		/* No WPA IE - fail silently */
		return _FAIL;
	}
	/* sanity: element ID, declared length and the WPA OUI/type must all match */
	if ((*wpa_ie != WLAN_EID_VENDOR_SPECIFIC) || (*(wpa_ie+1) != (u8)(wpa_ie_len - 2)) ||
		(memcmp(wpa_ie+2, RTW_WPA_OUI_TYPE, WPA_SELECTOR_LEN))) {
		return _FAIL;
	}
	pos = wpa_ie;
	pos += 8;	/* skip header (2) + OUI/type (4) + version (2) */
	left = wpa_ie_len - 8;
	/* group_cipher */
	if (left >= WPA_SELECTOR_LEN) {
		*group_cipher = rtw_get_wpa_cipher_suite(pos);
		pos += WPA_SELECTOR_LEN;
		left -= WPA_SELECTOR_LEN;
	} else if (left > 0)
		return _FAIL;
	/* pairwise_cipher */
	if (left >= 2) {
		/* count = le16_to_cpu(*(u16*)pos); */
		count = get_unaligned_le16(pos);
		pos += 2;
		left -= 2;
		if (count == 0 || left < count * WPA_SELECTOR_LEN)
			return _FAIL;
		for (i = 0; i < count; i++) {
			*pairwise_cipher |= rtw_get_wpa_cipher_suite(pos);
			pos += WPA_SELECTOR_LEN;
			left -= WPA_SELECTOR_LEN;
		}
	} else if (left == 1)
		return _FAIL;
	if (is_8021x) {
		/* AKM list: 2-byte count followed by 4-byte suites */
		if (left >= 6) {
			pos += 2;
			if (!memcmp(pos, SUITE_1X, 4))
				*is_8021x = 1;
		}
	}
	return ret;
}
/*
 * Parse an RSN (WPA2) IE: extract the group cipher, OR together all
 * advertised pairwise ciphers, and flag 802.1X key management when present.
 * Returns _SUCCESS, or _FAIL on any malformed or truncated field.
 */
int rtw_parse_wpa2_ie(u8 *rsn_ie, int rsn_ie_len, int *group_cipher, int *pairwise_cipher, int *is_8021x)
{
	int i, ret = _SUCCESS;
	int left, count;
	u8 *pos;
	u8 SUITE_1X[4] = {0x00, 0x0f, 0xac, 0x01};
	if (rsn_ie_len <= 0) {
		/* No RSN IE - fail silently */
		return _FAIL;
	}
	/* sanity: element ID and declared length must match */
	if ((*rsn_ie != WLAN_EID_RSN) || (*(rsn_ie+1) != (u8)(rsn_ie_len - 2)))
		return _FAIL;
	pos = rsn_ie;
	pos += 4;	/* skip header (2) + version (2) */
	left = rsn_ie_len - 4;
	/* group_cipher */
	if (left >= RSN_SELECTOR_LEN) {
		*group_cipher = rtw_get_wpa2_cipher_suite(pos);
		pos += RSN_SELECTOR_LEN;
		left -= RSN_SELECTOR_LEN;
	} else if (left > 0)
		return _FAIL;
	/* pairwise_cipher */
	if (left >= 2) {
		/* count = le16_to_cpu(*(u16*)pos); */
		count = get_unaligned_le16(pos);
		pos += 2;
		left -= 2;
		if (count == 0 || left < count * RSN_SELECTOR_LEN)
			return _FAIL;
		for (i = 0; i < count; i++) {
			*pairwise_cipher |= rtw_get_wpa2_cipher_suite(pos);
			pos += RSN_SELECTOR_LEN;
			left -= RSN_SELECTOR_LEN;
		}
	} else if (left == 1)
		return _FAIL;
	if (is_8021x) {
		/* AKM list: 2-byte count followed by 4-byte suites */
		if (left >= 6) {
			pos += 2;
			if (!memcmp(pos, SUITE_1X, 4))
				*is_8021x = 1;
		}
	}
	return ret;
}
/* ifdef CONFIG_WAPI_SUPPORT */
/*
 * Search the tagged-IE area (after the fixed beacon fields) for a WAPI IE
 * (WAPI OUI 00:14:72, type 1 or 2).  Copies it to @wapi_ie / *@wapi_len
 * when found, and returns the copied length (0 when absent).
 * NOTE(review): the WAPI element ID appears to share its numeric value
 * with WLAN_EID_BSS_AC_ACCESS_DELAY, presumably why that constant is used
 * here — confirm against the WAPI spec.
 */
int rtw_get_wapi_ie(u8 *in_ie, uint in_len, u8 *wapi_ie, u16 *wapi_len)
{
	int len = 0;
	u8 authmode;
	uint cnt;
	u8 wapi_oui1[4] = {0x0, 0x14, 0x72, 0x01};
	u8 wapi_oui2[4] = {0x0, 0x14, 0x72, 0x02};
	if (wapi_len)
		*wapi_len = 0;
	if (!in_ie || in_len <= 0)
		return len;
	/* skip the fixed fields: timestamp + beacon interval + capability */
	cnt = (_TIMESTAMP_ + _BEACON_ITERVAL_ + _CAPABILITY_);
	while (cnt < in_len) {
		authmode = in_ie[cnt];
		/* if (authmode == WLAN_EID_BSS_AC_ACCESS_DELAY) */
		if (authmode == WLAN_EID_BSS_AC_ACCESS_DELAY && (!memcmp(&in_ie[cnt+6], wapi_oui1, 4) ||
			!memcmp(&in_ie[cnt+6], wapi_oui2, 4))) {
			if (wapi_ie)
				memcpy(wapi_ie, &in_ie[cnt], in_ie[cnt+1]+2);
			if (wapi_len)
				*wapi_len = in_ie[cnt+1]+2;
			cnt += in_ie[cnt+1]+2; /* get next */
		} else {
			cnt += in_ie[cnt+1]+2; /* get next */
		}
	}
	if (wapi_len)
		len = *wapi_len;
	return len;
}
/* endif */
/*
 * Walk the tagged IEs and copy out the WPA (vendor 00:50:F2 type 1) and
 * RSN IEs, when present, together with their total (header + payload)
 * lengths.  The IE buffers may be NULL to fetch lengths only.
 * NOTE(review): *@wpa_len and *@rsn_len are written unconditionally, so
 * callers must always pass valid length pointers.
 */
void rtw_get_sec_ie(u8 *in_ie, uint in_len, u8 *rsn_ie, u16 *rsn_len, u8 *wpa_ie, u16 *wpa_len)
{
	u8 authmode;
	u8 wpa_oui[4] = {0x0, 0x50, 0xf2, 0x01};
	uint cnt;
	/* Search required WPA or WPA2 IE and copy to sec_ie[ ] */
	cnt = (_TIMESTAMP_ + _BEACON_ITERVAL_ + _CAPABILITY_);
	while (cnt < in_len) {
		authmode = in_ie[cnt];
		if ((authmode == WLAN_EID_VENDOR_SPECIFIC) && (!memcmp(&in_ie[cnt+2], &wpa_oui[0], 4))) {
			if (wpa_ie)
				memcpy(wpa_ie, &in_ie[cnt], in_ie[cnt+1]+2);
			*wpa_len = in_ie[cnt + 1] + 2;
			cnt += in_ie[cnt + 1] + 2; /* get next */
		} else {
			if (authmode == WLAN_EID_RSN) {
				if (rsn_ie)
					memcpy(rsn_ie, &in_ie[cnt], in_ie[cnt + 1] + 2);
				*rsn_len = in_ie[cnt+1]+2;
				cnt += in_ie[cnt+1]+2; /* get next */
			} else {
				cnt += in_ie[cnt+1]+2; /* get next */
			}
		}
	}
}
/**
* rtw_get_wps_ie - Search WPS IE from a series of IEs
* @in_ie: Address of IEs to search
* @in_len: Length limit from in_ie
* @wps_ie: If not NULL and WPS IE is found, WPS IE will be copied to the buf starting from wps_ie
* @wps_ielen: If not NULL and WPS IE is found, will set to the length of the entire WPS IE
*
* Returns: The address of the WPS IE found, or NULL
*/
u8 *rtw_get_wps_ie(u8 *in_ie, uint in_len, u8 *wps_ie, uint *wps_ielen)
{
	uint cnt;
	u8 *wpsie_ptr = NULL;
	/* WPS is carried in a vendor-specific IE with OUI 00:50:F2, type 4 */
	u8 eid, wps_oui[4] = {0x0, 0x50, 0xf2, 0x04};
	if (wps_ielen)
		*wps_ielen = 0;
	if (!in_ie || in_len <= 0)	/* in_len is unsigned: this is a zero check */
		return wpsie_ptr;
	cnt = 0;
	while (cnt < in_len) {
		eid = in_ie[cnt];
		if ((eid == WLAN_EID_VENDOR_SPECIFIC) && (!memcmp(&in_ie[cnt+2], wps_oui, 4))) {
			wpsie_ptr = &in_ie[cnt];
			if (wps_ie)
				memcpy(wps_ie, &in_ie[cnt], in_ie[cnt+1]+2);
			if (wps_ielen)
				*wps_ielen = in_ie[cnt+1]+2;	/* whole IE: header + payload */
			cnt += in_ie[cnt+1]+2;
			break;
		}
		cnt += in_ie[cnt+1]+2; /* goto next */
	}
	return wpsie_ptr;
}
/**
* rtw_get_wps_attr - Search a specific WPS attribute from a given WPS IE
* @wps_ie: Address of WPS IE to search
* @wps_ielen: Length limit from wps_ie
* @target_attr_id: The attribute ID of WPS attribute to search
* @buf_attr: If not NULL and the WPS attribute is found, WPS attribute will be copied to the buf starting from buf_attr
* @len_attr: If not NULL and the WPS attribute is found, will set to the length of the entire WPS attribute
*
* Returns: the address of the specific WPS attribute found, or NULL
*/
u8 *rtw_get_wps_attr(u8 *wps_ie, uint wps_ielen, u16 target_attr_id, u8 *buf_attr, u32 *len_attr)
{
	u8 *attr_ptr = NULL;
	u8 *target_attr_ptr = NULL;
	u8 wps_oui[4] = {0x00, 0x50, 0xF2, 0x04};
	if (len_attr)
		*len_attr = 0;
	/*
	 * NOTE(review): wps_ie[0..5] are read before wps_ielen is consulted;
	 * callers are expected to pass a buffer of at least 6 bytes.
	 */
	if ((wps_ie[0] != WLAN_EID_VENDOR_SPECIFIC) ||
		(memcmp(wps_ie + 2, wps_oui, 4))) {
		return attr_ptr;
	}
	/* 6 = 1(Element ID) + 1(Length) + 4(WPS OUI) */
	attr_ptr = wps_ie + 6; /* goto first attr */
	while (attr_ptr - wps_ie < wps_ielen) {
		/* 4 = 2(Attribute ID) + 2(Length) */
		u16 attr_id = get_unaligned_be16(attr_ptr);
		u16 attr_data_len = get_unaligned_be16(attr_ptr + 2);
		u16 attr_len = attr_data_len + 4;
		if (attr_id == target_attr_id) {
			target_attr_ptr = attr_ptr;
			if (buf_attr)
				memcpy(buf_attr, attr_ptr, attr_len);
			if (len_attr)
				*len_attr = attr_len;	/* includes the 4-byte attribute header */
			break;
		}
		attr_ptr += attr_len; /* goto next */
	}
	return target_attr_ptr;
}
/**
* rtw_get_wps_attr_content - Search a specific WPS attribute content from a given WPS IE
* @wps_ie: Address of WPS IE to search
* @wps_ielen: Length limit from wps_ie
* @target_attr_id: The attribute ID of WPS attribute to search
* @buf_content: If not NULL and the WPS attribute is found, WPS attribute content will be copied to the buf starting from buf_content
* @len_content: If not NULL and the WPS attribute is found, will set to the length of the WPS attribute content
*
* Returns: the address of the specific WPS attribute content found, or NULL
*/
u8 *rtw_get_wps_attr_content(u8 *wps_ie, uint wps_ielen, u16 target_attr_id, u8 *buf_content, uint *len_content)
{
	u8 *attr;
	u32 attr_len;

	if (len_content)
		*len_content = 0;

	attr = rtw_get_wps_attr(wps_ie, wps_ielen, target_attr_id, NULL, &attr_len);
	if (!attr || !attr_len)
		return NULL;

	/* skip the 4-byte attribute header (2-byte ID + 2-byte length) */
	if (buf_content)
		memcpy(buf_content, attr + 4, attr_len - 4);
	if (len_content)
		*len_content = attr_len - 4;

	return attr + 4;
}
/*
 * Classify one vendor-specific IE and record it in @elems.
 * Returns 0 when recognized (Microsoft WPA/WME/WPS or Broadcom vendor HT
 * capability), -1 for unknown or too-short elements.
 * @show_errors is accepted for symmetry with the caller but not used here.
 */
static int rtw_ieee802_11_parse_vendor_specific(u8 *pos, uint elen,
						struct rtw_ieee802_11_elems *elems,
						int show_errors)
{
	unsigned int oui;
	/* first 3 bytes in vendor specific information element are the IEEE
	 * OUI of the vendor. The following byte is used a vendor specific
	 * sub-type. */
	if (elen < 4)
		return -1;
	oui = get_unaligned_be24(pos);
	switch (oui) {
	case OUI_MICROSOFT:
		/* Microsoft/Wi-Fi information elements are further typed and
		 * subtyped */
		switch (pos[3]) {
		case 1:
			/* Microsoft OUI (00:50:F2) with OUI Type 1:
			 * real WPA information element */
			elems->wpa_ie = pos;
			elems->wpa_ie_len = elen;
			break;
		case WME_OUI_TYPE: /* this is a Wi-Fi WME info. element */
			if (elen < 5)
				return -1;
			switch (pos[4]) {
			case WME_OUI_SUBTYPE_INFORMATION_ELEMENT:
			case WME_OUI_SUBTYPE_PARAMETER_ELEMENT:
				elems->wme = pos;
				elems->wme_len = elen;
				break;
			case WME_OUI_SUBTYPE_TSPEC_ELEMENT:
				elems->wme_tspec = pos;
				elems->wme_tspec_len = elen;
				break;
			default:
				return -1;
			}
			break;
		case 4:
			/* Wi-Fi Protected Setup (WPS) IE */
			elems->wps_ie = pos;
			elems->wps_ie_len = elen;
			break;
		default:
			return -1;
		}
		break;
	case OUI_BROADCOM:
		switch (pos[3]) {
		case VENDOR_HT_CAPAB_OUI_TYPE:
			elems->vendor_ht_cap = pos;
			elems->vendor_ht_cap_len = elen;
			break;
		default:
			return -1;
		}
		break;
	default:
		return -1;
	}
	return 0;
}
/**
* rtw_ieee802_11_parse_elems - Parse information elements in management frames
* @start: Pointer to the start of IEs
* @len: Length of IE buffer in octets
* @elems: Data structure for parsed elements
* @show_errors: Whether to show parsing errors in debug log
* Returns: Parsing result
*/
enum ParseRes rtw_ieee802_11_parse_elems(u8 *start, uint len,
					 struct rtw_ieee802_11_elems *elems,
					 int show_errors)
{
	uint left = len;
	u8 *pos = start;
	int unknown = 0;
	memset(elems, 0, sizeof(*elems));
	/* each iteration consumes one ID/length header plus its payload */
	while (left >= 2) {
		u8 id, elen;
		id = *pos++;
		elen = *pos++;
		left -= 2;
		if (elen > left)
			return ParseFailed;	/* element claims more data than remains */
		switch (id) {
		case WLAN_EID_SSID:
			elems->ssid = pos;
			elems->ssid_len = elen;
			break;
		case WLAN_EID_SUPP_RATES:
			elems->supp_rates = pos;
			elems->supp_rates_len = elen;
			break;
		case WLAN_EID_FH_PARAMS:
			elems->fh_params = pos;
			elems->fh_params_len = elen;
			break;
		case WLAN_EID_DS_PARAMS:
			elems->ds_params = pos;
			elems->ds_params_len = elen;
			break;
		case WLAN_EID_CF_PARAMS:
			elems->cf_params = pos;
			elems->cf_params_len = elen;
			break;
		case WLAN_EID_TIM:
			elems->tim = pos;
			elems->tim_len = elen;
			break;
		case WLAN_EID_IBSS_PARAMS:
			elems->ibss_params = pos;
			elems->ibss_params_len = elen;
			break;
		case WLAN_EID_CHALLENGE:
			elems->challenge = pos;
			elems->challenge_len = elen;
			break;
		case WLAN_EID_ERP_INFO:
			elems->erp_info = pos;
			elems->erp_info_len = elen;
			break;
		case WLAN_EID_EXT_SUPP_RATES:
			elems->ext_supp_rates = pos;
			elems->ext_supp_rates_len = elen;
			break;
		case WLAN_EID_VENDOR_SPECIFIC:
			/* vendor IEs need OUI/subtype dispatch of their own */
			if (rtw_ieee802_11_parse_vendor_specific(pos, elen,
								 elems,
								 show_errors))
				unknown++;
			break;
		case WLAN_EID_RSN:
			elems->rsn_ie = pos;
			elems->rsn_ie_len = elen;
			break;
		case WLAN_EID_PWR_CAPABILITY:
			elems->power_cap = pos;
			elems->power_cap_len = elen;
			break;
		case WLAN_EID_SUPPORTED_CHANNELS:
			elems->supp_channels = pos;
			elems->supp_channels_len = elen;
			break;
		case WLAN_EID_MOBILITY_DOMAIN:
			elems->mdie = pos;
			elems->mdie_len = elen;
			break;
		case WLAN_EID_FAST_BSS_TRANSITION:
			elems->ftie = pos;
			elems->ftie_len = elen;
			break;
		case WLAN_EID_TIMEOUT_INTERVAL:
			elems->timeout_int = pos;
			elems->timeout_int_len = elen;
			break;
		case WLAN_EID_HT_CAPABILITY:
			elems->ht_capabilities = pos;
			elems->ht_capabilities_len = elen;
			break;
		case WLAN_EID_HT_OPERATION:
			elems->ht_operation = pos;
			elems->ht_operation_len = elen;
			break;
		case WLAN_EID_VHT_CAPABILITY:
			elems->vht_capabilities = pos;
			elems->vht_capabilities_len = elen;
			break;
		case WLAN_EID_VHT_OPERATION:
			elems->vht_operation = pos;
			elems->vht_operation_len = elen;
			break;
		case WLAN_EID_OPMODE_NOTIF:
			elems->vht_op_mode_notify = pos;
			elems->vht_op_mode_notify_len = elen;
			break;
		default:
			unknown++;
			break;
		}
		left -= elen;
		pos += elen;
	}
	if (left)
		return ParseFailed;	/* trailing bytes shorter than a header */
	return unknown ? ParseUnknown : ParseOK;
}
/*
 * Decide the interface MAC address, in priority order:
 *  1) the "rtw_initmac" module parameter, if it parses;
 *  2) the address already in @mac_addr (read from efuse by the caller);
 *  3) the device-tree "local-mac-address" property, when 1/2 yielded a
 *     broadcast or all-zero address;
 *  4) a random address as last resort.
 * The chosen address is written back through @mac_addr.
 */
void rtw_macaddr_cfg(struct device *dev, u8 *mac_addr)
{
	u8 mac[ETH_ALEN];
	struct device_node *np = dev->of_node;
	const unsigned char *addr;
	int len;
	if (!mac_addr)
		return;
	if (rtw_initmac && mac_pton(rtw_initmac, mac)) {
		/* Users specify the mac address */
		ether_addr_copy(mac_addr, mac);
	} else {
		/* Use the mac address stored in the Efuse */
		ether_addr_copy(mac, mac_addr);
	}
	if (is_broadcast_ether_addr(mac) || is_zero_ether_addr(mac)) {
		addr = of_get_property(np, "local-mac-address", &len);
		if (addr && len == ETH_ALEN) {
			ether_addr_copy(mac_addr, addr);
		} else {
			eth_random_addr(mac_addr);
		}
	}
}
/*
 * Extract pairwise/group cipher and 802.1X info from a scanned network's
 * WPA IE (falling back to the RSN/WPA2 IE) and cache them in bcn_info.
 * Returns _SUCCESS when either IE parsed cleanly, _FAIL otherwise.
 */
static int rtw_get_cipher_info(struct wlan_network *pnetwork)
{
	/* was u32: rtw_get_wpa_ie()/rtw_get_wpa2_ie() take an int pointer */
	int wpa_ielen;
	unsigned char *pbuf;
	int group_cipher = 0, pairwise_cipher = 0, is8021x = 0;
	int ret = _FAIL;

	/* ies[12]: skip the fixed fields (timestamp + beacon interval + capability) */
	pbuf = rtw_get_wpa_ie(&pnetwork->network.ies[12], &wpa_ielen, pnetwork->network.ie_length-12);
	if (pbuf && (wpa_ielen > 0)) {
		if (_SUCCESS == rtw_parse_wpa_ie(pbuf, wpa_ielen+2, &group_cipher, &pairwise_cipher, &is8021x)) {
			pnetwork->bcn_info.pairwise_cipher = pairwise_cipher;
			pnetwork->bcn_info.group_cipher = group_cipher;
			pnetwork->bcn_info.is_8021x = is8021x;
			ret = _SUCCESS;
		}
	} else {
		pbuf = rtw_get_wpa2_ie(&pnetwork->network.ies[12], &wpa_ielen, pnetwork->network.ie_length-12);
		if (pbuf && (wpa_ielen > 0)) {
			if (_SUCCESS == rtw_parse_wpa2_ie(pbuf, wpa_ielen+2, &group_cipher, &pairwise_cipher, &is8021x)) {
				pnetwork->bcn_info.pairwise_cipher = pairwise_cipher;
				pnetwork->bcn_info.group_cipher = group_cipher;
				pnetwork->bcn_info.is_8021x = is8021x;
				ret = _SUCCESS;
			}
		}
	}
	return ret;
}
/*
 * Derive beacon-based info for a scanned network: the encryption protocol
 * (open/WEP/WPA/WPA2), the cipher suites, and the HT capability/operation
 * fields cached in bcn_info.
 */
void rtw_get_bcn_info(struct wlan_network *pnetwork)
{
	unsigned short cap = 0;
	u8 bencrypt = 0;
	u16 wpa_len = 0, rsn_len = 0;
	struct HT_info_element *pht_info = NULL;
	struct ieee80211_ht_cap *pht_cap = NULL;
	int len;	/* was unsigned int: rtw_get_ie() expects a signed int pointer */
	unsigned char *p;
	__le16 le_cap;

	/* capability field is little-endian on the wire and may be unaligned */
	memcpy((u8 *)&le_cap, rtw_get_capability_from_ie(pnetwork->network.ies), 2);
	cap = le16_to_cpu(le_cap);
	if (cap & WLAN_CAPABILITY_PRIVACY) {
		bencrypt = 1;
		pnetwork->network.privacy = 1;
	} else {
		pnetwork->bcn_info.encryp_protocol = ENCRYP_PROTOCOL_OPENSYS;
	}

	/* a present RSN IE wins over WPA; a bare privacy bit means WEP */
	rtw_get_sec_ie(pnetwork->network.ies, pnetwork->network.ie_length, NULL, &rsn_len, NULL, &wpa_len);
	if (rsn_len > 0) {
		pnetwork->bcn_info.encryp_protocol = ENCRYP_PROTOCOL_WPA2;
	} else if (wpa_len > 0) {
		pnetwork->bcn_info.encryp_protocol = ENCRYP_PROTOCOL_WPA;
	} else {
		if (bencrypt)
			pnetwork->bcn_info.encryp_protocol = ENCRYP_PROTOCOL_WEP;
	}
	rtw_get_cipher_info(pnetwork);

	/* get bwmode and ch_offset */
	/* parsing HT_CAP_IE */
	p = rtw_get_ie(pnetwork->network.ies + _FIXED_IE_LENGTH_, WLAN_EID_HT_CAPABILITY, &len, pnetwork->network.ie_length - _FIXED_IE_LENGTH_);
	if (p && len > 0) {
		pht_cap = (struct ieee80211_ht_cap *)(p + 2);
		pnetwork->bcn_info.ht_cap_info = le16_to_cpu(pht_cap->cap_info);
	} else {
		pnetwork->bcn_info.ht_cap_info = 0;
	}

	/* parsing HT_INFO_IE */
	p = rtw_get_ie(pnetwork->network.ies + _FIXED_IE_LENGTH_, WLAN_EID_HT_OPERATION, &len, pnetwork->network.ie_length - _FIXED_IE_LENGTH_);
	if (p && len > 0) {
		pht_info = (struct HT_info_element *)(p + 2);
		pnetwork->bcn_info.ht_info_infos_0 = pht_info->infos[0];
	} else {
		pnetwork->bcn_info.ht_info_infos_0 = 0;
	}
}
/* show MCS rate, unit: 100Kbps */
/* show MCS rate, unit: 100Kbps */
u16 rtw_mcs_rate(u8 bw_40MHz, u8 short_GI, unsigned char *MCS_rate)
{
	/* per MCS index 7..0: { 40MHz SGI, 40MHz LGI, 20MHz SGI, 20MHz LGI } */
	static const u16 rate_tbl[8][4] = {
		{ 1500, 1350, 722, 650 },	/* MCS7 */
		{ 1350, 1215, 650, 585 },	/* MCS6 */
		{ 1200, 1080, 578, 520 },	/* MCS5 */
		{  900,  810, 433, 390 },	/* MCS4 */
		{  600,  540, 289, 260 },	/* MCS3 */
		{  450,  405, 217, 195 },	/* MCS2 */
		{  300,  270, 144, 130 },	/* MCS1 */
		{  150,  135,  72,  65 },	/* MCS0 */
	};
	int bit, col;

	col = bw_40MHz ? (short_GI ? 0 : 1) : (short_GI ? 2 : 3);

	/* highest set bit in MCS_rate[0] wins, exactly as the old if-chain did */
	for (bit = 7; bit >= 0; bit--)
		if (MCS_rate[0] & BIT(bit))
			return rate_tbl[7 - bit][col];

	return 0;
}
/*
 * Validate that @frame is an 802.11 action frame and report its category
 * and action code through the optional out-pointers.  P2P (vendor-specific)
 * frames carry no standard action field, so @action is reported as
 * ACT_PUBLIC_MAX for them.  Returns true for action frames, else false.
 */
int rtw_action_frame_parse(const u8 *frame, u32 frame_len, u8 *category, u8 *action)
{
	const u8 *frame_body = frame + sizeof(struct ieee80211_hdr_3addr);
	u16 fc;
	u8 c;
	u8 a = ACT_PUBLIC_MAX;
	fc = le16_to_cpu(((struct ieee80211_hdr_3addr *)frame)->frame_control);
	if ((fc & (IEEE80211_FCTL_FTYPE|IEEE80211_FCTL_STYPE))
		!= (IEEE80211_FTYPE_MGMT|IEEE80211_STYPE_ACTION)
	) {
		return false;
	}
	c = frame_body[0];
	switch (c) {
	case RTW_WLAN_CATEGORY_P2P: /* vendor-specific */
		break;
	default:
		a = frame_body[1];
	}
	if (category)
		*category = c;
	if (action)
		*action = a;
	return true;
}
static const char *_action_public_str[] = {
"ACT_PUB_BSSCOEXIST",
"ACT_PUB_DSE_ENABLE",
"ACT_PUB_DSE_DEENABLE",
"ACT_PUB_DSE_REG_LOCATION",
"ACT_PUB_EXT_CHL_SWITCH",
"ACT_PUB_DSE_MSR_REQ",
"ACT_PUB_DSE_MSR_RPRT",
"ACT_PUB_MP",
"ACT_PUB_DSE_PWR_CONSTRAINT",
"ACT_PUB_VENDOR",
"ACT_PUB_GAS_INITIAL_REQ",
"ACT_PUB_GAS_INITIAL_RSP",
"ACT_PUB_GAS_COMEBACK_REQ",
"ACT_PUB_GAS_COMEBACK_RSP",
"ACT_PUB_TDLS_DISCOVERY_RSP",
"ACT_PUB_LOCATION_TRACK",
"ACT_PUB_RSVD",
};
/* Map a public-action code to its name; out-of-range codes collapse to RSVD. */
const char *action_public_str(u8 action)
{
	if (action >= ACT_PUBLIC_MAX)
		action = ACT_PUBLIC_MAX;

	return _action_public_str[action];
}
| linux-master | drivers/staging/rtl8723bs/core/rtw_ieee80211.c |
// SPDX-License-Identifier: GPL-2.0
/******************************************************************************
*
* Copyright(c) 2007 - 2011 Realtek Corporation. All rights reserved.
*
******************************************************************************/
/*
The purpose of rtw_io.c
a. provides the API
b. provides the protocol engine
c. provides the software interface between caller and the hardware interface
Compiler Flag Option:
1. CONFIG_SDIO_HCI:
a. USE_SYNC_IRP: Only sync operations are provided.
b. USE_ASYNC_IRP:Both sync/async operations are provided.
[email protected]
*/
#include <drv_types.h>
#include <rtw_debug.h>
/* Read one byte from device register space at @addr via the bus ops table. */
u8 rtw_read8(struct adapter *adapter, u32 addr)
{
	struct intf_hdl *intf = &adapter->iopriv.intf;

	return intf->io_ops._read8(intf, addr);
}
/* Read a 16-bit word from device register space at @addr via the bus ops. */
u16 rtw_read16(struct adapter *adapter, u32 addr)
{
	struct intf_hdl *intf = &adapter->iopriv.intf;

	return intf->io_ops._read16(intf, addr);
}
/* Read a 32-bit word from device register space at @addr via the bus ops. */
u32 rtw_read32(struct adapter *adapter, u32 addr)
{
	struct intf_hdl *intf = &adapter->iopriv.intf;

	return intf->io_ops._read32(intf, addr);
}
/* Write one byte to device register space; result mapped by RTW_STATUS_CODE(). */
int rtw_write8(struct adapter *adapter, u32 addr, u8 val)
{
	struct intf_hdl *intf = &adapter->iopriv.intf;
	int ret = intf->io_ops._write8(intf, addr, val);

	return RTW_STATUS_CODE(ret);
}
/* Write a 16-bit word to device register space; result mapped by RTW_STATUS_CODE(). */
int rtw_write16(struct adapter *adapter, u32 addr, u16 val)
{
	struct intf_hdl *intf = &adapter->iopriv.intf;
	int ret = intf->io_ops._write16(intf, addr, val);

	return RTW_STATUS_CODE(ret);
}
/* Write a 32-bit word to device register space; result mapped by RTW_STATUS_CODE(). */
int rtw_write32(struct adapter *adapter, u32 addr, u32 val)
{
	struct intf_hdl *intf = &adapter->iopriv.intf;
	int ret = intf->io_ops._write32(intf, addr, val);

	return RTW_STATUS_CODE(ret);
}
/* Bulk-write @cnt bytes from @pmem to port @addr via the bus ops table. */
u32 rtw_write_port(struct adapter *adapter, u32 addr, u32 cnt, u8 *pmem)
{
	struct intf_hdl *intf = &adapter->iopriv.intf;

	return intf->io_ops._write_port(intf, addr, cnt, pmem);
}
/*
 * Bind the bus-specific register accessors into @padapter's io_priv.
 * Returns _FAIL when no ops-installer callback is supplied, else _SUCCESS.
 */
int rtw_init_io_priv(struct adapter *padapter, void (*set_intf_ops)(struct adapter *padapter, struct _io_ops *pops))
{
	struct intf_hdl *pintf;

	if (!set_intf_ops)
		return _FAIL;

	padapter->iopriv.padapter = padapter;

	pintf = &padapter->iopriv.intf;
	pintf->padapter = padapter;
	pintf->pintf_dev = adapter_to_dvobj(padapter);
	set_intf_ops(padapter, &pintf->io_ops);

	return _SUCCESS;
}
/*
* Increase and check if the continual_io_error of this @param dvobjprive is larger than MAX_CONTINUAL_IO_ERR
* @return true:
* @return false:
*/
/*
 * Increment the continual I/O error counter of @dvobj and report whether
 * it has exceeded MAX_CONTINUAL_IO_ERR.
 *
 * Return: true when the post-increment count is above the limit, else false.
 */
int rtw_inc_and_chk_continual_io_error(struct dvobj_priv *dvobj)
{
	return atomic_inc_return(&dvobj->continual_io_error) > MAX_CONTINUAL_IO_ERR;
}
/*
* Set the continual_io_error of this @param dvobjprive to 0
*/
void rtw_reset_continual_io_error(struct dvobj_priv *dvobj)
{
	/* presumably called after a successful I/O to clear the error streak —
	 * confirm against callers */
	atomic_set(&dvobj->continual_io_error, 0);
}
| linux-master | drivers/staging/rtl8723bs/core/rtw_io.c |
// SPDX-License-Identifier: GPL-2.0
/******************************************************************************
*
* Copyright(c) 2007 - 2011 Realtek Corporation. All rights reserved.
*
******************************************************************************/
#include <drv_types.h>
#include <rtw_debug.h>
/* forward declaration so the definition below checks against its prototype */
void _rtw_init_stainfo(struct sta_info *psta);
/*
 * Reset one sta_info entry to its pristine state: zero the whole structure,
 * then (re)initialize its locks, list heads, sleep queue and per-station
 * xmit/recv bookkeeping.
 */
void _rtw_init_stainfo(struct sta_info *psta)
{
	memset((u8 *)psta, 0, sizeof(struct sta_info));
	spin_lock_init(&psta->lock);
	INIT_LIST_HEAD(&psta->list);
	INIT_LIST_HEAD(&psta->hash_list);
	/* INIT_LIST_HEAD(&psta->asoc_list); */
	/* INIT_LIST_HEAD(&psta->sleep_list); */
	/* INIT_LIST_HEAD(&psta->wakeup_list); */
	INIT_LIST_HEAD(&psta->sleep_q.queue);
	spin_lock_init(&psta->sleep_q.lock);
	psta->sleepq_len = 0;
	_rtw_init_sta_xmit_priv(&psta->sta_xmitpriv);
	_rtw_init_sta_recv_priv(&psta->sta_recvpriv);
	INIT_LIST_HEAD(&psta->asoc_list);
	INIT_LIST_HEAD(&psta->auth_list);
	/* the explicit zeroing below is redundant after the memset above,
	 * but kept for clarity about which fields matter to AP mode */
	psta->expire_to = 0;
	psta->flags = 0;
	psta->capability = 0;
	psta->bpairwise_key_installed = false;
	psta->nonerp_set = 0;
	psta->no_short_slot_time_set = 0;
	psta->no_short_preamble_set = 0;
	psta->no_ht_gf_set = 0;
	psta->no_ht_set = 0;
	psta->ht_20mhz_set = 0;
	psta->under_exist_checking = 0;
	psta->keep_alive_trycnt = 0;
}
/*
 * _rtw_init_sta_priv() - allocate and initialize the station-info pool.
 * @pstapriv: station-management state to set up
 *
 * Builds the NUM_STA-entry sta_info pool, puts every entry on the free
 * queue, and initializes the hash table, auth/asoc lists, locks, bitmaps
 * and timeout defaults. Returns _SUCCESS, or _FAIL if the pool
 * allocation fails.
 */
u32 _rtw_init_sta_priv(struct sta_priv *pstapriv)
{
	struct sta_info *psta;
	s32 i;

	/* over-allocate by 4 bytes so the usable buffer below can be
	 * realigned to a 4-byte boundary
	 */
	pstapriv->pallocated_stainfo_buf = vzalloc(sizeof(struct sta_info) * NUM_STA+4);
	if (!pstapriv->pallocated_stainfo_buf)
		return _FAIL;

	/* 4-byte-align the start of the actual sta_info array */
	pstapriv->pstainfo_buf = pstapriv->pallocated_stainfo_buf + 4 -
		((SIZE_PTR)(pstapriv->pallocated_stainfo_buf) & 3);

	INIT_LIST_HEAD(&pstapriv->free_sta_queue.queue);
	spin_lock_init(&pstapriv->free_sta_queue.lock);

	spin_lock_init(&pstapriv->sta_hash_lock);

	/* _rtw_init_queue(&pstapriv->asoc_q); */
	pstapriv->asoc_sta_count = 0;

	INIT_LIST_HEAD(&pstapriv->sleep_q.queue);
	spin_lock_init(&pstapriv->sleep_q.lock);
	INIT_LIST_HEAD(&pstapriv->wakeup_q.queue);
	spin_lock_init(&pstapriv->wakeup_q.lock);

	/* reset each pool entry, init its hash bucket, and chain the entry
	 * onto the free queue
	 */
	psta = (struct sta_info *)(pstapriv->pstainfo_buf);
	for (i = 0; i < NUM_STA; i++) {
		_rtw_init_stainfo(psta);
		INIT_LIST_HEAD(&(pstapriv->sta_hash[i]));
		list_add_tail(&psta->list, get_list_head(&pstapriv->free_sta_queue));
		psta++;
	}

	pstapriv->sta_dz_bitmap = 0;
	pstapriv->tim_bitmap = 0;

	INIT_LIST_HEAD(&pstapriv->asoc_list);
	INIT_LIST_HEAD(&pstapriv->auth_list);
	spin_lock_init(&pstapriv->asoc_list_lock);
	spin_lock_init(&pstapriv->auth_list_lock);
	pstapriv->asoc_list_cnt = 0;
	pstapriv->auth_list_cnt = 0;

	/* timeout ticks; the "3*2 = 6 sec" note suggests a 2 s tick —
	 * the timer that decrements these is elsewhere, confirm there
	 */
	pstapriv->auth_to = 3; /* 3*2 = 6 sec */
	pstapriv->assoc_to = 3;
	pstapriv->expire_to = 3; /* 3*2 = 6 sec */

	pstapriv->max_num_sta = NUM_STA;

	return _SUCCESS;
}
inline int rtw_stainfo_offset(struct sta_priv *stapriv, struct sta_info *sta)
{
int offset = (((u8 *)sta) - stapriv->pstainfo_buf)/sizeof(struct sta_info);
return offset;
}
inline struct sta_info *rtw_get_stainfo_by_offset(struct sta_priv *stapriv, int offset)
{
return (struct sta_info *)(stapriv->pstainfo_buf + offset * sizeof(struct sta_info));
}
/* this function is used to free the memory of lock || sema for all stainfos */
void kfree_all_stainfo(struct sta_priv *pstapriv);
void kfree_all_stainfo(struct sta_priv *pstapriv)
{
	struct list_head *plist, *phead;

	spin_lock_bh(&pstapriv->sta_hash_lock);

	/* NOTE(review): this traversal has no per-entry side effects — the
	 * loop body only advances the cursor. It appears to be a leftover
	 * from an implementation that released per-sta locks/semaphores
	 * here; today the function is effectively a no-op under the lock.
	 */
	phead = get_list_head(&pstapriv->free_sta_queue);
	plist = get_next(phead);

	while (phead != plist) {
		plist = get_next(plist);
	}

	spin_unlock_bh(&pstapriv->sta_hash_lock);
}
/*
 * Teardown hook invoked from _rtw_free_sta_priv(); currently only
 * delegates to kfree_all_stainfo(), which must run before the
 * sta_hash_lock itself is discarded.
 */
void kfree_sta_priv_lock(struct sta_priv *pstapriv);
void kfree_sta_priv_lock(struct sta_priv *pstapriv)
{
	kfree_all_stainfo(pstapriv); /* be done before free sta_hash_lock */
}
/*
 * _rtw_free_sta_priv() - tear down the station pool built by
 * _rtw_init_sta_priv().
 *
 * Walks every hash bucket under sta_hash_lock, cancelling the 16 per-TID
 * A-MPDU reordering timers of each station, then frees the pool buffer.
 * Always returns _SUCCESS (a NULL @pstapriv is tolerated and skipped).
 */
u32 _rtw_free_sta_priv(struct sta_priv *pstapriv)
{
	struct list_head *phead, *plist;
	struct sta_info *psta = NULL;
	struct recv_reorder_ctrl *preorder_ctrl;
	int index;

	if (pstapriv) {
		/*delete all reordering_ctrl_timer */
		spin_lock_bh(&pstapriv->sta_hash_lock);
		for (index = 0; index < NUM_STA; index++) {
			phead = &(pstapriv->sta_hash[index]);
			list_for_each(plist, phead) {
				int i;

				psta = list_entry(plist, struct sta_info,
						  hash_list);
				/* one reorder control (and timer) per TID */
				for (i = 0; i < 16 ; i++) {
					preorder_ctrl = &psta->recvreorder_ctrl[i];
					del_timer_sync(&preorder_ctrl->reordering_ctrl_timer);
				}
			}
		}
		spin_unlock_bh(&pstapriv->sta_hash_lock);
		/*===============================*/

		kfree_sta_priv_lock(pstapriv);

		/* frees the whole sta_info pool in one shot; pstainfo_buf
		 * points into this allocation
		 */
		vfree(pstapriv->pallocated_stainfo_buf);
	}

	return _SUCCESS;
}
/* struct sta_info *rtw_alloc_stainfo(_queue *pfree_sta_queue, unsigned char *hwaddr) */
/*
 * rtw_alloc_stainfo() - take a sta_info from the free pool and set it up
 * for @hwaddr.
 *
 * The entry is reset, inserted into the MAC-address hash table, and its
 * per-TID receive caches, timers and reorder controls are initialized.
 * Returns NULL when the free queue is empty or the hash index is out of
 * range; otherwise the ready-to-use entry. Runs under sta_hash_lock,
 * dropped before rtw_alloc_macid() is called.
 */
struct sta_info *rtw_alloc_stainfo(struct sta_priv *pstapriv, u8 *hwaddr)
{
	s32 index;
	struct list_head *phash_list;
	struct sta_info *psta;
	struct __queue *pfree_sta_queue;
	struct recv_reorder_ctrl *preorder_ctrl;
	int i = 0;
	u16 wRxSeqInitialValue = 0xffff;

	pfree_sta_queue = &pstapriv->free_sta_queue;

	/* spin_lock_bh(&(pfree_sta_queue->lock)); */
	spin_lock_bh(&(pstapriv->sta_hash_lock));
	if (list_empty(&pfree_sta_queue->queue)) {
		/* pool exhausted */
		/* spin_unlock_bh(&(pfree_sta_queue->lock)); */
		spin_unlock_bh(&(pstapriv->sta_hash_lock));
		return NULL;
	} else {
		psta = container_of(get_next(&pfree_sta_queue->queue), struct sta_info, list);
		list_del_init(&(psta->list));
		/* spin_unlock_bh(&(pfree_sta_queue->lock)); */

		_rtw_init_stainfo(psta);

		psta->padapter = pstapriv->padapter;
		memcpy(psta->hwaddr, hwaddr, ETH_ALEN);

		index = wifi_mac_hash(hwaddr);
		if (index >= NUM_STA) {
			/* defensive: hash out of range — give up without
			 * returning the entry to the free queue (note: the
			 * entry is effectively lost in this path)
			 */
			spin_unlock_bh(&(pstapriv->sta_hash_lock));
			psta = NULL;
			goto exit;
		}
		phash_list = &(pstapriv->sta_hash[index]);

		/* spin_lock_bh(&(pstapriv->sta_hash_lock)); */

		list_add_tail(&psta->hash_list, phash_list);

		pstapriv->asoc_sta_count++;

		/* spin_unlock_bh(&(pstapriv->sta_hash_lock)); */

		/* Commented by Albert 2009/08/13 */
		/* For the SMC router, the sequence number of first packet of WPS handshake will be 0. */
		/* In this case, this packet will be dropped by recv_decache function if we use the 0x00 as the default value for tid_rxseq variable. */
		/* So, we initialize the tid_rxseq variable as the 0xffff. */
		for (i = 0; i < 16; i++)
			memcpy(&psta->sta_recvpriv.rxcache.tid_rxseq[i], &wRxSeqInitialValue, 2);

		init_addba_retry_timer(pstapriv->padapter, psta);

		/* for A-MPDU Rx reordering buffer control */
		for (i = 0; i < 16 ; i++) {
			preorder_ctrl = &psta->recvreorder_ctrl[i];

			preorder_ctrl->padapter = pstapriv->padapter;

			preorder_ctrl->enable = false;

			preorder_ctrl->indicate_seq = 0xffff;
			preorder_ctrl->wend_b = 0xffff;
			/* preorder_ctrl->wsize_b = (NR_RECVBUFF-2); */
			preorder_ctrl->wsize_b = 64;/* 64; */

			INIT_LIST_HEAD(&preorder_ctrl->pending_recvframe_queue.queue);
			spin_lock_init(&preorder_ctrl->pending_recvframe_queue.lock);

			rtw_init_recv_timer(preorder_ctrl);
		}

		/* init for DM */
		psta->rssi_stat.UndecoratedSmoothedPWDB = (-1);
		psta->rssi_stat.UndecoratedSmoothedCCK = (-1);

		/* init for the sequence number of received management frame */
		psta->RxMgmtFrameSeqNum = 0xffff;

		spin_unlock_bh(&(pstapriv->sta_hash_lock));

		/* alloc mac id for non-bc/mc station, */
		rtw_alloc_macid(pstapriv->padapter, psta);
	}

exit:
	return psta;
}
/*
 * rtw_free_stainfo() - detach @psta from all driver state and return it
 * to the free pool.
 *
 * Flushes the station's sleep and per-AC pending xmit queues (adjusting
 * the hw_xmit accounting), unlinks it from the hash table and auth list,
 * cancels its timers, drains the 16 per-TID reorder queues, clears its
 * AID/TIM bookkeeping and finally re-queues the entry on free_sta_queue.
 * Tolerates a NULL @psta. Always returns _SUCCESS.
 *
 * Lock order used here: psta->lock, then pxmitpriv->lock, then
 * sta_hash_lock, then per-queue and list locks — each taken and released
 * in sequence, never nested across sections.
 */
u32 rtw_free_stainfo(struct adapter *padapter, struct sta_info *psta)
{
	int i;
	struct __queue *pfree_sta_queue;
	struct recv_reorder_ctrl *preorder_ctrl;
	struct sta_xmit_priv *pstaxmitpriv;
	struct xmit_priv *pxmitpriv = &padapter->xmitpriv;
	struct sta_priv *pstapriv = &padapter->stapriv;
	struct hw_xmit *phwxmit;

	if (!psta)
		goto exit;

	/* mark not linked first so other paths stop using the entry */
	spin_lock_bh(&psta->lock);
	psta->state &= ~_FW_LINKED;
	spin_unlock_bh(&psta->lock);

	pfree_sta_queue = &pstapriv->free_sta_queue;

	pstaxmitpriv = &psta->sta_xmitpriv;

	/* list_del_init(&psta->sleep_list); */

	/* list_del_init(&psta->wakeup_list); */

	spin_lock_bh(&pxmitpriv->lock);

	rtw_free_xmitframe_queue(pxmitpriv, &psta->sleep_q);
	psta->sleepq_len = 0;

	/* drain each AC pending queue; hwxmits[0..3] map to VO/VI/BE/BK */

	/* vo */
	/* spin_lock_bh(&(pxmitpriv->vo_pending.lock)); */
	rtw_free_xmitframe_queue(pxmitpriv, &pstaxmitpriv->vo_q.sta_pending);
	list_del_init(&(pstaxmitpriv->vo_q.tx_pending));
	phwxmit = pxmitpriv->hwxmits;
	phwxmit->accnt -= pstaxmitpriv->vo_q.qcnt;
	pstaxmitpriv->vo_q.qcnt = 0;
	/* spin_unlock_bh(&(pxmitpriv->vo_pending.lock)); */

	/* vi */
	/* spin_lock_bh(&(pxmitpriv->vi_pending.lock)); */
	rtw_free_xmitframe_queue(pxmitpriv, &pstaxmitpriv->vi_q.sta_pending);
	list_del_init(&(pstaxmitpriv->vi_q.tx_pending));
	phwxmit = pxmitpriv->hwxmits+1;
	phwxmit->accnt -= pstaxmitpriv->vi_q.qcnt;
	pstaxmitpriv->vi_q.qcnt = 0;
	/* spin_unlock_bh(&(pxmitpriv->vi_pending.lock)); */

	/* be */
	/* spin_lock_bh(&(pxmitpriv->be_pending.lock)); */
	rtw_free_xmitframe_queue(pxmitpriv, &pstaxmitpriv->be_q.sta_pending);
	list_del_init(&(pstaxmitpriv->be_q.tx_pending));
	phwxmit = pxmitpriv->hwxmits+2;
	phwxmit->accnt -= pstaxmitpriv->be_q.qcnt;
	pstaxmitpriv->be_q.qcnt = 0;
	/* spin_unlock_bh(&(pxmitpriv->be_pending.lock)); */

	/* bk */
	/* spin_lock_bh(&(pxmitpriv->bk_pending.lock)); */
	rtw_free_xmitframe_queue(pxmitpriv, &pstaxmitpriv->bk_q.sta_pending);
	list_del_init(&(pstaxmitpriv->bk_q.tx_pending));
	phwxmit = pxmitpriv->hwxmits+3;
	phwxmit->accnt -= pstaxmitpriv->bk_q.qcnt;
	pstaxmitpriv->bk_q.qcnt = 0;
	/* spin_unlock_bh(&(pxmitpriv->bk_pending.lock)); */

	spin_unlock_bh(&pxmitpriv->lock);

	spin_lock_bh(&pstapriv->sta_hash_lock);
	list_del_init(&psta->hash_list);
	pstapriv->asoc_sta_count--;
	spin_unlock_bh(&pstapriv->sta_hash_lock);

	/* re-init sta_info; 20061114 will be init in alloc_stainfo */
	/* _rtw_init_sta_xmit_priv(&psta->sta_xmitpriv); */
	/* _rtw_init_sta_recv_priv(&psta->sta_recvpriv); */

	del_timer_sync(&psta->addba_retry_timer);

	/* for A-MPDU Rx reordering buffer control, cancel reordering_ctrl_timer */
	for (i = 0; i < 16 ; i++) {
		struct list_head *phead, *plist;
		union recv_frame *prframe;
		struct __queue *ppending_recvframe_queue;
		struct __queue *pfree_recv_queue = &padapter->recvpriv.free_recv_queue;

		preorder_ctrl = &psta->recvreorder_ctrl[i];

		del_timer_sync(&preorder_ctrl->reordering_ctrl_timer);

		ppending_recvframe_queue = &preorder_ctrl->pending_recvframe_queue;

		spin_lock_bh(&ppending_recvframe_queue->lock);

		phead = get_list_head(ppending_recvframe_queue);
		plist = get_next(phead);

		/* return every queued rx frame to the free pool */
		while (!list_empty(phead)) {
			prframe = (union recv_frame *)plist;

			plist = get_next(plist);

			list_del_init(&(prframe->u.hdr.list));

			rtw_free_recvframe(prframe, pfree_recv_queue);
		}

		spin_unlock_bh(&ppending_recvframe_queue->lock);
	}

	if (!(psta->state & WIFI_AP_STATE))
		rtw_hal_set_odm_var(padapter, HAL_ODM_STA_INFO, psta, false);

	/* release mac id for non-bc/mc station, */
	rtw_release_macid(pstapriv->padapter, psta);

	/*
	spin_lock_bh(&pstapriv->asoc_list_lock);
	list_del_init(&psta->asoc_list);
	spin_unlock_bh(&pstapriv->asoc_list_lock);
	*/
	spin_lock_bh(&pstapriv->auth_list_lock);
	if (!list_empty(&psta->auth_list)) {
		list_del_init(&psta->auth_list);
		pstapriv->auth_list_cnt--;
	}
	spin_unlock_bh(&pstapriv->auth_list_lock);

	/* scrub per-association state before the entry is reused */
	psta->expire_to = 0;

	psta->sleepq_ac_len = 0;
	psta->qos_info = 0;

	psta->max_sp_len = 0;
	psta->uapsd_bk = 0;
	psta->uapsd_be = 0;
	psta->uapsd_vi = 0;
	psta->uapsd_vo = 0;

	psta->has_legacy_ac = 0;

	pstapriv->sta_dz_bitmap &= ~BIT(psta->aid);
	pstapriv->tim_bitmap &= ~BIT(psta->aid);

	if ((psta->aid > 0) && (pstapriv->sta_aid[psta->aid - 1] == psta)) {
		pstapriv->sta_aid[psta->aid - 1] = NULL;
		psta->aid = 0;
	}

	psta->under_exist_checking = 0;

	/* spin_lock_bh(&(pfree_sta_queue->lock)); */
	list_add_tail(&psta->list, get_list_head(pfree_sta_queue));
	/* spin_unlock_bh(&(pfree_sta_queue->lock)); */

exit:
	return _SUCCESS;
}
/* free all stainfo which in sta_hash[all] */
/*
 * rtw_free_all_stainfo() - free every hashed station except the bc/mc
 * entry.
 *
 * Collects candidates onto a private list under sta_hash_lock, then
 * frees them after dropping the lock — rtw_free_stainfo() takes
 * sta_hash_lock itself, so it must not run while the lock is held here.
 * When only the bc/mc station remains (asoc_sta_count == 1) there is
 * nothing to do.
 */
void rtw_free_all_stainfo(struct adapter *padapter)
{
	struct list_head *plist, *phead, *tmp;
	s32 index;
	struct sta_info *psta = NULL;
	struct sta_priv *pstapriv = &padapter->stapriv;
	struct sta_info *pbcmc_stainfo = rtw_get_bcmc_stainfo(padapter);
	LIST_HEAD(stainfo_free_list);

	if (pstapriv->asoc_sta_count == 1)
		return;

	spin_lock_bh(&pstapriv->sta_hash_lock);

	for (index = 0; index < NUM_STA; index++) {
		phead = &(pstapriv->sta_hash[index]);
		list_for_each_safe(plist, tmp, phead) {
			psta = list_entry(plist, struct sta_info, hash_list);

			if (pbcmc_stainfo != psta)
				list_move(&psta->hash_list, &stainfo_free_list);
		}
	}

	spin_unlock_bh(&pstapriv->sta_hash_lock);

	list_for_each_safe(plist, tmp, &stainfo_free_list) {
		psta = list_entry(plist, struct sta_info, hash_list);
		rtw_free_stainfo(padapter, psta);
	}
}
/*
 * rtw_get_stainfo() - look up the sta_info for @hwaddr in the hash table.
 *
 * Any multicast address is mapped to the single broadcast entry. Returns
 * the matching entry, or NULL when @hwaddr is NULL or no entry matches.
 */
struct sta_info *rtw_get_stainfo(struct sta_priv *pstapriv, u8 *hwaddr)
{
	u8 bc_addr[ETH_ALEN] = {0xff, 0xff, 0xff, 0xff, 0xff, 0xff};
	struct sta_info *found = NULL;
	struct list_head *pos, *bucket;
	u8 *addr;
	u32 index;

	if (!hwaddr)
		return NULL;

	/* all bc/mc traffic shares one station entry */
	addr = is_multicast_ether_addr(hwaddr) ? bc_addr : hwaddr;

	index = wifi_mac_hash(addr);

	spin_lock_bh(&pstapriv->sta_hash_lock);

	bucket = &(pstapriv->sta_hash[index]);
	list_for_each(pos, bucket) {
		struct sta_info *cur = list_entry(pos, struct sta_info, hash_list);

		if (!memcmp(cur->hwaddr, addr, ETH_ALEN)) {
			/* if found the matched address */
			found = cur;
			break;
		}
	}

	spin_unlock_bh(&pstapriv->sta_hash_lock);

	return found;
}
/*
 * rtw_init_bcmc_stainfo() - allocate the shared broadcast/multicast
 * station entry.
 *
 * Returns _FAIL when the pool is exhausted, _SUCCESS otherwise.
 */
u32 rtw_init_bcmc_stainfo(struct adapter *padapter)
{
	NDIS_802_11_MAC_ADDRESS bcast_addr = {0xff, 0xff, 0xff, 0xff, 0xff, 0xff};
	struct sta_info *psta;

	/* struct __queue *pstapending = &padapter->xmitpriv.bm_pending; */
	psta = rtw_alloc_stainfo(&padapter->stapriv, bcast_addr);
	if (!psta)
		return _FAIL;

	/* default broadcast & multicast use macid 1 */
	psta->mac_id = 1;

	return _SUCCESS;
}
/* Return the shared broadcast/multicast station entry (NULL if absent). */
struct sta_info *rtw_get_bcmc_stainfo(struct adapter *padapter)
{
	u8 bc_addr[ETH_ALEN] = {0xff, 0xff, 0xff, 0xff, 0xff, 0xff};

	return rtw_get_stainfo(&padapter->stapriv, bc_addr);
}
/*
 * rtw_access_ctrl() - apply the MAC ACL policy to @mac_addr.
 *
 * Scans the ACL node queue for a valid entry matching @mac_addr, then
 * interprets the result according to acl_list->mode:
 *   1 = deny-list  (accept unless listed)
 *   2 = accept-list (deny unless listed)
 *   other = no filtering, always accept
 * Returns true when the address is allowed.
 */
u8 rtw_access_ctrl(struct adapter *padapter, u8 *mac_addr)
{
	struct sta_priv *stapriv = &padapter->stapriv;
	struct wlan_acl_pool *acl = &stapriv->acl_list;
	struct __queue *node_q = &acl->acl_node_q;
	struct list_head *pos, *head;
	bool listed = false;
	bool res;

	spin_lock_bh(&(node_q->lock));
	head = get_list_head(node_q);
	list_for_each(pos, head) {
		struct rtw_wlan_acl_node *node =
			list_entry(pos, struct rtw_wlan_acl_node, list);

		if (!memcmp(node->addr, mac_addr, ETH_ALEN) &&
		    node->valid == true) {
			listed = true;
			break;
		}
	}
	spin_unlock_bh(&(node_q->lock));

	switch (acl->mode) {
	case 1:		/* accept unless in deny list */
		res = !listed;
		break;
	case 2:		/* deny unless in accept list */
		res = listed;
		break;
	default:
		res = true;
		break;
	}

	return res;
}
| linux-master | drivers/staging/rtl8723bs/core/rtw_sta_mgt.c |
// SPDX-License-Identifier: GPL-2.0
/******************************************************************************
*
* Copyright(c) 2007 - 2012 Realtek Corporation. All rights reserved.
*
******************************************************************************/
#include <drv_types.h>
#include <rtw_debug.h>
#include <asm/unaligned.h>
/*
 * init_mlme_ap_info() - AP-mode MLME setup: beacon-update lock, the ACL
 * node queue, and the generic AP-mode state via start_ap_mode().
 */
void init_mlme_ap_info(struct adapter *padapter)
{
	struct mlme_priv *mlme = &padapter->mlmepriv;
	struct wlan_acl_pool *acl = &padapter->stapriv.acl_list;

	spin_lock_init(&mlme->bcn_update_lock);

	/* for ACL */
	INIT_LIST_HEAD(&acl->acl_node_q.queue);
	spin_lock_init(&acl->acl_node_q.lock);

	/* pmlmeext->bstart_bss = false; */

	start_ap_mode(padapter);
}
/*
 * free_mlme_ap_info() - stop AP operation and release every associated
 * station, including the bc/mc entry.
 */
void free_mlme_ap_info(struct adapter *padapter)
{
	struct sta_info *psta = NULL;
	struct mlme_priv *pmlmepriv = &padapter->mlmepriv;
	struct mlme_ext_priv *pmlmeext = &padapter->mlmeextpriv;
	struct mlme_ext_info *pmlmeinfo = &pmlmeext->mlmext_info;

	/* stop_ap_mode(padapter); */
	pmlmepriv->update_bcn = false;
	pmlmeext->bstart_bss = false;

	/* deauth/kick all currently associated stations */
	rtw_sta_flush(padapter);

	pmlmeinfo->state = _HW_STATE_NOLINK_;

	/* free_assoc_sta_resources */
	rtw_free_all_stainfo(padapter);

	/* free bc/mc sta_info */
	psta = rtw_get_bcmc_stainfo(padapter);
	rtw_free_stainfo(padapter, psta);
}
/*
 * update_BCNTIM() - rebuild the TIM element inside the beacon IE buffer
 * from pstapriv->tim_bitmap.
 *
 * If a TIM IE already exists it is replaced in place; otherwise a new
 * one is inserted after the SSID, Supported Rates and DS Parameter Set
 * IEs. Any IEs following the TIM position are saved and copied back
 * after the new TIM is written, and ie_length is updated to match.
 */
static void update_BCNTIM(struct adapter *padapter)
{
	struct sta_priv *pstapriv = &padapter->stapriv;
	struct mlme_ext_priv *pmlmeext = &padapter->mlmeextpriv;
	struct mlme_ext_info *pmlmeinfo = &pmlmeext->mlmext_info;
	struct wlan_bssid_ex *pnetwork_mlmeext = &pmlmeinfo->network;
	unsigned char *pie = pnetwork_mlmeext->ies;

	/* update TIM IE */
	u8 *p, *dst_ie, *premainder_ie = NULL, *pbackup_remainder_ie = NULL;
	__le16 tim_bitmap_le;
	uint offset, tmp_len, tim_ielen, tim_ie_offset, remainder_ielen;

	tim_bitmap_le = cpu_to_le16(pstapriv->tim_bitmap);

	p = rtw_get_ie(pie + _FIXED_IE_LENGTH_,
		       WLAN_EID_TIM,
		       &tim_ielen,
		       pnetwork_mlmeext->ie_length - _FIXED_IE_LENGTH_
	);
	if (p && tim_ielen > 0) {
		/* existing TIM found: overwrite it in place */
		tim_ielen += 2;

		premainder_ie = p + tim_ielen;

		tim_ie_offset = (signed int)(p - pie);

		remainder_ielen = pnetwork_mlmeext->ie_length - tim_ie_offset - tim_ielen;

		/* append TIM IE from dst_ie offset */
		dst_ie = p;
	} else {
		/* no TIM yet: compute the insertion point after the fixed
		 * fields, SSID, Supported Rates and DS Parameter Set IEs
		 */
		tim_ielen = 0;

		/* calculate head_len */
		offset = _FIXED_IE_LENGTH_;

		/* get ssid_ie len */
		p = rtw_get_ie(pie + _BEACON_IE_OFFSET_,
			       WLAN_EID_SSID,
			       &tmp_len,
			       (pnetwork_mlmeext->ie_length - _BEACON_IE_OFFSET_)
		);
		if (p)
			offset += tmp_len + 2;

		/* get supported rates len */
		p = rtw_get_ie(pie + _BEACON_IE_OFFSET_,
			       WLAN_EID_SUPP_RATES, &tmp_len,
			       (pnetwork_mlmeext->ie_length - _BEACON_IE_OFFSET_)
		);
		if (p)
			offset += tmp_len + 2;

		/* DS Parameter Set IE, len =3 */
		offset += 3;

		premainder_ie = pie + offset;

		remainder_ielen = pnetwork_mlmeext->ie_length - offset - tim_ielen;

		/* append TIM IE from offset */
		dst_ie = pie + offset;
	}

	/* save the IEs that follow so they can be restored after the TIM */
	if (remainder_ielen > 0) {
		pbackup_remainder_ie = rtw_malloc(remainder_ielen);
		if (pbackup_remainder_ie && premainder_ie)
			memcpy(pbackup_remainder_ie, premainder_ie, remainder_ielen);
	}

	*dst_ie++ = WLAN_EID_TIM;

	/* 5-byte body only when both high and low (non-bc) AID bytes are
	 * set and two PVB octets are needed; otherwise 4 bytes
	 */
	if ((pstapriv->tim_bitmap & 0xff00) && (pstapriv->tim_bitmap & 0x00fe))
		tim_ielen = 5;
	else
		tim_ielen = 4;

	*dst_ie++ = tim_ielen;

	*dst_ie++ = 0;/* DTIM count */
	*dst_ie++ = 1;/* DTIM period */

	if (pstapriv->tim_bitmap & BIT(0))/* for bc/mc frames */
		*dst_ie++ = BIT(0);/* bitmap ctrl */
	else
		*dst_ie++ = 0;

	if (tim_ielen == 4) {
		__le16 pvb;

		if (pstapriv->tim_bitmap & 0xff00)
			pvb = cpu_to_le16(pstapriv->tim_bitmap >> 8);
		else
			pvb = tim_bitmap_le;

		/* dst_ie is u8 *, so only the low byte of pvb lands in the
		 * single partial-virtual-bitmap octet
		 */
		*dst_ie++ = le16_to_cpu(pvb);
	} else if (tim_ielen == 5) {
		memcpy(dst_ie, &tim_bitmap_le, 2);
		dst_ie += 2;
	}

	/* copy remainder IE */
	if (pbackup_remainder_ie) {
		memcpy(dst_ie, pbackup_remainder_ie, remainder_ielen);

		kfree(pbackup_remainder_ie);
	}

	offset = (uint)(dst_ie - pie);
	pnetwork_mlmeext->ie_length = offset + remainder_ielen;
}
/*
 * chk_sta_is_alive() - snapshot the station's rx packet counters.
 *
 * NOTE(review): despite the name, this always returns true — it only
 * refreshes the last-rx counters via sta_update_last_rx_pkts(); the
 * actual liveness decision happens in expire_timeout_chk() via
 * expire_to and the keep-alive nulldata probes.
 */
static u8 chk_sta_is_alive(struct sta_info *psta)
{
	sta_update_last_rx_pkts(psta);

	return true;
}
/*
 * expire_timeout_chk() - periodic station aging for AP mode.
 *
 * Pass 1: age entries on the auth list; when expire_to hits 0 the entry
 * is freed (the list lock is dropped around rtw_free_stainfo() because
 * it takes other locks itself).
 * Pass 2: age entries on the asoc list; expired sleeping stations first
 * get their TIM bit set, and with active_keep_alive_check enabled they
 * are collected (by pool offset, so the pointers stay valid after the
 * lock is dropped) for a nulldata keep-alive probe; otherwise they are
 * freed via ap_free_sta().
 * Finally, probes are issued on the correct channel (temporarily
 * switching if needed) and stations that fail repeatedly are removed.
 */
void expire_timeout_chk(struct adapter *padapter)
{
	struct list_head *phead, *plist, *tmp;
	u8 updated = false;
	struct sta_info *psta = NULL;
	struct sta_priv *pstapriv = &padapter->stapriv;
	u8 chk_alive_num = 0;
	char chk_alive_list[NUM_STA];
	int i;

	spin_lock_bh(&pstapriv->auth_list_lock);

	phead = &pstapriv->auth_list;
	/* check auth_queue */
	list_for_each_safe(plist, tmp, phead) {
		psta = list_entry(plist, struct sta_info, auth_list);

		if (psta->expire_to > 0) {
			psta->expire_to--;
			if (psta->expire_to == 0) {
				list_del_init(&psta->auth_list);
				pstapriv->auth_list_cnt--;

				/* drop the list lock: rtw_free_stainfo()
				 * acquires other locks internally
				 */
				spin_unlock_bh(&pstapriv->auth_list_lock);

				rtw_free_stainfo(padapter, psta);

				spin_lock_bh(&pstapriv->auth_list_lock);
			}
		}

	}

	spin_unlock_bh(&pstapriv->auth_list_lock);
	psta = NULL;

	spin_lock_bh(&pstapriv->asoc_list_lock);

	phead = &pstapriv->asoc_list;
	/* check asoc_queue */
	list_for_each_safe(plist, tmp, phead) {
		psta = list_entry(plist, struct sta_info, asoc_list);

		if (chk_sta_is_alive(psta) || !psta->expire_to) {
			psta->expire_to = pstapriv->expire_to;
			psta->keep_alive_trycnt = 0;
			psta->under_exist_checking = 0;
		} else {
			if (psta->expire_to > 0)
				psta->expire_to--;
		}

		if (psta->expire_to == 0) {
			struct mlme_ext_priv *pmlmeext = &padapter->mlmeextpriv;

			/* WFA/WiFi-spec mode never expires stations */
			if (padapter->registrypriv.wifi_spec == 1) {
				psta->expire_to = pstapriv->expire_to;
				continue;
			}

			if (psta->state & WIFI_SLEEP_STATE) {
				if (!(psta->state & WIFI_STA_ALIVE_CHK_STATE)) {
					/* to check if alive by another methods */
					/* if station is at ps mode. */
					psta->expire_to = pstapriv->expire_to;
					psta->state |= WIFI_STA_ALIVE_CHK_STATE;

					/* to update bcn with tim_bitmap for this station */
					pstapriv->tim_bitmap |= BIT(psta->aid);
					update_beacon(padapter, WLAN_EID_TIM, NULL, true);

					if (!pmlmeext->active_keep_alive_check)
						continue;
				}
			}

			if (pmlmeext->active_keep_alive_check) {
				int stainfo_offset;

				/* record by pool offset: pointers may go
				 * stale after the lock is released below
				 */
				stainfo_offset = rtw_stainfo_offset(pstapriv, psta);
				if (stainfo_offset_valid(stainfo_offset))
					chk_alive_list[chk_alive_num++] = stainfo_offset;

				continue;
			}

			list_del_init(&psta->asoc_list);
			pstapriv->asoc_list_cnt--;

			updated = ap_free_sta(padapter, psta, false, WLAN_REASON_DEAUTH_LEAVING);
		} else {
			/* TODO: Aging mechanism to digest frames in sleep_q to */
			/* avoid running out of xmitframe */
			if (psta->sleepq_len > (NR_XMITFRAME / pstapriv->asoc_list_cnt)
				&& padapter->xmitpriv.free_xmitframe_cnt < ((
					NR_XMITFRAME / pstapriv->asoc_list_cnt
				) / 2)
			)
				wakeup_sta_to_xmit(padapter, psta);
		}
	}

	spin_unlock_bh(&pstapriv->asoc_list_lock);

	if (chk_alive_num) {
		u8 backup_oper_channel = 0;
		struct mlme_ext_priv *pmlmeext = &padapter->mlmeextpriv;

		/* switch to correct channel of current network before issue keep-alive frames */
		if (rtw_get_oper_ch(padapter) != pmlmeext->cur_channel) {
			backup_oper_channel = rtw_get_oper_ch(padapter);
			SelectChannel(padapter, pmlmeext->cur_channel);
		}

		/* issue null data to check sta alive*/
		for (i = 0; i < chk_alive_num; i++) {
			int ret = _FAIL;

			psta = rtw_get_stainfo_by_offset(pstapriv, chk_alive_list[i]);

			if (!(psta->state & _FW_LINKED))
				continue;

			if (psta->state & WIFI_SLEEP_STATE)
				ret = issue_nulldata(padapter, psta->hwaddr, 0, 1, 50);
			else
				ret = issue_nulldata(padapter, psta->hwaddr, 0, 3, 50);

			psta->keep_alive_trycnt++;
			if (ret == _SUCCESS) {
				/* station answered: reset its timeout */
				psta->expire_to = pstapriv->expire_to;
				psta->keep_alive_trycnt = 0;
				continue;
			} else if (psta->keep_alive_trycnt <= 3) {
				/* retry on the next pass */
				psta->expire_to = 1;
				continue;
			}

			/* probe failed repeatedly: remove the station */
			psta->keep_alive_trycnt = 0;

			spin_lock_bh(&pstapriv->asoc_list_lock);
			if (list_empty(&psta->asoc_list) == false) {
				list_del_init(&psta->asoc_list);
				pstapriv->asoc_list_cnt--;
				updated = ap_free_sta(padapter, psta, false,
						      WLAN_REASON_DEAUTH_LEAVING);
			}
			spin_unlock_bh(&pstapriv->asoc_list_lock);
		}

		if (backup_oper_channel > 0) /* back to the original operation channel */
			SelectChannel(padapter, backup_oper_channel);
	}

	associated_clients_update(padapter, updated);
}
/*
 * add_RATid() - derive the station's wireless mode / rate-adaptive ID
 * from its rate bitmap and push the RA configuration to the HAL.
 *
 * No-op when @psta is NULL or not linked.
 */
void add_RATid(struct adapter *padapter, struct sta_info *psta, u8 rssi_level)
{
	unsigned char sta_band = 0, shortGIrate = false;
	unsigned int tx_ra_bitmap = 0;
	struct mlme_priv *pmlmepriv = &(padapter->mlmepriv);
	struct wlan_bssid_ex
		*pcur_network = (struct wlan_bssid_ex *)&pmlmepriv->cur_network.network;

	if (!psta)
		return;

	if (!(psta->state & _FW_LINKED))
		return;

	rtw_hal_update_sta_rate_mask(padapter, psta);
	tx_ra_bitmap = psta->ra_mask;

	shortGIrate = query_ra_short_GI(psta);

	/* classify the band from which rate bits are set:
	 * bits 0-3 CCK (11B), 4-11 OFDM (11G), 12+ MCS (11N)
	 */
	if (pcur_network->configuration.ds_config > 14) {
		sta_band |= WIRELESS_INVALID;
	} else {
		if (tx_ra_bitmap & 0xffff000)
			sta_band |= WIRELESS_11_24N;

		if (tx_ra_bitmap & 0xff0)
			sta_band |= WIRELESS_11G;

		if (tx_ra_bitmap & 0x0f)
			sta_band |= WIRELESS_11B;
	}

	psta->wireless_mode = sta_band;
	psta->raid = networktype_to_raid_ex(padapter, psta);

	if (psta->aid < NUM_STA) {
		u8 arg[4] = {0};

		arg[0] = psta->mac_id;
		arg[1] = psta->raid;
		arg[2] = shortGIrate;
		arg[3] = psta->init_rate;

		rtw_hal_add_ra_tid(padapter, tx_ra_bitmap, arg, rssi_level);
	}
}
/*
 * update_bmc_sta() - configure the broadcast/multicast station entry
 * for the current AP network: aid 0, mac_id 1, basic-rate-only rate
 * adaptation, no QoS/HT, then report it linked.
 *
 * Silently does nothing if the bc/mc entry does not exist.
 */
void update_bmc_sta(struct adapter *padapter)
{
	unsigned char network_type;
	int supportRateNum = 0;
	unsigned int tx_ra_bitmap = 0;
	struct mlme_priv *pmlmepriv = &(padapter->mlmepriv);
	struct mlme_ext_priv *pmlmeext = &(padapter->mlmeextpriv);
	struct mlme_ext_info *pmlmeinfo = &(pmlmeext->mlmext_info);
	struct wlan_bssid_ex
		*pcur_network = (struct wlan_bssid_ex *)&pmlmepriv->cur_network.network;
	struct sta_info *psta = rtw_get_bcmc_stainfo(padapter);

	if (psta) {
		psta->aid = 0;/* default set to 0 */
		/* psta->mac_id = psta->aid+4; */
		psta->mac_id = psta->aid + 1;/* mac_id = 1 for bc/mc stainfo */

		pmlmeinfo->FW_sta_info[psta->mac_id].psta = psta;

		/* bc/mc frames never use QoS or HT */
		psta->qos_option = 0;
		psta->htpriv.ht_option = false;

		psta->ieee8021x_blocked = 0;

		memset((void *)&psta->sta_stats, 0, sizeof(struct stainfo_stats));

		/* psta->dot118021XPrivacy = _NO_PRIVACY_;//!!! remove it, because it has been set before this. */

		/* prepare for add_RATid */
		supportRateNum = rtw_get_rateset_len((u8 *)&pcur_network->supported_rates);
		network_type = rtw_check_network_type((u8 *)&pcur_network->supported_rates,
						      supportRateNum,
						      pcur_network->configuration.ds_config
		);
		if (is_supported_tx_cck(network_type)) {
			/* force 11B rates for broadcast on a CCK-capable net */
			network_type = WIRELESS_11B;
		} else if (network_type == WIRELESS_INVALID) { /* error handling */
			if (pcur_network->configuration.ds_config > 14)
				network_type = WIRELESS_INVALID;
			else
				network_type = WIRELESS_11B;
		}
		update_sta_basic_rate(psta, network_type);
		psta->wireless_mode = network_type;

		rtw_hal_update_sta_rate_mask(padapter, psta);
		tx_ra_bitmap = psta->ra_mask;

		psta->raid = networktype_to_raid_ex(padapter, psta);

		/* ap mode */
		rtw_hal_set_odm_var(padapter, HAL_ODM_STA_INFO, psta, true);

		/* if (pHalData->fw_ractrl == true) */
		{
			u8 arg[4] = {0};

			arg[0] = psta->mac_id;
			arg[1] = psta->raid;
			arg[2] = 0;
			arg[3] = psta->init_rate;

			rtw_hal_add_ra_tid(padapter, tx_ra_bitmap, arg, 0);
		}

		rtw_sta_media_status_rpt(padapter, psta, 1);

		spin_lock_bh(&psta->lock);
		psta->state = _FW_LINKED;
		spin_unlock_bh(&psta->lock);

	}
}
/* notes: */
/* AID: 1~MAX for sta and 0 for bc/mc in ap/adhoc mode */
/* MAC_ID = AID+1 for sta in ap/adhoc mode */
/* MAC_ID = 1 for bc/mc for sta/ap/adhoc */
/* MAC_ID = 0 for bssid for sta/ap/adhoc */
/* CAM_ID = 0~3 for default key, cmd_id =macid + 3, macid =aid+1; */
/*
 * update_sta_info_apmode() - configure a newly associated station's
 * capabilities in AP mode: 802.1X blocking, ERP protection, HT
 * (bandwidth, SGI, LDPC, STBC, A-MPDU) derived from the intersection of
 * the AP's and the station's HT caps, then mark the station linked.
 */
void update_sta_info_apmode(struct adapter *padapter, struct sta_info *psta)
{
	struct mlme_priv *pmlmepriv = &(padapter->mlmepriv);
	struct security_priv *psecuritypriv = &padapter->securitypriv;
	struct mlme_ext_priv *pmlmeext = &(padapter->mlmeextpriv);
	struct ht_priv *phtpriv_ap = &pmlmepriv->htpriv;
	struct ht_priv *phtpriv_sta = &psta->htpriv;
	u8 cur_ldpc_cap = 0, cur_stbc_cap = 0, cur_beamform_cap = 0;
	/* set intf_tag to if1 */
	/* psta->intf_tag = 0; */

	/* psta->mac_id = psta->aid+4; */
	/* psta->mac_id = psta->aid+1;//alloc macid when call rtw_alloc_stainfo(), */
	/* release macid when call rtw_free_stainfo() */

	/* ap mode */
	rtw_hal_set_odm_var(padapter, HAL_ODM_STA_INFO, psta, true);

	/* block data traffic until the 802.1X handshake completes */
	if (psecuritypriv->dot11AuthAlgrthm == dot11AuthAlgrthm_8021X)
		psta->ieee8021x_blocked = true;
	else
		psta->ieee8021x_blocked = false;

	/* update sta's cap */

	/* ERP */
	VCS_update(padapter, psta);
	/* HT related cap */
	if (phtpriv_sta->ht_option) {
		/* check if sta supports rx ampdu */
		phtpriv_sta->ampdu_enable = phtpriv_ap->ampdu_enable;

		phtpriv_sta->rx_ampdu_min_spacing = (
			phtpriv_sta->ht_cap.ampdu_params_info & IEEE80211_HT_CAP_AMPDU_DENSITY
		) >> 2;

		/* bwmode: 40 MHz only if both sides advertise it, capped
		 * at the AP's current bandwidth
		 */
		if ((
			phtpriv_sta->ht_cap.cap_info & phtpriv_ap->ht_cap.cap_info
		) & cpu_to_le16(IEEE80211_HT_CAP_SUP_WIDTH))
			psta->bw_mode = CHANNEL_WIDTH_40;
		else
			psta->bw_mode = CHANNEL_WIDTH_20;

		if (pmlmeext->cur_bwmode < psta->bw_mode)
			psta->bw_mode = pmlmeext->cur_bwmode;

		phtpriv_sta->ch_offset = pmlmeext->cur_ch_offset;

		/* check if sta support s Short GI 20M */
		if ((
			phtpriv_sta->ht_cap.cap_info & phtpriv_ap->ht_cap.cap_info
		) & cpu_to_le16(IEEE80211_HT_CAP_SGI_20))
			phtpriv_sta->sgi_20m = true;

		/* check if sta support s Short GI 40M */
		if ((
			phtpriv_sta->ht_cap.cap_info & phtpriv_ap->ht_cap.cap_info
		) & cpu_to_le16(IEEE80211_HT_CAP_SGI_40)) {
			if (psta->bw_mode == CHANNEL_WIDTH_40) /* according to psta->bw_mode */
				phtpriv_sta->sgi_40m = true;
			else
				phtpriv_sta->sgi_40m = false;
		}

		psta->qos_option = true;

		/* B0 Config LDPC Coding Capability */
		if (TEST_FLAG(phtpriv_ap->ldpc_cap, LDPC_HT_ENABLE_TX) &&
		    GET_HT_CAPABILITY_ELE_LDPC_CAP((u8 *)(&phtpriv_sta->ht_cap)))
			SET_FLAG(cur_ldpc_cap, (LDPC_HT_ENABLE_TX | LDPC_HT_CAP_TX));

		/* B7 B8 B9 Config STBC setting */
		if (TEST_FLAG(phtpriv_ap->stbc_cap, STBC_HT_ENABLE_TX) &&
		    GET_HT_CAPABILITY_ELE_RX_STBC((u8 *)(&phtpriv_sta->ht_cap)))
			SET_FLAG(cur_stbc_cap, (STBC_HT_ENABLE_TX | STBC_HT_CAP_TX));
	} else {
		/* non-HT station: clear everything HT-related */
		phtpriv_sta->ampdu_enable = false;

		phtpriv_sta->sgi_20m = false;
		phtpriv_sta->sgi_40m = false;

		psta->bw_mode = CHANNEL_WIDTH_20;
		phtpriv_sta->ch_offset = HAL_PRIME_CHNL_OFFSET_DONT_CARE;
	}

	phtpriv_sta->ldpc_cap = cur_ldpc_cap;
	phtpriv_sta->stbc_cap = cur_stbc_cap;
	phtpriv_sta->beamform_cap = cur_beamform_cap;

	/* tear down any stale BA sessions from a previous association */
	/* Rx AMPDU */
	send_delba(padapter, 0, psta->hwaddr);/* recipient */

	/* TX AMPDU */
	send_delba(padapter, 1, psta->hwaddr);/* originator */

	phtpriv_sta->agg_enable_bitmap = 0x0;/* reset */
	phtpriv_sta->candidate_tid_bitmap = 0x0;/* reset */

	update_ldpc_stbc_cap(psta);

	/* todo: init other variables */

	memset((void *)&psta->sta_stats, 0, sizeof(struct stainfo_stats));

	/* add ratid */
	/* add_RATid(padapter, psta);//move to ap_sta_info_defer_update() */

	spin_lock_bh(&psta->lock);
	psta->state |= _FW_LINKED;
	spin_unlock_bh(&psta->lock);
}
/*
 * update_ap_info() - copy the AP's own operating parameters (rates,
 * HT capabilities, bandwidth, channel offset) into the self @psta entry.
 */
static void update_ap_info(struct adapter *padapter, struct sta_info *psta)
{
	struct mlme_priv *pmlmepriv = &(padapter->mlmepriv);
	struct wlan_bssid_ex
		*pnetwork = (struct wlan_bssid_ex *)&pmlmepriv->cur_network.network;
	struct mlme_ext_priv *pmlmeext = &(padapter->mlmeextpriv);
	struct ht_priv *phtpriv_ap = &pmlmepriv->htpriv;

	psta->wireless_mode = pmlmeext->cur_wireless_mode;

	psta->bssratelen = rtw_get_rateset_len(pnetwork->supported_rates);
	memcpy(psta->bssrateset, pnetwork->supported_rates, psta->bssratelen);

	/* HT related cap */
	if (phtpriv_ap->ht_option) {
		/* check if sta supports rx ampdu */
		/* phtpriv_ap->ampdu_enable = phtpriv_ap->ampdu_enable; */

		/* check if sta support s Short GI 20M */
		if ((phtpriv_ap->ht_cap.cap_info) & cpu_to_le16(IEEE80211_HT_CAP_SGI_20))
			phtpriv_ap->sgi_20m = true;

		/* check if sta support s Short GI 40M */
		if ((phtpriv_ap->ht_cap.cap_info) & cpu_to_le16(IEEE80211_HT_CAP_SGI_40))
			phtpriv_ap->sgi_40m = true;

		psta->qos_option = true;
	} else {
		phtpriv_ap->ampdu_enable = false;

		phtpriv_ap->sgi_20m = false;
		phtpriv_ap->sgi_40m = false;
	}

	psta->bw_mode = pmlmeext->cur_bwmode;
	phtpriv_ap->ch_offset = pmlmeext->cur_ch_offset;

	phtpriv_ap->agg_enable_bitmap = 0x0;/* reset */
	phtpriv_ap->candidate_tid_bitmap = 0x0;/* reset */

	/* mirror the (just updated) AP HT state into the self entry */
	memcpy(&psta->htpriv, &pmlmepriv->htpriv, sizeof(struct ht_priv));
}
/*
 * update_hw_ht_param() - program A-MPDU factor/density registers from
 * the negotiated HT capabilities and cache the SM power-save mode.
 */
static void update_hw_ht_param(struct adapter *padapter)
{
	unsigned char max_AMPDU_len;
	unsigned char min_MPDU_spacing;
	struct mlme_ext_priv *pmlmeext = &padapter->mlmeextpriv;
	struct mlme_ext_info *pmlmeinfo = &(pmlmeext->mlmext_info);

	/* handle A-MPDU parameter field
	 *
	 * AMPDU_para [1:0]:Max AMPDU Len => 0:8k , 1:16k, 2:32k, 3:64k
	 * AMPDU_para [4:2]:Min MPDU Start Spacing
	 */
	max_AMPDU_len = pmlmeinfo->HT_caps.u.HT_cap_element.AMPDU_para & 0x03;

	min_MPDU_spacing = (pmlmeinfo->HT_caps.u.HT_cap_element.AMPDU_para & 0x1c) >> 2;

	rtw_hal_set_hwreg(padapter, HW_VAR_AMPDU_MIN_SPACE, (u8 *)(&min_MPDU_spacing));

	rtw_hal_set_hwreg(padapter, HW_VAR_AMPDU_FACTOR, (u8 *)(&max_AMPDU_len));

	/* */
	/* Config SM Power Save setting */
	/* */
	/* bits 2-3 of HT caps info = SM power save mode */
	pmlmeinfo->SM_PS = (le16_to_cpu(
		pmlmeinfo->HT_caps.u.HT_cap_element.HT_caps_info
	) & 0x0C) >> 2;

	/* */
	/* Config current HT Protection mode. */
	/* */
	/* pmlmeinfo->HT_protection = pmlmeinfo->HT_info.infos[1] & 0x3; */
}
/*
 * Bring up (or re-program) the AP BSS described by pmlmepriv->cur_network:
 * programs MSR/BSSID/EDCA/security/beacon-interval hardware registers,
 * derives channel/bandwidth from the beacon's HT Operation IE, mirrors the
 * network record into mlmext_info, and finally issues the first beacon once
 * the BSS is ready.  The register-write order below is load-bearing.
 */
void start_bss_network(struct adapter *padapter)
{
	u8 *p;
	u8 val8, cur_channel, cur_bwmode, cur_ch_offset;
	u16 bcn_interval;
	u32 acparm;
	int ie_len;
	struct registry_priv *pregpriv = &padapter->registrypriv;
	struct mlme_priv *pmlmepriv = &(padapter->mlmepriv);
	struct security_priv *psecuritypriv = &(padapter->securitypriv);
	struct wlan_bssid_ex
		*pnetwork = (struct wlan_bssid_ex *)&pmlmepriv->cur_network.network;
	struct mlme_ext_priv *pmlmeext = &(padapter->mlmeextpriv);
	struct mlme_ext_info *pmlmeinfo = &(pmlmeext->mlmext_info);
	struct wlan_bssid_ex *pnetwork_mlmeext = &(pmlmeinfo->network);
	struct HT_info_element *pht_info = NULL;
	u8 cbw40_enable = 0;

	bcn_interval = (u16)pnetwork->configuration.beacon_period;
	cur_channel = pnetwork->configuration.ds_config;
	cur_bwmode = CHANNEL_WIDTH_20;	/* default until HT Operation IE says otherwise */
	cur_ch_offset = HAL_PRIME_CHNL_OFFSET_DONT_CARE;

	/* check if there is wps ie, */
	/* if there is wpsie in beacon, the hostapd will update beacon twice when stating hostapd, */
	/* and at first time the security ie (RSN/WPA IE) will not include in beacon. */
	/* So only mark the BSS as started when no WPS IE is present yet. */
	if (!rtw_get_wps_ie(pnetwork->ies + _FIXED_IE_LENGTH_,
			    pnetwork->ie_length - _FIXED_IE_LENGTH_, NULL, NULL))
		pmlmeext->bstart_bss = true;

	/* todo: update wmm, ht cap */
	/* pmlmeinfo->WMM_enable; */
	/* pmlmeinfo->HT_enable; */
	if (pmlmepriv->qospriv.qos_option)
		pmlmeinfo->WMM_enable = true;
	if (pmlmepriv->htpriv.ht_option) {
		/* HT implies WMM/QoS support */
		pmlmeinfo->WMM_enable = true;
		pmlmeinfo->HT_enable = true;
		/* pmlmeinfo->HT_info_enable = true; */
		/* pmlmeinfo->HT_caps_enable = true; */
		update_hw_ht_param(padapter);
	}

	if (!pmlmepriv->cur_network.join_res) { /* setting only at first time */
		/* WEP Key will be set before this function, do not clear CAM. */
		if ((psecuritypriv->dot11PrivacyAlgrthm != _WEP40_) &&
		    (psecuritypriv->dot11PrivacyAlgrthm != _WEP104_))
			flush_all_cam_entry(padapter); /* clear CAM */
	}

	/* set MSR to AP_Mode */
	Set_MSR(padapter, _HW_STATE_AP_);

	/* Set BSSID REG */
	rtw_hal_set_hwreg(padapter, HW_VAR_BSSID, pnetwork->mac_address);

	/* Set EDCA param reg (per-AC TXOP/CW/AIFS packed words) */
	acparm = 0x002F3217; /* VO */
	rtw_hal_set_hwreg(padapter, HW_VAR_AC_PARAM_VO, (u8 *)(&acparm));
	acparm = 0x005E4317; /* VI */
	rtw_hal_set_hwreg(padapter, HW_VAR_AC_PARAM_VI, (u8 *)(&acparm));
	/* acparm = 0x00105320; // BE */
	acparm = 0x005ea42b;
	rtw_hal_set_hwreg(padapter, HW_VAR_AC_PARAM_BE, (u8 *)(&acparm));
	acparm = 0x0000A444; /* BK */
	rtw_hal_set_hwreg(padapter, HW_VAR_AC_PARAM_BK, (u8 *)(&acparm));

	/* Set Security: 0xcc for 802.1X (no hw enc of mgmt), 0xcf otherwise */
	val8 = (
		psecuritypriv->dot11AuthAlgrthm == dot11AuthAlgrthm_8021X
	) ? 0xcc : 0xcf;
	rtw_hal_set_hwreg(padapter, HW_VAR_SEC_CFG, (u8 *)(&val8));

	/* Beacon Control related register */
	rtw_hal_set_hwreg(padapter, HW_VAR_BEACON_INTERVAL, (u8 *)(&bcn_interval));

	rtw_hal_set_hwreg(padapter, HW_VAR_DO_IQK, NULL);

	if (!pmlmepriv->cur_network.join_res) { /* setting only at first time */
		/* u32 initialgain; */
		/* initialgain = 0x1e; */
		/* disable dynamic functions, such as high power, DIG */
		/* Save_DM_Func_Flag(padapter); */
		/* Switch_DM_Func(padapter, DYNAMIC_FUNC_DISABLE, false); */
		/* turn on all dynamic functions */
		Switch_DM_Func(padapter, DYNAMIC_ALL_FUNC_ENABLE, true);
		/* rtw_hal_set_hwreg(padapter, HW_VAR_INITIAL_GAIN, (u8 *)(&initialgain)); */
	}

	/* set channel, bwmode: derive 40 MHz use + secondary offset from HT Operation IE */
	p = rtw_get_ie((pnetwork->ies + sizeof(struct ndis_802_11_fix_ie)),
		       WLAN_EID_HT_OPERATION,
		       &ie_len,
		       (pnetwork->ie_length - sizeof(struct ndis_802_11_fix_ie))
	);
	if (p && ie_len) {
		pht_info = (struct HT_info_element *)(p + 2);

		/* bw_mode high nibble gates 5 GHz 40MHz, low nibble gates 2.4 GHz */
		if (cur_channel > 14) {
			if ((pregpriv->bw_mode & 0xf0) > 0)
				cbw40_enable = 1;
		} else {
			if ((pregpriv->bw_mode & 0x0f) > 0)
				cbw40_enable = 1;
		}

		/* infos[0] BIT(2): STA channel width; bits [1:0]: secondary channel offset */
		if ((cbw40_enable) && (pht_info->infos[0] & BIT(2))) {
			/* switch to the 40M Hz mode */
			/* pmlmeext->cur_bwmode = CHANNEL_WIDTH_40; */
			cur_bwmode = CHANNEL_WIDTH_40;
			switch (pht_info->infos[0] & 0x3) {
			case 1:
				/* pmlmeext->cur_ch_offset = HAL_PRIME_CHNL_OFFSET_LOWER; */
				cur_ch_offset = HAL_PRIME_CHNL_OFFSET_LOWER;
				break;
			case 3:
				/* pmlmeext->cur_ch_offset = HAL_PRIME_CHNL_OFFSET_UPPER; */
				cur_ch_offset = HAL_PRIME_CHNL_OFFSET_UPPER;
				break;
			default:
				/* pmlmeext->cur_ch_offset = HAL_PRIME_CHNL_OFFSET_DONT_CARE; */
				cur_ch_offset = HAL_PRIME_CHNL_OFFSET_DONT_CARE;
				break;
			}
		}
	}

	set_channel_bwmode(padapter, cur_channel, cur_ch_offset, cur_bwmode);
	pmlmeext->cur_channel = cur_channel;
	pmlmeext->cur_bwmode = cur_bwmode;
	pmlmeext->cur_ch_offset = cur_ch_offset;
	pmlmeext->cur_wireless_mode = pmlmepriv->cur_network.network_type;

	/* let pnetwork_mlmeext == pnetwork_mlme. */
	memcpy(pnetwork_mlmeext, pnetwork, pnetwork->length);

	/* update cur_wireless_mode */
	update_wireless_mode(padapter);

	/* update RRSR after set channel and bandwidth */
	UpdateBrateTbl(padapter, pnetwork->supported_rates);
	rtw_hal_set_hwreg(padapter, HW_VAR_BASIC_RATE, pnetwork->supported_rates);

	/* update capability after cur_wireless_mode updated */
	update_capinfo(
		padapter,
		rtw_get_capability((struct wlan_bssid_ex *)pnetwork)
	);

	if (pmlmeext->bstart_bss) {
		update_beacon(padapter, WLAN_EID_TIM, NULL, true);
		/* issue beacon frame */
		send_beacon(padapter);
	}

	/* update bc/mc sta_info */
	update_bmc_sta(padapter);

	/* pmlmeext->bstart_bss = true; */
}
/*
 * Parse a beacon blob supplied by userspace (hostapd) into the driver's
 * current-network record: validates length and AP mode, extracts SSID,
 * channel, rates, ERP, RSN/WPA security, WMM and HT IEs, then kicks off
 * the BSS via rtw_startbss_cmd() and allocates the AP's own sta_info.
 *
 * @pbuf/@len: raw beacon IEs (fixed fields included); len is bounded by
 *             MAX_IE_SZ to protect the fixed-size ies[] buffer.
 * Returns _SUCCESS or _FAIL.
 */
int rtw_check_beacon_data(struct adapter *padapter, u8 *pbuf, int len)
{
	int ret = _SUCCESS;
	u8 *p;
	u8 *pHT_caps_ie = NULL;
	u8 *pHT_info_ie = NULL;
	struct sta_info *psta = NULL;
	u16 cap, ht_cap = false;
	uint ie_len = 0;
	int group_cipher, pairwise_cipher;
	u8 channel, network_type, supportRate[NDIS_802_11_LENGTH_RATES_EX];
	int supportRateNum = 0;
	u8 OUI1[] = {0x00, 0x50, 0xf2, 0x01};		/* WPA (vendor) OUI+type */
	u8 WMM_PARA_IE[] = {0x00, 0x50, 0xf2, 0x02, 0x01, 0x01};	/* WMM Parameter element header */
	struct registry_priv *pregistrypriv = &padapter->registrypriv;
	struct security_priv *psecuritypriv = &padapter->securitypriv;
	struct mlme_priv *pmlmepriv = &(padapter->mlmepriv);
	struct wlan_bssid_ex
		*pbss_network = (struct wlan_bssid_ex *)&pmlmepriv->cur_network.network;
	u8 *ie = pbss_network->ies;

	/* only meaningful when we are operating as an AP */
	if (!check_fwstate(pmlmepriv, WIFI_AP_STATE))
		return _FAIL;

	/* bounds-check before copying into the fixed MAX_IE_SZ buffer */
	if (len < 0 || len > MAX_IE_SZ)
		return _FAIL;

	pbss_network->ie_length = len;
	memset(ie, 0, MAX_IE_SZ);
	memcpy(ie, pbuf, pbss_network->ie_length);

	if (pbss_network->infrastructure_mode != Ndis802_11APMode)
		return _FAIL;

	pbss_network->rssi = 0;

	memcpy(pbss_network->mac_address, myid(&(padapter->eeprompriv)), ETH_ALEN);

	/* beacon interval */
	p = rtw_get_beacon_interval_from_ie(ie);/* ie + 8;  8: TimeStamp, 2: Beacon Interval 2:Capability */
	/* pbss_network->configuration.beacon_period = le16_to_cpu(*(unsigned short*)p); */
	pbss_network->configuration.beacon_period = get_unaligned_le16(p);

	/* capability */
	/* cap = *(unsigned short *)rtw_get_capability_from_ie(ie); */
	/* cap = le16_to_cpu(cap); */
	cap = get_unaligned_le16(ie);

	/* SSID */
	p = rtw_get_ie(
		ie + _BEACON_IE_OFFSET_,
		WLAN_EID_SSID,
		&ie_len,
		(pbss_network->ie_length - _BEACON_IE_OFFSET_)
	);
	if (p && ie_len > 0) {
		memset(&pbss_network->ssid, 0, sizeof(struct ndis_802_11_ssid));
		memcpy(pbss_network->ssid.ssid, (p + 2), ie_len);
		pbss_network->ssid.ssid_length = ie_len;
	}

	/* channel: taken from the DS Parameter Set IE; 0 if absent */
	channel = 0;
	pbss_network->configuration.length = 0;
	p = rtw_get_ie(
		ie + _BEACON_IE_OFFSET_,
		WLAN_EID_DS_PARAMS, &ie_len,
		(pbss_network->ie_length - _BEACON_IE_OFFSET_)
	);
	if (p && ie_len > 0)
		channel = *(p + 2);
	pbss_network->configuration.ds_config = channel;

	memset(supportRate, 0, NDIS_802_11_LENGTH_RATES_EX);
	/* get supported rates */
	p = rtw_get_ie(
		ie + _BEACON_IE_OFFSET_,
		WLAN_EID_SUPP_RATES,
		&ie_len,
		(pbss_network->ie_length - _BEACON_IE_OFFSET_)
	);
	if (p) {
		memcpy(supportRate, p + 2, ie_len);
		supportRateNum = ie_len;
	}

	/* get ext_supported rates, appended after the basic set */
	p = rtw_get_ie(
		ie + _BEACON_IE_OFFSET_,
		WLAN_EID_EXT_SUPP_RATES,
		&ie_len,
		pbss_network->ie_length - _BEACON_IE_OFFSET_
	);
	if (p) {
		memcpy(supportRate + supportRateNum, p + 2, ie_len);
		supportRateNum += ie_len;
	}

	network_type = rtw_check_network_type(supportRate, supportRateNum, channel);
	rtw_set_supported_rate(pbss_network->supported_rates, network_type);

	/* parsing ERP_IE */
	p = rtw_get_ie(
		ie + _BEACON_IE_OFFSET_,
		WLAN_EID_ERP_INFO,
		&ie_len,
		(pbss_network->ie_length - _BEACON_IE_OFFSET_)
	);
	if (p && ie_len > 0)
		ERP_IE_handler(padapter, (struct ndis_80211_var_ie *)p);

	/* update privacy/security from the capability Privacy bit */
	if (cap & BIT(4))
		pbss_network->privacy = 1;
	else
		pbss_network->privacy = 0;

	psecuritypriv->wpa_psk = 0;

	/* wpa2 */
	group_cipher = 0; pairwise_cipher = 0;
	psecuritypriv->wpa2_group_cipher = _NO_PRIVACY_;
	psecuritypriv->wpa2_pairwise_cipher = _NO_PRIVACY_;
	p = rtw_get_ie(
		ie + _BEACON_IE_OFFSET_,
		WLAN_EID_RSN,
		&ie_len,
		(pbss_network->ie_length - _BEACON_IE_OFFSET_)
	);
	if (p && ie_len > 0) {
		if (rtw_parse_wpa2_ie(
			p,
			ie_len + 2,
			&group_cipher,
			&pairwise_cipher,
			NULL
		) == _SUCCESS) {
			psecuritypriv->dot11AuthAlgrthm = dot11AuthAlgrthm_8021X;

			psecuritypriv->dot8021xalg = 1;/* psk,  todo:802.1x */
			psecuritypriv->wpa_psk |= BIT(1);

			psecuritypriv->wpa2_group_cipher = group_cipher;
			psecuritypriv->wpa2_pairwise_cipher = pairwise_cipher;
		}
	}

	/* wpa */
	ie_len = 0;
	group_cipher = 0; pairwise_cipher = 0;
	psecuritypriv->wpa_group_cipher = _NO_PRIVACY_;
	psecuritypriv->wpa_pairwise_cipher = _NO_PRIVACY_;
	/*
	 * NOTE(review): the remaining-length bound below is computed from the
	 * previous iteration's ie_len rather than from p's offset; this matches
	 * the historical driver behavior — confirm before changing.
	 */
	for (p = ie + _BEACON_IE_OFFSET_; ; p += (ie_len + 2)) {
		p = rtw_get_ie(
			p,
			WLAN_EID_VENDOR_SPECIFIC,
			&ie_len,
			(pbss_network->ie_length - _BEACON_IE_OFFSET_ - (ie_len + 2))
		);
		if ((p) && (!memcmp(p + 2, OUI1, 4))) {
			if (rtw_parse_wpa_ie(
				p,
				ie_len + 2,
				&group_cipher,
				&pairwise_cipher,
				NULL
			) == _SUCCESS) {
				psecuritypriv->dot11AuthAlgrthm = dot11AuthAlgrthm_8021X;

				psecuritypriv->dot8021xalg = 1;/* psk,  todo:802.1x */

				psecuritypriv->wpa_psk |= BIT(0);

				psecuritypriv->wpa_group_cipher = group_cipher;
				psecuritypriv->wpa_pairwise_cipher = pairwise_cipher;
			}
			break;
		}
		if (!p || ie_len == 0)
			break;
	}

	/* wmm */
	ie_len = 0;
	pmlmepriv->qospriv.qos_option = 0;
	if (pregistrypriv->wmm_enable) {
		for (p = ie + _BEACON_IE_OFFSET_; ; p += (ie_len + 2)) {
			p = rtw_get_ie(
				p,
				WLAN_EID_VENDOR_SPECIFIC,
				&ie_len,
				(pbss_network->ie_length - _BEACON_IE_OFFSET_ - (ie_len + 2))
			);
			if ((p) && !memcmp(p + 2, WMM_PARA_IE, 6)) {
				pmlmepriv->qospriv.qos_option = 1;

				*(p + 8) |= BIT(7);/* QoS Info, support U-APSD */

				/* disable all ACM bits since the WMM admission */
				/* control is not supported */
				*(p + 10) &= ~BIT(4); /* BE */
				*(p + 14) &= ~BIT(4); /* BK */
				*(p + 18) &= ~BIT(4); /* VI */
				*(p + 22) &= ~BIT(4); /* VO */
				break;
			}
			if (!p || ie_len == 0)
				break;
		}
	}

	/* parsing HT_CAP_IE: mask out capabilities we don't support locally */
	p = rtw_get_ie(
		ie + _BEACON_IE_OFFSET_,
		WLAN_EID_HT_CAPABILITY,
		&ie_len,
		(pbss_network->ie_length - _BEACON_IE_OFFSET_)
	);
	if (p && ie_len > 0) {
		u8 max_rx_ampdu_factor = 0;
		struct ieee80211_ht_cap *pht_cap = (struct ieee80211_ht_cap *)(p + 2);

		pHT_caps_ie = p;

		ht_cap = true;
		network_type |= WIRELESS_11_24N;

		rtw_ht_use_default_setting(padapter);

		if (pmlmepriv->htpriv.sgi_20m == false)
			pht_cap->cap_info &= cpu_to_le16(~(IEEE80211_HT_CAP_SGI_20));

		if (pmlmepriv->htpriv.sgi_40m == false)
			pht_cap->cap_info &= cpu_to_le16(~(IEEE80211_HT_CAP_SGI_40));

		if (!TEST_FLAG(pmlmepriv->htpriv.ldpc_cap, LDPC_HT_ENABLE_RX))
			pht_cap->cap_info &= cpu_to_le16(~(IEEE80211_HT_CAP_LDPC_CODING));

		if (!TEST_FLAG(pmlmepriv->htpriv.stbc_cap, STBC_HT_ENABLE_TX))
			pht_cap->cap_info &= cpu_to_le16(~(IEEE80211_HT_CAP_TX_STBC));

		if (!TEST_FLAG(pmlmepriv->htpriv.stbc_cap, STBC_HT_ENABLE_RX))
			pht_cap->cap_info &= cpu_to_le16(~(IEEE80211_HT_CAP_RX_STBC_3R));

		/* rebuild the A-MPDU parameter byte from local policy */
		pht_cap->ampdu_params_info &= ~(
			IEEE80211_HT_CAP_AMPDU_FACTOR | IEEE80211_HT_CAP_AMPDU_DENSITY
		);

		/* larger MPDU density required when CCMP is in use */
		if ((psecuritypriv->wpa_pairwise_cipher & WPA_CIPHER_CCMP) ||
		    (psecuritypriv->wpa2_pairwise_cipher & WPA_CIPHER_CCMP)) {
			pht_cap->ampdu_params_info |= (IEEE80211_HT_CAP_AMPDU_DENSITY &
						       (0x07 << 2));
		} else {
			pht_cap->ampdu_params_info |= (IEEE80211_HT_CAP_AMPDU_DENSITY &
						       0x00);
		}

		rtw_hal_get_def_var(
			padapter,
			HW_VAR_MAX_RX_AMPDU_FACTOR,
			&max_rx_ampdu_factor
		);
		pht_cap->ampdu_params_info |= (
			IEEE80211_HT_CAP_AMPDU_FACTOR & max_rx_ampdu_factor
		); /* set  Max Rx AMPDU size  to 64K */

		pht_cap->mcs.rx_mask[0] = 0xff;
		pht_cap->mcs.rx_mask[1] = 0x0;

		memcpy(&pmlmepriv->htpriv.ht_cap, p + 2, ie_len);
	}

	/* parsing HT_INFO_IE */
	p = rtw_get_ie(
		ie + _BEACON_IE_OFFSET_,
		WLAN_EID_HT_OPERATION,
		&ie_len,
		(pbss_network->ie_length - _BEACON_IE_OFFSET_)
	);
	if (p && ie_len > 0)
		pHT_info_ie = p;

	switch (network_type) {
	case WIRELESS_11B:
		pbss_network->network_type_in_use = Ndis802_11DS;
		break;
	case WIRELESS_11G:
	case WIRELESS_11BG:
	case WIRELESS_11G_24N:
	case WIRELESS_11BG_24N:
		pbss_network->network_type_in_use = Ndis802_11OFDM24;
		break;
	default:
		pbss_network->network_type_in_use = Ndis802_11OFDM24;
		break;
	}

	pmlmepriv->cur_network.network_type = network_type;

	pmlmepriv->htpriv.ht_option = false;

	if ((psecuritypriv->wpa2_pairwise_cipher & WPA_CIPHER_TKIP) ||
	    (psecuritypriv->wpa_pairwise_cipher & WPA_CIPHER_TKIP)) {
		/* todo: */
		/* ht_cap = false; */
	}

	/* ht_cap */
	if (pregistrypriv->ht_enable && ht_cap) {
		pmlmepriv->htpriv.ht_option = true;
		pmlmepriv->qospriv.qos_option = 1;

		if (pregistrypriv->ampdu_enable == 1)
			pmlmepriv->htpriv.ampdu_enable = true;

		HT_caps_handler(padapter, (struct ndis_80211_var_ie *)pHT_caps_ie);

		HT_info_handler(padapter, (struct ndis_80211_var_ie *)pHT_info_ie);
	}

	pbss_network->length = get_wlan_bssid_ex_sz(
		(struct wlan_bssid_ex *)pbss_network
	);

	/* issue beacon to start bss network */
	/* start_bss_network(padapter, (u8 *)pbss_network); */
	rtw_startbss_cmd(padapter, RTW_CMDF_WAIT_ACK);

	/* alloc sta_info for ap itself */
	psta = rtw_get_stainfo(&padapter->stapriv, pbss_network->mac_address);
	if (!psta) {
		psta = rtw_alloc_stainfo(&padapter->stapriv, pbss_network->mac_address);
		if (!psta)
			return _FAIL;
	}

	/* update AP's sta info */
	update_ap_info(padapter, psta);

	psta->state |= WIFI_AP_STATE;	/* Aries, add, fix bug of flush_cam_entry at STOP AP mode , 0724 */
	rtw_indicate_connect(padapter);
	pmlmepriv->cur_network.join_res = true;/* for check if already set beacon */

	/* update bc/mc sta_info */
	/* update_bmc_sta(padapter); */

	return ret;
}
/* Select the MAC-address ACL policy (mode) for this interface. */
void rtw_set_macaddr_acl(struct adapter *padapter, int mode)
{
	padapter->stapriv.acl_list.mode = mode;
}
/*
 * Add @addr to the MAC-address ACL.
 * Returns 0 on success or when the address is already listed,
 * -1 when the ACL pool is full.
 */
int rtw_acl_add_sta(struct adapter *padapter, u8 *addr)
{
	struct sta_priv *pstapriv = &padapter->stapriv;
	struct wlan_acl_pool *pacl_list = &pstapriv->acl_list;
	struct __queue *pacl_node_q = &pacl_list->acl_node_q;
	struct rtw_wlan_acl_node *node;
	struct list_head *pos, *head;
	u8 already_listed = false;
	int i;

	if (pacl_list->num > (NUM_ACL - 1))
		return (-1);

	/* First pass: bail out quietly if the address is already present. */
	spin_lock_bh(&(pacl_node_q->lock));
	head = get_list_head(pacl_node_q);
	list_for_each(pos, head) {
		node = list_entry(pos, struct rtw_wlan_acl_node, list);
		if (!memcmp(node->addr, addr, ETH_ALEN) && node->valid == true) {
			already_listed = true;
			break;
		}
	}
	spin_unlock_bh(&(pacl_node_q->lock));

	if (already_listed)
		return 0;

	/* Second pass: claim the first free slot in the static node pool. */
	spin_lock_bh(&(pacl_node_q->lock));
	for (i = 0; i < NUM_ACL; i++) {
		node = &pacl_list->aclnode[i];
		if (node->valid)
			continue;

		INIT_LIST_HEAD(&node->list);
		memcpy(node->addr, addr, ETH_ALEN);
		node->valid = true;
		list_add_tail(&node->list, get_list_head(pacl_node_q));
		pacl_list->num++;
		break;
	}
	spin_unlock_bh(&(pacl_node_q->lock));

	return 0;
}
/*
 * Remove @addr from the MAC-address ACL.  Passing the broadcast address
 * invalidates every entry in one sweep.
 */
void rtw_acl_remove_sta(struct adapter *padapter, u8 *addr)
{
	struct sta_priv *pstapriv = &padapter->stapriv;
	struct wlan_acl_pool *pacl_list = &pstapriv->acl_list;
	struct __queue *pacl_node_q = &pacl_list->acl_node_q;
	struct rtw_wlan_acl_node *node;
	struct list_head *pos, *n, *head;
	bool flush_all = is_broadcast_ether_addr(addr);

	spin_lock_bh(&(pacl_node_q->lock));
	head = get_list_head(pacl_node_q);
	/* safe variant: entries are unlinked while walking */
	list_for_each_safe(pos, n, head) {
		node = list_entry(pos, struct rtw_wlan_acl_node, list);

		if (!flush_all && memcmp(node->addr, addr, ETH_ALEN))
			continue;

		if (node->valid) {
			node->valid = false;
			list_del_init(&node->list);
			pacl_list->num--;
		}
	}
	spin_unlock_bh(&(pacl_node_q->lock));
}
/*
 * Queue a firmware command that installs @psta's pairwise (unicast) key.
 * Returns _SUCCESS when enqueued, _FAIL on allocation failure.
 */
u8 rtw_ap_set_pairwise_key(struct adapter *padapter, struct sta_info *psta)
{
	struct cmd_priv *pcmdpriv = &padapter->cmdpriv;
	struct set_stakey_parm *parm;
	struct cmd_obj *cmd;

	cmd = rtw_zmalloc(sizeof(struct cmd_obj));
	if (!cmd)
		return _FAIL;

	parm = rtw_zmalloc(sizeof(struct set_stakey_parm));
	if (!parm) {
		kfree(cmd);
		return _FAIL;
	}

	init_h2fwcmd_w_parm_no_rsp(cmd, parm, _SetStaKey_CMD_);

	parm->algorithm = (u8)psta->dot118021XPrivacy;
	memcpy(parm->addr, psta->hwaddr, ETH_ALEN);
	memcpy(parm->key, &psta->dot118021x_UncstKey, 16);

	return rtw_enqueue_cmd(pcmdpriv, cmd);
}
/*
 * Build and enqueue a _SetKey_CMD_ firmware command for the given
 * algorithm/keyid.  Key length follows the algorithm (5 for WEP40,
 * 13 for WEP104, 16 otherwise).  Returns _SUCCESS or _FAIL.
 */
static int rtw_ap_set_key(
	struct adapter *padapter,
	u8 *key,
	u8 alg,
	int keyid,
	u8 set_tx
)
{
	struct cmd_priv *pcmdpriv = &(padapter->cmdpriv);
	struct setkey_parm *parm;
	struct cmd_obj *cmd;
	u8 keylen;

	cmd = rtw_zmalloc(sizeof(struct cmd_obj));
	if (!cmd)
		return _FAIL;

	parm = rtw_zmalloc(sizeof(struct setkey_parm));
	if (!parm) {
		kfree(cmd);
		return _FAIL;
	}

	parm->keyid = (u8)keyid;
	/* WEP keys are also recorded in the hw key mask for later re-download. */
	if (is_wep_enc(alg))
		padapter->securitypriv.key_mask |= BIT(parm->keyid);

	parm->algorithm = alg;
	parm->set_tx = set_tx;

	if (alg == _WEP40_)
		keylen = 5;
	else if (alg == _WEP104_)
		keylen = 13;
	else
		keylen = 16; /* TKIP / AES and anything else */
	memcpy(&parm->key[0], key, keylen);

	cmd->cmdcode = _SetKey_CMD_;
	cmd->parmbuf = (u8 *)parm;
	cmd->cmdsz = (sizeof(struct setkey_parm));
	cmd->rsp = NULL;
	cmd->rspsz = 0;
	INIT_LIST_HEAD(&cmd->list);

	return rtw_enqueue_cmd(pcmdpriv, cmd);
}
/* Install a group (broadcast/multicast) key: same as a normal set with set_tx = 1. */
int rtw_ap_set_group_key(struct adapter *padapter, u8 *key, u8 alg, int keyid)
{
	return rtw_ap_set_key(padapter, key, alg, keyid, 1);
}
/*
 * Install a WEP key, deriving the algorithm from the key length:
 * 5 bytes -> WEP40, 13 bytes -> WEP104, anything else -> no privacy.
 */
int rtw_ap_set_wep_key(
	struct adapter *padapter,
	u8 *key,
	u8 keylen,
	int keyid,
	u8 set_tx
)
{
	u8 alg = _NO_PRIVACY_;

	if (keylen == 5)
		alg = _WEP40_;
	else if (keylen == 13)
		alg = _WEP104_;

	return rtw_ap_set_key(padapter, key, alg, keyid, set_tx);
}
/* No-op placeholder: the fixed beacon fields are not rebuilt by this driver. */
static void update_bcn_fixed_ie(struct adapter *padapter)
{
}
/*
 * Refresh the ERP Information IE inside the cached beacon to mirror the
 * current non-ERP / long-preamble station counters, then hand the IE to
 * ERP_IE_handler() so the settings take effect.
 */
static void update_bcn_erpinfo_ie(struct adapter *padapter)
{
	struct mlme_priv *pmlmepriv = &padapter->mlmepriv;
	struct mlme_ext_priv *pmlmeext = &padapter->mlmeextpriv;
	struct mlme_ext_info *pmlmeinfo = &pmlmeext->mlmext_info;
	struct wlan_bssid_ex *pnetwork = &pmlmeinfo->network;
	struct ndis_80211_var_ie *erp_ie;
	unsigned char *ie = pnetwork->ies;
	unsigned char *p;
	u32 len = 0;

	if (!pmlmeinfo->ERP_enable)
		return;

	/* locate the ERP IE in the beacon's variable part */
	p = rtw_get_ie(ie + _BEACON_IE_OFFSET_,
		       WLAN_EID_ERP_INFO,
		       &len,
		       pnetwork->ie_length - _BEACON_IE_OFFSET_);
	if (!p || len == 0)
		return;

	erp_ie = (struct ndis_80211_var_ie *)p;

	/* protection bits are set while the non-ERP counter equals one */
	if (pmlmepriv->num_sta_non_erp == 1)
		erp_ie->data[0] |= RTW_ERP_INFO_NON_ERP_PRESENT |
				   RTW_ERP_INFO_USE_PROTECTION;
	else
		erp_ie->data[0] &= ~(
			RTW_ERP_INFO_NON_ERP_PRESENT | RTW_ERP_INFO_USE_PROTECTION
		);

	/* Barker preamble mode while any STA lacks short-preamble support */
	if (pmlmepriv->num_sta_no_short_preamble > 0)
		erp_ie->data[0] |= RTW_ERP_INFO_BARKER_PREAMBLE_MODE;
	else
		erp_ie->data[0] &= ~(RTW_ERP_INFO_BARKER_PREAMBLE_MODE);

	ERP_IE_handler(padapter, erp_ie);
}
/* No-op placeholder: the HT Capabilities beacon IE is not rebuilt here. */
static void update_bcn_htcap_ie(struct adapter *padapter)
{
}
/* No-op placeholder: the HT Operation beacon IE is not rebuilt here. */
static void update_bcn_htinfo_ie(struct adapter *padapter)
{
}
/* No-op placeholder: the RSN beacon IE is not rebuilt here. */
static void update_bcn_rsn_ie(struct adapter *padapter)
{
}
/* No-op placeholder: the WPA vendor beacon IE is not rebuilt here. */
static void update_bcn_wpa_ie(struct adapter *padapter)
{
}
/* No-op placeholder: the WMM vendor beacon IE is not rebuilt here. */
static void update_bcn_wmm_ie(struct adapter *padapter)
{
}
/*
 * Replace the WPS IE currently embedded in the cached beacon with the
 * (possibly differently sized) copy stored in pmlmepriv->wps_beacon_ie,
 * shifting any IEs that follow it and fixing up ie_length.
 * The tail is first saved to a temporary buffer because the in-place
 * copy may grow or shrink the WPS IE.
 */
static void update_bcn_wps_ie(struct adapter *padapter)
{
	u8 *pwps_ie = NULL;
	u8 *pwps_ie_src;
	u8 *premainder_ie;
	u8 *pbackup_remainder_ie = NULL;
	uint wps_ielen = 0, wps_offset, remainder_ielen;
	struct mlme_priv *pmlmepriv = &(padapter->mlmepriv);
	struct mlme_ext_priv *pmlmeext = &(padapter->mlmeextpriv);
	struct mlme_ext_info *pmlmeinfo = &(pmlmeext->mlmext_info);
	struct wlan_bssid_ex *pnetwork = &(pmlmeinfo->network);
	unsigned char *ie = pnetwork->ies;
	u32 ielen = pnetwork->ie_length;

	/* locate the existing WPS IE inside the beacon; nothing to do without one */
	pwps_ie = rtw_get_wps_ie(
		ie + _FIXED_IE_LENGTH_,
		ielen - _FIXED_IE_LENGTH_,
		NULL,
		&wps_ielen
	);

	if (!pwps_ie || wps_ielen == 0)
		return;

	/* replacement IE provided by userspace; bail if none was stored */
	pwps_ie_src = pmlmepriv->wps_beacon_ie;
	if (!pwps_ie_src)
		return;

	wps_offset = (uint)(pwps_ie - ie);		/* offset of the old WPS IE */

	premainder_ie = pwps_ie + wps_ielen;		/* IEs following the old WPS IE */

	remainder_ielen = ielen - wps_offset - wps_ielen;

	/* save the tail so it survives the in-place resize */
	if (remainder_ielen > 0) {
		pbackup_remainder_ie = rtw_malloc(remainder_ielen);
		if (pbackup_remainder_ie)
			memcpy(pbackup_remainder_ie, premainder_ie, remainder_ielen);
	}

	wps_ielen = (uint)pwps_ie_src[1];/* to get ie data len */
	/* only splice if the rebuilt beacon still fits the ies[] buffer */
	if ((wps_offset + wps_ielen + 2 + remainder_ielen) <= MAX_IE_SZ) {
		memcpy(pwps_ie, pwps_ie_src, wps_ielen + 2);
		pwps_ie += (wps_ielen+2);

		if (pbackup_remainder_ie)
			memcpy(pwps_ie, pbackup_remainder_ie, remainder_ielen);

		/* update ie_length */
		pnetwork->ie_length = wps_offset + (wps_ielen + 2) + remainder_ielen;
	}

	kfree(pbackup_remainder_ie);
}
/* No-op placeholder: the P2P vendor beacon IE is not rebuilt here. */
static void update_bcn_p2p_ie(struct adapter *padapter)
{
}
/* Dispatch a vendor-specific beacon IE refresh based on the 4-byte OUI+type. */
static void update_bcn_vendor_spec_ie(struct adapter *padapter, u8 *oui)
{
	if (memcmp(RTW_WPA_OUI, oui, 4) == 0)
		update_bcn_wpa_ie(padapter);
	else if (memcmp(WMM_OUI, oui, 4) == 0)
		update_bcn_wmm_ie(padapter);
	else if (memcmp(WPS_OUI, oui, 4) == 0)
		update_bcn_wps_ie(padapter);
	else if (memcmp(P2P_OUI, oui, 4) == 0)
		update_bcn_p2p_ie(padapter);
}
/*
 * Regenerate one element of the in-memory beacon, selected by @ie_id
 * (0xFF selects the fixed fields, WLAN_EID_VENDOR_SPECIFIC uses @oui to
 * pick the vendor IE).  When @tx is set, a beacon transmission is
 * scheduled through the command thread.  Does nothing before the BSS
 * has been started.
 */
void update_beacon(struct adapter *padapter, u8 ie_id, u8 *oui, u8 tx)
{
	struct mlme_priv *pmlmepriv;
	struct mlme_ext_priv *pmlmeext;
	/* struct mlme_ext_info *pmlmeinfo; */

	if (!padapter)
		return;

	pmlmepriv = &padapter->mlmepriv;
	pmlmeext = &padapter->mlmeextpriv;
	/* pmlmeinfo = &(pmlmeext->mlmext_info); */

	if (!pmlmeext->bstart_bss)
		return;

	spin_lock_bh(&pmlmepriv->bcn_update_lock);

	if (ie_id == 0xFF)
		update_bcn_fixed_ie(padapter);/* 8: TimeStamp, 2: Beacon Interval 2:Capability */
	else if (ie_id == WLAN_EID_TIM)
		update_BCNTIM(padapter);
	else if (ie_id == WLAN_EID_ERP_INFO)
		update_bcn_erpinfo_ie(padapter);
	else if (ie_id == WLAN_EID_HT_CAPABILITY)
		update_bcn_htcap_ie(padapter);
	else if (ie_id == WLAN_EID_RSN)
		update_bcn_rsn_ie(padapter);
	else if (ie_id == WLAN_EID_HT_OPERATION)
		update_bcn_htinfo_ie(padapter);
	else if (ie_id == WLAN_EID_VENDOR_SPECIFIC)
		update_bcn_vendor_spec_ie(padapter, oui);

	pmlmepriv->update_bcn = true;

	spin_unlock_bh(&pmlmepriv->bcn_update_lock);

	if (tx) {
		/* send_beacon(padapter);//send_beacon must execute on TSR level */
		set_tx_beacon_cmd(padapter);
	}
}
/*
* op_mode
* Set to 0 (HT pure) under the following conditions
* - all STAs in the BSS are 20/40 MHz HT in 20/40 MHz BSS or
* - all STAs in the BSS are 20 MHz HT in 20 MHz BSS
* Set to 1 (HT non-member protection) if there may be non-HT STAs
* in both the primary and the secondary channel
* Set to 2 if only HT STAs are associated in BSS,
* however and at least one 20 MHz HT STA is associated
* Set to 3 (HT mixed mode) when one or more non-HT STAs are associated
* (currently non-GF HT station is considered as non-HT STA also)
*/
/*
 * Recompute the HT operation mode bits (non-GF-present, non-HT-present,
 * protection level) from the station counters and fold them into
 * pmlmepriv->ht_op_mode.  Returns the number of changes made, so callers
 * know whether the HT beacon IEs must be refreshed.
 */
static int rtw_ht_operation_update(struct adapter *padapter)
{
	u16 cur_op_mode, new_op_mode;
	int op_mode_changes = 0;
	struct mlme_priv *pmlmepriv = &(padapter->mlmepriv);
	struct ht_priv *phtpriv_ap = &pmlmepriv->htpriv;

	/*
	 * NOTE(review): returning early when ht_option is *set* looks
	 * inverted (one would expect to skip the update when HT is
	 * disabled).  This matches the historical driver behavior -
	 * confirm against upstream before changing.
	 */
	if (pmlmepriv->htpriv.ht_option)
		return 0;

	/* toggle the non-greenfield-present bit with the counter */
	if (!(pmlmepriv->ht_op_mode & IEEE80211_HT_OP_MODE_NON_GF_STA_PRSNT)
	    && pmlmepriv->num_sta_ht_no_gf) {
		pmlmepriv->ht_op_mode |=
			IEEE80211_HT_OP_MODE_NON_GF_STA_PRSNT;
		op_mode_changes++;
	} else if ((pmlmepriv->ht_op_mode &
		    IEEE80211_HT_OP_MODE_NON_GF_STA_PRSNT) &&
		   pmlmepriv->num_sta_ht_no_gf == 0) {
		pmlmepriv->ht_op_mode &=
			~IEEE80211_HT_OP_MODE_NON_GF_STA_PRSNT;
		op_mode_changes++;
	}

	/* toggle the non-HT-present bit with the counter / overlapping-BSS flag */
	if (!(pmlmepriv->ht_op_mode & IEEE80211_HT_OP_MODE_NON_HT_STA_PRSNT) &&
	    (pmlmepriv->num_sta_no_ht || pmlmepriv->olbc_ht)) {
		pmlmepriv->ht_op_mode |= IEEE80211_HT_OP_MODE_NON_HT_STA_PRSNT;
		op_mode_changes++;
	} else if ((pmlmepriv->ht_op_mode &
		    IEEE80211_HT_OP_MODE_NON_HT_STA_PRSNT) &&
		   (pmlmepriv->num_sta_no_ht == 0 && !pmlmepriv->olbc_ht)) {
		pmlmepriv->ht_op_mode &=
			~IEEE80211_HT_OP_MODE_NON_HT_STA_PRSNT;
		op_mode_changes++;
	}

	/* Note: currently we switch to the MIXED op mode if HT non-greenfield
	 * station is associated. Probably it's a theoretical case, since
	 * it looks like all known HT STAs support greenfield.
	 */
	new_op_mode = 0;
	if (pmlmepriv->num_sta_no_ht ||
	    (pmlmepriv->ht_op_mode & IEEE80211_HT_OP_MODE_NON_GF_STA_PRSNT))
		new_op_mode = IEEE80211_HT_OP_MODE_PROTECTION_NONHT_MIXED;
	else if (
		(le16_to_cpu(phtpriv_ap->ht_cap.cap_info) & IEEE80211_HT_CAP_SUP_WIDTH)
		&& pmlmepriv->num_sta_ht_20mhz)
		new_op_mode = IEEE80211_HT_OP_MODE_PROTECTION_20MHZ;
	else if (pmlmepriv->olbc_ht)
		new_op_mode = IEEE80211_HT_OP_MODE_PROTECTION_NONMEMBER;
	else
		new_op_mode = IEEE80211_HT_OP_MODE_PROTECTION_NONE;

	/* fold the protection level into ht_op_mode only if it changed */
	cur_op_mode = pmlmepriv->ht_op_mode & IEEE80211_HT_OP_MODE_PROTECTION;
	if (cur_op_mode != new_op_mode) {
		pmlmepriv->ht_op_mode &= ~IEEE80211_HT_OP_MODE_PROTECTION;
		pmlmepriv->ht_op_mode |= new_op_mode;
		op_mode_changes++;
	}

	return op_mode_changes;
}
/*
 * When @updated is set, walk the association list and re-run VCS_update()
 * for every station so their settings reflect the new BSS state.
 */
void associated_clients_update(struct adapter *padapter, u8 updated)
{
	struct sta_priv *pstapriv = &padapter->stapriv;
	struct list_head *pos, *head;
	struct sta_info *psta = NULL;

	if (!updated)
		return;

	spin_lock_bh(&pstapriv->asoc_list_lock);
	head = &pstapriv->asoc_list;

	/* check asoc_queue */
	list_for_each(pos, head) {
		psta = list_entry(pos, struct sta_info, asoc_list);
		VCS_update(padapter, psta);
	}

	spin_unlock_bh(&pstapriv->asoc_list_lock);
}
/* called > TSR LEVEL for USB or SDIO Interface*/
/*
 * Account for a newly associated station's capabilities: maintains the
 * no-short-preamble / non-ERP / no-short-slot / HT counters, triggers a
 * beacon rebuild when a counter crosses its threshold, recomputes the HT
 * operation mode and finally refreshes all associated clients.
 */
void bss_cap_update_on_sta_join(struct adapter *padapter, struct sta_info *psta)
{
	u8 beacon_updated = false;
	struct mlme_priv *pmlmepriv = &(padapter->mlmepriv);
	struct mlme_ext_priv *pmlmeext = &(padapter->mlmeextpriv);

	/* short-preamble accounting: first long-preamble STA forces an update */
	if (!(psta->flags & WLAN_STA_SHORT_PREAMBLE)) {
		if (!psta->no_short_preamble_set) {
			psta->no_short_preamble_set = 1;

			pmlmepriv->num_sta_no_short_preamble++;

			if ((pmlmeext->cur_wireless_mode > WIRELESS_11B) &&
			    (pmlmepriv->num_sta_no_short_preamble == 1)) {
				beacon_updated = true;
				update_beacon(padapter, 0xFF, NULL, true);
			}
		}
	} else {
		if (psta->no_short_preamble_set) {
			psta->no_short_preamble_set = 0;

			pmlmepriv->num_sta_no_short_preamble--;

			if ((pmlmeext->cur_wireless_mode > WIRELESS_11B) &&
			    (pmlmepriv->num_sta_no_short_preamble == 0)) {
				beacon_updated = true;
				update_beacon(padapter, 0xFF, NULL, true);
			}
		}
	}

	/* non-ERP accounting: ERP Info IE follows the counter edges */
	if (psta->flags & WLAN_STA_NONERP) {
		if (!psta->nonerp_set) {
			psta->nonerp_set = 1;

			pmlmepriv->num_sta_non_erp++;

			if (pmlmepriv->num_sta_non_erp == 1) {
				beacon_updated = true;
				update_beacon(padapter, WLAN_EID_ERP_INFO, NULL, true);
			}
		}
	} else {
		if (psta->nonerp_set) {
			psta->nonerp_set = 0;

			pmlmepriv->num_sta_non_erp--;

			if (pmlmepriv->num_sta_non_erp == 0) {
				beacon_updated = true;
				update_beacon(padapter, WLAN_EID_ERP_INFO, NULL, true);
			}
		}
	}

	/* short-slot-time accounting */
	if (!(psta->capability & WLAN_CAPABILITY_SHORT_SLOT_TIME)) {
		if (!psta->no_short_slot_time_set) {
			psta->no_short_slot_time_set = 1;

			pmlmepriv->num_sta_no_short_slot_time++;

			if ((pmlmeext->cur_wireless_mode > WIRELESS_11B) &&
			    (pmlmepriv->num_sta_no_short_slot_time == 1)) {
				beacon_updated = true;
				update_beacon(padapter, 0xFF, NULL, true);
			}
		}
	} else {
		if (psta->no_short_slot_time_set) {
			psta->no_short_slot_time_set = 0;

			pmlmepriv->num_sta_no_short_slot_time--;

			if ((pmlmeext->cur_wireless_mode > WIRELESS_11B) &&
			    (pmlmepriv->num_sta_no_short_slot_time == 0)) {
				beacon_updated = true;
				update_beacon(padapter, 0xFF, NULL, true);
			}
		}
	}

	/* HT accounting: track greenfield-incapable and 20 MHz-only HT STAs */
	if (psta->flags & WLAN_STA_HT) {
		u16 ht_capab = le16_to_cpu(psta->htpriv.ht_cap.cap_info);

		if (psta->no_ht_set) {
			psta->no_ht_set = 0;
			pmlmepriv->num_sta_no_ht--;
		}

		if ((ht_capab & IEEE80211_HT_CAP_GRN_FLD) == 0) {
			if (!psta->no_ht_gf_set) {
				psta->no_ht_gf_set = 1;
				pmlmepriv->num_sta_ht_no_gf++;
			}
		}

		if ((ht_capab & IEEE80211_HT_CAP_SUP_WIDTH) == 0) {
			if (!psta->ht_20mhz_set) {
				psta->ht_20mhz_set = 1;
				pmlmepriv->num_sta_ht_20mhz++;
			}
		}
	} else {
		if (!psta->no_ht_set) {
			psta->no_ht_set = 1;
			pmlmepriv->num_sta_no_ht++;
		}
	}

	/* HT op mode changed: push fresh HT IEs into the beacon */
	if (rtw_ht_operation_update(padapter) > 0) {
		update_beacon(padapter, WLAN_EID_HT_CAPABILITY, NULL, false);
		update_beacon(padapter, WLAN_EID_HT_OPERATION, NULL, true);
	}

	/* update associated stations cap. */
	associated_clients_update(padapter, beacon_updated);
}
/*
 * Undo a departing station's contribution to the capability counters
 * (mirror of bss_cap_update_on_sta_join) and rebuild the affected beacon
 * elements.  Returns whether a beacon update was triggered.
 */
u8 bss_cap_update_on_sta_leave(struct adapter *padapter, struct sta_info *psta)
{
	u8 beacon_updated = false;
	struct mlme_priv *pmlmepriv = &(padapter->mlmepriv);
	struct mlme_ext_priv *pmlmeext = &(padapter->mlmeextpriv);

	if (!psta)
		return beacon_updated;

	/* last long-preamble STA left: refresh the fixed beacon fields */
	if (psta->no_short_preamble_set) {
		psta->no_short_preamble_set = 0;
		pmlmepriv->num_sta_no_short_preamble--;
		if (pmlmeext->cur_wireless_mode > WIRELESS_11B
		    && pmlmepriv->num_sta_no_short_preamble == 0){
			beacon_updated = true;
			update_beacon(padapter, 0xFF, NULL, true);
		}
	}

	/* last non-ERP STA left: refresh the ERP Info IE */
	if (psta->nonerp_set) {
		psta->nonerp_set = 0;
		pmlmepriv->num_sta_non_erp--;
		if (pmlmepriv->num_sta_non_erp == 0) {
			beacon_updated = true;
			update_beacon(padapter, WLAN_EID_ERP_INFO, NULL, true);
		}
	}

	/* last long-slot STA left: refresh the fixed beacon fields */
	if (psta->no_short_slot_time_set) {
		psta->no_short_slot_time_set = 0;
		pmlmepriv->num_sta_no_short_slot_time--;
		if (pmlmeext->cur_wireless_mode > WIRELESS_11B
		    && pmlmepriv->num_sta_no_short_slot_time == 0){
			beacon_updated = true;
			update_beacon(padapter, 0xFF, NULL, true);
		}
	}

	/* drop the station's HT bookkeeping flags and counters */
	if (psta->no_ht_gf_set) {
		psta->no_ht_gf_set = 0;
		pmlmepriv->num_sta_ht_no_gf--;
	}

	if (psta->no_ht_set) {
		psta->no_ht_set = 0;
		pmlmepriv->num_sta_no_ht--;
	}

	if (psta->ht_20mhz_set) {
		psta->ht_20mhz_set = 0;
		pmlmepriv->num_sta_ht_20mhz--;
	}

	/* HT op mode changed: push fresh HT IEs into the beacon */
	if (rtw_ht_operation_update(padapter) > 0) {
		update_beacon(padapter, WLAN_EID_HT_CAPABILITY, NULL, false);
		update_beacon(padapter, WLAN_EID_HT_OPERATION, NULL, true);
	}

	return beacon_updated;
}
/*
 * Tear down an associated station: when @active, notify it over the air
 * (DELBA both directions + deauth with @reason), then drop its keys and
 * aggregation state, report the departure, update BSS capability counters
 * and free the sta entry.  Returns whether the beacon was updated.
 */
u8 ap_free_sta(
	struct adapter *padapter,
	struct sta_info *psta,
	bool active,
	u16 reason
)
{
	u8 beacon_updated = false;

	if (!psta)
		return false;

	if (active) {
		/* tear down Rx AMPDU */
		send_delba(padapter, 0, psta->hwaddr);/* recipient */

		/* tear down TX AMPDU */
		send_delba(padapter, 1, psta->hwaddr);/* originator */

		issue_deauth(padapter, psta->hwaddr, reason);
	}

	/* reset per-TID aggregation bookkeeping */
	psta->htpriv.agg_enable_bitmap = 0x0;
	psta->htpriv.candidate_tid_bitmap = 0x0;

	/* report_del_sta_event(padapter, psta->hwaddr, reason); */

	/* clear cam entry / key */
	rtw_clearstakey_cmd(padapter, psta, true);

	spin_lock_bh(&psta->lock);
	psta->state &= ~_FW_LINKED;
	spin_unlock_bh(&psta->lock);

	rtw_cfg80211_indicate_sta_disassoc(padapter, psta->hwaddr, reason);
	report_del_sta_event(padapter, psta->hwaddr, reason);

	beacon_updated = bss_cap_update_on_sta_leave(padapter, psta);

	rtw_free_stainfo(padapter, psta);

	return beacon_updated;
}
/*
 * Deauthenticate and free every station on the association list, then
 * broadcast a final deauth and refresh the remaining clients' state.
 * Only runs while the firmware is in AP state.
 */
void rtw_sta_flush(struct adapter *padapter)
{
	struct sta_priv *pstapriv = &padapter->stapriv;
	struct mlme_ext_priv *pmlmeext = &padapter->mlmeextpriv;
	struct mlme_ext_info *pmlmeinfo = &(pmlmeext->mlmext_info);
	u8 bc_addr[ETH_ALEN] = {0xff, 0xff, 0xff, 0xff, 0xff, 0xff};
	struct list_head *pos, *n, *head;
	struct sta_info *psta = NULL;

	if ((pmlmeinfo->state & 0x03) != WIFI_FW_AP_STATE)
		return;

	spin_lock_bh(&pstapriv->asoc_list_lock);
	head = &pstapriv->asoc_list;

	/* free sta asoc_queue */
	list_for_each_safe(pos, n, head) {
		psta = list_entry(pos, struct sta_info, asoc_list);

		list_del_init(&psta->asoc_list);
		pstapriv->asoc_list_cnt--;

		/* spin_unlock_bh(&pstapriv->asoc_list_lock); */
		ap_free_sta(padapter, psta, true, WLAN_REASON_DEAUTH_LEAVING);
		/* spin_lock_bh(&pstapriv->asoc_list_lock); */
	}
	spin_unlock_bh(&pstapriv->asoc_list_lock);

	issue_deauth(padapter, bc_addr, WLAN_REASON_DEAUTH_LEAVING);

	associated_clients_update(padapter, true);
}
/* called > TSR LEVEL for USB or SDIO Interface*/
/*
 * Refresh a station's QoS and HT options from its association flags,
 * clamped by what our own BSS enables, then push the result to the hal
 * layer via update_sta_info_apmode().
 */
void sta_info_update(struct adapter *padapter, struct sta_info *psta)
{
	struct mlme_priv *pmlmepriv = &padapter->mlmepriv;
	int flags = psta->flags;

	/* WMM capability, honored only if our BSS has QoS enabled */
	psta->qos_option = (flags & WLAN_STA_WME) ? 1 : 0;
	if (pmlmepriv->qospriv.qos_option == 0)
		psta->qos_option = 0;

	/* HT implies QoS; again clamped by our own HT option */
	if (flags & WLAN_STA_HT) {
		psta->htpriv.ht_option = true;
		psta->qos_option = 1;
	} else {
		psta->htpriv.ht_option = false;
	}

	if (!pmlmepriv->htpriv.ht_option)
		psta->htpriv.ht_option = false;

	update_sta_info_apmode(padapter, psta);
}
/* called >= TSR LEVEL for USB or SDIO Interface*/
/*
 * Deferred per-station setup: once the station is firmware-linked,
 * record it in the firmware station table and program its rate
 * adaptive entry.
 */
void ap_sta_info_defer_update(struct adapter *padapter, struct sta_info *psta)
{
	struct mlme_ext_priv *pmlmeext = &padapter->mlmeextpriv;
	struct mlme_ext_info *pmlmeinfo = &pmlmeext->mlmext_info;

	if (!(psta->state & _FW_LINKED))
		return;

	pmlmeinfo->FW_sta_info[psta->mac_id].psta = psta;

	/* add ratid */
	add_RATid(padapter, psta, 0);/* DM_RATR_STA_INIT */
}
/* restore hw setting from sw data structures */
/*
 * Restore AP hardware state from the driver's software structures after
 * the hardware lost it (the caller context determines when, e.g. power
 * transitions): re-selects AP op mode and channel, restarts the BSS,
 * re-installs the group key for TKIP/AES, and re-programs rate-adaptive
 * entries and pairwise keys for every still-linked station.
 */
void rtw_ap_restore_network(struct adapter *padapter)
{
	struct mlme_ext_priv *pmlmeext = &padapter->mlmeextpriv;
	struct sta_priv *pstapriv = &padapter->stapriv;
	struct sta_info *psta;
	struct security_priv *psecuritypriv = &(padapter->securitypriv);
	struct list_head *phead, *plist;
	u8 chk_alive_num = 0;
	char chk_alive_list[NUM_STA];
	int i;

	rtw_setopmode_cmd(padapter, Ndis802_11APMode, false);

	set_channel_bwmode(
		padapter,
		pmlmeext->cur_channel,
		pmlmeext->cur_ch_offset,
		pmlmeext->cur_bwmode
	);

	start_bss_network(padapter);

	if ((padapter->securitypriv.dot11PrivacyAlgrthm == _TKIP_) ||
	    (padapter->securitypriv.dot11PrivacyAlgrthm == _AES_)) {
		/* restore group key, WEP keys is restored in ips_leave() */
		rtw_set_key(
			padapter,
			psecuritypriv,
			psecuritypriv->dot118021XGrpKeyid,
			0,
			false
		);
	}

	/* snapshot the association list under the lock; the per-station work
	 * below is done outside it using stainfo offsets */
	spin_lock_bh(&pstapriv->asoc_list_lock);

	phead = &pstapriv->asoc_list;
	list_for_each(plist, phead) {
		int stainfo_offset;

		psta = list_entry(plist, struct sta_info, asoc_list);

		stainfo_offset = rtw_stainfo_offset(pstapriv, psta);
		if (stainfo_offset_valid(stainfo_offset))
			chk_alive_list[chk_alive_num++] = stainfo_offset;
	}

	spin_unlock_bh(&pstapriv->asoc_list_lock);

	for (i = 0; i < chk_alive_num; i++) {
		psta = rtw_get_stainfo_by_offset(pstapriv, chk_alive_list[i]);

		if (!psta)
			continue;

		if (psta->state & _FW_LINKED) {
			rtw_sta_media_status_rpt(padapter, psta, 1);
			Update_RA_Entry(padapter, psta);
			/* pairwise key */
			/* per sta pairwise key and settings */
			if ((psecuritypriv->dot11PrivacyAlgrthm == _TKIP_) ||
			    (psecuritypriv->dot11PrivacyAlgrthm == _AES_)) {
				rtw_setstakey_cmd(padapter, psta, true, false);
			}
		}
	}
}
void start_ap_mode(struct adapter *padapter)
{
	int idx;
	struct mlme_priv *pmlmepriv = &padapter->mlmepriv;
	struct sta_priv *pstapriv = &padapter->stapriv;
	struct mlme_ext_priv *pmlmeext = &padapter->mlmeextpriv;
	struct wlan_acl_pool *pacl_list = &pstapriv->acl_list;

	/* Reset beacon/BSS bookkeeping before AP operation starts. */
	pmlmepriv->update_bcn = false;
	pmlmeext->bstart_bss = false;

	/* Clear protection-mode station accounting. */
	pmlmepriv->num_sta_non_erp = 0;
	pmlmepriv->num_sta_no_short_slot_time = 0;
	pmlmepriv->num_sta_no_short_preamble = 0;
	pmlmepriv->num_sta_ht_no_gf = 0;
	pmlmepriv->num_sta_no_ht = 0;
	pmlmepriv->num_sta_ht_20mhz = 0;
	pmlmepriv->olbc = false;
	pmlmepriv->olbc_ht = false;
	pmlmepriv->ht_op_mode = 0;

	/* No station owns any AID yet. */
	for (idx = 0; idx < NUM_STA; idx++)
		pstapriv->sta_aid[idx] = NULL;

	/* Drop references to WPS/P2P IEs from any previous AP session. */
	pmlmepriv->wps_beacon_ie = NULL;
	pmlmepriv->wps_probe_resp_ie = NULL;
	pmlmepriv->wps_assoc_resp_ie = NULL;
	pmlmepriv->p2p_beacon_ie = NULL;
	pmlmepriv->p2p_probe_resp_ie = NULL;

	/* Reset the MAC ACL pool to "empty, no filtering". */
	INIT_LIST_HEAD(&pacl_list->acl_node_q.queue);
	pacl_list->num = 0;
	pacl_list->mode = 0;
	for (idx = 0; idx < NUM_ACL; idx++) {
		INIT_LIST_HEAD(&pacl_list->aclnode[idx].list);
		pacl_list->aclnode[idx].valid = false;
	}
}
void stop_ap_mode(struct adapter *padapter)
{
	struct list_head *phead, *plist, *tmp;
	struct rtw_wlan_acl_node *paclnode;
	struct sta_info *psta = NULL;
	struct sta_priv *pstapriv = &padapter->stapriv;
	struct mlme_priv *pmlmepriv = &(padapter->mlmepriv);
	struct mlme_ext_priv *pmlmeext = &padapter->mlmeextpriv;
	struct wlan_acl_pool *pacl_list = &pstapriv->acl_list;
	struct __queue *pacl_node_q = &pacl_list->acl_node_q;

	pmlmepriv->update_bcn = false;
	pmlmeext->bstart_bss = false;

	/* reset and init security priv , this can refine with rtw_reset_securitypriv */
	memset(
		(unsigned char *)&padapter->securitypriv,
		0,
		sizeof(struct security_priv)
	);
	padapter->securitypriv.ndisauthtype = Ndis802_11AuthModeOpen;
	padapter->securitypriv.ndisencryptstatus = Ndis802_11WEPDisabled;

	/* for ACL: invalidate and unlink every ACL node under the queue lock */
	spin_lock_bh(&(pacl_node_q->lock));
	phead = get_list_head(pacl_node_q);
	list_for_each_safe(plist, tmp, phead) {
		paclnode = list_entry(plist, struct rtw_wlan_acl_node, list);
		if (paclnode->valid) {
			paclnode->valid = false;
			list_del_init(&paclnode->list);
			pacl_list->num--;
		}
	}
	spin_unlock_bh(&(pacl_node_q->lock));

	/* Deauth and free every associated station... */
	rtw_sta_flush(padapter);

	/* free_assoc_sta_resources */
	rtw_free_all_stainfo(padapter);

	/* ...then recreate the broadcast/multicast pseudo-station entry. */
	psta = rtw_get_bcmc_stainfo(padapter);
	rtw_free_stainfo(padapter, psta);

	rtw_init_bcmc_stainfo(padapter);

	rtw_free_mlme_priv_ie_data(pmlmepriv);

	rtw_btcoex_MediaStatusNotify(padapter, 0); /* disconnect */
}
/* ==== end of drivers/staging/rtl8723bs/core/rtw_ap.c (linux-master); rtw_recv.c follows ==== */
// SPDX-License-Identifier: GPL-2.0
/******************************************************************************
*
* Copyright(c) 2007 - 2012 Realtek Corporation. All rights reserved.
*
******************************************************************************/
#include <drv_types.h>
#include <rtw_debug.h>
#include <linux/jiffies.h>
#include <rtw_recv.h>
#include <net/cfg80211.h>
#include <asm/unaligned.h>
static u8 SNAP_ETH_TYPE_IPX[2] = {0x81, 0x37};
static u8 SNAP_ETH_TYPE_APPLETALK_AARP[2] = {0x80, 0xf3};
static void rtw_signal_stat_timer_hdl(struct timer_list *t);
void _rtw_init_sta_recv_priv(struct sta_recv_priv *psta_recvpriv)
{
	/* Start from a clean slate, then initialize the embedded lock and
	 * per-station defragmentation queue.
	 */
	memset((u8 *)psta_recvpriv, 0, sizeof(struct sta_recv_priv));

	spin_lock_init(&psta_recvpriv->lock);

	spin_lock_init(&psta_recvpriv->defrag_q.lock);
	INIT_LIST_HEAD(&psta_recvpriv->defrag_q.queue);
}
signed int _rtw_init_recv_priv(struct recv_priv *precvpriv, struct adapter *padapter)
{
	signed int i;
	union recv_frame *precvframe;
	signed int res = _SUCCESS;

	spin_lock_init(&precvpriv->lock);

	/* Set up the three receive queues and their locks. */
	INIT_LIST_HEAD(&precvpriv->free_recv_queue.queue);
	spin_lock_init(&precvpriv->free_recv_queue.lock);
	INIT_LIST_HEAD(&precvpriv->recv_pending_queue.queue);
	spin_lock_init(&precvpriv->recv_pending_queue.lock);
	INIT_LIST_HEAD(&precvpriv->uc_swdec_pending_queue.queue);
	spin_lock_init(&precvpriv->uc_swdec_pending_queue.lock);

	precvpriv->adapter = padapter;

	precvpriv->free_recvframe_cnt = NR_RECVFRAME;

	/* One contiguous allocation for all recv_frames, over-sized by
	 * RXFRAME_ALIGN_SZ so the frame array can be aligned below.
	 */
	precvpriv->pallocated_frame_buf = vzalloc(NR_RECVFRAME * sizeof(union recv_frame) + RXFRAME_ALIGN_SZ);

	if (!precvpriv->pallocated_frame_buf) {
		res = _FAIL;
		goto exit;
	}

	precvpriv->precv_frame_buf = (u8 *)N_BYTE_ALIGMENT((SIZE_PTR)(precvpriv->pallocated_frame_buf), RXFRAME_ALIGN_SZ);
	/* precvpriv->precv_frame_buf = precvpriv->pallocated_frame_buf + RXFRAME_ALIGN_SZ - */
	/* ((SIZE_PTR) (precvpriv->pallocated_frame_buf) &(RXFRAME_ALIGN_SZ-1)); */

	/* Put every frame on the free queue and attach OS rx resources. */
	precvframe = (union recv_frame *) precvpriv->precv_frame_buf;

	for (i = 0; i < NR_RECVFRAME; i++) {
		INIT_LIST_HEAD(&(precvframe->u.list));

		list_add_tail(&(precvframe->u.list), &(precvpriv->free_recv_queue.queue));

		rtw_os_recv_resource_alloc(padapter, precvframe);

		precvframe->u.hdr.len = 0;

		precvframe->u.hdr.adapter = padapter;
		precvframe++;
	}

	res = rtw_hal_init_recv_priv(padapter);

	/* Periodic signal-statistics sampling (RSSI smoothing). */
	timer_setup(&precvpriv->signal_stat_timer, rtw_signal_stat_timer_hdl,
		    0);

	precvpriv->signal_stat_sampling_interval = 2000; /* ms */

	rtw_set_signal_stat_timer(precvpriv);

exit:
	return res;
}
void _rtw_free_recv_priv(struct recv_priv *precvpriv)
{
	struct adapter *padapter = precvpriv->adapter;

	/* Return any unicast software-decrypt pending frames to the free
	 * pool before the per-frame OS resources are released.
	 */
	rtw_free_uc_swdec_pending_queue(padapter);

	rtw_os_recv_resource_free(precvpriv);

	/* Frees the whole recv_frame array allocated in _rtw_init_recv_priv(). */
	vfree(precvpriv->pallocated_frame_buf);

	rtw_hal_free_recv_priv(padapter);
}
union recv_frame *_rtw_alloc_recvframe(struct __queue *pfree_recv_queue)
{
	struct list_head *first;
	union recv_frame *frame;
	struct adapter *owner;

	/* Unlocked variant: the caller must hold the queue lock
	 * (see rtw_alloc_recvframe()).
	 */
	if (list_empty(&pfree_recv_queue->queue))
		return NULL;

	first = get_next(get_list_head(pfree_recv_queue));
	frame = (union recv_frame *)first;
	list_del_init(&frame->u.hdr.list);

	/* Keep the free-frame count in sync when taking from the pool. */
	owner = frame->u.hdr.adapter;
	if (owner && pfree_recv_queue == &owner->recvpriv.free_recv_queue)
		owner->recvpriv.free_recvframe_cnt--;

	return frame;
}
union recv_frame *rtw_alloc_recvframe(struct __queue *pfree_recv_queue)
{
	union recv_frame *frame;

	/* Locked wrapper around _rtw_alloc_recvframe(). */
	spin_lock_bh(&pfree_recv_queue->lock);
	frame = _rtw_alloc_recvframe(pfree_recv_queue);
	spin_unlock_bh(&pfree_recv_queue->lock);

	return frame;
}
/*
 * Return @precvframe to @pfree_recv_queue, releasing its OS resources
 * first. Always returns _SUCCESS.
 */
int rtw_free_recvframe(union recv_frame *precvframe, struct __queue *pfree_recv_queue)
{
	struct adapter *padapter = precvframe->u.hdr.adapter;

	rtw_os_free_recvframe(precvframe);

	spin_lock_bh(&pfree_recv_queue->lock);

	list_del_init(&(precvframe->u.hdr.list));

	precvframe->u.hdr.len = 0;

	list_add_tail(&(precvframe->u.hdr.list), get_list_head(pfree_recv_queue));

	/* Only form &padapter->recvpriv after the NULL test; the old code
	 * computed it from a possibly-NULL adapter pointer up front.
	 */
	if (padapter) {
		struct recv_priv *precvpriv = &padapter->recvpriv;

		if (pfree_recv_queue == &precvpriv->free_recv_queue)
			precvpriv->free_recvframe_cnt++;
	}

	spin_unlock_bh(&pfree_recv_queue->lock);

	return _SUCCESS;
}
/*
 * Move @precvframe onto the tail of @queue (caller holds the queue lock).
 * Keeps the free-frame count in sync when @queue is the free pool.
 * Always returns _SUCCESS.
 */
signed int _rtw_enqueue_recvframe(union recv_frame *precvframe, struct __queue *queue)
{
	struct adapter *padapter = precvframe->u.hdr.adapter;

	list_del_init(&(precvframe->u.hdr.list));
	list_add_tail(&(precvframe->u.hdr.list), get_list_head(queue));

	/* Only form &padapter->recvpriv after the NULL test; the old code
	 * computed it from a possibly-NULL adapter pointer up front.
	 */
	if (padapter) {
		struct recv_priv *precvpriv = &padapter->recvpriv;

		if (queue == &precvpriv->free_recv_queue)
			precvpriv->free_recvframe_cnt++;
	}

	return _SUCCESS;
}
signed int rtw_enqueue_recvframe(union recv_frame *precvframe, struct __queue *queue)
{
	signed int ret;

	/* Locked wrapper around _rtw_enqueue_recvframe(). */
	spin_lock_bh(&queue->lock);
	ret = _rtw_enqueue_recvframe(precvframe, queue);
	spin_unlock_bh(&queue->lock);

	return ret;
}
/*
 * caller : defrag ; recvframe_chk_defrag in recv_thread (passive)
 * pframequeue: defrag_queue : will be accessed in recv_thread (passive)
 *
 * Returns every frame on @pframequeue to @pfree_recv_queue, under
 * @pframequeue's spinlock.
 */
void rtw_free_recvframe_queue(struct __queue *pframequeue, struct __queue *pfree_recv_queue)
{
	struct list_head *head, *pos;

	spin_lock(&pframequeue->lock);

	head = get_list_head(pframequeue);
	pos = get_next(head);

	while (pos != head) {
		union recv_frame *frame = (union recv_frame *)pos;

		/* advance before the node is moved to the free queue */
		pos = get_next(pos);
		rtw_free_recvframe(frame, pfree_recv_queue);
	}

	spin_unlock(&pframequeue->lock);
}
u32 rtw_free_uc_swdec_pending_queue(struct adapter *adapter)
{
u32 cnt = 0;
union recv_frame *pending_frame;
while ((pending_frame = rtw_alloc_recvframe(&adapter->recvpriv.uc_swdec_pending_queue))) {
rtw_free_recvframe(pending_frame, &adapter->recvpriv.free_recv_queue);
cnt++;
}
return cnt;
}
signed int rtw_enqueue_recvbuf_to_head(struct recv_buf *precvbuf, struct __queue *queue)
{
	/* Move the buffer to the FRONT of @queue (requeue-first semantics). */
	spin_lock_bh(&queue->lock);

	list_del_init(&precvbuf->list);
	list_add(&precvbuf->list, get_list_head(queue));

	spin_unlock_bh(&queue->lock);

	return _SUCCESS;
}
signed int rtw_enqueue_recvbuf(struct recv_buf *precvbuf, struct __queue *queue)
{
	/* Move the buffer to the TAIL of @queue (normal FIFO order). */
	spin_lock_bh(&queue->lock);

	list_del_init(&precvbuf->list);
	list_add_tail(&precvbuf->list, get_list_head(queue));

	spin_unlock_bh(&queue->lock);

	return _SUCCESS;
}
struct recv_buf *rtw_dequeue_recvbuf(struct __queue *queue)
{
	struct recv_buf *buf = NULL;

	/* Pop the first buffer from @queue, or NULL when it is empty. */
	spin_lock_bh(&queue->lock);

	if (!list_empty(&queue->queue)) {
		struct list_head *first = get_next(get_list_head(queue));

		buf = container_of(first, struct recv_buf, list);
		list_del_init(&buf->list);
	}

	spin_unlock_bh(&queue->lock);

	return buf;
}
/*
 * Verify the TKIP Michael MIC of a received frame and strip it from the
 * tail. Returns _SUCCESS when the MIC matches (or the frame is not TKIP),
 * _FAIL on MIC mismatch or when the group key is not yet installed.
 */
static signed int recvframe_chkmic(struct adapter *adapter, union recv_frame *precvframe)
{
	signed int i, res = _SUCCESS;
	u32 datalen;
	u8 miccode[8];
	u8 bmic_err = false, brpt_micerror = true;
	u8 *pframe, *payload, *pframemic;
	u8 *mickey;
	struct sta_info *stainfo;
	struct rx_pkt_attrib *prxattrib = &precvframe->u.hdr.attrib;
	struct security_priv *psecuritypriv = &adapter->securitypriv;
	struct mlme_ext_priv *pmlmeext = &adapter->mlmeextpriv;
	struct mlme_ext_info *pmlmeinfo = &(pmlmeext->mlmext_info);

	stainfo = rtw_get_stainfo(&adapter->stapriv, &prxattrib->ta[0]);

	if (prxattrib->encrypt == _TKIP_) {
		/* calculate mic code */
		if (stainfo) {
			if (is_multicast_ether_addr(prxattrib->ra)) {
				/* group traffic: use the group rx MIC key for
				 * the key id signalled in the frame's IV
				 */
				mickey = &psecuritypriv->dot118021XGrprxmickey[prxattrib->key_index].skey[0];

				/* fail until the handshake has installed a group key */
				if (psecuritypriv->binstallGrpkey == false) {
					res = _FAIL;
					goto exit;
				}
			} else {
				/* unicast: per-station pairwise rx MIC key */
				mickey = &stainfo->dot11tkiprxmickey.skey[0];
			}

			datalen = precvframe->u.hdr.len-prxattrib->hdrlen-prxattrib->iv_len-prxattrib->icv_len-8;/* icv_len included the mic code */
			pframe = precvframe->u.hdr.rx_data;
			payload = pframe+prxattrib->hdrlen+prxattrib->iv_len;

			rtw_seccalctkipmic(mickey, pframe, payload, datalen, &miccode[0], (unsigned char)prxattrib->priority); /* care the length of the data */

			pframemic = payload+datalen;

			bmic_err = false;

			for (i = 0; i < 8; i++) {
				if (miccode[i] != *(pframemic + i))
					bmic_err = true;
			}

			if (bmic_err == true) {
				/* double check key_index for some timing issue , */
				/* cannot compare with psecuritypriv->dot118021XGrpKeyid also cause timing issue */
				if ((is_multicast_ether_addr(prxattrib->ra) == true) && (prxattrib->key_index != pmlmeinfo->key_index))
					brpt_micerror = false;

				/* only report MIC errors on frames HW actually decrypted */
				if (prxattrib->bdecrypted && brpt_micerror)
					rtw_handle_tkip_mic_err(adapter, (u8)is_multicast_ether_addr(prxattrib->ra));

				res = _FAIL;
			} else {
				/* mic checked ok */
				if (!psecuritypriv->bcheck_grpkey &&
				    is_multicast_ether_addr(prxattrib->ra))
					psecuritypriv->bcheck_grpkey = true;
			}
		}

		/* NOTE(review): the 8-byte MIC is stripped even when stainfo
		 * was not found, i.e. without verification in that case.
		 */
		recvframe_pull_tail(precvframe, 8);
	}

exit:
	return res;
}
/* decrypt and set the ivlen, icvlen of the recv_frame */
static union recv_frame *decryptor(struct adapter *padapter, union recv_frame *precv_frame)
{
	struct rx_pkt_attrib *prxattrib = &precv_frame->u.hdr.attrib;
	struct security_priv *psecuritypriv = &padapter->securitypriv;
	union recv_frame *return_packet = precv_frame;
	u32 res = _SUCCESS;

	if (prxattrib->encrypt > 0) {
		/* Extended IV: bits 6..7 of iv[3] carry the key id (0..3). */
		u8 *iv = precv_frame->u.hdr.rx_data + prxattrib->hdrlen;

		prxattrib->key_index = (((iv[3]) >> 6) & 0x3);

		/* Valid indices are 0..WEP_KEYS-1, so use >= as the bound;
		 * the previous '>' comparison would have admitted
		 * key_index == WEP_KEYS (currently unreachable due to the
		 * 0x3 mask above, but wrong as a bounds check).
		 */
		if (prxattrib->key_index >= WEP_KEYS) {
			switch (prxattrib->encrypt) {
			case _WEP40_:
			case _WEP104_:
				prxattrib->key_index = psecuritypriv->dot11PrivacyKeyIndex;
				break;
			case _TKIP_:
			case _AES_:
			default:
				prxattrib->key_index = psecuritypriv->dot118021XGrpKeyid;
				break;
			}
		}
	}

	if ((prxattrib->encrypt > 0) && ((prxattrib->bdecrypted == 0) || (psecuritypriv->sw_decrypt == true))) {
		/* Hardware did not (or must not) decrypt: do it in software. */
		psecuritypriv->hw_decrypted = false;

		switch (prxattrib->encrypt) {
		case _WEP40_:
		case _WEP104_:
			rtw_wep_decrypt(padapter, (u8 *)precv_frame);
			break;
		case _TKIP_:
			res = rtw_tkip_decrypt(padapter, (u8 *)precv_frame);
			break;
		case _AES_:
			res = rtw_aes_decrypt(padapter, (u8 *)precv_frame);
			break;
		default:
			break;
		}
	} else if (prxattrib->bdecrypted == 1 && prxattrib->encrypt > 0 &&
		   (psecuritypriv->busetkipkey == 1 || prxattrib->encrypt != _TKIP_)
		) {
		psecuritypriv->hw_decrypted = true;
	} else {
	}

	if (res == _FAIL) {
		/* software decryption failed: drop the frame */
		rtw_free_recvframe(return_packet, &padapter->recvpriv.free_recv_queue);
		return_packet = NULL;
	} else
		prxattrib->bdecrypted = true;

	return return_packet;
}
/*
 * 802.1X port control: while a station's controlled port is blocked,
 * only EAPOL (0x888e) frames are passed through; everything else is
 * dropped. Returns the frame to keep processing, or NULL if it was freed.
 */
static union recv_frame *portctrl(struct adapter *adapter, union recv_frame *precv_frame)
{
	u8 *psta_addr = NULL;
	u8 *ptr;
	uint auth_alg;
	struct recv_frame_hdr *pfhdr;
	struct sta_info *psta;
	struct sta_priv *pstapriv;
	union recv_frame *prtnframe;
	u16 ether_type = 0;
	u16 eapol_type = 0x888e;/* for Funia BD's WPA issue */
	struct rx_pkt_attrib *pattrib;

	pstapriv = &adapter->stapriv;

	auth_alg = adapter->securitypriv.dot11AuthAlgrthm;

	ptr = precv_frame->u.hdr.rx_data;
	pfhdr = &precv_frame->u.hdr;
	pattrib = &pfhdr->attrib;
	psta_addr = pattrib->ta;

	prtnframe = NULL;

	psta = rtw_get_stainfo(pstapriv, psta_addr);

	/* dot11AuthAlgrthm == 2 means 802.1X port control is in effect */
	if (auth_alg == 2) {
		if ((psta) && (psta->ieee8021x_blocked)) {
			__be16 be_tmp;

			/* blocked */
			/* only accept EAPOL frame */
			prtnframe = precv_frame;

			/* get ether_type from behind the LLC/SNAP header */
			ptr = ptr + pfhdr->attrib.hdrlen + pfhdr->attrib.iv_len + LLC_HEADER_LENGTH;
			memcpy(&be_tmp, ptr, 2);
			ether_type = ntohs(be_tmp);

			if (ether_type == eapol_type)
				prtnframe = precv_frame;
			else {
				/* free this frame */
				rtw_free_recvframe(precv_frame, &adapter->recvpriv.free_recv_queue);
				prtnframe = NULL;
			}
		} else {
			/* allowed */
			/* check decryption status, and decrypt the frame if needed */
			prtnframe = precv_frame;
			/* check is the EAPOL frame or not (Rekey) */
		}
	} else
		prtnframe = precv_frame;

	return prtnframe;
}
/*
 * Duplicate-frame cache: drop a data frame whose sequence control
 * (seq_num:frag_num) matches the last one seen for its TID. Returns
 * _FAIL for duplicates or out-of-range TIDs, _SUCCESS otherwise.
 */
static signed int recv_decache(union recv_frame *precv_frame, u8 bretry, struct stainfo_rxcache *prxcache)
{
	signed int tid = precv_frame->u.hdr.attrib.priority;

	u16 seq_ctrl = ((precv_frame->u.hdr.attrib.seq_num & 0xffff) << 4) |
		(precv_frame->u.hdr.attrib.frag_num & 0xf);

	/* only TIDs 0..15 are tracked */
	if (tid > 15)
		return _FAIL;

	/* The cache is consulted for every frame, not just retries: the
	 * old code wrapped this in a dead "if (1)" (bretry is ignored on
	 * purpose), which is removed here.
	 */
	if (seq_ctrl == prxcache->tid_rxseq[tid])
		return _FAIL;

	prxcache->tid_rxseq[tid] = seq_ctrl;

	return _SUCCESS;
}
static void process_pwrbit_data(struct adapter *padapter, union recv_frame *precv_frame)
{
	u8 *hdr = precv_frame->u.hdr.rx_data;
	struct rx_pkt_attrib *pattrib = &precv_frame->u.hdr.attrib;
	struct sta_info *psta;
	unsigned char in_ps;

	/* Track the sender's PwrMgt bit and move its tx queue between the
	 * active and sleep states accordingly.
	 */
	psta = rtw_get_stainfo(&padapter->stapriv, pattrib->src);
	if (!psta)
		return;

	in_ps = GetPwrMgt(hdr);
	if (in_ps) {
		/* station entered power save: start buffering its traffic */
		if (!(psta->state & WIFI_SLEEP_STATE))
			stop_sta_xmit(padapter, psta);
	} else {
		/* station woke up: flush the buffered frames */
		if (psta->state & WIFI_SLEEP_STATE)
			wakeup_sta_to_xmit(padapter, psta);
	}
}
static void process_wmmps_data(struct adapter *padapter, union recv_frame *precv_frame)
{
	struct rx_pkt_attrib *pattrib = &precv_frame->u.hdr.attrib;
	struct sta_info *psta;
	u8 trigger_en;

	psta = rtw_get_stainfo(&padapter->stapriv, pattrib->src);
	if (!psta)
		return;

	if (!psta->qos_option)
		return;

	/* low nibble of qos_info holds the per-AC U-APSD flags */
	if (!(psta->qos_info & 0xf))
		return;

	if (!(psta->state & WIFI_SLEEP_STATE))
		return;

	/* Map the frame's user priority to its AC and check whether that
	 * AC is trigger-enabled (BIT(1)).
	 */
	switch (pattrib->priority) {
	case 1:
	case 2:
		trigger_en = psta->uapsd_bk & BIT(1);
		break;
	case 4:
	case 5:
		trigger_en = psta->uapsd_vi & BIT(1);
		break;
	case 6:
	case 7:
		trigger_en = psta->uapsd_vo & BIT(1);
		break;
	case 0:
	case 3:
	default:
		trigger_en = psta->uapsd_be & BIT(1);
		break;
	}

	if (!trigger_en)
		return;

	if (psta->sleepq_ac_len > 0)
		/* process received triggered frame */
		xmit_delivery_enabled_frames(padapter, psta);
	else
		/* issue one qos null frame with More data bit = 0 and the EOSP bit set (= 1) */
		issue_qos_nulldata(padapter, psta->hwaddr, (u16)pattrib->priority, 0, 0);
}
static void count_rx_stats(struct adapter *padapter, union recv_frame *prframe, struct sta_info *sta)
{
	struct rx_pkt_attrib *pattrib = &prframe->u.hdr.attrib;
	struct recv_priv *precvpriv = &padapter->recvpriv;
	struct sta_info *owner;
	int frame_len;

	frame_len = get_recvframe_len(prframe);

	/* adapter-wide counters */
	precvpriv->rx_bytes += frame_len;
	padapter->mlmepriv.LinkDetectInfo.NumRxOkInPeriod++;
	if (!is_broadcast_ether_addr(pattrib->dst) && !is_multicast_ether_addr(pattrib->dst))
		padapter->mlmepriv.LinkDetectInfo.NumRxUnicastOkInPeriod++;

	/* per-station counters: an explicitly passed sta takes precedence
	 * over the one recorded on the frame
	 */
	owner = sta ? sta : prframe->u.hdr.psta;
	if (owner) {
		owner->sta_stats.rx_data_pkts++;
		owner->sta_stats.rx_bytes += frame_len;
	}

	traffic_check_for_leave_lps(padapter, false, 0);
}
/*
 * Validate an intra-BSS (sta-to-sta) data frame for the current fw state
 * and resolve the sender's sta_info into *psta. Returns _SUCCESS or _FAIL.
 */
static signed int sta2sta_data_frame(struct adapter *adapter, union recv_frame *precv_frame,
	struct sta_info **psta)
{
	u8 *ptr = precv_frame->u.hdr.rx_data;
	signed int ret = _SUCCESS;
	struct rx_pkt_attrib *pattrib = &precv_frame->u.hdr.attrib;
	struct sta_priv *pstapriv = &adapter->stapriv;
	struct mlme_priv *pmlmepriv = &adapter->mlmepriv;
	u8 *mybssid = get_bssid(pmlmepriv);
	u8 *myhwaddr = myid(&adapter->eeprompriv);
	u8 *sta_addr = NULL;
	signed int bmcast = is_multicast_ether_addr(pattrib->dst);

	if ((check_fwstate(pmlmepriv, WIFI_ADHOC_STATE) == true) ||
	    (check_fwstate(pmlmepriv, WIFI_ADHOC_MASTER_STATE) == true)) {
		/* filter packets that SA is myself or multicast or broadcast */
		if (!memcmp(myhwaddr, pattrib->src, ETH_ALEN)) {
			ret = _FAIL;
			goto exit;
		}

		/* DA must be me unless the frame is multicast/broadcast */
		if ((memcmp(myhwaddr, pattrib->dst, ETH_ALEN)) && (!bmcast)) {
			ret = _FAIL;
			goto exit;
		}

		/* BSSID must match our IBSS */
		if (is_zero_ether_addr(pattrib->bssid) ||
		    is_zero_ether_addr(mybssid) ||
		    (memcmp(pattrib->bssid, mybssid, ETH_ALEN))) {
			ret = _FAIL;
			goto exit;
		}

		sta_addr = pattrib->src;
	} else if (check_fwstate(pmlmepriv, WIFI_STATION_STATE) == true) {
		/* For Station mode, sa and bssid should always be BSSID, and DA is my mac-address */
		if (memcmp(pattrib->bssid, pattrib->src, ETH_ALEN)) {
			ret = _FAIL;
			goto exit;
		}

		sta_addr = pattrib->bssid;
	} else if (check_fwstate(pmlmepriv, WIFI_AP_STATE) == true) {
		if (bmcast) {
			/* For AP mode, if DA == MCAST, then BSSID should be also MCAST */
			if (!is_multicast_ether_addr(pattrib->bssid)) {
				ret = _FAIL;
				goto exit;
			}
		} else { /* not mc-frame */
			/* For AP mode, if DA is non-MCAST, then it must be BSSID, and bssid == BSSID */
			if (memcmp(pattrib->bssid, pattrib->dst, ETH_ALEN)) {
				ret = _FAIL;
				goto exit;
			}

			sta_addr = pattrib->src;
		}
	} else if (check_fwstate(pmlmepriv, WIFI_MP_STATE) == true) {
		/* MP mode: rebuild all address fields from the raw header */
		memcpy(pattrib->dst, GetAddr1Ptr(ptr), ETH_ALEN);
		memcpy(pattrib->src, GetAddr2Ptr(ptr), ETH_ALEN);
		memcpy(pattrib->bssid, GetAddr3Ptr(ptr), ETH_ALEN);
		memcpy(pattrib->ra, pattrib->dst, ETH_ALEN);
		memcpy(pattrib->ta, pattrib->src, ETH_ALEN);

		sta_addr = mybssid;
	} else {
		/* Unknown fw state: bail out now instead of falling through
		 * to the lookup below with sta_addr still NULL (the old code
		 * relied on rtw_get_stainfo() tolerating a NULL address).
		 */
		ret = _FAIL;
		goto exit;
	}

	if (bmcast)
		*psta = rtw_get_bcmc_stainfo(adapter);
	else
		*psta = rtw_get_stainfo(pstapriv, sta_addr); /* get ap_info */

	if (!*psta) {
		ret = _FAIL;
		goto exit;
	}

exit:
	return ret;
}
/*
 * Validate a FromDS (AP-to-station) data frame for the current fw state
 * and resolve the AP's sta_info into *psta. Returns _SUCCESS, _FAIL, or
 * RTW_RX_HANDLED when the frame was fully consumed here.
 */
static signed int ap2sta_data_frame(struct adapter *adapter, union recv_frame *precv_frame,
	struct sta_info **psta)
{
	u8 *ptr = precv_frame->u.hdr.rx_data;
	struct rx_pkt_attrib *pattrib = &precv_frame->u.hdr.attrib;
	signed int ret = _SUCCESS;
	struct sta_priv *pstapriv = &adapter->stapriv;
	struct mlme_priv *pmlmepriv = &adapter->mlmepriv;
	u8 *mybssid = get_bssid(pmlmepriv);
	u8 *myhwaddr = myid(&adapter->eeprompriv);
	signed int bmcast = is_multicast_ether_addr(pattrib->dst);

	if ((check_fwstate(pmlmepriv, WIFI_STATION_STATE) == true) &&
	    (check_fwstate(pmlmepriv, _FW_LINKED) == true ||
	     check_fwstate(pmlmepriv, _FW_UNDER_LINKING) == true)
	) {
		/* filter packets that SA is myself or multicast or broadcast */
		if (!memcmp(myhwaddr, pattrib->src, ETH_ALEN)) {
			ret = _FAIL;
			goto exit;
		}

		/* da should be for me */
		if ((memcmp(myhwaddr, pattrib->dst, ETH_ALEN)) && (!bmcast)) {
			ret = _FAIL;
			goto exit;
		}

		/* check BSSID */
		if (is_zero_ether_addr(pattrib->bssid) ||
		    is_zero_ether_addr(mybssid) ||
		    (memcmp(pattrib->bssid, mybssid, ETH_ALEN))) {
			/* foreign BSS: tell the unicast sender we are not its client */
			if (!bmcast)
				issue_deauth(adapter, pattrib->bssid, WLAN_REASON_CLASS3_FRAME_FROM_NONASSOC_STA);

			ret = _FAIL;
			goto exit;
		}

		if (bmcast)
			*psta = rtw_get_bcmc_stainfo(adapter);
		else
			*psta = rtw_get_stainfo(pstapriv, pattrib->bssid); /* get ap_info */

		if (!*psta) {
			ret = _FAIL;
			goto exit;
		}

		/* BIT(6) of the subtype marks a null (no-data) frame */
		if (GetFrameSubType(ptr) & BIT(6)) {
			/* No data, will not indicate to upper layer, temporily count it here */
			count_rx_stats(adapter, precv_frame, *psta);
			ret = RTW_RX_HANDLED;
			goto exit;
		}

	} else if ((check_fwstate(pmlmepriv, WIFI_MP_STATE) == true) &&
		   (check_fwstate(pmlmepriv, _FW_LINKED) == true)) {
		/* MP mode: rebuild the address fields from the raw header */
		memcpy(pattrib->dst, GetAddr1Ptr(ptr), ETH_ALEN);
		memcpy(pattrib->src, GetAddr2Ptr(ptr), ETH_ALEN);
		memcpy(pattrib->bssid, GetAddr3Ptr(ptr), ETH_ALEN);
		memcpy(pattrib->ra, pattrib->dst, ETH_ALEN);
		memcpy(pattrib->ta, pattrib->src, ETH_ALEN);

		memcpy(pattrib->bssid, mybssid, ETH_ALEN);

		*psta = rtw_get_stainfo(pstapriv, pattrib->bssid); /* get sta_info */
		if (!*psta) {
			ret = _FAIL;
			goto exit;
		}

	} else if (check_fwstate(pmlmepriv, WIFI_AP_STATE) == true) {
		/* Special case */
		ret = RTW_RX_HANDLED;
		goto exit;

	} else {
		/* not linked: deauth unknown senders addressed to us */
		if (!memcmp(myhwaddr, pattrib->dst, ETH_ALEN) && (!bmcast)) {
			*psta = rtw_get_stainfo(pstapriv, pattrib->bssid); /* get sta_info */
			if (!*psta) {
				/* for AP multicast issue , modify by yiwei */
				static unsigned long send_issue_deauth_time;

				/* rate-limit the deauth to at most once per 10 s */
				if (jiffies_to_msecs(jiffies - send_issue_deauth_time) > 10000 || send_issue_deauth_time == 0) {
					send_issue_deauth_time = jiffies;

					issue_deauth(adapter, pattrib->bssid, WLAN_REASON_CLASS3_FRAME_FROM_NONASSOC_STA);
				}
			}
		}

		ret = _FAIL;
	}

exit:
	return ret;
}
/*
 * Validate a ToDS (station-to-AP) data frame while we operate as AP and
 * resolve the sender's sta_info into *psta. Returns _SUCCESS, _FAIL, or
 * RTW_RX_HANDLED when the frame was fully consumed here.
 */
static signed int sta2ap_data_frame(struct adapter *adapter, union recv_frame *precv_frame,
	struct sta_info **psta)
{
	u8 *ptr = precv_frame->u.hdr.rx_data;
	struct rx_pkt_attrib *pattrib = &precv_frame->u.hdr.attrib;
	struct sta_priv *pstapriv = &adapter->stapriv;
	struct mlme_priv *pmlmepriv = &adapter->mlmepriv;
	unsigned char *mybssid = get_bssid(pmlmepriv);
	signed int ret = _SUCCESS;

	if (check_fwstate(pmlmepriv, WIFI_AP_STATE) == true) {
		/* For AP mode, RA =BSSID, TX =STA(SRC_ADDR), A3 =DST_ADDR */
		if (memcmp(pattrib->bssid, mybssid, ETH_ALEN)) {
			ret = _FAIL;
			goto exit;
		}

		*psta = rtw_get_stainfo(pstapriv, pattrib->src);
		if (!*psta) {
			/* sender is not associated: class 3 frame, deauth it */
			issue_deauth(adapter, pattrib->src, WLAN_REASON_CLASS3_FRAME_FROM_NONASSOC_STA);

			ret = RTW_RX_HANDLED;
			goto exit;
		}

		/* track the sender's power-save / U-APSD state */
		process_pwrbit_data(adapter, precv_frame);

		if ((GetFrameSubType(ptr) & WIFI_QOS_DATA_TYPE) == WIFI_QOS_DATA_TYPE)
			process_wmmps_data(adapter, precv_frame);

		/* BIT(6) of the subtype marks a null (no-data) frame */
		if (GetFrameSubType(ptr) & BIT(6)) {
			/* No data, will not indicate to upper layer, temporily count it here */
			count_rx_stats(adapter, precv_frame, *psta);
			ret = RTW_RX_HANDLED;
			goto exit;
		}
	} else {
		u8 *myhwaddr = myid(&adapter->eeprompriv);

		/* not in AP mode: only react to frames addressed to us */
		if (memcmp(pattrib->ra, myhwaddr, ETH_ALEN)) {
			ret = RTW_RX_HANDLED;
			goto exit;
		}
		issue_deauth(adapter, pattrib->src, WLAN_REASON_CLASS3_FRAME_FROM_NONASSOC_STA);
		ret = RTW_RX_HANDLED;
		goto exit;
	}

exit:
	return ret;
}
/*
 * Handle received control frames. Only PS-Poll is acted upon (legacy
 * power-save retrieval): release one buffered frame from the polling
 * station's sleep queue, or answer with a null-data frame when nothing
 * is buffered. Always returns _FAIL so control frames are never passed
 * up the stack.
 */
static signed int validate_recv_ctrl_frame(struct adapter *padapter, union recv_frame *precv_frame)
{
	struct rx_pkt_attrib *pattrib = &precv_frame->u.hdr.attrib;
	struct sta_priv *pstapriv = &padapter->stapriv;
	u8 *pframe = precv_frame->u.hdr.rx_data;
	struct sta_info *psta = NULL;

	if (GetFrameType(pframe) != WIFI_CTRL_TYPE)
		return _FAIL;

	/* receive the frames that ra(a1) is my address */
	if (memcmp(GetAddr1Ptr(pframe), myid(&padapter->eeprompriv), ETH_ALEN))
		return _FAIL;

	psta = rtw_get_stainfo(pstapriv, GetAddr2Ptr(pframe));
	if (!psta)
		return _FAIL;

	/* for rx pkt statistics */
	psta->sta_stats.rx_ctrl_pkts++;

	/* only handle ps-poll */
	if (GetFrameSubType(pframe) == WIFI_PSPOLL) {
		u16 aid;
		u8 wmmps_ac = 0;

		aid = GetAid(pframe);
		if (psta->aid != aid)
			return _FAIL;

		/* PS-Poll is the legacy retrieval mechanism: if this AC is
		 * U-APSD delivery-enabled (BIT(0)), ignore the poll.
		 */
		switch (pattrib->priority) {
		case 1:
		case 2:
			wmmps_ac = psta->uapsd_bk&BIT(0);
			break;
		case 4:
		case 5:
			wmmps_ac = psta->uapsd_vi&BIT(0);
			break;
		case 6:
		case 7:
			wmmps_ac = psta->uapsd_vo&BIT(0);
			break;
		case 0:
		case 3:
		default:
			wmmps_ac = psta->uapsd_be&BIT(0);
			break;
		}

		if (wmmps_ac)
			return _FAIL;

		/* any poll from the station proves it is alive */
		if (psta->state & WIFI_STA_ALIVE_CHK_STATE) {
			psta->expire_to = pstapriv->expire_to;
			psta->state ^= WIFI_STA_ALIVE_CHK_STATE;
		}

		if ((psta->state&WIFI_SLEEP_STATE) && (pstapriv->sta_dz_bitmap&BIT(psta->aid))) {
			struct list_head *xmitframe_plist, *xmitframe_phead;
			struct xmit_frame *pxmitframe = NULL;
			struct xmit_priv *pxmitpriv = &padapter->xmitpriv;

			/* pxmitpriv->lock also protects psta->sleep_q */
			spin_lock_bh(&pxmitpriv->lock);

			xmitframe_phead = get_list_head(&psta->sleep_q);
			xmitframe_plist = get_next(xmitframe_phead);
			if (xmitframe_phead != xmitframe_plist) {
				/* hand exactly one buffered frame to the hal */
				pxmitframe = container_of(xmitframe_plist, struct xmit_frame, list);

				xmitframe_plist = get_next(xmitframe_plist);

				list_del_init(&pxmitframe->list);

				psta->sleepq_len--;

				/* More Data bit tells the station to keep polling */
				if (psta->sleepq_len > 0)
					pxmitframe->attrib.mdata = 1;
				else
					pxmitframe->attrib.mdata = 0;

				pxmitframe->attrib.triggered = 1;

				rtw_hal_xmitframe_enqueue(padapter, pxmitframe);

				if (psta->sleepq_len == 0) {
					pstapriv->tim_bitmap &= ~BIT(psta->aid);

					/* update BCN for TIM IE */
					update_beacon(padapter, WLAN_EID_TIM, NULL, true);
				}

				spin_unlock_bh(&pxmitpriv->lock);
			} else {
				spin_unlock_bh(&pxmitpriv->lock);

				if (pstapriv->tim_bitmap&BIT(psta->aid)) {
					if (psta->sleepq_len == 0) {
						/* issue nulldata with More data bit = 0 to indicate we have no buffered packets */
						issue_nulldata_in_interrupt(padapter, psta->hwaddr);
					} else {
						psta->sleepq_len = 0;
					}

					pstapriv->tim_bitmap &= ~BIT(psta->aid);

					/* update BCN for TIM IE */
					update_beacon(padapter, WLAN_EID_TIM, NULL, true);
				}
			}
		}
	}

	return _FAIL;
}
/* perform defrag */
static union recv_frame *recvframe_defrag(struct adapter *adapter,
	struct __queue *defrag_q)
{
	struct list_head *plist, *phead;
	u8 wlanhdr_offset;
	u8 curfragnum;
	struct recv_frame_hdr *pfhdr, *pnfhdr;
	union recv_frame *prframe, *pnextrframe;
	struct __queue *pfree_recv_queue;

	curfragnum = 0;
	pfree_recv_queue = &adapter->recvpriv.free_recv_queue;

	/* the first queued fragment becomes the reassembly target */
	phead = get_list_head(defrag_q);
	plist = get_next(phead);
	prframe = (union recv_frame *)plist;
	pfhdr = &prframe->u.hdr;
	list_del_init(&(prframe->u.list));

	if (curfragnum != pfhdr->attrib.frag_num) {
		/* the first fragment number must be 0 */
		/* free the whole queue */
		rtw_free_recvframe(prframe, pfree_recv_queue);
		rtw_free_recvframe_queue(defrag_q, pfree_recv_queue);

		return NULL;
	}

	curfragnum++;

	plist = get_list_head(defrag_q);
	plist = get_next(plist);

	while (phead != plist) {
		pnextrframe = (union recv_frame *)plist;
		pnfhdr = &pnextrframe->u.hdr;

		/* check the fragment sequence (2nd ~n fragment frame) */
		if (curfragnum != pnfhdr->attrib.frag_num) {
			/* the fragment number must be increasing (after decache) */
			/* release the defrag_q & prframe */
			rtw_free_recvframe(prframe, pfree_recv_queue);
			rtw_free_recvframe_queue(defrag_q, pfree_recv_queue);
			return NULL;
		}

		curfragnum++;

		/* copy the 2nd~n fragment frame's payload to the first fragment */
		/* get the 2nd~last fragment frame's payload */
		wlanhdr_offset = pnfhdr->attrib.hdrlen + pnfhdr->attrib.iv_len;
		recvframe_pull(pnextrframe, wlanhdr_offset);

		/* append to first fragment frame's tail (if privacy frame, pull the ICV) */
		recvframe_pull_tail(prframe, pfhdr->attrib.icv_len);

		/* NOTE(review): there is no explicit check that pnfhdr->len
		 * fits the remaining tailroom of prframe before this copy —
		 * presumably bounded by the rx buffer allocation; verify
		 * against the buffer setup in the os_dep layer.
		 */
		memcpy(pfhdr->rx_tail, pnfhdr->rx_data, pnfhdr->len);

		recvframe_put(prframe, pnfhdr->len);

		pfhdr->attrib.icv_len = pnfhdr->attrib.icv_len;
		plist = get_next(plist);
	}

	/* free the defrag_q queue and return the prframe */
	rtw_free_recvframe_queue(defrag_q, pfree_recv_queue);

	return prframe;
}
/* check if need to defrag, if needed queue the frame to defrag_q */
static union recv_frame *recvframe_chk_defrag(struct adapter *padapter, union recv_frame *precv_frame)
{
	u8 ismfrag;
	u8 fragnum;
	u8 *psta_addr;
	struct recv_frame_hdr *pfhdr;
	struct sta_info *psta;
	struct sta_priv *pstapriv;
	struct list_head *phead;
	union recv_frame *prtnframe = NULL;
	struct __queue *pfree_recv_queue, *pdefrag_q;

	pstapriv = &padapter->stapriv;

	pfhdr = &precv_frame->u.hdr;

	pfree_recv_queue = &padapter->recvpriv.free_recv_queue;

	/* need to define struct of wlan header frame ctrl */
	ismfrag = pfhdr->attrib.mfrag;
	fragnum = pfhdr->attrib.frag_num;

	psta_addr = pfhdr->attrib.ta;
	psta = rtw_get_stainfo(pstapriv, psta_addr);
	if (!psta) {
		u8 type = GetFrameType(pfhdr->rx_data);

		if (type != WIFI_DATA_TYPE) {
			/* non-data frames from unknown peers go through the
			 * bc/mc pseudo-station; it can be absent (e.g. during
			 * teardown), so guard against dereferencing NULL —
			 * the old code unconditionally took &psta->...
			 */
			psta = rtw_get_bcmc_stainfo(padapter);
			pdefrag_q = psta ? &psta->sta_recvpriv.defrag_q : NULL;
		} else
			pdefrag_q = NULL;
	} else
		pdefrag_q = &psta->sta_recvpriv.defrag_q;

	if ((ismfrag == 0) && (fragnum == 0))
		prtnframe = precv_frame;/* isn't a fragment frame */

	if (ismfrag == 1) {
		/* 0~(n-1) fragment frame */
		/* enqueue to defraf_g */
		if (pdefrag_q) {
			if (fragnum == 0)
				/* the first fragment */
				if (!list_empty(&pdefrag_q->queue))
					/* free current defrag_q */
					rtw_free_recvframe_queue(pdefrag_q, pfree_recv_queue);

			/* Then enqueue the 0~(n-1) fragment into the defrag_q */
			phead = get_list_head(pdefrag_q);
			list_add_tail(&pfhdr->list, phead);

			prtnframe = NULL;
		} else {
			/* can't find this ta's defrag_queue, so free this recv_frame */
			rtw_free_recvframe(precv_frame, pfree_recv_queue);
			prtnframe = NULL;
		}
	}

	if ((ismfrag == 0) && (fragnum != 0)) {
		/* the last fragment frame */
		/* enqueue the last fragment */
		if (pdefrag_q) {
			phead = get_list_head(pdefrag_q);
			list_add_tail(&pfhdr->list, phead);

			/* call recvframe_defrag to defrag */
			precv_frame = recvframe_defrag(padapter, pdefrag_q);
			prtnframe = precv_frame;
		} else {
			/* can't find this ta's defrag_queue, so free this recv_frame */
			rtw_free_recvframe(precv_frame, pfree_recv_queue);
			prtnframe = NULL;
		}
	}

	if ((prtnframe) && (prtnframe->u.hdr.attrib.privacy)) {
		/* after defrag we must check tkip mic code */
		if (recvframe_chkmic(padapter, prtnframe) == _FAIL) {
			rtw_free_recvframe(prtnframe, pfree_recv_queue);
			prtnframe = NULL;
		}
	}

	return prtnframe;
}
static signed int validate_recv_mgnt_frame(struct adapter *padapter, union recv_frame *precv_frame)
{
/* struct mlme_priv *pmlmepriv = &adapter->mlmepriv; */
precv_frame = recvframe_chk_defrag(padapter, precv_frame);
if (!precv_frame)
return _SUCCESS;
{
/* for rx pkt statistics */
struct sta_info *psta = rtw_get_stainfo(&padapter->stapriv, GetAddr2Ptr(precv_frame->u.hdr.rx_data));
if (psta) {
psta->sta_stats.rx_mgnt_pkts++;
if (GetFrameSubType(precv_frame->u.hdr.rx_data) == WIFI_BEACON)
psta->sta_stats.rx_beacon_pkts++;
else if (GetFrameSubType(precv_frame->u.hdr.rx_data) == WIFI_PROBEREQ)
psta->sta_stats.rx_probereq_pkts++;
else if (GetFrameSubType(precv_frame->u.hdr.rx_data) == WIFI_PROBERSP) {
if (!memcmp(padapter->eeprompriv.mac_addr, GetAddr1Ptr(precv_frame->u.hdr.rx_data), ETH_ALEN))
psta->sta_stats.rx_probersp_pkts++;
else if (is_broadcast_mac_addr(GetAddr1Ptr(precv_frame->u.hdr.rx_data)) ||
is_multicast_mac_addr(GetAddr1Ptr(precv_frame->u.hdr.rx_data)))
psta->sta_stats.rx_probersp_bm_pkts++;
else
psta->sta_stats.rx_probersp_uo_pkts++;
}
}
}
mgt_dispatcher(padapter, precv_frame);
return _SUCCESS;
}
static signed int validate_recv_data_frame(struct adapter *adapter, union recv_frame *precv_frame)
{
u8 bretry;
u8 *psa, *pda, *pbssid;
struct sta_info *psta = NULL;
u8 *ptr = precv_frame->u.hdr.rx_data;
struct rx_pkt_attrib *pattrib = &precv_frame->u.hdr.attrib;
struct security_priv *psecuritypriv = &adapter->securitypriv;
signed int ret = _SUCCESS;
bretry = GetRetry(ptr);
pda = get_da(ptr);
psa = get_sa(ptr);
pbssid = get_hdr_bssid(ptr);
if (!pbssid) {
ret = _FAIL;
goto exit;
}
memcpy(pattrib->dst, pda, ETH_ALEN);
memcpy(pattrib->src, psa, ETH_ALEN);
memcpy(pattrib->bssid, pbssid, ETH_ALEN);
switch (pattrib->to_fr_ds) {
case 0:
memcpy(pattrib->ra, pda, ETH_ALEN);
memcpy(pattrib->ta, psa, ETH_ALEN);
ret = sta2sta_data_frame(adapter, precv_frame, &psta);
break;
case 1:
memcpy(pattrib->ra, pda, ETH_ALEN);
memcpy(pattrib->ta, pbssid, ETH_ALEN);
ret = ap2sta_data_frame(adapter, precv_frame, &psta);
break;
case 2:
memcpy(pattrib->ra, pbssid, ETH_ALEN);
memcpy(pattrib->ta, psa, ETH_ALEN);
ret = sta2ap_data_frame(adapter, precv_frame, &psta);
break;
case 3:
memcpy(pattrib->ra, GetAddr1Ptr(ptr), ETH_ALEN);
memcpy(pattrib->ta, GetAddr2Ptr(ptr), ETH_ALEN);
ret = _FAIL;
break;
default:
ret = _FAIL;
break;
}
if (ret == _FAIL) {
goto exit;
} else if (ret == RTW_RX_HANDLED) {
goto exit;
}
if (!psta) {
ret = _FAIL;
goto exit;
}
/* psta->rssi = prxcmd->rssi; */
/* psta->signal_quality = prxcmd->sq; */
precv_frame->u.hdr.psta = psta;
pattrib->amsdu = 0;
pattrib->ack_policy = 0;
/* parsing QC field */
if (pattrib->qos == 1) {
pattrib->priority = GetPriority((ptr + 24));
pattrib->ack_policy = GetAckpolicy((ptr + 24));
pattrib->amsdu = GetAMsdu((ptr + 24));
pattrib->hdrlen = pattrib->to_fr_ds == 3 ? 32 : 26;
if (pattrib->priority != 0 && pattrib->priority != 3)
adapter->recvpriv.bIsAnyNonBEPkts = true;
} else {
pattrib->priority = 0;
pattrib->hdrlen = pattrib->to_fr_ds == 3 ? 30 : 24;
}
if (pattrib->order)/* HT-CTRL 11n */
pattrib->hdrlen += 4;
precv_frame->u.hdr.preorder_ctrl = &psta->recvreorder_ctrl[pattrib->priority];
/* decache, drop duplicate recv packets */
if (recv_decache(precv_frame, bretry, &psta->sta_recvpriv.rxcache) == _FAIL) {
ret = _FAIL;
goto exit;
}
if (pattrib->privacy) {
GET_ENCRY_ALGO(psecuritypriv, psta, pattrib->encrypt, is_multicast_ether_addr(pattrib->ra));
SET_ICE_IV_LEN(pattrib->iv_len, pattrib->icv_len, pattrib->encrypt);
} else {
pattrib->encrypt = 0;
pattrib->iv_len = pattrib->icv_len = 0;
}
exit:
return ret;
}
static signed int validate_80211w_mgmt(struct adapter *adapter, union recv_frame *precv_frame)
{
struct mlme_priv *pmlmepriv = &adapter->mlmepriv;
struct rx_pkt_attrib *pattrib = &precv_frame->u.hdr.attrib;
u8 *ptr = precv_frame->u.hdr.rx_data;
u8 subtype;
subtype = GetFrameSubType(ptr); /* bit(7)~bit(2) */
/* only support station mode */
if (check_fwstate(pmlmepriv, WIFI_STATION_STATE) && check_fwstate(pmlmepriv, _FW_LINKED) &&
adapter->securitypriv.binstallBIPkey == true) {
/* unicast management frame decrypt */
if (pattrib->privacy && !(is_multicast_ether_addr(GetAddr1Ptr(ptr))) &&
(subtype == WIFI_DEAUTH || subtype == WIFI_DISASSOC || subtype == WIFI_ACTION)) {
u8 *mgmt_DATA;
u32 data_len = 0;
pattrib->bdecrypted = 0;
pattrib->encrypt = _AES_;
pattrib->hdrlen = sizeof(struct ieee80211_hdr_3addr);
/* set iv and icv length */
SET_ICE_IV_LEN(pattrib->iv_len, pattrib->icv_len, pattrib->encrypt);
memcpy(pattrib->ra, GetAddr1Ptr(ptr), ETH_ALEN);
memcpy(pattrib->ta, GetAddr2Ptr(ptr), ETH_ALEN);
/* actual management data frame body */
data_len = pattrib->pkt_len - pattrib->hdrlen - pattrib->iv_len - pattrib->icv_len;
mgmt_DATA = rtw_zmalloc(data_len);
if (!mgmt_DATA)
goto validate_80211w_fail;
precv_frame = decryptor(adapter, precv_frame);
/* save actual management data frame body */
memcpy(mgmt_DATA, ptr+pattrib->hdrlen+pattrib->iv_len, data_len);
/* overwrite the iv field */
memcpy(ptr+pattrib->hdrlen, mgmt_DATA, data_len);
/* remove the iv and icv length */
pattrib->pkt_len = pattrib->pkt_len - pattrib->iv_len - pattrib->icv_len;
kfree(mgmt_DATA);
if (!precv_frame)
goto validate_80211w_fail;
} else if (is_multicast_ether_addr(GetAddr1Ptr(ptr)) &&
(subtype == WIFI_DEAUTH || subtype == WIFI_DISASSOC)) {
signed int BIP_ret = _SUCCESS;
/* verify BIP MME IE of broadcast/multicast de-auth/disassoc packet */
BIP_ret = rtw_BIP_verify(adapter, (u8 *)precv_frame);
if (BIP_ret == _FAIL) {
goto validate_80211w_fail;
} else if (BIP_ret == RTW_RX_HANDLED) {
/* issue sa query request */
issue_action_SA_Query(adapter, NULL, 0, 0);
goto validate_80211w_fail;
}
} else { /* 802.11w protect */
if (subtype == WIFI_ACTION) {
/* according 802.11-2012 standard, these five types are not robust types */
if (ptr[WLAN_HDR_A3_LEN] != RTW_WLAN_CATEGORY_PUBLIC &&
ptr[WLAN_HDR_A3_LEN] != RTW_WLAN_CATEGORY_HT &&
ptr[WLAN_HDR_A3_LEN] != RTW_WLAN_CATEGORY_UNPROTECTED_WNM &&
ptr[WLAN_HDR_A3_LEN] != RTW_WLAN_CATEGORY_SELF_PROTECTED &&
ptr[WLAN_HDR_A3_LEN] != RTW_WLAN_CATEGORY_P2P) {
goto validate_80211w_fail;
}
} else if (subtype == WIFI_DEAUTH || subtype == WIFI_DISASSOC) {
/* issue sa query request */
issue_action_SA_Query(adapter, NULL, 0, 0);
goto validate_80211w_fail;
}
}
}
return _SUCCESS;
validate_80211w_fail:
return _FAIL;
}
static signed int validate_recv_frame(struct adapter *adapter, union recv_frame *precv_frame)
{
/* shall check frame subtype, to / from ds, da, bssid */
/* then call check if rx seq/frag. duplicated. */
u8 type;
u8 subtype;
signed int retval = _SUCCESS;
u8 bDumpRxPkt;
struct rx_pkt_attrib *pattrib = &precv_frame->u.hdr.attrib;
u8 *ptr = precv_frame->u.hdr.rx_data;
u8 ver = (unsigned char) (*ptr)&0x3;
/* add version chk */
if (ver != 0) {
retval = _FAIL;
goto exit;
}
type = GetFrameType(ptr);
subtype = GetFrameSubType(ptr); /* bit(7)~bit(2) */
pattrib->to_fr_ds = get_tofr_ds(ptr);
pattrib->frag_num = GetFragNum(ptr);
pattrib->seq_num = GetSequence(ptr);
pattrib->pw_save = GetPwrMgt(ptr);
pattrib->mfrag = GetMFrag(ptr);
pattrib->mdata = GetMData(ptr);
pattrib->privacy = GetPrivacy(ptr);
pattrib->order = GetOrder(ptr);
rtw_hal_get_def_var(adapter, HAL_DEF_DBG_DUMP_RXPKT, &(bDumpRxPkt));
switch (type) {
case WIFI_MGT_TYPE: /* mgnt */
if (validate_80211w_mgmt(adapter, precv_frame) == _FAIL) {
retval = _FAIL;
break;
}
retval = validate_recv_mgnt_frame(adapter, precv_frame);
retval = _FAIL; /* only data frame return _SUCCESS */
break;
case WIFI_CTRL_TYPE: /* ctrl */
retval = validate_recv_ctrl_frame(adapter, precv_frame);
retval = _FAIL; /* only data frame return _SUCCESS */
break;
case WIFI_DATA_TYPE: /* data */
pattrib->qos = (subtype & BIT(7)) ? 1:0;
retval = validate_recv_data_frame(adapter, precv_frame);
if (retval == _FAIL) {
struct recv_priv *precvpriv = &adapter->recvpriv;
precvpriv->rx_drop++;
} else if (retval == _SUCCESS) {
#ifdef DBG_RX_DUMP_EAP
u8 bDumpRxPkt;
u16 eth_type;
/* dump eapol */
rtw_hal_get_def_var(adapter, HAL_DEF_DBG_DUMP_RXPKT, &(bDumpRxPkt));
/* get ether_type */
memcpy(ð_type, ptr + pattrib->hdrlen + pattrib->iv_len + LLC_HEADER_LENGTH, 2);
eth_type = ntohs((unsigned short) eth_type);
#endif
}
break;
default:
retval = _FAIL;
break;
}
exit:
return retval;
}
/* remove the wlanhdr and add the eth_hdr */
static signed int wlanhdr_to_ethhdr(union recv_frame *precvframe)
{
signed int rmv_len;
u16 eth_type, len;
u8 bsnaphdr;
u8 *psnap_type;
struct ieee80211_snap_hdr *psnap;
__be16 be_tmp;
struct adapter *adapter = precvframe->u.hdr.adapter;
struct mlme_priv *pmlmepriv = &adapter->mlmepriv;
u8 *ptr = precvframe->u.hdr.rx_data; /* point to frame_ctrl field */
struct rx_pkt_attrib *pattrib = &precvframe->u.hdr.attrib;
if (pattrib->encrypt)
recvframe_pull_tail(precvframe, pattrib->icv_len);
psnap = (struct ieee80211_snap_hdr *)(ptr+pattrib->hdrlen + pattrib->iv_len);
psnap_type = ptr+pattrib->hdrlen + pattrib->iv_len+SNAP_SIZE;
/* convert hdr + possible LLC headers into Ethernet header */
/* eth_type = (psnap_type[0] << 8) | psnap_type[1]; */
if ((!memcmp(psnap, rfc1042_header, SNAP_SIZE) &&
(memcmp(psnap_type, SNAP_ETH_TYPE_IPX, 2)) &&
(memcmp(psnap_type, SNAP_ETH_TYPE_APPLETALK_AARP, 2))) ||
/* eth_type != ETH_P_AARP && eth_type != ETH_P_IPX) || */
!memcmp(psnap, bridge_tunnel_header, SNAP_SIZE)) {
/* remove RFC1042 or Bridge-Tunnel encapsulation and replace EtherType */
bsnaphdr = true;
} else
/* Leave Ethernet header part of hdr and full payload */
bsnaphdr = false;
rmv_len = pattrib->hdrlen + pattrib->iv_len + (bsnaphdr?SNAP_SIZE:0);
len = precvframe->u.hdr.len - rmv_len;
memcpy(&be_tmp, ptr+rmv_len, 2);
eth_type = ntohs(be_tmp); /* pattrib->ether_type */
pattrib->eth_type = eth_type;
if ((check_fwstate(pmlmepriv, WIFI_MP_STATE) == true)) {
ptr += rmv_len;
*ptr = 0x87;
*(ptr+1) = 0x12;
eth_type = 0x8712;
/* append rx status for mp test packets */
ptr = recvframe_pull(precvframe, (rmv_len-sizeof(struct ethhdr)+2)-24);
if (!ptr)
return _FAIL;
memcpy(ptr, get_rxmem(precvframe), 24);
ptr += 24;
} else {
ptr = recvframe_pull(precvframe, (rmv_len-sizeof(struct ethhdr) + (bsnaphdr?2:0)));
if (!ptr)
return _FAIL;
}
memcpy(ptr, pattrib->dst, ETH_ALEN);
memcpy(ptr+ETH_ALEN, pattrib->src, ETH_ALEN);
if (!bsnaphdr) {
be_tmp = htons(len);
memcpy(ptr+12, &be_tmp, 2);
}
return _SUCCESS;
}
static int amsdu_to_msdu(struct adapter *padapter, union recv_frame *prframe)
{
int a_len, padding_len;
u16 nSubframe_Length;
u8 nr_subframes, i;
u8 *pdata;
struct sk_buff *sub_pkt, *subframes[MAX_SUBFRAME_COUNT];
struct recv_priv *precvpriv = &padapter->recvpriv;
struct __queue *pfree_recv_queue = &(precvpriv->free_recv_queue);
nr_subframes = 0;
recvframe_pull(prframe, prframe->u.hdr.attrib.hdrlen);
if (prframe->u.hdr.attrib.iv_len > 0)
recvframe_pull(prframe, prframe->u.hdr.attrib.iv_len);
a_len = prframe->u.hdr.len;
pdata = prframe->u.hdr.rx_data;
while (a_len > ETH_HLEN) {
/* Offset 12 denote 2 mac address */
nSubframe_Length = get_unaligned_be16(pdata + 12);
if (a_len < ETH_HLEN + nSubframe_Length)
break;
sub_pkt = rtw_os_alloc_msdu_pkt(prframe, nSubframe_Length, pdata);
if (!sub_pkt)
break;
/* move the data point to data content */
pdata += ETH_HLEN;
a_len -= ETH_HLEN;
subframes[nr_subframes++] = sub_pkt;
if (nr_subframes >= MAX_SUBFRAME_COUNT)
break;
pdata += nSubframe_Length;
a_len -= nSubframe_Length;
if (a_len != 0) {
padding_len = 4 - ((nSubframe_Length + ETH_HLEN) & (4-1));
if (padding_len == 4)
padding_len = 0;
if (a_len < padding_len)
break;
pdata += padding_len;
a_len -= padding_len;
}
}
for (i = 0; i < nr_subframes; i++) {
sub_pkt = subframes[i];
/* Indicate the packets to upper layer */
if (sub_pkt)
rtw_os_recv_indicate_pkt(padapter, sub_pkt, &prframe->u.hdr.attrib);
}
prframe->u.hdr.len = 0;
rtw_free_recvframe(prframe, pfree_recv_queue);/* free this recv_frame */
return _SUCCESS;
}
static int check_indicate_seq(struct recv_reorder_ctrl *preorder_ctrl, u16 seq_num)
{
struct adapter *padapter = preorder_ctrl->padapter;
struct dvobj_priv *psdpriv = padapter->dvobj;
struct debug_priv *pdbgpriv = &psdpriv->drv_dbg;
u8 wsize = preorder_ctrl->wsize_b;
u16 wend = (preorder_ctrl->indicate_seq + wsize - 1) & 0xFFF;/* 4096; */
/* Rx Reorder initialize condition. */
if (preorder_ctrl->indicate_seq == 0xFFFF)
preorder_ctrl->indicate_seq = seq_num;
/* Drop out the packet which SeqNum is smaller than WinStart */
if (SN_LESS(seq_num, preorder_ctrl->indicate_seq))
return false;
/* */
/* Sliding window manipulation. Conditions includes: */
/* 1. Incoming SeqNum is equal to WinStart =>Window shift 1 */
/* 2. Incoming SeqNum is larger than the WinEnd => Window shift N */
/* */
if (SN_EQUAL(seq_num, preorder_ctrl->indicate_seq)) {
preorder_ctrl->indicate_seq = (preorder_ctrl->indicate_seq + 1) & 0xFFF;
} else if (SN_LESS(wend, seq_num)) {
/* boundary situation, when seq_num cross 0xFFF */
if (seq_num >= (wsize - 1))
preorder_ctrl->indicate_seq = seq_num + 1 - wsize;
else
preorder_ctrl->indicate_seq = 0xFFF - (wsize - (seq_num + 1)) + 1;
pdbgpriv->dbg_rx_ampdu_window_shift_cnt++;
}
return true;
}
static int enqueue_reorder_recvframe(struct recv_reorder_ctrl *preorder_ctrl, union recv_frame *prframe)
{
struct rx_pkt_attrib *pattrib = &prframe->u.hdr.attrib;
struct __queue *ppending_recvframe_queue = &preorder_ctrl->pending_recvframe_queue;
struct list_head *phead, *plist;
union recv_frame *pnextrframe;
struct rx_pkt_attrib *pnextattrib;
/* spin_lock_irqsave(&ppending_recvframe_queue->lock, irql); */
/* spin_lock(&ppending_recvframe_queue->lock); */
phead = get_list_head(ppending_recvframe_queue);
plist = get_next(phead);
while (phead != plist) {
pnextrframe = (union recv_frame *)plist;
pnextattrib = &pnextrframe->u.hdr.attrib;
if (SN_LESS(pnextattrib->seq_num, pattrib->seq_num))
plist = get_next(plist);
else if (SN_EQUAL(pnextattrib->seq_num, pattrib->seq_num))
/* Duplicate entry is found!! Do not insert current entry. */
/* spin_unlock_irqrestore(&ppending_recvframe_queue->lock, irql); */
return false;
else
break;
}
/* spin_lock_irqsave(&ppending_recvframe_queue->lock, irql); */
/* spin_lock(&ppending_recvframe_queue->lock); */
list_del_init(&(prframe->u.hdr.list));
list_add_tail(&(prframe->u.hdr.list), plist);
/* spin_unlock(&ppending_recvframe_queue->lock); */
/* spin_unlock_irqrestore(&ppending_recvframe_queue->lock, irql); */
return true;
}
static void recv_indicatepkts_pkt_loss_cnt(struct debug_priv *pdbgpriv, u64 prev_seq, u64 current_seq)
{
if (current_seq < prev_seq)
pdbgpriv->dbg_rx_ampdu_loss_count += (4096 + current_seq - prev_seq);
else
pdbgpriv->dbg_rx_ampdu_loss_count += (current_seq - prev_seq);
}
static int recv_indicatepkts_in_order(struct adapter *padapter, struct recv_reorder_ctrl *preorder_ctrl, int bforced)
{
struct list_head *phead, *plist;
union recv_frame *prframe;
struct rx_pkt_attrib *pattrib;
/* u8 index = 0; */
int bPktInBuf = false;
struct recv_priv *precvpriv = &padapter->recvpriv;
struct __queue *ppending_recvframe_queue = &preorder_ctrl->pending_recvframe_queue;
struct dvobj_priv *psdpriv = padapter->dvobj;
struct debug_priv *pdbgpriv = &psdpriv->drv_dbg;
/* spin_lock_irqsave(&ppending_recvframe_queue->lock, irql); */
/* spin_lock(&ppending_recvframe_queue->lock); */
phead = get_list_head(ppending_recvframe_queue);
plist = get_next(phead);
/* Handling some condition for forced indicate case. */
if (bforced == true) {
pdbgpriv->dbg_rx_ampdu_forced_indicate_count++;
if (list_empty(phead)) {
/* spin_unlock_irqrestore(&ppending_recvframe_queue->lock, irql); */
/* spin_unlock(&ppending_recvframe_queue->lock); */
return true;
}
prframe = (union recv_frame *)plist;
pattrib = &prframe->u.hdr.attrib;
recv_indicatepkts_pkt_loss_cnt(pdbgpriv, preorder_ctrl->indicate_seq, pattrib->seq_num);
preorder_ctrl->indicate_seq = pattrib->seq_num;
}
/* Prepare indication list and indication. */
/* Check if there is any packet need indicate. */
while (!list_empty(phead)) {
prframe = (union recv_frame *)plist;
pattrib = &prframe->u.hdr.attrib;
if (!SN_LESS(preorder_ctrl->indicate_seq, pattrib->seq_num)) {
plist = get_next(plist);
list_del_init(&(prframe->u.hdr.list));
if (SN_EQUAL(preorder_ctrl->indicate_seq, pattrib->seq_num))
preorder_ctrl->indicate_seq = (preorder_ctrl->indicate_seq + 1) & 0xFFF;
/* Set this as a lock to make sure that only one thread is indicating packet. */
/* pTS->RxIndicateState = RXTS_INDICATE_PROCESSING; */
/* Indicate packets */
/* indicate this recv_frame */
if (!pattrib->amsdu) {
if ((padapter->bDriverStopped == false) &&
(padapter->bSurpriseRemoved == false))
rtw_recv_indicatepkt(padapter, prframe);/* indicate this recv_frame */
} else if (pattrib->amsdu == 1) {
if (amsdu_to_msdu(padapter, prframe) != _SUCCESS)
rtw_free_recvframe(prframe, &precvpriv->free_recv_queue);
} else {
/* error condition; */
}
/* Update local variables. */
bPktInBuf = false;
} else {
bPktInBuf = true;
break;
}
}
/* spin_unlock(&ppending_recvframe_queue->lock); */
/* spin_unlock_irqrestore(&ppending_recvframe_queue->lock, irql); */
return bPktInBuf;
}
static int recv_indicatepkt_reorder(struct adapter *padapter, union recv_frame *prframe)
{
int retval = _SUCCESS;
struct rx_pkt_attrib *pattrib = &prframe->u.hdr.attrib;
struct recv_reorder_ctrl *preorder_ctrl = prframe->u.hdr.preorder_ctrl;
struct __queue *ppending_recvframe_queue = &preorder_ctrl->pending_recvframe_queue;
struct dvobj_priv *psdpriv = padapter->dvobj;
struct debug_priv *pdbgpriv = &psdpriv->drv_dbg;
if (!pattrib->amsdu) {
/* s1. */
wlanhdr_to_ethhdr(prframe);
if (pattrib->qos != 1) {
if ((padapter->bDriverStopped == false) &&
(padapter->bSurpriseRemoved == false)) {
rtw_recv_indicatepkt(padapter, prframe);
return _SUCCESS;
}
return _FAIL;
}
if (preorder_ctrl->enable == false) {
/* indicate this recv_frame */
preorder_ctrl->indicate_seq = pattrib->seq_num;
rtw_recv_indicatepkt(padapter, prframe);
preorder_ctrl->indicate_seq = (preorder_ctrl->indicate_seq + 1)%4096;
return _SUCCESS;
}
} else if (pattrib->amsdu == 1) { /* temp filter -> means didn't support A-MSDUs in a A-MPDU */
if (preorder_ctrl->enable == false) {
preorder_ctrl->indicate_seq = pattrib->seq_num;
retval = amsdu_to_msdu(padapter, prframe);
preorder_ctrl->indicate_seq = (preorder_ctrl->indicate_seq + 1)%4096;
if (retval != _SUCCESS) {
}
return retval;
}
}
spin_lock_bh(&ppending_recvframe_queue->lock);
/* s2. check if winstart_b(indicate_seq) needs to been updated */
if (!check_indicate_seq(preorder_ctrl, pattrib->seq_num)) {
pdbgpriv->dbg_rx_ampdu_drop_count++;
goto _err_exit;
}
/* s3. Insert all packet into Reorder Queue to maintain its ordering. */
if (!enqueue_reorder_recvframe(preorder_ctrl, prframe)) {
/* spin_unlock_irqrestore(&ppending_recvframe_queue->lock, irql); */
/* return _FAIL; */
goto _err_exit;
}
/* s4. */
/* Indication process. */
/* After Packet dropping and Sliding Window shifting as above, we can now just indicate the packets */
/* with the SeqNum smaller than latest WinStart and buffer other packets. */
/* */
/* For Rx Reorder condition: */
/* 1. All packets with SeqNum smaller than WinStart => Indicate */
/* 2. All packets with SeqNum larger than or equal to WinStart => Buffer it. */
/* */
/* recv_indicatepkts_in_order(padapter, preorder_ctrl, true); */
if (recv_indicatepkts_in_order(padapter, preorder_ctrl, false) == true) {
_set_timer(&preorder_ctrl->reordering_ctrl_timer, REORDER_WAIT_TIME);
spin_unlock_bh(&ppending_recvframe_queue->lock);
} else {
spin_unlock_bh(&ppending_recvframe_queue->lock);
del_timer_sync(&preorder_ctrl->reordering_ctrl_timer);
}
return _SUCCESS;
_err_exit:
spin_unlock_bh(&ppending_recvframe_queue->lock);
return _FAIL;
}
void rtw_reordering_ctrl_timeout_handler(struct timer_list *t)
{
struct recv_reorder_ctrl *preorder_ctrl =
from_timer(preorder_ctrl, t, reordering_ctrl_timer);
struct adapter *padapter = preorder_ctrl->padapter;
struct __queue *ppending_recvframe_queue = &preorder_ctrl->pending_recvframe_queue;
if (padapter->bDriverStopped || padapter->bSurpriseRemoved)
return;
spin_lock_bh(&ppending_recvframe_queue->lock);
if (recv_indicatepkts_in_order(padapter, preorder_ctrl, true) == true)
_set_timer(&preorder_ctrl->reordering_ctrl_timer, REORDER_WAIT_TIME);
spin_unlock_bh(&ppending_recvframe_queue->lock);
}
static int process_recv_indicatepkts(struct adapter *padapter, union recv_frame *prframe)
{
int retval = _SUCCESS;
/* struct recv_priv *precvpriv = &padapter->recvpriv; */
/* struct rx_pkt_attrib *pattrib = &prframe->u.hdr.attrib; */
struct mlme_priv *pmlmepriv = &padapter->mlmepriv;
struct ht_priv *phtpriv = &pmlmepriv->htpriv;
if (phtpriv->ht_option == true) { /* B/G/N Mode */
/* prframe->u.hdr.preorder_ctrl = &precvpriv->recvreorder_ctrl[pattrib->priority]; */
if (recv_indicatepkt_reorder(padapter, prframe) != _SUCCESS) { /* including perform A-MPDU Rx Ordering Buffer Control */
if ((padapter->bDriverStopped == false) &&
(padapter->bSurpriseRemoved == false)) {
retval = _FAIL;
return retval;
}
}
} else { /* B/G mode */
retval = wlanhdr_to_ethhdr(prframe);
if (retval != _SUCCESS)
return retval;
if ((padapter->bDriverStopped == false) && (padapter->bSurpriseRemoved == false)) {
/* indicate this recv_frame */
rtw_recv_indicatepkt(padapter, prframe);
} else {
retval = _FAIL;
return retval;
}
}
return retval;
}
static int recv_func_prehandle(struct adapter *padapter, union recv_frame *rframe)
{
int ret = _SUCCESS;
struct __queue *pfree_recv_queue = &padapter->recvpriv.free_recv_queue;
/* check the frame crtl field and decache */
ret = validate_recv_frame(padapter, rframe);
if (ret != _SUCCESS) {
rtw_free_recvframe(rframe, pfree_recv_queue);/* free this recv_frame */
goto exit;
}
exit:
return ret;
}
static int recv_func_posthandle(struct adapter *padapter, union recv_frame *prframe)
{
int ret = _SUCCESS;
union recv_frame *orig_prframe = prframe;
struct recv_priv *precvpriv = &padapter->recvpriv;
struct __queue *pfree_recv_queue = &padapter->recvpriv.free_recv_queue;
prframe = decryptor(padapter, prframe);
if (!prframe) {
ret = _FAIL;
goto _recv_data_drop;
}
prframe = recvframe_chk_defrag(padapter, prframe);
if (!prframe)
goto _recv_data_drop;
prframe = portctrl(padapter, prframe);
if (!prframe) {
ret = _FAIL;
goto _recv_data_drop;
}
count_rx_stats(padapter, prframe, NULL);
ret = process_recv_indicatepkts(padapter, prframe);
if (ret != _SUCCESS) {
rtw_free_recvframe(orig_prframe, pfree_recv_queue);/* free this recv_frame */
goto _recv_data_drop;
}
_recv_data_drop:
precvpriv->rx_drop++;
return ret;
}
static int recv_func(struct adapter *padapter, union recv_frame *rframe)
{
int ret;
struct rx_pkt_attrib *prxattrib = &rframe->u.hdr.attrib;
struct recv_priv *recvpriv = &padapter->recvpriv;
struct security_priv *psecuritypriv = &padapter->securitypriv;
struct mlme_priv *mlmepriv = &padapter->mlmepriv;
/* check if need to handle uc_swdec_pending_queue*/
if (check_fwstate(mlmepriv, WIFI_STATION_STATE) && psecuritypriv->busetkipkey) {
union recv_frame *pending_frame;
int cnt = 0;
while ((pending_frame = rtw_alloc_recvframe(&padapter->recvpriv.uc_swdec_pending_queue))) {
cnt++;
recv_func_posthandle(padapter, pending_frame);
}
}
ret = recv_func_prehandle(padapter, rframe);
if (ret == _SUCCESS) {
/* check if need to enqueue into uc_swdec_pending_queue*/
if (check_fwstate(mlmepriv, WIFI_STATION_STATE) &&
!is_multicast_ether_addr(prxattrib->ra) && prxattrib->encrypt > 0 &&
(prxattrib->bdecrypted == 0 || psecuritypriv->sw_decrypt == true) &&
psecuritypriv->ndisauthtype == Ndis802_11AuthModeWPAPSK &&
!psecuritypriv->busetkipkey) {
rtw_enqueue_recvframe(rframe, &padapter->recvpriv.uc_swdec_pending_queue);
if (recvpriv->free_recvframe_cnt < NR_RECVFRAME/4) {
/* to prevent from recvframe starvation, get recvframe from uc_swdec_pending_queue to free_recvframe_cnt */
rframe = rtw_alloc_recvframe(&padapter->recvpriv.uc_swdec_pending_queue);
if (rframe)
goto do_posthandle;
}
goto exit;
}
do_posthandle:
ret = recv_func_posthandle(padapter, rframe);
}
exit:
return ret;
}
s32 rtw_recv_entry(union recv_frame *precvframe)
{
struct adapter *padapter;
struct recv_priv *precvpriv;
s32 ret = _SUCCESS;
padapter = precvframe->u.hdr.adapter;
precvpriv = &padapter->recvpriv;
ret = recv_func(padapter, precvframe);
if (ret == _FAIL)
goto _recv_entry_drop;
precvpriv->rx_pkts++;
return ret;
_recv_entry_drop:
return ret;
}
static void rtw_signal_stat_timer_hdl(struct timer_list *t)
{
struct adapter *adapter =
from_timer(adapter, t, recvpriv.signal_stat_timer);
struct recv_priv *recvpriv = &adapter->recvpriv;
u32 tmp_s, tmp_q;
u8 avg_signal_strength = 0;
u8 avg_signal_qual = 0;
u32 num_signal_strength = 0;
u32 __maybe_unused num_signal_qual = 0;
u8 _alpha = 5; /* this value is based on converging_constant = 5000 and sampling_interval = 1000 */
if (adapter->recvpriv.is_signal_dbg) {
/* update the user specific value, signal_strength_dbg, to signal_strength, rssi */
adapter->recvpriv.signal_strength = adapter->recvpriv.signal_strength_dbg;
adapter->recvpriv.rssi = (s8)translate_percentage_to_dbm((u8)adapter->recvpriv.signal_strength_dbg);
} else {
if (recvpriv->signal_strength_data.update_req == 0) {/* update_req is clear, means we got rx */
avg_signal_strength = recvpriv->signal_strength_data.avg_val;
num_signal_strength = recvpriv->signal_strength_data.total_num;
/* after avg_vals are acquired, we can re-stat the signal values */
recvpriv->signal_strength_data.update_req = 1;
}
if (recvpriv->signal_qual_data.update_req == 0) {/* update_req is clear, means we got rx */
avg_signal_qual = recvpriv->signal_qual_data.avg_val;
num_signal_qual = recvpriv->signal_qual_data.total_num;
/* after avg_vals are acquired, we can re-stat the signal values */
recvpriv->signal_qual_data.update_req = 1;
}
if (num_signal_strength == 0) {
if (rtw_get_on_cur_ch_time(adapter) == 0 ||
jiffies_to_msecs(jiffies - rtw_get_on_cur_ch_time(adapter)) < 2 * adapter->mlmeextpriv.mlmext_info.bcn_interval
) {
goto set_timer;
}
}
if (check_fwstate(&adapter->mlmepriv, _FW_UNDER_SURVEY) == true ||
check_fwstate(&adapter->mlmepriv, _FW_LINKED) == false
) {
goto set_timer;
}
/* update value of signal_strength, rssi, signal_qual */
tmp_s = (avg_signal_strength+(_alpha-1)*recvpriv->signal_strength);
if (tmp_s % _alpha)
tmp_s = tmp_s/_alpha + 1;
else
tmp_s = tmp_s/_alpha;
if (tmp_s > 100)
tmp_s = 100;
tmp_q = (avg_signal_qual+(_alpha-1)*recvpriv->signal_qual);
if (tmp_q % _alpha)
tmp_q = tmp_q/_alpha + 1;
else
tmp_q = tmp_q/_alpha;
if (tmp_q > 100)
tmp_q = 100;
recvpriv->signal_strength = tmp_s;
recvpriv->rssi = (s8)translate_percentage_to_dbm(tmp_s);
recvpriv->signal_qual = tmp_q;
}
set_timer:
rtw_set_signal_stat_timer(recvpriv);
}
| linux-master | drivers/staging/rtl8723bs/core/rtw_recv.c |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.