// SPDX-License-Identifier: GPL-2.0-only
/*
* mac80211 TDLS handling code
*
* Copyright 2006-2010 Johannes Berg <[email protected]>
* Copyright 2014, Intel Corporation
* Copyright 2014 Intel Mobile Communications GmbH
* Copyright 2015 - 2016 Intel Deutschland GmbH
* Copyright (C) 2019, 2021-2023 Intel Corporation
*/
#include <linux/ieee80211.h>
#include <linux/log2.h>
#include <net/cfg80211.h>
#include <linux/rtnetlink.h>
#include "ieee80211_i.h"
#include "driver-ops.h"
#include "rate.h"
#include "wme.h"
/* give usermode some time for retries in setting up the TDLS session */
#define TDLS_PEER_SETUP_TIMEOUT (15 * HZ)
void ieee80211_tdls_peer_del_work(struct work_struct *wk)
{
struct ieee80211_sub_if_data *sdata;
struct ieee80211_local *local;
sdata = container_of(wk, struct ieee80211_sub_if_data,
u.mgd.tdls_peer_del_work.work);
local = sdata->local;
mutex_lock(&local->mtx);
if (!is_zero_ether_addr(sdata->u.mgd.tdls_peer)) {
tdls_dbg(sdata, "TDLS del peer %pM\n", sdata->u.mgd.tdls_peer);
sta_info_destroy_addr(sdata, sdata->u.mgd.tdls_peer);
eth_zero_addr(sdata->u.mgd.tdls_peer);
}
mutex_unlock(&local->mtx);
}
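/*
 * Build the Extended Capabilities element advertised in TDLS setup frames.
 * The element body is always 8 octets here: octet 4 carries the TDLS
 * channel-switching and TDLS buffer-STA bits, octet 5 the "TDLS supported"
 * bit, and octet 8 the TDLS wider-bandwidth bit (only set when VHT is
 * supported and wider BW is not prohibited for this interface).
 */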
static void ieee80211_tdls_add_ext_capab(struct ieee80211_link_data *link,
struct sk_buff *skb)
{
struct ieee80211_sub_if_data *sdata = link->sdata;
struct ieee80211_local *local = sdata->local;
struct ieee80211_if_managed *ifmgd = &sdata->u.mgd;
bool chan_switch = local->hw.wiphy->features &
NL80211_FEATURE_TDLS_CHANNEL_SWITCH;
bool wider_band = ieee80211_hw_check(&local->hw, TDLS_WIDER_BW) &&
!ifmgd->tdls_wider_bw_prohibited;
bool buffer_sta = ieee80211_hw_check(&local->hw,
SUPPORTS_TDLS_BUFFER_STA);
struct ieee80211_supported_band *sband = ieee80211_get_link_sband(link);
bool vht = sband && sband->vht_cap.vht_supported;
u8 *pos = skb_put(skb, 10);
*pos++ = WLAN_EID_EXT_CAPABILITY;
*pos++ = 8; /* len */
*pos++ = 0x0;
*pos++ = 0x0;
*pos++ = 0x0;
*pos++ = (chan_switch ? WLAN_EXT_CAPA4_TDLS_CHAN_SWITCH : 0) |
(buffer_sta ? WLAN_EXT_CAPA4_TDLS_BUFFER_STA : 0);
*pos++ = WLAN_EXT_CAPA5_TDLS_ENABLED;
*pos++ = 0;
*pos++ = 0;
*pos++ = (vht && wider_band) ? WLAN_EXT_CAPA8_TDLS_WIDE_BW_ENABLED : 0;
}
static u8
ieee80211_tdls_add_subband(struct ieee80211_sub_if_data *sdata,
struct sk_buff *skb, u16 start, u16 end,
u16 spacing)
{
u8 subband_cnt = 0, ch_cnt = 0;
struct ieee80211_channel *ch;
struct cfg80211_chan_def chandef;
int i, subband_start;
struct wiphy *wiphy = sdata->local->hw.wiphy;
for (i = start; i <= end; i += spacing) {
if (!ch_cnt)
subband_start = i;
ch = ieee80211_get_channel(sdata->local->hw.wiphy, i);
if (ch) {
/* we will be active on the channel */
cfg80211_chandef_create(&chandef, ch,
NL80211_CHAN_NO_HT);
if (cfg80211_reg_can_beacon_relax(wiphy, &chandef,
sdata->wdev.iftype)) {
ch_cnt++;
/*
* check if the next channel is also part of
* this allowed range
*/
continue;
}
}
/*
* we've reached the end of a range, with allowed channels
* found
*/
if (ch_cnt) {
u8 *pos = skb_put(skb, 2);
*pos++ = ieee80211_frequency_to_channel(subband_start);
*pos++ = ch_cnt;
subband_cnt++;
ch_cnt = 0;
}
}
/* the last run of allowed channels reached the end of the range - add it */
if (ch_cnt) {
u8 *pos = skb_put(skb, 2);
*pos++ = ieee80211_frequency_to_channel(subband_start);
*pos++ = ch_cnt;
subband_cnt++;
}
return subband_cnt;
}
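/*
 * The Supported Channels element body is a series of (first channel number,
 * number of channels) pairs, one per contiguous run of allowed channels.
 * For example, if every channel in 2412-2472 MHz (channels 1-13) may be
 * active, the 2 GHz range below contributes the single pair (1, 13); a
 * disallowed channel in the middle would split it into two pairs.
 */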
static void
ieee80211_tdls_add_supp_channels(struct ieee80211_sub_if_data *sdata,
struct sk_buff *skb)
{
/*
* Add possible channels for TDLS. These are channels that are allowed
* to be active.
*/
u8 subband_cnt;
u8 *pos = skb_put(skb, 2);
*pos++ = WLAN_EID_SUPPORTED_CHANNELS;
/*
* 5 GHz and 2 GHz channel numbers can overlap. Ignore this for now, as
* this doesn't happen in real world scenarios.
*/
/* 2GHz, with 5MHz spacing */
subband_cnt = ieee80211_tdls_add_subband(sdata, skb, 2412, 2472, 5);
/* 5GHz, with 20MHz spacing */
subband_cnt += ieee80211_tdls_add_subband(sdata, skb, 5000, 5825, 20);
/* length */
*pos = 2 * subband_cnt;
}
static void ieee80211_tdls_add_oper_classes(struct ieee80211_link_data *link,
struct sk_buff *skb)
{
u8 *pos;
u8 op_class;
if (!ieee80211_chandef_to_operating_class(&link->conf->chandef,
&op_class))
return;
pos = skb_put(skb, 4);
*pos++ = WLAN_EID_SUPPORTED_REGULATORY_CLASSES;
*pos++ = 2; /* len */
*pos++ = op_class;
*pos++ = op_class; /* give current operating class as alternate too */
}
static void ieee80211_tdls_add_bss_coex_ie(struct sk_buff *skb)
{
u8 *pos = skb_put(skb, 3);
*pos++ = WLAN_EID_BSS_COEX_2040;
*pos++ = 1; /* len */
*pos++ = WLAN_BSS_COEX_INFORMATION_REQUEST;
}
static u16 ieee80211_get_tdls_sta_capab(struct ieee80211_link_data *link,
u16 status_code)
{
struct ieee80211_supported_band *sband;
/* The capability will be 0 when sending a failure code */
if (status_code != 0)
return 0;
sband = ieee80211_get_link_sband(link);
if (sband && sband->band == NL80211_BAND_2GHZ) {
return WLAN_CAPABILITY_SHORT_SLOT_TIME |
WLAN_CAPABILITY_SHORT_PREAMBLE;
}
return 0;
}
static void ieee80211_tdls_add_link_ie(struct ieee80211_link_data *link,
struct sk_buff *skb, const u8 *peer,
bool initiator)
{
struct ieee80211_sub_if_data *sdata = link->sdata;
struct ieee80211_tdls_lnkie *lnkid;
const u8 *init_addr, *rsp_addr;
if (initiator) {
init_addr = sdata->vif.addr;
rsp_addr = peer;
} else {
init_addr = peer;
rsp_addr = sdata->vif.addr;
}
lnkid = skb_put(skb, sizeof(struct ieee80211_tdls_lnkie));
lnkid->ie_type = WLAN_EID_LINK_ID;
lnkid->ie_len = sizeof(struct ieee80211_tdls_lnkie) - 2;
memcpy(lnkid->bssid, link->u.mgd.bssid, ETH_ALEN);
memcpy(lnkid->init_sta, init_addr, ETH_ALEN);
memcpy(lnkid->resp_sta, rsp_addr, ETH_ALEN);
}
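/*
 * Advertise our own association ID (assigned by the AP) in an AID element.
 * It is added to setup request/response frames when VHT, HE or EHT is
 * supported, see ieee80211_tdls_add_setup_start_ies().
 */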
static void
ieee80211_tdls_add_aid(struct ieee80211_sub_if_data *sdata, struct sk_buff *skb)
{
u8 *pos = skb_put(skb, 4);
*pos++ = WLAN_EID_AID;
*pos++ = 2; /* len */
put_unaligned_le16(sdata->vif.cfg.aid, pos);
}
/* translate numbering in the WMM parameter IE to the mac80211 notation */
static enum ieee80211_ac_numbers ieee80211_ac_from_wmm(int ac)
{
switch (ac) {
default:
WARN_ON_ONCE(1);
fallthrough;
case 0:
return IEEE80211_AC_BE;
case 1:
return IEEE80211_AC_BK;
case 2:
return IEEE80211_AC_VI;
case 3:
return IEEE80211_AC_VO;
}
}
static u8 ieee80211_wmm_aci_aifsn(int aifsn, bool acm, int aci)
{
u8 ret;
ret = aifsn & 0x0f;
if (acm)
ret |= 0x10;
ret |= (aci << 5) & 0x60;
return ret;
}
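/*
 * Pack the contention window limits as exponents: ECWmin in the low nibble,
 * ECWmax in the high nibble, with ECW = log2(CW + 1). For example,
 * cw_min = 15 and cw_max = 1023 encode as 0xa4 (ECWmin = 4, ECWmax = 10).
 */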
static u8 ieee80211_wmm_ecw(u16 cw_min, u16 cw_max)
{
return ((ilog2(cw_min + 1) << 0x0) & 0x0f) |
((ilog2(cw_max + 1) << 0x4) & 0xf0);
}
static void ieee80211_tdls_add_wmm_param_ie(struct ieee80211_sub_if_data *sdata,
struct sk_buff *skb)
{
struct ieee80211_wmm_param_ie *wmm;
struct ieee80211_tx_queue_params *txq;
int i;
wmm = skb_put_zero(skb, sizeof(*wmm));
wmm->element_id = WLAN_EID_VENDOR_SPECIFIC;
wmm->len = sizeof(*wmm) - 2;
wmm->oui[0] = 0x00; /* Microsoft OUI 00:50:F2 */
wmm->oui[1] = 0x50;
wmm->oui[2] = 0xf2;
wmm->oui_type = 2; /* WME */
wmm->oui_subtype = 1; /* WME param */
wmm->version = 1; /* WME ver */
wmm->qos_info = 0; /* U-APSD not in use */
/*
* Use the EDCA parameters defined for the BSS, or default if the AP
* doesn't support it, as mandated by 802.11-2012 section 10.22.4
*/
for (i = 0; i < IEEE80211_NUM_ACS; i++) {
txq = &sdata->deflink.tx_conf[ieee80211_ac_from_wmm(i)];
wmm->ac[i].aci_aifsn = ieee80211_wmm_aci_aifsn(txq->aifs,
txq->acm, i);
wmm->ac[i].cw = ieee80211_wmm_ecw(txq->cw_min, txq->cw_max);
wmm->ac[i].txop_limit = cpu_to_le16(txq->txop);
}
}
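/*
 * Try to widen the TDLS chandef for a VHT peer: a chandef whose primary
 * channel lies within +/-30 MHz of one of the 80 MHz centre frequencies
 * below is first expanded to 80 MHz (e.g. channel 36 at 5180 MHz becomes
 * the 80 MHz block centred on 5210 MHz), then downgraded again until it is
 * no wider than the peer supports and, if it grew, permitted by regulatory
 * rules.
 */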
static void
ieee80211_tdls_chandef_vht_upgrade(struct ieee80211_sub_if_data *sdata,
struct sta_info *sta)
{
/* IEEE802.11ac-2013 Table E-4 */
u16 centers_80mhz[] = { 5210, 5290, 5530, 5610, 5690, 5775 };
struct cfg80211_chan_def uc = sta->tdls_chandef;
enum nl80211_chan_width max_width =
ieee80211_sta_cap_chan_bw(&sta->deflink);
int i;
/* only support upgrading non-narrow channels, up to 80 MHz */
if (max_width == NL80211_CHAN_WIDTH_5 ||
max_width == NL80211_CHAN_WIDTH_10)
return;
if (max_width > NL80211_CHAN_WIDTH_80)
max_width = NL80211_CHAN_WIDTH_80;
if (uc.width >= max_width)
return;
/*
* Channel usage constraints in the IEEE802.11ac-2013 specification only
* allow expanding a 20MHz channel to 80MHz in a single way. In
* addition, there are no 40MHz allowed channels that are not part of
* the allowed 80MHz range in the 5GHz spectrum (the relevant one here).
*/
for (i = 0; i < ARRAY_SIZE(centers_80mhz); i++)
if (abs(uc.chan->center_freq - centers_80mhz[i]) <= 30) {
uc.center_freq1 = centers_80mhz[i];
uc.center_freq2 = 0;
uc.width = NL80211_CHAN_WIDTH_80;
break;
}
if (!uc.center_freq1)
return;
/* proceed to downgrade the chandef until usable or the same as AP BW */
while (uc.width > max_width ||
(uc.width > sta->tdls_chandef.width &&
!cfg80211_reg_can_beacon_relax(sdata->local->hw.wiphy, &uc,
sdata->wdev.iftype)))
ieee80211_chandef_downgrade(&uc);
if (!cfg80211_chandef_identical(&uc, &sta->tdls_chandef)) {
tdls_dbg(sdata, "TDLS ch width upgraded %d -> %d\n",
sta->tdls_chandef.width, uc.width);
/*
* the station is not yet authorized when BW upgrade is done,
* locking is not required
*/
sta->tdls_chandef = uc;
}
}
static void
ieee80211_tdls_add_setup_start_ies(struct ieee80211_link_data *link,
struct sk_buff *skb, const u8 *peer,
u8 action_code, bool initiator,
const u8 *extra_ies, size_t extra_ies_len)
{
struct ieee80211_sub_if_data *sdata = link->sdata;
struct ieee80211_supported_band *sband;
struct ieee80211_local *local = sdata->local;
struct ieee80211_sta_ht_cap ht_cap;
struct ieee80211_sta_vht_cap vht_cap;
const struct ieee80211_sta_he_cap *he_cap;
const struct ieee80211_sta_eht_cap *eht_cap;
struct sta_info *sta = NULL;
size_t offset = 0, noffset;
u8 *pos;
sband = ieee80211_get_link_sband(link);
if (WARN_ON_ONCE(!sband))
return;
ieee80211_add_srates_ie(sdata, skb, false, sband->band);
ieee80211_add_ext_srates_ie(sdata, skb, false, sband->band);
ieee80211_tdls_add_supp_channels(sdata, skb);
/* add any custom IEs that go before Extended Capabilities */
if (extra_ies_len) {
static const u8 before_ext_cap[] = {
WLAN_EID_SUPP_RATES,
WLAN_EID_COUNTRY,
WLAN_EID_EXT_SUPP_RATES,
WLAN_EID_SUPPORTED_CHANNELS,
WLAN_EID_RSN,
};
noffset = ieee80211_ie_split(extra_ies, extra_ies_len,
before_ext_cap,
ARRAY_SIZE(before_ext_cap),
offset);
skb_put_data(skb, extra_ies + offset, noffset - offset);
offset = noffset;
}
ieee80211_tdls_add_ext_capab(link, skb);
/* add the QoS element if we support it */
if (local->hw.queues >= IEEE80211_NUM_ACS &&
action_code != WLAN_PUB_ACTION_TDLS_DISCOVER_RES)
ieee80211_add_wmm_info_ie(skb_put(skb, 9), 0); /* no U-APSD */
/* add any custom IEs that go before HT capabilities */
if (extra_ies_len) {
static const u8 before_ht_cap[] = {
WLAN_EID_SUPP_RATES,
WLAN_EID_COUNTRY,
WLAN_EID_EXT_SUPP_RATES,
WLAN_EID_SUPPORTED_CHANNELS,
WLAN_EID_RSN,
WLAN_EID_EXT_CAPABILITY,
WLAN_EID_QOS_CAPA,
WLAN_EID_FAST_BSS_TRANSITION,
WLAN_EID_TIMEOUT_INTERVAL,
WLAN_EID_SUPPORTED_REGULATORY_CLASSES,
};
noffset = ieee80211_ie_split(extra_ies, extra_ies_len,
before_ht_cap,
ARRAY_SIZE(before_ht_cap),
offset);
skb_put_data(skb, extra_ies + offset, noffset - offset);
offset = noffset;
}
/* we should have the peer STA if we're already responding */
if (action_code == WLAN_TDLS_SETUP_RESPONSE) {
sta = sta_info_get(sdata, peer);
if (WARN_ON_ONCE(!sta))
return;
sta->tdls_chandef = link->conf->chandef;
}
ieee80211_tdls_add_oper_classes(link, skb);
/*
* with TDLS we can switch channels, and HT-caps are not necessarily
* the same on all bands. The specification limits the setup to a
* single HT-cap, so use the current band for now.
*/
memcpy(&ht_cap, &sband->ht_cap, sizeof(ht_cap));
if ((action_code == WLAN_TDLS_SETUP_REQUEST ||
action_code == WLAN_PUB_ACTION_TDLS_DISCOVER_RES) &&
ht_cap.ht_supported) {
ieee80211_apply_htcap_overrides(sdata, &ht_cap);
/* disable SMPS in TDLS initiator */
ht_cap.cap |= WLAN_HT_CAP_SM_PS_DISABLED
<< IEEE80211_HT_CAP_SM_PS_SHIFT;
pos = skb_put(skb, sizeof(struct ieee80211_ht_cap) + 2);
ieee80211_ie_build_ht_cap(pos, &ht_cap, ht_cap.cap);
} else if (action_code == WLAN_TDLS_SETUP_RESPONSE &&
ht_cap.ht_supported && sta->sta.deflink.ht_cap.ht_supported) {
/* the peer caps are already intersected with our own */
memcpy(&ht_cap, &sta->sta.deflink.ht_cap, sizeof(ht_cap));
pos = skb_put(skb, sizeof(struct ieee80211_ht_cap) + 2);
ieee80211_ie_build_ht_cap(pos, &ht_cap, ht_cap.cap);
}
if (ht_cap.ht_supported &&
(ht_cap.cap & IEEE80211_HT_CAP_SUP_WIDTH_20_40))
ieee80211_tdls_add_bss_coex_ie(skb);
ieee80211_tdls_add_link_ie(link, skb, peer, initiator);
/* add any custom IEs that go before VHT capabilities */
if (extra_ies_len) {
static const u8 before_vht_cap[] = {
WLAN_EID_SUPP_RATES,
WLAN_EID_COUNTRY,
WLAN_EID_EXT_SUPP_RATES,
WLAN_EID_SUPPORTED_CHANNELS,
WLAN_EID_RSN,
WLAN_EID_EXT_CAPABILITY,
WLAN_EID_QOS_CAPA,
WLAN_EID_FAST_BSS_TRANSITION,
WLAN_EID_TIMEOUT_INTERVAL,
WLAN_EID_SUPPORTED_REGULATORY_CLASSES,
WLAN_EID_MULTI_BAND,
};
noffset = ieee80211_ie_split(extra_ies, extra_ies_len,
before_vht_cap,
ARRAY_SIZE(before_vht_cap),
offset);
skb_put_data(skb, extra_ies + offset, noffset - offset);
offset = noffset;
}
/* add AID if VHT, HE or EHT capabilities supported */
memcpy(&vht_cap, &sband->vht_cap, sizeof(vht_cap));
he_cap = ieee80211_get_he_iftype_cap_vif(sband, &sdata->vif);
eht_cap = ieee80211_get_eht_iftype_cap_vif(sband, &sdata->vif);
if ((vht_cap.vht_supported || he_cap || eht_cap) &&
(action_code == WLAN_TDLS_SETUP_REQUEST ||
action_code == WLAN_TDLS_SETUP_RESPONSE))
ieee80211_tdls_add_aid(sdata, skb);
/* build the VHT-cap similarly to the HT-cap */
if ((action_code == WLAN_TDLS_SETUP_REQUEST ||
action_code == WLAN_PUB_ACTION_TDLS_DISCOVER_RES) &&
vht_cap.vht_supported) {
ieee80211_apply_vhtcap_overrides(sdata, &vht_cap);
pos = skb_put(skb, sizeof(struct ieee80211_vht_cap) + 2);
ieee80211_ie_build_vht_cap(pos, &vht_cap, vht_cap.cap);
} else if (action_code == WLAN_TDLS_SETUP_RESPONSE &&
vht_cap.vht_supported && sta->sta.deflink.vht_cap.vht_supported) {
/* the peer caps are already intersected with our own */
memcpy(&vht_cap, &sta->sta.deflink.vht_cap, sizeof(vht_cap));
pos = skb_put(skb, sizeof(struct ieee80211_vht_cap) + 2);
ieee80211_ie_build_vht_cap(pos, &vht_cap, vht_cap.cap);
/*
* if both peers support WIDER_BW, we can expand the chandef to
* a wider compatible one, up to 80MHz
*/
if (test_sta_flag(sta, WLAN_STA_TDLS_WIDER_BW))
ieee80211_tdls_chandef_vht_upgrade(sdata, sta);
}
/* add any custom IEs that go before HE capabilities */
if (extra_ies_len) {
static const u8 before_he_cap[] = {
WLAN_EID_EXTENSION,
WLAN_EID_EXT_FILS_REQ_PARAMS,
WLAN_EID_AP_CSN,
};
noffset = ieee80211_ie_split(extra_ies, extra_ies_len,
before_he_cap,
ARRAY_SIZE(before_he_cap),
offset);
skb_put_data(skb, extra_ies + offset, noffset - offset);
offset = noffset;
}
/* build the HE-cap from sband */
if (he_cap &&
(action_code == WLAN_TDLS_SETUP_REQUEST ||
action_code == WLAN_TDLS_SETUP_RESPONSE ||
action_code == WLAN_PUB_ACTION_TDLS_DISCOVER_RES)) {
__le16 he_6ghz_capa;
u8 cap_size;
cap_size =
2 + 1 + sizeof(he_cap->he_cap_elem) +
ieee80211_he_mcs_nss_size(&he_cap->he_cap_elem) +
ieee80211_he_ppe_size(he_cap->ppe_thres[0],
he_cap->he_cap_elem.phy_cap_info);
pos = skb_put(skb, cap_size);
pos = ieee80211_ie_build_he_cap(0, pos, he_cap, pos + cap_size);
/* Build HE 6 GHz capa IE from sband */
if (sband->band == NL80211_BAND_6GHZ) {
cap_size = 2 + 1 + sizeof(struct ieee80211_he_6ghz_capa);
pos = skb_put(skb, cap_size);
he_6ghz_capa =
ieee80211_get_he_6ghz_capa_vif(sband, &sdata->vif);
pos = ieee80211_write_he_6ghz_cap(pos, he_6ghz_capa,
pos + cap_size);
}
}
/* add any custom IEs that go before EHT capabilities */
if (extra_ies_len) {
static const u8 before_he_cap[] = {
WLAN_EID_EXTENSION,
WLAN_EID_EXT_FILS_REQ_PARAMS,
WLAN_EID_AP_CSN,
};
noffset = ieee80211_ie_split(extra_ies, extra_ies_len,
before_he_cap,
ARRAY_SIZE(before_he_cap),
offset);
skb_put_data(skb, extra_ies + offset, noffset - offset);
offset = noffset;
}
/* build the EHT-cap from sband */
if (he_cap && eht_cap &&
(action_code == WLAN_TDLS_SETUP_REQUEST ||
action_code == WLAN_TDLS_SETUP_RESPONSE ||
action_code == WLAN_PUB_ACTION_TDLS_DISCOVER_RES)) {
u8 cap_size;
cap_size =
2 + 1 + sizeof(eht_cap->eht_cap_elem) +
ieee80211_eht_mcs_nss_size(&he_cap->he_cap_elem,
&eht_cap->eht_cap_elem, false) +
ieee80211_eht_ppe_size(eht_cap->eht_ppe_thres[0],
eht_cap->eht_cap_elem.phy_cap_info);
pos = skb_put(skb, cap_size);
ieee80211_ie_build_eht_cap(pos, he_cap, eht_cap, pos + cap_size, false);
}
/* add any remaining IEs */
if (extra_ies_len) {
noffset = extra_ies_len;
skb_put_data(skb, extra_ies + offset, noffset - offset);
}
}
static void
ieee80211_tdls_add_setup_cfm_ies(struct ieee80211_link_data *link,
struct sk_buff *skb, const u8 *peer,
bool initiator, const u8 *extra_ies,
size_t extra_ies_len)
{
struct ieee80211_sub_if_data *sdata = link->sdata;
struct ieee80211_local *local = sdata->local;
size_t offset = 0, noffset;
struct sta_info *sta, *ap_sta;
struct ieee80211_supported_band *sband;
u8 *pos;
sband = ieee80211_get_link_sband(link);
if (WARN_ON_ONCE(!sband))
return;
sta = sta_info_get(sdata, peer);
ap_sta = sta_info_get(sdata, sdata->vif.cfg.ap_addr);
if (WARN_ON_ONCE(!sta || !ap_sta))
return;
sta->tdls_chandef = link->conf->chandef;
/* add any custom IEs that go before the QoS IE */
if (extra_ies_len) {
static const u8 before_qos[] = {
WLAN_EID_RSN,
};
noffset = ieee80211_ie_split(extra_ies, extra_ies_len,
before_qos,
ARRAY_SIZE(before_qos),
offset);
skb_put_data(skb, extra_ies + offset, noffset - offset);
offset = noffset;
}
/* add the QoS param IE if both the peer and we support it */
if (local->hw.queues >= IEEE80211_NUM_ACS && sta->sta.wme)
ieee80211_tdls_add_wmm_param_ie(sdata, skb);
/* add any custom IEs that go before HT operation */
if (extra_ies_len) {
static const u8 before_ht_op[] = {
WLAN_EID_RSN,
WLAN_EID_QOS_CAPA,
WLAN_EID_FAST_BSS_TRANSITION,
WLAN_EID_TIMEOUT_INTERVAL,
};
noffset = ieee80211_ie_split(extra_ies, extra_ies_len,
before_ht_op,
ARRAY_SIZE(before_ht_op),
offset);
skb_put_data(skb, extra_ies + offset, noffset - offset);
offset = noffset;
}
/*
* if HT support is only added in TDLS, we need an HT-operation IE.
* add the IE as required by IEEE802.11-2012 9.23.3.2.
*/
if (!ap_sta->sta.deflink.ht_cap.ht_supported && sta->sta.deflink.ht_cap.ht_supported) {
u16 prot = IEEE80211_HT_OP_MODE_PROTECTION_NONHT_MIXED |
IEEE80211_HT_OP_MODE_NON_GF_STA_PRSNT |
IEEE80211_HT_OP_MODE_NON_HT_STA_PRSNT;
pos = skb_put(skb, 2 + sizeof(struct ieee80211_ht_operation));
ieee80211_ie_build_ht_oper(pos, &sta->sta.deflink.ht_cap,
&link->conf->chandef, prot,
true);
}
ieee80211_tdls_add_link_ie(link, skb, peer, initiator);
/* only include VHT-operation if not on the 2.4GHz band */
if (sband->band != NL80211_BAND_2GHZ &&
sta->sta.deflink.vht_cap.vht_supported) {
/*
* if both peers support WIDER_BW, we can expand the chandef to
* a wider compatible one, up to 80MHz
*/
if (test_sta_flag(sta, WLAN_STA_TDLS_WIDER_BW))
ieee80211_tdls_chandef_vht_upgrade(sdata, sta);
pos = skb_put(skb, 2 + sizeof(struct ieee80211_vht_operation));
ieee80211_ie_build_vht_oper(pos, &sta->sta.deflink.vht_cap,
&sta->tdls_chandef);
}
/* add any remaining IEs */
if (extra_ies_len) {
noffset = extra_ies_len;
skb_put_data(skb, extra_ies + offset, noffset - offset);
}
}
static void
ieee80211_tdls_add_chan_switch_req_ies(struct ieee80211_link_data *link,
struct sk_buff *skb, const u8 *peer,
bool initiator, const u8 *extra_ies,
size_t extra_ies_len, u8 oper_class,
struct cfg80211_chan_def *chandef)
{
struct ieee80211_tdls_data *tf;
size_t offset = 0, noffset;
if (WARN_ON_ONCE(!chandef))
return;
tf = (void *)skb->data;
tf->u.chan_switch_req.target_channel =
ieee80211_frequency_to_channel(chandef->chan->center_freq);
tf->u.chan_switch_req.oper_class = oper_class;
if (extra_ies_len) {
static const u8 before_lnkie[] = {
WLAN_EID_SECONDARY_CHANNEL_OFFSET,
};
noffset = ieee80211_ie_split(extra_ies, extra_ies_len,
before_lnkie,
ARRAY_SIZE(before_lnkie),
offset);
skb_put_data(skb, extra_ies + offset, noffset - offset);
offset = noffset;
}
ieee80211_tdls_add_link_ie(link, skb, peer, initiator);
/* add any remaining IEs */
if (extra_ies_len) {
noffset = extra_ies_len;
skb_put_data(skb, extra_ies + offset, noffset - offset);
}
}
static void
ieee80211_tdls_add_chan_switch_resp_ies(struct ieee80211_link_data *link,
struct sk_buff *skb, const u8 *peer,
u16 status_code, bool initiator,
const u8 *extra_ies,
size_t extra_ies_len)
{
if (status_code == 0)
ieee80211_tdls_add_link_ie(link, skb, peer, initiator);
if (extra_ies_len)
skb_put_data(skb, extra_ies, extra_ies_len);
}
static void ieee80211_tdls_add_ies(struct ieee80211_link_data *link,
struct sk_buff *skb, const u8 *peer,
u8 action_code, u16 status_code,
bool initiator, const u8 *extra_ies,
size_t extra_ies_len, u8 oper_class,
struct cfg80211_chan_def *chandef)
{
switch (action_code) {
case WLAN_TDLS_SETUP_REQUEST:
case WLAN_TDLS_SETUP_RESPONSE:
case WLAN_PUB_ACTION_TDLS_DISCOVER_RES:
if (status_code == 0)
ieee80211_tdls_add_setup_start_ies(link,
skb, peer,
action_code,
initiator,
extra_ies,
extra_ies_len);
break;
case WLAN_TDLS_SETUP_CONFIRM:
if (status_code == 0)
ieee80211_tdls_add_setup_cfm_ies(link, skb, peer,
initiator, extra_ies,
extra_ies_len);
break;
case WLAN_TDLS_TEARDOWN:
case WLAN_TDLS_DISCOVERY_REQUEST:
if (extra_ies_len)
skb_put_data(skb, extra_ies, extra_ies_len);
if (status_code == 0 || action_code == WLAN_TDLS_TEARDOWN)
ieee80211_tdls_add_link_ie(link, skb,
peer, initiator);
break;
case WLAN_TDLS_CHANNEL_SWITCH_REQUEST:
ieee80211_tdls_add_chan_switch_req_ies(link, skb, peer,
initiator, extra_ies,
extra_ies_len,
oper_class, chandef);
break;
case WLAN_TDLS_CHANNEL_SWITCH_RESPONSE:
ieee80211_tdls_add_chan_switch_resp_ies(link, skb, peer,
status_code,
initiator, extra_ies,
extra_ies_len);
break;
}
}
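/*
 * TDLS setup/teardown/discovery-request frames are not management frames:
 * they are encapsulated as data frames with ethertype ETH_P_TDLS and a
 * TDLS payload-type byte, so they can be relayed through the AP while the
 * direct link is not (yet) usable. Only the discovery response, handled by
 * ieee80211_prep_tdls_direct(), is a real public action frame sent
 * directly to the peer.
 */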
static int
ieee80211_prep_tdls_encap_data(struct wiphy *wiphy, struct net_device *dev,
struct ieee80211_link_data *link,
const u8 *peer, u8 action_code, u8 dialog_token,
u16 status_code, struct sk_buff *skb)
{
struct ieee80211_sub_if_data *sdata = IEEE80211_DEV_TO_SUB_IF(dev);
struct ieee80211_tdls_data *tf;
tf = skb_put(skb, offsetof(struct ieee80211_tdls_data, u));
memcpy(tf->da, peer, ETH_ALEN);
memcpy(tf->sa, sdata->vif.addr, ETH_ALEN);
tf->ether_type = cpu_to_be16(ETH_P_TDLS);
tf->payload_type = WLAN_TDLS_SNAP_RFTYPE;
/* network header is after the ethernet header */
skb_set_network_header(skb, ETH_HLEN);
switch (action_code) {
case WLAN_TDLS_SETUP_REQUEST:
tf->category = WLAN_CATEGORY_TDLS;
tf->action_code = WLAN_TDLS_SETUP_REQUEST;
skb_put(skb, sizeof(tf->u.setup_req));
tf->u.setup_req.dialog_token = dialog_token;
tf->u.setup_req.capability =
cpu_to_le16(ieee80211_get_tdls_sta_capab(link,
status_code));
break;
case WLAN_TDLS_SETUP_RESPONSE:
tf->category = WLAN_CATEGORY_TDLS;
tf->action_code = WLAN_TDLS_SETUP_RESPONSE;
skb_put(skb, sizeof(tf->u.setup_resp));
tf->u.setup_resp.status_code = cpu_to_le16(status_code);
tf->u.setup_resp.dialog_token = dialog_token;
tf->u.setup_resp.capability =
cpu_to_le16(ieee80211_get_tdls_sta_capab(link,
status_code));
break;
case WLAN_TDLS_SETUP_CONFIRM:
tf->category = WLAN_CATEGORY_TDLS;
tf->action_code = WLAN_TDLS_SETUP_CONFIRM;
skb_put(skb, sizeof(tf->u.setup_cfm));
tf->u.setup_cfm.status_code = cpu_to_le16(status_code);
tf->u.setup_cfm.dialog_token = dialog_token;
break;
case WLAN_TDLS_TEARDOWN:
tf->category = WLAN_CATEGORY_TDLS;
tf->action_code = WLAN_TDLS_TEARDOWN;
skb_put(skb, sizeof(tf->u.teardown));
tf->u.teardown.reason_code = cpu_to_le16(status_code);
break;
case WLAN_TDLS_DISCOVERY_REQUEST:
tf->category = WLAN_CATEGORY_TDLS;
tf->action_code = WLAN_TDLS_DISCOVERY_REQUEST;
skb_put(skb, sizeof(tf->u.discover_req));
tf->u.discover_req.dialog_token = dialog_token;
break;
case WLAN_TDLS_CHANNEL_SWITCH_REQUEST:
tf->category = WLAN_CATEGORY_TDLS;
tf->action_code = WLAN_TDLS_CHANNEL_SWITCH_REQUEST;
skb_put(skb, sizeof(tf->u.chan_switch_req));
break;
case WLAN_TDLS_CHANNEL_SWITCH_RESPONSE:
tf->category = WLAN_CATEGORY_TDLS;
tf->action_code = WLAN_TDLS_CHANNEL_SWITCH_RESPONSE;
skb_put(skb, sizeof(tf->u.chan_switch_resp));
tf->u.chan_switch_resp.status_code = cpu_to_le16(status_code);
break;
default:
return -EINVAL;
}
return 0;
}
static int
ieee80211_prep_tdls_direct(struct wiphy *wiphy, struct net_device *dev,
const u8 *peer, struct ieee80211_link_data *link,
u8 action_code, u8 dialog_token,
u16 status_code, struct sk_buff *skb)
{
struct ieee80211_sub_if_data *sdata = IEEE80211_DEV_TO_SUB_IF(dev);
struct ieee80211_mgmt *mgmt;
mgmt = skb_put_zero(skb, 24);
memcpy(mgmt->da, peer, ETH_ALEN);
memcpy(mgmt->sa, sdata->vif.addr, ETH_ALEN);
memcpy(mgmt->bssid, link->u.mgd.bssid, ETH_ALEN);
mgmt->frame_control = cpu_to_le16(IEEE80211_FTYPE_MGMT |
IEEE80211_STYPE_ACTION);
switch (action_code) {
case WLAN_PUB_ACTION_TDLS_DISCOVER_RES:
skb_put(skb, 1 + sizeof(mgmt->u.action.u.tdls_discover_resp));
mgmt->u.action.category = WLAN_CATEGORY_PUBLIC;
mgmt->u.action.u.tdls_discover_resp.action_code =
WLAN_PUB_ACTION_TDLS_DISCOVER_RES;
mgmt->u.action.u.tdls_discover_resp.dialog_token =
dialog_token;
mgmt->u.action.u.tdls_discover_resp.capability =
cpu_to_le16(ieee80211_get_tdls_sta_capab(link,
status_code));
break;
default:
return -EINVAL;
}
return 0;
}
static struct sk_buff *
ieee80211_tdls_build_mgmt_packet_data(struct ieee80211_sub_if_data *sdata,
const u8 *peer, int link_id,
u8 action_code, u8 dialog_token,
u16 status_code, bool initiator,
const u8 *extra_ies, size_t extra_ies_len,
u8 oper_class,
struct cfg80211_chan_def *chandef)
{
struct ieee80211_local *local = sdata->local;
struct sk_buff *skb;
int ret;
struct ieee80211_link_data *link;
link_id = link_id >= 0 ? link_id : 0;
rcu_read_lock();
link = rcu_dereference(sdata->link[link_id]);
if (WARN_ON(!link))
goto unlock;
skb = netdev_alloc_skb(sdata->dev,
local->hw.extra_tx_headroom +
max(sizeof(struct ieee80211_mgmt),
sizeof(struct ieee80211_tdls_data)) +
50 + /* supported rates */
10 + /* ext capab */
26 + /* max(WMM-info, WMM-param) */
2 + max(sizeof(struct ieee80211_ht_cap),
sizeof(struct ieee80211_ht_operation)) +
2 + max(sizeof(struct ieee80211_vht_cap),
sizeof(struct ieee80211_vht_operation)) +
2 + 1 + sizeof(struct ieee80211_he_cap_elem) +
sizeof(struct ieee80211_he_mcs_nss_supp) +
IEEE80211_HE_PPE_THRES_MAX_LEN +
2 + 1 + sizeof(struct ieee80211_he_6ghz_capa) +
2 + 1 + sizeof(struct ieee80211_eht_cap_elem) +
sizeof(struct ieee80211_eht_mcs_nss_supp) +
IEEE80211_EHT_PPE_THRES_MAX_LEN +
50 + /* supported channels */
3 + /* 40/20 BSS coex */
4 + /* AID */
4 + /* oper classes */
extra_ies_len +
sizeof(struct ieee80211_tdls_lnkie));
if (!skb)
goto unlock;
skb_reserve(skb, local->hw.extra_tx_headroom);
switch (action_code) {
case WLAN_TDLS_SETUP_REQUEST:
case WLAN_TDLS_SETUP_RESPONSE:
case WLAN_TDLS_SETUP_CONFIRM:
case WLAN_TDLS_TEARDOWN:
case WLAN_TDLS_DISCOVERY_REQUEST:
case WLAN_TDLS_CHANNEL_SWITCH_REQUEST:
case WLAN_TDLS_CHANNEL_SWITCH_RESPONSE:
ret = ieee80211_prep_tdls_encap_data(local->hw.wiphy,
sdata->dev, link, peer,
action_code, dialog_token,
status_code, skb);
break;
case WLAN_PUB_ACTION_TDLS_DISCOVER_RES:
ret = ieee80211_prep_tdls_direct(local->hw.wiphy, sdata->dev,
peer, link, action_code,
dialog_token, status_code,
skb);
break;
default:
ret = -ENOTSUPP;
break;
}
if (ret < 0)
goto fail;
ieee80211_tdls_add_ies(link, skb, peer, action_code, status_code,
initiator, extra_ies, extra_ies_len, oper_class,
chandef);
rcu_read_unlock();
return skb;
fail:
dev_kfree_skb(skb);
unlock:
rcu_read_unlock();
return NULL;
}
static int
ieee80211_tdls_prep_mgmt_packet(struct wiphy *wiphy, struct net_device *dev,
const u8 *peer, int link_id,
u8 action_code, u8 dialog_token,
u16 status_code, u32 peer_capability,
bool initiator, const u8 *extra_ies,
size_t extra_ies_len, u8 oper_class,
struct cfg80211_chan_def *chandef)
{
struct ieee80211_sub_if_data *sdata = IEEE80211_DEV_TO_SUB_IF(dev);
struct sk_buff *skb = NULL;
struct sta_info *sta;
u32 flags = 0;
int ret = 0;
rcu_read_lock();
sta = sta_info_get(sdata, peer);
/* infer the initiator if we can, to support old userspace */
switch (action_code) {
case WLAN_TDLS_SETUP_REQUEST:
if (sta) {
set_sta_flag(sta, WLAN_STA_TDLS_INITIATOR);
sta->sta.tdls_initiator = false;
}
fallthrough;
case WLAN_TDLS_SETUP_CONFIRM:
case WLAN_TDLS_DISCOVERY_REQUEST:
initiator = true;
break;
case WLAN_TDLS_SETUP_RESPONSE:
/*
* In some testing scenarios, we send a request and response.
* Make the last packet sent take effect for the initiator
* value.
*/
if (sta) {
clear_sta_flag(sta, WLAN_STA_TDLS_INITIATOR);
sta->sta.tdls_initiator = true;
}
fallthrough;
case WLAN_PUB_ACTION_TDLS_DISCOVER_RES:
initiator = false;
break;
case WLAN_TDLS_TEARDOWN:
case WLAN_TDLS_CHANNEL_SWITCH_REQUEST:
case WLAN_TDLS_CHANNEL_SWITCH_RESPONSE:
/* any value is ok */
break;
default:
ret = -ENOTSUPP;
break;
}
if (sta && test_sta_flag(sta, WLAN_STA_TDLS_INITIATOR))
initiator = true;
rcu_read_unlock();
if (ret < 0)
goto fail;
skb = ieee80211_tdls_build_mgmt_packet_data(sdata, peer,
link_id, action_code,
dialog_token, status_code,
initiator, extra_ies,
extra_ies_len, oper_class,
chandef);
if (!skb) {
ret = -EINVAL;
goto fail;
}
if (action_code == WLAN_PUB_ACTION_TDLS_DISCOVER_RES) {
ieee80211_tx_skb_tid(sdata, skb, 7, link_id);
return 0;
}
/*
* According to 802.11z: Setup req/resp are sent in AC_BK, otherwise
* we should default to AC_VI.
*/
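/*
 * A priority of 256 + UP asks mac80211's queue-selection code to use that
 * 802.11 user priority directly, so UP 2 maps to AC_BK for setup req/resp
 * and UP 5 maps to AC_VI for everything else.
 */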
switch (action_code) {
case WLAN_TDLS_SETUP_REQUEST:
case WLAN_TDLS_SETUP_RESPONSE:
skb->priority = 256 + 2;
break;
default:
skb->priority = 256 + 5;
break;
}
/*
* Set the WLAN_TDLS_TEARDOWN flag to indicate a teardown in progress.
* Later, if no ACK is returned from peer, we will re-send the teardown
* packet through the AP.
*/
if ((action_code == WLAN_TDLS_TEARDOWN) &&
ieee80211_hw_check(&sdata->local->hw, REPORTS_TX_ACK_STATUS)) {
bool try_resend; /* Should we keep skb for possible resend */
/* If not sending directly to peer - no point in keeping skb */
rcu_read_lock();
sta = sta_info_get(sdata, peer);
try_resend = sta && test_sta_flag(sta, WLAN_STA_TDLS_PEER_AUTH);
rcu_read_unlock();
spin_lock_bh(&sdata->u.mgd.teardown_lock);
if (try_resend && !sdata->u.mgd.teardown_skb) {
/* Mark it as requiring TX status callback */
flags |= IEEE80211_TX_CTL_REQ_TX_STATUS |
IEEE80211_TX_INTFL_MLME_CONN_TX;
/*
* skb is copied since mac80211 will later set
* properties that might not be the same as the AP,
* such as encryption, QoS, addresses, etc.
*
* No problem if skb_copy() fails, so no need to check.
*/
sdata->u.mgd.teardown_skb = skb_copy(skb, GFP_ATOMIC);
sdata->u.mgd.orig_teardown_skb = skb;
}
spin_unlock_bh(&sdata->u.mgd.teardown_lock);
}
/* disable bottom halves when entering the Tx path */
local_bh_disable();
__ieee80211_subif_start_xmit(skb, dev, flags,
IEEE80211_TX_CTRL_MLO_LINK_UNSPEC, NULL);
local_bh_enable();
return ret;
fail:
dev_kfree_skb(skb);
return ret;
}
static int
ieee80211_tdls_mgmt_setup(struct wiphy *wiphy, struct net_device *dev,
const u8 *peer, int link_id,
u8 action_code, u8 dialog_token,
u16 status_code, u32 peer_capability, bool initiator,
const u8 *extra_ies, size_t extra_ies_len)
{
struct ieee80211_sub_if_data *sdata = IEEE80211_DEV_TO_SUB_IF(dev);
struct ieee80211_local *local = sdata->local;
enum ieee80211_smps_mode smps_mode =
sdata->deflink.u.mgd.driver_smps_mode;
int ret;
/* don't support setup with forced SMPS mode that's not off */
if (smps_mode != IEEE80211_SMPS_AUTOMATIC &&
smps_mode != IEEE80211_SMPS_OFF) {
tdls_dbg(sdata, "Aborting TDLS setup due to SMPS mode %d\n",
smps_mode);
return -ENOTSUPP;
}
mutex_lock(&local->mtx);
/* we don't support concurrent TDLS peer setups */
if (!is_zero_ether_addr(sdata->u.mgd.tdls_peer) &&
!ether_addr_equal(sdata->u.mgd.tdls_peer, peer)) {
ret = -EBUSY;
goto out_unlock;
}
/*
* make sure we have a STA representing the peer so we drop or buffer
* non-TDLS-setup frames to the peer. We can't send other packets
* during setup through the AP path.
* Allow error packets to be sent - sometimes we don't even add a STA
* before failing the setup.
*/
if (status_code == 0) {
rcu_read_lock();
if (!sta_info_get(sdata, peer)) {
rcu_read_unlock();
ret = -ENOLINK;
goto out_unlock;
}
rcu_read_unlock();
}
ieee80211_flush_queues(local, sdata, false);
memcpy(sdata->u.mgd.tdls_peer, peer, ETH_ALEN);
mutex_unlock(&local->mtx);
/* we cannot take the mutex while preparing the setup packet */
ret = ieee80211_tdls_prep_mgmt_packet(wiphy, dev, peer,
link_id, action_code,
dialog_token, status_code,
peer_capability, initiator,
extra_ies, extra_ies_len, 0,
NULL);
if (ret < 0) {
mutex_lock(&local->mtx);
eth_zero_addr(sdata->u.mgd.tdls_peer);
mutex_unlock(&local->mtx);
return ret;
}
ieee80211_queue_delayed_work(&sdata->local->hw,
&sdata->u.mgd.tdls_peer_del_work,
TDLS_PEER_SETUP_TIMEOUT);
return 0;
out_unlock:
mutex_unlock(&local->mtx);
return ret;
}
static int
ieee80211_tdls_mgmt_teardown(struct wiphy *wiphy, struct net_device *dev,
const u8 *peer, int link_id,
u8 action_code, u8 dialog_token,
u16 status_code, u32 peer_capability,
bool initiator, const u8 *extra_ies,
size_t extra_ies_len)
{
struct ieee80211_sub_if_data *sdata = IEEE80211_DEV_TO_SUB_IF(dev);
struct ieee80211_local *local = sdata->local;
struct sta_info *sta;
int ret;
/*
* No packets can be transmitted to the peer via the AP during setup -
* the STA is set as a TDLS peer, but is not authorized.
* During teardown, we prevent direct transmissions by stopping the
* queues and flushing all direct packets.
*/
ieee80211_stop_vif_queues(local, sdata,
IEEE80211_QUEUE_STOP_REASON_TDLS_TEARDOWN);
ieee80211_flush_queues(local, sdata, false);
ret = ieee80211_tdls_prep_mgmt_packet(wiphy, dev, peer,
link_id, action_code,
dialog_token, status_code,
peer_capability, initiator,
extra_ies, extra_ies_len, 0,
NULL);
if (ret < 0)
sdata_err(sdata, "Failed sending TDLS teardown packet %d\n",
ret);
/*
* Remove the STA AUTH flag to force further traffic through the AP. If
* the STA was unreachable, it was already removed.
*/
rcu_read_lock();
sta = sta_info_get(sdata, peer);
if (sta)
clear_sta_flag(sta, WLAN_STA_TDLS_PEER_AUTH);
rcu_read_unlock();
ieee80211_wake_vif_queues(local, sdata,
IEEE80211_QUEUE_STOP_REASON_TDLS_TEARDOWN);
return 0;
}
int ieee80211_tdls_mgmt(struct wiphy *wiphy, struct net_device *dev,
const u8 *peer, int link_id,
u8 action_code, u8 dialog_token, u16 status_code,
u32 peer_capability, bool initiator,
const u8 *extra_ies, size_t extra_ies_len)
{
struct ieee80211_sub_if_data *sdata = IEEE80211_DEV_TO_SUB_IF(dev);
int ret;
if (!(wiphy->flags & WIPHY_FLAG_SUPPORTS_TDLS))
return -ENOTSUPP;
/* make sure we are in managed mode, and associated */
if (sdata->vif.type != NL80211_IFTYPE_STATION ||
!sdata->u.mgd.associated)
return -EINVAL;
switch (action_code) {
case WLAN_TDLS_SETUP_REQUEST:
case WLAN_TDLS_SETUP_RESPONSE:
ret = ieee80211_tdls_mgmt_setup(wiphy, dev, peer,
link_id, action_code,
dialog_token, status_code,
peer_capability, initiator,
extra_ies, extra_ies_len);
break;
case WLAN_TDLS_TEARDOWN:
ret = ieee80211_tdls_mgmt_teardown(wiphy, dev, peer, link_id,
action_code, dialog_token,
status_code,
peer_capability, initiator,
extra_ies, extra_ies_len);
break;
case WLAN_TDLS_DISCOVERY_REQUEST:
/*
* Protect the discovery so we can hear the TDLS discovery
* response frame. It is transmitted directly and not buffered
* by the AP.
*/
drv_mgd_protect_tdls_discover(sdata->local, sdata);
fallthrough;
case WLAN_TDLS_SETUP_CONFIRM:
case WLAN_PUB_ACTION_TDLS_DISCOVER_RES:
/* no special handling */
ret = ieee80211_tdls_prep_mgmt_packet(wiphy, dev, peer,
link_id, action_code,
dialog_token,
status_code,
peer_capability,
initiator, extra_ies,
extra_ies_len, 0, NULL);
break;
default:
ret = -EOPNOTSUPP;
break;
}
tdls_dbg(sdata, "TDLS mgmt action %d peer %pM link_id %d status %d\n",
action_code, peer, link_id, ret);
return ret;
}
static void iee80211_tdls_recalc_chanctx(struct ieee80211_sub_if_data *sdata,
struct sta_info *sta)
{
struct ieee80211_local *local = sdata->local;
struct ieee80211_chanctx_conf *conf;
struct ieee80211_chanctx *ctx;
enum nl80211_chan_width width;
struct ieee80211_supported_band *sband;
mutex_lock(&local->chanctx_mtx);
conf = rcu_dereference_protected(sdata->vif.bss_conf.chanctx_conf,
lockdep_is_held(&local->chanctx_mtx));
if (conf) {
width = conf->def.width;
sband = local->hw.wiphy->bands[conf->def.chan->band];
ctx = container_of(conf, struct ieee80211_chanctx, conf);
ieee80211_recalc_chanctx_chantype(local, ctx);
/* if width changed and a peer is given, update its BW */
if (width != conf->def.width && sta &&
test_sta_flag(sta, WLAN_STA_TDLS_WIDER_BW)) {
enum ieee80211_sta_rx_bandwidth bw;
bw = ieee80211_chan_width_to_rx_bw(conf->def.width);
bw = min(bw, ieee80211_sta_cap_rx_bw(&sta->deflink));
if (bw != sta->sta.deflink.bandwidth) {
sta->sta.deflink.bandwidth = bw;
rate_control_rate_update(local, sband, sta, 0,
IEEE80211_RC_BW_CHANGED);
/*
* if a TDLS peer BW was updated, we need to
* recalc the chandef width again, to get the
* correct chanctx min_def
*/
ieee80211_recalc_chanctx_chantype(local, ctx);
}
}
}
mutex_unlock(&local->chanctx_mtx);
}
static int iee80211_tdls_have_ht_peers(struct ieee80211_sub_if_data *sdata)
{
struct sta_info *sta;
bool result = false;
rcu_read_lock();
list_for_each_entry_rcu(sta, &sdata->local->sta_list, list) {
if (!sta->sta.tdls || sta->sdata != sdata || !sta->uploaded ||
!test_sta_flag(sta, WLAN_STA_AUTHORIZED) ||
!test_sta_flag(sta, WLAN_STA_TDLS_PEER_AUTH) ||
!sta->sta.deflink.ht_cap.ht_supported)
continue;
result = true;
break;
}
rcu_read_unlock();
return result;
}
static void
iee80211_tdls_recalc_ht_protection(struct ieee80211_sub_if_data *sdata,
struct sta_info *sta)
{
bool tdls_ht;
u16 protection = IEEE80211_HT_OP_MODE_PROTECTION_NONHT_MIXED |
IEEE80211_HT_OP_MODE_NON_GF_STA_PRSNT |
IEEE80211_HT_OP_MODE_NON_HT_STA_PRSNT;
u16 opmode;
/* Nothing to do if the BSS connection uses HT */
if (!(sdata->deflink.u.mgd.conn_flags & IEEE80211_CONN_DISABLE_HT))
return;
tdls_ht = (sta && sta->sta.deflink.ht_cap.ht_supported) ||
iee80211_tdls_have_ht_peers(sdata);
opmode = sdata->vif.bss_conf.ht_operation_mode;
if (tdls_ht)
opmode |= protection;
else
opmode &= ~protection;
if (opmode == sdata->vif.bss_conf.ht_operation_mode)
return;
sdata->vif.bss_conf.ht_operation_mode = opmode;
ieee80211_link_info_change_notify(sdata, &sdata->deflink,
BSS_CHANGED_HT);
}
int ieee80211_tdls_oper(struct wiphy *wiphy, struct net_device *dev,
const u8 *peer, enum nl80211_tdls_operation oper)
{
struct sta_info *sta;
struct ieee80211_sub_if_data *sdata = IEEE80211_DEV_TO_SUB_IF(dev);
struct ieee80211_local *local = sdata->local;
int ret;
if (!(wiphy->flags & WIPHY_FLAG_SUPPORTS_TDLS))
return -ENOTSUPP;
if (sdata->vif.type != NL80211_IFTYPE_STATION)
return -EINVAL;
switch (oper) {
case NL80211_TDLS_ENABLE_LINK:
case NL80211_TDLS_DISABLE_LINK:
break;
case NL80211_TDLS_TEARDOWN:
case NL80211_TDLS_SETUP:
case NL80211_TDLS_DISCOVERY_REQ:
/* We don't support in-driver setup/teardown/discovery */
return -ENOTSUPP;
}
/* protect possible bss_conf changes and avoid concurrency in
* ieee80211_bss_info_change_notify()
*/
sdata_lock(sdata);
mutex_lock(&local->mtx);
tdls_dbg(sdata, "TDLS oper %d peer %pM\n", oper, peer);
switch (oper) {
case NL80211_TDLS_ENABLE_LINK:
if (sdata->vif.bss_conf.csa_active) {
tdls_dbg(sdata, "TDLS: disallow link during CSA\n");
ret = -EBUSY;
break;
}
mutex_lock(&local->sta_mtx);
sta = sta_info_get(sdata, peer);
if (!sta) {
mutex_unlock(&local->sta_mtx);
ret = -ENOLINK;
break;
}
iee80211_tdls_recalc_chanctx(sdata, sta);
iee80211_tdls_recalc_ht_protection(sdata, sta);
set_sta_flag(sta, WLAN_STA_TDLS_PEER_AUTH);
mutex_unlock(&local->sta_mtx);
WARN_ON_ONCE(is_zero_ether_addr(sdata->u.mgd.tdls_peer) ||
!ether_addr_equal(sdata->u.mgd.tdls_peer, peer));
ret = 0;
break;
case NL80211_TDLS_DISABLE_LINK:
/*
* The teardown message in ieee80211_tdls_mgmt_teardown() was
* created while the queues were stopped, so it might still be
* pending. Before flushing the queues we need to be sure the
* message is handled by the tasklet handling pending messages,
* otherwise we might start destroying the station before
* sending the teardown packet.
* Note that this only forces the tasklet to flush pendings -
* not to stop the tasklet from rescheduling itself.
*/
tasklet_kill(&local->tx_pending_tasklet);
/* flush a potentially queued teardown packet */
ieee80211_flush_queues(local, sdata, false);
ret = sta_info_destroy_addr(sdata, peer);
mutex_lock(&local->sta_mtx);
iee80211_tdls_recalc_ht_protection(sdata, NULL);
mutex_unlock(&local->sta_mtx);
iee80211_tdls_recalc_chanctx(sdata, NULL);
break;
default:
ret = -ENOTSUPP;
break;
}
if (ret == 0 && ether_addr_equal(sdata->u.mgd.tdls_peer, peer)) {
cancel_delayed_work(&sdata->u.mgd.tdls_peer_del_work);
eth_zero_addr(sdata->u.mgd.tdls_peer);
}
if (ret == 0)
wiphy_work_queue(sdata->local->hw.wiphy,
&sdata->deflink.u.mgd.request_smps_work);
mutex_unlock(&local->mtx);
sdata_unlock(sdata);
return ret;
}
void ieee80211_tdls_oper_request(struct ieee80211_vif *vif, const u8 *peer,
enum nl80211_tdls_operation oper,
u16 reason_code, gfp_t gfp)
{
struct ieee80211_sub_if_data *sdata = vif_to_sdata(vif);
if (vif->type != NL80211_IFTYPE_STATION || !vif->cfg.assoc) {
sdata_err(sdata, "Discarding TDLS oper %d - not STA or disconnected\n",
oper);
return;
}
cfg80211_tdls_oper_request(sdata->dev, peer, oper, reason_code, gfp);
}
EXPORT_SYMBOL(ieee80211_tdls_oper_request);
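/*
 * Write a Channel Switch Timing element into buf: element ID, a length of
 * sizeof(struct ieee80211_ch_switch_timing) (two 16-bit fields), then the
 * switch time and switch timeout as little-endian values. The template
 * builders below pass zeros; the driver later fills in the real timing.
 */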
static void
iee80211_tdls_add_ch_switch_timing(u8 *buf, u16 switch_time, u16 switch_timeout)
{
struct ieee80211_ch_switch_timing *ch_sw;
*buf++ = WLAN_EID_CHAN_SWITCH_TIMING;
*buf++ = sizeof(struct ieee80211_ch_switch_timing);
ch_sw = (void *)buf;
ch_sw->switch_time = cpu_to_le16(switch_time);
ch_sw->switch_timeout = cpu_to_le16(switch_timeout);
}
/* find switch timing IE in SKB ready for Tx */
static const u8 *ieee80211_tdls_find_sw_timing_ie(struct sk_buff *skb)
{
struct ieee80211_tdls_data *tf;
const u8 *ie_start;
/*
* Get the offset for the new location of the switch timing IE.
* The SKB network header will now point to the "payload_type"
* element of the TDLS data frame struct.
*/
tf = container_of(skb->data + skb_network_offset(skb),
struct ieee80211_tdls_data, payload_type);
ie_start = tf->u.chan_switch_req.variable;
return cfg80211_find_ie(WLAN_EID_CHAN_SWITCH_TIMING, ie_start,
skb->len - (ie_start - skb->data));
}
static struct sk_buff *
ieee80211_tdls_ch_sw_tmpl_get(struct sta_info *sta, u8 oper_class,
struct cfg80211_chan_def *chandef,
u32 *ch_sw_tm_ie_offset)
{
struct ieee80211_sub_if_data *sdata = sta->sdata;
u8 extra_ies[2 + sizeof(struct ieee80211_sec_chan_offs_ie) +
2 + sizeof(struct ieee80211_ch_switch_timing)];
int extra_ies_len = 2 + sizeof(struct ieee80211_ch_switch_timing);
u8 *pos = extra_ies;
struct sk_buff *skb;
int link_id = sta->sta.valid_links ? ffs(sta->sta.valid_links) - 1 : 0;
/*
* if chandef points to a wide channel add a Secondary-Channel
* Offset information element
*/
if (chandef->width == NL80211_CHAN_WIDTH_40) {
struct ieee80211_sec_chan_offs_ie *sec_chan_ie;
bool ht40plus;
*pos++ = WLAN_EID_SECONDARY_CHANNEL_OFFSET;
*pos++ = sizeof(*sec_chan_ie);
sec_chan_ie = (void *)pos;
ht40plus = cfg80211_get_chandef_type(chandef) ==
NL80211_CHAN_HT40PLUS;
sec_chan_ie->sec_chan_offs = ht40plus ?
IEEE80211_HT_PARAM_CHA_SEC_ABOVE :
IEEE80211_HT_PARAM_CHA_SEC_BELOW;
pos += sizeof(*sec_chan_ie);
extra_ies_len += 2 + sizeof(struct ieee80211_sec_chan_offs_ie);
}
/* just set the values to 0, this is a template */
iee80211_tdls_add_ch_switch_timing(pos, 0, 0);
skb = ieee80211_tdls_build_mgmt_packet_data(sdata, sta->sta.addr,
link_id,
WLAN_TDLS_CHANNEL_SWITCH_REQUEST,
0, 0, !sta->sta.tdls_initiator,
extra_ies, extra_ies_len,
oper_class, chandef);
if (!skb)
return NULL;
skb = ieee80211_build_data_template(sdata, skb, 0);
if (IS_ERR(skb)) {
tdls_dbg(sdata, "Failed building TDLS channel switch frame\n");
return NULL;
}
if (ch_sw_tm_ie_offset) {
const u8 *tm_ie = ieee80211_tdls_find_sw_timing_ie(skb);
if (!tm_ie) {
tdls_dbg(sdata, "No switch timing IE in TDLS switch\n");
dev_kfree_skb_any(skb);
return NULL;
}
*ch_sw_tm_ie_offset = tm_ie - skb->data;
}
tdls_dbg(sdata,
"TDLS channel switch request template for %pM ch %d width %d\n",
sta->sta.addr, chandef->chan->center_freq, chandef->width);
return skb;
}
int
ieee80211_tdls_channel_switch(struct wiphy *wiphy, struct net_device *dev,
const u8 *addr, u8 oper_class,
struct cfg80211_chan_def *chandef)
{
struct ieee80211_sub_if_data *sdata = IEEE80211_DEV_TO_SUB_IF(dev);
struct ieee80211_local *local = sdata->local;
struct sta_info *sta;
struct sk_buff *skb = NULL;
u32 ch_sw_tm_ie;
int ret;
if (chandef->chan->freq_offset)
/* this may work, but is untested */
return -EOPNOTSUPP;
mutex_lock(&local->sta_mtx);
sta = sta_info_get(sdata, addr);
if (!sta) {
tdls_dbg(sdata,
"Invalid TDLS peer %pM for channel switch request\n",
addr);
ret = -ENOENT;
goto out;
}
if (!test_sta_flag(sta, WLAN_STA_TDLS_CHAN_SWITCH)) {
tdls_dbg(sdata, "TDLS channel switch unsupported by %pM\n",
addr);
ret = -ENOTSUPP;
goto out;
}
skb = ieee80211_tdls_ch_sw_tmpl_get(sta, oper_class, chandef,
&ch_sw_tm_ie);
if (!skb) {
ret = -ENOENT;
goto out;
}
ret = drv_tdls_channel_switch(local, sdata, &sta->sta, oper_class,
chandef, skb, ch_sw_tm_ie);
if (!ret)
set_sta_flag(sta, WLAN_STA_TDLS_OFF_CHANNEL);
out:
mutex_unlock(&local->sta_mtx);
dev_kfree_skb_any(skb);
return ret;
}
void
ieee80211_tdls_cancel_channel_switch(struct wiphy *wiphy,
struct net_device *dev,
const u8 *addr)
{
struct ieee80211_sub_if_data *sdata = IEEE80211_DEV_TO_SUB_IF(dev);
struct ieee80211_local *local = sdata->local;
struct sta_info *sta;
mutex_lock(&local->sta_mtx);
sta = sta_info_get(sdata, addr);
if (!sta) {
tdls_dbg(sdata,
"Invalid TDLS peer %pM for channel switch cancel\n",
addr);
goto out;
}
if (!test_sta_flag(sta, WLAN_STA_TDLS_OFF_CHANNEL)) {
tdls_dbg(sdata, "TDLS channel switch not initiated by %pM\n",
addr);
goto out;
}
drv_tdls_cancel_channel_switch(local, sdata, &sta->sta);
clear_sta_flag(sta, WLAN_STA_TDLS_OFF_CHANNEL);
out:
mutex_unlock(&local->sta_mtx);
}
static struct sk_buff *
ieee80211_tdls_ch_sw_resp_tmpl_get(struct sta_info *sta,
u32 *ch_sw_tm_ie_offset)
{
struct ieee80211_sub_if_data *sdata = sta->sdata;
struct sk_buff *skb;
u8 extra_ies[2 + sizeof(struct ieee80211_ch_switch_timing)];
int link_id = sta->sta.valid_links ? ffs(sta->sta.valid_links) - 1 : 0;
/* initial timing values are always zero in the template */
iee80211_tdls_add_ch_switch_timing(extra_ies, 0, 0);
skb = ieee80211_tdls_build_mgmt_packet_data(sdata, sta->sta.addr,
link_id,
WLAN_TDLS_CHANNEL_SWITCH_RESPONSE,
0, 0, !sta->sta.tdls_initiator,
extra_ies, sizeof(extra_ies), 0, NULL);
if (!skb)
return NULL;
skb = ieee80211_build_data_template(sdata, skb, 0);
if (IS_ERR(skb)) {
tdls_dbg(sdata,
"Failed building TDLS channel switch resp frame\n");
return NULL;
}
if (ch_sw_tm_ie_offset) {
const u8 *tm_ie = ieee80211_tdls_find_sw_timing_ie(skb);
if (!tm_ie) {
tdls_dbg(sdata,
"No switch timing IE in TDLS switch resp\n");
dev_kfree_skb_any(skb);
return NULL;
}
*ch_sw_tm_ie_offset = tm_ie - skb->data;
}
tdls_dbg(sdata, "TDLS get channel switch response template for %pM\n",
sta->sta.addr);
return skb;
}
static int
ieee80211_process_tdls_channel_switch_resp(struct ieee80211_sub_if_data *sdata,
struct sk_buff *skb)
{
struct ieee80211_local *local = sdata->local;
struct ieee802_11_elems *elems = NULL;
struct sta_info *sta;
struct ieee80211_tdls_data *tf = (void *)skb->data;
bool local_initiator;
struct ieee80211_rx_status *rx_status = IEEE80211_SKB_RXCB(skb);
int baselen = offsetof(typeof(*tf), u.chan_switch_resp.variable);
struct ieee80211_tdls_ch_sw_params params = {};
int ret;
params.action_code = WLAN_TDLS_CHANNEL_SWITCH_RESPONSE;
params.timestamp = rx_status->device_timestamp;
if (skb->len < baselen) {
tdls_dbg(sdata, "TDLS channel switch resp too short: %d\n",
skb->len);
return -EINVAL;
}
mutex_lock(&local->sta_mtx);
sta = sta_info_get(sdata, tf->sa);
if (!sta || !test_sta_flag(sta, WLAN_STA_TDLS_PEER_AUTH)) {
tdls_dbg(sdata, "TDLS chan switch from non-peer sta %pM\n",
tf->sa);
ret = -EINVAL;
goto out;
}
params.sta = &sta->sta;
params.status = le16_to_cpu(tf->u.chan_switch_resp.status_code);
if (params.status != 0) {
ret = 0;
goto call_drv;
}
elems = ieee802_11_parse_elems(tf->u.chan_switch_resp.variable,
skb->len - baselen, false, NULL);
if (!elems) {
ret = -ENOMEM;
goto out;
}
if (elems->parse_error) {
tdls_dbg(sdata, "Invalid IEs in TDLS channel switch resp\n");
ret = -EINVAL;
goto out;
}
if (!elems->ch_sw_timing || !elems->lnk_id) {
tdls_dbg(sdata, "TDLS channel switch resp - missing IEs\n");
ret = -EINVAL;
goto out;
}
/* validate the initiator is set correctly */
local_initiator =
!memcmp(elems->lnk_id->init_sta, sdata->vif.addr, ETH_ALEN);
if (local_initiator == sta->sta.tdls_initiator) {
tdls_dbg(sdata, "TDLS chan switch invalid lnk-id initiator\n");
ret = -EINVAL;
goto out;
}
params.switch_time = le16_to_cpu(elems->ch_sw_timing->switch_time);
params.switch_timeout = le16_to_cpu(elems->ch_sw_timing->switch_timeout);
params.tmpl_skb =
ieee80211_tdls_ch_sw_resp_tmpl_get(sta, &params.ch_sw_tm_ie);
if (!params.tmpl_skb) {
ret = -ENOENT;
goto out;
}
ret = 0;
call_drv:
drv_tdls_recv_channel_switch(sdata->local, sdata, &params);
tdls_dbg(sdata,
"TDLS channel switch response received from %pM status %d\n",
tf->sa, params.status);
out:
mutex_unlock(&local->sta_mtx);
dev_kfree_skb_any(params.tmpl_skb);
kfree(elems);
return ret;
}
static int
ieee80211_process_tdls_channel_switch_req(struct ieee80211_sub_if_data *sdata,
struct sk_buff *skb)
{
struct ieee80211_local *local = sdata->local;
struct ieee802_11_elems *elems;
struct cfg80211_chan_def chandef;
struct ieee80211_channel *chan;
enum nl80211_channel_type chan_type;
int freq;
u8 target_channel, oper_class;
bool local_initiator;
struct sta_info *sta;
enum nl80211_band band;
struct ieee80211_tdls_data *tf = (void *)skb->data;
struct ieee80211_rx_status *rx_status = IEEE80211_SKB_RXCB(skb);
int baselen = offsetof(typeof(*tf), u.chan_switch_req.variable);
struct ieee80211_tdls_ch_sw_params params = {};
int ret = 0;
params.action_code = WLAN_TDLS_CHANNEL_SWITCH_REQUEST;
params.timestamp = rx_status->device_timestamp;
if (skb->len < baselen) {
tdls_dbg(sdata, "TDLS channel switch req too short: %d\n",
skb->len);
return -EINVAL;
}
target_channel = tf->u.chan_switch_req.target_channel;
oper_class = tf->u.chan_switch_req.oper_class;
/*
* We can't easily infer the channel band. The operating class is
* ambiguous - there are multiple tables (US/Europe/JP/Global). The
* solution here is to treat channels with number >14 as 5GHz ones,
* and specifically check for the (oper_class, channel) combinations
* where this doesn't hold. These are thankfully unique according to
* IEEE802.11-2012.
* We consider only the 2GHz and 5GHz bands and 20MHz+ channels as
* valid here.
*/
if ((oper_class == 112 || oper_class == 2 || oper_class == 3 ||
oper_class == 4 || oper_class == 5 || oper_class == 6) &&
target_channel < 14)
band = NL80211_BAND_5GHZ;
else
band = target_channel < 14 ? NL80211_BAND_2GHZ :
NL80211_BAND_5GHZ;
freq = ieee80211_channel_to_frequency(target_channel, band);
if (freq == 0) {
tdls_dbg(sdata, "Invalid channel in TDLS chan switch: %d\n",
target_channel);
return -EINVAL;
}
chan = ieee80211_get_channel(sdata->local->hw.wiphy, freq);
if (!chan) {
tdls_dbg(sdata,
"Unsupported channel for TDLS chan switch: %d\n",
target_channel);
return -EINVAL;
}
elems = ieee802_11_parse_elems(tf->u.chan_switch_req.variable,
skb->len - baselen, false, NULL);
if (!elems)
return -ENOMEM;
if (elems->parse_error) {
tdls_dbg(sdata, "Invalid IEs in TDLS channel switch req\n");
ret = -EINVAL;
goto free;
}
if (!elems->ch_sw_timing || !elems->lnk_id) {
tdls_dbg(sdata, "TDLS channel switch req - missing IEs\n");
ret = -EINVAL;
goto free;
}
if (!elems->sec_chan_offs) {
chan_type = NL80211_CHAN_HT20;
} else {
switch (elems->sec_chan_offs->sec_chan_offs) {
case IEEE80211_HT_PARAM_CHA_SEC_ABOVE:
chan_type = NL80211_CHAN_HT40PLUS;
break;
case IEEE80211_HT_PARAM_CHA_SEC_BELOW:
chan_type = NL80211_CHAN_HT40MINUS;
break;
default:
chan_type = NL80211_CHAN_HT20;
break;
}
}
cfg80211_chandef_create(&chandef, chan, chan_type);
/* we will be active on the TDLS link */
if (!cfg80211_reg_can_beacon_relax(sdata->local->hw.wiphy, &chandef,
sdata->wdev.iftype)) {
tdls_dbg(sdata, "TDLS chan switch to forbidden channel\n");
ret = -EINVAL;
goto free;
}
mutex_lock(&local->sta_mtx);
sta = sta_info_get(sdata, tf->sa);
if (!sta || !test_sta_flag(sta, WLAN_STA_TDLS_PEER_AUTH)) {
tdls_dbg(sdata, "TDLS chan switch from non-peer sta %pM\n",
tf->sa);
ret = -EINVAL;
goto out;
}
params.sta = &sta->sta;
/* validate the initiator is set correctly */
local_initiator =
!memcmp(elems->lnk_id->init_sta, sdata->vif.addr, ETH_ALEN);
if (local_initiator == sta->sta.tdls_initiator) {
tdls_dbg(sdata, "TDLS chan switch invalid lnk-id initiator\n");
ret = -EINVAL;
goto out;
}
/* peer should have known better */
if (!sta->sta.deflink.ht_cap.ht_supported && elems->sec_chan_offs &&
elems->sec_chan_offs->sec_chan_offs) {
tdls_dbg(sdata, "TDLS chan switch - wide chan unsupported\n");
ret = -ENOTSUPP;
goto out;
}
params.chandef = &chandef;
params.switch_time = le16_to_cpu(elems->ch_sw_timing->switch_time);
params.switch_timeout = le16_to_cpu(elems->ch_sw_timing->switch_timeout);
params.tmpl_skb =
ieee80211_tdls_ch_sw_resp_tmpl_get(sta,
&params.ch_sw_tm_ie);
if (!params.tmpl_skb) {
ret = -ENOENT;
goto out;
}
drv_tdls_recv_channel_switch(sdata->local, sdata, &params);
tdls_dbg(sdata,
"TDLS ch switch request received from %pM ch %d width %d\n",
tf->sa, params.chandef->chan->center_freq,
params.chandef->width);
out:
mutex_unlock(&local->sta_mtx);
dev_kfree_skb_any(params.tmpl_skb);
free:
kfree(elems);
return ret;
}
void
ieee80211_process_tdls_channel_switch(struct ieee80211_sub_if_data *sdata,
struct sk_buff *skb)
{
struct ieee80211_tdls_data *tf = (void *)skb->data;
struct wiphy *wiphy = sdata->local->hw.wiphy;
lockdep_assert_wiphy(wiphy);
/* make sure the driver supports it */
if (!(wiphy->features & NL80211_FEATURE_TDLS_CHANNEL_SWITCH))
return;
/* we want to access the entire packet */
if (skb_linearize(skb))
return;
/*
* The packet/size was already validated by mac80211 Rx path, only look
* at the action type.
*/
switch (tf->action_code) {
case WLAN_TDLS_CHANNEL_SWITCH_REQUEST:
ieee80211_process_tdls_channel_switch_req(sdata, skb);
break;
case WLAN_TDLS_CHANNEL_SWITCH_RESPONSE:
ieee80211_process_tdls_channel_switch_resp(sdata, skb);
break;
default:
WARN_ON_ONCE(1);
return;
}
}
void ieee80211_teardown_tdls_peers(struct ieee80211_sub_if_data *sdata)
{
struct sta_info *sta;
u16 reason = WLAN_REASON_TDLS_TEARDOWN_UNSPECIFIED;
rcu_read_lock();
list_for_each_entry_rcu(sta, &sdata->local->sta_list, list) {
if (!sta->sta.tdls || sta->sdata != sdata || !sta->uploaded ||
!test_sta_flag(sta, WLAN_STA_AUTHORIZED))
continue;
ieee80211_tdls_oper_request(&sdata->vif, sta->sta.addr,
NL80211_TDLS_TEARDOWN, reason,
GFP_ATOMIC);
}
rcu_read_unlock();
}
void ieee80211_tdls_handle_disconnect(struct ieee80211_sub_if_data *sdata,
const u8 *peer, u16 reason)
{
struct ieee80211_sta *sta;
rcu_read_lock();
sta = ieee80211_find_sta(&sdata->vif, peer);
if (!sta || !sta->tdls) {
rcu_read_unlock();
return;
}
rcu_read_unlock();
tdls_dbg(sdata, "disconnected from TDLS peer %pM (Reason: %u=%s)\n",
peer, reason,
ieee80211_get_reason_code_string(reason));
ieee80211_tdls_oper_request(&sdata->vif, peer,
NL80211_TDLS_TEARDOWN,
WLAN_REASON_TDLS_TEARDOWN_UNREACHABLE,
GFP_ATOMIC);
}
/* --- end of file: net/mac80211/tdls.c (repo: linux-master) --- */
// SPDX-License-Identifier: GPL-2.0-only
/*
* Scanning implementation
*
* Copyright 2003, Jouni Malinen <[email protected]>
* Copyright 2004, Instant802 Networks, Inc.
* Copyright 2005, Devicescape Software, Inc.
* Copyright 2006-2007 Jiri Benc <[email protected]>
* Copyright 2007, Michael Wu <[email protected]>
* Copyright 2013-2015 Intel Mobile Communications GmbH
* Copyright 2016-2017 Intel Deutschland GmbH
* Copyright (C) 2018-2023 Intel Corporation
*/
#include <linux/if_arp.h>
#include <linux/etherdevice.h>
#include <linux/rtnetlink.h>
#include <net/sch_generic.h>
#include <linux/slab.h>
#include <linux/export.h>
#include <linux/random.h>
#include <net/mac80211.h>
#include "ieee80211_i.h"
#include "driver-ops.h"
#include "mesh.h"
#define IEEE80211_PROBE_DELAY (HZ / 33)
#define IEEE80211_CHANNEL_TIME (HZ / 33)
#define IEEE80211_PASSIVE_CHANNEL_TIME (HZ / 9)
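/*
 * With the HZ-based arithmetic above these come out to roughly 30 ms for
 * the probe delay and active-scan dwell time and roughly 110 ms for the
 * passive-scan dwell time, largely independent of the configured HZ value.
 */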
void ieee80211_rx_bss_put(struct ieee80211_local *local,
struct ieee80211_bss *bss)
{
if (!bss)
return;
cfg80211_put_bss(local->hw.wiphy,
container_of((void *)bss, struct cfg80211_bss, priv));
}
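/*
 * Check whether the AP advertises U-APSD: offsets 5 and 6 of the WMM
 * information/parameter element body are the WMM version (must be 1) and
 * the QoS Info field; IEEE80211_WMM_IE_AP_QOSINFO_UAPSD is the U-APSD flag
 * within QoS Info.
 */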
static bool is_uapsd_supported(struct ieee802_11_elems *elems)
{
u8 qos_info;
if (elems->wmm_info && elems->wmm_info_len == 7
&& elems->wmm_info[5] == 1)
qos_info = elems->wmm_info[6];
else if (elems->wmm_param && elems->wmm_param_len == 24
&& elems->wmm_param[5] == 1)
qos_info = elems->wmm_param[6];
else
/* no valid wmm information or parameter element found */
return false;
return qos_info & IEEE80211_WMM_IE_AP_QOSINFO_UAPSD;
}
struct inform_bss_update_data {
struct ieee80211_rx_status *rx_status;
bool beacon;
};
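/* Update mac80211's private BSS data (device timestamps, ERP value,
 * supported rates, WMM/U-APSD, beacon rate and VHT capability info) from
 * the elements of a received beacon or probe response. The update data is
 * passed in as the drv_data of the cfg80211_inform_bss built in
 * ieee80211_bss_info_update() below; it is NULL while joining an IBSS.
 */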
void ieee80211_inform_bss(struct wiphy *wiphy,
struct cfg80211_bss *cbss,
const struct cfg80211_bss_ies *ies,
void *data)
{
struct ieee80211_local *local = wiphy_priv(wiphy);
struct inform_bss_update_data *update_data = data;
struct ieee80211_bss *bss = (void *)cbss->priv;
struct ieee80211_rx_status *rx_status;
struct ieee802_11_elems *elems;
int clen, srlen;
/* This happens while joining an IBSS */
if (!update_data)
return;
elems = ieee802_11_parse_elems(ies->data, ies->len, false, NULL);
if (!elems)
return;
rx_status = update_data->rx_status;
if (update_data->beacon)
bss->device_ts_beacon = rx_status->device_timestamp;
else
bss->device_ts_presp = rx_status->device_timestamp;
if (elems->parse_error) {
if (update_data->beacon)
bss->corrupt_data |= IEEE80211_BSS_CORRUPT_BEACON;
else
bss->corrupt_data |= IEEE80211_BSS_CORRUPT_PROBE_RESP;
} else {
if (update_data->beacon)
bss->corrupt_data &= ~IEEE80211_BSS_CORRUPT_BEACON;
else
bss->corrupt_data &= ~IEEE80211_BSS_CORRUPT_PROBE_RESP;
}
/* save the ERP value so that it is available at association time */
if (elems->erp_info && (!elems->parse_error ||
!(bss->valid_data & IEEE80211_BSS_VALID_ERP))) {
bss->erp_value = elems->erp_info[0];
bss->has_erp_value = true;
if (!elems->parse_error)
bss->valid_data |= IEEE80211_BSS_VALID_ERP;
}
/* replace old supported rates if we get new values */
if (!elems->parse_error ||
!(bss->valid_data & IEEE80211_BSS_VALID_RATES)) {
srlen = 0;
if (elems->supp_rates) {
clen = IEEE80211_MAX_SUPP_RATES;
if (clen > elems->supp_rates_len)
clen = elems->supp_rates_len;
memcpy(bss->supp_rates, elems->supp_rates, clen);
srlen += clen;
}
if (elems->ext_supp_rates) {
clen = IEEE80211_MAX_SUPP_RATES - srlen;
if (clen > elems->ext_supp_rates_len)
clen = elems->ext_supp_rates_len;
memcpy(bss->supp_rates + srlen, elems->ext_supp_rates,
clen);
srlen += clen;
}
if (srlen) {
bss->supp_rates_len = srlen;
if (!elems->parse_error)
bss->valid_data |= IEEE80211_BSS_VALID_RATES;
}
}
if (!elems->parse_error ||
!(bss->valid_data & IEEE80211_BSS_VALID_WMM)) {
bss->wmm_used = elems->wmm_param || elems->wmm_info;
bss->uapsd_supported = is_uapsd_supported(elems);
if (!elems->parse_error)
bss->valid_data |= IEEE80211_BSS_VALID_WMM;
}
if (update_data->beacon) {
struct ieee80211_supported_band *sband =
local->hw.wiphy->bands[rx_status->band];
if (!(rx_status->encoding == RX_ENC_HT) &&
!(rx_status->encoding == RX_ENC_VHT))
bss->beacon_rate =
&sband->bitrates[rx_status->rate_idx];
}
if (elems->vht_cap_elem)
bss->vht_cap_info =
le32_to_cpu(elems->vht_cap_elem->vht_cap_info);
else
bss->vht_cap_info = 0;
kfree(elems);
}
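/* Report a received beacon or probe response to cfg80211 and return the
 * corresponding mac80211 BSS entry (or NULL on failure). Fills in signal,
 * scan width, channel and parent TSF metadata before handing the frame to
 * cfg80211_inform_bss_frame_data().
 */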
struct ieee80211_bss *
ieee80211_bss_info_update(struct ieee80211_local *local,
struct ieee80211_rx_status *rx_status,
struct ieee80211_mgmt *mgmt, size_t len,
struct ieee80211_channel *channel)
{
bool beacon = ieee80211_is_beacon(mgmt->frame_control) ||
ieee80211_is_s1g_beacon(mgmt->frame_control);
struct cfg80211_bss *cbss;
struct inform_bss_update_data update_data = {
.rx_status = rx_status,
.beacon = beacon,
};
struct cfg80211_inform_bss bss_meta = {
.boottime_ns = rx_status->boottime_ns,
.drv_data = (void *)&update_data,
};
bool signal_valid;
struct ieee80211_sub_if_data *scan_sdata;
if (rx_status->flag & RX_FLAG_NO_SIGNAL_VAL)
bss_meta.signal = 0; /* invalid signal indication */
else if (ieee80211_hw_check(&local->hw, SIGNAL_DBM))
bss_meta.signal = rx_status->signal * 100;
else if (ieee80211_hw_check(&local->hw, SIGNAL_UNSPEC))
bss_meta.signal = (rx_status->signal * 100) / local->hw.max_signal;
bss_meta.scan_width = NL80211_BSS_CHAN_WIDTH_20;
if (rx_status->bw == RATE_INFO_BW_5)
bss_meta.scan_width = NL80211_BSS_CHAN_WIDTH_5;
else if (rx_status->bw == RATE_INFO_BW_10)
bss_meta.scan_width = NL80211_BSS_CHAN_WIDTH_10;
bss_meta.chan = channel;
rcu_read_lock();
scan_sdata = rcu_dereference(local->scan_sdata);
if (scan_sdata && scan_sdata->vif.type == NL80211_IFTYPE_STATION &&
scan_sdata->vif.cfg.assoc &&
ieee80211_have_rx_timestamp(rx_status)) {
bss_meta.parent_tsf =
ieee80211_calculate_rx_timestamp(local, rx_status,
len + FCS_LEN, 24);
ether_addr_copy(bss_meta.parent_bssid,
scan_sdata->vif.bss_conf.bssid);
}
rcu_read_unlock();
cbss = cfg80211_inform_bss_frame_data(local->hw.wiphy, &bss_meta,
mgmt, len, GFP_ATOMIC);
if (!cbss)
return NULL;
/* In case the signal is invalid update the status */
signal_valid = channel == cbss->channel;
if (!signal_valid)
rx_status->flag |= RX_FLAG_NO_SIGNAL_VAL;
return (void *)cbss->priv;
}
static bool ieee80211_scan_accept_presp(struct ieee80211_sub_if_data *sdata,
u32 scan_flags, const u8 *da)
{
if (!sdata)
return false;
/* accept broadcast for OCE */
if (scan_flags & NL80211_SCAN_FLAG_ACCEPT_BCAST_PROBE_RESP &&
is_broadcast_ether_addr(da))
return true;
if (scan_flags & NL80211_SCAN_FLAG_RANDOM_ADDR)
return true;
return ether_addr_equal(da, sdata->vif.addr);
}
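/* RX handler for beacons and probe responses received while a (sched) scan
 * is active: may upgrade a passive scan to active, drops probe responses
 * not addressed to us (unless allowed by OCE/random-address scan flags) and
 * feeds the frame into the BSS table.
 */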
void ieee80211_scan_rx(struct ieee80211_local *local, struct sk_buff *skb)
{
struct ieee80211_rx_status *rx_status = IEEE80211_SKB_RXCB(skb);
struct ieee80211_sub_if_data *sdata1, *sdata2;
struct ieee80211_mgmt *mgmt = (void *)skb->data;
struct ieee80211_bss *bss;
struct ieee80211_channel *channel;
size_t min_hdr_len = offsetof(struct ieee80211_mgmt,
u.probe_resp.variable);
if (!ieee80211_is_probe_resp(mgmt->frame_control) &&
!ieee80211_is_beacon(mgmt->frame_control) &&
!ieee80211_is_s1g_beacon(mgmt->frame_control))
return;
if (ieee80211_is_s1g_beacon(mgmt->frame_control)) {
if (ieee80211_is_s1g_short_beacon(mgmt->frame_control))
min_hdr_len = offsetof(struct ieee80211_ext,
u.s1g_short_beacon.variable);
else
min_hdr_len = offsetof(struct ieee80211_ext,
u.s1g_beacon);
}
if (skb->len < min_hdr_len)
return;
sdata1 = rcu_dereference(local->scan_sdata);
sdata2 = rcu_dereference(local->sched_scan_sdata);
if (likely(!sdata1 && !sdata2))
return;
if (test_and_clear_bit(SCAN_BEACON_WAIT, &local->scanning)) {
/*
* we were passive scanning because of radar/no-IR, but
* the beacon/proberesp rx gives us an opportunity to upgrade
* to active scan
*/
set_bit(SCAN_BEACON_DONE, &local->scanning);
ieee80211_queue_delayed_work(&local->hw, &local->scan_work, 0);
}
if (ieee80211_is_probe_resp(mgmt->frame_control)) {
struct cfg80211_scan_request *scan_req;
struct cfg80211_sched_scan_request *sched_scan_req;
u32 scan_req_flags = 0, sched_scan_req_flags = 0;
scan_req = rcu_dereference(local->scan_req);
sched_scan_req = rcu_dereference(local->sched_scan_req);
if (scan_req)
scan_req_flags = scan_req->flags;
if (sched_scan_req)
sched_scan_req_flags = sched_scan_req->flags;
/* ignore ProbeResp to foreign address or non-bcast (OCE)
* unless scanning with randomised address
*/
if (!ieee80211_scan_accept_presp(sdata1, scan_req_flags,
mgmt->da) &&
!ieee80211_scan_accept_presp(sdata2, sched_scan_req_flags,
mgmt->da))
return;
}
channel = ieee80211_get_channel_khz(local->hw.wiphy,
ieee80211_rx_status_to_khz(rx_status));
if (!channel || channel->flags & IEEE80211_CHAN_DISABLED)
return;
bss = ieee80211_bss_info_update(local, rx_status,
mgmt, skb->len,
channel);
if (bss)
ieee80211_rx_bss_put(local, bss);
}
static void
ieee80211_prepare_scan_chandef(struct cfg80211_chan_def *chandef,
enum nl80211_bss_scan_width scan_width)
{
memset(chandef, 0, sizeof(*chandef));
switch (scan_width) {
case NL80211_BSS_CHAN_WIDTH_5:
chandef->width = NL80211_CHAN_WIDTH_5;
break;
case NL80211_BSS_CHAN_WIDTH_10:
chandef->width = NL80211_CHAN_WIDTH_10;
break;
default:
chandef->width = NL80211_CHAN_WIDTH_20_NOHT;
break;
}
}
/* return false if no more work */
static bool ieee80211_prep_hw_scan(struct ieee80211_sub_if_data *sdata)
{
struct ieee80211_local *local = sdata->local;
struct cfg80211_scan_request *req;
struct cfg80211_chan_def chandef;
u8 bands_used = 0;
int i, ielen, n_chans;
u32 flags = 0;
req = rcu_dereference_protected(local->scan_req,
lockdep_is_held(&local->mtx));
if (test_bit(SCAN_HW_CANCELLED, &local->scanning))
return false;
if (ieee80211_hw_check(&local->hw, SINGLE_SCAN_ON_ALL_BANDS)) {
for (i = 0; i < req->n_channels; i++) {
local->hw_scan_req->req.channels[i] = req->channels[i];
bands_used |= BIT(req->channels[i]->band);
}
n_chans = req->n_channels;
} else {
do {
if (local->hw_scan_band == NUM_NL80211_BANDS)
return false;
n_chans = 0;
for (i = 0; i < req->n_channels; i++) {
if (req->channels[i]->band !=
local->hw_scan_band)
continue;
local->hw_scan_req->req.channels[n_chans] =
req->channels[i];
n_chans++;
bands_used |= BIT(req->channels[i]->band);
}
local->hw_scan_band++;
} while (!n_chans);
}
local->hw_scan_req->req.n_channels = n_chans;
ieee80211_prepare_scan_chandef(&chandef, req->scan_width);
if (req->flags & NL80211_SCAN_FLAG_MIN_PREQ_CONTENT)
flags |= IEEE80211_PROBE_FLAG_MIN_CONTENT;
ielen = ieee80211_build_preq_ies(sdata,
(u8 *)local->hw_scan_req->req.ie,
local->hw_scan_ies_bufsize,
&local->hw_scan_req->ies,
req->ie, req->ie_len,
bands_used, req->rates, &chandef,
flags);
local->hw_scan_req->req.ie_len = ielen;
local->hw_scan_req->req.no_cck = req->no_cck;
ether_addr_copy(local->hw_scan_req->req.mac_addr, req->mac_addr);
ether_addr_copy(local->hw_scan_req->req.mac_addr_mask,
req->mac_addr_mask);
ether_addr_copy(local->hw_scan_req->req.bssid, req->bssid);
return true;
}
static void __ieee80211_scan_completed(struct ieee80211_hw *hw, bool aborted)
{
struct ieee80211_local *local = hw_to_local(hw);
bool hw_scan = test_bit(SCAN_HW_SCANNING, &local->scanning);
bool was_scanning = local->scanning;
struct cfg80211_scan_request *scan_req;
struct ieee80211_sub_if_data *scan_sdata;
struct ieee80211_sub_if_data *sdata;
lockdep_assert_held(&local->mtx);
/*
* It's ok to abort a not-yet-running scan (that
* we have one at all will be verified by checking
* local->scan_req next), but not to complete it
* successfully.
*/
if (WARN_ON(!local->scanning && !aborted))
aborted = true;
if (WARN_ON(!local->scan_req))
return;
scan_sdata = rcu_dereference_protected(local->scan_sdata,
lockdep_is_held(&local->mtx));
if (hw_scan && !aborted &&
!ieee80211_hw_check(&local->hw, SINGLE_SCAN_ON_ALL_BANDS) &&
ieee80211_prep_hw_scan(scan_sdata)) {
int rc;
rc = drv_hw_scan(local,
rcu_dereference_protected(local->scan_sdata,
lockdep_is_held(&local->mtx)),
local->hw_scan_req);
if (rc == 0)
return;
/* HW scan failed and is going to be reported as aborted,
* so clear old scan info.
*/
memset(&local->scan_info, 0, sizeof(local->scan_info));
aborted = true;
}
kfree(local->hw_scan_req);
local->hw_scan_req = NULL;
scan_req = rcu_dereference_protected(local->scan_req,
lockdep_is_held(&local->mtx));
RCU_INIT_POINTER(local->scan_req, NULL);
RCU_INIT_POINTER(local->scan_sdata, NULL);
local->scanning = 0;
local->scan_chandef.chan = NULL;
synchronize_rcu();
if (scan_req != local->int_scan_req) {
local->scan_info.aborted = aborted;
cfg80211_scan_done(scan_req, &local->scan_info);
}
/* Set power back to normal operating levels. */
ieee80211_hw_config(local, 0);
if (!hw_scan && was_scanning) {
ieee80211_configure_filter(local);
drv_sw_scan_complete(local, scan_sdata);
ieee80211_offchannel_return(local);
}
ieee80211_recalc_idle(local);
ieee80211_mlme_notify_scan_completed(local);
ieee80211_ibss_notify_scan_completed(local);
/* Requeue all the work that might have been ignored while
* the scan was in progress; if there was none this will
* just be a no-op for the particular interface.
*/
list_for_each_entry_rcu(sdata, &local->interfaces, list) {
if (ieee80211_sdata_running(sdata))
wiphy_work_queue(sdata->local->hw.wiphy, &sdata->work);
}
if (was_scanning)
ieee80211_start_next_roc(local);
}
void ieee80211_scan_completed(struct ieee80211_hw *hw,
struct cfg80211_scan_info *info)
{
struct ieee80211_local *local = hw_to_local(hw);
trace_api_scan_completed(local, info->aborted);
set_bit(SCAN_COMPLETED, &local->scanning);
if (info->aborted)
set_bit(SCAN_ABORTED, &local->scanning);
memcpy(&local->scan_info, info, sizeof(*info));
ieee80211_queue_delayed_work(&local->hw, &local->scan_work, 0);
}
EXPORT_SYMBOL(ieee80211_scan_completed);
static int ieee80211_start_sw_scan(struct ieee80211_local *local,
struct ieee80211_sub_if_data *sdata)
{
/* Software scan is not supported in multi-channel cases */
if (local->use_chanctx)
return -EOPNOTSUPP;
/*
* Hardware/driver doesn't support hw_scan, so use software
* scanning instead. First send a nullfunc frame with power save
* bit on so that AP will buffer the frames for us while we are not
* listening, then send probe requests to each channel and wait for
* the responses. After all channels are scanned, tune back to the
* original channel and send a nullfunc frame with power save bit
* off to trigger the AP to send us all the buffered frames.
*
* Note that while local->sw_scanning is true everything else but
* nullfunc frames and probe requests will be dropped in
* ieee80211_tx_h_check_assoc().
*/
drv_sw_scan_start(local, sdata, local->scan_addr);
local->leave_oper_channel_time = jiffies;
local->next_scan_state = SCAN_DECISION;
local->scan_channel_idx = 0;
ieee80211_offchannel_stop_vifs(local);
/* ensure nullfunc is transmitted before leaving operating channel */
ieee80211_flush_queues(local, NULL, false);
ieee80211_configure_filter(local);
/* We need to set power level at maximum rate for scanning. */
ieee80211_hw_config(local, 0);
ieee80211_queue_delayed_work(&local->hw,
&local->scan_work, 0);
return 0;
}
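/* Leaving the operating channel is allowed unless radar detection is
 * required; in that case it is only allowed if pre-CAC relaxation applies
 * and no interface currently has CAC running.
 */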
static bool __ieee80211_can_leave_ch(struct ieee80211_sub_if_data *sdata)
{
struct ieee80211_local *local = sdata->local;
struct ieee80211_sub_if_data *sdata_iter;
if (!ieee80211_is_radar_required(local))
return true;
if (!regulatory_pre_cac_allowed(local->hw.wiphy))
return false;
mutex_lock(&local->iflist_mtx);
list_for_each_entry(sdata_iter, &local->interfaces, list) {
if (sdata_iter->wdev.cac_started) {
mutex_unlock(&local->iflist_mtx);
return false;
}
}
mutex_unlock(&local->iflist_mtx);
return true;
}
static bool ieee80211_can_scan(struct ieee80211_local *local,
struct ieee80211_sub_if_data *sdata)
{
if (!__ieee80211_can_leave_ch(sdata))
return false;
if (!list_empty(&local->roc_list))
return false;
if (sdata->vif.type == NL80211_IFTYPE_STATION &&
sdata->u.mgd.flags & IEEE80211_STA_CONNECTION_POLL)
return false;
return true;
}
void ieee80211_run_deferred_scan(struct ieee80211_local *local)
{
lockdep_assert_held(&local->mtx);
if (!local->scan_req || local->scanning)
return;
if (!ieee80211_can_scan(local,
rcu_dereference_protected(
local->scan_sdata,
lockdep_is_held(&local->mtx))))
return;
ieee80211_queue_delayed_work(&local->hw, &local->scan_work,
round_jiffies_relative(0));
}
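/* Build a probe request for the given SSID/IEs and transmit it on the
 * given channel, optionally with a randomized sequence number when
 * IEEE80211_PROBE_FLAG_RANDOM_SN is set.
 */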
static void ieee80211_send_scan_probe_req(struct ieee80211_sub_if_data *sdata,
const u8 *src, const u8 *dst,
const u8 *ssid, size_t ssid_len,
const u8 *ie, size_t ie_len,
u32 ratemask, u32 flags, u32 tx_flags,
struct ieee80211_channel *channel)
{
struct sk_buff *skb;
skb = ieee80211_build_probe_req(sdata, src, dst, ratemask, channel,
ssid, ssid_len,
ie, ie_len, flags);
if (skb) {
if (flags & IEEE80211_PROBE_FLAG_RANDOM_SN) {
struct ieee80211_hdr *hdr = (void *)skb->data;
struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb);
u16 sn = get_random_u16();
info->control.flags |= IEEE80211_TX_CTRL_NO_SEQNO;
hdr->seq_ctrl =
cpu_to_le16(IEEE80211_SN_TO_SEQ(sn));
}
IEEE80211_SKB_CB(skb)->flags |= tx_flags;
ieee80211_tx_skb_tid_band(sdata, skb, 7, channel->band);
}
}
static void ieee80211_scan_state_send_probe(struct ieee80211_local *local,
unsigned long *next_delay)
{
int i;
struct ieee80211_sub_if_data *sdata;
struct cfg80211_scan_request *scan_req;
enum nl80211_band band = local->hw.conf.chandef.chan->band;
u32 flags = 0, tx_flags;
scan_req = rcu_dereference_protected(local->scan_req,
lockdep_is_held(&local->mtx));
tx_flags = IEEE80211_TX_INTFL_OFFCHAN_TX_OK;
if (scan_req->no_cck)
tx_flags |= IEEE80211_TX_CTL_NO_CCK_RATE;
if (scan_req->flags & NL80211_SCAN_FLAG_MIN_PREQ_CONTENT)
flags |= IEEE80211_PROBE_FLAG_MIN_CONTENT;
if (scan_req->flags & NL80211_SCAN_FLAG_RANDOM_SN)
flags |= IEEE80211_PROBE_FLAG_RANDOM_SN;
sdata = rcu_dereference_protected(local->scan_sdata,
lockdep_is_held(&local->mtx));
for (i = 0; i < scan_req->n_ssids; i++)
ieee80211_send_scan_probe_req(
sdata, local->scan_addr, scan_req->bssid,
scan_req->ssids[i].ssid, scan_req->ssids[i].ssid_len,
scan_req->ie, scan_req->ie_len,
scan_req->rates[band], flags,
tx_flags, local->hw.conf.chandef.chan);
/*
* After sending probe requests, wait for probe responses
* on the channel.
*/
*next_delay = IEEE80211_CHANNEL_TIME;
local->next_scan_state = SCAN_DECISION;
}
static int __ieee80211_start_scan(struct ieee80211_sub_if_data *sdata,
struct cfg80211_scan_request *req)
{
struct ieee80211_local *local = sdata->local;
bool hw_scan = local->ops->hw_scan;
int rc;
lockdep_assert_held(&local->mtx);
if (local->scan_req)
return -EBUSY;
if (!__ieee80211_can_leave_ch(sdata))
return -EBUSY;
if (!ieee80211_can_scan(local, sdata)) {
/* wait for the work to finish/time out */
rcu_assign_pointer(local->scan_req, req);
rcu_assign_pointer(local->scan_sdata, sdata);
return 0;
}
again:
if (hw_scan) {
u8 *ies;
local->hw_scan_ies_bufsize = local->scan_ies_len + req->ie_len;
if (ieee80211_hw_check(&local->hw, SINGLE_SCAN_ON_ALL_BANDS)) {
int i, n_bands = 0;
u8 bands_counted = 0;
for (i = 0; i < req->n_channels; i++) {
if (bands_counted & BIT(req->channels[i]->band))
continue;
bands_counted |= BIT(req->channels[i]->band);
n_bands++;
}
local->hw_scan_ies_bufsize *= n_bands;
}
local->hw_scan_req = kmalloc(
sizeof(*local->hw_scan_req) +
req->n_channels * sizeof(req->channels[0]) +
local->hw_scan_ies_bufsize, GFP_KERNEL);
if (!local->hw_scan_req)
return -ENOMEM;
local->hw_scan_req->req.ssids = req->ssids;
local->hw_scan_req->req.n_ssids = req->n_ssids;
ies = (u8 *)local->hw_scan_req +
sizeof(*local->hw_scan_req) +
req->n_channels * sizeof(req->channels[0]);
local->hw_scan_req->req.ie = ies;
local->hw_scan_req->req.flags = req->flags;
eth_broadcast_addr(local->hw_scan_req->req.bssid);
local->hw_scan_req->req.duration = req->duration;
local->hw_scan_req->req.duration_mandatory =
req->duration_mandatory;
local->hw_scan_band = 0;
local->hw_scan_req->req.n_6ghz_params = req->n_6ghz_params;
local->hw_scan_req->req.scan_6ghz_params =
req->scan_6ghz_params;
local->hw_scan_req->req.scan_6ghz = req->scan_6ghz;
/*
* After allocating local->hw_scan_req, we must
* go through until ieee80211_prep_hw_scan(), so
* anything that might be changed here and leave
* this function early must not go after this
* allocation.
*/
}
rcu_assign_pointer(local->scan_req, req);
rcu_assign_pointer(local->scan_sdata, sdata);
if (req->flags & NL80211_SCAN_FLAG_RANDOM_ADDR)
get_random_mask_addr(local->scan_addr,
req->mac_addr,
req->mac_addr_mask);
else
memcpy(local->scan_addr, sdata->vif.addr, ETH_ALEN);
if (hw_scan) {
__set_bit(SCAN_HW_SCANNING, &local->scanning);
} else if ((req->n_channels == 1) &&
(req->channels[0] == local->_oper_chandef.chan)) {
/*
* If we are scanning only on the operating channel
* then we do not need to stop normal activities
*/
unsigned long next_delay;
__set_bit(SCAN_ONCHANNEL_SCANNING, &local->scanning);
ieee80211_recalc_idle(local);
/* Notify driver scan is starting, keep order of operations
* same as normal software scan, in case that matters. */
drv_sw_scan_start(local, sdata, local->scan_addr);
ieee80211_configure_filter(local); /* accept probe-responses */
/* We need to ensure power level is at max for scanning. */
ieee80211_hw_config(local, 0);
if ((req->channels[0]->flags & (IEEE80211_CHAN_NO_IR |
IEEE80211_CHAN_RADAR)) ||
!req->n_ssids) {
next_delay = IEEE80211_PASSIVE_CHANNEL_TIME;
if (req->n_ssids)
set_bit(SCAN_BEACON_WAIT, &local->scanning);
} else {
ieee80211_scan_state_send_probe(local, &next_delay);
next_delay = IEEE80211_CHANNEL_TIME;
}
/* Now, just wait a bit and we are all done! */
ieee80211_queue_delayed_work(&local->hw, &local->scan_work,
next_delay);
return 0;
} else {
/* Do normal software scan */
__set_bit(SCAN_SW_SCANNING, &local->scanning);
}
ieee80211_recalc_idle(local);
if (hw_scan) {
WARN_ON(!ieee80211_prep_hw_scan(sdata));
rc = drv_hw_scan(local, sdata, local->hw_scan_req);
} else {
rc = ieee80211_start_sw_scan(local, sdata);
}
if (rc) {
kfree(local->hw_scan_req);
local->hw_scan_req = NULL;
local->scanning = 0;
ieee80211_recalc_idle(local);
local->scan_req = NULL;
RCU_INIT_POINTER(local->scan_sdata, NULL);
}
if (hw_scan && rc == 1) {
/*
* we can't fall back to software for P2P-GO
* as it must update NoA etc.
*/
if (ieee80211_vif_type_p2p(&sdata->vif) ==
NL80211_IFTYPE_P2P_GO)
return -EOPNOTSUPP;
hw_scan = false;
goto again;
}
return rc;
}
static unsigned long
ieee80211_scan_get_channel_time(struct ieee80211_channel *chan)
{
/*
* TODO: channel switching also consumes quite some time,
* add that delay as well to get a better estimation
*/
if (chan->flags & (IEEE80211_CHAN_NO_IR | IEEE80211_CHAN_RADAR))
return IEEE80211_PASSIVE_CHANNEL_TIME;
return IEEE80211_PROBE_DELAY + IEEE80211_CHANNEL_TIME;
}
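/* Decide the next step of the software scan state machine: abort or
 * suspend when an associated interface has pending TX frames, suspend when
 * we would stay off-channel for too long, otherwise move on to scanning
 * the next channel.
 */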
static void ieee80211_scan_state_decision(struct ieee80211_local *local,
unsigned long *next_delay)
{
bool associated = false;
bool tx_empty = true;
bool bad_latency;
struct ieee80211_sub_if_data *sdata;
struct ieee80211_channel *next_chan;
enum mac80211_scan_state next_scan_state;
struct cfg80211_scan_request *scan_req;
/*
* check if at least one STA interface is associated,
* check if at least one STA interface has pending tx frames
* and grab the lowest used beacon interval
*/
mutex_lock(&local->iflist_mtx);
list_for_each_entry(sdata, &local->interfaces, list) {
if (!ieee80211_sdata_running(sdata))
continue;
if (sdata->vif.type == NL80211_IFTYPE_STATION) {
if (sdata->u.mgd.associated) {
associated = true;
if (!qdisc_all_tx_empty(sdata->dev)) {
tx_empty = false;
break;
}
}
}
}
mutex_unlock(&local->iflist_mtx);
scan_req = rcu_dereference_protected(local->scan_req,
lockdep_is_held(&local->mtx));
next_chan = scan_req->channels[local->scan_channel_idx];
/*
* we're currently scanning a different channel, let's
* see if we can scan another channel without interfering
* with the current traffic situation.
*
* Keep good latency, do not stay off-channel more than 125 ms.
*/
bad_latency = time_after(jiffies +
ieee80211_scan_get_channel_time(next_chan),
local->leave_oper_channel_time + HZ / 8);
if (associated && !tx_empty) {
if (scan_req->flags & NL80211_SCAN_FLAG_LOW_PRIORITY)
next_scan_state = SCAN_ABORT;
else
next_scan_state = SCAN_SUSPEND;
} else if (associated && bad_latency) {
next_scan_state = SCAN_SUSPEND;
} else {
next_scan_state = SCAN_SET_CHANNEL;
}
local->next_scan_state = next_scan_state;
*next_delay = 0;
}
static void ieee80211_scan_state_set_channel(struct ieee80211_local *local,
unsigned long *next_delay)
{
int skip;
struct ieee80211_channel *chan;
enum nl80211_bss_scan_width oper_scan_width;
struct cfg80211_scan_request *scan_req;
scan_req = rcu_dereference_protected(local->scan_req,
lockdep_is_held(&local->mtx));
skip = 0;
chan = scan_req->channels[local->scan_channel_idx];
local->scan_chandef.chan = chan;
local->scan_chandef.center_freq1 = chan->center_freq;
local->scan_chandef.freq1_offset = chan->freq_offset;
local->scan_chandef.center_freq2 = 0;
/* For scanning on the S1G band, ignore scan_width (which is constant
* across all channels) for now since channel width is specific to each
* channel. Detect the required channel width here and likely revisit
* later. Maybe scan_width could be used to build the channel scan list?
*/
if (chan->band == NL80211_BAND_S1GHZ) {
local->scan_chandef.width = ieee80211_s1g_channel_width(chan);
goto set_channel;
}
switch (scan_req->scan_width) {
case NL80211_BSS_CHAN_WIDTH_5:
local->scan_chandef.width = NL80211_CHAN_WIDTH_5;
break;
case NL80211_BSS_CHAN_WIDTH_10:
local->scan_chandef.width = NL80211_CHAN_WIDTH_10;
break;
default:
case NL80211_BSS_CHAN_WIDTH_20:
/* If scanning on oper channel, use whatever channel-type
* is currently in use.
*/
oper_scan_width = cfg80211_chandef_to_scan_width(
&local->_oper_chandef);
if (chan == local->_oper_chandef.chan &&
oper_scan_width == scan_req->scan_width)
local->scan_chandef = local->_oper_chandef;
else
local->scan_chandef.width = NL80211_CHAN_WIDTH_20_NOHT;
break;
case NL80211_BSS_CHAN_WIDTH_1:
case NL80211_BSS_CHAN_WIDTH_2:
/* shouldn't get here, S1G handled above */
WARN_ON(1);
break;
}
set_channel:
if (ieee80211_hw_config(local, IEEE80211_CONF_CHANGE_CHANNEL))
skip = 1;
/* advance state machine to next channel/band */
local->scan_channel_idx++;
if (skip) {
/* if we skip this channel return to the decision state */
local->next_scan_state = SCAN_DECISION;
return;
}
/*
* Probe delay is used to update the NAV, cf. 11.1.3.2.2
* (which unfortunately doesn't say _why_ step a) is done,
* but it waits for the probe delay or until a frame is
* received - and the received frame would update the NAV).
* For now, we do not support waiting until a frame is
* received.
*
* In any case, it is not necessary for a passive scan.
*/
if ((chan->flags & (IEEE80211_CHAN_NO_IR | IEEE80211_CHAN_RADAR)) ||
!scan_req->n_ssids) {
*next_delay = IEEE80211_PASSIVE_CHANNEL_TIME;
local->next_scan_state = SCAN_DECISION;
if (scan_req->n_ssids)
set_bit(SCAN_BEACON_WAIT, &local->scanning);
return;
}
/* active scan, send probes */
*next_delay = IEEE80211_PROBE_DELAY;
local->next_scan_state = SCAN_SEND_PROBE;
}
static void ieee80211_scan_state_suspend(struct ieee80211_local *local,
unsigned long *next_delay)
{
/* switch back to the operating channel */
local->scan_chandef.chan = NULL;
ieee80211_hw_config(local, IEEE80211_CONF_CHANGE_CHANNEL);
/* disable PS */
ieee80211_offchannel_return(local);
*next_delay = HZ / 5;
/* afterwards, resume scan & go to next channel */
local->next_scan_state = SCAN_RESUME;
}
static void ieee80211_scan_state_resume(struct ieee80211_local *local,
unsigned long *next_delay)
{
ieee80211_offchannel_stop_vifs(local);
if (local->ops->flush) {
ieee80211_flush_queues(local, NULL, false);
*next_delay = 0;
} else
*next_delay = HZ / 10;
/* remember when we left the operating channel */
local->leave_oper_channel_time = jiffies;
/* advance to the next channel to be scanned */
local->next_scan_state = SCAN_SET_CHANNEL;
}
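/* Software scan worker: starts a deferred scan request, drives the scan
 * state machine (decision, set channel, send probes, suspend, resume) and
 * completes the scan once all channels were handled or it was aborted.
 */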
void ieee80211_scan_work(struct work_struct *work)
{
struct ieee80211_local *local =
container_of(work, struct ieee80211_local, scan_work.work);
struct ieee80211_sub_if_data *sdata;
struct cfg80211_scan_request *scan_req;
unsigned long next_delay = 0;
bool aborted;
mutex_lock(&local->mtx);
if (!ieee80211_can_run_worker(local)) {
aborted = true;
goto out_complete;
}
sdata = rcu_dereference_protected(local->scan_sdata,
lockdep_is_held(&local->mtx));
scan_req = rcu_dereference_protected(local->scan_req,
lockdep_is_held(&local->mtx));
	/* When scanning on-channel, the first callback means the scan completed. */
if (test_bit(SCAN_ONCHANNEL_SCANNING, &local->scanning)) {
aborted = test_and_clear_bit(SCAN_ABORTED, &local->scanning);
goto out_complete;
}
if (test_and_clear_bit(SCAN_COMPLETED, &local->scanning)) {
aborted = test_and_clear_bit(SCAN_ABORTED, &local->scanning);
goto out_complete;
}
if (!sdata || !scan_req)
goto out;
if (!local->scanning) {
int rc;
RCU_INIT_POINTER(local->scan_req, NULL);
RCU_INIT_POINTER(local->scan_sdata, NULL);
rc = __ieee80211_start_scan(sdata, scan_req);
if (rc) {
/* need to complete scan in cfg80211 */
rcu_assign_pointer(local->scan_req, scan_req);
aborted = true;
goto out_complete;
} else
goto out;
}
clear_bit(SCAN_BEACON_WAIT, &local->scanning);
/*
* as long as no delay is required advance immediately
* without scheduling a new work
*/
do {
if (!ieee80211_sdata_running(sdata)) {
aborted = true;
goto out_complete;
}
if (test_and_clear_bit(SCAN_BEACON_DONE, &local->scanning) &&
local->next_scan_state == SCAN_DECISION)
local->next_scan_state = SCAN_SEND_PROBE;
switch (local->next_scan_state) {
case SCAN_DECISION:
/* if no more bands/channels left, complete scan */
if (local->scan_channel_idx >= scan_req->n_channels) {
aborted = false;
goto out_complete;
}
ieee80211_scan_state_decision(local, &next_delay);
break;
case SCAN_SET_CHANNEL:
ieee80211_scan_state_set_channel(local, &next_delay);
break;
case SCAN_SEND_PROBE:
ieee80211_scan_state_send_probe(local, &next_delay);
break;
case SCAN_SUSPEND:
ieee80211_scan_state_suspend(local, &next_delay);
break;
case SCAN_RESUME:
ieee80211_scan_state_resume(local, &next_delay);
break;
case SCAN_ABORT:
aborted = true;
goto out_complete;
}
} while (next_delay == 0);
ieee80211_queue_delayed_work(&local->hw, &local->scan_work, next_delay);
goto out;
out_complete:
__ieee80211_scan_completed(&local->hw, aborted);
out:
mutex_unlock(&local->mtx);
}
int ieee80211_request_scan(struct ieee80211_sub_if_data *sdata,
struct cfg80211_scan_request *req)
{
int res;
mutex_lock(&sdata->local->mtx);
res = __ieee80211_start_scan(sdata, req);
mutex_unlock(&sdata->local->mtx);
return res;
}
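/* Fill the internal scan request used for IBSS with either the given
 * channel list or all usable (non-6 GHz, non-NO-IR, enabled) channels and
 * the single configured SSID, then start the scan.
 */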
int ieee80211_request_ibss_scan(struct ieee80211_sub_if_data *sdata,
const u8 *ssid, u8 ssid_len,
struct ieee80211_channel **channels,
unsigned int n_channels,
enum nl80211_bss_scan_width scan_width)
{
struct ieee80211_local *local = sdata->local;
int ret = -EBUSY, i, n_ch = 0;
enum nl80211_band band;
mutex_lock(&local->mtx);
/* busy scanning */
if (local->scan_req)
goto unlock;
/* fill internal scan request */
if (!channels) {
int max_n;
for (band = 0; band < NUM_NL80211_BANDS; band++) {
if (!local->hw.wiphy->bands[band] ||
band == NL80211_BAND_6GHZ)
continue;
max_n = local->hw.wiphy->bands[band]->n_channels;
for (i = 0; i < max_n; i++) {
struct ieee80211_channel *tmp_ch =
&local->hw.wiphy->bands[band]->channels[i];
if (tmp_ch->flags & (IEEE80211_CHAN_NO_IR |
IEEE80211_CHAN_DISABLED))
continue;
local->int_scan_req->channels[n_ch] = tmp_ch;
n_ch++;
}
}
if (WARN_ON_ONCE(n_ch == 0))
goto unlock;
local->int_scan_req->n_channels = n_ch;
} else {
for (i = 0; i < n_channels; i++) {
if (channels[i]->flags & (IEEE80211_CHAN_NO_IR |
IEEE80211_CHAN_DISABLED))
continue;
local->int_scan_req->channels[n_ch] = channels[i];
n_ch++;
}
if (WARN_ON_ONCE(n_ch == 0))
goto unlock;
local->int_scan_req->n_channels = n_ch;
}
local->int_scan_req->ssids = &local->scan_ssid;
local->int_scan_req->n_ssids = 1;
local->int_scan_req->scan_width = scan_width;
memcpy(local->int_scan_req->ssids[0].ssid, ssid, IEEE80211_MAX_SSID_LEN);
local->int_scan_req->ssids[0].ssid_len = ssid_len;
ret = __ieee80211_start_scan(sdata, sdata->local->int_scan_req);
unlock:
mutex_unlock(&local->mtx);
return ret;
}
void ieee80211_scan_cancel(struct ieee80211_local *local)
{
/* ensure a new scan cannot be queued */
lockdep_assert_wiphy(local->hw.wiphy);
/*
* We are canceling software scan, or deferred scan that was not
* yet really started (see __ieee80211_start_scan ).
*
* Regarding hardware scan:
	 * - we cannot call __ieee80211_scan_completed(): when the
	 *   SCAN_HW_SCANNING bit is set, that function changes
	 *   local->hw_scan_req to operate on the next band, which races
	 *   with the driver, which may still be using local->hw_scan_req
	 *
	 * - we cannot cancel scan_work, since the driver may schedule it
	 *   via ieee80211_scan_completed(..., true) to finish the scan
*
* Hence we only call the cancel_hw_scan() callback, but the low-level
* driver is still responsible for calling ieee80211_scan_completed()
* after the scan was completed/aborted.
*/
mutex_lock(&local->mtx);
if (!local->scan_req)
goto out;
/*
* We have a scan running and the driver already reported completion,
* but the worker hasn't run yet or is stuck on the mutex - mark it as
* cancelled.
*/
if (test_bit(SCAN_HW_SCANNING, &local->scanning) &&
test_bit(SCAN_COMPLETED, &local->scanning)) {
set_bit(SCAN_HW_CANCELLED, &local->scanning);
goto out;
}
if (test_bit(SCAN_HW_SCANNING, &local->scanning)) {
/*
* Make sure that __ieee80211_scan_completed doesn't trigger a
* scan on another band.
*/
set_bit(SCAN_HW_CANCELLED, &local->scanning);
if (local->ops->cancel_hw_scan)
drv_cancel_hw_scan(local,
rcu_dereference_protected(local->scan_sdata,
lockdep_is_held(&local->mtx)));
goto out;
}
/*
* If the work is currently running, it must be blocked on
* the mutex, but we'll set scan_sdata = NULL and it'll
* simply exit once it acquires the mutex.
*/
cancel_delayed_work(&local->scan_work);
/* and clean up */
memset(&local->scan_info, 0, sizeof(local->scan_info));
__ieee80211_scan_completed(&local->hw, true);
out:
mutex_unlock(&local->mtx);
}
int __ieee80211_request_sched_scan_start(struct ieee80211_sub_if_data *sdata,
struct cfg80211_sched_scan_request *req)
{
struct ieee80211_local *local = sdata->local;
struct ieee80211_scan_ies sched_scan_ies = {};
struct cfg80211_chan_def chandef;
int ret, i, iebufsz, num_bands = 0;
u32 rate_masks[NUM_NL80211_BANDS] = {};
u8 bands_used = 0;
u8 *ie;
u32 flags = 0;
iebufsz = local->scan_ies_len + req->ie_len;
lockdep_assert_held(&local->mtx);
if (!local->ops->sched_scan_start)
return -ENOTSUPP;
for (i = 0; i < NUM_NL80211_BANDS; i++) {
if (local->hw.wiphy->bands[i]) {
bands_used |= BIT(i);
rate_masks[i] = (u32) -1;
num_bands++;
}
}
if (req->flags & NL80211_SCAN_FLAG_MIN_PREQ_CONTENT)
flags |= IEEE80211_PROBE_FLAG_MIN_CONTENT;
ie = kcalloc(iebufsz, num_bands, GFP_KERNEL);
if (!ie) {
ret = -ENOMEM;
goto out;
}
ieee80211_prepare_scan_chandef(&chandef, req->scan_width);
ieee80211_build_preq_ies(sdata, ie, num_bands * iebufsz,
&sched_scan_ies, req->ie,
req->ie_len, bands_used, rate_masks, &chandef,
flags);
ret = drv_sched_scan_start(local, sdata, req, &sched_scan_ies);
if (ret == 0) {
rcu_assign_pointer(local->sched_scan_sdata, sdata);
rcu_assign_pointer(local->sched_scan_req, req);
}
kfree(ie);
out:
if (ret) {
/* Clean in case of failure after HW restart or upon resume. */
RCU_INIT_POINTER(local->sched_scan_sdata, NULL);
RCU_INIT_POINTER(local->sched_scan_req, NULL);
}
return ret;
}
int ieee80211_request_sched_scan_start(struct ieee80211_sub_if_data *sdata,
struct cfg80211_sched_scan_request *req)
{
struct ieee80211_local *local = sdata->local;
int ret;
mutex_lock(&local->mtx);
if (rcu_access_pointer(local->sched_scan_sdata)) {
mutex_unlock(&local->mtx);
return -EBUSY;
}
ret = __ieee80211_request_sched_scan_start(sdata, req);
mutex_unlock(&local->mtx);
return ret;
}
int ieee80211_request_sched_scan_stop(struct ieee80211_local *local)
{
struct ieee80211_sub_if_data *sched_scan_sdata;
int ret = -ENOENT;
mutex_lock(&local->mtx);
if (!local->ops->sched_scan_stop) {
ret = -ENOTSUPP;
goto out;
}
/* We don't want to restart sched scan anymore. */
RCU_INIT_POINTER(local->sched_scan_req, NULL);
sched_scan_sdata = rcu_dereference_protected(local->sched_scan_sdata,
lockdep_is_held(&local->mtx));
if (sched_scan_sdata) {
ret = drv_sched_scan_stop(local, sched_scan_sdata);
if (!ret)
RCU_INIT_POINTER(local->sched_scan_sdata, NULL);
}
out:
mutex_unlock(&local->mtx);
return ret;
}
void ieee80211_sched_scan_results(struct ieee80211_hw *hw)
{
struct ieee80211_local *local = hw_to_local(hw);
trace_api_sched_scan_results(local);
cfg80211_sched_scan_results(hw->wiphy, 0);
}
EXPORT_SYMBOL(ieee80211_sched_scan_results);
void ieee80211_sched_scan_end(struct ieee80211_local *local)
{
mutex_lock(&local->mtx);
if (!rcu_access_pointer(local->sched_scan_sdata)) {
mutex_unlock(&local->mtx);
return;
}
RCU_INIT_POINTER(local->sched_scan_sdata, NULL);
/* If sched scan was aborted by the driver. */
RCU_INIT_POINTER(local->sched_scan_req, NULL);
mutex_unlock(&local->mtx);
cfg80211_sched_scan_stopped(local->hw.wiphy, 0);
}
void ieee80211_sched_scan_stopped_work(struct work_struct *work)
{
struct ieee80211_local *local =
container_of(work, struct ieee80211_local,
sched_scan_stopped_work);
ieee80211_sched_scan_end(local);
}
void ieee80211_sched_scan_stopped(struct ieee80211_hw *hw)
{
struct ieee80211_local *local = hw_to_local(hw);
trace_api_sched_scan_stopped(local);
/*
* this shouldn't really happen, so for simplicity
* simply ignore it, and let mac80211 reconfigure
* the sched scan later on.
*/
if (local->in_reconfig)
return;
schedule_work(&local->sched_scan_stopped_work);
}
EXPORT_SYMBOL(ieee80211_sched_scan_stopped);
| linux-master | net/mac80211/scan.c |
// SPDX-License-Identifier: GPL-2.0-only
/*
* Copyright (c) 2008, 2009 open80211s Ltd.
* Author: Luis Carlos Cobo <[email protected]>
*/
#include <linux/etherdevice.h>
#include <linux/list.h>
#include <linux/random.h>
#include <linux/slab.h>
#include <linux/spinlock.h>
#include <linux/string.h>
#include <net/mac80211.h>
#include "wme.h"
#include "ieee80211_i.h"
#include "mesh.h"
#include <linux/rhashtable.h>
static void mesh_path_free_rcu(struct mesh_table *tbl, struct mesh_path *mpath);
static u32 mesh_table_hash(const void *addr, u32 len, u32 seed)
{
/* Use last four bytes of hw addr as hash index */
return jhash_1word(__get_unaligned_cpu32((u8 *)addr + 2), seed);
}
static const struct rhashtable_params mesh_rht_params = {
.nelem_hint = 2,
.automatic_shrinking = true,
.key_len = ETH_ALEN,
.key_offset = offsetof(struct mesh_path, dst),
.head_offset = offsetof(struct mesh_path, rhash),
.hashfn = mesh_table_hash,
};
static const struct rhashtable_params fast_tx_rht_params = {
.nelem_hint = 10,
.automatic_shrinking = true,
.key_len = ETH_ALEN,
.key_offset = offsetof(struct ieee80211_mesh_fast_tx, addr_key),
.head_offset = offsetof(struct ieee80211_mesh_fast_tx, rhash),
.hashfn = mesh_table_hash,
};
static void __mesh_fast_tx_entry_free(void *ptr, void *tblptr)
{
struct ieee80211_mesh_fast_tx *entry = ptr;
kfree_rcu(entry, fast_tx.rcu_head);
}
static void mesh_fast_tx_deinit(struct ieee80211_sub_if_data *sdata)
{
struct mesh_tx_cache *cache;
cache = &sdata->u.mesh.tx_cache;
rhashtable_free_and_destroy(&cache->rht,
__mesh_fast_tx_entry_free, NULL);
}
static void mesh_fast_tx_init(struct ieee80211_sub_if_data *sdata)
{
struct mesh_tx_cache *cache;
cache = &sdata->u.mesh.tx_cache;
rhashtable_init(&cache->rht, &fast_tx_rht_params);
INIT_HLIST_HEAD(&cache->walk_head);
spin_lock_init(&cache->walk_lock);
}
static inline bool mpath_expired(struct mesh_path *mpath)
{
return (mpath->flags & MESH_PATH_ACTIVE) &&
time_after(jiffies, mpath->exp_time) &&
!(mpath->flags & MESH_PATH_FIXED);
}
static void mesh_path_rht_free(void *ptr, void *tblptr)
{
struct mesh_path *mpath = ptr;
struct mesh_table *tbl = tblptr;
mesh_path_free_rcu(tbl, mpath);
}
static void mesh_table_init(struct mesh_table *tbl)
{
INIT_HLIST_HEAD(&tbl->known_gates);
INIT_HLIST_HEAD(&tbl->walk_head);
atomic_set(&tbl->entries, 0);
spin_lock_init(&tbl->gates_lock);
spin_lock_init(&tbl->walk_lock);
/* rhashtable_init() may fail only in case of wrong
* mesh_rht_params
*/
WARN_ON(rhashtable_init(&tbl->rhead, &mesh_rht_params));
}
static void mesh_table_free(struct mesh_table *tbl)
{
rhashtable_free_and_destroy(&tbl->rhead,
mesh_path_rht_free, tbl);
}
/**
* mesh_path_assign_nexthop - update mesh path next hop
*
* @mpath: mesh path to update
* @sta: next hop to assign
*
* Locking: mpath->state_lock must be held when calling this function
*/
void mesh_path_assign_nexthop(struct mesh_path *mpath, struct sta_info *sta)
{
struct sk_buff *skb;
struct ieee80211_hdr *hdr;
unsigned long flags;
rcu_assign_pointer(mpath->next_hop, sta);
spin_lock_irqsave(&mpath->frame_queue.lock, flags);
skb_queue_walk(&mpath->frame_queue, skb) {
hdr = (struct ieee80211_hdr *) skb->data;
memcpy(hdr->addr1, sta->sta.addr, ETH_ALEN);
memcpy(hdr->addr2, mpath->sdata->vif.addr, ETH_ALEN);
ieee80211_mps_set_frame_flags(sta->sdata, sta, hdr);
}
spin_unlock_irqrestore(&mpath->frame_queue.lock, flags);
}
static void prepare_for_gate(struct sk_buff *skb, char *dst_addr,
struct mesh_path *gate_mpath)
{
struct ieee80211_hdr *hdr;
struct ieee80211s_hdr *mshdr;
int mesh_hdrlen, hdrlen;
char *next_hop;
hdr = (struct ieee80211_hdr *) skb->data;
hdrlen = ieee80211_hdrlen(hdr->frame_control);
mshdr = (struct ieee80211s_hdr *) (skb->data + hdrlen);
if (!(mshdr->flags & MESH_FLAGS_AE)) {
/* size of the fixed part of the mesh header */
mesh_hdrlen = 6;
/* make room for the two extended addresses */
skb_push(skb, 2 * ETH_ALEN);
memmove(skb->data, hdr, hdrlen + mesh_hdrlen);
hdr = (struct ieee80211_hdr *) skb->data;
/* we preserve the previous mesh header and only add
* the new addresses */
mshdr = (struct ieee80211s_hdr *) (skb->data + hdrlen);
mshdr->flags = MESH_FLAGS_AE_A5_A6;
memcpy(mshdr->eaddr1, hdr->addr3, ETH_ALEN);
memcpy(mshdr->eaddr2, hdr->addr4, ETH_ALEN);
}
/* update next hop */
hdr = (struct ieee80211_hdr *) skb->data;
rcu_read_lock();
next_hop = rcu_dereference(gate_mpath->next_hop)->sta.addr;
memcpy(hdr->addr1, next_hop, ETH_ALEN);
rcu_read_unlock();
memcpy(hdr->addr2, gate_mpath->sdata->vif.addr, ETH_ALEN);
memcpy(hdr->addr3, dst_addr, ETH_ALEN);
}
/**
* mesh_path_move_to_queue - Move or copy frames from one mpath queue to another
*
* This function is used to transfer or copy frames from an unresolved mpath to
* a gate mpath. The function also adds the Address Extension field and
* updates the next hop.
*
* If a frame already has an Address Extension field, only the next hop and
* destination addresses are updated.
*
* The gate mpath must be an active mpath with a valid mpath->next_hop.
*
* @gate_mpath: An active mpath the frames will be sent to (i.e. the gate)
* @from_mpath: The failed mpath
* @copy: When true, copy all the frames to the new mpath queue. When false,
* move them.
*/
static void mesh_path_move_to_queue(struct mesh_path *gate_mpath,
struct mesh_path *from_mpath,
bool copy)
{
struct sk_buff *skb, *fskb, *tmp;
struct sk_buff_head failq;
unsigned long flags;
if (WARN_ON(gate_mpath == from_mpath))
return;
if (WARN_ON(!gate_mpath->next_hop))
return;
__skb_queue_head_init(&failq);
spin_lock_irqsave(&from_mpath->frame_queue.lock, flags);
skb_queue_splice_init(&from_mpath->frame_queue, &failq);
spin_unlock_irqrestore(&from_mpath->frame_queue.lock, flags);
skb_queue_walk_safe(&failq, fskb, tmp) {
if (skb_queue_len(&gate_mpath->frame_queue) >=
MESH_FRAME_QUEUE_LEN) {
mpath_dbg(gate_mpath->sdata, "mpath queue full!\n");
break;
}
skb = skb_copy(fskb, GFP_ATOMIC);
if (WARN_ON(!skb))
break;
prepare_for_gate(skb, gate_mpath->dst, gate_mpath);
skb_queue_tail(&gate_mpath->frame_queue, skb);
if (copy)
continue;
__skb_unlink(fskb, &failq);
kfree_skb(fskb);
}
mpath_dbg(gate_mpath->sdata, "Mpath queue for gate %pM has %d frames\n",
gate_mpath->dst, skb_queue_len(&gate_mpath->frame_queue));
if (!copy)
return;
spin_lock_irqsave(&from_mpath->frame_queue.lock, flags);
skb_queue_splice(&failq, &from_mpath->frame_queue);
spin_unlock_irqrestore(&from_mpath->frame_queue.lock, flags);
}
static struct mesh_path *mpath_lookup(struct mesh_table *tbl, const u8 *dst,
struct ieee80211_sub_if_data *sdata)
{
struct mesh_path *mpath;
mpath = rhashtable_lookup(&tbl->rhead, dst, mesh_rht_params);
if (mpath && mpath_expired(mpath)) {
spin_lock_bh(&mpath->state_lock);
mpath->flags &= ~MESH_PATH_ACTIVE;
spin_unlock_bh(&mpath->state_lock);
}
return mpath;
}
/**
* mesh_path_lookup - look up a path in the mesh path table
* @sdata: local subif
* @dst: hardware address (ETH_ALEN length) of destination
*
* Returns: pointer to the mesh path structure, or NULL if not found
*
* Locking: must be called within a read rcu section.
*/
struct mesh_path *
mesh_path_lookup(struct ieee80211_sub_if_data *sdata, const u8 *dst)
{
return mpath_lookup(&sdata->u.mesh.mesh_paths, dst, sdata);
}
struct mesh_path *
mpp_path_lookup(struct ieee80211_sub_if_data *sdata, const u8 *dst)
{
return mpath_lookup(&sdata->u.mesh.mpp_paths, dst, sdata);
}
static struct mesh_path *
__mesh_path_lookup_by_idx(struct mesh_table *tbl, int idx)
{
int i = 0;
struct mesh_path *mpath;
hlist_for_each_entry_rcu(mpath, &tbl->walk_head, walk_list) {
if (i++ == idx)
break;
}
if (!mpath)
return NULL;
if (mpath_expired(mpath)) {
spin_lock_bh(&mpath->state_lock);
mpath->flags &= ~MESH_PATH_ACTIVE;
spin_unlock_bh(&mpath->state_lock);
}
return mpath;
}
/**
* mesh_path_lookup_by_idx - look up a path in the mesh path table by its index
* @idx: index
* @sdata: local subif, or NULL for all entries
*
* Returns: pointer to the mesh path structure, or NULL if not found.
*
* Locking: must be called within a read rcu section.
*/
struct mesh_path *
mesh_path_lookup_by_idx(struct ieee80211_sub_if_data *sdata, int idx)
{
return __mesh_path_lookup_by_idx(&sdata->u.mesh.mesh_paths, idx);
}
/**
* mpp_path_lookup_by_idx - look up a path in the proxy path table by its index
* @idx: index
* @sdata: local subif, or NULL for all entries
*
* Returns: pointer to the proxy path structure, or NULL if not found.
*
* Locking: must be called within a read rcu section.
*/
struct mesh_path *
mpp_path_lookup_by_idx(struct ieee80211_sub_if_data *sdata, int idx)
{
return __mesh_path_lookup_by_idx(&sdata->u.mesh.mpp_paths, idx);
}
/**
* mesh_path_add_gate - add the given mpath to a mesh gate to our path table
* @mpath: gate path to add to table
*/
int mesh_path_add_gate(struct mesh_path *mpath)
{
struct mesh_table *tbl;
int err;
rcu_read_lock();
tbl = &mpath->sdata->u.mesh.mesh_paths;
spin_lock_bh(&mpath->state_lock);
if (mpath->is_gate) {
err = -EEXIST;
spin_unlock_bh(&mpath->state_lock);
goto err_rcu;
}
mpath->is_gate = true;
mpath->sdata->u.mesh.num_gates++;
spin_lock(&tbl->gates_lock);
hlist_add_head_rcu(&mpath->gate_list, &tbl->known_gates);
spin_unlock(&tbl->gates_lock);
spin_unlock_bh(&mpath->state_lock);
mpath_dbg(mpath->sdata,
"Mesh path: Recorded new gate: %pM. %d known gates\n",
mpath->dst, mpath->sdata->u.mesh.num_gates);
err = 0;
err_rcu:
rcu_read_unlock();
return err;
}
/**
* mesh_gate_del - remove a mesh gate from the list of known gates
* @tbl: table which holds our list of known gates
* @mpath: gate mpath
*/
static void mesh_gate_del(struct mesh_table *tbl, struct mesh_path *mpath)
{
lockdep_assert_held(&mpath->state_lock);
if (!mpath->is_gate)
return;
mpath->is_gate = false;
spin_lock_bh(&tbl->gates_lock);
hlist_del_rcu(&mpath->gate_list);
mpath->sdata->u.mesh.num_gates--;
spin_unlock_bh(&tbl->gates_lock);
mpath_dbg(mpath->sdata,
"Mesh path: Deleted gate: %pM. %d known gates\n",
mpath->dst, mpath->sdata->u.mesh.num_gates);
}
/**
* mesh_gate_num - number of gates known to this interface
* @sdata: subif data
*/
int mesh_gate_num(struct ieee80211_sub_if_data *sdata)
{
return sdata->u.mesh.num_gates;
}
static
struct mesh_path *mesh_path_new(struct ieee80211_sub_if_data *sdata,
const u8 *dst, gfp_t gfp_flags)
{
struct mesh_path *new_mpath;
new_mpath = kzalloc(sizeof(struct mesh_path), gfp_flags);
if (!new_mpath)
return NULL;
memcpy(new_mpath->dst, dst, ETH_ALEN);
eth_broadcast_addr(new_mpath->rann_snd_addr);
new_mpath->is_root = false;
new_mpath->sdata = sdata;
new_mpath->flags = 0;
skb_queue_head_init(&new_mpath->frame_queue);
new_mpath->exp_time = jiffies;
spin_lock_init(&new_mpath->state_lock);
timer_setup(&new_mpath->timer, mesh_path_timer, 0);
return new_mpath;
}
static void mesh_fast_tx_entry_free(struct mesh_tx_cache *cache,
struct ieee80211_mesh_fast_tx *entry)
{
hlist_del_rcu(&entry->walk_list);
rhashtable_remove_fast(&cache->rht, &entry->rhash, fast_tx_rht_params);
kfree_rcu(entry, fast_tx.rcu_head);
}
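/**
 * mesh_fast_tx_get - look up a fast xmit header cache entry
 * @sdata: local subif
 * @addr: destination address of the cached entry
 *
 * Returns: the cache entry, or NULL if there is none or the underlying mesh
 * path is no longer active or has expired (in which case the stale entry is
 * removed). Refreshes the mesh path and entry timestamps on a hit.
 */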
struct ieee80211_mesh_fast_tx *
mesh_fast_tx_get(struct ieee80211_sub_if_data *sdata, const u8 *addr)
{
struct ieee80211_mesh_fast_tx *entry;
struct mesh_tx_cache *cache;
cache = &sdata->u.mesh.tx_cache;
entry = rhashtable_lookup(&cache->rht, addr, fast_tx_rht_params);
if (!entry)
return NULL;
if (!(entry->mpath->flags & MESH_PATH_ACTIVE) ||
mpath_expired(entry->mpath)) {
spin_lock_bh(&cache->walk_lock);
entry = rhashtable_lookup(&cache->rht, addr, fast_tx_rht_params);
if (entry)
mesh_fast_tx_entry_free(cache, entry);
spin_unlock_bh(&cache->walk_lock);
return NULL;
}
mesh_path_refresh(sdata, entry->mpath, NULL);
if (entry->mppath)
entry->mppath->exp_time = jiffies;
entry->timestamp = jiffies;
return entry;
}
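/**
 * mesh_fast_tx_cache - cache the headers of a frame sent over a mesh path
 * @sdata: local subif
 * @skb: the QoS data frame whose 802.11 and mesh headers are captured
 * @mpath: the (active) mesh path the frame is being sent on
 *
 * Builds a fast xmit cache entry (header template, key and offsets) so that
 * subsequent frames to the same destination can reuse it. Bails out if the
 * cache is full, the next hop or proxy path is unknown, or the key/cipher
 * cannot be handled by the fast path.
 */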
void mesh_fast_tx_cache(struct ieee80211_sub_if_data *sdata,
struct sk_buff *skb, struct mesh_path *mpath)
{
struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)skb->data;
struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb);
struct ieee80211_mesh_fast_tx *entry, *prev;
struct ieee80211_mesh_fast_tx build = {};
struct ieee80211s_hdr *meshhdr;
struct mesh_tx_cache *cache;
struct ieee80211_key *key;
struct mesh_path *mppath;
struct sta_info *sta;
u8 *qc;
if (sdata->noack_map ||
!ieee80211_is_data_qos(hdr->frame_control))
return;
build.fast_tx.hdr_len = ieee80211_hdrlen(hdr->frame_control);
meshhdr = (struct ieee80211s_hdr *)(skb->data + build.fast_tx.hdr_len);
build.hdrlen = ieee80211_get_mesh_hdrlen(meshhdr);
cache = &sdata->u.mesh.tx_cache;
if (atomic_read(&cache->rht.nelems) >= MESH_FAST_TX_CACHE_MAX_SIZE)
return;
sta = rcu_dereference(mpath->next_hop);
if (!sta)
return;
if ((meshhdr->flags & MESH_FLAGS_AE) == MESH_FLAGS_AE_A5_A6) {
/* This is required to keep the mppath alive */
mppath = mpp_path_lookup(sdata, meshhdr->eaddr1);
if (!mppath)
return;
build.mppath = mppath;
} else if (ieee80211_has_a4(hdr->frame_control)) {
mppath = mpath;
} else {
return;
}
/* rate limit, in case fast xmit can't be enabled */
if (mppath->fast_tx_check == jiffies)
return;
mppath->fast_tx_check = jiffies;
/*
* Same use of the sta lock as in ieee80211_check_fast_xmit, in order
* to protect against concurrent sta key updates.
*/
spin_lock_bh(&sta->lock);
key = rcu_access_pointer(sta->ptk[sta->ptk_idx]);
if (!key)
key = rcu_access_pointer(sdata->default_unicast_key);
build.fast_tx.key = key;
if (key) {
bool gen_iv, iv_spc;
gen_iv = key->conf.flags & IEEE80211_KEY_FLAG_GENERATE_IV;
iv_spc = key->conf.flags & IEEE80211_KEY_FLAG_PUT_IV_SPACE;
if (!(key->flags & KEY_FLAG_UPLOADED_TO_HARDWARE) ||
(key->flags & KEY_FLAG_TAINTED))
goto unlock_sta;
switch (key->conf.cipher) {
case WLAN_CIPHER_SUITE_CCMP:
case WLAN_CIPHER_SUITE_CCMP_256:
if (gen_iv)
build.fast_tx.pn_offs = build.fast_tx.hdr_len;
if (gen_iv || iv_spc)
build.fast_tx.hdr_len += IEEE80211_CCMP_HDR_LEN;
break;
case WLAN_CIPHER_SUITE_GCMP:
case WLAN_CIPHER_SUITE_GCMP_256:
if (gen_iv)
build.fast_tx.pn_offs = build.fast_tx.hdr_len;
if (gen_iv || iv_spc)
build.fast_tx.hdr_len += IEEE80211_GCMP_HDR_LEN;
break;
default:
goto unlock_sta;
}
}
memcpy(build.addr_key, mppath->dst, ETH_ALEN);
build.timestamp = jiffies;
build.fast_tx.band = info->band;
build.fast_tx.da_offs = offsetof(struct ieee80211_hdr, addr3);
build.fast_tx.sa_offs = offsetof(struct ieee80211_hdr, addr4);
build.mpath = mpath;
memcpy(build.hdr, meshhdr, build.hdrlen);
memcpy(build.hdr + build.hdrlen, rfc1042_header, sizeof(rfc1042_header));
build.hdrlen += sizeof(rfc1042_header);
memcpy(build.fast_tx.hdr, hdr, build.fast_tx.hdr_len);
hdr = (struct ieee80211_hdr *)build.fast_tx.hdr;
if (build.fast_tx.key)
hdr->frame_control |= cpu_to_le16(IEEE80211_FCTL_PROTECTED);
qc = ieee80211_get_qos_ctl(hdr);
qc[1] |= IEEE80211_QOS_CTL_MESH_CONTROL_PRESENT >> 8;
entry = kmemdup(&build, sizeof(build), GFP_ATOMIC);
if (!entry)
goto unlock_sta;
spin_lock(&cache->walk_lock);
prev = rhashtable_lookup_get_insert_fast(&cache->rht,
&entry->rhash,
fast_tx_rht_params);
if (unlikely(IS_ERR(prev))) {
kfree(entry);
goto unlock_cache;
}
/*
* replace any previous entry in the hash table, in case we're
* replacing it with a different type (e.g. mpath -> mpp)
*/
if (unlikely(prev)) {
rhashtable_replace_fast(&cache->rht, &prev->rhash,
&entry->rhash, fast_tx_rht_params);
hlist_del_rcu(&prev->walk_list);
kfree_rcu(prev, fast_tx.rcu_head);
}
hlist_add_head(&entry->walk_list, &cache->walk_head);
unlock_cache:
spin_unlock(&cache->walk_lock);
unlock_sta:
spin_unlock_bh(&sta->lock);
}
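/**
 * mesh_fast_tx_gc - garbage collect the fast xmit header cache
 * @sdata: local subif
 *
 * Once the cache has grown past MESH_FAST_TX_CACHE_THRESHOLD_SIZE, remove
 * all entries older than MESH_FAST_TX_CACHE_TIMEOUT.
 */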
void mesh_fast_tx_gc(struct ieee80211_sub_if_data *sdata)
{
unsigned long timeout = msecs_to_jiffies(MESH_FAST_TX_CACHE_TIMEOUT);
struct mesh_tx_cache *cache;
struct ieee80211_mesh_fast_tx *entry;
struct hlist_node *n;
cache = &sdata->u.mesh.tx_cache;
if (atomic_read(&cache->rht.nelems) < MESH_FAST_TX_CACHE_THRESHOLD_SIZE)
return;
spin_lock_bh(&cache->walk_lock);
hlist_for_each_entry_safe(entry, n, &cache->walk_head, walk_list)
if (!time_is_after_jiffies(entry->timestamp + timeout))
mesh_fast_tx_entry_free(cache, entry);
spin_unlock_bh(&cache->walk_lock);
}
void mesh_fast_tx_flush_mpath(struct mesh_path *mpath)
{
struct ieee80211_sub_if_data *sdata = mpath->sdata;
struct mesh_tx_cache *cache = &sdata->u.mesh.tx_cache;
struct ieee80211_mesh_fast_tx *entry;
struct hlist_node *n;
cache = &sdata->u.mesh.tx_cache;
spin_lock_bh(&cache->walk_lock);
hlist_for_each_entry_safe(entry, n, &cache->walk_head, walk_list)
if (entry->mpath == mpath)
mesh_fast_tx_entry_free(cache, entry);
spin_unlock_bh(&cache->walk_lock);
}
void mesh_fast_tx_flush_sta(struct ieee80211_sub_if_data *sdata,
struct sta_info *sta)
{
struct mesh_tx_cache *cache = &sdata->u.mesh.tx_cache;
struct ieee80211_mesh_fast_tx *entry;
struct hlist_node *n;
cache = &sdata->u.mesh.tx_cache;
spin_lock_bh(&cache->walk_lock);
hlist_for_each_entry_safe(entry, n, &cache->walk_head, walk_list)
if (rcu_access_pointer(entry->mpath->next_hop) == sta)
mesh_fast_tx_entry_free(cache, entry);
spin_unlock_bh(&cache->walk_lock);
}
void mesh_fast_tx_flush_addr(struct ieee80211_sub_if_data *sdata,
const u8 *addr)
{
struct mesh_tx_cache *cache = &sdata->u.mesh.tx_cache;
struct ieee80211_mesh_fast_tx *entry;
cache = &sdata->u.mesh.tx_cache;
spin_lock_bh(&cache->walk_lock);
entry = rhashtable_lookup(&cache->rht, addr, fast_tx_rht_params);
if (entry)
mesh_fast_tx_entry_free(cache, entry);
spin_unlock_bh(&cache->walk_lock);
}
/**
* mesh_path_add - allocate and add a new path to the mesh path table
* @dst: destination address of the path (ETH_ALEN length)
* @sdata: local subif
*
* Returns: 0 on success
*
* State: the initial state of the new path is set to 0
*/
struct mesh_path *mesh_path_add(struct ieee80211_sub_if_data *sdata,
const u8 *dst)
{
struct mesh_table *tbl;
struct mesh_path *mpath, *new_mpath;
if (ether_addr_equal(dst, sdata->vif.addr))
/* never add ourselves as neighbours */
return ERR_PTR(-ENOTSUPP);
if (is_multicast_ether_addr(dst))
return ERR_PTR(-ENOTSUPP);
if (atomic_add_unless(&sdata->u.mesh.mpaths, 1, MESH_MAX_MPATHS) == 0)
return ERR_PTR(-ENOSPC);
new_mpath = mesh_path_new(sdata, dst, GFP_ATOMIC);
if (!new_mpath)
return ERR_PTR(-ENOMEM);
tbl = &sdata->u.mesh.mesh_paths;
spin_lock_bh(&tbl->walk_lock);
mpath = rhashtable_lookup_get_insert_fast(&tbl->rhead,
&new_mpath->rhash,
mesh_rht_params);
if (!mpath)
hlist_add_head(&new_mpath->walk_list, &tbl->walk_head);
spin_unlock_bh(&tbl->walk_lock);
if (mpath) {
kfree(new_mpath);
if (IS_ERR(mpath))
return mpath;
new_mpath = mpath;
}
sdata->u.mesh.mesh_paths_generation++;
return new_mpath;
}
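/**
 * mpp_path_add - allocate and add a new proxy path to the mesh proxy path table
 * @sdata: local subif
 * @dst: destination address of the proxied node (ETH_ALEN length)
 * @mpp: mesh proxy address for @dst (ETH_ALEN length)
 *
 * Returns: 0 on success
 */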
int mpp_path_add(struct ieee80211_sub_if_data *sdata,
const u8 *dst, const u8 *mpp)
{
struct mesh_table *tbl;
struct mesh_path *new_mpath;
int ret;
if (ether_addr_equal(dst, sdata->vif.addr))
/* never add ourselves as neighbours */
return -ENOTSUPP;
if (is_multicast_ether_addr(dst))
return -ENOTSUPP;
new_mpath = mesh_path_new(sdata, dst, GFP_ATOMIC);
if (!new_mpath)
return -ENOMEM;
memcpy(new_mpath->mpp, mpp, ETH_ALEN);
tbl = &sdata->u.mesh.mpp_paths;
spin_lock_bh(&tbl->walk_lock);
ret = rhashtable_lookup_insert_fast(&tbl->rhead,
&new_mpath->rhash,
mesh_rht_params);
if (!ret)
hlist_add_head_rcu(&new_mpath->walk_list, &tbl->walk_head);
spin_unlock_bh(&tbl->walk_lock);
if (ret)
kfree(new_mpath);
else
mesh_fast_tx_flush_addr(sdata, dst);
sdata->u.mesh.mpp_paths_generation++;
return ret;
}
/**
* mesh_plink_broken - deactivates paths and sends perr when a link breaks
*
* @sta: broken peer link
*
* This function must be called from the rate control algorithm if enough
* delivery errors suggest that a peer link is no longer usable.
*/
void mesh_plink_broken(struct sta_info *sta)
{
struct ieee80211_sub_if_data *sdata = sta->sdata;
struct mesh_table *tbl = &sdata->u.mesh.mesh_paths;
static const u8 bcast[ETH_ALEN] = {0xff, 0xff, 0xff, 0xff, 0xff, 0xff};
struct mesh_path *mpath;
rcu_read_lock();
hlist_for_each_entry_rcu(mpath, &tbl->walk_head, walk_list) {
if (rcu_access_pointer(mpath->next_hop) == sta &&
mpath->flags & MESH_PATH_ACTIVE &&
!(mpath->flags & MESH_PATH_FIXED)) {
spin_lock_bh(&mpath->state_lock);
mpath->flags &= ~MESH_PATH_ACTIVE;
++mpath->sn;
spin_unlock_bh(&mpath->state_lock);
mesh_path_error_tx(sdata,
sdata->u.mesh.mshcfg.element_ttl,
mpath->dst, mpath->sn,
WLAN_REASON_MESH_PATH_DEST_UNREACHABLE, bcast);
}
}
rcu_read_unlock();
}
static void mesh_path_free_rcu(struct mesh_table *tbl,
struct mesh_path *mpath)
{
struct ieee80211_sub_if_data *sdata = mpath->sdata;
spin_lock_bh(&mpath->state_lock);
mpath->flags |= MESH_PATH_RESOLVING | MESH_PATH_DELETED;
mesh_gate_del(tbl, mpath);
spin_unlock_bh(&mpath->state_lock);
timer_shutdown_sync(&mpath->timer);
atomic_dec(&sdata->u.mesh.mpaths);
atomic_dec(&tbl->entries);
mesh_path_flush_pending(mpath);
kfree_rcu(mpath, rcu);
}
static void __mesh_path_del(struct mesh_table *tbl, struct mesh_path *mpath)
{
hlist_del_rcu(&mpath->walk_list);
rhashtable_remove_fast(&tbl->rhead, &mpath->rhash, mesh_rht_params);
if (tbl == &mpath->sdata->u.mesh.mpp_paths)
mesh_fast_tx_flush_addr(mpath->sdata, mpath->dst);
else
mesh_fast_tx_flush_mpath(mpath);
mesh_path_free_rcu(tbl, mpath);
}
/**
* mesh_path_flush_by_nexthop - Deletes mesh paths if their next hop matches
*
* @sta: mesh peer to match
*
* RCU notes: this function is called when a mesh plink transitions from
* PLINK_ESTAB to any other state, since PLINK_ESTAB state is the only one that
* allows path creation. This will happen before the sta can be freed (because
* sta_info_destroy() calls this) so any reader in a rcu read block will be
* protected against the plink disappearing.
*/
void mesh_path_flush_by_nexthop(struct sta_info *sta)
{
struct ieee80211_sub_if_data *sdata = sta->sdata;
struct mesh_table *tbl = &sdata->u.mesh.mesh_paths;
struct mesh_path *mpath;
struct hlist_node *n;
spin_lock_bh(&tbl->walk_lock);
hlist_for_each_entry_safe(mpath, n, &tbl->walk_head, walk_list) {
if (rcu_access_pointer(mpath->next_hop) == sta)
__mesh_path_del(tbl, mpath);
}
spin_unlock_bh(&tbl->walk_lock);
}
static void mpp_flush_by_proxy(struct ieee80211_sub_if_data *sdata,
const u8 *proxy)
{
struct mesh_table *tbl = &sdata->u.mesh.mpp_paths;
struct mesh_path *mpath;
struct hlist_node *n;
spin_lock_bh(&tbl->walk_lock);
hlist_for_each_entry_safe(mpath, n, &tbl->walk_head, walk_list) {
if (ether_addr_equal(mpath->mpp, proxy))
__mesh_path_del(tbl, mpath);
}
spin_unlock_bh(&tbl->walk_lock);
}
static void table_flush_by_iface(struct mesh_table *tbl)
{
struct mesh_path *mpath;
struct hlist_node *n;
spin_lock_bh(&tbl->walk_lock);
hlist_for_each_entry_safe(mpath, n, &tbl->walk_head, walk_list) {
__mesh_path_del(tbl, mpath);
}
spin_unlock_bh(&tbl->walk_lock);
}
/**
* mesh_path_flush_by_iface - Deletes all mesh paths associated with a given iface
*
 * This function deletes both mesh paths and mesh portal paths.
*
* @sdata: interface data to match
*
*/
void mesh_path_flush_by_iface(struct ieee80211_sub_if_data *sdata)
{
table_flush_by_iface(&sdata->u.mesh.mesh_paths);
table_flush_by_iface(&sdata->u.mesh.mpp_paths);
}
/**
* table_path_del - delete a path from the mesh or mpp table
*
* @tbl: mesh or mpp path table
* @sdata: local subif
* @addr: dst address (ETH_ALEN length)
*
* Returns: 0 if successful
*/
static int table_path_del(struct mesh_table *tbl,
struct ieee80211_sub_if_data *sdata,
const u8 *addr)
{
struct mesh_path *mpath;
spin_lock_bh(&tbl->walk_lock);
mpath = rhashtable_lookup_fast(&tbl->rhead, addr, mesh_rht_params);
if (!mpath) {
spin_unlock_bh(&tbl->walk_lock);
return -ENXIO;
}
__mesh_path_del(tbl, mpath);
spin_unlock_bh(&tbl->walk_lock);
return 0;
}
/**
* mesh_path_del - delete a mesh path from the table
*
* @addr: dst address (ETH_ALEN length)
* @sdata: local subif
*
* Returns: 0 if successful
*/
int mesh_path_del(struct ieee80211_sub_if_data *sdata, const u8 *addr)
{
int err;
/* flush relevant mpp entries first */
mpp_flush_by_proxy(sdata, addr);
err = table_path_del(&sdata->u.mesh.mesh_paths, sdata, addr);
sdata->u.mesh.mesh_paths_generation++;
return err;
}
/**
* mesh_path_tx_pending - sends pending frames in a mesh path queue
*
* @mpath: mesh path to activate
*
* Locking: the state_lock of the mpath structure must NOT be held when calling
* this function.
*/
void mesh_path_tx_pending(struct mesh_path *mpath)
{
if (mpath->flags & MESH_PATH_ACTIVE)
ieee80211_add_pending_skbs(mpath->sdata->local,
&mpath->frame_queue);
}
/**
* mesh_path_send_to_gates - sends pending frames to all known mesh gates
*
* @mpath: mesh path whose queue will be emptied
*
 * If there is only one gate, the frames are transferred from the failed mpath
 * queue to that gate's queue. If there is more than one gate, the frames
 * are copied from each gate to the next. After frames are copied, the
 * mpath queues are emptied onto the transmission queue.
 *
 * Returns: 0 on success, or -EHOSTUNREACH if no active gate was found
*/
int mesh_path_send_to_gates(struct mesh_path *mpath)
{
struct ieee80211_sub_if_data *sdata = mpath->sdata;
struct mesh_table *tbl;
struct mesh_path *from_mpath = mpath;
struct mesh_path *gate;
bool copy = false;
tbl = &sdata->u.mesh.mesh_paths;
rcu_read_lock();
hlist_for_each_entry_rcu(gate, &tbl->known_gates, gate_list) {
if (gate->flags & MESH_PATH_ACTIVE) {
mpath_dbg(sdata, "Forwarding to %pM\n", gate->dst);
mesh_path_move_to_queue(gate, from_mpath, copy);
from_mpath = gate;
copy = true;
} else {
mpath_dbg(sdata,
"Not forwarding to %pM (flags %#x)\n",
gate->dst, gate->flags);
}
}
hlist_for_each_entry_rcu(gate, &tbl->known_gates, gate_list) {
mpath_dbg(sdata, "Sending to %pM\n", gate->dst);
mesh_path_tx_pending(gate);
}
rcu_read_unlock();
return (from_mpath == mpath) ? -EHOSTUNREACH : 0;
}
/**
* mesh_path_discard_frame - discard a frame whose path could not be resolved
*
* @skb: frame to discard
* @sdata: network subif the frame was to be sent through
*
 * Locking: the function must be called within an rcu_read_lock region
*/
void mesh_path_discard_frame(struct ieee80211_sub_if_data *sdata,
struct sk_buff *skb)
{
ieee80211_free_txskb(&sdata->local->hw, skb);
sdata->u.mesh.mshstats.dropped_frames_no_route++;
}
/**
* mesh_path_flush_pending - free the pending queue of a mesh path
*
* @mpath: mesh path whose queue has to be freed
*
 * Locking: the function must be called within an rcu_read_lock region
*/
void mesh_path_flush_pending(struct mesh_path *mpath)
{
struct sk_buff *skb;
while ((skb = skb_dequeue(&mpath->frame_queue)) != NULL)
mesh_path_discard_frame(mpath->sdata, skb);
}
/**
* mesh_path_fix_nexthop - force a specific next hop for a mesh path
*
* @mpath: the mesh path to modify
* @next_hop: the next hop to force
*
 * Locking: this function takes mpath->state_lock itself; the caller must not hold it
*/
void mesh_path_fix_nexthop(struct mesh_path *mpath, struct sta_info *next_hop)
{
spin_lock_bh(&mpath->state_lock);
mesh_path_assign_nexthop(mpath, next_hop);
mpath->sn = 0xffff;
mpath->metric = 0;
mpath->hop_count = 0;
mpath->exp_time = 0;
mpath->flags = MESH_PATH_FIXED | MESH_PATH_SN_VALID;
mesh_path_activate(mpath);
mesh_fast_tx_flush_mpath(mpath);
spin_unlock_bh(&mpath->state_lock);
ewma_mesh_fail_avg_init(&next_hop->mesh->fail_avg);
/* init it at a low value - 0 start is tricky */
ewma_mesh_fail_avg_add(&next_hop->mesh->fail_avg, 1);
mesh_path_tx_pending(mpath);
}
void mesh_pathtbl_init(struct ieee80211_sub_if_data *sdata)
{
mesh_table_init(&sdata->u.mesh.mesh_paths);
mesh_table_init(&sdata->u.mesh.mpp_paths);
mesh_fast_tx_init(sdata);
}
static
void mesh_path_tbl_expire(struct ieee80211_sub_if_data *sdata,
struct mesh_table *tbl)
{
struct mesh_path *mpath;
struct hlist_node *n;
spin_lock_bh(&tbl->walk_lock);
hlist_for_each_entry_safe(mpath, n, &tbl->walk_head, walk_list) {
if ((!(mpath->flags & MESH_PATH_RESOLVING)) &&
(!(mpath->flags & MESH_PATH_FIXED)) &&
time_after(jiffies, mpath->exp_time + MESH_PATH_EXPIRE))
__mesh_path_del(tbl, mpath);
}
spin_unlock_bh(&tbl->walk_lock);
}
void mesh_path_expire(struct ieee80211_sub_if_data *sdata)
{
mesh_path_tbl_expire(sdata, &sdata->u.mesh.mesh_paths);
mesh_path_tbl_expire(sdata, &sdata->u.mesh.mpp_paths);
}
void mesh_pathtbl_unregister(struct ieee80211_sub_if_data *sdata)
{
mesh_fast_tx_deinit(sdata);
mesh_table_free(&sdata->u.mesh.mesh_paths);
mesh_table_free(&sdata->u.mesh.mpp_paths);
}
| linux-master | net/mac80211/mesh_pathtbl.c |
// SPDX-License-Identifier: GPL-2.0
/*
* S1G handling
* Copyright(c) 2020 Adapt-IP
*/
#include <linux/ieee80211.h>
#include <net/mac80211.h>
#include "ieee80211_i.h"
#include "driver-ops.h"
void ieee80211_s1g_sta_rate_init(struct sta_info *sta)
{
/* avoid indicating legacy bitrates for S1G STAs */
sta->deflink.tx_stats.last_rate.flags |= IEEE80211_TX_RC_S1G_MCS;
sta->deflink.rx_stats.last_rate =
STA_STATS_FIELD(TYPE, STA_STATS_RATE_TYPE_S1G);
}
bool ieee80211_s1g_is_twt_setup(struct sk_buff *skb)
{
struct ieee80211_mgmt *mgmt = (struct ieee80211_mgmt *)skb->data;
if (likely(!ieee80211_is_action(mgmt->frame_control)))
return false;
if (likely(mgmt->u.action.category != WLAN_CATEGORY_S1G))
return false;
return mgmt->u.action.u.s1g.action_code == WLAN_S1G_TWT_SETUP;
}
static void
ieee80211_s1g_send_twt_setup(struct ieee80211_sub_if_data *sdata, const u8 *da,
const u8 *bssid, struct ieee80211_twt_setup *twt)
{
int len = IEEE80211_MIN_ACTION_SIZE + 4 + twt->length;
struct ieee80211_local *local = sdata->local;
struct ieee80211_mgmt *mgmt;
struct sk_buff *skb;
skb = dev_alloc_skb(local->hw.extra_tx_headroom + len);
if (!skb)
return;
skb_reserve(skb, local->hw.extra_tx_headroom);
mgmt = skb_put_zero(skb, len);
mgmt->frame_control = cpu_to_le16(IEEE80211_FTYPE_MGMT |
IEEE80211_STYPE_ACTION);
memcpy(mgmt->da, da, ETH_ALEN);
memcpy(mgmt->sa, sdata->vif.addr, ETH_ALEN);
memcpy(mgmt->bssid, bssid, ETH_ALEN);
mgmt->u.action.category = WLAN_CATEGORY_S1G;
mgmt->u.action.u.s1g.action_code = WLAN_S1G_TWT_SETUP;
memcpy(mgmt->u.action.u.s1g.variable, twt, 3 + twt->length);
IEEE80211_SKB_CB(skb)->flags |= IEEE80211_TX_INTFL_DONT_ENCRYPT |
IEEE80211_TX_INTFL_MLME_CONN_TX |
IEEE80211_TX_CTL_REQ_TX_STATUS;
ieee80211_tx_skb(sdata, skb);
}
static void
ieee80211_s1g_send_twt_teardown(struct ieee80211_sub_if_data *sdata,
const u8 *da, const u8 *bssid, u8 flowid)
{
struct ieee80211_local *local = sdata->local;
struct ieee80211_mgmt *mgmt;
struct sk_buff *skb;
u8 *id;
skb = dev_alloc_skb(local->hw.extra_tx_headroom +
IEEE80211_MIN_ACTION_SIZE + 2);
if (!skb)
return;
skb_reserve(skb, local->hw.extra_tx_headroom);
mgmt = skb_put_zero(skb, IEEE80211_MIN_ACTION_SIZE + 2);
mgmt->frame_control = cpu_to_le16(IEEE80211_FTYPE_MGMT |
IEEE80211_STYPE_ACTION);
memcpy(mgmt->da, da, ETH_ALEN);
memcpy(mgmt->sa, sdata->vif.addr, ETH_ALEN);
memcpy(mgmt->bssid, bssid, ETH_ALEN);
mgmt->u.action.category = WLAN_CATEGORY_S1G;
mgmt->u.action.u.s1g.action_code = WLAN_S1G_TWT_TEARDOWN;
id = (u8 *)mgmt->u.action.u.s1g.variable;
*id = flowid;
IEEE80211_SKB_CB(skb)->flags |= IEEE80211_TX_INTFL_DONT_ENCRYPT |
IEEE80211_TX_CTL_REQ_TX_STATUS;
ieee80211_tx_skb(sdata, skb);
}
static void
ieee80211_s1g_rx_twt_setup(struct ieee80211_sub_if_data *sdata,
struct sta_info *sta, struct sk_buff *skb)
{
struct ieee80211_mgmt *mgmt = (void *)skb->data;
struct ieee80211_twt_setup *twt = (void *)mgmt->u.action.u.s1g.variable;
struct ieee80211_twt_params *twt_agrt = (void *)twt->params;
twt_agrt->req_type &= cpu_to_le16(~IEEE80211_TWT_REQTYPE_REQUEST);
/* broadcast TWT not supported yet */
if (twt->control & IEEE80211_TWT_CONTROL_NEG_TYPE_BROADCAST) {
twt_agrt->req_type &=
~cpu_to_le16(IEEE80211_TWT_REQTYPE_SETUP_CMD);
twt_agrt->req_type |=
le16_encode_bits(TWT_SETUP_CMD_REJECT,
IEEE80211_TWT_REQTYPE_SETUP_CMD);
goto out;
}
/* TWT Information not supported yet */
twt->control |= IEEE80211_TWT_CONTROL_RX_DISABLED;
drv_add_twt_setup(sdata->local, sdata, &sta->sta, twt);
out:
ieee80211_s1g_send_twt_setup(sdata, mgmt->sa, sdata->vif.addr, twt);
}
static void
ieee80211_s1g_rx_twt_teardown(struct ieee80211_sub_if_data *sdata,
struct sta_info *sta, struct sk_buff *skb)
{
struct ieee80211_mgmt *mgmt = (struct ieee80211_mgmt *)skb->data;
drv_twt_teardown_request(sdata->local, sdata, &sta->sta,
mgmt->u.action.u.s1g.variable[0]);
}
static void
ieee80211_s1g_tx_twt_setup_fail(struct ieee80211_sub_if_data *sdata,
struct sta_info *sta, struct sk_buff *skb)
{
struct ieee80211_mgmt *mgmt = (struct ieee80211_mgmt *)skb->data;
struct ieee80211_twt_setup *twt = (void *)mgmt->u.action.u.s1g.variable;
struct ieee80211_twt_params *twt_agrt = (void *)twt->params;
u8 flowid = le16_get_bits(twt_agrt->req_type,
IEEE80211_TWT_REQTYPE_FLOWID);
drv_twt_teardown_request(sdata->local, sdata, &sta->sta, flowid);
ieee80211_s1g_send_twt_teardown(sdata, mgmt->sa, sdata->vif.addr,
flowid);
}
void ieee80211_s1g_rx_twt_action(struct ieee80211_sub_if_data *sdata,
struct sk_buff *skb)
{
struct ieee80211_mgmt *mgmt = (struct ieee80211_mgmt *)skb->data;
struct ieee80211_local *local = sdata->local;
struct sta_info *sta;
mutex_lock(&local->sta_mtx);
sta = sta_info_get_bss(sdata, mgmt->sa);
if (!sta)
goto out;
switch (mgmt->u.action.u.s1g.action_code) {
case WLAN_S1G_TWT_SETUP:
ieee80211_s1g_rx_twt_setup(sdata, sta, skb);
break;
case WLAN_S1G_TWT_TEARDOWN:
ieee80211_s1g_rx_twt_teardown(sdata, sta, skb);
break;
default:
break;
}
out:
mutex_unlock(&local->sta_mtx);
}
void ieee80211_s1g_status_twt_action(struct ieee80211_sub_if_data *sdata,
struct sk_buff *skb)
{
struct ieee80211_mgmt *mgmt = (struct ieee80211_mgmt *)skb->data;
struct ieee80211_local *local = sdata->local;
struct sta_info *sta;
mutex_lock(&local->sta_mtx);
sta = sta_info_get_bss(sdata, mgmt->da);
if (!sta)
goto out;
switch (mgmt->u.action.u.s1g.action_code) {
case WLAN_S1G_TWT_SETUP:
/* process failed twt setup frames */
ieee80211_s1g_tx_twt_setup_fail(sdata, sta, skb);
break;
default:
break;
}
out:
mutex_unlock(&local->sta_mtx);
}
| linux-master | net/mac80211/s1g.c |
// SPDX-License-Identifier: GPL-2.0-only
/*
* Off-channel operation helpers
*
* Copyright 2003, Jouni Malinen <[email protected]>
* Copyright 2004, Instant802 Networks, Inc.
* Copyright 2005, Devicescape Software, Inc.
* Copyright 2006-2007 Jiri Benc <[email protected]>
* Copyright 2007, Michael Wu <[email protected]>
* Copyright 2009 Johannes Berg <[email protected]>
* Copyright (C) 2019, 2022-2023 Intel Corporation
*/
#include <linux/export.h>
#include <net/mac80211.h>
#include "ieee80211_i.h"
#include "driver-ops.h"
/*
* Tell our hardware to disable PS.
* Optionally inform AP that we will go to sleep so that it will buffer
 * the frames while we are doing off-channel work. This is optional
 * because we *may* be doing work on the operating channel and want our
 * hardware unconditionally awake, while still letting the AP send us
 * normal frames.
*/
static void ieee80211_offchannel_ps_enable(struct ieee80211_sub_if_data *sdata)
{
struct ieee80211_local *local = sdata->local;
struct ieee80211_if_managed *ifmgd = &sdata->u.mgd;
bool offchannel_ps_enabled = false;
/* FIXME: what to do when local->pspolling is true? */
del_timer_sync(&local->dynamic_ps_timer);
del_timer_sync(&ifmgd->bcn_mon_timer);
del_timer_sync(&ifmgd->conn_mon_timer);
cancel_work_sync(&local->dynamic_ps_enable_work);
if (local->hw.conf.flags & IEEE80211_CONF_PS) {
offchannel_ps_enabled = true;
local->hw.conf.flags &= ~IEEE80211_CONF_PS;
ieee80211_hw_config(local, IEEE80211_CONF_CHANGE_PS);
}
if (!offchannel_ps_enabled ||
!ieee80211_hw_check(&local->hw, PS_NULLFUNC_STACK))
/*
* If power save was enabled, no need to send a nullfunc
* frame because AP knows that we are sleeping. But if the
* hardware is creating the nullfunc frame for power save
* status (ie. IEEE80211_HW_PS_NULLFUNC_STACK is not
* enabled) and power save was enabled, the firmware just
* sent a null frame with power save disabled. So we need
* to send a new nullfunc frame to inform the AP that we
* are again sleeping.
*/
ieee80211_send_nullfunc(local, sdata, true);
}
/* inform AP that we are awake again */
static void ieee80211_offchannel_ps_disable(struct ieee80211_sub_if_data *sdata)
{
struct ieee80211_local *local = sdata->local;
if (!local->ps_sdata)
ieee80211_send_nullfunc(local, sdata, false);
else if (local->hw.conf.dynamic_ps_timeout > 0) {
/*
* the dynamic_ps_timer had been running before leaving the
* operating channel, restart the timer now and send a nullfunc
* frame to inform the AP that we are awake so that AP sends
* the buffered packets (if any).
*/
ieee80211_send_nullfunc(local, sdata, false);
mod_timer(&local->dynamic_ps_timer, jiffies +
msecs_to_jiffies(local->hw.conf.dynamic_ps_timeout));
}
ieee80211_sta_reset_beacon_monitor(sdata);
ieee80211_sta_reset_conn_monitor(sdata);
}
void ieee80211_offchannel_stop_vifs(struct ieee80211_local *local)
{
struct ieee80211_sub_if_data *sdata;
if (WARN_ON(local->use_chanctx))
return;
/*
* notify the AP about us leaving the channel and stop all
* STA interfaces.
*/
/*
* Stop queues and transmit all frames queued by the driver
* before sending nullfunc to enable powersave at the AP.
*/
ieee80211_stop_queues_by_reason(&local->hw, IEEE80211_MAX_QUEUE_MAP,
IEEE80211_QUEUE_STOP_REASON_OFFCHANNEL,
false);
ieee80211_flush_queues(local, NULL, false);
mutex_lock(&local->iflist_mtx);
list_for_each_entry(sdata, &local->interfaces, list) {
if (!ieee80211_sdata_running(sdata))
continue;
if (sdata->vif.type == NL80211_IFTYPE_P2P_DEVICE ||
sdata->vif.type == NL80211_IFTYPE_NAN)
continue;
if (sdata->vif.type != NL80211_IFTYPE_MONITOR)
set_bit(SDATA_STATE_OFFCHANNEL, &sdata->state);
/* Check to see if we should disable beaconing. */
if (sdata->vif.bss_conf.enable_beacon) {
set_bit(SDATA_STATE_OFFCHANNEL_BEACON_STOPPED,
&sdata->state);
sdata->vif.bss_conf.enable_beacon = false;
ieee80211_link_info_change_notify(
sdata, &sdata->deflink,
BSS_CHANGED_BEACON_ENABLED);
}
if (sdata->vif.type == NL80211_IFTYPE_STATION &&
sdata->u.mgd.associated)
ieee80211_offchannel_ps_enable(sdata);
}
mutex_unlock(&local->iflist_mtx);
}
void ieee80211_offchannel_return(struct ieee80211_local *local)
{
struct ieee80211_sub_if_data *sdata;
if (WARN_ON(local->use_chanctx))
return;
mutex_lock(&local->iflist_mtx);
list_for_each_entry(sdata, &local->interfaces, list) {
if (sdata->vif.type == NL80211_IFTYPE_P2P_DEVICE)
continue;
if (sdata->vif.type != NL80211_IFTYPE_MONITOR)
clear_bit(SDATA_STATE_OFFCHANNEL, &sdata->state);
if (!ieee80211_sdata_running(sdata))
continue;
/* Tell AP we're back */
if (sdata->vif.type == NL80211_IFTYPE_STATION &&
sdata->u.mgd.associated)
ieee80211_offchannel_ps_disable(sdata);
if (test_and_clear_bit(SDATA_STATE_OFFCHANNEL_BEACON_STOPPED,
&sdata->state)) {
sdata->vif.bss_conf.enable_beacon = true;
ieee80211_link_info_change_notify(
sdata, &sdata->deflink,
BSS_CHANGED_BEACON_ENABLED);
}
}
mutex_unlock(&local->iflist_mtx);
ieee80211_wake_queues_by_reason(&local->hw, IEEE80211_MAX_QUEUE_MAP,
IEEE80211_QUEUE_STOP_REASON_OFFCHANNEL,
false);
}
static void ieee80211_roc_notify_destroy(struct ieee80211_roc_work *roc)
{
/* was never transmitted */
if (roc->frame) {
cfg80211_mgmt_tx_status(&roc->sdata->wdev, roc->mgmt_tx_cookie,
roc->frame->data, roc->frame->len,
false, GFP_KERNEL);
ieee80211_free_txskb(&roc->sdata->local->hw, roc->frame);
}
if (!roc->mgmt_tx_cookie)
cfg80211_remain_on_channel_expired(&roc->sdata->wdev,
roc->cookie, roc->chan,
GFP_KERNEL);
else
cfg80211_tx_mgmt_expired(&roc->sdata->wdev,
roc->mgmt_tx_cookie,
roc->chan, GFP_KERNEL);
list_del(&roc->list);
kfree(roc);
}
static unsigned long ieee80211_end_finished_rocs(struct ieee80211_local *local,
unsigned long now)
{
struct ieee80211_roc_work *roc, *tmp;
long remaining_dur_min = LONG_MAX;
lockdep_assert_held(&local->mtx);
list_for_each_entry_safe(roc, tmp, &local->roc_list, list) {
long remaining;
if (!roc->started)
break;
remaining = roc->start_time +
msecs_to_jiffies(roc->duration) -
now;
/* In case of HW ROC, it is possible that the HW finished the
* ROC session before the actual requested time. In such a case
* end the ROC session (disregarding the remaining time).
*/
if (roc->abort || roc->hw_begun || remaining <= 0)
ieee80211_roc_notify_destroy(roc);
else
remaining_dur_min = min(remaining_dur_min, remaining);
}
return remaining_dur_min;
}
static bool ieee80211_recalc_sw_work(struct ieee80211_local *local,
unsigned long now)
{
long dur = ieee80211_end_finished_rocs(local, now);
if (dur == LONG_MAX)
return false;
mod_delayed_work(local->workqueue, &local->roc_work, dur);
return true;
}
static void ieee80211_handle_roc_started(struct ieee80211_roc_work *roc,
unsigned long start_time)
{
if (WARN_ON(roc->notified))
return;
roc->start_time = start_time;
roc->started = true;
if (roc->mgmt_tx_cookie) {
if (!WARN_ON(!roc->frame)) {
ieee80211_tx_skb_tid_band(roc->sdata, roc->frame, 7,
roc->chan->band);
roc->frame = NULL;
}
} else {
cfg80211_ready_on_channel(&roc->sdata->wdev, roc->cookie,
roc->chan, roc->req_duration,
GFP_KERNEL);
}
roc->notified = true;
}
static void ieee80211_hw_roc_start(struct work_struct *work)
{
struct ieee80211_local *local =
container_of(work, struct ieee80211_local, hw_roc_start);
struct ieee80211_roc_work *roc;
mutex_lock(&local->mtx);
list_for_each_entry(roc, &local->roc_list, list) {
if (!roc->started)
break;
roc->hw_begun = true;
ieee80211_handle_roc_started(roc, local->hw_roc_start_time);
}
mutex_unlock(&local->mtx);
}
void ieee80211_ready_on_channel(struct ieee80211_hw *hw)
{
struct ieee80211_local *local = hw_to_local(hw);
local->hw_roc_start_time = jiffies;
trace_api_ready_on_channel(local);
ieee80211_queue_work(hw, &local->hw_roc_start);
}
EXPORT_SYMBOL_GPL(ieee80211_ready_on_channel);
static void _ieee80211_start_next_roc(struct ieee80211_local *local)
{
struct ieee80211_roc_work *roc, *tmp;
enum ieee80211_roc_type type;
u32 min_dur, max_dur;
lockdep_assert_held(&local->mtx);
if (WARN_ON(list_empty(&local->roc_list)))
return;
roc = list_first_entry(&local->roc_list, struct ieee80211_roc_work,
list);
if (WARN_ON(roc->started))
return;
min_dur = roc->duration;
max_dur = roc->duration;
type = roc->type;
list_for_each_entry(tmp, &local->roc_list, list) {
if (tmp == roc)
continue;
if (tmp->sdata != roc->sdata || tmp->chan != roc->chan)
break;
max_dur = max(tmp->duration, max_dur);
min_dur = min(tmp->duration, min_dur);
type = max(tmp->type, type);
}
if (local->ops->remain_on_channel) {
int ret = drv_remain_on_channel(local, roc->sdata, roc->chan,
max_dur, type);
if (ret) {
wiphy_warn(local->hw.wiphy,
"failed to start next HW ROC (%d)\n", ret);
/*
* queue the work struct again to avoid recursion
* when multiple failures occur
*/
list_for_each_entry(tmp, &local->roc_list, list) {
if (tmp->sdata != roc->sdata ||
tmp->chan != roc->chan)
break;
tmp->started = true;
tmp->abort = true;
}
ieee80211_queue_work(&local->hw, &local->hw_roc_done);
return;
}
/* we'll notify about the start once the HW calls back */
list_for_each_entry(tmp, &local->roc_list, list) {
if (tmp->sdata != roc->sdata || tmp->chan != roc->chan)
break;
tmp->started = true;
}
} else {
/* If actually operating on the desired channel (with at least
* 20 MHz channel width) don't stop all the operations but still
* treat it as though the ROC operation started properly, so
* other ROC operations won't interfere with this one.
*/
roc->on_channel = roc->chan == local->_oper_chandef.chan &&
local->_oper_chandef.width != NL80211_CHAN_WIDTH_5 &&
local->_oper_chandef.width != NL80211_CHAN_WIDTH_10;
/* start this ROC */
ieee80211_recalc_idle(local);
if (!roc->on_channel) {
ieee80211_offchannel_stop_vifs(local);
local->tmp_channel = roc->chan;
ieee80211_hw_config(local, 0);
}
ieee80211_queue_delayed_work(&local->hw, &local->roc_work,
msecs_to_jiffies(min_dur));
/* tell userspace or send frame(s) */
list_for_each_entry(tmp, &local->roc_list, list) {
if (tmp->sdata != roc->sdata || tmp->chan != roc->chan)
break;
tmp->on_channel = roc->on_channel;
ieee80211_handle_roc_started(tmp, jiffies);
}
}
}
void ieee80211_start_next_roc(struct ieee80211_local *local)
{
struct ieee80211_roc_work *roc;
lockdep_assert_held(&local->mtx);
if (list_empty(&local->roc_list)) {
ieee80211_run_deferred_scan(local);
return;
}
/* defer roc if driver is not started (i.e. during reconfig) */
if (local->in_reconfig)
return;
roc = list_first_entry(&local->roc_list, struct ieee80211_roc_work,
list);
if (WARN_ON_ONCE(roc->started))
return;
if (local->ops->remain_on_channel) {
_ieee80211_start_next_roc(local);
} else {
/* delay it a bit */
ieee80211_queue_delayed_work(&local->hw, &local->roc_work,
round_jiffies_relative(HZ/2));
}
}
static void __ieee80211_roc_work(struct ieee80211_local *local)
{
struct ieee80211_roc_work *roc;
bool on_channel;
lockdep_assert_held(&local->mtx);
if (WARN_ON(local->ops->remain_on_channel))
return;
roc = list_first_entry_or_null(&local->roc_list,
struct ieee80211_roc_work, list);
if (!roc)
return;
if (!roc->started) {
WARN_ON(local->use_chanctx);
_ieee80211_start_next_roc(local);
} else {
on_channel = roc->on_channel;
if (ieee80211_recalc_sw_work(local, jiffies))
return;
/* careful - roc pointer became invalid during recalc */
if (!on_channel) {
ieee80211_flush_queues(local, NULL, false);
local->tmp_channel = NULL;
ieee80211_hw_config(local, 0);
ieee80211_offchannel_return(local);
}
ieee80211_recalc_idle(local);
ieee80211_start_next_roc(local);
}
}
static void ieee80211_roc_work(struct work_struct *work)
{
struct ieee80211_local *local =
container_of(work, struct ieee80211_local, roc_work.work);
mutex_lock(&local->mtx);
__ieee80211_roc_work(local);
mutex_unlock(&local->mtx);
}
static void ieee80211_hw_roc_done(struct work_struct *work)
{
struct ieee80211_local *local =
container_of(work, struct ieee80211_local, hw_roc_done);
mutex_lock(&local->mtx);
ieee80211_end_finished_rocs(local, jiffies);
/* if there's another roc, start it now */
ieee80211_start_next_roc(local);
mutex_unlock(&local->mtx);
}
void ieee80211_remain_on_channel_expired(struct ieee80211_hw *hw)
{
struct ieee80211_local *local = hw_to_local(hw);
trace_api_remain_on_channel_expired(local);
ieee80211_queue_work(hw, &local->hw_roc_done);
}
EXPORT_SYMBOL_GPL(ieee80211_remain_on_channel_expired);
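/*
 * Illustrative sketch (not part of the original file): how a hypothetical
 * driver with hardware remain-on-channel support would feed its firmware
 * ROC events back into mac80211 using the two exported helpers above. The
 * event handler itself is an assumption for illustration.
 */
#if 0
static void example_drv_roc_event(struct ieee80211_hw *hw, bool started)
{
	if (started)
		/* firmware is now sitting on the requested channel */
		ieee80211_ready_on_channel(hw);
	else
		/* ROC duration elapsed or firmware left the channel */
		ieee80211_remain_on_channel_expired(hw);
}
#endif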
static bool
ieee80211_coalesce_hw_started_roc(struct ieee80211_local *local,
struct ieee80211_roc_work *new_roc,
struct ieee80211_roc_work *cur_roc)
{
unsigned long now = jiffies;
unsigned long remaining;
if (WARN_ON(!cur_roc->started))
return false;
/* if it was scheduled in the hardware, but not started yet,
* we can only combine if the older one had a longer duration
*/
if (!cur_roc->hw_begun && new_roc->duration > cur_roc->duration)
return false;
remaining = cur_roc->start_time +
msecs_to_jiffies(cur_roc->duration) -
now;
/* if it doesn't fit entirely, schedule a new one */
if (new_roc->duration > jiffies_to_msecs(remaining))
return false;
/* add just after the current one so we combine their finish later */
list_add(&new_roc->list, &cur_roc->list);
/* if the existing one has already begun then let this one also
* begin, otherwise they'll both be marked properly by the work
* struct that runs once the driver notifies us of the beginning
*/
if (cur_roc->hw_begun) {
new_roc->hw_begun = true;
ieee80211_handle_roc_started(new_roc, now);
}
return true;
}
static int ieee80211_start_roc_work(struct ieee80211_local *local,
struct ieee80211_sub_if_data *sdata,
struct ieee80211_channel *channel,
unsigned int duration, u64 *cookie,
struct sk_buff *txskb,
enum ieee80211_roc_type type)
{
struct ieee80211_roc_work *roc, *tmp;
bool queued = false, combine_started = true;
int ret;
lockdep_assert_held(&local->mtx);
if (channel->freq_offset)
/* this may work, but is untested */
return -EOPNOTSUPP;
if (local->use_chanctx && !local->ops->remain_on_channel)
return -EOPNOTSUPP;
roc = kzalloc(sizeof(*roc), GFP_KERNEL);
if (!roc)
return -ENOMEM;
/*
* If the duration is zero, then the driver
* wouldn't actually do anything. Set it to
* 10 for now.
*
* TODO: cancel the off-channel operation
* when we get the SKB's TX status and
* the wait time was zero before.
*/
if (!duration)
duration = 10;
roc->chan = channel;
roc->duration = duration;
roc->req_duration = duration;
roc->frame = txskb;
roc->type = type;
roc->sdata = sdata;
/*
* cookie is either the roc cookie (for normal roc)
* or the SKB (for mgmt TX)
*/
if (!txskb) {
roc->cookie = ieee80211_mgmt_tx_cookie(local);
*cookie = roc->cookie;
} else {
roc->mgmt_tx_cookie = *cookie;
}
/* if there's no need to queue, handle it immediately */
if (list_empty(&local->roc_list) &&
!local->scanning && !ieee80211_is_radar_required(local)) {
/* if not HW assist, just queue & schedule work */
if (!local->ops->remain_on_channel) {
list_add_tail(&roc->list, &local->roc_list);
ieee80211_queue_delayed_work(&local->hw,
&local->roc_work, 0);
} else {
/* otherwise actually kick it off here
* (for error handling)
*/
ret = drv_remain_on_channel(local, sdata, channel,
duration, type);
if (ret) {
kfree(roc);
return ret;
}
roc->started = true;
list_add_tail(&roc->list, &local->roc_list);
}
return 0;
}
/* otherwise handle queueing */
list_for_each_entry(tmp, &local->roc_list, list) {
if (tmp->chan != channel || tmp->sdata != sdata)
continue;
/*
		 * Extend this ROC if possible: if it hasn't started yet,
		 * add the new one just after it so they get combined.
*/
if (!tmp->started) {
list_add(&roc->list, &tmp->list);
queued = true;
break;
}
if (!combine_started)
continue;
if (!local->ops->remain_on_channel) {
/* If there's no hardware remain-on-channel, and
* doing so won't push us over the maximum r-o-c
* we allow, then we can just add the new one to
* the list and mark it as having started now.
* If it would push over the limit, don't try to
* combine with other started ones (that haven't
* been running as long) but potentially sort it
* with others that had the same fate.
*/
unsigned long now = jiffies;
u32 elapsed = jiffies_to_msecs(now - tmp->start_time);
struct wiphy *wiphy = local->hw.wiphy;
u32 max_roc = wiphy->max_remain_on_channel_duration;
if (elapsed + roc->duration > max_roc) {
combine_started = false;
continue;
}
list_add(&roc->list, &tmp->list);
queued = true;
roc->on_channel = tmp->on_channel;
ieee80211_handle_roc_started(roc, now);
ieee80211_recalc_sw_work(local, now);
break;
}
queued = ieee80211_coalesce_hw_started_roc(local, roc, tmp);
if (queued)
break;
/* if it wasn't queued, perhaps it can be combined with
* another that also couldn't get combined previously,
* but no need to check for already started ones, since
* that can't work.
*/
combine_started = false;
}
if (!queued)
list_add_tail(&roc->list, &local->roc_list);
return 0;
}
int ieee80211_remain_on_channel(struct wiphy *wiphy, struct wireless_dev *wdev,
struct ieee80211_channel *chan,
unsigned int duration, u64 *cookie)
{
struct ieee80211_sub_if_data *sdata = IEEE80211_WDEV_TO_SUB_IF(wdev);
struct ieee80211_local *local = sdata->local;
int ret;
mutex_lock(&local->mtx);
ret = ieee80211_start_roc_work(local, sdata, chan,
duration, cookie, NULL,
IEEE80211_ROC_TYPE_NORMAL);
mutex_unlock(&local->mtx);
return ret;
}
static int ieee80211_cancel_roc(struct ieee80211_local *local,
u64 cookie, bool mgmt_tx)
{
struct ieee80211_roc_work *roc, *tmp, *found = NULL;
int ret;
if (!cookie)
return -ENOENT;
flush_work(&local->hw_roc_start);
mutex_lock(&local->mtx);
list_for_each_entry_safe(roc, tmp, &local->roc_list, list) {
if (!mgmt_tx && roc->cookie != cookie)
continue;
else if (mgmt_tx && roc->mgmt_tx_cookie != cookie)
continue;
found = roc;
break;
}
if (!found) {
mutex_unlock(&local->mtx);
return -ENOENT;
}
if (!found->started) {
ieee80211_roc_notify_destroy(found);
goto out_unlock;
}
if (local->ops->remain_on_channel) {
ret = drv_cancel_remain_on_channel(local, roc->sdata);
if (WARN_ON_ONCE(ret)) {
mutex_unlock(&local->mtx);
return ret;
}
/* TODO:
* if multiple items were combined here then we really shouldn't
* cancel them all - we should wait for as much time as needed
* for the longest remaining one, and only then cancel ...
*/
list_for_each_entry_safe(roc, tmp, &local->roc_list, list) {
if (!roc->started)
break;
if (roc == found)
found = NULL;
ieee80211_roc_notify_destroy(roc);
}
/* that really must not happen - it was started */
WARN_ON(found);
ieee80211_start_next_roc(local);
} else {
/* go through work struct to return to the operating channel */
found->abort = true;
mod_delayed_work(local->workqueue, &local->roc_work, 0);
}
out_unlock:
mutex_unlock(&local->mtx);
return 0;
}
int ieee80211_cancel_remain_on_channel(struct wiphy *wiphy,
struct wireless_dev *wdev, u64 cookie)
{
struct ieee80211_sub_if_data *sdata = IEEE80211_WDEV_TO_SUB_IF(wdev);
struct ieee80211_local *local = sdata->local;
return ieee80211_cancel_roc(local, cookie, false);
}
int ieee80211_mgmt_tx(struct wiphy *wiphy, struct wireless_dev *wdev,
struct cfg80211_mgmt_tx_params *params, u64 *cookie)
{
struct ieee80211_sub_if_data *sdata = IEEE80211_WDEV_TO_SUB_IF(wdev);
struct ieee80211_local *local = sdata->local;
struct sk_buff *skb;
struct sta_info *sta = NULL;
const struct ieee80211_mgmt *mgmt = (void *)params->buf;
bool need_offchan = false;
bool mlo_sta = false;
int link_id = -1;
u32 flags;
int ret;
u8 *data;
if (params->dont_wait_for_ack)
flags = IEEE80211_TX_CTL_NO_ACK;
else
flags = IEEE80211_TX_INTFL_NL80211_FRAME_TX |
IEEE80211_TX_CTL_REQ_TX_STATUS;
if (params->no_cck)
flags |= IEEE80211_TX_CTL_NO_CCK_RATE;
switch (sdata->vif.type) {
case NL80211_IFTYPE_ADHOC:
if (!sdata->vif.cfg.ibss_joined)
need_offchan = true;
#ifdef CONFIG_MAC80211_MESH
fallthrough;
case NL80211_IFTYPE_MESH_POINT:
if (ieee80211_vif_is_mesh(&sdata->vif) &&
!sdata->u.mesh.mesh_id_len)
need_offchan = true;
#endif
fallthrough;
case NL80211_IFTYPE_AP:
case NL80211_IFTYPE_AP_VLAN:
case NL80211_IFTYPE_P2P_GO:
if (sdata->vif.type != NL80211_IFTYPE_ADHOC &&
!ieee80211_vif_is_mesh(&sdata->vif) &&
!sdata->bss->active)
need_offchan = true;
rcu_read_lock();
sta = sta_info_get_bss(sdata, mgmt->da);
mlo_sta = sta && sta->sta.mlo;
if (!ieee80211_is_action(mgmt->frame_control) ||
mgmt->u.action.category == WLAN_CATEGORY_PUBLIC ||
mgmt->u.action.category == WLAN_CATEGORY_SELF_PROTECTED ||
mgmt->u.action.category == WLAN_CATEGORY_SPECTRUM_MGMT) {
rcu_read_unlock();
break;
}
if (!sta) {
rcu_read_unlock();
return -ENOLINK;
}
if (params->link_id >= 0 &&
!(sta->sta.valid_links & BIT(params->link_id))) {
rcu_read_unlock();
return -ENOLINK;
}
link_id = params->link_id;
rcu_read_unlock();
break;
case NL80211_IFTYPE_STATION:
case NL80211_IFTYPE_P2P_CLIENT:
sdata_lock(sdata);
if (!sdata->u.mgd.associated ||
(params->offchan && params->wait &&
local->ops->remain_on_channel &&
memcmp(sdata->vif.cfg.ap_addr, mgmt->bssid, ETH_ALEN)))
need_offchan = true;
sdata_unlock(sdata);
break;
case NL80211_IFTYPE_P2P_DEVICE:
need_offchan = true;
break;
case NL80211_IFTYPE_NAN:
default:
return -EOPNOTSUPP;
}
/* configurations requiring offchan cannot work if no channel has been
* specified
*/
if (need_offchan && !params->chan)
return -EINVAL;
mutex_lock(&local->mtx);
/* Check if the operating channel is the requested channel */
if (!params->chan && mlo_sta) {
need_offchan = false;
} else if (!need_offchan) {
struct ieee80211_chanctx_conf *chanctx_conf = NULL;
int i;
rcu_read_lock();
/* Check all the links first */
for (i = 0; i < ARRAY_SIZE(sdata->vif.link_conf); i++) {
struct ieee80211_bss_conf *conf;
conf = rcu_dereference(sdata->vif.link_conf[i]);
if (!conf)
continue;
chanctx_conf = rcu_dereference(conf->chanctx_conf);
if (!chanctx_conf)
continue;
if (mlo_sta && params->chan == chanctx_conf->def.chan &&
ether_addr_equal(sdata->vif.addr, mgmt->sa)) {
link_id = i;
break;
}
if (ether_addr_equal(conf->addr, mgmt->sa))
break;
chanctx_conf = NULL;
}
if (chanctx_conf) {
need_offchan = params->chan &&
(params->chan !=
chanctx_conf->def.chan);
} else {
need_offchan = true;
}
rcu_read_unlock();
}
if (need_offchan && !params->offchan) {
ret = -EBUSY;
goto out_unlock;
}
skb = dev_alloc_skb(local->hw.extra_tx_headroom + params->len);
if (!skb) {
ret = -ENOMEM;
goto out_unlock;
}
skb_reserve(skb, local->hw.extra_tx_headroom);
data = skb_put_data(skb, params->buf, params->len);
/* Update CSA counters */
if (sdata->vif.bss_conf.csa_active &&
(sdata->vif.type == NL80211_IFTYPE_AP ||
sdata->vif.type == NL80211_IFTYPE_MESH_POINT ||
sdata->vif.type == NL80211_IFTYPE_ADHOC) &&
params->n_csa_offsets) {
int i;
struct beacon_data *beacon = NULL;
rcu_read_lock();
if (sdata->vif.type == NL80211_IFTYPE_AP)
beacon = rcu_dereference(sdata->deflink.u.ap.beacon);
else if (sdata->vif.type == NL80211_IFTYPE_ADHOC)
beacon = rcu_dereference(sdata->u.ibss.presp);
else if (ieee80211_vif_is_mesh(&sdata->vif))
beacon = rcu_dereference(sdata->u.mesh.beacon);
if (beacon)
for (i = 0; i < params->n_csa_offsets; i++)
data[params->csa_offsets[i]] =
beacon->cntdwn_current_counter;
rcu_read_unlock();
}
IEEE80211_SKB_CB(skb)->flags = flags;
skb->dev = sdata->dev;
if (!params->dont_wait_for_ack) {
/* make a copy to preserve the frame contents
* in case of encryption.
*/
ret = ieee80211_attach_ack_skb(local, skb, cookie, GFP_KERNEL);
if (ret) {
kfree_skb(skb);
goto out_unlock;
}
} else {
/* Assign a dummy non-zero cookie, it's not sent to
* userspace in this case but we rely on its value
* internally in the need_offchan case to distinguish
* mgmt-tx from remain-on-channel.
*/
*cookie = 0xffffffff;
}
if (!need_offchan) {
ieee80211_tx_skb_tid(sdata, skb, 7, link_id);
ret = 0;
goto out_unlock;
}
IEEE80211_SKB_CB(skb)->flags |= IEEE80211_TX_CTL_TX_OFFCHAN |
IEEE80211_TX_INTFL_OFFCHAN_TX_OK;
if (ieee80211_hw_check(&local->hw, QUEUE_CONTROL))
IEEE80211_SKB_CB(skb)->hw_queue =
local->hw.offchannel_tx_hw_queue;
/* This will handle all kinds of coalescing and immediate TX */
ret = ieee80211_start_roc_work(local, sdata, params->chan,
params->wait, cookie, skb,
IEEE80211_ROC_TYPE_MGMT_TX);
if (ret)
ieee80211_free_txskb(&local->hw, skb);
out_unlock:
mutex_unlock(&local->mtx);
return ret;
}
int ieee80211_mgmt_tx_cancel_wait(struct wiphy *wiphy,
struct wireless_dev *wdev, u64 cookie)
{
struct ieee80211_local *local = wiphy_priv(wiphy);
return ieee80211_cancel_roc(local, cookie, true);
}
void ieee80211_roc_setup(struct ieee80211_local *local)
{
INIT_WORK(&local->hw_roc_start, ieee80211_hw_roc_start);
INIT_WORK(&local->hw_roc_done, ieee80211_hw_roc_done);
INIT_DELAYED_WORK(&local->roc_work, ieee80211_roc_work);
INIT_LIST_HEAD(&local->roc_list);
}
void ieee80211_roc_purge(struct ieee80211_local *local,
struct ieee80211_sub_if_data *sdata)
{
struct ieee80211_roc_work *roc, *tmp;
bool work_to_do = false;
mutex_lock(&local->mtx);
list_for_each_entry_safe(roc, tmp, &local->roc_list, list) {
if (sdata && roc->sdata != sdata)
continue;
if (roc->started) {
if (local->ops->remain_on_channel) {
/* can race, so ignore return value */
drv_cancel_remain_on_channel(local, roc->sdata);
ieee80211_roc_notify_destroy(roc);
} else {
roc->abort = true;
work_to_do = true;
}
} else {
ieee80211_roc_notify_destroy(roc);
}
}
if (work_to_do)
__ieee80211_roc_work(local);
mutex_unlock(&local->mtx);
}
| linux-master | net/mac80211/offchannel.c |
// SPDX-License-Identifier: GPL-2.0-only
/*
* spectrum management
*
* Copyright 2003, Jouni Malinen <[email protected]>
* Copyright 2002-2005, Instant802 Networks, Inc.
* Copyright 2005-2006, Devicescape Software, Inc.
* Copyright 2006-2007 Jiri Benc <[email protected]>
* Copyright 2007, Michael Wu <[email protected]>
* Copyright 2007-2008, Intel Corporation
* Copyright 2008, Johannes Berg <[email protected]>
* Copyright (C) 2018, 2020, 2022 Intel Corporation
*/
#include <linux/ieee80211.h>
#include <net/cfg80211.h>
#include <net/mac80211.h>
#include "ieee80211_i.h"
#include "sta_info.h"
#include "wme.h"
int ieee80211_parse_ch_switch_ie(struct ieee80211_sub_if_data *sdata,
struct ieee802_11_elems *elems,
enum nl80211_band current_band,
u32 vht_cap_info,
ieee80211_conn_flags_t conn_flags, u8 *bssid,
struct ieee80211_csa_ie *csa_ie)
{
enum nl80211_band new_band = current_band;
int new_freq;
u8 new_chan_no;
struct ieee80211_channel *new_chan;
struct cfg80211_chan_def new_vht_chandef = {};
const struct ieee80211_sec_chan_offs_ie *sec_chan_offs;
const struct ieee80211_wide_bw_chansw_ie *wide_bw_chansw_ie;
int secondary_channel_offset = -1;
memset(csa_ie, 0, sizeof(*csa_ie));
sec_chan_offs = elems->sec_chan_offs;
wide_bw_chansw_ie = elems->wide_bw_chansw_ie;
if (conn_flags & (IEEE80211_CONN_DISABLE_HT |
IEEE80211_CONN_DISABLE_40MHZ)) {
sec_chan_offs = NULL;
wide_bw_chansw_ie = NULL;
}
if (conn_flags & IEEE80211_CONN_DISABLE_VHT)
wide_bw_chansw_ie = NULL;
if (elems->ext_chansw_ie) {
if (!ieee80211_operating_class_to_band(
elems->ext_chansw_ie->new_operating_class,
&new_band)) {
sdata_info(sdata,
"cannot understand ECSA IE operating class, %d, ignoring\n",
elems->ext_chansw_ie->new_operating_class);
}
new_chan_no = elems->ext_chansw_ie->new_ch_num;
csa_ie->count = elems->ext_chansw_ie->count;
csa_ie->mode = elems->ext_chansw_ie->mode;
} else if (elems->ch_switch_ie) {
new_chan_no = elems->ch_switch_ie->new_ch_num;
csa_ie->count = elems->ch_switch_ie->count;
csa_ie->mode = elems->ch_switch_ie->mode;
} else {
/* nothing here we understand */
return 1;
}
/* Mesh Channel Switch Parameters Element */
if (elems->mesh_chansw_params_ie) {
csa_ie->ttl = elems->mesh_chansw_params_ie->mesh_ttl;
csa_ie->mode = elems->mesh_chansw_params_ie->mesh_flags;
csa_ie->pre_value = le16_to_cpu(
elems->mesh_chansw_params_ie->mesh_pre_value);
if (elems->mesh_chansw_params_ie->mesh_flags &
WLAN_EID_CHAN_SWITCH_PARAM_REASON)
csa_ie->reason_code = le16_to_cpu(
elems->mesh_chansw_params_ie->mesh_reason);
}
new_freq = ieee80211_channel_to_frequency(new_chan_no, new_band);
new_chan = ieee80211_get_channel(sdata->local->hw.wiphy, new_freq);
if (!new_chan || new_chan->flags & IEEE80211_CHAN_DISABLED) {
sdata_info(sdata,
"BSS %pM switches to unsupported channel (%d MHz), disconnecting\n",
bssid, new_freq);
return -EINVAL;
}
if (sec_chan_offs) {
secondary_channel_offset = sec_chan_offs->sec_chan_offs;
} else if (!(conn_flags & IEEE80211_CONN_DISABLE_HT)) {
/* If the secondary channel offset IE is not present,
* we can't know what's the post-CSA offset, so the
* best we can do is use 20MHz.
*/
secondary_channel_offset = IEEE80211_HT_PARAM_CHA_SEC_NONE;
}
switch (secondary_channel_offset) {
default:
/* secondary_channel_offset was present but is invalid */
case IEEE80211_HT_PARAM_CHA_SEC_NONE:
cfg80211_chandef_create(&csa_ie->chandef, new_chan,
NL80211_CHAN_HT20);
break;
case IEEE80211_HT_PARAM_CHA_SEC_ABOVE:
cfg80211_chandef_create(&csa_ie->chandef, new_chan,
NL80211_CHAN_HT40PLUS);
break;
case IEEE80211_HT_PARAM_CHA_SEC_BELOW:
cfg80211_chandef_create(&csa_ie->chandef, new_chan,
NL80211_CHAN_HT40MINUS);
break;
case -1:
cfg80211_chandef_create(&csa_ie->chandef, new_chan,
NL80211_CHAN_NO_HT);
/* keep width for 5/10 MHz channels */
switch (sdata->vif.bss_conf.chandef.width) {
case NL80211_CHAN_WIDTH_5:
case NL80211_CHAN_WIDTH_10:
csa_ie->chandef.width =
sdata->vif.bss_conf.chandef.width;
break;
default:
break;
}
break;
}
if (wide_bw_chansw_ie) {
u8 new_seg1 = wide_bw_chansw_ie->new_center_freq_seg1;
struct ieee80211_vht_operation vht_oper = {
.chan_width =
wide_bw_chansw_ie->new_channel_width,
.center_freq_seg0_idx =
wide_bw_chansw_ie->new_center_freq_seg0,
.center_freq_seg1_idx = new_seg1,
/* .basic_mcs_set doesn't matter */
};
struct ieee80211_ht_operation ht_oper = {
.operation_mode =
cpu_to_le16(new_seg1 <<
IEEE80211_HT_OP_MODE_CCFS2_SHIFT),
};
/* default, for the case of IEEE80211_VHT_CHANWIDTH_USE_HT,
* to the previously parsed chandef
*/
new_vht_chandef = csa_ie->chandef;
/* ignore if parsing fails */
if (!ieee80211_chandef_vht_oper(&sdata->local->hw,
vht_cap_info,
&vht_oper, &ht_oper,
&new_vht_chandef))
new_vht_chandef.chan = NULL;
if (conn_flags & IEEE80211_CONN_DISABLE_80P80MHZ &&
new_vht_chandef.width == NL80211_CHAN_WIDTH_80P80)
ieee80211_chandef_downgrade(&new_vht_chandef);
if (conn_flags & IEEE80211_CONN_DISABLE_160MHZ &&
new_vht_chandef.width == NL80211_CHAN_WIDTH_160)
ieee80211_chandef_downgrade(&new_vht_chandef);
}
/* if VHT data is there validate & use it */
if (new_vht_chandef.chan) {
if (!cfg80211_chandef_compatible(&new_vht_chandef,
&csa_ie->chandef)) {
sdata_info(sdata,
"BSS %pM: CSA has inconsistent channel data, disconnecting\n",
bssid);
return -EINVAL;
}
csa_ie->chandef = new_vht_chandef;
}
if (elems->max_channel_switch_time)
csa_ie->max_switch_time =
(elems->max_channel_switch_time[0] << 0) |
(elems->max_channel_switch_time[1] << 8) |
(elems->max_channel_switch_time[2] << 16);
return 0;
}
static void ieee80211_send_refuse_measurement_request(struct ieee80211_sub_if_data *sdata,
struct ieee80211_msrment_ie *request_ie,
const u8 *da, const u8 *bssid,
u8 dialog_token)
{
struct ieee80211_local *local = sdata->local;
struct sk_buff *skb;
struct ieee80211_mgmt *msr_report;
skb = dev_alloc_skb(sizeof(*msr_report) + local->hw.extra_tx_headroom +
sizeof(struct ieee80211_msrment_ie));
if (!skb)
return;
skb_reserve(skb, local->hw.extra_tx_headroom);
msr_report = skb_put_zero(skb, 24);
memcpy(msr_report->da, da, ETH_ALEN);
memcpy(msr_report->sa, sdata->vif.addr, ETH_ALEN);
memcpy(msr_report->bssid, bssid, ETH_ALEN);
msr_report->frame_control = cpu_to_le16(IEEE80211_FTYPE_MGMT |
IEEE80211_STYPE_ACTION);
skb_put(skb, 1 + sizeof(msr_report->u.action.u.measurement));
msr_report->u.action.category = WLAN_CATEGORY_SPECTRUM_MGMT;
msr_report->u.action.u.measurement.action_code =
WLAN_ACTION_SPCT_MSR_RPRT;
msr_report->u.action.u.measurement.dialog_token = dialog_token;
msr_report->u.action.u.measurement.element_id = WLAN_EID_MEASURE_REPORT;
msr_report->u.action.u.measurement.length =
sizeof(struct ieee80211_msrment_ie);
memset(&msr_report->u.action.u.measurement.msr_elem, 0,
sizeof(struct ieee80211_msrment_ie));
msr_report->u.action.u.measurement.msr_elem.token = request_ie->token;
msr_report->u.action.u.measurement.msr_elem.mode |=
IEEE80211_SPCT_MSR_RPRT_MODE_REFUSED;
msr_report->u.action.u.measurement.msr_elem.type = request_ie->type;
ieee80211_tx_skb(sdata, skb);
}
void ieee80211_process_measurement_req(struct ieee80211_sub_if_data *sdata,
struct ieee80211_mgmt *mgmt,
size_t len)
{
/*
	 * Ignoring a measurement request is a spec violation.
	 * Mandatory measurements must be reported; optional
	 * measurements might be refused or reported incapable.
	 * For now, just refuse.
	 * TODO: Answer basic measurement as unmeasured
*/
ieee80211_send_refuse_measurement_request(sdata,
&mgmt->u.action.u.measurement.msr_elem,
mgmt->sa, mgmt->bssid,
mgmt->u.action.u.measurement.dialog_token);
}
| linux-master | net/mac80211/spectmgmt.c |
// SPDX-License-Identifier: GPL-2.0-only
/*
* HT handling
*
* Copyright 2003, Jouni Malinen <[email protected]>
* Copyright 2002-2005, Instant802 Networks, Inc.
* Copyright 2005-2006, Devicescape Software, Inc.
* Copyright 2006-2007 Jiri Benc <[email protected]>
* Copyright 2007, Michael Wu <[email protected]>
* Copyright 2007-2010, Intel Corporation
* Copyright(c) 2015-2017 Intel Deutschland GmbH
* Copyright (C) 2018 - 2023 Intel Corporation
*/
#include <linux/ieee80211.h>
#include <linux/slab.h>
#include <linux/export.h>
#include <net/mac80211.h>
#include "ieee80211_i.h"
#include "driver-ops.h"
#include "wme.h"
/**
* DOC: TX A-MPDU aggregation
*
* Aggregation on the TX side requires setting the hardware flag
* %IEEE80211_HW_AMPDU_AGGREGATION. The driver will then be handed
* packets with a flag indicating A-MPDU aggregation. The driver
* or device is responsible for actually aggregating the frames,
* as well as deciding how many and which to aggregate.
*
* When TX aggregation is started by some subsystem (usually the rate
* control algorithm would be appropriate) by calling the
* ieee80211_start_tx_ba_session() function, the driver will be
* notified via its @ampdu_action function, with the
* %IEEE80211_AMPDU_TX_START action.
*
* In response to that, the driver is later required to call the
* ieee80211_start_tx_ba_cb_irqsafe() function, which will really
* start the aggregation session after the peer has also responded.
* If the peer responds negatively, the session will be stopped
* again right away. Note that it is possible for the aggregation
* session to be stopped before the driver has indicated that it
* is done setting it up, in which case it must not indicate the
* setup completion.
*
* Also note that, since we also need to wait for a response from
* the peer, the driver is notified of the completion of the
* handshake by the %IEEE80211_AMPDU_TX_OPERATIONAL action to the
* @ampdu_action callback.
*
* Similarly, when the aggregation session is stopped by the peer
* or something calling ieee80211_stop_tx_ba_session(), the driver's
* @ampdu_action function will be called with the action
* %IEEE80211_AMPDU_TX_STOP. In this case, the call must not fail,
* and the driver must later call ieee80211_stop_tx_ba_cb_irqsafe().
* Note that the sta can get destroyed before the BA tear down is
* complete.
*/
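/*
 * Illustrative sketch (not part of the original file): the TX-aggregation
 * side of a hypothetical driver's @ampdu_action callback, matching the flow
 * described in the DOC block above. Apart from the mac80211 calls and action
 * values, the function and its behaviour are assumptions for illustration;
 * real drivers also program hardware state at each step.
 */
#if 0
static int example_drv_ampdu_action(struct ieee80211_hw *hw,
				    struct ieee80211_vif *vif,
				    struct ieee80211_ampdu_params *params)
{
	switch (params->action) {
	case IEEE80211_AMPDU_TX_START:
		/* setup done; ask mac80211 to send the ADDBA request */
		ieee80211_start_tx_ba_cb_irqsafe(vif, params->sta->addr,
						 params->tid);
		return 0;
	case IEEE80211_AMPDU_TX_OPERATIONAL:
		/* peer accepted - aggregation may now be used on this TID */
		return 0;
	case IEEE80211_AMPDU_TX_STOP_CONT:
		/* session stopped; confirm so mac80211 can clean up */
		ieee80211_stop_tx_ba_cb_irqsafe(vif, params->sta->addr,
						params->tid);
		return 0;
	case IEEE80211_AMPDU_TX_STOP_FLUSH:
	case IEEE80211_AMPDU_TX_STOP_FLUSH_CONT:
		/* station is being destroyed, no callback expected */
		return 0;
	default:
		return -EOPNOTSUPP;
	}
}
#endif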
static void ieee80211_send_addba_request(struct ieee80211_sub_if_data *sdata,
const u8 *da, u16 tid,
u8 dialog_token, u16 start_seq_num,
u16 agg_size, u16 timeout)
{
struct ieee80211_local *local = sdata->local;
struct sk_buff *skb;
struct ieee80211_mgmt *mgmt;
u16 capab;
skb = dev_alloc_skb(sizeof(*mgmt) + local->hw.extra_tx_headroom);
if (!skb)
return;
skb_reserve(skb, local->hw.extra_tx_headroom);
mgmt = skb_put_zero(skb, 24);
memcpy(mgmt->da, da, ETH_ALEN);
memcpy(mgmt->sa, sdata->vif.addr, ETH_ALEN);
if (sdata->vif.type == NL80211_IFTYPE_AP ||
sdata->vif.type == NL80211_IFTYPE_AP_VLAN ||
sdata->vif.type == NL80211_IFTYPE_MESH_POINT)
memcpy(mgmt->bssid, sdata->vif.addr, ETH_ALEN);
else if (sdata->vif.type == NL80211_IFTYPE_STATION)
memcpy(mgmt->bssid, sdata->vif.cfg.ap_addr, ETH_ALEN);
else if (sdata->vif.type == NL80211_IFTYPE_ADHOC)
memcpy(mgmt->bssid, sdata->u.ibss.bssid, ETH_ALEN);
mgmt->frame_control = cpu_to_le16(IEEE80211_FTYPE_MGMT |
IEEE80211_STYPE_ACTION);
skb_put(skb, 1 + sizeof(mgmt->u.action.u.addba_req));
mgmt->u.action.category = WLAN_CATEGORY_BACK;
mgmt->u.action.u.addba_req.action_code = WLAN_ACTION_ADDBA_REQ;
mgmt->u.action.u.addba_req.dialog_token = dialog_token;
capab = IEEE80211_ADDBA_PARAM_AMSDU_MASK;
capab |= IEEE80211_ADDBA_PARAM_POLICY_MASK;
capab |= u16_encode_bits(tid, IEEE80211_ADDBA_PARAM_TID_MASK);
capab |= u16_encode_bits(agg_size, IEEE80211_ADDBA_PARAM_BUF_SIZE_MASK);
mgmt->u.action.u.addba_req.capab = cpu_to_le16(capab);
mgmt->u.action.u.addba_req.timeout = cpu_to_le16(timeout);
mgmt->u.action.u.addba_req.start_seq_num =
cpu_to_le16(start_seq_num << 4);
ieee80211_tx_skb_tid(sdata, skb, tid, -1);
}
void ieee80211_send_bar(struct ieee80211_vif *vif, u8 *ra, u16 tid, u16 ssn)
{
struct ieee80211_sub_if_data *sdata = vif_to_sdata(vif);
struct ieee80211_local *local = sdata->local;
struct sk_buff *skb;
struct ieee80211_bar *bar;
u16 bar_control = 0;
skb = dev_alloc_skb(sizeof(*bar) + local->hw.extra_tx_headroom);
if (!skb)
return;
skb_reserve(skb, local->hw.extra_tx_headroom);
bar = skb_put_zero(skb, sizeof(*bar));
bar->frame_control = cpu_to_le16(IEEE80211_FTYPE_CTL |
IEEE80211_STYPE_BACK_REQ);
memcpy(bar->ra, ra, ETH_ALEN);
memcpy(bar->ta, sdata->vif.addr, ETH_ALEN);
bar_control |= (u16)IEEE80211_BAR_CTRL_ACK_POLICY_NORMAL;
bar_control |= (u16)IEEE80211_BAR_CTRL_CBMTID_COMPRESSED_BA;
bar_control |= (u16)(tid << IEEE80211_BAR_CTRL_TID_INFO_SHIFT);
bar->control = cpu_to_le16(bar_control);
bar->start_seq_num = cpu_to_le16(ssn);
IEEE80211_SKB_CB(skb)->flags |= IEEE80211_TX_INTFL_DONT_ENCRYPT |
IEEE80211_TX_CTL_REQ_TX_STATUS;
ieee80211_tx_skb_tid(sdata, skb, tid, -1);
}
EXPORT_SYMBOL(ieee80211_send_bar);
void ieee80211_assign_tid_tx(struct sta_info *sta, int tid,
struct tid_ampdu_tx *tid_tx)
{
lockdep_assert_held(&sta->ampdu_mlme.mtx);
lockdep_assert_held(&sta->lock);
rcu_assign_pointer(sta->ampdu_mlme.tid_tx[tid], tid_tx);
}
/*
* When multiple aggregation sessions on multiple stations
* are being created/destroyed simultaneously, we need to
* refcount the global queue stop caused by that in order
* to not get into a situation where one of the aggregation
* setup or teardown re-enables queues before the other is
* ready to handle that.
*
* These two functions take care of this issue by keeping
* a global "agg_queue_stop" refcount.
*/
static void __acquires(agg_queue)
ieee80211_stop_queue_agg(struct ieee80211_sub_if_data *sdata, int tid)
{
int queue = sdata->vif.hw_queue[ieee80211_ac_from_tid(tid)];
/* we do refcounting here, so don't use the queue reason refcounting */
if (atomic_inc_return(&sdata->local->agg_queue_stop[queue]) == 1)
ieee80211_stop_queue_by_reason(
&sdata->local->hw, queue,
IEEE80211_QUEUE_STOP_REASON_AGGREGATION,
false);
__acquire(agg_queue);
}
static void __releases(agg_queue)
ieee80211_wake_queue_agg(struct ieee80211_sub_if_data *sdata, int tid)
{
int queue = sdata->vif.hw_queue[ieee80211_ac_from_tid(tid)];
if (atomic_dec_return(&sdata->local->agg_queue_stop[queue]) == 0)
ieee80211_wake_queue_by_reason(
&sdata->local->hw, queue,
IEEE80211_QUEUE_STOP_REASON_AGGREGATION,
false);
__release(agg_queue);
}
static void
ieee80211_agg_stop_txq(struct sta_info *sta, int tid)
{
struct ieee80211_txq *txq = sta->sta.txq[tid];
struct ieee80211_sub_if_data *sdata;
struct fq *fq;
struct txq_info *txqi;
if (!txq)
return;
txqi = to_txq_info(txq);
sdata = vif_to_sdata(txq->vif);
fq = &sdata->local->fq;
/* Lock here to protect against further seqno updates on dequeue */
spin_lock_bh(&fq->lock);
set_bit(IEEE80211_TXQ_STOP, &txqi->flags);
spin_unlock_bh(&fq->lock);
}
static void
ieee80211_agg_start_txq(struct sta_info *sta, int tid, bool enable)
{
struct ieee80211_txq *txq = sta->sta.txq[tid];
struct txq_info *txqi;
lockdep_assert_held(&sta->ampdu_mlme.mtx);
if (!txq)
return;
txqi = to_txq_info(txq);
if (enable)
set_bit(IEEE80211_TXQ_AMPDU, &txqi->flags);
else
clear_bit(IEEE80211_TXQ_AMPDU, &txqi->flags);
clear_bit(IEEE80211_TXQ_STOP, &txqi->flags);
local_bh_disable();
rcu_read_lock();
schedule_and_wake_txq(sta->sdata->local, txqi);
rcu_read_unlock();
local_bh_enable();
}
/*
* splice packets from the STA's pending to the local pending,
* requires a call to ieee80211_agg_splice_finish later
*/
static void __acquires(agg_queue)
ieee80211_agg_splice_packets(struct ieee80211_sub_if_data *sdata,
struct tid_ampdu_tx *tid_tx, u16 tid)
{
struct ieee80211_local *local = sdata->local;
int queue = sdata->vif.hw_queue[ieee80211_ac_from_tid(tid)];
unsigned long flags;
ieee80211_stop_queue_agg(sdata, tid);
if (WARN(!tid_tx,
"TID %d gone but expected when splicing aggregates from the pending queue\n",
tid))
return;
if (!skb_queue_empty(&tid_tx->pending)) {
spin_lock_irqsave(&local->queue_stop_reason_lock, flags);
/* copy over remaining packets */
skb_queue_splice_tail_init(&tid_tx->pending,
&local->pending[queue]);
spin_unlock_irqrestore(&local->queue_stop_reason_lock, flags);
}
}
static void __releases(agg_queue)
ieee80211_agg_splice_finish(struct ieee80211_sub_if_data *sdata, u16 tid)
{
ieee80211_wake_queue_agg(sdata, tid);
}
static void ieee80211_remove_tid_tx(struct sta_info *sta, int tid)
{
struct tid_ampdu_tx *tid_tx;
lockdep_assert_held(&sta->ampdu_mlme.mtx);
lockdep_assert_held(&sta->lock);
tid_tx = rcu_dereference_protected_tid_tx(sta, tid);
/*
* When we get here, the TX path will not be lockless any more wrt.
* aggregation, since the OPERATIONAL bit has long been cleared.
* Thus it will block on getting the lock, if it occurs. So if we
* stop the queue now, we will not get any more packets, and any
* that might be being processed will wait for us here, thereby
* guaranteeing that no packets go to the tid_tx pending queue any
* more.
*/
ieee80211_agg_splice_packets(sta->sdata, tid_tx, tid);
/* future packets must not find the tid_tx struct any more */
ieee80211_assign_tid_tx(sta, tid, NULL);
ieee80211_agg_splice_finish(sta->sdata, tid);
kfree_rcu(tid_tx, rcu_head);
}
int ___ieee80211_stop_tx_ba_session(struct sta_info *sta, u16 tid,
enum ieee80211_agg_stop_reason reason)
{
struct ieee80211_local *local = sta->local;
struct tid_ampdu_tx *tid_tx;
struct ieee80211_ampdu_params params = {
.sta = &sta->sta,
.tid = tid,
.buf_size = 0,
.amsdu = false,
.timeout = 0,
.ssn = 0,
};
int ret;
lockdep_assert_held(&sta->ampdu_mlme.mtx);
switch (reason) {
case AGG_STOP_DECLINED:
case AGG_STOP_LOCAL_REQUEST:
case AGG_STOP_PEER_REQUEST:
params.action = IEEE80211_AMPDU_TX_STOP_CONT;
break;
case AGG_STOP_DESTROY_STA:
params.action = IEEE80211_AMPDU_TX_STOP_FLUSH;
break;
default:
WARN_ON_ONCE(1);
return -EINVAL;
}
spin_lock_bh(&sta->lock);
/* free struct pending for start, if present */
tid_tx = sta->ampdu_mlme.tid_start_tx[tid];
kfree(tid_tx);
sta->ampdu_mlme.tid_start_tx[tid] = NULL;
tid_tx = rcu_dereference_protected_tid_tx(sta, tid);
if (!tid_tx) {
spin_unlock_bh(&sta->lock);
return -ENOENT;
}
/*
	 * if we're already stopping, ignore any new requests to stop,
	 * unless we're destroying it, in which case notify the driver
*/
if (test_bit(HT_AGG_STATE_STOPPING, &tid_tx->state)) {
spin_unlock_bh(&sta->lock);
if (reason != AGG_STOP_DESTROY_STA)
return -EALREADY;
params.action = IEEE80211_AMPDU_TX_STOP_FLUSH_CONT;
		ret = drv_ampdu_action(local, sta->sdata, &params);
WARN_ON_ONCE(ret);
return 0;
}
if (test_bit(HT_AGG_STATE_WANT_START, &tid_tx->state)) {
/* not even started yet! */
ieee80211_assign_tid_tx(sta, tid, NULL);
spin_unlock_bh(&sta->lock);
kfree_rcu(tid_tx, rcu_head);
return 0;
}
set_bit(HT_AGG_STATE_STOPPING, &tid_tx->state);
ieee80211_agg_stop_txq(sta, tid);
spin_unlock_bh(&sta->lock);
ht_dbg(sta->sdata, "Tx BA session stop requested for %pM tid %u\n",
sta->sta.addr, tid);
del_timer_sync(&tid_tx->addba_resp_timer);
del_timer_sync(&tid_tx->session_timer);
/*
* After this packets are no longer handed right through
* to the driver but are put onto tid_tx->pending instead,
* with locking to ensure proper access.
*/
clear_bit(HT_AGG_STATE_OPERATIONAL, &tid_tx->state);
/*
* There might be a few packets being processed right now (on
* another CPU) that have already gotten past the aggregation
* check when it was still OPERATIONAL and consequently have
* IEEE80211_TX_CTL_AMPDU set. In that case, this code might
* call into the driver at the same time or even before the
* TX paths calls into it, which could confuse the driver.
*
* Wait for all currently running TX paths to finish before
* telling the driver. New packets will not go through since
* the aggregation session is no longer OPERATIONAL.
*/
if (!local->in_reconfig)
synchronize_net();
tid_tx->stop_initiator = reason == AGG_STOP_PEER_REQUEST ?
WLAN_BACK_RECIPIENT :
WLAN_BACK_INITIATOR;
tid_tx->tx_stop = reason == AGG_STOP_LOCAL_REQUEST;
	ret = drv_ampdu_action(local, sta->sdata, &params);
/* HW shall not deny going back to legacy */
if (WARN_ON(ret)) {
/*
		 * Pending packets may get stuck in this case; we are not
		 * bothering with a workaround for now.
*/
}
/*
* In the case of AGG_STOP_DESTROY_STA, the driver won't
* necessarily call ieee80211_stop_tx_ba_cb(), so this may
* seem like we can leave the tid_tx data pending forever.
* This is true, in a way, but "forever" is only until the
* station struct is actually destroyed. In the meantime,
* leaving it around ensures that we don't transmit packets
* to the driver on this TID which might confuse it.
*/
return 0;
}
/*
 * After sending an ADDBA request we activate a timer that runs until
 * the ADDBA response arrives from the recipient.
 * If this timer expires, sta_addba_resp_timer_expired() is executed.
*/
static void sta_addba_resp_timer_expired(struct timer_list *t)
{
struct tid_ampdu_tx *tid_tx = from_timer(tid_tx, t, addba_resp_timer);
struct sta_info *sta = tid_tx->sta;
u8 tid = tid_tx->tid;
/* check if the TID waits for addBA response */
if (test_bit(HT_AGG_STATE_RESPONSE_RECEIVED, &tid_tx->state)) {
ht_dbg(sta->sdata,
"timer expired on %pM tid %d not expecting addBA response\n",
sta->sta.addr, tid);
return;
}
ht_dbg(sta->sdata, "addBA response timer expired on %pM tid %d\n",
sta->sta.addr, tid);
ieee80211_stop_tx_ba_session(&sta->sta, tid);
}
static void ieee80211_send_addba_with_timeout(struct sta_info *sta,
struct tid_ampdu_tx *tid_tx)
{
struct ieee80211_sub_if_data *sdata = sta->sdata;
struct ieee80211_local *local = sta->local;
u8 tid = tid_tx->tid;
u16 buf_size;
if (WARN_ON_ONCE(test_bit(HT_AGG_STATE_STOPPING, &tid_tx->state) ||
test_bit(HT_AGG_STATE_WANT_STOP, &tid_tx->state)))
return;
lockdep_assert_held(&sta->ampdu_mlme.mtx);
/* activate the timer for the recipient's addBA response */
mod_timer(&tid_tx->addba_resp_timer, jiffies + ADDBA_RESP_INTERVAL);
ht_dbg(sdata, "activated addBA response timer on %pM tid %d\n",
sta->sta.addr, tid);
spin_lock_bh(&sta->lock);
sta->ampdu_mlme.last_addba_req_time[tid] = jiffies;
sta->ampdu_mlme.addba_req_num[tid]++;
spin_unlock_bh(&sta->lock);
if (sta->sta.deflink.he_cap.has_he) {
buf_size = local->hw.max_tx_aggregation_subframes;
} else {
/*
* We really should use what the driver told us it will
* transmit as the maximum, but certain APs (e.g. the
* LinkSys WRT120N with FW v1.0.07 build 002 Jun 18 2012)
* will crash when we use a lower number.
*/
buf_size = IEEE80211_MAX_AMPDU_BUF_HT;
}
/* send AddBA request */
ieee80211_send_addba_request(sdata, sta->sta.addr, tid,
tid_tx->dialog_token, tid_tx->ssn,
buf_size, tid_tx->timeout);
WARN_ON(test_and_set_bit(HT_AGG_STATE_SENT_ADDBA, &tid_tx->state));
}
void ieee80211_tx_ba_session_handle_start(struct sta_info *sta, int tid)
{
struct tid_ampdu_tx *tid_tx;
struct ieee80211_local *local = sta->local;
struct ieee80211_sub_if_data *sdata;
struct ieee80211_ampdu_params params = {
.sta = &sta->sta,
.action = IEEE80211_AMPDU_TX_START,
.tid = tid,
.buf_size = 0,
.amsdu = false,
.timeout = 0,
};
int ret;
tid_tx = rcu_dereference_protected_tid_tx(sta, tid);
/*
* Start queuing up packets for this aggregation session.
* We're going to release them once the driver is OK with
* that.
*/
clear_bit(HT_AGG_STATE_WANT_START, &tid_tx->state);
/*
* Make sure no packets are being processed. This ensures that
* we have a valid starting sequence number and that in-flight
* packets have been flushed out and no packets for this TID
* will go into the driver during the ampdu_action call.
*/
synchronize_net();
sdata = sta->sdata;
params.ssn = sta->tid_seq[tid] >> 4;
	ret = drv_ampdu_action(local, sdata, &params);
tid_tx->ssn = params.ssn;
if (ret == IEEE80211_AMPDU_TX_START_DELAY_ADDBA) {
return;
} else if (ret == IEEE80211_AMPDU_TX_START_IMMEDIATE) {
/*
* We didn't send the request yet, so don't need to check
* here if we already got a response, just mark as driver
* ready immediately.
*/
set_bit(HT_AGG_STATE_DRV_READY, &tid_tx->state);
} else if (ret) {
if (!sdata)
return;
ht_dbg(sdata,
"BA request denied - HW unavailable for %pM tid %d\n",
sta->sta.addr, tid);
spin_lock_bh(&sta->lock);
ieee80211_agg_splice_packets(sdata, tid_tx, tid);
ieee80211_assign_tid_tx(sta, tid, NULL);
ieee80211_agg_splice_finish(sdata, tid);
spin_unlock_bh(&sta->lock);
ieee80211_agg_start_txq(sta, tid, false);
kfree_rcu(tid_tx, rcu_head);
return;
}
ieee80211_send_addba_with_timeout(sta, tid_tx);
}
void ieee80211_refresh_tx_agg_session_timer(struct ieee80211_sta *pubsta,
u16 tid)
{
struct sta_info *sta = container_of(pubsta, struct sta_info, sta);
struct tid_ampdu_tx *tid_tx;
if (WARN_ON_ONCE(tid >= IEEE80211_NUM_TIDS))
return;
tid_tx = rcu_dereference(sta->ampdu_mlme.tid_tx[tid]);
if (!tid_tx)
return;
tid_tx->last_tx = jiffies;
}
EXPORT_SYMBOL(ieee80211_refresh_tx_agg_session_timer);
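/*
 * Illustrative usage sketch (not part of the original file): a driver that
 * builds A-MPDUs itself might keep the session alive by refreshing the timer
 * from its TX completion path, e.g.:
 *
 *	ieee80211_refresh_tx_agg_session_timer(pubsta, tid);
 *
 * The calling context shown here is an assumption for illustration only.
 */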
/*
 * After accepting the ADDBA response we activate a session timer,
 * resetting it after each frame that we send.
*/
static void sta_tx_agg_session_timer_expired(struct timer_list *t)
{
struct tid_ampdu_tx *tid_tx = from_timer(tid_tx, t, session_timer);
struct sta_info *sta = tid_tx->sta;
u8 tid = tid_tx->tid;
unsigned long timeout;
if (test_bit(HT_AGG_STATE_STOPPING, &tid_tx->state)) {
return;
}
timeout = tid_tx->last_tx + TU_TO_JIFFIES(tid_tx->timeout);
if (time_is_after_jiffies(timeout)) {
mod_timer(&tid_tx->session_timer, timeout);
return;
}
ht_dbg(sta->sdata, "tx session timer expired on %pM tid %d\n",
sta->sta.addr, tid);
ieee80211_stop_tx_ba_session(&sta->sta, tid);
}
int ieee80211_start_tx_ba_session(struct ieee80211_sta *pubsta, u16 tid,
u16 timeout)
{
struct sta_info *sta = container_of(pubsta, struct sta_info, sta);
struct ieee80211_sub_if_data *sdata = sta->sdata;
struct ieee80211_local *local = sdata->local;
struct tid_ampdu_tx *tid_tx;
int ret = 0;
trace_api_start_tx_ba_session(pubsta, tid);
if (WARN(sta->reserved_tid == tid,
"Requested to start BA session on reserved tid=%d", tid))
return -EINVAL;
if (!pubsta->deflink.ht_cap.ht_supported &&
sta->sdata->vif.bss_conf.chandef.chan->band != NL80211_BAND_6GHZ)
return -EINVAL;
if (WARN_ON_ONCE(!local->ops->ampdu_action))
return -EINVAL;
if ((tid >= IEEE80211_NUM_TIDS) ||
!ieee80211_hw_check(&local->hw, AMPDU_AGGREGATION) ||
ieee80211_hw_check(&local->hw, TX_AMPDU_SETUP_IN_HW))
return -EINVAL;
if (WARN_ON(tid >= IEEE80211_FIRST_TSPEC_TSID))
return -EINVAL;
ht_dbg(sdata, "Open BA session requested for %pM tid %u\n",
pubsta->addr, tid);
if (sdata->vif.type != NL80211_IFTYPE_STATION &&
sdata->vif.type != NL80211_IFTYPE_MESH_POINT &&
sdata->vif.type != NL80211_IFTYPE_AP_VLAN &&
sdata->vif.type != NL80211_IFTYPE_AP &&
sdata->vif.type != NL80211_IFTYPE_ADHOC)
return -EINVAL;
if (test_sta_flag(sta, WLAN_STA_BLOCK_BA)) {
ht_dbg(sdata,
"BA sessions blocked - Denying BA session request %pM tid %d\n",
sta->sta.addr, tid);
return -EINVAL;
}
if (test_sta_flag(sta, WLAN_STA_MFP) &&
!test_sta_flag(sta, WLAN_STA_AUTHORIZED)) {
ht_dbg(sdata,
"MFP STA not authorized - deny BA session request %pM tid %d\n",
sta->sta.addr, tid);
return -EINVAL;
}
/*
* 802.11n-2009 11.5.1.1: If the initiating STA is an HT STA, is a
* member of an IBSS, and has no other existing Block Ack agreement
* with the recipient STA, then the initiating STA shall transmit a
* Probe Request frame to the recipient STA and shall not transmit an
* ADDBA Request frame unless it receives a Probe Response frame
* from the recipient within dot11ADDBAFailureTimeout.
*
* The probe request mechanism for ADDBA is currently not implemented,
	 * but we only set up Block Ack sessions with HT STAs. This information
	 * is gathered when we receive BSS info from a probe response or a beacon.
*/
if (sta->sdata->vif.type == NL80211_IFTYPE_ADHOC &&
!sta->sta.deflink.ht_cap.ht_supported) {
ht_dbg(sdata,
"BA request denied - IBSS STA %pM does not advertise HT support\n",
pubsta->addr);
return -EINVAL;
}
spin_lock_bh(&sta->lock);
/* we have tried too many times, receiver does not want A-MPDU */
if (sta->ampdu_mlme.addba_req_num[tid] > HT_AGG_MAX_RETRIES) {
ret = -EBUSY;
goto err_unlock_sta;
}
/*
* if we have tried more than HT_AGG_BURST_RETRIES times we
* will spread our requests in time to avoid stalling connection
* for too long
*/
if (sta->ampdu_mlme.addba_req_num[tid] > HT_AGG_BURST_RETRIES &&
time_before(jiffies, sta->ampdu_mlme.last_addba_req_time[tid] +
HT_AGG_RETRIES_PERIOD)) {
ht_dbg(sdata,
"BA request denied - %d failed requests on %pM tid %u\n",
sta->ampdu_mlme.addba_req_num[tid], sta->sta.addr, tid);
ret = -EBUSY;
goto err_unlock_sta;
}
tid_tx = rcu_dereference_protected_tid_tx(sta, tid);
/* check if the TID is not in aggregation flow already */
if (tid_tx || sta->ampdu_mlme.tid_start_tx[tid]) {
ht_dbg(sdata,
"BA request denied - session is not idle on %pM tid %u\n",
sta->sta.addr, tid);
ret = -EAGAIN;
goto err_unlock_sta;
}
/* prepare A-MPDU MLME for Tx aggregation */
tid_tx = kzalloc(sizeof(struct tid_ampdu_tx), GFP_ATOMIC);
if (!tid_tx) {
ret = -ENOMEM;
goto err_unlock_sta;
}
skb_queue_head_init(&tid_tx->pending);
__set_bit(HT_AGG_STATE_WANT_START, &tid_tx->state);
tid_tx->timeout = timeout;
tid_tx->sta = sta;
tid_tx->tid = tid;
/* response timer */
timer_setup(&tid_tx->addba_resp_timer, sta_addba_resp_timer_expired, 0);
/* tx timer */
timer_setup(&tid_tx->session_timer,
sta_tx_agg_session_timer_expired, TIMER_DEFERRABLE);
/* assign a dialog token */
sta->ampdu_mlme.dialog_token_allocator++;
tid_tx->dialog_token = sta->ampdu_mlme.dialog_token_allocator;
/*
* Finally, assign it to the start array; the work item will
* collect it and move it to the normal array.
*/
sta->ampdu_mlme.tid_start_tx[tid] = tid_tx;
ieee80211_queue_work(&local->hw, &sta->ampdu_mlme.work);
	/* this flow continues in the work item */
err_unlock_sta:
spin_unlock_bh(&sta->lock);
return ret;
}
EXPORT_SYMBOL(ieee80211_start_tx_ba_session);
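/*
 * Illustrative usage sketch (not part of the original file): a rate control
 * algorithm or driver might request aggregation once it sees enough traffic
 * on a TID, for example:
 *
 *	ret = ieee80211_start_tx_ba_session(pubsta, tid, 0);
 *	if (ret == -EBUSY || ret == -EAGAIN)
 *		... back off and retry later ...
 *
 * The zero ADDBA timeout and the retry policy are assumptions chosen for the
 * example; the error codes correspond to the paths in the function above.
 */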
static void ieee80211_agg_tx_operational(struct ieee80211_local *local,
struct sta_info *sta, u16 tid)
{
struct tid_ampdu_tx *tid_tx;
struct ieee80211_ampdu_params params = {
.sta = &sta->sta,
.action = IEEE80211_AMPDU_TX_OPERATIONAL,
.tid = tid,
.timeout = 0,
.ssn = 0,
};
lockdep_assert_held(&sta->ampdu_mlme.mtx);
tid_tx = rcu_dereference_protected_tid_tx(sta, tid);
params.buf_size = tid_tx->buf_size;
params.amsdu = tid_tx->amsdu;
ht_dbg(sta->sdata, "Aggregation is on for %pM tid %d\n",
sta->sta.addr, tid);
	drv_ampdu_action(local, sta->sdata, &params);
/*
	 * synchronize with the TX path: while we are splicing, the TX path
	 * should block so it won't put more packets onto the pending queue.
*/
spin_lock_bh(&sta->lock);
ieee80211_agg_splice_packets(sta->sdata, tid_tx, tid);
/*
* Now mark as operational. This will be visible
* in the TX path, and lets it go lock-free in
* the common case.
*/
set_bit(HT_AGG_STATE_OPERATIONAL, &tid_tx->state);
ieee80211_agg_splice_finish(sta->sdata, tid);
spin_unlock_bh(&sta->lock);
ieee80211_agg_start_txq(sta, tid, true);
}
void ieee80211_start_tx_ba_cb(struct sta_info *sta, int tid,
struct tid_ampdu_tx *tid_tx)
{
struct ieee80211_sub_if_data *sdata = sta->sdata;
struct ieee80211_local *local = sdata->local;
lockdep_assert_held(&sta->ampdu_mlme.mtx);
if (WARN_ON(test_and_set_bit(HT_AGG_STATE_DRV_READY, &tid_tx->state)))
return;
if (test_bit(HT_AGG_STATE_STOPPING, &tid_tx->state) ||
test_bit(HT_AGG_STATE_WANT_STOP, &tid_tx->state))
return;
if (!test_bit(HT_AGG_STATE_SENT_ADDBA, &tid_tx->state)) {
ieee80211_send_addba_with_timeout(sta, tid_tx);
		/* RESPONSE_RECEIVED state would trigger the flow again */
return;
}
if (test_bit(HT_AGG_STATE_RESPONSE_RECEIVED, &tid_tx->state))
ieee80211_agg_tx_operational(local, sta, tid);
}
static struct tid_ampdu_tx *
ieee80211_lookup_tid_tx(struct ieee80211_sub_if_data *sdata,
const u8 *ra, u16 tid, struct sta_info **sta)
{
struct tid_ampdu_tx *tid_tx;
if (tid >= IEEE80211_NUM_TIDS) {
ht_dbg(sdata, "Bad TID value: tid = %d (>= %d)\n",
tid, IEEE80211_NUM_TIDS);
return NULL;
}
*sta = sta_info_get_bss(sdata, ra);
if (!*sta) {
ht_dbg(sdata, "Could not find station: %pM\n", ra);
return NULL;
}
tid_tx = rcu_dereference((*sta)->ampdu_mlme.tid_tx[tid]);
if (WARN_ON(!tid_tx))
ht_dbg(sdata, "addBA was not requested!\n");
return tid_tx;
}
void ieee80211_start_tx_ba_cb_irqsafe(struct ieee80211_vif *vif,
const u8 *ra, u16 tid)
{
struct ieee80211_sub_if_data *sdata = vif_to_sdata(vif);
struct ieee80211_local *local = sdata->local;
struct sta_info *sta;
struct tid_ampdu_tx *tid_tx;
trace_api_start_tx_ba_cb(sdata, ra, tid);
rcu_read_lock();
tid_tx = ieee80211_lookup_tid_tx(sdata, ra, tid, &sta);
if (!tid_tx)
goto out;
set_bit(HT_AGG_STATE_START_CB, &tid_tx->state);
ieee80211_queue_work(&local->hw, &sta->ampdu_mlme.work);
out:
rcu_read_unlock();
}
EXPORT_SYMBOL(ieee80211_start_tx_ba_cb_irqsafe);
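/*
 * Illustrative usage sketch (not part of the original file): a driver whose
 * ampdu_action(IEEE80211_AMPDU_TX_START) returned 0 (rather than
 * IEEE80211_AMPDU_TX_START_IMMEDIATE) is expected to report readiness
 * asynchronously, typically from its own completion handling:
 *
 *	ieee80211_start_tx_ba_cb_irqsafe(vif, peer_addr, tid);
 *
 * How the driver decides it is ready is driver specific; as the code above
 * shows, the call only sets HT_AGG_STATE_START_CB and queues the
 * ba_session_work that runs ieee80211_start_tx_ba_cb().
 */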
int __ieee80211_stop_tx_ba_session(struct sta_info *sta, u16 tid,
enum ieee80211_agg_stop_reason reason)
{
int ret;
mutex_lock(&sta->ampdu_mlme.mtx);
ret = ___ieee80211_stop_tx_ba_session(sta, tid, reason);
mutex_unlock(&sta->ampdu_mlme.mtx);
return ret;
}
int ieee80211_stop_tx_ba_session(struct ieee80211_sta *pubsta, u16 tid)
{
struct sta_info *sta = container_of(pubsta, struct sta_info, sta);
struct ieee80211_sub_if_data *sdata = sta->sdata;
struct ieee80211_local *local = sdata->local;
struct tid_ampdu_tx *tid_tx;
int ret = 0;
trace_api_stop_tx_ba_session(pubsta, tid);
if (!local->ops->ampdu_action)
return -EINVAL;
if (tid >= IEEE80211_NUM_TIDS)
return -EINVAL;
spin_lock_bh(&sta->lock);
tid_tx = rcu_dereference_protected_tid_tx(sta, tid);
if (!tid_tx) {
ret = -ENOENT;
goto unlock;
}
WARN(sta->reserved_tid == tid,
"Requested to stop BA session on reserved tid=%d", tid);
if (test_bit(HT_AGG_STATE_STOPPING, &tid_tx->state)) {
/* already in progress stopping it */
ret = 0;
goto unlock;
}
set_bit(HT_AGG_STATE_WANT_STOP, &tid_tx->state);
ieee80211_queue_work(&local->hw, &sta->ampdu_mlme.work);
unlock:
spin_unlock_bh(&sta->lock);
return ret;
}
EXPORT_SYMBOL(ieee80211_stop_tx_ba_session);
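/*
 * Illustrative usage sketch (not part of the original file): sessions are
 * torn down with the same station/TID pair used to start them, e.g. from a
 * driver error path or an inactivity check:
 *
 *	ieee80211_stop_tx_ba_session(pubsta, tid);
 *
 * -ENOENT means no session exists for that TID; 0 is also returned when a
 * stop is already in progress, matching the code above.
 */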
void ieee80211_stop_tx_ba_cb(struct sta_info *sta, int tid,
struct tid_ampdu_tx *tid_tx)
{
struct ieee80211_sub_if_data *sdata = sta->sdata;
bool send_delba = false;
bool start_txq = false;
ht_dbg(sdata, "Stopping Tx BA session for %pM tid %d\n",
sta->sta.addr, tid);
spin_lock_bh(&sta->lock);
if (!test_bit(HT_AGG_STATE_STOPPING, &tid_tx->state)) {
ht_dbg(sdata,
"unexpected callback to A-MPDU stop for %pM tid %d\n",
sta->sta.addr, tid);
goto unlock_sta;
}
if (tid_tx->stop_initiator == WLAN_BACK_INITIATOR && tid_tx->tx_stop)
send_delba = true;
ieee80211_remove_tid_tx(sta, tid);
start_txq = true;
unlock_sta:
spin_unlock_bh(&sta->lock);
if (start_txq)
ieee80211_agg_start_txq(sta, tid, false);
if (send_delba)
ieee80211_send_delba(sdata, sta->sta.addr, tid,
WLAN_BACK_INITIATOR, WLAN_REASON_QSTA_NOT_USE);
}
void ieee80211_stop_tx_ba_cb_irqsafe(struct ieee80211_vif *vif,
const u8 *ra, u16 tid)
{
struct ieee80211_sub_if_data *sdata = vif_to_sdata(vif);
struct ieee80211_local *local = sdata->local;
struct sta_info *sta;
struct tid_ampdu_tx *tid_tx;
trace_api_stop_tx_ba_cb(sdata, ra, tid);
rcu_read_lock();
tid_tx = ieee80211_lookup_tid_tx(sdata, ra, tid, &sta);
if (!tid_tx)
goto out;
set_bit(HT_AGG_STATE_STOP_CB, &tid_tx->state);
ieee80211_queue_work(&local->hw, &sta->ampdu_mlme.work);
out:
rcu_read_unlock();
}
EXPORT_SYMBOL(ieee80211_stop_tx_ba_cb_irqsafe);
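/*
 * Illustrative usage sketch (not part of the original file): a driver that
 * finishes stopping aggregation asynchronously (e.g. after flushing its
 * hardware queues for the TID) reports completion with:
 *
 *	ieee80211_stop_tx_ba_cb_irqsafe(vif, peer_addr, tid);
 *
 * which, as above, only sets HT_AGG_STATE_STOP_CB and queues the
 * ba_session_work that finally runs ieee80211_stop_tx_ba_cb().
 */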
void ieee80211_process_addba_resp(struct ieee80211_local *local,
struct sta_info *sta,
struct ieee80211_mgmt *mgmt,
size_t len)
{
struct tid_ampdu_tx *tid_tx;
struct ieee80211_txq *txq;
u16 capab, tid, buf_size;
bool amsdu;
capab = le16_to_cpu(mgmt->u.action.u.addba_resp.capab);
amsdu = capab & IEEE80211_ADDBA_PARAM_AMSDU_MASK;
tid = u16_get_bits(capab, IEEE80211_ADDBA_PARAM_TID_MASK);
buf_size = u16_get_bits(capab, IEEE80211_ADDBA_PARAM_BUF_SIZE_MASK);
buf_size = min(buf_size, local->hw.max_tx_aggregation_subframes);
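	/*
	 * For reference, the ADDBA Parameter Set field decoded above is laid
	 * out as: bit 0 A-MSDU supported, bit 1 block ack policy, bits 2-5
	 * TID, bits 6-15 buffer size (per the IEEE 802.11 ADDBA definition).
	 */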
txq = sta->sta.txq[tid];
if (!amsdu && txq)
set_bit(IEEE80211_TXQ_NO_AMSDU, &to_txq_info(txq)->flags);
mutex_lock(&sta->ampdu_mlme.mtx);
tid_tx = rcu_dereference_protected_tid_tx(sta, tid);
if (!tid_tx)
goto out;
if (mgmt->u.action.u.addba_resp.dialog_token != tid_tx->dialog_token) {
ht_dbg(sta->sdata, "wrong addBA response token, %pM tid %d\n",
sta->sta.addr, tid);
goto out;
}
del_timer_sync(&tid_tx->addba_resp_timer);
ht_dbg(sta->sdata, "switched off addBA timer for %pM tid %d\n",
sta->sta.addr, tid);
/*
* addba_resp_timer may have fired before we got here, and
	 * caused WANT_STOP to be set. If that stop request was already
	 * processed further, STOPPING might be set.
*/
if (test_bit(HT_AGG_STATE_WANT_STOP, &tid_tx->state) ||
test_bit(HT_AGG_STATE_STOPPING, &tid_tx->state)) {
ht_dbg(sta->sdata,
"got addBA resp for %pM tid %d but we already gave up\n",
sta->sta.addr, tid);
goto out;
}
/*
* IEEE 802.11-2007 7.3.1.14:
* In an ADDBA Response frame, when the Status Code field
* is set to 0, the Buffer Size subfield is set to a value
* of at least 1.
*/
if (le16_to_cpu(mgmt->u.action.u.addba_resp.status)
== WLAN_STATUS_SUCCESS && buf_size) {
if (test_and_set_bit(HT_AGG_STATE_RESPONSE_RECEIVED,
&tid_tx->state)) {
/* ignore duplicate response */
goto out;
}
tid_tx->buf_size = buf_size;
tid_tx->amsdu = amsdu;
if (test_bit(HT_AGG_STATE_DRV_READY, &tid_tx->state))
ieee80211_agg_tx_operational(local, sta, tid);
sta->ampdu_mlme.addba_req_num[tid] = 0;
tid_tx->timeout =
le16_to_cpu(mgmt->u.action.u.addba_resp.timeout);
if (tid_tx->timeout) {
mod_timer(&tid_tx->session_timer,
TU_TO_EXP_TIME(tid_tx->timeout));
tid_tx->last_tx = jiffies;
}
} else {
___ieee80211_stop_tx_ba_session(sta, tid, AGG_STOP_DECLINED);
}
out:
mutex_unlock(&sta->ampdu_mlme.mtx);
}
| linux-master | net/mac80211/agg-tx.c |
// SPDX-License-Identifier: GPL-2.0-only
/*
* HT handling
*
* Copyright 2003, Jouni Malinen <[email protected]>
* Copyright 2002-2005, Instant802 Networks, Inc.
* Copyright 2005-2006, Devicescape Software, Inc.
* Copyright 2006-2007 Jiri Benc <[email protected]>
* Copyright 2007, Michael Wu <[email protected]>
* Copyright 2007-2010, Intel Corporation
* Copyright 2017 Intel Deutschland GmbH
* Copyright(c) 2020-2023 Intel Corporation
*/
#include <linux/ieee80211.h>
#include <linux/export.h>
#include <net/mac80211.h>
#include "ieee80211_i.h"
#include "rate.h"
static void __check_htcap_disable(struct ieee80211_ht_cap *ht_capa,
struct ieee80211_ht_cap *ht_capa_mask,
struct ieee80211_sta_ht_cap *ht_cap,
u16 flag)
{
__le16 le_flag = cpu_to_le16(flag);
if (ht_capa_mask->cap_info & le_flag) {
if (!(ht_capa->cap_info & le_flag))
ht_cap->cap &= ~flag;
}
}
static void __check_htcap_enable(struct ieee80211_ht_cap *ht_capa,
struct ieee80211_ht_cap *ht_capa_mask,
struct ieee80211_sta_ht_cap *ht_cap,
u16 flag)
{
__le16 le_flag = cpu_to_le16(flag);
if ((ht_capa_mask->cap_info & le_flag) &&
(ht_capa->cap_info & le_flag))
ht_cap->cap |= flag;
}
void ieee80211_apply_htcap_overrides(struct ieee80211_sub_if_data *sdata,
struct ieee80211_sta_ht_cap *ht_cap)
{
struct ieee80211_ht_cap *ht_capa, *ht_capa_mask;
u8 *scaps, *smask;
int i;
if (!ht_cap->ht_supported)
return;
switch (sdata->vif.type) {
case NL80211_IFTYPE_STATION:
ht_capa = &sdata->u.mgd.ht_capa;
ht_capa_mask = &sdata->u.mgd.ht_capa_mask;
break;
case NL80211_IFTYPE_ADHOC:
ht_capa = &sdata->u.ibss.ht_capa;
ht_capa_mask = &sdata->u.ibss.ht_capa_mask;
break;
default:
WARN_ON_ONCE(1);
return;
}
scaps = (u8 *)(&ht_capa->mcs.rx_mask);
smask = (u8 *)(&ht_capa_mask->mcs.rx_mask);
/* NOTE: If you add more over-rides here, update register_hw
* ht_capa_mod_mask logic in main.c as well.
* And, if this method can ever change ht_cap.ht_supported, fix
* the check in ieee80211_add_ht_ie.
*/
/* check for HT over-rides, MCS rates first. */
for (i = 0; i < IEEE80211_HT_MCS_MASK_LEN; i++) {
u8 m = smask[i];
ht_cap->mcs.rx_mask[i] &= ~m; /* turn off all masked bits */
/* Add back rates that are supported */
ht_cap->mcs.rx_mask[i] |= (m & scaps[i]);
}
/* Force removal of HT-40 capabilities? */
__check_htcap_disable(ht_capa, ht_capa_mask, ht_cap,
IEEE80211_HT_CAP_SUP_WIDTH_20_40);
__check_htcap_disable(ht_capa, ht_capa_mask, ht_cap,
IEEE80211_HT_CAP_SGI_40);
/* Allow user to disable SGI-20 (SGI-40 is handled above) */
__check_htcap_disable(ht_capa, ht_capa_mask, ht_cap,
IEEE80211_HT_CAP_SGI_20);
/* Allow user to disable the max-AMSDU bit. */
__check_htcap_disable(ht_capa, ht_capa_mask, ht_cap,
IEEE80211_HT_CAP_MAX_AMSDU);
/* Allow user to disable LDPC */
__check_htcap_disable(ht_capa, ht_capa_mask, ht_cap,
IEEE80211_HT_CAP_LDPC_CODING);
/* Allow user to enable 40 MHz intolerant bit. */
__check_htcap_enable(ht_capa, ht_capa_mask, ht_cap,
IEEE80211_HT_CAP_40MHZ_INTOLERANT);
/* Allow user to enable TX STBC bit */
__check_htcap_enable(ht_capa, ht_capa_mask, ht_cap,
IEEE80211_HT_CAP_TX_STBC);
/* Allow user to configure RX STBC bits */
if (ht_capa_mask->cap_info & cpu_to_le16(IEEE80211_HT_CAP_RX_STBC))
ht_cap->cap |= le16_to_cpu(ht_capa->cap_info) &
IEEE80211_HT_CAP_RX_STBC;
/* Allow user to decrease AMPDU factor */
if (ht_capa_mask->ampdu_params_info &
IEEE80211_HT_AMPDU_PARM_FACTOR) {
u8 n = ht_capa->ampdu_params_info &
IEEE80211_HT_AMPDU_PARM_FACTOR;
if (n < ht_cap->ampdu_factor)
ht_cap->ampdu_factor = n;
}
/* Allow the user to increase AMPDU density. */
if (ht_capa_mask->ampdu_params_info &
IEEE80211_HT_AMPDU_PARM_DENSITY) {
u8 n = (ht_capa->ampdu_params_info &
IEEE80211_HT_AMPDU_PARM_DENSITY)
>> IEEE80211_HT_AMPDU_PARM_DENSITY_SHIFT;
if (n > ht_cap->ampdu_density)
ht_cap->ampdu_density = n;
}
}
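/*
 * Illustrative example (not from the original file) of how an override pair
 * is interpreted: to force SGI-20 off, the user-supplied override sets the
 * bit in the mask and clears it in the capability value, so that
 * __check_htcap_disable() above removes it from ht_cap->cap:
 *
 *	ht_capa_mask->cap_info |= cpu_to_le16(IEEE80211_HT_CAP_SGI_20);
 *	ht_capa->cap_info &= ~cpu_to_le16(IEEE80211_HT_CAP_SGI_20);
 */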
bool ieee80211_ht_cap_ie_to_sta_ht_cap(struct ieee80211_sub_if_data *sdata,
struct ieee80211_supported_band *sband,
const struct ieee80211_ht_cap *ht_cap_ie,
struct link_sta_info *link_sta)
{
struct ieee80211_bss_conf *link_conf;
struct sta_info *sta = link_sta->sta;
struct ieee80211_sta_ht_cap ht_cap, own_cap;
u8 ampdu_info, tx_mcs_set_cap;
int i, max_tx_streams;
bool changed;
enum ieee80211_sta_rx_bandwidth bw;
enum nl80211_chan_width width;
memset(&ht_cap, 0, sizeof(ht_cap));
if (!ht_cap_ie || !sband->ht_cap.ht_supported)
goto apply;
ht_cap.ht_supported = true;
own_cap = sband->ht_cap;
/*
	 * If the user has specified capability over-rides, take care
* of that if the station we're setting up is the AP or TDLS peer that
* we advertised a restricted capability set to. Override
* our own capabilities and then use those below.
*/
if (sdata->vif.type == NL80211_IFTYPE_STATION ||
sdata->vif.type == NL80211_IFTYPE_ADHOC)
ieee80211_apply_htcap_overrides(sdata, &own_cap);
/*
* The bits listed in this expression should be
* the same for the peer and us, if the station
* advertises more then we can't use those thus
* we mask them out.
*/
ht_cap.cap = le16_to_cpu(ht_cap_ie->cap_info) &
(own_cap.cap | ~(IEEE80211_HT_CAP_LDPC_CODING |
IEEE80211_HT_CAP_SUP_WIDTH_20_40 |
IEEE80211_HT_CAP_GRN_FLD |
IEEE80211_HT_CAP_SGI_20 |
IEEE80211_HT_CAP_SGI_40 |
IEEE80211_HT_CAP_DSSSCCK40));
/*
* The STBC bits are asymmetric -- if we don't have
* TX then mask out the peer's RX and vice versa.
*/
if (!(own_cap.cap & IEEE80211_HT_CAP_TX_STBC))
ht_cap.cap &= ~IEEE80211_HT_CAP_RX_STBC;
if (!(own_cap.cap & IEEE80211_HT_CAP_RX_STBC))
ht_cap.cap &= ~IEEE80211_HT_CAP_TX_STBC;
ampdu_info = ht_cap_ie->ampdu_params_info;
ht_cap.ampdu_factor =
ampdu_info & IEEE80211_HT_AMPDU_PARM_FACTOR;
ht_cap.ampdu_density =
(ampdu_info & IEEE80211_HT_AMPDU_PARM_DENSITY) >> 2;
/* own MCS TX capabilities */
tx_mcs_set_cap = own_cap.mcs.tx_params;
/* Copy peer MCS TX capabilities, the driver might need them. */
ht_cap.mcs.tx_params = ht_cap_ie->mcs.tx_params;
/* can we TX with MCS rates? */
if (!(tx_mcs_set_cap & IEEE80211_HT_MCS_TX_DEFINED))
goto apply;
/* Counting from 0, therefore +1 */
if (tx_mcs_set_cap & IEEE80211_HT_MCS_TX_RX_DIFF)
max_tx_streams =
((tx_mcs_set_cap & IEEE80211_HT_MCS_TX_MAX_STREAMS_MASK)
>> IEEE80211_HT_MCS_TX_MAX_STREAMS_SHIFT) + 1;
else
max_tx_streams = IEEE80211_HT_MCS_TX_MAX_STREAMS;
/*
* 802.11n-2009 20.3.5 / 20.6 says:
* - indices 0 to 7 and 32 are single spatial stream
* - 8 to 31 are multiple spatial streams using equal modulation
* [8..15 for two streams, 16..23 for three and 24..31 for four]
* - remainder are multiple spatial streams using unequal modulation
*/
for (i = 0; i < max_tx_streams; i++)
ht_cap.mcs.rx_mask[i] =
own_cap.mcs.rx_mask[i] & ht_cap_ie->mcs.rx_mask[i];
if (tx_mcs_set_cap & IEEE80211_HT_MCS_TX_UNEQUAL_MODULATION)
for (i = IEEE80211_HT_MCS_UNEQUAL_MODULATION_START_BYTE;
i < IEEE80211_HT_MCS_MASK_LEN; i++)
ht_cap.mcs.rx_mask[i] =
own_cap.mcs.rx_mask[i] &
ht_cap_ie->mcs.rx_mask[i];
/* handle MCS rate 32 too */
if (own_cap.mcs.rx_mask[32/8] & ht_cap_ie->mcs.rx_mask[32/8] & 1)
ht_cap.mcs.rx_mask[32/8] |= 1;
/* set Rx highest rate */
ht_cap.mcs.rx_highest = ht_cap_ie->mcs.rx_highest;
if (ht_cap.cap & IEEE80211_HT_CAP_MAX_AMSDU)
link_sta->pub->agg.max_amsdu_len = IEEE80211_MAX_MPDU_LEN_HT_7935;
else
link_sta->pub->agg.max_amsdu_len = IEEE80211_MAX_MPDU_LEN_HT_3839;
ieee80211_sta_recalc_aggregates(&sta->sta);
apply:
changed = memcmp(&link_sta->pub->ht_cap, &ht_cap, sizeof(ht_cap));
memcpy(&link_sta->pub->ht_cap, &ht_cap, sizeof(ht_cap));
rcu_read_lock();
link_conf = rcu_dereference(sdata->vif.link_conf[link_sta->link_id]);
if (WARN_ON(!link_conf))
width = NL80211_CHAN_WIDTH_20_NOHT;
else
width = link_conf->chandef.width;
switch (width) {
default:
WARN_ON_ONCE(1);
fallthrough;
case NL80211_CHAN_WIDTH_20_NOHT:
case NL80211_CHAN_WIDTH_20:
bw = IEEE80211_STA_RX_BW_20;
break;
case NL80211_CHAN_WIDTH_40:
case NL80211_CHAN_WIDTH_80:
case NL80211_CHAN_WIDTH_80P80:
case NL80211_CHAN_WIDTH_160:
bw = ht_cap.cap & IEEE80211_HT_CAP_SUP_WIDTH_20_40 ?
IEEE80211_STA_RX_BW_40 : IEEE80211_STA_RX_BW_20;
break;
}
rcu_read_unlock();
link_sta->pub->bandwidth = bw;
link_sta->cur_max_bandwidth =
ht_cap.cap & IEEE80211_HT_CAP_SUP_WIDTH_20_40 ?
IEEE80211_STA_RX_BW_40 : IEEE80211_STA_RX_BW_20;
if (sta->sdata->vif.type == NL80211_IFTYPE_AP ||
sta->sdata->vif.type == NL80211_IFTYPE_AP_VLAN) {
enum ieee80211_smps_mode smps_mode;
switch ((ht_cap.cap & IEEE80211_HT_CAP_SM_PS)
>> IEEE80211_HT_CAP_SM_PS_SHIFT) {
case WLAN_HT_CAP_SM_PS_INVALID:
case WLAN_HT_CAP_SM_PS_STATIC:
smps_mode = IEEE80211_SMPS_STATIC;
break;
case WLAN_HT_CAP_SM_PS_DYNAMIC:
smps_mode = IEEE80211_SMPS_DYNAMIC;
break;
case WLAN_HT_CAP_SM_PS_DISABLED:
smps_mode = IEEE80211_SMPS_OFF;
break;
}
if (smps_mode != link_sta->pub->smps_mode)
changed = true;
link_sta->pub->smps_mode = smps_mode;
} else {
link_sta->pub->smps_mode = IEEE80211_SMPS_OFF;
}
return changed;
}
void ieee80211_sta_tear_down_BA_sessions(struct sta_info *sta,
enum ieee80211_agg_stop_reason reason)
{
int i;
mutex_lock(&sta->ampdu_mlme.mtx);
for (i = 0; i < IEEE80211_NUM_TIDS; i++)
___ieee80211_stop_rx_ba_session(sta, i, WLAN_BACK_RECIPIENT,
WLAN_REASON_QSTA_LEAVE_QBSS,
reason != AGG_STOP_DESTROY_STA &&
reason != AGG_STOP_PEER_REQUEST);
for (i = 0; i < IEEE80211_NUM_TIDS; i++)
___ieee80211_stop_tx_ba_session(sta, i, reason);
mutex_unlock(&sta->ampdu_mlme.mtx);
/*
* In case the tear down is part of a reconfigure due to HW restart
* request, it is possible that the low level driver requested to stop
* the BA session, so handle it to properly clean tid_tx data.
*/
	if (reason == AGG_STOP_DESTROY_STA) {
cancel_work_sync(&sta->ampdu_mlme.work);
mutex_lock(&sta->ampdu_mlme.mtx);
for (i = 0; i < IEEE80211_NUM_TIDS; i++) {
struct tid_ampdu_tx *tid_tx =
rcu_dereference_protected_tid_tx(sta, i);
if (!tid_tx)
continue;
if (test_and_clear_bit(HT_AGG_STATE_STOP_CB, &tid_tx->state))
ieee80211_stop_tx_ba_cb(sta, i, tid_tx);
}
mutex_unlock(&sta->ampdu_mlme.mtx);
}
}
void ieee80211_ba_session_work(struct work_struct *work)
{
struct sta_info *sta =
container_of(work, struct sta_info, ampdu_mlme.work);
struct tid_ampdu_tx *tid_tx;
bool blocked;
int tid;
/* When this flag is set, new sessions should be blocked. */
blocked = test_sta_flag(sta, WLAN_STA_BLOCK_BA);
mutex_lock(&sta->ampdu_mlme.mtx);
for (tid = 0; tid < IEEE80211_NUM_TIDS; tid++) {
if (test_and_clear_bit(tid, sta->ampdu_mlme.tid_rx_timer_expired))
___ieee80211_stop_rx_ba_session(
sta, tid, WLAN_BACK_RECIPIENT,
WLAN_REASON_QSTA_TIMEOUT, true);
if (test_and_clear_bit(tid,
sta->ampdu_mlme.tid_rx_stop_requested))
___ieee80211_stop_rx_ba_session(
sta, tid, WLAN_BACK_RECIPIENT,
WLAN_REASON_UNSPECIFIED, true);
if (!blocked &&
test_and_clear_bit(tid,
sta->ampdu_mlme.tid_rx_manage_offl))
___ieee80211_start_rx_ba_session(sta, 0, 0, 0, 1, tid,
IEEE80211_MAX_AMPDU_BUF_HT,
false, true, NULL);
if (test_and_clear_bit(tid + IEEE80211_NUM_TIDS,
sta->ampdu_mlme.tid_rx_manage_offl))
___ieee80211_stop_rx_ba_session(
sta, tid, WLAN_BACK_RECIPIENT,
0, false);
spin_lock_bh(&sta->lock);
tid_tx = sta->ampdu_mlme.tid_start_tx[tid];
if (!blocked && tid_tx) {
struct txq_info *txqi = to_txq_info(sta->sta.txq[tid]);
struct ieee80211_sub_if_data *sdata =
vif_to_sdata(txqi->txq.vif);
struct fq *fq = &sdata->local->fq;
spin_lock_bh(&fq->lock);
/* Allow only frags to be dequeued */
set_bit(IEEE80211_TXQ_STOP, &txqi->flags);
if (!skb_queue_empty(&txqi->frags)) {
/* Fragmented Tx is ongoing, wait for it to
* finish. Reschedule worker to retry later.
*/
spin_unlock_bh(&fq->lock);
spin_unlock_bh(&sta->lock);
/* Give the task working on the txq a chance
* to send out the queued frags
*/
synchronize_net();
mutex_unlock(&sta->ampdu_mlme.mtx);
ieee80211_queue_work(&sdata->local->hw, work);
return;
}
spin_unlock_bh(&fq->lock);
/*
* Assign it over to the normal tid_tx array
* where it "goes live".
*/
sta->ampdu_mlme.tid_start_tx[tid] = NULL;
/* could there be a race? */
if (sta->ampdu_mlme.tid_tx[tid])
kfree(tid_tx);
else
ieee80211_assign_tid_tx(sta, tid, tid_tx);
spin_unlock_bh(&sta->lock);
ieee80211_tx_ba_session_handle_start(sta, tid);
continue;
}
spin_unlock_bh(&sta->lock);
tid_tx = rcu_dereference_protected_tid_tx(sta, tid);
if (!tid_tx)
continue;
if (!blocked &&
test_and_clear_bit(HT_AGG_STATE_START_CB, &tid_tx->state))
ieee80211_start_tx_ba_cb(sta, tid, tid_tx);
if (test_and_clear_bit(HT_AGG_STATE_WANT_STOP, &tid_tx->state))
___ieee80211_stop_tx_ba_session(sta, tid,
AGG_STOP_LOCAL_REQUEST);
if (test_and_clear_bit(HT_AGG_STATE_STOP_CB, &tid_tx->state))
ieee80211_stop_tx_ba_cb(sta, tid, tid_tx);
}
mutex_unlock(&sta->ampdu_mlme.mtx);
}
void ieee80211_send_delba(struct ieee80211_sub_if_data *sdata,
const u8 *da, u16 tid,
u16 initiator, u16 reason_code)
{
struct ieee80211_local *local = sdata->local;
struct sk_buff *skb;
struct ieee80211_mgmt *mgmt;
u16 params;
skb = dev_alloc_skb(sizeof(*mgmt) + local->hw.extra_tx_headroom);
if (!skb)
return;
skb_reserve(skb, local->hw.extra_tx_headroom);
mgmt = skb_put_zero(skb, 24);
memcpy(mgmt->da, da, ETH_ALEN);
memcpy(mgmt->sa, sdata->vif.addr, ETH_ALEN);
if (sdata->vif.type == NL80211_IFTYPE_AP ||
sdata->vif.type == NL80211_IFTYPE_AP_VLAN ||
sdata->vif.type == NL80211_IFTYPE_MESH_POINT)
memcpy(mgmt->bssid, sdata->vif.addr, ETH_ALEN);
else if (sdata->vif.type == NL80211_IFTYPE_STATION)
memcpy(mgmt->bssid, sdata->deflink.u.mgd.bssid, ETH_ALEN);
else if (sdata->vif.type == NL80211_IFTYPE_ADHOC)
memcpy(mgmt->bssid, sdata->u.ibss.bssid, ETH_ALEN);
mgmt->frame_control = cpu_to_le16(IEEE80211_FTYPE_MGMT |
IEEE80211_STYPE_ACTION);
skb_put(skb, 1 + sizeof(mgmt->u.action.u.delba));
mgmt->u.action.category = WLAN_CATEGORY_BACK;
mgmt->u.action.u.delba.action_code = WLAN_ACTION_DELBA;
params = (u16)(initiator << 11); /* bit 11 initiator */
params |= (u16)(tid << 12); /* bit 15:12 TID number */
mgmt->u.action.u.delba.params = cpu_to_le16(params);
mgmt->u.action.u.delba.reason_code = cpu_to_le16(reason_code);
ieee80211_tx_skb(sdata, skb);
}
void ieee80211_process_delba(struct ieee80211_sub_if_data *sdata,
struct sta_info *sta,
struct ieee80211_mgmt *mgmt, size_t len)
{
u16 tid, params;
u16 initiator;
params = le16_to_cpu(mgmt->u.action.u.delba.params);
tid = (params & IEEE80211_DELBA_PARAM_TID_MASK) >> 12;
initiator = (params & IEEE80211_DELBA_PARAM_INITIATOR_MASK) >> 11;
ht_dbg_ratelimited(sdata, "delba from %pM (%s) tid %d reason code %d\n",
mgmt->sa, initiator ? "initiator" : "recipient",
tid,
le16_to_cpu(mgmt->u.action.u.delba.reason_code));
if (initiator == WLAN_BACK_INITIATOR)
__ieee80211_stop_rx_ba_session(sta, tid, WLAN_BACK_INITIATOR, 0,
true);
else
__ieee80211_stop_tx_ba_session(sta, tid, AGG_STOP_PEER_REQUEST);
}
enum nl80211_smps_mode
ieee80211_smps_mode_to_smps_mode(enum ieee80211_smps_mode smps)
{
switch (smps) {
case IEEE80211_SMPS_OFF:
return NL80211_SMPS_OFF;
case IEEE80211_SMPS_STATIC:
return NL80211_SMPS_STATIC;
case IEEE80211_SMPS_DYNAMIC:
return NL80211_SMPS_DYNAMIC;
default:
return NL80211_SMPS_OFF;
}
}
int ieee80211_send_smps_action(struct ieee80211_sub_if_data *sdata,
enum ieee80211_smps_mode smps, const u8 *da,
const u8 *bssid)
{
struct ieee80211_local *local = sdata->local;
struct sk_buff *skb;
struct ieee80211_mgmt *action_frame;
/* 27 = header + category + action + smps mode */
skb = dev_alloc_skb(27 + local->hw.extra_tx_headroom);
if (!skb)
return -ENOMEM;
skb_reserve(skb, local->hw.extra_tx_headroom);
action_frame = skb_put(skb, 27);
memcpy(action_frame->da, da, ETH_ALEN);
memcpy(action_frame->sa, sdata->dev->dev_addr, ETH_ALEN);
memcpy(action_frame->bssid, bssid, ETH_ALEN);
action_frame->frame_control = cpu_to_le16(IEEE80211_FTYPE_MGMT |
IEEE80211_STYPE_ACTION);
action_frame->u.action.category = WLAN_CATEGORY_HT;
action_frame->u.action.u.ht_smps.action = WLAN_HT_ACTION_SMPS;
switch (smps) {
case IEEE80211_SMPS_AUTOMATIC:
case IEEE80211_SMPS_NUM_MODES:
WARN_ON(1);
fallthrough;
case IEEE80211_SMPS_OFF:
action_frame->u.action.u.ht_smps.smps_control =
WLAN_HT_SMPS_CONTROL_DISABLED;
break;
case IEEE80211_SMPS_STATIC:
action_frame->u.action.u.ht_smps.smps_control =
WLAN_HT_SMPS_CONTROL_STATIC;
break;
case IEEE80211_SMPS_DYNAMIC:
action_frame->u.action.u.ht_smps.smps_control =
WLAN_HT_SMPS_CONTROL_DYNAMIC;
break;
}
/* we'll do more on status of this frame */
IEEE80211_SKB_CB(skb)->flags |= IEEE80211_TX_CTL_REQ_TX_STATUS;
ieee80211_tx_skb(sdata, skb);
return 0;
}
void ieee80211_request_smps(struct ieee80211_vif *vif, unsigned int link_id,
enum ieee80211_smps_mode smps_mode)
{
struct ieee80211_sub_if_data *sdata = vif_to_sdata(vif);
struct ieee80211_link_data *link;
if (WARN_ON_ONCE(vif->type != NL80211_IFTYPE_STATION))
return;
rcu_read_lock();
link = rcu_dereference(sdata->link[link_id]);
if (WARN_ON(!link))
goto out;
if (link->u.mgd.driver_smps_mode == smps_mode)
goto out;
link->u.mgd.driver_smps_mode = smps_mode;
wiphy_work_queue(sdata->local->hw.wiphy,
&link->u.mgd.request_smps_work);
out:
rcu_read_unlock();
}
/* this might change ... don't want non-open drivers using it */
EXPORT_SYMBOL_GPL(ieee80211_request_smps);
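/*
 * Illustrative usage sketch (not part of the original file): a driver on a
 * managed (station) interface could ask for dynamic SMPS on the default
 * link; link_id 0 is an assumption for a non-MLO setup:
 *
 *	ieee80211_request_smps(vif, 0, IEEE80211_SMPS_DYNAMIC);
 *
 * The actual change is then negotiated from request_smps_work.
 */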
| linux-master | net/mac80211/ht.c |
// SPDX-License-Identifier: GPL-2.0-only
/*
* Copyright (c) 2008, 2009 open80211s Ltd.
* Copyright (C) 2019, 2021-2023 Intel Corporation
* Author: Luis Carlos Cobo <[email protected]>
*/
#include <linux/gfp.h>
#include <linux/kernel.h>
#include <linux/random.h>
#include <linux/rculist.h>
#include "ieee80211_i.h"
#include "rate.h"
#include "mesh.h"
#define PLINK_CNF_AID(mgmt) ((mgmt)->u.action.u.self_prot.variable + 2)
#define PLINK_GET_LLID(p) (p + 2)
#define PLINK_GET_PLID(p) (p + 4)
#define mod_plink_timer(s, t) (mod_timer(&s->mesh->plink_timer, \
jiffies + msecs_to_jiffies(t)))
enum plink_event {
PLINK_UNDEFINED,
OPN_ACPT,
OPN_RJCT,
OPN_IGNR,
CNF_ACPT,
CNF_RJCT,
CNF_IGNR,
CLS_ACPT,
CLS_IGNR
};
static const char * const mplstates[] = {
[NL80211_PLINK_LISTEN] = "LISTEN",
[NL80211_PLINK_OPN_SNT] = "OPN-SNT",
[NL80211_PLINK_OPN_RCVD] = "OPN-RCVD",
[NL80211_PLINK_CNF_RCVD] = "CNF_RCVD",
[NL80211_PLINK_ESTAB] = "ESTAB",
[NL80211_PLINK_HOLDING] = "HOLDING",
[NL80211_PLINK_BLOCKED] = "BLOCKED"
};
static const char * const mplevents[] = {
[PLINK_UNDEFINED] = "NONE",
[OPN_ACPT] = "OPN_ACPT",
[OPN_RJCT] = "OPN_RJCT",
[OPN_IGNR] = "OPN_IGNR",
[CNF_ACPT] = "CNF_ACPT",
[CNF_RJCT] = "CNF_RJCT",
[CNF_IGNR] = "CNF_IGNR",
[CLS_ACPT] = "CLS_ACPT",
[CLS_IGNR] = "CLS_IGNR"
};
/* We only need a valid sta if the user configured a minimum rssi_threshold. */
static bool rssi_threshold_check(struct ieee80211_sub_if_data *sdata,
struct sta_info *sta)
{
s32 rssi_threshold = sdata->u.mesh.mshcfg.rssi_threshold;
return rssi_threshold == 0 ||
(sta &&
(s8)-ewma_signal_read(&sta->deflink.rx_stats_avg.signal) >
rssi_threshold);
}
/**
* mesh_plink_fsm_restart - restart a mesh peer link finite state machine
*
* @sta: mesh peer link to restart
*
* Locking: this function must be called holding sta->mesh->plink_lock
*/
static inline void mesh_plink_fsm_restart(struct sta_info *sta)
{
lockdep_assert_held(&sta->mesh->plink_lock);
sta->mesh->plink_state = NL80211_PLINK_LISTEN;
sta->mesh->llid = sta->mesh->plid = sta->mesh->reason = 0;
sta->mesh->plink_retries = 0;
}
/*
* mesh_set_short_slot_time - enable / disable ERP short slot time.
*
 * The standard indirectly mandates that mesh STAs turn off short slot time by
* disallowing advertising this (802.11-2012 8.4.1.4), but that doesn't mean we
* can't be sneaky about it. Enable short slot time if all mesh STAs in the
* MBSS support ERP rates.
*
* Returns BSS_CHANGED_ERP_SLOT or 0 for no change.
*/
static u64 mesh_set_short_slot_time(struct ieee80211_sub_if_data *sdata)
{
struct ieee80211_local *local = sdata->local;
struct ieee80211_supported_band *sband;
struct sta_info *sta;
u32 erp_rates = 0;
u64 changed = 0;
int i;
bool short_slot = false;
sband = ieee80211_get_sband(sdata);
if (!sband)
return changed;
if (sband->band == NL80211_BAND_5GHZ) {
/* (IEEE 802.11-2012 19.4.5) */
short_slot = true;
goto out;
} else if (sband->band != NL80211_BAND_2GHZ) {
goto out;
}
for (i = 0; i < sband->n_bitrates; i++)
if (sband->bitrates[i].flags & IEEE80211_RATE_ERP_G)
erp_rates |= BIT(i);
if (!erp_rates)
goto out;
rcu_read_lock();
list_for_each_entry_rcu(sta, &local->sta_list, list) {
if (sdata != sta->sdata ||
sta->mesh->plink_state != NL80211_PLINK_ESTAB)
continue;
short_slot = false;
if (erp_rates & sta->sta.deflink.supp_rates[sband->band])
short_slot = true;
else
break;
}
rcu_read_unlock();
out:
if (sdata->vif.bss_conf.use_short_slot != short_slot) {
sdata->vif.bss_conf.use_short_slot = short_slot;
changed = BSS_CHANGED_ERP_SLOT;
mpl_dbg(sdata, "mesh_plink %pM: ERP short slot time %d\n",
sdata->vif.addr, short_slot);
}
return changed;
}
/**
* mesh_set_ht_prot_mode - set correct HT protection mode
* @sdata: the (mesh) interface to handle
*
 * Section 9.23.3.5 of IEEE 802.11-2012 describes the protection rules for HT
 * mesh STAs in an MBSS. Three HT protection modes are supported for now: non-HT
 * mixed mode, 20MHz-protection and no-protection mode. Non-HT mixed mode is
* selected if any non-HT peers are present in our MBSS. 20MHz-protection mode
* is selected if all peers in our 20/40MHz MBSS support HT and at least one
* HT20 peer is present. Otherwise no-protection mode is selected.
*/
static u64 mesh_set_ht_prot_mode(struct ieee80211_sub_if_data *sdata)
{
struct ieee80211_local *local = sdata->local;
struct sta_info *sta;
u16 ht_opmode;
bool non_ht_sta = false, ht20_sta = false;
switch (sdata->vif.bss_conf.chandef.width) {
case NL80211_CHAN_WIDTH_20_NOHT:
case NL80211_CHAN_WIDTH_5:
case NL80211_CHAN_WIDTH_10:
return 0;
default:
break;
}
rcu_read_lock();
list_for_each_entry_rcu(sta, &local->sta_list, list) {
if (sdata != sta->sdata ||
sta->mesh->plink_state != NL80211_PLINK_ESTAB)
continue;
if (sta->sta.deflink.bandwidth > IEEE80211_STA_RX_BW_20)
continue;
if (!sta->sta.deflink.ht_cap.ht_supported) {
mpl_dbg(sdata, "nonHT sta (%pM) is present\n",
sta->sta.addr);
non_ht_sta = true;
break;
}
mpl_dbg(sdata, "HT20 sta (%pM) is present\n", sta->sta.addr);
ht20_sta = true;
}
rcu_read_unlock();
if (non_ht_sta)
ht_opmode = IEEE80211_HT_OP_MODE_PROTECTION_NONHT_MIXED;
else if (ht20_sta &&
sdata->vif.bss_conf.chandef.width > NL80211_CHAN_WIDTH_20)
ht_opmode = IEEE80211_HT_OP_MODE_PROTECTION_20MHZ;
else
ht_opmode = IEEE80211_HT_OP_MODE_PROTECTION_NONE;
if (sdata->vif.bss_conf.ht_operation_mode == ht_opmode)
return 0;
sdata->vif.bss_conf.ht_operation_mode = ht_opmode;
sdata->u.mesh.mshcfg.ht_opmode = ht_opmode;
mpl_dbg(sdata, "selected new HT protection mode %d\n", ht_opmode);
return BSS_CHANGED_HT;
}
static int mesh_plink_frame_tx(struct ieee80211_sub_if_data *sdata,
struct sta_info *sta,
enum ieee80211_self_protected_actioncode action,
u8 *da, u16 llid, u16 plid, u16 reason)
{
struct ieee80211_local *local = sdata->local;
struct sk_buff *skb;
struct ieee80211_tx_info *info;
struct ieee80211_mgmt *mgmt;
bool include_plid = false;
u16 peering_proto = 0;
u8 *pos, ie_len = 4;
u8 ie_len_he_cap, ie_len_eht_cap;
int hdr_len = offsetofend(struct ieee80211_mgmt, u.action.u.self_prot);
int err = -ENOMEM;
ie_len_he_cap = ieee80211_ie_len_he_cap(sdata,
NL80211_IFTYPE_MESH_POINT);
ie_len_eht_cap = ieee80211_ie_len_eht_cap(sdata,
NL80211_IFTYPE_MESH_POINT);
skb = dev_alloc_skb(local->tx_headroom +
hdr_len +
2 + /* capability info */
2 + /* AID */
2 + 8 + /* supported rates */
2 + (IEEE80211_MAX_SUPP_RATES - 8) +
2 + sdata->u.mesh.mesh_id_len +
2 + sizeof(struct ieee80211_meshconf_ie) +
2 + sizeof(struct ieee80211_ht_cap) +
2 + sizeof(struct ieee80211_ht_operation) +
2 + sizeof(struct ieee80211_vht_cap) +
2 + sizeof(struct ieee80211_vht_operation) +
ie_len_he_cap +
2 + 1 + sizeof(struct ieee80211_he_operation) +
sizeof(struct ieee80211_he_6ghz_oper) +
2 + 1 + sizeof(struct ieee80211_he_6ghz_capa) +
ie_len_eht_cap +
2 + 1 + offsetof(struct ieee80211_eht_operation, optional) +
offsetof(struct ieee80211_eht_operation_info, optional) +
2 + 8 + /* peering IE */
sdata->u.mesh.ie_len);
if (!skb)
return err;
info = IEEE80211_SKB_CB(skb);
skb_reserve(skb, local->tx_headroom);
mgmt = skb_put_zero(skb, hdr_len);
mgmt->frame_control = cpu_to_le16(IEEE80211_FTYPE_MGMT |
IEEE80211_STYPE_ACTION);
memcpy(mgmt->da, da, ETH_ALEN);
memcpy(mgmt->sa, sdata->vif.addr, ETH_ALEN);
memcpy(mgmt->bssid, sdata->vif.addr, ETH_ALEN);
mgmt->u.action.category = WLAN_CATEGORY_SELF_PROTECTED;
mgmt->u.action.u.self_prot.action_code = action;
if (action != WLAN_SP_MESH_PEERING_CLOSE) {
struct ieee80211_supported_band *sband;
enum nl80211_band band;
sband = ieee80211_get_sband(sdata);
if (!sband) {
err = -EINVAL;
goto free;
}
band = sband->band;
/* capability info */
pos = skb_put_zero(skb, 2);
if (action == WLAN_SP_MESH_PEERING_CONFIRM) {
/* AID */
pos = skb_put(skb, 2);
put_unaligned_le16(sta->sta.aid, pos);
}
if (ieee80211_add_srates_ie(sdata, skb, true, band) ||
ieee80211_add_ext_srates_ie(sdata, skb, true, band) ||
mesh_add_rsn_ie(sdata, skb) ||
mesh_add_meshid_ie(sdata, skb) ||
mesh_add_meshconf_ie(sdata, skb))
goto free;
} else { /* WLAN_SP_MESH_PEERING_CLOSE */
info->flags |= IEEE80211_TX_CTL_NO_ACK;
if (mesh_add_meshid_ie(sdata, skb))
goto free;
}
/* Add Mesh Peering Management element */
switch (action) {
case WLAN_SP_MESH_PEERING_OPEN:
break;
case WLAN_SP_MESH_PEERING_CONFIRM:
ie_len += 2;
include_plid = true;
break;
case WLAN_SP_MESH_PEERING_CLOSE:
if (plid) {
ie_len += 2;
include_plid = true;
}
ie_len += 2; /* reason code */
break;
default:
err = -EINVAL;
goto free;
}
if (WARN_ON(skb_tailroom(skb) < 2 + ie_len))
goto free;
pos = skb_put(skb, 2 + ie_len);
*pos++ = WLAN_EID_PEER_MGMT;
*pos++ = ie_len;
memcpy(pos, &peering_proto, 2);
pos += 2;
put_unaligned_le16(llid, pos);
pos += 2;
if (include_plid) {
put_unaligned_le16(plid, pos);
pos += 2;
}
if (action == WLAN_SP_MESH_PEERING_CLOSE) {
put_unaligned_le16(reason, pos);
pos += 2;
}
if (action != WLAN_SP_MESH_PEERING_CLOSE) {
if (mesh_add_ht_cap_ie(sdata, skb) ||
mesh_add_ht_oper_ie(sdata, skb) ||
mesh_add_vht_cap_ie(sdata, skb) ||
mesh_add_vht_oper_ie(sdata, skb) ||
mesh_add_he_cap_ie(sdata, skb, ie_len_he_cap) ||
mesh_add_he_oper_ie(sdata, skb) ||
mesh_add_he_6ghz_cap_ie(sdata, skb) ||
mesh_add_eht_cap_ie(sdata, skb, ie_len_eht_cap) ||
mesh_add_eht_oper_ie(sdata, skb))
goto free;
}
if (mesh_add_vendor_ies(sdata, skb))
goto free;
ieee80211_tx_skb(sdata, skb);
return 0;
free:
kfree_skb(skb);
return err;
}
/**
* __mesh_plink_deactivate - deactivate mesh peer link
*
* @sta: mesh peer link to deactivate
*
* Mesh paths with this peer as next hop should be flushed
* by the caller outside of plink_lock.
*
* Returns beacon changed flag if the beacon content changed.
*
* Locking: the caller must hold sta->mesh->plink_lock
*/
static u64 __mesh_plink_deactivate(struct sta_info *sta)
{
struct ieee80211_sub_if_data *sdata = sta->sdata;
u64 changed = 0;
lockdep_assert_held(&sta->mesh->plink_lock);
if (sta->mesh->plink_state == NL80211_PLINK_ESTAB)
changed = mesh_plink_dec_estab_count(sdata);
sta->mesh->plink_state = NL80211_PLINK_BLOCKED;
ieee80211_mps_sta_status_update(sta);
changed |= ieee80211_mps_set_sta_local_pm(sta,
NL80211_MESH_POWER_UNKNOWN);
return changed;
}
/**
* mesh_plink_deactivate - deactivate mesh peer link
*
* @sta: mesh peer link to deactivate
*
* All mesh paths with this peer as next hop will be flushed
*/
u64 mesh_plink_deactivate(struct sta_info *sta)
{
struct ieee80211_sub_if_data *sdata = sta->sdata;
u64 changed;
spin_lock_bh(&sta->mesh->plink_lock);
changed = __mesh_plink_deactivate(sta);
if (!sdata->u.mesh.user_mpm) {
sta->mesh->reason = WLAN_REASON_MESH_PEER_CANCELED;
mesh_plink_frame_tx(sdata, sta, WLAN_SP_MESH_PEERING_CLOSE,
sta->sta.addr, sta->mesh->llid,
sta->mesh->plid, sta->mesh->reason);
}
spin_unlock_bh(&sta->mesh->plink_lock);
if (!sdata->u.mesh.user_mpm)
del_timer_sync(&sta->mesh->plink_timer);
mesh_path_flush_by_nexthop(sta);
/* make sure no readers can access nexthop sta from here on */
synchronize_net();
return changed;
}
static void mesh_sta_info_init(struct ieee80211_sub_if_data *sdata,
struct sta_info *sta,
struct ieee802_11_elems *elems)
{
struct ieee80211_local *local = sdata->local;
struct ieee80211_supported_band *sband;
u32 rates, basic_rates = 0, changed = 0;
enum ieee80211_sta_rx_bandwidth bw = sta->sta.deflink.bandwidth;
sband = ieee80211_get_sband(sdata);
if (!sband)
return;
rates = ieee80211_sta_get_rates(sdata, elems, sband->band,
&basic_rates);
spin_lock_bh(&sta->mesh->plink_lock);
sta->deflink.rx_stats.last_rx = jiffies;
/* rates and capabilities don't change during peering */
if (sta->mesh->plink_state == NL80211_PLINK_ESTAB &&
sta->mesh->processed_beacon)
goto out;
sta->mesh->processed_beacon = true;
if (sta->sta.deflink.supp_rates[sband->band] != rates)
changed |= IEEE80211_RC_SUPP_RATES_CHANGED;
sta->sta.deflink.supp_rates[sband->band] = rates;
if (ieee80211_ht_cap_ie_to_sta_ht_cap(sdata, sband,
elems->ht_cap_elem,
&sta->deflink))
changed |= IEEE80211_RC_BW_CHANGED;
ieee80211_vht_cap_ie_to_sta_vht_cap(sdata, sband,
elems->vht_cap_elem,
&sta->deflink);
ieee80211_he_cap_ie_to_sta_he_cap(sdata, sband, elems->he_cap,
elems->he_cap_len,
elems->he_6ghz_capa,
&sta->deflink);
ieee80211_eht_cap_ie_to_sta_eht_cap(sdata, sband, elems->he_cap,
elems->he_cap_len,
elems->eht_cap, elems->eht_cap_len,
&sta->deflink);
if (bw != sta->sta.deflink.bandwidth)
changed |= IEEE80211_RC_BW_CHANGED;
/* HT peer is operating 20MHz-only */
if (elems->ht_operation &&
!(elems->ht_operation->ht_param &
IEEE80211_HT_PARAM_CHAN_WIDTH_ANY)) {
if (sta->sta.deflink.bandwidth != IEEE80211_STA_RX_BW_20)
changed |= IEEE80211_RC_BW_CHANGED;
sta->sta.deflink.bandwidth = IEEE80211_STA_RX_BW_20;
}
if (!test_sta_flag(sta, WLAN_STA_RATE_CONTROL))
rate_control_rate_init(sta);
else
rate_control_rate_update(local, sband, sta, 0, changed);
out:
spin_unlock_bh(&sta->mesh->plink_lock);
}
static int mesh_allocate_aid(struct ieee80211_sub_if_data *sdata)
{
struct sta_info *sta;
unsigned long *aid_map;
int aid;
aid_map = bitmap_zalloc(IEEE80211_MAX_AID + 1, GFP_KERNEL);
if (!aid_map)
return -ENOMEM;
/* reserve aid 0 for mcast indication */
__set_bit(0, aid_map);
rcu_read_lock();
list_for_each_entry_rcu(sta, &sdata->local->sta_list, list)
__set_bit(sta->sta.aid, aid_map);
rcu_read_unlock();
aid = find_first_zero_bit(aid_map, IEEE80211_MAX_AID + 1);
bitmap_free(aid_map);
if (aid > IEEE80211_MAX_AID)
return -ENOBUFS;
return aid;
}
static struct sta_info *
__mesh_sta_info_alloc(struct ieee80211_sub_if_data *sdata, u8 *hw_addr)
{
struct sta_info *sta;
int aid;
if (sdata->local->num_sta >= MESH_MAX_PLINKS)
return NULL;
aid = mesh_allocate_aid(sdata);
if (aid < 0)
return NULL;
sta = sta_info_alloc(sdata, hw_addr, GFP_KERNEL);
if (!sta)
return NULL;
sta->mesh->plink_state = NL80211_PLINK_LISTEN;
sta->sta.wme = true;
sta->sta.aid = aid;
sta_info_pre_move_state(sta, IEEE80211_STA_AUTH);
sta_info_pre_move_state(sta, IEEE80211_STA_ASSOC);
sta_info_pre_move_state(sta, IEEE80211_STA_AUTHORIZED);
return sta;
}
static struct sta_info *
mesh_sta_info_alloc(struct ieee80211_sub_if_data *sdata, u8 *addr,
struct ieee802_11_elems *elems,
struct ieee80211_rx_status *rx_status)
{
struct sta_info *sta = NULL;
/* Userspace handles station allocation */
if (sdata->u.mesh.user_mpm ||
sdata->u.mesh.security & IEEE80211_MESH_SEC_AUTHED) {
if (mesh_peer_accepts_plinks(elems) &&
mesh_plink_availables(sdata)) {
int sig = 0;
if (ieee80211_hw_check(&sdata->local->hw, SIGNAL_DBM))
sig = rx_status->signal;
cfg80211_notify_new_peer_candidate(sdata->dev, addr,
elems->ie_start,
elems->total_len,
sig, GFP_KERNEL);
}
} else
sta = __mesh_sta_info_alloc(sdata, addr);
return sta;
}
/*
* mesh_sta_info_get - return mesh sta info entry for @addr.
*
* @sdata: local meshif
* @addr: peer's address
* @elems: IEs from beacon or mesh peering frame.
* @rx_status: rx status for the frame for signal reporting
*
 * Return an existing or newly allocated sta_info under the RCU read lock,
 * (re)initialized with the given IEs.
*/
static struct sta_info *
mesh_sta_info_get(struct ieee80211_sub_if_data *sdata,
u8 *addr, struct ieee802_11_elems *elems,
struct ieee80211_rx_status *rx_status) __acquires(RCU)
{
struct sta_info *sta = NULL;
rcu_read_lock();
sta = sta_info_get(sdata, addr);
if (sta) {
mesh_sta_info_init(sdata, sta, elems);
} else {
rcu_read_unlock();
/* can't run atomic */
sta = mesh_sta_info_alloc(sdata, addr, elems, rx_status);
if (!sta) {
rcu_read_lock();
return NULL;
}
mesh_sta_info_init(sdata, sta, elems);
if (sta_info_insert_rcu(sta))
return NULL;
}
return sta;
}
/*
* mesh_neighbour_update - update or initialize new mesh neighbor.
*
* @sdata: local meshif
 * @hw_addr: peer's address
* @elems: IEs from beacon or mesh peering frame
* @rx_status: rx status for the frame for signal reporting
*
* Initiates peering if appropriate.
*/
void mesh_neighbour_update(struct ieee80211_sub_if_data *sdata,
u8 *hw_addr,
struct ieee802_11_elems *elems,
struct ieee80211_rx_status *rx_status)
{
struct sta_info *sta;
u64 changed = 0;
sta = mesh_sta_info_get(sdata, hw_addr, elems, rx_status);
if (!sta)
goto out;
sta->mesh->connected_to_gate = elems->mesh_config->meshconf_form &
IEEE80211_MESHCONF_FORM_CONNECTED_TO_GATE;
if (mesh_peer_accepts_plinks(elems) &&
sta->mesh->plink_state == NL80211_PLINK_LISTEN &&
sdata->u.mesh.accepting_plinks &&
sdata->u.mesh.mshcfg.auto_open_plinks &&
rssi_threshold_check(sdata, sta))
changed = mesh_plink_open(sta);
ieee80211_mps_frame_release(sta, elems);
out:
rcu_read_unlock();
ieee80211_mbss_info_change_notify(sdata, changed);
}
void mesh_plink_timer(struct timer_list *t)
{
struct mesh_sta *mesh = from_timer(mesh, t, plink_timer);
struct sta_info *sta;
u16 reason = 0;
struct ieee80211_sub_if_data *sdata;
struct mesh_config *mshcfg;
enum ieee80211_self_protected_actioncode action = 0;
/*
* This STA is valid because sta_info_destroy() will
* del_timer_sync() this timer after having made sure
 * it cannot be re-added (by deleting the plink).
*/
sta = mesh->plink_sta;
if (sta->sdata->local->quiescing)
return;
spin_lock_bh(&sta->mesh->plink_lock);
/* If a timer fires just before a state transition on another CPU,
* we may have already extended the timeout and changed state by the
* time we've acquired the lock and arrived here. In that case,
* skip this timer and wait for the new one.
*/
if (time_before(jiffies, sta->mesh->plink_timer.expires)) {
mpl_dbg(sta->sdata,
"Ignoring timer for %pM in state %s (timer adjusted)",
sta->sta.addr, mplstates[sta->mesh->plink_state]);
spin_unlock_bh(&sta->mesh->plink_lock);
return;
}
/* del_timer() and handler may race when entering these states */
if (sta->mesh->plink_state == NL80211_PLINK_LISTEN ||
sta->mesh->plink_state == NL80211_PLINK_ESTAB) {
mpl_dbg(sta->sdata,
"Ignoring timer for %pM in state %s (timer deleted)",
sta->sta.addr, mplstates[sta->mesh->plink_state]);
spin_unlock_bh(&sta->mesh->plink_lock);
return;
}
mpl_dbg(sta->sdata,
"Mesh plink timer for %pM fired on state %s\n",
sta->sta.addr, mplstates[sta->mesh->plink_state]);
sdata = sta->sdata;
mshcfg = &sdata->u.mesh.mshcfg;
switch (sta->mesh->plink_state) {
case NL80211_PLINK_OPN_RCVD:
case NL80211_PLINK_OPN_SNT:
/* retry timer */
if (sta->mesh->plink_retries < mshcfg->dot11MeshMaxRetries) {
u32 rand;
mpl_dbg(sta->sdata,
"Mesh plink for %pM (retry, timeout): %d %d\n",
sta->sta.addr, sta->mesh->plink_retries,
sta->mesh->plink_timeout);
get_random_bytes(&rand, sizeof(u32));
sta->mesh->plink_timeout = sta->mesh->plink_timeout +
rand % sta->mesh->plink_timeout;
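			/*
			 * i.e. the next timeout is (approximately) uniform
			 * in [plink_timeout, 2 * plink_timeout)
			 */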
++sta->mesh->plink_retries;
mod_plink_timer(sta, sta->mesh->plink_timeout);
action = WLAN_SP_MESH_PEERING_OPEN;
break;
}
reason = WLAN_REASON_MESH_MAX_RETRIES;
fallthrough;
case NL80211_PLINK_CNF_RCVD:
/* confirm timer */
if (!reason)
reason = WLAN_REASON_MESH_CONFIRM_TIMEOUT;
sta->mesh->plink_state = NL80211_PLINK_HOLDING;
mod_plink_timer(sta, mshcfg->dot11MeshHoldingTimeout);
action = WLAN_SP_MESH_PEERING_CLOSE;
break;
case NL80211_PLINK_HOLDING:
/* holding timer */
del_timer(&sta->mesh->plink_timer);
mesh_plink_fsm_restart(sta);
break;
default:
break;
}
spin_unlock_bh(&sta->mesh->plink_lock);
if (action)
mesh_plink_frame_tx(sdata, sta, action, sta->sta.addr,
sta->mesh->llid, sta->mesh->plid, reason);
}
static inline void mesh_plink_timer_set(struct sta_info *sta, u32 timeout)
{
sta->mesh->plink_timeout = timeout;
mod_timer(&sta->mesh->plink_timer, jiffies + msecs_to_jiffies(timeout));
}
static bool llid_in_use(struct ieee80211_sub_if_data *sdata,
u16 llid)
{
struct ieee80211_local *local = sdata->local;
bool in_use = false;
struct sta_info *sta;
rcu_read_lock();
list_for_each_entry_rcu(sta, &local->sta_list, list) {
if (sdata != sta->sdata)
continue;
if (!memcmp(&sta->mesh->llid, &llid, sizeof(llid))) {
in_use = true;
break;
}
}
rcu_read_unlock();
return in_use;
}
static u16 mesh_get_new_llid(struct ieee80211_sub_if_data *sdata)
{
u16 llid;
do {
get_random_bytes(&llid, sizeof(llid));
} while (llid_in_use(sdata, llid));
return llid;
}
u64 mesh_plink_open(struct sta_info *sta)
{
struct ieee80211_sub_if_data *sdata = sta->sdata;
u64 changed;
if (!test_sta_flag(sta, WLAN_STA_AUTH))
return 0;
spin_lock_bh(&sta->mesh->plink_lock);
sta->mesh->llid = mesh_get_new_llid(sdata);
if (sta->mesh->plink_state != NL80211_PLINK_LISTEN &&
sta->mesh->plink_state != NL80211_PLINK_BLOCKED) {
spin_unlock_bh(&sta->mesh->plink_lock);
return 0;
}
sta->mesh->plink_state = NL80211_PLINK_OPN_SNT;
mesh_plink_timer_set(sta, sdata->u.mesh.mshcfg.dot11MeshRetryTimeout);
spin_unlock_bh(&sta->mesh->plink_lock);
mpl_dbg(sdata,
"Mesh plink: starting establishment with %pM\n",
sta->sta.addr);
/* set the non-peer mode to active during peering */
changed = ieee80211_mps_local_status_update(sdata);
mesh_plink_frame_tx(sdata, sta, WLAN_SP_MESH_PEERING_OPEN,
sta->sta.addr, sta->mesh->llid, 0, 0);
return changed;
}
u64 mesh_plink_block(struct sta_info *sta)
{
u64 changed;
spin_lock_bh(&sta->mesh->plink_lock);
changed = __mesh_plink_deactivate(sta);
sta->mesh->plink_state = NL80211_PLINK_BLOCKED;
spin_unlock_bh(&sta->mesh->plink_lock);
mesh_path_flush_by_nexthop(sta);
return changed;
}
static void mesh_plink_close(struct ieee80211_sub_if_data *sdata,
struct sta_info *sta,
enum plink_event event)
{
struct mesh_config *mshcfg = &sdata->u.mesh.mshcfg;
u16 reason = (event == CLS_ACPT) ?
WLAN_REASON_MESH_CLOSE : WLAN_REASON_MESH_CONFIG;
sta->mesh->reason = reason;
sta->mesh->plink_state = NL80211_PLINK_HOLDING;
mod_plink_timer(sta, mshcfg->dot11MeshHoldingTimeout);
}
static u64 mesh_plink_establish(struct ieee80211_sub_if_data *sdata,
struct sta_info *sta)
{
struct mesh_config *mshcfg = &sdata->u.mesh.mshcfg;
u64 changed = 0;
del_timer(&sta->mesh->plink_timer);
sta->mesh->plink_state = NL80211_PLINK_ESTAB;
changed |= mesh_plink_inc_estab_count(sdata);
changed |= mesh_set_ht_prot_mode(sdata);
changed |= mesh_set_short_slot_time(sdata);
mpl_dbg(sdata, "Mesh plink with %pM ESTABLISHED\n", sta->sta.addr);
ieee80211_mps_sta_status_update(sta);
changed |= ieee80211_mps_set_sta_local_pm(sta, mshcfg->power_mode);
return changed;
}
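/*
 * Condensed summary of the transitions implemented by mesh_plink_fsm() below,
 * derived from the code (the function itself is authoritative):
 *
 *	LISTEN:   OPN_ACPT -> OPN_RCVD, send OPEN (+ CONFIRM)
 *	          CLS_ACPT -> restart
 *	OPN_SNT:  OPN_ACPT -> OPN_RCVD, send CONFIRM
 *	          CNF_ACPT -> CNF_RCVD
 *	OPN_RCVD: OPN_ACPT -> send CONFIRM (state unchanged)
 *	          CNF_ACPT -> ESTAB
 *	CNF_RCVD: OPN_ACPT -> ESTAB, send CONFIRM
 *	ESTAB:    OPN_ACPT -> send CONFIRM
 *	          CLS_ACPT -> HOLDING, send CLOSE, flush paths
 *	HOLDING:  CLS_ACPT -> LISTEN (restart)
 *	          OPN_ACPT/CNF_ACPT/OPN_RJCT/CNF_RJCT -> resend CLOSE
 *	Any OPN_RJCT/CNF_RJCT/CLS_ACPT in OPN_SNT, OPN_RCVD or CNF_RCVD
 *	moves to HOLDING and sends CLOSE.
 */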
/**
* mesh_plink_fsm - step @sta MPM based on @event
*
* @sdata: interface
* @sta: mesh neighbor
* @event: peering event
*
* Return: changed MBSS flags
*/
static u64 mesh_plink_fsm(struct ieee80211_sub_if_data *sdata,
struct sta_info *sta, enum plink_event event)
{
struct mesh_config *mshcfg = &sdata->u.mesh.mshcfg;
enum ieee80211_self_protected_actioncode action = 0;
u64 changed = 0;
bool flush = false;
mpl_dbg(sdata, "peer %pM in state %s got event %s\n", sta->sta.addr,
mplstates[sta->mesh->plink_state], mplevents[event]);
spin_lock_bh(&sta->mesh->plink_lock);
switch (sta->mesh->plink_state) {
case NL80211_PLINK_LISTEN:
switch (event) {
case CLS_ACPT:
mesh_plink_fsm_restart(sta);
break;
case OPN_ACPT:
sta->mesh->plink_state = NL80211_PLINK_OPN_RCVD;
sta->mesh->llid = mesh_get_new_llid(sdata);
mesh_plink_timer_set(sta,
mshcfg->dot11MeshRetryTimeout);
/* set the non-peer mode to active during peering */
changed |= ieee80211_mps_local_status_update(sdata);
action = WLAN_SP_MESH_PEERING_OPEN;
break;
default:
break;
}
break;
case NL80211_PLINK_OPN_SNT:
switch (event) {
case OPN_RJCT:
case CNF_RJCT:
case CLS_ACPT:
mesh_plink_close(sdata, sta, event);
action = WLAN_SP_MESH_PEERING_CLOSE;
break;
case OPN_ACPT:
/* retry timer is left untouched */
sta->mesh->plink_state = NL80211_PLINK_OPN_RCVD;
action = WLAN_SP_MESH_PEERING_CONFIRM;
break;
case CNF_ACPT:
sta->mesh->plink_state = NL80211_PLINK_CNF_RCVD;
mod_plink_timer(sta, mshcfg->dot11MeshConfirmTimeout);
break;
default:
break;
}
break;
case NL80211_PLINK_OPN_RCVD:
switch (event) {
case OPN_RJCT:
case CNF_RJCT:
case CLS_ACPT:
mesh_plink_close(sdata, sta, event);
action = WLAN_SP_MESH_PEERING_CLOSE;
break;
case OPN_ACPT:
action = WLAN_SP_MESH_PEERING_CONFIRM;
break;
case CNF_ACPT:
changed |= mesh_plink_establish(sdata, sta);
break;
default:
break;
}
break;
case NL80211_PLINK_CNF_RCVD:
switch (event) {
case OPN_RJCT:
case CNF_RJCT:
case CLS_ACPT:
mesh_plink_close(sdata, sta, event);
action = WLAN_SP_MESH_PEERING_CLOSE;
break;
case OPN_ACPT:
changed |= mesh_plink_establish(sdata, sta);
action = WLAN_SP_MESH_PEERING_CONFIRM;
break;
default:
break;
}
break;
case NL80211_PLINK_ESTAB:
switch (event) {
case CLS_ACPT:
changed |= __mesh_plink_deactivate(sta);
changed |= mesh_set_ht_prot_mode(sdata);
changed |= mesh_set_short_slot_time(sdata);
mesh_plink_close(sdata, sta, event);
action = WLAN_SP_MESH_PEERING_CLOSE;
flush = true;
break;
case OPN_ACPT:
action = WLAN_SP_MESH_PEERING_CONFIRM;
break;
default:
break;
}
break;
case NL80211_PLINK_HOLDING:
switch (event) {
case CLS_ACPT:
del_timer(&sta->mesh->plink_timer);
mesh_plink_fsm_restart(sta);
break;
case OPN_ACPT:
case CNF_ACPT:
case OPN_RJCT:
case CNF_RJCT:
action = WLAN_SP_MESH_PEERING_CLOSE;
break;
default:
break;
}
break;
default:
/* should not get here, PLINK_BLOCKED is dealt with at the
* beginning of the function
*/
break;
}
spin_unlock_bh(&sta->mesh->plink_lock);
if (flush)
mesh_path_flush_by_nexthop(sta);
if (action) {
mesh_plink_frame_tx(sdata, sta, action, sta->sta.addr,
sta->mesh->llid, sta->mesh->plid,
sta->mesh->reason);
/* also send confirm in open case */
if (action == WLAN_SP_MESH_PEERING_OPEN) {
mesh_plink_frame_tx(sdata, sta,
WLAN_SP_MESH_PEERING_CONFIRM,
sta->sta.addr, sta->mesh->llid,
sta->mesh->plid, 0);
}
}
return changed;
}
/**
* mesh_plink_get_event - get correct MPM event
*
* @sdata: interface
* @sta: peer, leave NULL if processing a frame from a new suitable peer
* @elems: peering management IEs
* @ftype: frame type
* @llid: peer's peer link ID
* @plid: peer's local link ID
*
* Return: new peering event for @sta, but PLINK_UNDEFINED should be treated as
* an error.
*/
static enum plink_event
mesh_plink_get_event(struct ieee80211_sub_if_data *sdata,
struct sta_info *sta,
struct ieee802_11_elems *elems,
enum ieee80211_self_protected_actioncode ftype,
u16 llid, u16 plid)
{
enum plink_event event = PLINK_UNDEFINED;
u8 ie_len = elems->peering_len;
bool matches_local;
matches_local = (ftype == WLAN_SP_MESH_PEERING_CLOSE ||
mesh_matches_local(sdata, elems));
/* deny open request from non-matching peer */
if (!matches_local && !sta) {
event = OPN_RJCT;
goto out;
}
if (!sta) {
if (ftype != WLAN_SP_MESH_PEERING_OPEN) {
mpl_dbg(sdata, "Mesh plink: cls or cnf from unknown peer\n");
goto out;
}
/* ftype == WLAN_SP_MESH_PEERING_OPEN */
if (!mesh_plink_free_count(sdata)) {
mpl_dbg(sdata, "Mesh plink error: no more free plinks\n");
goto out;
}
/* new matching peer */
event = OPN_ACPT;
goto out;
} else {
if (!test_sta_flag(sta, WLAN_STA_AUTH)) {
mpl_dbg(sdata, "Mesh plink: Action frame from non-authed peer\n");
goto out;
}
if (sta->mesh->plink_state == NL80211_PLINK_BLOCKED)
goto out;
}
switch (ftype) {
case WLAN_SP_MESH_PEERING_OPEN:
if (!matches_local)
event = OPN_RJCT;
if (!mesh_plink_free_count(sdata) ||
(sta->mesh->plid && sta->mesh->plid != plid))
event = OPN_IGNR;
else
event = OPN_ACPT;
break;
case WLAN_SP_MESH_PEERING_CONFIRM:
if (!matches_local)
event = CNF_RJCT;
if (!mesh_plink_free_count(sdata) ||
sta->mesh->llid != llid ||
(sta->mesh->plid && sta->mesh->plid != plid))
event = CNF_IGNR;
else
event = CNF_ACPT;
break;
case WLAN_SP_MESH_PEERING_CLOSE:
if (sta->mesh->plink_state == NL80211_PLINK_ESTAB)
			/* Do not check for llid or plid. This does not
			 * follow the standard but since multiple plinks
			 * per sta are not supported, it is necessary in
			 * order to avoid a livelock when MP A sees an
			 * established peer link to MP B but MP B does not
			 * see it. This can be caused by a timeout in
			 * B's peer link establishment or B being
			 * restarted.
			 */
event = CLS_ACPT;
else if (sta->mesh->plid != plid)
event = CLS_IGNR;
else if (ie_len == 8 && sta->mesh->llid != llid)
event = CLS_IGNR;
else
event = CLS_ACPT;
break;
default:
mpl_dbg(sdata, "Mesh plink: unknown frame subtype\n");
break;
}
out:
return event;
}
static void
mesh_process_plink_frame(struct ieee80211_sub_if_data *sdata,
struct ieee80211_mgmt *mgmt,
struct ieee802_11_elems *elems,
struct ieee80211_rx_status *rx_status)
{
struct sta_info *sta;
enum plink_event event;
enum ieee80211_self_protected_actioncode ftype;
u64 changed = 0;
u8 ie_len = elems->peering_len;
u16 plid, llid = 0;
if (!elems->peering) {
mpl_dbg(sdata,
"Mesh plink: missing necessary peer link ie\n");
return;
}
if (elems->rsn_len &&
sdata->u.mesh.security == IEEE80211_MESH_SEC_NONE) {
mpl_dbg(sdata,
"Mesh plink: can't establish link with secure peer\n");
return;
}
ftype = mgmt->u.action.u.self_prot.action_code;
if ((ftype == WLAN_SP_MESH_PEERING_OPEN && ie_len != 4) ||
(ftype == WLAN_SP_MESH_PEERING_CONFIRM && ie_len != 6) ||
(ftype == WLAN_SP_MESH_PEERING_CLOSE && ie_len != 6
&& ie_len != 8)) {
mpl_dbg(sdata,
"Mesh plink: incorrect plink ie length %d %d\n",
ftype, ie_len);
return;
}
if (ftype != WLAN_SP_MESH_PEERING_CLOSE &&
(!elems->mesh_id || !elems->mesh_config)) {
mpl_dbg(sdata, "Mesh plink: missing necessary ie\n");
return;
}
/* Note the lines below are correct, the llid in the frame is the plid
* from the point of view of this host.
*/
plid = get_unaligned_le16(PLINK_GET_LLID(elems->peering));
if (ftype == WLAN_SP_MESH_PEERING_CONFIRM ||
(ftype == WLAN_SP_MESH_PEERING_CLOSE && ie_len == 8))
llid = get_unaligned_le16(PLINK_GET_PLID(elems->peering));
	/* WARNING: the RCU read lock protects only the sta pointer; it is
	 * dropped and re-acquired below
	 */
rcu_read_lock();
sta = sta_info_get(sdata, mgmt->sa);
if (ftype == WLAN_SP_MESH_PEERING_OPEN &&
!rssi_threshold_check(sdata, sta)) {
mpl_dbg(sdata, "Mesh plink: %pM does not meet rssi threshold\n",
mgmt->sa);
goto unlock_rcu;
}
/* Now we will figure out the appropriate event... */
event = mesh_plink_get_event(sdata, sta, elems, ftype, llid, plid);
if (event == OPN_ACPT) {
rcu_read_unlock();
/* allocate sta entry if necessary and update info */
sta = mesh_sta_info_get(sdata, mgmt->sa, elems, rx_status);
if (!sta) {
mpl_dbg(sdata, "Mesh plink: failed to init peer!\n");
goto unlock_rcu;
}
sta->mesh->plid = plid;
} else if (!sta && event == OPN_RJCT) {
mesh_plink_frame_tx(sdata, NULL, WLAN_SP_MESH_PEERING_CLOSE,
mgmt->sa, 0, plid,
WLAN_REASON_MESH_CONFIG);
goto unlock_rcu;
} else if (!sta || event == PLINK_UNDEFINED) {
/* something went wrong */
goto unlock_rcu;
}
if (event == CNF_ACPT) {
/* 802.11-2012 13.3.7.2 - update plid on CNF if not set */
if (!sta->mesh->plid)
sta->mesh->plid = plid;
sta->mesh->aid = get_unaligned_le16(PLINK_CNF_AID(mgmt));
}
changed |= mesh_plink_fsm(sdata, sta, event);
unlock_rcu:
rcu_read_unlock();
if (changed)
ieee80211_mbss_info_change_notify(sdata, changed);
}
void mesh_rx_plink_frame(struct ieee80211_sub_if_data *sdata,
struct ieee80211_mgmt *mgmt, size_t len,
struct ieee80211_rx_status *rx_status)
{
struct ieee802_11_elems *elems;
size_t baselen;
u8 *baseaddr;
/* need action_code, aux */
if (len < IEEE80211_MIN_ACTION_SIZE + 3)
return;
if (sdata->u.mesh.user_mpm)
/* userspace must register for these */
return;
if (is_multicast_ether_addr(mgmt->da)) {
mpl_dbg(sdata,
"Mesh plink: ignore frame from multicast address\n");
return;
}
baseaddr = mgmt->u.action.u.self_prot.variable;
baselen = (u8 *) mgmt->u.action.u.self_prot.variable - (u8 *) mgmt;
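	/* Peering Confirm frames carry capability info and an AID field ahead
	 * of the IEs, hence the extra 4-byte offset below (cf. the
	 * PLINK_CNF_AID() lookup in mesh_process_plink_frame()).
	 */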
if (mgmt->u.action.u.self_prot.action_code ==
WLAN_SP_MESH_PEERING_CONFIRM) {
baseaddr += 4;
baselen += 4;
if (baselen > len)
return;
}
	elems = ieee802_11_parse_elems(baseaddr, len - baselen, true, NULL);
	if (!elems)
		return;
	mesh_process_plink_frame(sdata, mgmt, elems, rx_status);
	kfree(elems);
}
| linux-master | net/mac80211/mesh_plink.c |
// SPDX-License-Identifier: GPL-2.0-only
/*
* mac80211 ethtool hooks for cfg80211
*
* Copied from cfg.c - originally
* Copyright 2006-2010 Johannes Berg <[email protected]>
* Copyright 2014 Intel Corporation (Author: Johannes Berg)
* Copyright (C) 2018, 2022 Intel Corporation
*/
#include <linux/types.h>
#include <net/cfg80211.h>
#include "ieee80211_i.h"
#include "sta_info.h"
#include "driver-ops.h"
static int ieee80211_set_ringparam(struct net_device *dev,
struct ethtool_ringparam *rp,
struct kernel_ethtool_ringparam *kernel_rp,
struct netlink_ext_ack *extack)
{
struct ieee80211_local *local = wiphy_priv(dev->ieee80211_ptr->wiphy);
if (rp->rx_mini_pending != 0 || rp->rx_jumbo_pending != 0)
return -EINVAL;
return drv_set_ringparam(local, rp->tx_pending, rp->rx_pending);
}
static void ieee80211_get_ringparam(struct net_device *dev,
struct ethtool_ringparam *rp,
struct kernel_ethtool_ringparam *kernel_rp,
struct netlink_ext_ack *extack)
{
struct ieee80211_local *local = wiphy_priv(dev->ieee80211_ptr->wiphy);
memset(rp, 0, sizeof(*rp));
drv_get_ringparam(local, &rp->tx_pending, &rp->tx_max_pending,
&rp->rx_pending, &rp->rx_max_pending);
}
static const char ieee80211_gstrings_sta_stats[][ETH_GSTRING_LEN] = {
"rx_packets", "rx_bytes",
"rx_duplicates", "rx_fragments", "rx_dropped",
"tx_packets", "tx_bytes",
"tx_filtered", "tx_retry_failed", "tx_retries",
"sta_state", "txrate", "rxrate", "signal",
"channel", "noise", "ch_time", "ch_time_busy",
"ch_time_ext_busy", "ch_time_rx", "ch_time_tx"
};
#define STA_STATS_LEN ARRAY_SIZE(ieee80211_gstrings_sta_stats)
static int ieee80211_get_sset_count(struct net_device *dev, int sset)
{
struct ieee80211_sub_if_data *sdata = IEEE80211_DEV_TO_SUB_IF(dev);
int rv = 0;
if (sset == ETH_SS_STATS)
rv += STA_STATS_LEN;
rv += drv_get_et_sset_count(sdata, sset);
if (rv == 0)
return -EOPNOTSUPP;
return rv;
}
static void ieee80211_get_stats(struct net_device *dev,
struct ethtool_stats *stats,
u64 *data)
{
struct ieee80211_sub_if_data *sdata = IEEE80211_DEV_TO_SUB_IF(dev);
struct ieee80211_chanctx_conf *chanctx_conf;
struct ieee80211_channel *channel;
struct sta_info *sta;
struct ieee80211_local *local = sdata->local;
struct station_info sinfo;
struct survey_info survey;
int i, q;
#define STA_STATS_SURVEY_LEN 7
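	/* the last 7 entries of ieee80211_gstrings_sta_stats are the survey
	 * values: channel, noise, ch_time, ch_time_busy, ch_time_ext_busy,
	 * ch_time_rx and ch_time_tx
	 */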
memset(data, 0, sizeof(u64) * STA_STATS_LEN);
#define ADD_STA_STATS(sta) \
do { \
data[i++] += sinfo.rx_packets; \
data[i++] += sinfo.rx_bytes; \
data[i++] += (sta)->rx_stats.num_duplicates; \
data[i++] += (sta)->rx_stats.fragments; \
data[i++] += sinfo.rx_dropped_misc; \
\
data[i++] += sinfo.tx_packets; \
data[i++] += sinfo.tx_bytes; \
data[i++] += (sta)->status_stats.filtered; \
data[i++] += sinfo.tx_failed; \
data[i++] += sinfo.tx_retries; \
} while (0)
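	/* the field order in ADD_STA_STATS() must match the leading entries
	 * of ieee80211_gstrings_sta_stats
	 */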
	/* For managed (station-mode) interfaces, find the single station
	 * based on the BSSID and use that. For other interface types,
	 * iterate through all available stations and add stats for any
	 * station that is assigned to this network device.
	 */
mutex_lock(&local->sta_mtx);
if (sdata->vif.type == NL80211_IFTYPE_STATION) {
sta = sta_info_get_bss(sdata, sdata->deflink.u.mgd.bssid);
if (!(sta && !WARN_ON(sta->sdata->dev != dev)))
goto do_survey;
memset(&sinfo, 0, sizeof(sinfo));
sta_set_sinfo(sta, &sinfo, false);
i = 0;
ADD_STA_STATS(&sta->deflink);
data[i++] = sta->sta_state;
if (sinfo.filled & BIT_ULL(NL80211_STA_INFO_TX_BITRATE))
data[i] = 100000ULL *
cfg80211_calculate_bitrate(&sinfo.txrate);
i++;
if (sinfo.filled & BIT_ULL(NL80211_STA_INFO_RX_BITRATE))
data[i] = 100000ULL *
cfg80211_calculate_bitrate(&sinfo.rxrate);
i++;
if (sinfo.filled & BIT_ULL(NL80211_STA_INFO_SIGNAL_AVG))
data[i] = (u8)sinfo.signal_avg;
i++;
} else {
list_for_each_entry(sta, &local->sta_list, list) {
/* Make sure this station belongs to the proper dev */
if (sta->sdata->dev != dev)
continue;
memset(&sinfo, 0, sizeof(sinfo));
sta_set_sinfo(sta, &sinfo, false);
i = 0;
ADD_STA_STATS(&sta->deflink);
}
}
do_survey:
i = STA_STATS_LEN - STA_STATS_SURVEY_LEN;
/* Get survey stats for current channel */
survey.filled = 0;
rcu_read_lock();
chanctx_conf = rcu_dereference(sdata->vif.bss_conf.chanctx_conf);
if (chanctx_conf)
channel = chanctx_conf->def.chan;
else
channel = NULL;
rcu_read_unlock();
if (channel) {
q = 0;
do {
survey.filled = 0;
if (drv_get_survey(local, q, &survey) != 0) {
survey.filled = 0;
break;
}
q++;
} while (channel != survey.channel);
}
if (survey.filled)
data[i++] = survey.channel->center_freq;
else
data[i++] = 0;
if (survey.filled & SURVEY_INFO_NOISE_DBM)
data[i++] = (u8)survey.noise;
else
data[i++] = -1LL;
if (survey.filled & SURVEY_INFO_TIME)
data[i++] = survey.time;
else
data[i++] = -1LL;
if (survey.filled & SURVEY_INFO_TIME_BUSY)
data[i++] = survey.time_busy;
else
data[i++] = -1LL;
if (survey.filled & SURVEY_INFO_TIME_EXT_BUSY)
data[i++] = survey.time_ext_busy;
else
data[i++] = -1LL;
if (survey.filled & SURVEY_INFO_TIME_RX)
data[i++] = survey.time_rx;
else
data[i++] = -1LL;
if (survey.filled & SURVEY_INFO_TIME_TX)
data[i++] = survey.time_tx;
else
data[i++] = -1LL;
mutex_unlock(&local->sta_mtx);
if (WARN_ON(i != STA_STATS_LEN))
return;
drv_get_et_stats(sdata, stats, &(data[STA_STATS_LEN]));
}
static void ieee80211_get_strings(struct net_device *dev, u32 sset, u8 *data)
{
struct ieee80211_sub_if_data *sdata = IEEE80211_DEV_TO_SUB_IF(dev);
int sz_sta_stats = 0;
if (sset == ETH_SS_STATS) {
sz_sta_stats = sizeof(ieee80211_gstrings_sta_stats);
memcpy(data, ieee80211_gstrings_sta_stats, sz_sta_stats);
}
drv_get_et_strings(sdata, sset, &(data[sz_sta_stats]));
}
static int ieee80211_get_regs_len(struct net_device *dev)
{
return 0;
}
static void ieee80211_get_regs(struct net_device *dev,
struct ethtool_regs *regs,
void *data)
{
struct wireless_dev *wdev = dev->ieee80211_ptr;
regs->version = wdev->wiphy->hw_version;
regs->len = 0;
}
const struct ethtool_ops ieee80211_ethtool_ops = {
.get_drvinfo = cfg80211_get_drvinfo,
.get_regs_len = ieee80211_get_regs_len,
.get_regs = ieee80211_get_regs,
.get_link = ethtool_op_get_link,
.get_ringparam = ieee80211_get_ringparam,
.set_ringparam = ieee80211_set_ringparam,
.get_strings = ieee80211_get_strings,
.get_ethtool_stats = ieee80211_get_stats,
.get_sset_count = ieee80211_get_sset_count,
};
| linux-master | net/mac80211/ethtool.c |
// SPDX-License-Identifier: GPL-2.0-only
/*
* AES-128-CMAC with TLen 16 for IEEE 802.11w BIP
* Copyright 2008, Jouni Malinen <[email protected]>
* Copyright (C) 2020 Intel Corporation
*/
#include <linux/kernel.h>
#include <linux/types.h>
#include <linux/crypto.h>
#include <linux/export.h>
#include <linux/err.h>
#include <crypto/aes.h>
#include <net/mac80211.h>
#include "key.h"
#include "aes_cmac.h"
#define CMAC_TLEN 8 /* CMAC TLen = 64 bits (8 octets) */
#define CMAC_TLEN_256 16 /* CMAC TLen = 128 bits (16 octets) */
#define AAD_LEN 20
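/* BIP AAD consists of FC || A1 || A2 || A3 = 2 + 3 * ETH_ALEN = 20 octets */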
static const u8 zero[CMAC_TLEN_256];
void ieee80211_aes_cmac(struct crypto_shash *tfm, const u8 *aad,
const u8 *data, size_t data_len, u8 *mic)
{
SHASH_DESC_ON_STACK(desc, tfm);
u8 out[AES_BLOCK_SIZE];
const __le16 *fc;
desc->tfm = tfm;
crypto_shash_init(desc);
crypto_shash_update(desc, aad, AAD_LEN);
fc = (const __le16 *)aad;
if (ieee80211_is_beacon(*fc)) {
/* mask Timestamp field to zero */
crypto_shash_update(desc, zero, 8);
crypto_shash_update(desc, data + 8, data_len - 8 - CMAC_TLEN);
} else {
crypto_shash_update(desc, data, data_len - CMAC_TLEN);
}
crypto_shash_finup(desc, zero, CMAC_TLEN, out);
memcpy(mic, out, CMAC_TLEN);
}
void ieee80211_aes_cmac_256(struct crypto_shash *tfm, const u8 *aad,
const u8 *data, size_t data_len, u8 *mic)
{
SHASH_DESC_ON_STACK(desc, tfm);
const __le16 *fc;
desc->tfm = tfm;
crypto_shash_init(desc);
crypto_shash_update(desc, aad, AAD_LEN);
fc = (const __le16 *)aad;
if (ieee80211_is_beacon(*fc)) {
/* mask Timestamp field to zero */
crypto_shash_update(desc, zero, 8);
crypto_shash_update(desc, data + 8,
data_len - 8 - CMAC_TLEN_256);
} else {
crypto_shash_update(desc, data, data_len - CMAC_TLEN_256);
}
crypto_shash_finup(desc, zero, CMAC_TLEN_256, mic);
}
struct crypto_shash *ieee80211_aes_cmac_key_setup(const u8 key[],
size_t key_len)
{
struct crypto_shash *tfm;
tfm = crypto_alloc_shash("cmac(aes)", 0, 0);
if (!IS_ERR(tfm)) {
int err = crypto_shash_setkey(tfm, key, key_len);
if (err) {
crypto_free_shash(tfm);
return ERR_PTR(err);
}
}
return tfm;
}
void ieee80211_aes_cmac_key_free(struct crypto_shash *tfm)
{
crypto_free_shash(tfm);
}
| linux-master | net/mac80211/aes_cmac.c |
// SPDX-License-Identifier: GPL-2.0
/*
* Portions
* Copyright (C) 2020-2021 Intel Corporation
*/
#include <net/mac80211.h>
#include <net/rtnetlink.h>
#include "ieee80211_i.h"
#include "mesh.h"
#include "driver-ops.h"
#include "led.h"
static void ieee80211_sched_scan_cancel(struct ieee80211_local *local)
{
if (ieee80211_request_sched_scan_stop(local))
return;
cfg80211_sched_scan_stopped_locked(local->hw.wiphy, 0);
}
int __ieee80211_suspend(struct ieee80211_hw *hw, struct cfg80211_wowlan *wowlan)
{
struct ieee80211_local *local = hw_to_local(hw);
struct ieee80211_sub_if_data *sdata;
struct sta_info *sta;
if (!local->open_count)
goto suspend;
local->suspending = true;
mb(); /* make suspending visible before any cancellation */
ieee80211_scan_cancel(local);
ieee80211_dfs_cac_cancel(local);
ieee80211_roc_purge(local, NULL);
ieee80211_del_virtual_monitor(local);
if (ieee80211_hw_check(hw, AMPDU_AGGREGATION) &&
!(wowlan && wowlan->any)) {
mutex_lock(&local->sta_mtx);
list_for_each_entry(sta, &local->sta_list, list) {
set_sta_flag(sta, WLAN_STA_BLOCK_BA);
ieee80211_sta_tear_down_BA_sessions(
sta, AGG_STOP_LOCAL_REQUEST);
}
mutex_unlock(&local->sta_mtx);
}
/* keep sched_scan only in case of 'any' trigger */
if (!(wowlan && wowlan->any))
ieee80211_sched_scan_cancel(local);
ieee80211_stop_queues_by_reason(hw,
IEEE80211_MAX_QUEUE_MAP,
IEEE80211_QUEUE_STOP_REASON_SUSPEND,
false);
/* flush out all packets */
synchronize_net();
ieee80211_flush_queues(local, NULL, true);
local->quiescing = true;
/* make quiescing visible to timers everywhere */
mb();
flush_workqueue(local->workqueue);
/* Don't try to run timers while suspended. */
del_timer_sync(&local->sta_cleanup);
/*
* Note that this particular timer doesn't need to be
* restarted at resume.
*/
cancel_work_sync(&local->dynamic_ps_enable_work);
del_timer_sync(&local->dynamic_ps_timer);
local->wowlan = wowlan;
if (local->wowlan) {
int err;
		/* Drivers don't expect to suspend while operations such as
		 * authenticating or associating are in progress. Accepting
		 * that makes no sense anyway, since the authentication or
		 * association could never finish while the driver is unable
		 * to act on its own.
		 * Thus, clean up in-progress auth/assoc first.
		 */
list_for_each_entry(sdata, &local->interfaces, list) {
if (!ieee80211_sdata_running(sdata))
continue;
if (sdata->vif.type != NL80211_IFTYPE_STATION)
continue;
ieee80211_mgd_quiesce(sdata);
			/* If we suspend while TX is in progress and wowlan is
			 * enabled (so the connection stays active), there can
			 * be a race: the driver is taken out of power-save
			 * due to TX, then during suspend the dynamic_ps_timer
			 * is cancelled and the TX packet is flushed, leaving
			 * the driver in ACTIVE even after resume, until the
			 * dynamic_ps_timer puts it back into DOZE.
			 */
if (sdata->u.mgd.associated &&
sdata->u.mgd.powersave &&
!(local->hw.conf.flags & IEEE80211_CONF_PS)) {
local->hw.conf.flags |= IEEE80211_CONF_PS;
ieee80211_hw_config(local,
IEEE80211_CONF_CHANGE_PS);
}
}
err = drv_suspend(local, wowlan);
if (err < 0) {
local->quiescing = false;
local->wowlan = false;
if (ieee80211_hw_check(hw, AMPDU_AGGREGATION)) {
mutex_lock(&local->sta_mtx);
list_for_each_entry(sta,
&local->sta_list, list) {
clear_sta_flag(sta, WLAN_STA_BLOCK_BA);
}
mutex_unlock(&local->sta_mtx);
}
ieee80211_wake_queues_by_reason(hw,
IEEE80211_MAX_QUEUE_MAP,
IEEE80211_QUEUE_STOP_REASON_SUSPEND,
false);
return err;
} else if (err > 0) {
WARN_ON(err != 1);
/* cfg80211 will call back into mac80211 to disconnect
* all interfaces, allow that to proceed properly
*/
ieee80211_wake_queues_by_reason(hw,
IEEE80211_MAX_QUEUE_MAP,
IEEE80211_QUEUE_STOP_REASON_SUSPEND,
false);
return err;
} else {
goto suspend;
}
}
/* remove all interfaces that were created in the driver */
list_for_each_entry(sdata, &local->interfaces, list) {
if (!ieee80211_sdata_running(sdata))
continue;
switch (sdata->vif.type) {
case NL80211_IFTYPE_AP_VLAN:
case NL80211_IFTYPE_MONITOR:
continue;
case NL80211_IFTYPE_STATION:
ieee80211_mgd_quiesce(sdata);
break;
default:
break;
}
flush_delayed_work(&sdata->dec_tailroom_needed_wk);
drv_remove_interface(local, sdata);
}
/*
* We disconnected on all interfaces before suspend, all channel
* contexts should be released.
*/
WARN_ON(!list_empty(&local->chanctx_list));
/* stop hardware - this must stop RX */
ieee80211_stop_device(local);
suspend:
local->suspended = true;
/* need suspended to be visible before quiescing is false */
barrier();
local->quiescing = false;
local->suspending = false;
return 0;
}
/*
* __ieee80211_resume() is a static inline which just calls
* ieee80211_reconfig(), which is also needed for hardware
* hang/firmware failure/etc. recovery.
*/
void ieee80211_report_wowlan_wakeup(struct ieee80211_vif *vif,
struct cfg80211_wowlan_wakeup *wakeup,
gfp_t gfp)
{
struct ieee80211_sub_if_data *sdata = vif_to_sdata(vif);
cfg80211_report_wowlan_wakeup(&sdata->wdev, wakeup, gfp);
}
EXPORT_SYMBOL(ieee80211_report_wowlan_wakeup);
| linux-master | net/mac80211/pm.c |
// SPDX-License-Identifier: GPL-2.0-only
/*
* Copyright (C) 2010-2013 Felix Fietkau <[email protected]>
* Copyright (C) 2019-2022 Intel Corporation
*/
#include <linux/netdevice.h>
#include <linux/types.h>
#include <linux/skbuff.h>
#include <linux/debugfs.h>
#include <linux/random.h>
#include <linux/moduleparam.h>
#include <linux/ieee80211.h>
#include <linux/minmax.h>
#include <net/mac80211.h>
#include "rate.h"
#include "sta_info.h"
#include "rc80211_minstrel_ht.h"
#define AVG_AMPDU_SIZE 16
#define AVG_PKT_SIZE 1200
/* Number of bits for an average sized packet */
#define MCS_NBITS ((AVG_PKT_SIZE * AVG_AMPDU_SIZE) << 3)
/* Number of symbols for a packet with (bps) bits per symbol */
#define MCS_NSYMS(bps) DIV_ROUND_UP(MCS_NBITS, (bps))
/* Transmission time (nanoseconds) for a packet containing (syms) symbols */
#define MCS_SYMBOL_TIME(sgi, syms) \
(sgi ? \
((syms) * 18000 + 4000) / 5 : /* syms * 3.6 us */ \
((syms) * 1000) << 2 /* syms * 4 us */ \
)
/* Transmit duration for the raw data part of an average sized packet */
#define MCS_DURATION(streams, sgi, bps) \
(MCS_SYMBOL_TIME(sgi, MCS_NSYMS((streams) * (bps))) / AVG_AMPDU_SIZE)
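/*
 * Illustrative sanity check of the macros above: for a single-stream,
 * long-GI, 20 MHz MCS0 rate (26 data bits per symbol), MCS_NSYMS(26) is
 * 5908 symbols at 4 us each, so MCS_DURATION(1, 0, 26) comes out at about
 * 1.48 ms (1477000 ns) of airtime per average-sized MPDU.
 */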
#define BW_20 0
#define BW_40 1
#define BW_80 2
/*
* Define group sort order: HT40 -> SGI -> #streams
*/
#define GROUP_IDX(_streams, _sgi, _ht40) \
MINSTREL_HT_GROUP_0 + \
MINSTREL_MAX_STREAMS * 2 * _ht40 + \
MINSTREL_MAX_STREAMS * _sgi + \
_streams - 1
#define _MAX(a, b) (((a)>(b))?(a):(b))
#define GROUP_SHIFT(duration) \
_MAX(0, 16 - __builtin_clz(duration))
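/*
 * GROUP_SHIFT() picks a right-shift so that the largest (i.e. slowest)
 * duration of a group still fits into the 16-bit .duration table entries;
 * minstrel_get_duration() applies the shift in reverse when reading them.
 */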
/* MCS rate information for an MCS group */
#define __MCS_GROUP(_streams, _sgi, _ht40, _s) \
[GROUP_IDX(_streams, _sgi, _ht40)] = { \
.streams = _streams, \
.shift = _s, \
.bw = _ht40, \
.flags = \
IEEE80211_TX_RC_MCS | \
(_sgi ? IEEE80211_TX_RC_SHORT_GI : 0) | \
(_ht40 ? IEEE80211_TX_RC_40_MHZ_WIDTH : 0), \
.duration = { \
MCS_DURATION(_streams, _sgi, _ht40 ? 54 : 26) >> _s, \
MCS_DURATION(_streams, _sgi, _ht40 ? 108 : 52) >> _s, \
MCS_DURATION(_streams, _sgi, _ht40 ? 162 : 78) >> _s, \
MCS_DURATION(_streams, _sgi, _ht40 ? 216 : 104) >> _s, \
MCS_DURATION(_streams, _sgi, _ht40 ? 324 : 156) >> _s, \
MCS_DURATION(_streams, _sgi, _ht40 ? 432 : 208) >> _s, \
MCS_DURATION(_streams, _sgi, _ht40 ? 486 : 234) >> _s, \
MCS_DURATION(_streams, _sgi, _ht40 ? 540 : 260) >> _s \
} \
}
#define MCS_GROUP_SHIFT(_streams, _sgi, _ht40) \
GROUP_SHIFT(MCS_DURATION(_streams, _sgi, _ht40 ? 54 : 26))
#define MCS_GROUP(_streams, _sgi, _ht40) \
__MCS_GROUP(_streams, _sgi, _ht40, \
MCS_GROUP_SHIFT(_streams, _sgi, _ht40))
#define VHT_GROUP_IDX(_streams, _sgi, _bw) \
(MINSTREL_VHT_GROUP_0 + \
MINSTREL_MAX_STREAMS * 2 * (_bw) + \
MINSTREL_MAX_STREAMS * (_sgi) + \
(_streams) - 1)
#define BW2VBPS(_bw, r3, r2, r1) \
(_bw == BW_80 ? r3 : _bw == BW_40 ? r2 : r1)
#define __VHT_GROUP(_streams, _sgi, _bw, _s) \
[VHT_GROUP_IDX(_streams, _sgi, _bw)] = { \
.streams = _streams, \
.shift = _s, \
.bw = _bw, \
.flags = \
IEEE80211_TX_RC_VHT_MCS | \
(_sgi ? IEEE80211_TX_RC_SHORT_GI : 0) | \
(_bw == BW_80 ? IEEE80211_TX_RC_80_MHZ_WIDTH : \
_bw == BW_40 ? IEEE80211_TX_RC_40_MHZ_WIDTH : 0), \
.duration = { \
MCS_DURATION(_streams, _sgi, \
BW2VBPS(_bw, 117, 54, 26)) >> _s, \
MCS_DURATION(_streams, _sgi, \
BW2VBPS(_bw, 234, 108, 52)) >> _s, \
MCS_DURATION(_streams, _sgi, \
BW2VBPS(_bw, 351, 162, 78)) >> _s, \
MCS_DURATION(_streams, _sgi, \
BW2VBPS(_bw, 468, 216, 104)) >> _s, \
MCS_DURATION(_streams, _sgi, \
BW2VBPS(_bw, 702, 324, 156)) >> _s, \
MCS_DURATION(_streams, _sgi, \
BW2VBPS(_bw, 936, 432, 208)) >> _s, \
MCS_DURATION(_streams, _sgi, \
BW2VBPS(_bw, 1053, 486, 234)) >> _s, \
MCS_DURATION(_streams, _sgi, \
BW2VBPS(_bw, 1170, 540, 260)) >> _s, \
MCS_DURATION(_streams, _sgi, \
BW2VBPS(_bw, 1404, 648, 312)) >> _s, \
MCS_DURATION(_streams, _sgi, \
BW2VBPS(_bw, 1560, 720, 346)) >> _s \
} \
}
#define VHT_GROUP_SHIFT(_streams, _sgi, _bw) \
GROUP_SHIFT(MCS_DURATION(_streams, _sgi, \
BW2VBPS(_bw, 117, 54, 26)))
#define VHT_GROUP(_streams, _sgi, _bw) \
__VHT_GROUP(_streams, _sgi, _bw, \
VHT_GROUP_SHIFT(_streams, _sgi, _bw))
#define CCK_DURATION(_bitrate, _short) \
(1000 * (10 /* SIFS */ + \
(_short ? 72 + 24 : 144 + 48) + \
(8 * (AVG_PKT_SIZE + 4) * 10) / (_bitrate)))
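/*
 * _bitrate is in units of 100 kbit/s, so the payload term above evaluates to
 * microseconds; together with SIFS and the (short or long) preamble + PLCP
 * header times, the factor of 1000 converts the result to nanoseconds.
 */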
#define CCK_DURATION_LIST(_short, _s) \
CCK_DURATION(10, _short) >> _s, \
CCK_DURATION(20, _short) >> _s, \
CCK_DURATION(55, _short) >> _s, \
CCK_DURATION(110, _short) >> _s
#define __CCK_GROUP(_s) \
[MINSTREL_CCK_GROUP] = { \
.streams = 1, \
.flags = 0, \
.shift = _s, \
.duration = { \
CCK_DURATION_LIST(false, _s), \
CCK_DURATION_LIST(true, _s) \
} \
}
#define CCK_GROUP_SHIFT \
GROUP_SHIFT(CCK_DURATION(10, false))
#define CCK_GROUP __CCK_GROUP(CCK_GROUP_SHIFT)
#define OFDM_DURATION(_bitrate) \
(1000 * (16 /* SIFS + signal ext */ + \
16 /* T_PREAMBLE */ + \
4 /* T_SIGNAL */ + \
4 * (((16 + 80 * (AVG_PKT_SIZE + 4) + 6) / \
((_bitrate) * 4)))))
#define OFDM_DURATION_LIST(_s) \
OFDM_DURATION(60) >> _s, \
OFDM_DURATION(90) >> _s, \
OFDM_DURATION(120) >> _s, \
OFDM_DURATION(180) >> _s, \
OFDM_DURATION(240) >> _s, \
OFDM_DURATION(360) >> _s, \
OFDM_DURATION(480) >> _s, \
OFDM_DURATION(540) >> _s
#define __OFDM_GROUP(_s) \
[MINSTREL_OFDM_GROUP] = { \
.streams = 1, \
.flags = 0, \
.shift = _s, \
.duration = { \
OFDM_DURATION_LIST(_s), \
} \
}
#define OFDM_GROUP_SHIFT \
GROUP_SHIFT(OFDM_DURATION(60))
#define OFDM_GROUP __OFDM_GROUP(OFDM_GROUP_SHIFT)
static bool minstrel_vht_only = true;
module_param(minstrel_vht_only, bool, 0644);
MODULE_PARM_DESC(minstrel_vht_only,
"Use only VHT rates when VHT is supported by sta.");
/*
* To enable sufficiently targeted rate sampling, MCS rates are divided into
* groups, based on the number of streams and flags (HT40, SGI) that they
* use.
*
 * The sort order has to be fixed for the GROUP_IDX macro to be applicable:
* BW -> SGI -> #streams
*/
const struct mcs_group minstrel_mcs_groups[] = {
MCS_GROUP(1, 0, BW_20),
MCS_GROUP(2, 0, BW_20),
MCS_GROUP(3, 0, BW_20),
MCS_GROUP(4, 0, BW_20),
MCS_GROUP(1, 1, BW_20),
MCS_GROUP(2, 1, BW_20),
MCS_GROUP(3, 1, BW_20),
MCS_GROUP(4, 1, BW_20),
MCS_GROUP(1, 0, BW_40),
MCS_GROUP(2, 0, BW_40),
MCS_GROUP(3, 0, BW_40),
MCS_GROUP(4, 0, BW_40),
MCS_GROUP(1, 1, BW_40),
MCS_GROUP(2, 1, BW_40),
MCS_GROUP(3, 1, BW_40),
MCS_GROUP(4, 1, BW_40),
CCK_GROUP,
OFDM_GROUP,
VHT_GROUP(1, 0, BW_20),
VHT_GROUP(2, 0, BW_20),
VHT_GROUP(3, 0, BW_20),
VHT_GROUP(4, 0, BW_20),
VHT_GROUP(1, 1, BW_20),
VHT_GROUP(2, 1, BW_20),
VHT_GROUP(3, 1, BW_20),
VHT_GROUP(4, 1, BW_20),
VHT_GROUP(1, 0, BW_40),
VHT_GROUP(2, 0, BW_40),
VHT_GROUP(3, 0, BW_40),
VHT_GROUP(4, 0, BW_40),
VHT_GROUP(1, 1, BW_40),
VHT_GROUP(2, 1, BW_40),
VHT_GROUP(3, 1, BW_40),
VHT_GROUP(4, 1, BW_40),
VHT_GROUP(1, 0, BW_80),
VHT_GROUP(2, 0, BW_80),
VHT_GROUP(3, 0, BW_80),
VHT_GROUP(4, 0, BW_80),
VHT_GROUP(1, 1, BW_80),
VHT_GROUP(2, 1, BW_80),
VHT_GROUP(3, 1, BW_80),
VHT_GROUP(4, 1, BW_80),
};
const s16 minstrel_cck_bitrates[4] = { 10, 20, 55, 110 };
const s16 minstrel_ofdm_bitrates[8] = { 60, 90, 120, 180, 240, 360, 480, 540 };
static u8 sample_table[SAMPLE_COLUMNS][MCS_GROUP_RATES] __read_mostly;
static const u8 minstrel_sample_seq[] = {
MINSTREL_SAMPLE_TYPE_INC,
MINSTREL_SAMPLE_TYPE_JUMP,
MINSTREL_SAMPLE_TYPE_INC,
MINSTREL_SAMPLE_TYPE_JUMP,
MINSTREL_SAMPLE_TYPE_INC,
MINSTREL_SAMPLE_TYPE_SLOW,
};
static void
minstrel_ht_update_rates(struct minstrel_priv *mp, struct minstrel_ht_sta *mi);
/*
* Some VHT MCSes are invalid (when Ndbps / Nes is not an integer)
 * e.g. for MCS9@20MHzx1Nss: Ndbps=8x52*(5/6) Nes=1
*
* Returns the valid mcs map for struct minstrel_mcs_group_data.supported
*/
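/*
 * Illustrative example: for nss == 1 at 20 MHz the mask starts out as BIT(9),
 * so even an IEEE80211_VHT_MCS_SUPPORT_0_9 map yields 0x1ff, i.e. MCS 0-8.
 */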
static u16
minstrel_get_valid_vht_rates(int bw, int nss, __le16 mcs_map)
{
u16 mask = 0;
if (bw == BW_20) {
if (nss != 3 && nss != 6)
mask = BIT(9);
} else if (bw == BW_80) {
if (nss == 3 || nss == 7)
mask = BIT(6);
else if (nss == 6)
mask = BIT(9);
} else {
WARN_ON(bw != BW_40);
}
switch ((le16_to_cpu(mcs_map) >> (2 * (nss - 1))) & 3) {
case IEEE80211_VHT_MCS_SUPPORT_0_7:
mask |= 0x300;
break;
case IEEE80211_VHT_MCS_SUPPORT_0_8:
mask |= 0x200;
break;
case IEEE80211_VHT_MCS_SUPPORT_0_9:
break;
default:
mask = 0x3ff;
}
return 0x3ff & ~mask;
}
static bool
minstrel_ht_is_legacy_group(int group)
{
return group == MINSTREL_CCK_GROUP ||
group == MINSTREL_OFDM_GROUP;
}
/*
* Look up an MCS group index based on mac80211 rate information
*/
static int
minstrel_ht_get_group_idx(struct ieee80211_tx_rate *rate)
{
return GROUP_IDX((rate->idx / 8) + 1,
!!(rate->flags & IEEE80211_TX_RC_SHORT_GI),
!!(rate->flags & IEEE80211_TX_RC_40_MHZ_WIDTH));
}
/*
* Look up an MCS group index based on new cfg80211 rate_info.
*/
static int
minstrel_ht_ri_get_group_idx(struct rate_info *rate)
{
return GROUP_IDX((rate->mcs / 8) + 1,
!!(rate->flags & RATE_INFO_FLAGS_SHORT_GI),
!!(rate->bw & RATE_INFO_BW_40));
}
static int
minstrel_vht_get_group_idx(struct ieee80211_tx_rate *rate)
{
return VHT_GROUP_IDX(ieee80211_rate_get_vht_nss(rate),
!!(rate->flags & IEEE80211_TX_RC_SHORT_GI),
!!(rate->flags & IEEE80211_TX_RC_40_MHZ_WIDTH) +
2*!!(rate->flags & IEEE80211_TX_RC_80_MHZ_WIDTH));
}
/*
* Look up an MCS group index based on new cfg80211 rate_info.
*/
static int
minstrel_vht_ri_get_group_idx(struct rate_info *rate)
{
return VHT_GROUP_IDX(rate->nss,
!!(rate->flags & RATE_INFO_FLAGS_SHORT_GI),
!!(rate->bw & RATE_INFO_BW_40) +
2*!!(rate->bw & RATE_INFO_BW_80));
}
static struct minstrel_rate_stats *
minstrel_ht_get_stats(struct minstrel_priv *mp, struct minstrel_ht_sta *mi,
struct ieee80211_tx_rate *rate)
{
int group, idx;
if (rate->flags & IEEE80211_TX_RC_MCS) {
group = minstrel_ht_get_group_idx(rate);
idx = rate->idx % 8;
goto out;
}
if (rate->flags & IEEE80211_TX_RC_VHT_MCS) {
group = minstrel_vht_get_group_idx(rate);
idx = ieee80211_rate_get_vht_mcs(rate);
goto out;
}
group = MINSTREL_CCK_GROUP;
for (idx = 0; idx < ARRAY_SIZE(mp->cck_rates); idx++) {
if (!(mi->supported[group] & BIT(idx)))
continue;
if (rate->idx != mp->cck_rates[idx])
continue;
/* short preamble */
if ((mi->supported[group] & BIT(idx + 4)) &&
(rate->flags & IEEE80211_TX_RC_USE_SHORT_PREAMBLE))
idx += 4;
goto out;
}
group = MINSTREL_OFDM_GROUP;
for (idx = 0; idx < ARRAY_SIZE(mp->ofdm_rates[0]); idx++)
if (rate->idx == mp->ofdm_rates[mi->band][idx])
goto out;
idx = 0;
out:
return &mi->groups[group].rates[idx];
}
/*
* Get the minstrel rate statistics for specified STA and rate info.
*/
static struct minstrel_rate_stats *
minstrel_ht_ri_get_stats(struct minstrel_priv *mp, struct minstrel_ht_sta *mi,
struct ieee80211_rate_status *rate_status)
{
int group, idx;
struct rate_info *rate = &rate_status->rate_idx;
if (rate->flags & RATE_INFO_FLAGS_MCS) {
group = minstrel_ht_ri_get_group_idx(rate);
idx = rate->mcs % 8;
goto out;
}
if (rate->flags & RATE_INFO_FLAGS_VHT_MCS) {
group = minstrel_vht_ri_get_group_idx(rate);
idx = rate->mcs;
goto out;
}
group = MINSTREL_CCK_GROUP;
for (idx = 0; idx < ARRAY_SIZE(mp->cck_rates); idx++) {
if (rate->legacy != minstrel_cck_bitrates[ mp->cck_rates[idx] ])
continue;
/* short preamble */
if ((mi->supported[group] & BIT(idx + 4)) &&
mi->use_short_preamble)
idx += 4;
goto out;
}
group = MINSTREL_OFDM_GROUP;
for (idx = 0; idx < ARRAY_SIZE(mp->ofdm_rates[0]); idx++)
if (rate->legacy == minstrel_ofdm_bitrates[ mp->ofdm_rates[mi->band][idx] ])
goto out;
idx = 0;
out:
return &mi->groups[group].rates[idx];
}
static inline struct minstrel_rate_stats *
minstrel_get_ratestats(struct minstrel_ht_sta *mi, int index)
{
return &mi->groups[MI_RATE_GROUP(index)].rates[MI_RATE_IDX(index)];
}
static inline int minstrel_get_duration(int index)
{
const struct mcs_group *group = &minstrel_mcs_groups[MI_RATE_GROUP(index)];
unsigned int duration = group->duration[MI_RATE_IDX(index)];
return duration << group->shift;
}
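/*
 * Estimate the average A-MPDU length: use the measured moving average when
 * available, otherwise derive a rough guess from the airtime of the current
 * max throughput rate (slower rates aggregate fewer frames).
 */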
static unsigned int
minstrel_ht_avg_ampdu_len(struct minstrel_ht_sta *mi)
{
int duration;
if (mi->avg_ampdu_len)
return MINSTREL_TRUNC(mi->avg_ampdu_len);
if (minstrel_ht_is_legacy_group(MI_RATE_GROUP(mi->max_tp_rate[0])))
return 1;
duration = minstrel_get_duration(mi->max_tp_rate[0]);
if (duration > 400 * 1000)
return 2;
if (duration > 250 * 1000)
return 4;
if (duration > 150 * 1000)
return 8;
return 16;
}
/*
* Return current throughput based on the average A-MPDU length, taking into
* account the expected number of retransmissions and their expected length
*/
int
minstrel_ht_get_tp_avg(struct minstrel_ht_sta *mi, int group, int rate,
int prob_avg)
{
unsigned int nsecs = 0, overhead = mi->overhead;
unsigned int ampdu_len = 1;
/* do not account throughput if success prob is below 10% */
if (prob_avg < MINSTREL_FRAC(10, 100))
return 0;
if (minstrel_ht_is_legacy_group(group))
overhead = mi->overhead_legacy;
else
ampdu_len = minstrel_ht_avg_ampdu_len(mi);
nsecs = 1000 * overhead / ampdu_len;
nsecs += minstrel_mcs_groups[group].duration[rate] <<
minstrel_mcs_groups[group].shift;
/*
* For the throughput calculation, limit the probability value to 90% to
* account for collision related packet error rate fluctuation
* (prob is scaled - see MINSTREL_FRAC above)
*/
if (prob_avg > MINSTREL_FRAC(90, 100))
prob_avg = MINSTREL_FRAC(90, 100);
return MINSTREL_TRUNC(100 * ((prob_avg * 1000000) / nsecs));
}
/*
* Find & sort topmost throughput rates
*
* If multiple rates provide equal throughput the sorting is based on their
* current success probability. Higher success probability is preferred among
 * MCS groups; CCK rates do not support aggregation and are therefore sorted last.
*/
static void
minstrel_ht_sort_best_tp_rates(struct minstrel_ht_sta *mi, u16 index,
u16 *tp_list)
{
int cur_group, cur_idx, cur_tp_avg, cur_prob;
int tmp_group, tmp_idx, tmp_tp_avg, tmp_prob;
int j = MAX_THR_RATES;
cur_group = MI_RATE_GROUP(index);
cur_idx = MI_RATE_IDX(index);
cur_prob = mi->groups[cur_group].rates[cur_idx].prob_avg;
cur_tp_avg = minstrel_ht_get_tp_avg(mi, cur_group, cur_idx, cur_prob);
do {
tmp_group = MI_RATE_GROUP(tp_list[j - 1]);
tmp_idx = MI_RATE_IDX(tp_list[j - 1]);
tmp_prob = mi->groups[tmp_group].rates[tmp_idx].prob_avg;
tmp_tp_avg = minstrel_ht_get_tp_avg(mi, tmp_group, tmp_idx,
tmp_prob);
if (cur_tp_avg < tmp_tp_avg ||
(cur_tp_avg == tmp_tp_avg && cur_prob <= tmp_prob))
break;
j--;
} while (j > 0);
if (j < MAX_THR_RATES - 1) {
memmove(&tp_list[j + 1], &tp_list[j], (sizeof(*tp_list) *
(MAX_THR_RATES - (j + 1))));
}
if (j < MAX_THR_RATES)
tp_list[j] = index;
}
/*
* Find and set the topmost probability rate per sta and per group
*/
static void
minstrel_ht_set_best_prob_rate(struct minstrel_ht_sta *mi, u16 *dest, u16 index)
{
struct minstrel_mcs_group_data *mg;
struct minstrel_rate_stats *mrs;
int tmp_group, tmp_idx, tmp_tp_avg, tmp_prob;
int max_tp_group, max_tp_idx, max_tp_prob;
int cur_tp_avg, cur_group, cur_idx;
int max_gpr_group, max_gpr_idx;
int max_gpr_tp_avg, max_gpr_prob;
cur_group = MI_RATE_GROUP(index);
cur_idx = MI_RATE_IDX(index);
mg = &mi->groups[cur_group];
mrs = &mg->rates[cur_idx];
tmp_group = MI_RATE_GROUP(*dest);
tmp_idx = MI_RATE_IDX(*dest);
tmp_prob = mi->groups[tmp_group].rates[tmp_idx].prob_avg;
tmp_tp_avg = minstrel_ht_get_tp_avg(mi, tmp_group, tmp_idx, tmp_prob);
	/* If max_tp_rate[0] is from an MCS group, max_prob_rate must also be
	 * selected from an MCS group, since CCK rates do not allow aggregation.
	 */
max_tp_group = MI_RATE_GROUP(mi->max_tp_rate[0]);
max_tp_idx = MI_RATE_IDX(mi->max_tp_rate[0]);
max_tp_prob = mi->groups[max_tp_group].rates[max_tp_idx].prob_avg;
if (minstrel_ht_is_legacy_group(MI_RATE_GROUP(index)) &&
!minstrel_ht_is_legacy_group(max_tp_group))
return;
/* skip rates faster than max tp rate with lower prob */
if (minstrel_get_duration(mi->max_tp_rate[0]) > minstrel_get_duration(index) &&
mrs->prob_avg < max_tp_prob)
return;
max_gpr_group = MI_RATE_GROUP(mg->max_group_prob_rate);
max_gpr_idx = MI_RATE_IDX(mg->max_group_prob_rate);
max_gpr_prob = mi->groups[max_gpr_group].rates[max_gpr_idx].prob_avg;
if (mrs->prob_avg > MINSTREL_FRAC(75, 100)) {
cur_tp_avg = minstrel_ht_get_tp_avg(mi, cur_group, cur_idx,
mrs->prob_avg);
if (cur_tp_avg > tmp_tp_avg)
*dest = index;
max_gpr_tp_avg = minstrel_ht_get_tp_avg(mi, max_gpr_group,
max_gpr_idx,
max_gpr_prob);
if (cur_tp_avg > max_gpr_tp_avg)
mg->max_group_prob_rate = index;
} else {
if (mrs->prob_avg > tmp_prob)
*dest = index;
if (mrs->prob_avg > max_gpr_prob)
mg->max_group_prob_rate = index;
}
}
/*
* Assign new rate set per sta and use CCK rates only if the fastest
 * rate (max_tp_rate[0]) is from the CCK group. This prevents sorted
 * rate sets in which MCS and CCK rates are mixed, because CCK rates
 * cannot use aggregation.
*/
static void
minstrel_ht_assign_best_tp_rates(struct minstrel_ht_sta *mi,
u16 tmp_mcs_tp_rate[MAX_THR_RATES],
u16 tmp_legacy_tp_rate[MAX_THR_RATES])
{
unsigned int tmp_group, tmp_idx, tmp_cck_tp, tmp_mcs_tp, tmp_prob;
int i;
tmp_group = MI_RATE_GROUP(tmp_legacy_tp_rate[0]);
tmp_idx = MI_RATE_IDX(tmp_legacy_tp_rate[0]);
tmp_prob = mi->groups[tmp_group].rates[tmp_idx].prob_avg;
tmp_cck_tp = minstrel_ht_get_tp_avg(mi, tmp_group, tmp_idx, tmp_prob);
tmp_group = MI_RATE_GROUP(tmp_mcs_tp_rate[0]);
tmp_idx = MI_RATE_IDX(tmp_mcs_tp_rate[0]);
tmp_prob = mi->groups[tmp_group].rates[tmp_idx].prob_avg;
tmp_mcs_tp = minstrel_ht_get_tp_avg(mi, tmp_group, tmp_idx, tmp_prob);
if (tmp_cck_tp > tmp_mcs_tp) {
for(i = 0; i < MAX_THR_RATES; i++) {
minstrel_ht_sort_best_tp_rates(mi, tmp_legacy_tp_rate[i],
tmp_mcs_tp_rate);
}
}
}
/*
 * Try to increase robustness of the max_prob rate by decreasing the number
 * of streams if possible.
*/
static inline void
minstrel_ht_prob_rate_reduce_streams(struct minstrel_ht_sta *mi)
{
struct minstrel_mcs_group_data *mg;
int tmp_max_streams, group, tmp_idx, tmp_prob;
int tmp_tp = 0;
if (!mi->sta->deflink.ht_cap.ht_supported)
return;
group = MI_RATE_GROUP(mi->max_tp_rate[0]);
tmp_max_streams = minstrel_mcs_groups[group].streams;
for (group = 0; group < ARRAY_SIZE(minstrel_mcs_groups); group++) {
mg = &mi->groups[group];
if (!mi->supported[group] || group == MINSTREL_CCK_GROUP)
continue;
tmp_idx = MI_RATE_IDX(mg->max_group_prob_rate);
tmp_prob = mi->groups[group].rates[tmp_idx].prob_avg;
if (tmp_tp < minstrel_ht_get_tp_avg(mi, group, tmp_idx, tmp_prob) &&
(minstrel_mcs_groups[group].streams < tmp_max_streams)) {
mi->max_prob_rate = mg->max_group_prob_rate;
tmp_tp = minstrel_ht_get_tp_avg(mi, group,
tmp_idx,
tmp_prob);
}
}
}
static u16
__minstrel_ht_get_sample_rate(struct minstrel_ht_sta *mi,
enum minstrel_sample_type type)
{
u16 *rates = mi->sample[type].sample_rates;
u16 cur;
int i;
for (i = 0; i < MINSTREL_SAMPLE_RATES; i++) {
if (!rates[i])
continue;
cur = rates[i];
rates[i] = 0;
return cur;
}
return 0;
}
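/*
 * Exponentially weighted moving average: pull the old value towards the new
 * sample by a fraction of (EWMA_DIV - weight) / EWMA_DIV.
 */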
static inline int
minstrel_ewma(int old, int new, int weight)
{
int diff, incr;
diff = new - old;
incr = (EWMA_DIV - weight) * diff / EWMA_DIV;
return old + incr;
}
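/*
 * Smoothing filter for the success probability: a second-order IIR filter
 * over the scaled probability values, clamped to the valid MINSTREL_SCALE
 * range. prev_1 and prev_2 hold the two previous filter outputs.
 */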
static inline int minstrel_filter_avg_add(u16 *prev_1, u16 *prev_2, s32 in)
{
s32 out_1 = *prev_1;
s32 out_2 = *prev_2;
s32 val;
if (!in)
in += 1;
if (!out_1) {
val = out_1 = in;
goto out;
}
val = MINSTREL_AVG_COEFF1 * in;
val += MINSTREL_AVG_COEFF2 * out_1;
val += MINSTREL_AVG_COEFF3 * out_2;
val >>= MINSTREL_SCALE;
if (val > 1 << MINSTREL_SCALE)
val = 1 << MINSTREL_SCALE;
if (val < 0)
val = 1;
out:
*prev_2 = out_1;
*prev_1 = val;
return val;
}
/*
* Recalculate statistics and counters of a given rate
*/
static void
minstrel_ht_calc_rate_stats(struct minstrel_priv *mp,
struct minstrel_rate_stats *mrs)
{
unsigned int cur_prob;
if (unlikely(mrs->attempts > 0)) {
cur_prob = MINSTREL_FRAC(mrs->success, mrs->attempts);
minstrel_filter_avg_add(&mrs->prob_avg,
&mrs->prob_avg_1, cur_prob);
mrs->att_hist += mrs->attempts;
mrs->succ_hist += mrs->success;
}
mrs->last_success = mrs->success;
mrs->last_attempts = mrs->attempts;
mrs->success = 0;
mrs->attempts = 0;
}
static bool
minstrel_ht_find_sample_rate(struct minstrel_ht_sta *mi, int type, int idx)
{
int i;
for (i = 0; i < MINSTREL_SAMPLE_RATES; i++) {
u16 cur = mi->sample[type].sample_rates[i];
if (cur == idx)
return true;
if (!cur)
break;
}
return false;
}
static int
minstrel_ht_move_sample_rates(struct minstrel_ht_sta *mi, int type,
u32 fast_rate_dur, u32 slow_rate_dur)
{
u16 *rates = mi->sample[type].sample_rates;
int i, j;
for (i = 0, j = 0; i < MINSTREL_SAMPLE_RATES; i++) {
u32 duration;
bool valid = false;
u16 cur;
cur = rates[i];
if (!cur)
continue;
duration = minstrel_get_duration(cur);
switch (type) {
case MINSTREL_SAMPLE_TYPE_SLOW:
valid = duration > fast_rate_dur &&
duration < slow_rate_dur;
break;
case MINSTREL_SAMPLE_TYPE_INC:
case MINSTREL_SAMPLE_TYPE_JUMP:
valid = duration < fast_rate_dur;
break;
default:
valid = false;
break;
}
if (!valid) {
rates[i] = 0;
continue;
}
if (i == j)
continue;
rates[j++] = cur;
rates[i] = 0;
}
return j;
}
static int
minstrel_ht_group_min_rate_offset(struct minstrel_ht_sta *mi, int group,
u32 max_duration)
{
u16 supported = mi->supported[group];
int i;
for (i = 0; i < MCS_GROUP_RATES && supported; i++, supported >>= 1) {
if (!(supported & BIT(0)))
continue;
if (minstrel_get_duration(MI_RATE(group, i)) >= max_duration)
continue;
return i;
}
return -1;
}
/*
* Incremental update rates:
* Flip through groups and pick the first group rate that is faster than the
* highest currently selected rate
*/
static u16
minstrel_ht_next_inc_rate(struct minstrel_ht_sta *mi, u32 fast_rate_dur)
{
u8 type = MINSTREL_SAMPLE_TYPE_INC;
int i, index = 0;
u8 group;
group = mi->sample[type].sample_group;
for (i = 0; i < ARRAY_SIZE(minstrel_mcs_groups); i++) {
group = (group + 1) % ARRAY_SIZE(minstrel_mcs_groups);
index = minstrel_ht_group_min_rate_offset(mi, group,
fast_rate_dur);
if (index < 0)
continue;
index = MI_RATE(group, index & 0xf);
if (!minstrel_ht_find_sample_rate(mi, type, index))
goto out;
}
index = 0;
out:
mi->sample[type].sample_group = group;
return index;
}
static int
minstrel_ht_next_group_sample_rate(struct minstrel_ht_sta *mi, int group,
u16 supported, int offset)
{
struct minstrel_mcs_group_data *mg = &mi->groups[group];
u16 idx;
int i;
for (i = 0; i < MCS_GROUP_RATES; i++) {
idx = sample_table[mg->column][mg->index];
if (++mg->index >= MCS_GROUP_RATES) {
mg->index = 0;
if (++mg->column >= ARRAY_SIZE(sample_table))
mg->column = 0;
}
if (idx < offset)
continue;
if (!(supported & BIT(idx)))
continue;
return MI_RATE(group, idx);
}
return -1;
}
/*
* Jump rates:
* Sample random rates, use those that are faster than the highest
* currently selected rate. Rates between the fastest and the slowest
* get sorted into the slow sample bucket, but only if it has room
*/
static u16
minstrel_ht_next_jump_rate(struct minstrel_ht_sta *mi, u32 fast_rate_dur,
u32 slow_rate_dur, int *slow_rate_ofs)
{
struct minstrel_rate_stats *mrs;
u32 max_duration = slow_rate_dur;
int i, index, offset;
u16 *slow_rates;
u16 supported;
u32 duration;
u8 group;
if (*slow_rate_ofs >= MINSTREL_SAMPLE_RATES)
max_duration = fast_rate_dur;
slow_rates = mi->sample[MINSTREL_SAMPLE_TYPE_SLOW].sample_rates;
group = mi->sample[MINSTREL_SAMPLE_TYPE_JUMP].sample_group;
for (i = 0; i < ARRAY_SIZE(minstrel_mcs_groups); i++) {
u8 type;
group = (group + 1) % ARRAY_SIZE(minstrel_mcs_groups);
supported = mi->supported[group];
if (!supported)
continue;
offset = minstrel_ht_group_min_rate_offset(mi, group,
max_duration);
if (offset < 0)
continue;
index = minstrel_ht_next_group_sample_rate(mi, group, supported,
offset);
if (index < 0)
continue;
duration = minstrel_get_duration(index);
if (duration < fast_rate_dur)
type = MINSTREL_SAMPLE_TYPE_JUMP;
else
type = MINSTREL_SAMPLE_TYPE_SLOW;
if (minstrel_ht_find_sample_rate(mi, type, index))
continue;
if (type == MINSTREL_SAMPLE_TYPE_JUMP)
goto found;
if (*slow_rate_ofs >= MINSTREL_SAMPLE_RATES)
continue;
if (duration >= slow_rate_dur)
continue;
/* skip slow rates with high success probability */
mrs = minstrel_get_ratestats(mi, index);
if (mrs->prob_avg > MINSTREL_FRAC(95, 100))
continue;
slow_rates[(*slow_rate_ofs)++] = index;
if (*slow_rate_ofs >= MINSTREL_SAMPLE_RATES)
max_duration = fast_rate_dur;
}
index = 0;
found:
mi->sample[MINSTREL_SAMPLE_TYPE_JUMP].sample_group = group;
return index;
}
static void
minstrel_ht_refill_sample_rates(struct minstrel_ht_sta *mi)
{
u32 prob_dur = minstrel_get_duration(mi->max_prob_rate);
u32 tp_dur = minstrel_get_duration(mi->max_tp_rate[0]);
u32 tp2_dur = minstrel_get_duration(mi->max_tp_rate[1]);
u32 fast_rate_dur = min(min(tp_dur, tp2_dur), prob_dur);
u32 slow_rate_dur = max(max(tp_dur, tp2_dur), prob_dur);
u16 *rates;
int i, j;
rates = mi->sample[MINSTREL_SAMPLE_TYPE_INC].sample_rates;
i = minstrel_ht_move_sample_rates(mi, MINSTREL_SAMPLE_TYPE_INC,
fast_rate_dur, slow_rate_dur);
while (i < MINSTREL_SAMPLE_RATES) {
rates[i] = minstrel_ht_next_inc_rate(mi, tp_dur);
if (!rates[i])
break;
i++;
}
rates = mi->sample[MINSTREL_SAMPLE_TYPE_JUMP].sample_rates;
i = minstrel_ht_move_sample_rates(mi, MINSTREL_SAMPLE_TYPE_JUMP,
fast_rate_dur, slow_rate_dur);
j = minstrel_ht_move_sample_rates(mi, MINSTREL_SAMPLE_TYPE_SLOW,
fast_rate_dur, slow_rate_dur);
while (i < MINSTREL_SAMPLE_RATES) {
rates[i] = minstrel_ht_next_jump_rate(mi, fast_rate_dur,
slow_rate_dur, &j);
if (!rates[i])
break;
i++;
}
for (i = 0; i < ARRAY_SIZE(mi->sample); i++)
memcpy(mi->sample[i].cur_sample_rates, mi->sample[i].sample_rates,
sizeof(mi->sample[i].cur_sample_rates));
}
/*
* Update rate statistics and select new primary rates
*
* Rules for rate selection:
* - max_prob_rate must use only one stream, as a tradeoff between delivery
* probability and throughput during strong fluctuations
* - as long as the max prob rate has a probability of more than 75%, pick
 * higher throughput rates, even if the probability is a bit lower
*/
static void
minstrel_ht_update_stats(struct minstrel_priv *mp, struct minstrel_ht_sta *mi)
{
struct minstrel_mcs_group_data *mg;
struct minstrel_rate_stats *mrs;
int group, i, j, cur_prob;
u16 tmp_mcs_tp_rate[MAX_THR_RATES], tmp_group_tp_rate[MAX_THR_RATES];
u16 tmp_legacy_tp_rate[MAX_THR_RATES], tmp_max_prob_rate;
u16 index;
bool ht_supported = mi->sta->deflink.ht_cap.ht_supported;
if (mi->ampdu_packets > 0) {
if (!ieee80211_hw_check(mp->hw, TX_STATUS_NO_AMPDU_LEN))
mi->avg_ampdu_len = minstrel_ewma(mi->avg_ampdu_len,
MINSTREL_FRAC(mi->ampdu_len, mi->ampdu_packets),
EWMA_LEVEL);
else
mi->avg_ampdu_len = 0;
mi->ampdu_len = 0;
mi->ampdu_packets = 0;
}
if (mi->supported[MINSTREL_CCK_GROUP])
group = MINSTREL_CCK_GROUP;
else if (mi->supported[MINSTREL_OFDM_GROUP])
group = MINSTREL_OFDM_GROUP;
else
group = 0;
index = MI_RATE(group, 0);
for (j = 0; j < ARRAY_SIZE(tmp_legacy_tp_rate); j++)
tmp_legacy_tp_rate[j] = index;
if (mi->supported[MINSTREL_VHT_GROUP_0])
group = MINSTREL_VHT_GROUP_0;
else if (ht_supported)
group = MINSTREL_HT_GROUP_0;
else if (mi->supported[MINSTREL_CCK_GROUP])
group = MINSTREL_CCK_GROUP;
else
group = MINSTREL_OFDM_GROUP;
index = MI_RATE(group, 0);
tmp_max_prob_rate = index;
for (j = 0; j < ARRAY_SIZE(tmp_mcs_tp_rate); j++)
tmp_mcs_tp_rate[j] = index;
	/* Find best rate sets within all MCS groups */
for (group = 0; group < ARRAY_SIZE(minstrel_mcs_groups); group++) {
u16 *tp_rate = tmp_mcs_tp_rate;
u16 last_prob = 0;
mg = &mi->groups[group];
if (!mi->supported[group])
continue;
/* (re)Initialize group rate indexes */
for(j = 0; j < MAX_THR_RATES; j++)
tmp_group_tp_rate[j] = MI_RATE(group, 0);
if (group == MINSTREL_CCK_GROUP && ht_supported)
tp_rate = tmp_legacy_tp_rate;
for (i = MCS_GROUP_RATES - 1; i >= 0; i--) {
if (!(mi->supported[group] & BIT(i)))
continue;
index = MI_RATE(group, i);
mrs = &mg->rates[i];
mrs->retry_updated = false;
minstrel_ht_calc_rate_stats(mp, mrs);
if (mrs->att_hist)
last_prob = max(last_prob, mrs->prob_avg);
else
mrs->prob_avg = max(last_prob, mrs->prob_avg);
cur_prob = mrs->prob_avg;
if (minstrel_ht_get_tp_avg(mi, group, i, cur_prob) == 0)
continue;
/* Find max throughput rate set */
minstrel_ht_sort_best_tp_rates(mi, index, tp_rate);
/* Find max throughput rate set within a group */
minstrel_ht_sort_best_tp_rates(mi, index,
tmp_group_tp_rate);
}
memcpy(mg->max_group_tp_rate, tmp_group_tp_rate,
sizeof(mg->max_group_tp_rate));
}
/* Assign new rate set per sta */
minstrel_ht_assign_best_tp_rates(mi, tmp_mcs_tp_rate,
tmp_legacy_tp_rate);
memcpy(mi->max_tp_rate, tmp_mcs_tp_rate, sizeof(mi->max_tp_rate));
for (group = 0; group < ARRAY_SIZE(minstrel_mcs_groups); group++) {
if (!mi->supported[group])
continue;
mg = &mi->groups[group];
mg->max_group_prob_rate = MI_RATE(group, 0);
for (i = 0; i < MCS_GROUP_RATES; i++) {
if (!(mi->supported[group] & BIT(i)))
continue;
index = MI_RATE(group, i);
/* Find max probability rate per group and global */
minstrel_ht_set_best_prob_rate(mi, &tmp_max_prob_rate,
index);
}
}
mi->max_prob_rate = tmp_max_prob_rate;
	/* Try to increase robustness of max_prob_rate */
minstrel_ht_prob_rate_reduce_streams(mi);
minstrel_ht_refill_sample_rates(mi);
#ifdef CONFIG_MAC80211_DEBUGFS
/* use fixed index if set */
if (mp->fixed_rate_idx != -1) {
for (i = 0; i < 4; i++)
mi->max_tp_rate[i] = mp->fixed_rate_idx;
mi->max_prob_rate = mp->fixed_rate_idx;
}
#endif
/* Reset update timer */
mi->last_stats_update = jiffies;
mi->sample_time = jiffies;
}
static bool
minstrel_ht_txstat_valid(struct minstrel_priv *mp, struct minstrel_ht_sta *mi,
struct ieee80211_tx_rate *rate)
{
int i;
if (rate->idx < 0)
return false;
if (!rate->count)
return false;
if (rate->flags & IEEE80211_TX_RC_MCS ||
rate->flags & IEEE80211_TX_RC_VHT_MCS)
return true;
for (i = 0; i < ARRAY_SIZE(mp->cck_rates); i++)
if (rate->idx == mp->cck_rates[i])
return true;
for (i = 0; i < ARRAY_SIZE(mp->ofdm_rates[0]); i++)
if (rate->idx == mp->ofdm_rates[mi->band][i])
return true;
return false;
}
/*
* Check whether rate_status contains valid information.
*/
static bool
minstrel_ht_ri_txstat_valid(struct minstrel_priv *mp,
struct minstrel_ht_sta *mi,
struct ieee80211_rate_status *rate_status)
{
int i;
if (!rate_status)
return false;
if (!rate_status->try_count)
return false;
if (rate_status->rate_idx.flags & RATE_INFO_FLAGS_MCS ||
rate_status->rate_idx.flags & RATE_INFO_FLAGS_VHT_MCS)
return true;
for (i = 0; i < ARRAY_SIZE(mp->cck_rates); i++) {
if (rate_status->rate_idx.legacy ==
minstrel_cck_bitrates[ mp->cck_rates[i] ])
return true;
}
	for (i = 0; i < ARRAY_SIZE(mp->ofdm_rates[0]); i++) {
if (rate_status->rate_idx.legacy ==
minstrel_ofdm_bitrates[ mp->ofdm_rates[mi->band][i] ])
return true;
}
return false;
}
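/*
 * Fall back to a group with no more spatial streams than the current one
 * when a rate suddenly stops working (see the "sudden death" check in
 * minstrel_ht_tx_status()).
 */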
static void
minstrel_downgrade_rate(struct minstrel_ht_sta *mi, u16 *idx, bool primary)
{
int group, orig_group;
orig_group = group = MI_RATE_GROUP(*idx);
while (group > 0) {
group--;
if (!mi->supported[group])
continue;
if (minstrel_mcs_groups[group].streams >
minstrel_mcs_groups[orig_group].streams)
continue;
if (primary)
*idx = mi->groups[group].max_group_tp_rate[0];
else
*idx = mi->groups[group].max_group_tp_rate[1];
break;
}
}
static void
minstrel_ht_tx_status(void *priv, struct ieee80211_supported_band *sband,
void *priv_sta, struct ieee80211_tx_status *st)
{
struct ieee80211_tx_info *info = st->info;
struct minstrel_ht_sta *mi = priv_sta;
struct ieee80211_tx_rate *ar = info->status.rates;
struct minstrel_rate_stats *rate, *rate2;
struct minstrel_priv *mp = priv;
u32 update_interval = mp->update_interval;
bool last, update = false;
int i;
/* Ignore packet that was sent with noAck flag */
if (info->flags & IEEE80211_TX_CTL_NO_ACK)
return;
/* This packet was aggregated but doesn't carry status info */
if ((info->flags & IEEE80211_TX_CTL_AMPDU) &&
!(info->flags & IEEE80211_TX_STAT_AMPDU))
return;
if (!(info->flags & IEEE80211_TX_STAT_AMPDU)) {
info->status.ampdu_ack_len =
(info->flags & IEEE80211_TX_STAT_ACK ? 1 : 0);
info->status.ampdu_len = 1;
}
/* wraparound */
if (mi->total_packets >= ~0 - info->status.ampdu_len) {
mi->total_packets = 0;
mi->sample_packets = 0;
}
mi->total_packets += info->status.ampdu_len;
if (info->flags & IEEE80211_TX_CTL_RATE_CTRL_PROBE)
mi->sample_packets += info->status.ampdu_len;
mi->ampdu_packets++;
mi->ampdu_len += info->status.ampdu_len;
if (st->rates && st->n_rates) {
last = !minstrel_ht_ri_txstat_valid(mp, mi, &(st->rates[0]));
for (i = 0; !last; i++) {
last = (i == st->n_rates - 1) ||
!minstrel_ht_ri_txstat_valid(mp, mi,
&(st->rates[i + 1]));
rate = minstrel_ht_ri_get_stats(mp, mi,
&(st->rates[i]));
if (last)
rate->success += info->status.ampdu_ack_len;
rate->attempts += st->rates[i].try_count *
info->status.ampdu_len;
}
} else {
last = !minstrel_ht_txstat_valid(mp, mi, &ar[0]);
for (i = 0; !last; i++) {
last = (i == IEEE80211_TX_MAX_RATES - 1) ||
!minstrel_ht_txstat_valid(mp, mi, &ar[i + 1]);
rate = minstrel_ht_get_stats(mp, mi, &ar[i]);
if (last)
rate->success += info->status.ampdu_ack_len;
rate->attempts += ar[i].count * info->status.ampdu_len;
}
}
if (mp->hw->max_rates > 1) {
/*
* check for sudden death of spatial multiplexing,
* downgrade to a lower number of streams if necessary.
*/
rate = minstrel_get_ratestats(mi, mi->max_tp_rate[0]);
if (rate->attempts > 30 &&
rate->success < rate->attempts / 4) {
minstrel_downgrade_rate(mi, &mi->max_tp_rate[0], true);
update = true;
}
rate2 = minstrel_get_ratestats(mi, mi->max_tp_rate[1]);
if (rate2->attempts > 30 &&
rate2->success < rate2->attempts / 4) {
minstrel_downgrade_rate(mi, &mi->max_tp_rate[1], false);
update = true;
}
}
if (time_after(jiffies, mi->last_stats_update + update_interval)) {
update = true;
minstrel_ht_update_stats(mp, mi);
}
if (update)
minstrel_ht_update_rates(mp, mi);
}
static void
minstrel_calc_retransmit(struct minstrel_priv *mp, struct minstrel_ht_sta *mi,
int index)
{
struct minstrel_rate_stats *mrs;
unsigned int tx_time, tx_time_rtscts, tx_time_data;
unsigned int cw = mp->cw_min;
unsigned int ctime = 0;
unsigned int t_slot = 9; /* FIXME */
unsigned int ampdu_len = minstrel_ht_avg_ampdu_len(mi);
unsigned int overhead = 0, overhead_rtscts = 0;
mrs = minstrel_get_ratestats(mi, index);
if (mrs->prob_avg < MINSTREL_FRAC(1, 10)) {
mrs->retry_count = 1;
mrs->retry_count_rtscts = 1;
return;
}
mrs->retry_count = 2;
mrs->retry_count_rtscts = 2;
mrs->retry_updated = true;
tx_time_data = minstrel_get_duration(index) * ampdu_len / 1000;
/* Contention time for first 2 tries */
ctime = (t_slot * cw) >> 1;
cw = min((cw << 1) | 1, mp->cw_max);
ctime += (t_slot * cw) >> 1;
cw = min((cw << 1) | 1, mp->cw_max);
if (minstrel_ht_is_legacy_group(MI_RATE_GROUP(index))) {
overhead = mi->overhead_legacy;
overhead_rtscts = mi->overhead_legacy_rtscts;
} else {
overhead = mi->overhead;
overhead_rtscts = mi->overhead_rtscts;
}
/* Total TX time for data and Contention after first 2 tries */
tx_time = ctime + 2 * (overhead + tx_time_data);
tx_time_rtscts = ctime + 2 * (overhead_rtscts + tx_time_data);
/* See how many more tries we can fit inside segment size */
do {
/* Contention time for this try */
ctime = (t_slot * cw) >> 1;
cw = min((cw << 1) | 1, mp->cw_max);
/* Total TX time after this try */
tx_time += ctime + overhead + tx_time_data;
tx_time_rtscts += ctime + overhead_rtscts + tx_time_data;
if (tx_time_rtscts < mp->segment_size)
mrs->retry_count_rtscts++;
} while ((tx_time < mp->segment_size) &&
(++mrs->retry_count < mp->max_retry));
}
static void
minstrel_ht_set_rate(struct minstrel_priv *mp, struct minstrel_ht_sta *mi,
struct ieee80211_sta_rates *ratetbl, int offset, int index)
{
int group_idx = MI_RATE_GROUP(index);
const struct mcs_group *group = &minstrel_mcs_groups[group_idx];
struct minstrel_rate_stats *mrs;
u8 idx;
u16 flags = group->flags;
mrs = minstrel_get_ratestats(mi, index);
if (!mrs->retry_updated)
minstrel_calc_retransmit(mp, mi, index);
if (mrs->prob_avg < MINSTREL_FRAC(20, 100) || !mrs->retry_count) {
ratetbl->rate[offset].count = 2;
ratetbl->rate[offset].count_rts = 2;
ratetbl->rate[offset].count_cts = 2;
} else {
ratetbl->rate[offset].count = mrs->retry_count;
ratetbl->rate[offset].count_cts = mrs->retry_count;
ratetbl->rate[offset].count_rts = mrs->retry_count_rtscts;
}
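	/* Map the group-relative index to the hardware rate index: legacy
	 * groups use the per-band rate tables, VHT packs (streams - 1) into
	 * the upper nibble, HT offsets by 8 MCS indexes per extra stream.
	 */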
index = MI_RATE_IDX(index);
if (group_idx == MINSTREL_CCK_GROUP)
idx = mp->cck_rates[index % ARRAY_SIZE(mp->cck_rates)];
else if (group_idx == MINSTREL_OFDM_GROUP)
idx = mp->ofdm_rates[mi->band][index %
ARRAY_SIZE(mp->ofdm_rates[0])];
else if (flags & IEEE80211_TX_RC_VHT_MCS)
idx = ((group->streams - 1) << 4) |
(index & 0xF);
else
idx = index + (group->streams - 1) * 8;
/* enable RTS/CTS if needed:
* - if station is in dynamic SMPS (and streams > 1)
* - for fallback rates, to increase chances of getting through
*/
if (offset > 0 ||
(mi->sta->deflink.smps_mode == IEEE80211_SMPS_DYNAMIC &&
group->streams > 1)) {
ratetbl->rate[offset].count = ratetbl->rate[offset].count_rts;
flags |= IEEE80211_TX_RC_USE_RTS_CTS;
}
ratetbl->rate[offset].idx = idx;
ratetbl->rate[offset].flags = flags;
}
static inline int
minstrel_ht_get_prob_avg(struct minstrel_ht_sta *mi, int rate)
{
int group = MI_RATE_GROUP(rate);
rate = MI_RATE_IDX(rate);
return mi->groups[group].rates[rate].prob_avg;
}
static int
minstrel_ht_get_max_amsdu_len(struct minstrel_ht_sta *mi)
{
int group = MI_RATE_GROUP(mi->max_prob_rate);
const struct mcs_group *g = &minstrel_mcs_groups[group];
int rate = MI_RATE_IDX(mi->max_prob_rate);
unsigned int duration;
/* Disable A-MSDU if max_prob_rate is bad */
if (mi->groups[group].rates[rate].prob_avg < MINSTREL_FRAC(50, 100))
return 1;
duration = g->duration[rate];
duration <<= g->shift;
/* If the rate is slower than single-stream MCS1, make A-MSDU limit small */
if (duration > MCS_DURATION(1, 0, 52))
return 500;
	/*
	 * If the rate is slower than single-stream MCS4, limit A-MSDU to the
	 * usual data packet size
	 */
if (duration > MCS_DURATION(1, 0, 104))
return 1600;
/*
* If the rate is slower than single-stream MCS7, or if the max throughput
* rate success probability is less than 75%, limit A-MSDU to twice the usual
* data packet size
*/
if (duration > MCS_DURATION(1, 0, 260) ||
(minstrel_ht_get_prob_avg(mi, mi->max_tp_rate[0]) <
MINSTREL_FRAC(75, 100)))
return 3200;
/*
* HT A-MPDU limits maximum MPDU size under BA agreement to 4095 bytes.
* Since aggregation sessions are started/stopped without txq flush, use
* the limit here to avoid the complexity of having to de-aggregate
* packets in the queue.
*/
if (!mi->sta->deflink.vht_cap.vht_supported)
return IEEE80211_MAX_MPDU_LEN_HT_BA;
/* unlimited */
return 0;
}
static void
minstrel_ht_update_rates(struct minstrel_priv *mp, struct minstrel_ht_sta *mi)
{
struct ieee80211_sta_rates *rates;
int i = 0;
int max_rates = min_t(int, mp->hw->max_rates, IEEE80211_TX_RATE_TABLE_SIZE);
rates = kzalloc(sizeof(*rates), GFP_ATOMIC);
if (!rates)
return;
/* Start with max_tp_rate[0] */
minstrel_ht_set_rate(mp, mi, rates, i++, mi->max_tp_rate[0]);
	/* Fill up the remaining slots, keeping one entry for max_prob_rate */
for (; i < (max_rates - 1); i++)
minstrel_ht_set_rate(mp, mi, rates, i, mi->max_tp_rate[i]);
if (i < max_rates)
minstrel_ht_set_rate(mp, mi, rates, i++, mi->max_prob_rate);
if (i < IEEE80211_TX_RATE_TABLE_SIZE)
rates->rate[i].idx = -1;
mi->sta->deflink.agg.max_rc_amsdu_len = minstrel_ht_get_max_amsdu_len(mi);
ieee80211_sta_recalc_aggregates(mi->sta);
rate_control_set_rates(mp->hw, mi->sta, rates);
}
static u16
minstrel_ht_get_sample_rate(struct minstrel_priv *mp, struct minstrel_ht_sta *mi)
{
u8 seq;
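	/* Multi-rate-retry hardware cycles through the sample type sequence;
	 * single-rate hardware only samples the MINSTREL_SAMPLE_TYPE_INC set.
	 */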
if (mp->hw->max_rates > 1) {
seq = mi->sample_seq;
mi->sample_seq = (seq + 1) % ARRAY_SIZE(minstrel_sample_seq);
seq = minstrel_sample_seq[seq];
} else {
seq = MINSTREL_SAMPLE_TYPE_INC;
}
return __minstrel_ht_get_sample_rate(mi, seq);
}
static void
minstrel_ht_get_rate(void *priv, struct ieee80211_sta *sta, void *priv_sta,
struct ieee80211_tx_rate_control *txrc)
{
const struct mcs_group *sample_group;
struct ieee80211_tx_info *info = IEEE80211_SKB_CB(txrc->skb);
struct ieee80211_tx_rate *rate = &info->status.rates[0];
struct minstrel_ht_sta *mi = priv_sta;
struct minstrel_priv *mp = priv;
u16 sample_idx;
info->flags |= mi->tx_flags;
#ifdef CONFIG_MAC80211_DEBUGFS
if (mp->fixed_rate_idx != -1)
return;
#endif
/* Don't use EAPOL frames for sampling on non-mrr hw */
if (mp->hw->max_rates == 1 &&
(info->control.flags & IEEE80211_TX_CTRL_PORT_CTRL_PROTO))
return;
if (time_is_after_jiffies(mi->sample_time))
return;
mi->sample_time = jiffies + MINSTREL_SAMPLE_INTERVAL;
sample_idx = minstrel_ht_get_sample_rate(mp, mi);
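	/* a zero index means no sample rate is queued for this round */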
if (!sample_idx)
return;
sample_group = &minstrel_mcs_groups[MI_RATE_GROUP(sample_idx)];
sample_idx = MI_RATE_IDX(sample_idx);
if (sample_group == &minstrel_mcs_groups[MINSTREL_CCK_GROUP] &&
(sample_idx >= 4) != txrc->short_preamble)
return;
info->flags |= IEEE80211_TX_CTL_RATE_CTRL_PROBE;
rate->count = 1;
if (sample_group == &minstrel_mcs_groups[MINSTREL_CCK_GROUP]) {
int idx = sample_idx % ARRAY_SIZE(mp->cck_rates);
rate->idx = mp->cck_rates[idx];
} else if (sample_group == &minstrel_mcs_groups[MINSTREL_OFDM_GROUP]) {
int idx = sample_idx % ARRAY_SIZE(mp->ofdm_rates[0]);
rate->idx = mp->ofdm_rates[mi->band][idx];
} else if (sample_group->flags & IEEE80211_TX_RC_VHT_MCS) {
ieee80211_rate_set_vht(rate, MI_RATE_IDX(sample_idx),
sample_group->streams);
} else {
rate->idx = sample_idx + (sample_group->streams - 1) * 8;
}
rate->flags = sample_group->flags;
}
static void
minstrel_ht_update_cck(struct minstrel_priv *mp, struct minstrel_ht_sta *mi,
struct ieee80211_supported_band *sband,
struct ieee80211_sta *sta)
{
int i;
if (sband->band != NL80211_BAND_2GHZ)
return;
if (sta->deflink.ht_cap.ht_supported &&
!ieee80211_hw_check(mp->hw, SUPPORTS_HT_CCK_RATES))
return;
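	/* bits 0-3 track the long-preamble CCK rates, bits 4-7 their
	 * short-preamble variants
	 */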
for (i = 0; i < 4; i++) {
if (mp->cck_rates[i] == 0xff ||
!rate_supported(sta, sband->band, mp->cck_rates[i]))
continue;
mi->supported[MINSTREL_CCK_GROUP] |= BIT(i);
if (sband->bitrates[i].flags & IEEE80211_RATE_SHORT_PREAMBLE)
mi->supported[MINSTREL_CCK_GROUP] |= BIT(i + 4);
}
}
static void
minstrel_ht_update_ofdm(struct minstrel_priv *mp, struct minstrel_ht_sta *mi,
struct ieee80211_supported_band *sband,
struct ieee80211_sta *sta)
{
const u8 *rates;
int i;
if (sta->deflink.ht_cap.ht_supported)
return;
rates = mp->ofdm_rates[sband->band];
for (i = 0; i < ARRAY_SIZE(mp->ofdm_rates[0]); i++) {
if (rates[i] == 0xff ||
!rate_supported(sta, sband->band, rates[i]))
continue;
mi->supported[MINSTREL_OFDM_GROUP] |= BIT(i);
}
}
static void
minstrel_ht_update_caps(void *priv, struct ieee80211_supported_band *sband,
struct cfg80211_chan_def *chandef,
struct ieee80211_sta *sta, void *priv_sta)
{
struct minstrel_priv *mp = priv;
struct minstrel_ht_sta *mi = priv_sta;
struct ieee80211_mcs_info *mcs = &sta->deflink.ht_cap.mcs;
u16 ht_cap = sta->deflink.ht_cap.cap;
struct ieee80211_sta_vht_cap *vht_cap = &sta->deflink.vht_cap;
const struct ieee80211_rate *ctl_rate;
struct sta_info *sta_info;
bool ldpc, erp;
int use_vht;
int ack_dur;
int stbc;
int i;
BUILD_BUG_ON(ARRAY_SIZE(minstrel_mcs_groups) != MINSTREL_GROUPS_NB);
if (vht_cap->vht_supported)
use_vht = vht_cap->vht_mcs.tx_mcs_map != cpu_to_le16(~0);
else
use_vht = 0;
memset(mi, 0, sizeof(*mi));
mi->sta = sta;
mi->band = sband->band;
mi->last_stats_update = jiffies;
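	/* precompute per-frame protocol overhead including the ACK; the
	 * RTS/CTS variants add two more ACK durations
	 */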
ack_dur = ieee80211_frame_duration(sband->band, 10, 60, 1, 1, 0);
mi->overhead = ieee80211_frame_duration(sband->band, 0, 60, 1, 1, 0);
mi->overhead += ack_dur;
mi->overhead_rtscts = mi->overhead + 2 * ack_dur;
ctl_rate = &sband->bitrates[rate_lowest_index(sband, sta)];
erp = ctl_rate->flags & IEEE80211_RATE_ERP_G;
ack_dur = ieee80211_frame_duration(sband->band, 10,
ctl_rate->bitrate, erp, 1,
ieee80211_chandef_get_shift(chandef));
mi->overhead_legacy = ack_dur;
mi->overhead_legacy_rtscts = mi->overhead_legacy + 2 * ack_dur;
mi->avg_ampdu_len = MINSTREL_FRAC(1, 1);
if (!use_vht) {
stbc = (ht_cap & IEEE80211_HT_CAP_RX_STBC) >>
IEEE80211_HT_CAP_RX_STBC_SHIFT;
ldpc = ht_cap & IEEE80211_HT_CAP_LDPC_CODING;
} else {
stbc = (vht_cap->cap & IEEE80211_VHT_CAP_RXSTBC_MASK) >>
IEEE80211_VHT_CAP_RXSTBC_SHIFT;
ldpc = vht_cap->cap & IEEE80211_VHT_CAP_RXLDPC;
}
mi->tx_flags |= stbc << IEEE80211_TX_CTL_STBC_SHIFT;
if (ldpc)
mi->tx_flags |= IEEE80211_TX_CTL_LDPC;
for (i = 0; i < ARRAY_SIZE(mi->groups); i++) {
u32 gflags = minstrel_mcs_groups[i].flags;
int bw, nss;
mi->supported[i] = 0;
if (minstrel_ht_is_legacy_group(i))
continue;
if (gflags & IEEE80211_TX_RC_SHORT_GI) {
if (gflags & IEEE80211_TX_RC_40_MHZ_WIDTH) {
if (!(ht_cap & IEEE80211_HT_CAP_SGI_40))
continue;
} else {
if (!(ht_cap & IEEE80211_HT_CAP_SGI_20))
continue;
}
}
if (gflags & IEEE80211_TX_RC_40_MHZ_WIDTH &&
sta->deflink.bandwidth < IEEE80211_STA_RX_BW_40)
continue;
nss = minstrel_mcs_groups[i].streams;
/* Mark MCS > 7 as unsupported if STA is in static SMPS mode */
if (sta->deflink.smps_mode == IEEE80211_SMPS_STATIC && nss > 1)
continue;
/* HT rate */
if (gflags & IEEE80211_TX_RC_MCS) {
if (use_vht && minstrel_vht_only)
continue;
mi->supported[i] = mcs->rx_mask[nss - 1];
continue;
}
/* VHT rate */
if (!vht_cap->vht_supported ||
WARN_ON(!(gflags & IEEE80211_TX_RC_VHT_MCS)) ||
WARN_ON(gflags & IEEE80211_TX_RC_160_MHZ_WIDTH))
continue;
if (gflags & IEEE80211_TX_RC_80_MHZ_WIDTH) {
if (sta->deflink.bandwidth < IEEE80211_STA_RX_BW_80 ||
((gflags & IEEE80211_TX_RC_SHORT_GI) &&
!(vht_cap->cap & IEEE80211_VHT_CAP_SHORT_GI_80))) {
continue;
}
}
if (gflags & IEEE80211_TX_RC_40_MHZ_WIDTH)
bw = BW_40;
else if (gflags & IEEE80211_TX_RC_80_MHZ_WIDTH)
bw = BW_80;
else
bw = BW_20;
mi->supported[i] = minstrel_get_valid_vht_rates(bw, nss,
vht_cap->vht_mcs.tx_mcs_map);
}
sta_info = container_of(sta, struct sta_info, sta);
mi->use_short_preamble = test_sta_flag(sta_info, WLAN_STA_SHORT_PREAMBLE) &&
sta_info->sdata->vif.bss_conf.use_short_preamble;
minstrel_ht_update_cck(mp, mi, sband, sta);
minstrel_ht_update_ofdm(mp, mi, sband, sta);
/* create an initial rate table with the lowest supported rates */
minstrel_ht_update_stats(mp, mi);
minstrel_ht_update_rates(mp, mi);
}
static void
minstrel_ht_rate_init(void *priv, struct ieee80211_supported_band *sband,
struct cfg80211_chan_def *chandef,
struct ieee80211_sta *sta, void *priv_sta)
{
minstrel_ht_update_caps(priv, sband, chandef, sta, priv_sta);
}
static void
minstrel_ht_rate_update(void *priv, struct ieee80211_supported_band *sband,
struct cfg80211_chan_def *chandef,
struct ieee80211_sta *sta, void *priv_sta,
u32 changed)
{
minstrel_ht_update_caps(priv, sband, chandef, sta, priv_sta);
}
static void *
minstrel_ht_alloc_sta(void *priv, struct ieee80211_sta *sta, gfp_t gfp)
{
struct ieee80211_supported_band *sband;
struct minstrel_ht_sta *mi;
struct minstrel_priv *mp = priv;
struct ieee80211_hw *hw = mp->hw;
int max_rates = 0;
int i;
for (i = 0; i < NUM_NL80211_BANDS; i++) {
sband = hw->wiphy->bands[i];
if (sband && sband->n_bitrates > max_rates)
max_rates = sband->n_bitrates;
}
return kzalloc(sizeof(*mi), gfp);
}
static void
minstrel_ht_free_sta(void *priv, struct ieee80211_sta *sta, void *priv_sta)
{
kfree(priv_sta);
}
static void
minstrel_ht_fill_rate_array(u8 *dest, struct ieee80211_supported_band *sband,
const s16 *bitrates, int n_rates, u32 rate_flags)
{
int i, j;
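	/* map each requested bitrate (in units of 100 kbps) to its index in
	 * the band's bitrate table
	 */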
for (i = 0; i < sband->n_bitrates; i++) {
struct ieee80211_rate *rate = &sband->bitrates[i];
if ((rate_flags & sband->bitrates[i].flags) != rate_flags)
continue;
for (j = 0; j < n_rates; j++) {
if (rate->bitrate != bitrates[j])
continue;
dest[j] = i;
break;
}
}
}
static void
minstrel_ht_init_cck_rates(struct minstrel_priv *mp)
{
static const s16 bitrates[4] = { 10, 20, 55, 110 };
struct ieee80211_supported_band *sband;
u32 rate_flags = ieee80211_chandef_rate_flags(&mp->hw->conf.chandef);
memset(mp->cck_rates, 0xff, sizeof(mp->cck_rates));
sband = mp->hw->wiphy->bands[NL80211_BAND_2GHZ];
if (!sband)
return;
BUILD_BUG_ON(ARRAY_SIZE(mp->cck_rates) != ARRAY_SIZE(bitrates));
minstrel_ht_fill_rate_array(mp->cck_rates, sband,
minstrel_cck_bitrates,
ARRAY_SIZE(minstrel_cck_bitrates),
rate_flags);
}
static void
minstrel_ht_init_ofdm_rates(struct minstrel_priv *mp, enum nl80211_band band)
{
static const s16 bitrates[8] = { 60, 90, 120, 180, 240, 360, 480, 540 };
struct ieee80211_supported_band *sband;
u32 rate_flags = ieee80211_chandef_rate_flags(&mp->hw->conf.chandef);
memset(mp->ofdm_rates[band], 0xff, sizeof(mp->ofdm_rates[band]));
sband = mp->hw->wiphy->bands[band];
if (!sband)
return;
BUILD_BUG_ON(ARRAY_SIZE(mp->ofdm_rates[band]) != ARRAY_SIZE(bitrates));
minstrel_ht_fill_rate_array(mp->ofdm_rates[band], sband,
minstrel_ofdm_bitrates,
ARRAY_SIZE(minstrel_ofdm_bitrates),
rate_flags);
}
static void *
minstrel_ht_alloc(struct ieee80211_hw *hw)
{
struct minstrel_priv *mp;
int i;
mp = kzalloc(sizeof(struct minstrel_priv), GFP_ATOMIC);
if (!mp)
return NULL;
/* contention window settings
* Just an approximation. Using the per-queue values would complicate
* the calculations and is probably unnecessary */
mp->cw_min = 15;
mp->cw_max = 1023;
/* maximum time that the hw is allowed to stay in one MRR segment */
mp->segment_size = 6000;
if (hw->max_rate_tries > 0)
mp->max_retry = hw->max_rate_tries;
else
/* safe default, does not necessarily have to match hw properties */
mp->max_retry = 7;
mp->hw = hw;
mp->update_interval = HZ / 20;
minstrel_ht_init_cck_rates(mp);
for (i = 0; i < ARRAY_SIZE(mp->hw->wiphy->bands); i++)
minstrel_ht_init_ofdm_rates(mp, i);
return mp;
}
#ifdef CONFIG_MAC80211_DEBUGFS
static void minstrel_ht_add_debugfs(struct ieee80211_hw *hw, void *priv,
struct dentry *debugfsdir)
{
struct minstrel_priv *mp = priv;
mp->fixed_rate_idx = (u32) -1;
debugfs_create_u32("fixed_rate_idx", S_IRUGO | S_IWUGO, debugfsdir,
&mp->fixed_rate_idx);
}
#endif
static void
minstrel_ht_free(void *priv)
{
kfree(priv);
}
static u32 minstrel_ht_get_expected_throughput(void *priv_sta)
{
struct minstrel_ht_sta *mi = priv_sta;
int i, j, prob, tp_avg;
i = MI_RATE_GROUP(mi->max_tp_rate[0]);
j = MI_RATE_IDX(mi->max_tp_rate[0]);
prob = mi->groups[i].rates[j].prob_avg;
	/* convert tp_avg from packets per second to kbps */
tp_avg = minstrel_ht_get_tp_avg(mi, i, j, prob) * 10;
tp_avg = tp_avg * AVG_PKT_SIZE * 8 / 1024;
return tp_avg;
}
static const struct rate_control_ops mac80211_minstrel_ht = {
.name = "minstrel_ht",
.capa = RATE_CTRL_CAPA_AMPDU_TRIGGER,
.tx_status_ext = minstrel_ht_tx_status,
.get_rate = minstrel_ht_get_rate,
.rate_init = minstrel_ht_rate_init,
.rate_update = minstrel_ht_rate_update,
.alloc_sta = minstrel_ht_alloc_sta,
.free_sta = minstrel_ht_free_sta,
.alloc = minstrel_ht_alloc,
.free = minstrel_ht_free,
#ifdef CONFIG_MAC80211_DEBUGFS
.add_debugfs = minstrel_ht_add_debugfs,
.add_sta_debugfs = minstrel_ht_add_sta_debugfs,
#endif
.get_expected_throughput = minstrel_ht_get_expected_throughput,
};
static void __init init_sample_table(void)
{
int col, i, new_idx;
u8 rnd[MCS_GROUP_RATES];
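	/* build SAMPLE_COLUMNS random permutations of the per-group rate
	 * indexes to randomize the sampling order
	 */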
memset(sample_table, 0xff, sizeof(sample_table));
for (col = 0; col < SAMPLE_COLUMNS; col++) {
get_random_bytes(rnd, sizeof(rnd));
for (i = 0; i < MCS_GROUP_RATES; i++) {
new_idx = (i + rnd[i]) % MCS_GROUP_RATES;
while (sample_table[col][new_idx] != 0xff)
new_idx = (new_idx + 1) % MCS_GROUP_RATES;
sample_table[col][new_idx] = i;
}
}
}
int __init
rc80211_minstrel_init(void)
{
init_sample_table();
return ieee80211_rate_control_register(&mac80211_minstrel_ht);
}
void
rc80211_minstrel_exit(void)
{
ieee80211_rate_control_unregister(&mac80211_minstrel_ht);
}
| linux-master | net/mac80211/rc80211_minstrel_ht.c |
// SPDX-License-Identifier: GPL-2.0-only
/*
* Copyright 2002-2005, Instant802 Networks, Inc.
* Copyright 2005-2006, Devicescape Software, Inc.
* Copyright 2006-2007 Jiri Benc <[email protected]>
* Copyright 2013-2014 Intel Mobile Communications GmbH
* Copyright (C) 2017 Intel Deutschland GmbH
* Copyright (C) 2018-2023 Intel Corporation
*/
#include <net/mac80211.h>
#include <linux/module.h>
#include <linux/fips.h>
#include <linux/init.h>
#include <linux/netdevice.h>
#include <linux/types.h>
#include <linux/slab.h>
#include <linux/skbuff.h>
#include <linux/etherdevice.h>
#include <linux/if_arp.h>
#include <linux/rtnetlink.h>
#include <linux/bitmap.h>
#include <linux/inetdevice.h>
#include <net/net_namespace.h>
#include <net/dropreason.h>
#include <net/cfg80211.h>
#include <net/addrconf.h>
#include "ieee80211_i.h"
#include "driver-ops.h"
#include "rate.h"
#include "mesh.h"
#include "wep.h"
#include "led.h"
#include "debugfs.h"
void ieee80211_configure_filter(struct ieee80211_local *local)
{
u64 mc;
unsigned int changed_flags;
unsigned int new_flags = 0;
if (atomic_read(&local->iff_allmultis))
new_flags |= FIF_ALLMULTI;
if (local->monitors || test_bit(SCAN_SW_SCANNING, &local->scanning) ||
test_bit(SCAN_ONCHANNEL_SCANNING, &local->scanning))
new_flags |= FIF_BCN_PRBRESP_PROMISC;
if (local->fif_probe_req || local->probe_req_reg)
new_flags |= FIF_PROBE_REQ;
if (local->fif_fcsfail)
new_flags |= FIF_FCSFAIL;
if (local->fif_plcpfail)
new_flags |= FIF_PLCPFAIL;
if (local->fif_control)
new_flags |= FIF_CONTROL;
if (local->fif_other_bss)
new_flags |= FIF_OTHER_BSS;
if (local->fif_pspoll)
new_flags |= FIF_PSPOLL;
if (local->rx_mcast_action_reg)
new_flags |= FIF_MCAST_ACTION;
spin_lock_bh(&local->filter_lock);
changed_flags = local->filter_flags ^ new_flags;
mc = drv_prepare_multicast(local, &local->mc_list);
spin_unlock_bh(&local->filter_lock);
/* be a bit nasty */
new_flags |= (1<<31);
drv_configure_filter(local, changed_flags, &new_flags, mc);
WARN_ON(new_flags & (1<<31));
local->filter_flags = new_flags & ~(1<<31);
}
static void ieee80211_reconfig_filter(struct work_struct *work)
{
struct ieee80211_local *local =
container_of(work, struct ieee80211_local, reconfig_filter);
ieee80211_configure_filter(local);
}
static u32 ieee80211_hw_conf_chan(struct ieee80211_local *local)
{
struct ieee80211_sub_if_data *sdata;
struct cfg80211_chan_def chandef = {};
u32 changed = 0;
int power;
u32 offchannel_flag;
offchannel_flag = local->hw.conf.flags & IEEE80211_CONF_OFFCHANNEL;
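	/* channel precedence: scan channel, then temporary (off-channel)
	 * channel, then the operating chandef
	 */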
if (local->scan_chandef.chan) {
chandef = local->scan_chandef;
} else if (local->tmp_channel) {
chandef.chan = local->tmp_channel;
chandef.width = NL80211_CHAN_WIDTH_20_NOHT;
chandef.center_freq1 = chandef.chan->center_freq;
chandef.freq1_offset = chandef.chan->freq_offset;
} else
chandef = local->_oper_chandef;
WARN(!cfg80211_chandef_valid(&chandef),
"control:%d.%03d MHz width:%d center: %d.%03d/%d MHz",
chandef.chan->center_freq, chandef.chan->freq_offset,
chandef.width, chandef.center_freq1, chandef.freq1_offset,
chandef.center_freq2);
if (!cfg80211_chandef_identical(&chandef, &local->_oper_chandef))
local->hw.conf.flags |= IEEE80211_CONF_OFFCHANNEL;
else
local->hw.conf.flags &= ~IEEE80211_CONF_OFFCHANNEL;
offchannel_flag ^= local->hw.conf.flags & IEEE80211_CONF_OFFCHANNEL;
if (offchannel_flag ||
!cfg80211_chandef_identical(&local->hw.conf.chandef,
&local->_oper_chandef)) {
local->hw.conf.chandef = chandef;
changed |= IEEE80211_CONF_CHANGE_CHANNEL;
}
if (!conf_is_ht(&local->hw.conf)) {
/*
* mac80211.h documents that this is only valid
* when the channel is set to an HT type, and
* that otherwise STATIC is used.
*/
local->hw.conf.smps_mode = IEEE80211_SMPS_STATIC;
} else if (local->hw.conf.smps_mode != local->smps_mode) {
local->hw.conf.smps_mode = local->smps_mode;
changed |= IEEE80211_CONF_CHANGE_SMPS;
}
power = ieee80211_chandef_max_power(&chandef);
rcu_read_lock();
list_for_each_entry_rcu(sdata, &local->interfaces, list) {
if (!rcu_access_pointer(sdata->vif.bss_conf.chanctx_conf))
continue;
if (sdata->vif.type == NL80211_IFTYPE_AP_VLAN)
continue;
if (sdata->vif.bss_conf.txpower == INT_MIN)
continue;
power = min(power, sdata->vif.bss_conf.txpower);
}
rcu_read_unlock();
if (local->hw.conf.power_level != power) {
changed |= IEEE80211_CONF_CHANGE_POWER;
local->hw.conf.power_level = power;
}
return changed;
}
int ieee80211_hw_config(struct ieee80211_local *local, u32 changed)
{
int ret = 0;
might_sleep();
if (!local->use_chanctx)
changed |= ieee80211_hw_conf_chan(local);
else
changed &= ~(IEEE80211_CONF_CHANGE_CHANNEL |
IEEE80211_CONF_CHANGE_POWER |
IEEE80211_CONF_CHANGE_SMPS);
if (changed && local->open_count) {
ret = drv_config(local, changed);
/*
* Goal:
* HW reconfiguration should never fail, the driver has told
* us what it can support so it should live up to that promise.
*
* Current status:
* rfkill is not integrated with mac80211 and a
* configuration command can thus fail if hardware rfkill
* is enabled
*
* FIXME: integrate rfkill with mac80211 and then add this
* WARN_ON() back
*
*/
/* WARN_ON(ret); */
}
return ret;
}
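/* changes that apply to the interface (vif) configuration as a whole
 * rather than to an individual link
 */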
#define BSS_CHANGED_VIF_CFG_FLAGS (BSS_CHANGED_ASSOC |\
BSS_CHANGED_IDLE |\
BSS_CHANGED_PS |\
BSS_CHANGED_IBSS |\
BSS_CHANGED_ARP_FILTER |\
BSS_CHANGED_SSID)
void ieee80211_bss_info_change_notify(struct ieee80211_sub_if_data *sdata,
u64 changed)
{
struct ieee80211_local *local = sdata->local;
might_sleep();
if (!changed || sdata->vif.type == NL80211_IFTYPE_AP_VLAN)
return;
if (WARN_ON_ONCE(changed & (BSS_CHANGED_BEACON |
BSS_CHANGED_BEACON_ENABLED) &&
sdata->vif.type != NL80211_IFTYPE_AP &&
sdata->vif.type != NL80211_IFTYPE_ADHOC &&
sdata->vif.type != NL80211_IFTYPE_MESH_POINT &&
sdata->vif.type != NL80211_IFTYPE_OCB))
return;
if (WARN_ON_ONCE(sdata->vif.type == NL80211_IFTYPE_P2P_DEVICE ||
sdata->vif.type == NL80211_IFTYPE_NAN ||
(sdata->vif.type == NL80211_IFTYPE_MONITOR &&
!sdata->vif.bss_conf.mu_mimo_owner &&
!(changed & BSS_CHANGED_TXPOWER))))
return;
if (!check_sdata_in_driver(sdata))
return;
if (changed & BSS_CHANGED_VIF_CFG_FLAGS) {
u64 ch = changed & BSS_CHANGED_VIF_CFG_FLAGS;
trace_drv_vif_cfg_changed(local, sdata, changed);
if (local->ops->vif_cfg_changed)
local->ops->vif_cfg_changed(&local->hw, &sdata->vif, ch);
}
if (changed & ~BSS_CHANGED_VIF_CFG_FLAGS) {
u64 ch = changed & ~BSS_CHANGED_VIF_CFG_FLAGS;
/* FIXME: should be for each link */
trace_drv_link_info_changed(local, sdata, &sdata->vif.bss_conf,
changed);
if (local->ops->link_info_changed)
local->ops->link_info_changed(&local->hw, &sdata->vif,
&sdata->vif.bss_conf, ch);
}
if (local->ops->bss_info_changed)
local->ops->bss_info_changed(&local->hw, &sdata->vif,
&sdata->vif.bss_conf, changed);
trace_drv_return_void(local);
}
void ieee80211_vif_cfg_change_notify(struct ieee80211_sub_if_data *sdata,
u64 changed)
{
struct ieee80211_local *local = sdata->local;
WARN_ON_ONCE(changed & ~BSS_CHANGED_VIF_CFG_FLAGS);
if (!changed || sdata->vif.type == NL80211_IFTYPE_AP_VLAN)
return;
drv_vif_cfg_changed(local, sdata, changed);
}
void ieee80211_link_info_change_notify(struct ieee80211_sub_if_data *sdata,
struct ieee80211_link_data *link,
u64 changed)
{
struct ieee80211_local *local = sdata->local;
WARN_ON_ONCE(changed & BSS_CHANGED_VIF_CFG_FLAGS);
if (!changed || sdata->vif.type == NL80211_IFTYPE_AP_VLAN)
return;
if (!check_sdata_in_driver(sdata))
return;
drv_link_info_changed(local, sdata, link->conf, link->link_id, changed);
}
u64 ieee80211_reset_erp_info(struct ieee80211_sub_if_data *sdata)
{
sdata->vif.bss_conf.use_cts_prot = false;
sdata->vif.bss_conf.use_short_preamble = false;
sdata->vif.bss_conf.use_short_slot = false;
return BSS_CHANGED_ERP_CTS_PROT |
BSS_CHANGED_ERP_PREAMBLE |
BSS_CHANGED_ERP_SLOT;
}
static void ieee80211_tasklet_handler(struct tasklet_struct *t)
{
struct ieee80211_local *local = from_tasklet(local, t, tasklet);
struct sk_buff *skb;
while ((skb = skb_dequeue(&local->skb_queue)) ||
(skb = skb_dequeue(&local->skb_queue_unreliable))) {
switch (skb->pkt_type) {
case IEEE80211_RX_MSG:
/* Clear skb->pkt_type in order to not confuse kernel
* netstack. */
skb->pkt_type = 0;
ieee80211_rx(&local->hw, skb);
break;
case IEEE80211_TX_STATUS_MSG:
skb->pkt_type = 0;
ieee80211_tx_status(&local->hw, skb);
break;
default:
WARN(1, "mac80211: Packet is of unknown type %d\n",
skb->pkt_type);
dev_kfree_skb(skb);
break;
}
}
}
static void ieee80211_restart_work(struct work_struct *work)
{
struct ieee80211_local *local =
container_of(work, struct ieee80211_local, restart_work);
struct ieee80211_sub_if_data *sdata;
int ret;
	/* wait for the scan work to complete */
flush_workqueue(local->workqueue);
flush_work(&local->sched_scan_stopped_work);
flush_work(&local->radar_detected_work);
rtnl_lock();
/* we might do interface manipulations, so need both */
wiphy_lock(local->hw.wiphy);
WARN(test_bit(SCAN_HW_SCANNING, &local->scanning),
"%s called with hardware scan in progress\n", __func__);
list_for_each_entry(sdata, &local->interfaces, list) {
/*
* XXX: there may be more work for other vif types and even
* for station mode: a good thing would be to run most of
* the iface type's dependent _stop (ieee80211_mg_stop,
* ieee80211_ibss_stop) etc...
* For now, fix only the specific bug that was seen: race
* between csa_connection_drop_work and us.
*/
if (sdata->vif.type == NL80211_IFTYPE_STATION) {
/*
* This worker is scheduled from the iface worker that
* runs on mac80211's workqueue, so we can't be
* scheduling this worker after the cancel right here.
* The exception is ieee80211_chswitch_done.
* Then we can have a race...
*/
wiphy_work_cancel(local->hw.wiphy,
&sdata->u.mgd.csa_connection_drop_work);
if (sdata->vif.bss_conf.csa_active) {
sdata_lock(sdata);
ieee80211_sta_connection_lost(sdata,
WLAN_REASON_UNSPECIFIED,
false);
sdata_unlock(sdata);
}
}
flush_delayed_work(&sdata->dec_tailroom_needed_wk);
}
ieee80211_scan_cancel(local);
/* make sure any new ROC will consider local->in_reconfig */
flush_delayed_work(&local->roc_work);
flush_work(&local->hw_roc_done);
/* wait for all packet processing to be done */
synchronize_net();
ret = ieee80211_reconfig(local);
wiphy_unlock(local->hw.wiphy);
if (ret)
cfg80211_shutdown_all_interfaces(local->hw.wiphy);
rtnl_unlock();
}
void ieee80211_restart_hw(struct ieee80211_hw *hw)
{
struct ieee80211_local *local = hw_to_local(hw);
trace_api_restart_hw(local);
wiphy_info(hw->wiphy,
"Hardware restart was requested\n");
/* use this reason, ieee80211_reconfig will unblock it */
ieee80211_stop_queues_by_reason(hw, IEEE80211_MAX_QUEUE_MAP,
IEEE80211_QUEUE_STOP_REASON_SUSPEND,
false);
/*
* Stop all Rx during the reconfig. We don't want state changes
* or driver callbacks while this is in progress.
*/
local->in_reconfig = true;
barrier();
queue_work(system_freezable_wq, &local->restart_work);
}
EXPORT_SYMBOL(ieee80211_restart_hw);
#ifdef CONFIG_INET
static int ieee80211_ifa_changed(struct notifier_block *nb,
unsigned long data, void *arg)
{
struct in_ifaddr *ifa = arg;
struct ieee80211_local *local =
container_of(nb, struct ieee80211_local,
ifa_notifier);
struct net_device *ndev = ifa->ifa_dev->dev;
struct wireless_dev *wdev = ndev->ieee80211_ptr;
struct in_device *idev;
struct ieee80211_sub_if_data *sdata;
struct ieee80211_vif_cfg *vif_cfg;
struct ieee80211_if_managed *ifmgd;
int c = 0;
/* Make sure it's our interface that got changed */
if (!wdev)
return NOTIFY_DONE;
if (wdev->wiphy != local->hw.wiphy)
return NOTIFY_DONE;
sdata = IEEE80211_DEV_TO_SUB_IF(ndev);
vif_cfg = &sdata->vif.cfg;
/* ARP filtering is only supported in managed mode */
if (sdata->vif.type != NL80211_IFTYPE_STATION)
return NOTIFY_DONE;
idev = __in_dev_get_rtnl(sdata->dev);
if (!idev)
return NOTIFY_DONE;
ifmgd = &sdata->u.mgd;
sdata_lock(sdata);
/* Copy the addresses to the vif config list */
ifa = rtnl_dereference(idev->ifa_list);
while (ifa) {
if (c < IEEE80211_BSS_ARP_ADDR_LIST_LEN)
vif_cfg->arp_addr_list[c] = ifa->ifa_address;
ifa = rtnl_dereference(ifa->ifa_next);
c++;
}
vif_cfg->arp_addr_cnt = c;
/* Configure driver only if associated (which also implies it is up) */
if (ifmgd->associated)
ieee80211_vif_cfg_change_notify(sdata, BSS_CHANGED_ARP_FILTER);
sdata_unlock(sdata);
return NOTIFY_OK;
}
#endif
#if IS_ENABLED(CONFIG_IPV6)
static int ieee80211_ifa6_changed(struct notifier_block *nb,
unsigned long data, void *arg)
{
struct inet6_ifaddr *ifa = (struct inet6_ifaddr *)arg;
struct inet6_dev *idev = ifa->idev;
struct net_device *ndev = ifa->idev->dev;
struct ieee80211_local *local =
container_of(nb, struct ieee80211_local, ifa6_notifier);
struct wireless_dev *wdev = ndev->ieee80211_ptr;
struct ieee80211_sub_if_data *sdata;
/* Make sure it's our interface that got changed */
if (!wdev || wdev->wiphy != local->hw.wiphy)
return NOTIFY_DONE;
sdata = IEEE80211_DEV_TO_SUB_IF(ndev);
/*
* For now only support station mode. This is mostly because
* doing AP would have to handle AP_VLAN in some way ...
*/
if (sdata->vif.type != NL80211_IFTYPE_STATION)
return NOTIFY_DONE;
drv_ipv6_addr_change(local, sdata, idev);
return NOTIFY_OK;
}
#endif
/* There isn't a lot of sense in it, but you can transmit anything you like */
static const struct ieee80211_txrx_stypes
ieee80211_default_mgmt_stypes[NUM_NL80211_IFTYPES] = {
[NL80211_IFTYPE_ADHOC] = {
.tx = 0xffff,
.rx = BIT(IEEE80211_STYPE_ACTION >> 4) |
BIT(IEEE80211_STYPE_AUTH >> 4) |
BIT(IEEE80211_STYPE_DEAUTH >> 4) |
BIT(IEEE80211_STYPE_PROBE_REQ >> 4),
},
[NL80211_IFTYPE_STATION] = {
.tx = 0xffff,
/*
* To support Pre Association Security Negotiation (PASN) while
* already associated to one AP, allow user space to register to
* Rx authentication frames, so that the user space logic would
* be able to receive/handle authentication frames from a
* different AP as part of PASN.
* It is expected that user space would intelligently register
* for Rx authentication frames, i.e., only when PASN is used
* and configure a match filter only for PASN authentication
* algorithm, as otherwise the MLME functionality of mac80211
* would be broken.
*/
.rx = BIT(IEEE80211_STYPE_ACTION >> 4) |
BIT(IEEE80211_STYPE_AUTH >> 4) |
BIT(IEEE80211_STYPE_PROBE_REQ >> 4),
},
[NL80211_IFTYPE_AP] = {
.tx = 0xffff,
.rx = BIT(IEEE80211_STYPE_ASSOC_REQ >> 4) |
BIT(IEEE80211_STYPE_REASSOC_REQ >> 4) |
BIT(IEEE80211_STYPE_PROBE_REQ >> 4) |
BIT(IEEE80211_STYPE_DISASSOC >> 4) |
BIT(IEEE80211_STYPE_AUTH >> 4) |
BIT(IEEE80211_STYPE_DEAUTH >> 4) |
BIT(IEEE80211_STYPE_ACTION >> 4),
},
[NL80211_IFTYPE_AP_VLAN] = {
/* copy AP */
.tx = 0xffff,
.rx = BIT(IEEE80211_STYPE_ASSOC_REQ >> 4) |
BIT(IEEE80211_STYPE_REASSOC_REQ >> 4) |
BIT(IEEE80211_STYPE_PROBE_REQ >> 4) |
BIT(IEEE80211_STYPE_DISASSOC >> 4) |
BIT(IEEE80211_STYPE_AUTH >> 4) |
BIT(IEEE80211_STYPE_DEAUTH >> 4) |
BIT(IEEE80211_STYPE_ACTION >> 4),
},
[NL80211_IFTYPE_P2P_CLIENT] = {
.tx = 0xffff,
.rx = BIT(IEEE80211_STYPE_ACTION >> 4) |
BIT(IEEE80211_STYPE_PROBE_REQ >> 4),
},
[NL80211_IFTYPE_P2P_GO] = {
.tx = 0xffff,
.rx = BIT(IEEE80211_STYPE_ASSOC_REQ >> 4) |
BIT(IEEE80211_STYPE_REASSOC_REQ >> 4) |
BIT(IEEE80211_STYPE_PROBE_REQ >> 4) |
BIT(IEEE80211_STYPE_DISASSOC >> 4) |
BIT(IEEE80211_STYPE_AUTH >> 4) |
BIT(IEEE80211_STYPE_DEAUTH >> 4) |
BIT(IEEE80211_STYPE_ACTION >> 4),
},
[NL80211_IFTYPE_MESH_POINT] = {
.tx = 0xffff,
.rx = BIT(IEEE80211_STYPE_ACTION >> 4) |
BIT(IEEE80211_STYPE_AUTH >> 4) |
BIT(IEEE80211_STYPE_DEAUTH >> 4),
},
[NL80211_IFTYPE_P2P_DEVICE] = {
.tx = 0xffff,
.rx = BIT(IEEE80211_STYPE_ACTION >> 4) |
BIT(IEEE80211_STYPE_PROBE_REQ >> 4),
},
};
static const struct ieee80211_ht_cap mac80211_ht_capa_mod_mask = {
.ampdu_params_info = IEEE80211_HT_AMPDU_PARM_FACTOR |
IEEE80211_HT_AMPDU_PARM_DENSITY,
.cap_info = cpu_to_le16(IEEE80211_HT_CAP_SUP_WIDTH_20_40 |
IEEE80211_HT_CAP_MAX_AMSDU |
IEEE80211_HT_CAP_SGI_20 |
IEEE80211_HT_CAP_SGI_40 |
IEEE80211_HT_CAP_TX_STBC |
IEEE80211_HT_CAP_RX_STBC |
IEEE80211_HT_CAP_LDPC_CODING |
IEEE80211_HT_CAP_40MHZ_INTOLERANT),
.mcs = {
.rx_mask = { 0xff, 0xff, 0xff, 0xff, 0xff,
0xff, 0xff, 0xff, 0xff, 0xff, },
},
};
static const struct ieee80211_vht_cap mac80211_vht_capa_mod_mask = {
.vht_cap_info =
cpu_to_le32(IEEE80211_VHT_CAP_RXLDPC |
IEEE80211_VHT_CAP_SHORT_GI_80 |
IEEE80211_VHT_CAP_SHORT_GI_160 |
IEEE80211_VHT_CAP_RXSTBC_MASK |
IEEE80211_VHT_CAP_TXSTBC |
IEEE80211_VHT_CAP_SU_BEAMFORMER_CAPABLE |
IEEE80211_VHT_CAP_SU_BEAMFORMEE_CAPABLE |
IEEE80211_VHT_CAP_TX_ANTENNA_PATTERN |
IEEE80211_VHT_CAP_RX_ANTENNA_PATTERN |
IEEE80211_VHT_CAP_MAX_A_MPDU_LENGTH_EXPONENT_MASK),
.supp_mcs = {
.rx_mcs_map = cpu_to_le16(~0),
.tx_mcs_map = cpu_to_le16(~0),
},
};
struct ieee80211_hw *ieee80211_alloc_hw_nm(size_t priv_data_len,
const struct ieee80211_ops *ops,
const char *requested_name)
{
struct ieee80211_local *local;
int priv_size, i;
struct wiphy *wiphy;
bool use_chanctx;
if (WARN_ON(!ops->tx || !ops->start || !ops->stop || !ops->config ||
!ops->add_interface || !ops->remove_interface ||
!ops->configure_filter || !ops->wake_tx_queue))
return NULL;
if (WARN_ON(ops->sta_state && (ops->sta_add || ops->sta_remove)))
return NULL;
if (WARN_ON(!!ops->link_info_changed != !!ops->vif_cfg_changed ||
(ops->link_info_changed && ops->bss_info_changed)))
return NULL;
/* check all or no channel context operations exist */
i = !!ops->add_chanctx + !!ops->remove_chanctx +
!!ops->change_chanctx + !!ops->assign_vif_chanctx +
!!ops->unassign_vif_chanctx;
if (WARN_ON(i != 0 && i != 5))
return NULL;
use_chanctx = i == 5;
/* Ensure 32-byte alignment of our private data and hw private data.
* We use the wiphy priv data for both our ieee80211_local and for
* the driver's private data
*
* In memory it'll be like this:
*
* +-------------------------+
* | struct wiphy |
* +-------------------------+
* | struct ieee80211_local |
* +-------------------------+
* | driver's private data |
* +-------------------------+
*
*/
priv_size = ALIGN(sizeof(*local), NETDEV_ALIGN) + priv_data_len;
wiphy = wiphy_new_nm(&mac80211_config_ops, priv_size, requested_name);
if (!wiphy)
return NULL;
wiphy->mgmt_stypes = ieee80211_default_mgmt_stypes;
wiphy->privid = mac80211_wiphy_privid;
wiphy->flags |= WIPHY_FLAG_NETNS_OK |
WIPHY_FLAG_4ADDR_AP |
WIPHY_FLAG_4ADDR_STATION |
WIPHY_FLAG_REPORTS_OBSS |
WIPHY_FLAG_OFFCHAN_TX;
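	/* without channel contexts mac80211 can implement remain-on-channel
	 * in software; otherwise the driver must provide it
	 */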
if (!use_chanctx || ops->remain_on_channel)
wiphy->flags |= WIPHY_FLAG_HAS_REMAIN_ON_CHANNEL;
wiphy->features |= NL80211_FEATURE_SK_TX_STATUS |
NL80211_FEATURE_SAE |
NL80211_FEATURE_HT_IBSS |
NL80211_FEATURE_VIF_TXPOWER |
NL80211_FEATURE_MAC_ON_CREATE |
NL80211_FEATURE_USERSPACE_MPM |
NL80211_FEATURE_FULL_AP_CLIENT_STATE;
wiphy_ext_feature_set(wiphy, NL80211_EXT_FEATURE_FILS_STA);
wiphy_ext_feature_set(wiphy,
NL80211_EXT_FEATURE_CONTROL_PORT_OVER_NL80211);
wiphy_ext_feature_set(wiphy,
NL80211_EXT_FEATURE_CONTROL_PORT_NO_PREAUTH);
wiphy_ext_feature_set(wiphy,
NL80211_EXT_FEATURE_CONTROL_PORT_OVER_NL80211_TX_STATUS);
wiphy_ext_feature_set(wiphy,
NL80211_EXT_FEATURE_SCAN_FREQ_KHZ);
wiphy_ext_feature_set(wiphy,
NL80211_EXT_FEATURE_POWERED_ADDR_CHANGE);
if (!ops->hw_scan) {
wiphy->features |= NL80211_FEATURE_LOW_PRIORITY_SCAN |
NL80211_FEATURE_AP_SCAN;
/*
* if the driver behaves correctly using the probe request
* (template) from mac80211, then both of these should be
* supported even with hw scan - but let drivers opt in.
*/
wiphy_ext_feature_set(wiphy,
NL80211_EXT_FEATURE_SCAN_RANDOM_SN);
wiphy_ext_feature_set(wiphy,
NL80211_EXT_FEATURE_SCAN_MIN_PREQ_CONTENT);
}
if (!ops->set_key)
wiphy->flags |= WIPHY_FLAG_IBSS_RSN;
wiphy_ext_feature_set(wiphy, NL80211_EXT_FEATURE_TXQS);
wiphy_ext_feature_set(wiphy, NL80211_EXT_FEATURE_RRM);
wiphy->bss_priv_size = sizeof(struct ieee80211_bss);
local = wiphy_priv(wiphy);
if (sta_info_init(local))
goto err_free;
local->hw.wiphy = wiphy;
local->hw.priv = (char *)local + ALIGN(sizeof(*local), NETDEV_ALIGN);
local->ops = ops;
local->use_chanctx = use_chanctx;
/*
* We need a bit of data queued to build aggregates properly, so
* instruct the TCP stack to allow more than a single ms of data
* to be queued in the stack. The value is a bit-shift of 1
* second, so 7 is ~8ms of queued data. Only affects local TCP
* sockets.
* This is the default, anyhow - drivers may need to override it
* for local reasons (longer buffers, longer completion time, or
* similar).
*/
local->hw.tx_sk_pacing_shift = 7;
/* set up some defaults */
local->hw.queues = 1;
local->hw.max_rates = 1;
local->hw.max_report_rates = 0;
local->hw.max_rx_aggregation_subframes = IEEE80211_MAX_AMPDU_BUF_HT;
local->hw.max_tx_aggregation_subframes = IEEE80211_MAX_AMPDU_BUF_HT;
local->hw.offchannel_tx_hw_queue = IEEE80211_INVAL_HW_QUEUE;
local->hw.conf.long_frame_max_tx_count = wiphy->retry_long;
local->hw.conf.short_frame_max_tx_count = wiphy->retry_short;
local->hw.radiotap_mcs_details = IEEE80211_RADIOTAP_MCS_HAVE_MCS |
IEEE80211_RADIOTAP_MCS_HAVE_GI |
IEEE80211_RADIOTAP_MCS_HAVE_BW;
local->hw.radiotap_vht_details = IEEE80211_RADIOTAP_VHT_KNOWN_GI |
IEEE80211_RADIOTAP_VHT_KNOWN_BANDWIDTH;
local->hw.uapsd_queues = IEEE80211_DEFAULT_UAPSD_QUEUES;
local->hw.uapsd_max_sp_len = IEEE80211_DEFAULT_MAX_SP_LEN;
local->hw.max_mtu = IEEE80211_MAX_DATA_LEN;
local->user_power_level = IEEE80211_UNSET_POWER_LEVEL;
wiphy->ht_capa_mod_mask = &mac80211_ht_capa_mod_mask;
wiphy->vht_capa_mod_mask = &mac80211_vht_capa_mod_mask;
local->ext_capa[7] = WLAN_EXT_CAPA8_OPMODE_NOTIF;
wiphy->extended_capabilities = local->ext_capa;
wiphy->extended_capabilities_mask = local->ext_capa;
wiphy->extended_capabilities_len =
ARRAY_SIZE(local->ext_capa);
INIT_LIST_HEAD(&local->interfaces);
INIT_LIST_HEAD(&local->mon_list);
__hw_addr_init(&local->mc_list);
mutex_init(&local->iflist_mtx);
mutex_init(&local->mtx);
mutex_init(&local->key_mtx);
spin_lock_init(&local->filter_lock);
spin_lock_init(&local->rx_path_lock);
spin_lock_init(&local->queue_stop_reason_lock);
for (i = 0; i < IEEE80211_NUM_ACS; i++) {
INIT_LIST_HEAD(&local->active_txqs[i]);
spin_lock_init(&local->active_txq_lock[i]);
local->aql_txq_limit_low[i] = IEEE80211_DEFAULT_AQL_TXQ_LIMIT_L;
local->aql_txq_limit_high[i] =
IEEE80211_DEFAULT_AQL_TXQ_LIMIT_H;
atomic_set(&local->aql_ac_pending_airtime[i], 0);
}
local->airtime_flags = AIRTIME_USE_TX | AIRTIME_USE_RX;
local->aql_threshold = IEEE80211_AQL_THRESHOLD;
atomic_set(&local->aql_total_pending_airtime, 0);
spin_lock_init(&local->handle_wake_tx_queue_lock);
INIT_LIST_HEAD(&local->chanctx_list);
mutex_init(&local->chanctx_mtx);
INIT_DELAYED_WORK(&local->scan_work, ieee80211_scan_work);
INIT_WORK(&local->restart_work, ieee80211_restart_work);
INIT_WORK(&local->radar_detected_work,
ieee80211_dfs_radar_detected_work);
INIT_WORK(&local->reconfig_filter, ieee80211_reconfig_filter);
local->smps_mode = IEEE80211_SMPS_OFF;
INIT_WORK(&local->dynamic_ps_enable_work,
ieee80211_dynamic_ps_enable_work);
INIT_WORK(&local->dynamic_ps_disable_work,
ieee80211_dynamic_ps_disable_work);
timer_setup(&local->dynamic_ps_timer, ieee80211_dynamic_ps_timer, 0);
INIT_WORK(&local->sched_scan_stopped_work,
ieee80211_sched_scan_stopped_work);
spin_lock_init(&local->ack_status_lock);
idr_init(&local->ack_status_frames);
for (i = 0; i < IEEE80211_MAX_QUEUES; i++) {
skb_queue_head_init(&local->pending[i]);
atomic_set(&local->agg_queue_stop[i], 0);
}
tasklet_setup(&local->tx_pending_tasklet, ieee80211_tx_pending);
tasklet_setup(&local->wake_txqs_tasklet, ieee80211_wake_txqs);
tasklet_setup(&local->tasklet, ieee80211_tasklet_handler);
skb_queue_head_init(&local->skb_queue);
skb_queue_head_init(&local->skb_queue_unreliable);
ieee80211_alloc_led_names(local);
ieee80211_roc_setup(local);
local->hw.radiotap_timestamp.units_pos = -1;
local->hw.radiotap_timestamp.accuracy = -1;
return &local->hw;
err_free:
wiphy_free(wiphy);
return NULL;
}
EXPORT_SYMBOL(ieee80211_alloc_hw_nm);
static int ieee80211_init_cipher_suites(struct ieee80211_local *local)
{
bool have_wep = !fips_enabled; /* FIPS does not permit the use of RC4 */
bool have_mfp = ieee80211_hw_check(&local->hw, MFP_CAPABLE);
int r = 0, w = 0;
u32 *suites;
static const u32 cipher_suites[] = {
/* keep WEP first, it may be removed below */
WLAN_CIPHER_SUITE_WEP40,
WLAN_CIPHER_SUITE_WEP104,
WLAN_CIPHER_SUITE_TKIP,
WLAN_CIPHER_SUITE_CCMP,
WLAN_CIPHER_SUITE_CCMP_256,
WLAN_CIPHER_SUITE_GCMP,
WLAN_CIPHER_SUITE_GCMP_256,
/* keep last -- depends on hw flags! */
WLAN_CIPHER_SUITE_AES_CMAC,
WLAN_CIPHER_SUITE_BIP_CMAC_256,
WLAN_CIPHER_SUITE_BIP_GMAC_128,
WLAN_CIPHER_SUITE_BIP_GMAC_256,
};
if (ieee80211_hw_check(&local->hw, SW_CRYPTO_CONTROL) ||
local->hw.wiphy->cipher_suites) {
		/* If the driver controls SW crypto or advertises its own
		 * cipher suites, we only need to remove WEP if necessary.
		 */
if (have_wep)
return 0;
/* well if it has _no_ ciphers ... fine */
if (!local->hw.wiphy->n_cipher_suites)
return 0;
/* Driver provides cipher suites, but we need to exclude WEP */
suites = kmemdup(local->hw.wiphy->cipher_suites,
sizeof(u32) * local->hw.wiphy->n_cipher_suites,
GFP_KERNEL);
if (!suites)
return -ENOMEM;
for (r = 0; r < local->hw.wiphy->n_cipher_suites; r++) {
u32 suite = local->hw.wiphy->cipher_suites[r];
if (suite == WLAN_CIPHER_SUITE_WEP40 ||
suite == WLAN_CIPHER_SUITE_WEP104)
continue;
suites[w++] = suite;
}
} else {
/* assign the (software supported and perhaps offloaded)
* cipher suites
*/
local->hw.wiphy->cipher_suites = cipher_suites;
local->hw.wiphy->n_cipher_suites = ARRAY_SIZE(cipher_suites);
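		/* without MFP, drop the four trailing BIP/CMAC suites; under
		 * FIPS (no WEP), skip the two leading WEP suites
		 */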
if (!have_mfp)
local->hw.wiphy->n_cipher_suites -= 4;
if (!have_wep) {
local->hw.wiphy->cipher_suites += 2;
local->hw.wiphy->n_cipher_suites -= 2;
}
/* not dynamically allocated, so just return */
return 0;
}
local->hw.wiphy->cipher_suites = suites;
local->hw.wiphy->n_cipher_suites = w;
local->wiphy_ciphers_allocated = true;
return 0;
}
int ieee80211_register_hw(struct ieee80211_hw *hw)
{
struct ieee80211_local *local = hw_to_local(hw);
int result, i;
enum nl80211_band band;
int channels, max_bitrates;
bool supp_ht, supp_vht, supp_he, supp_eht;
struct cfg80211_chan_def dflt_chandef = {};
if (ieee80211_hw_check(hw, QUEUE_CONTROL) &&
(local->hw.offchannel_tx_hw_queue == IEEE80211_INVAL_HW_QUEUE ||
local->hw.offchannel_tx_hw_queue >= local->hw.queues))
return -EINVAL;
if ((hw->wiphy->features & NL80211_FEATURE_TDLS_CHANNEL_SWITCH) &&
(!local->ops->tdls_channel_switch ||
!local->ops->tdls_cancel_channel_switch ||
!local->ops->tdls_recv_channel_switch))
return -EOPNOTSUPP;
if (WARN_ON(ieee80211_hw_check(hw, SUPPORTS_TX_FRAG) &&
!local->ops->set_frag_threshold))
return -EINVAL;
if (WARN_ON(local->hw.wiphy->interface_modes &
BIT(NL80211_IFTYPE_NAN) &&
(!local->ops->start_nan || !local->ops->stop_nan)))
return -EINVAL;
if (hw->wiphy->flags & WIPHY_FLAG_SUPPORTS_MLO) {
/*
* For drivers capable of doing MLO, assume modern driver
* or firmware facilities, so software doesn't have to do
* as much, e.g. monitoring beacons would be hard if we
* might not even know which link is active at which time.
*/
if (WARN_ON(!local->use_chanctx))
return -EINVAL;
if (WARN_ON(!local->ops->link_info_changed))
return -EINVAL;
if (WARN_ON(!ieee80211_hw_check(hw, HAS_RATE_CONTROL)))
return -EINVAL;
if (WARN_ON(!ieee80211_hw_check(hw, AMPDU_AGGREGATION)))
return -EINVAL;
if (WARN_ON(ieee80211_hw_check(hw, HOST_BROADCAST_PS_BUFFERING)))
return -EINVAL;
if (WARN_ON(ieee80211_hw_check(hw, SUPPORTS_PS) &&
(!ieee80211_hw_check(hw, SUPPORTS_DYNAMIC_PS) ||
ieee80211_hw_check(hw, PS_NULLFUNC_STACK))))
return -EINVAL;
if (WARN_ON(!ieee80211_hw_check(hw, MFP_CAPABLE)))
return -EINVAL;
if (WARN_ON(!ieee80211_hw_check(hw, CONNECTION_MONITOR)))
return -EINVAL;
if (WARN_ON(ieee80211_hw_check(hw, NEED_DTIM_BEFORE_ASSOC)))
return -EINVAL;
if (WARN_ON(ieee80211_hw_check(hw, TIMING_BEACON_ONLY)))
return -EINVAL;
if (WARN_ON(!ieee80211_hw_check(hw, AP_LINK_PS)))
return -EINVAL;
if (WARN_ON(ieee80211_hw_check(hw, DEAUTH_NEED_MGD_TX_PREP)))
return -EINVAL;
}
#ifdef CONFIG_PM
if (hw->wiphy->wowlan && (!local->ops->suspend || !local->ops->resume))
return -EINVAL;
#endif
if (!local->use_chanctx) {
for (i = 0; i < local->hw.wiphy->n_iface_combinations; i++) {
const struct ieee80211_iface_combination *comb;
comb = &local->hw.wiphy->iface_combinations[i];
if (comb->num_different_channels > 1)
return -EINVAL;
}
} else {
/* DFS is not supported with multi-channel combinations yet */
for (i = 0; i < local->hw.wiphy->n_iface_combinations; i++) {
const struct ieee80211_iface_combination *comb;
comb = &local->hw.wiphy->iface_combinations[i];
if (comb->radar_detect_widths &&
comb->num_different_channels > 1)
return -EINVAL;
}
}
/* Only HW csum features are currently compatible with mac80211 */
if (WARN_ON(hw->netdev_features & ~MAC80211_SUPPORTED_FEATURES))
return -EINVAL;
if (hw->max_report_rates == 0)
hw->max_report_rates = hw->max_rates;
local->rx_chains = 1;
/*
* generic code guarantees at least one band,
* set this very early because much code assumes
* that hw.conf.channel is assigned
*/
channels = 0;
max_bitrates = 0;
supp_ht = false;
supp_vht = false;
supp_he = false;
supp_eht = false;
for (band = 0; band < NUM_NL80211_BANDS; band++) {
struct ieee80211_supported_band *sband;
sband = local->hw.wiphy->bands[band];
if (!sband)
continue;
if (!dflt_chandef.chan) {
/*
* Assign the first enabled channel to dflt_chandef
* from the list of channels
*/
for (i = 0; i < sband->n_channels; i++)
if (!(sband->channels[i].flags &
IEEE80211_CHAN_DISABLED))
break;
/* if none found then use the first anyway */
if (i == sband->n_channels)
i = 0;
cfg80211_chandef_create(&dflt_chandef,
&sband->channels[i],
NL80211_CHAN_NO_HT);
/* init channel we're on */
if (!local->use_chanctx && !local->_oper_chandef.chan) {
local->hw.conf.chandef = dflt_chandef;
local->_oper_chandef = dflt_chandef;
}
local->monitor_chandef = dflt_chandef;
}
channels += sband->n_channels;
/*
* Due to the way the aggregation code handles this and it
* being an HT capability, we can't really support delayed
* BA in MLO (yet).
*/
if (WARN_ON(sband->ht_cap.ht_supported &&
(sband->ht_cap.cap & IEEE80211_HT_CAP_DELAY_BA) &&
hw->wiphy->flags & WIPHY_FLAG_SUPPORTS_MLO))
return -EINVAL;
if (max_bitrates < sband->n_bitrates)
max_bitrates = sband->n_bitrates;
supp_ht = supp_ht || sband->ht_cap.ht_supported;
supp_vht = supp_vht || sband->vht_cap.vht_supported;
for (i = 0; i < sband->n_iftype_data; i++) {
const struct ieee80211_sband_iftype_data *iftd;
iftd = &sband->iftype_data[i];
supp_he = supp_he || iftd->he_cap.has_he;
supp_eht = supp_eht || iftd->eht_cap.has_eht;
}
/* HT, VHT, HE require QoS, thus >= 4 queues */
if (WARN_ON(local->hw.queues < IEEE80211_NUM_ACS &&
(supp_ht || supp_vht || supp_he)))
return -EINVAL;
/* EHT requires HE support */
if (WARN_ON(supp_eht && !supp_he))
return -EINVAL;
if (!sband->ht_cap.ht_supported)
continue;
/* TODO: consider VHT for RX chains, hopefully it's the same */
local->rx_chains =
max(ieee80211_mcs_to_chains(&sband->ht_cap.mcs),
local->rx_chains);
/* no need to mask, SM_PS_DISABLED has all bits set */
sband->ht_cap.cap |= WLAN_HT_CAP_SM_PS_DISABLED <<
IEEE80211_HT_CAP_SM_PS_SHIFT;
}
/* if low-level driver supports AP, we also support VLAN.
* drivers advertising SW_CRYPTO_CONTROL should enable AP_VLAN
* based on their support to transmit SW encrypted packets.
*/
if (local->hw.wiphy->interface_modes & BIT(NL80211_IFTYPE_AP) &&
!ieee80211_hw_check(&local->hw, SW_CRYPTO_CONTROL)) {
hw->wiphy->interface_modes |= BIT(NL80211_IFTYPE_AP_VLAN);
hw->wiphy->software_iftypes |= BIT(NL80211_IFTYPE_AP_VLAN);
}
/* mac80211 always supports monitor */
hw->wiphy->interface_modes |= BIT(NL80211_IFTYPE_MONITOR);
hw->wiphy->software_iftypes |= BIT(NL80211_IFTYPE_MONITOR);
/* mac80211 doesn't support more than one IBSS interface right now */
for (i = 0; i < hw->wiphy->n_iface_combinations; i++) {
const struct ieee80211_iface_combination *c;
int j;
c = &hw->wiphy->iface_combinations[i];
for (j = 0; j < c->n_limits; j++)
if ((c->limits[j].types & BIT(NL80211_IFTYPE_ADHOC)) &&
c->limits[j].max > 1)
return -EINVAL;
}
local->int_scan_req = kzalloc(sizeof(*local->int_scan_req) +
sizeof(void *) * channels, GFP_KERNEL);
if (!local->int_scan_req)
return -ENOMEM;
eth_broadcast_addr(local->int_scan_req->bssid);
for (band = 0; band < NUM_NL80211_BANDS; band++) {
if (!local->hw.wiphy->bands[band])
continue;
local->int_scan_req->rates[band] = (u32) -1;
}
#ifndef CONFIG_MAC80211_MESH
/* mesh depends on Kconfig, but drivers should set it if they want */
local->hw.wiphy->interface_modes &= ~BIT(NL80211_IFTYPE_MESH_POINT);
#endif
/* if the underlying driver supports mesh, mac80211 will (at least)
* provide routing of mesh authentication frames to userspace */
if (local->hw.wiphy->interface_modes & BIT(NL80211_IFTYPE_MESH_POINT))
local->hw.wiphy->flags |= WIPHY_FLAG_MESH_AUTH;
/* mac80211 supports control port protocol changing */
local->hw.wiphy->flags |= WIPHY_FLAG_CONTROL_PORT_PROTOCOL;
if (ieee80211_hw_check(&local->hw, SIGNAL_DBM)) {
local->hw.wiphy->signal_type = CFG80211_SIGNAL_TYPE_MBM;
} else if (ieee80211_hw_check(&local->hw, SIGNAL_UNSPEC)) {
local->hw.wiphy->signal_type = CFG80211_SIGNAL_TYPE_UNSPEC;
if (hw->max_signal <= 0) {
result = -EINVAL;
goto fail_workqueue;
}
}
/* Mac80211 and therefore all drivers using SW crypto only
* are able to handle PTK rekeys and Extended Key ID.
*/
if (!local->ops->set_key) {
wiphy_ext_feature_set(local->hw.wiphy,
NL80211_EXT_FEATURE_CAN_REPLACE_PTK0);
wiphy_ext_feature_set(local->hw.wiphy,
NL80211_EXT_FEATURE_EXT_KEY_ID);
}
if (local->hw.wiphy->interface_modes & BIT(NL80211_IFTYPE_ADHOC))
wiphy_ext_feature_set(local->hw.wiphy,
NL80211_EXT_FEATURE_DEL_IBSS_STA);
/*
* Calculate scan IE length -- we need this to alloc
* memory and to subtract from the driver limit. It
* includes the DS Params, (extended) supported rates, and HT
* information -- SSID is the driver's responsibility.
*/
local->scan_ies_len = 4 + max_bitrates /* (ext) supp rates */ +
3 /* DS Params */;
if (supp_ht)
local->scan_ies_len += 2 + sizeof(struct ieee80211_ht_cap);
if (supp_vht)
local->scan_ies_len +=
2 + sizeof(struct ieee80211_vht_cap);
	/*
	 * HE cap element is variable in size - set len to allow max size
	 */
if (supp_he) {
local->scan_ies_len +=
3 + sizeof(struct ieee80211_he_cap_elem) +
sizeof(struct ieee80211_he_mcs_nss_supp) +
IEEE80211_HE_PPE_THRES_MAX_LEN;
if (supp_eht)
local->scan_ies_len +=
3 + sizeof(struct ieee80211_eht_cap_elem) +
sizeof(struct ieee80211_eht_mcs_nss_supp) +
IEEE80211_EHT_PPE_THRES_MAX_LEN;
}
if (!local->ops->hw_scan) {
/* For hw_scan, driver needs to set these up. */
local->hw.wiphy->max_scan_ssids = 4;
local->hw.wiphy->max_scan_ie_len = IEEE80211_MAX_DATA_LEN;
}
/*
* If the driver supports any scan IEs, then assume the
* limit includes the IEs mac80211 will add, otherwise
* leave it at zero and let the driver sort it out; we
* still pass our IEs to the driver but userspace will
* not be allowed to in that case.
*/
if (local->hw.wiphy->max_scan_ie_len)
local->hw.wiphy->max_scan_ie_len -= local->scan_ies_len;
result = ieee80211_init_cipher_suites(local);
if (result < 0)
goto fail_workqueue;
if (!local->ops->remain_on_channel)
local->hw.wiphy->max_remain_on_channel_duration = 5000;
/* mac80211 based drivers don't support internal TDLS setup */
if (local->hw.wiphy->flags & WIPHY_FLAG_SUPPORTS_TDLS)
local->hw.wiphy->flags |= WIPHY_FLAG_TDLS_EXTERNAL_SETUP;
/* mac80211 supports eCSA, if the driver supports STA CSA at all */
if (ieee80211_hw_check(&local->hw, CHANCTX_STA_CSA))
local->ext_capa[0] |= WLAN_EXT_CAPA1_EXT_CHANNEL_SWITCHING;
/* mac80211 supports multi BSSID, if the driver supports it */
if (ieee80211_hw_check(&local->hw, SUPPORTS_MULTI_BSSID)) {
local->hw.wiphy->support_mbssid = true;
if (ieee80211_hw_check(&local->hw,
SUPPORTS_ONLY_HE_MULTI_BSSID))
local->hw.wiphy->support_only_he_mbssid = true;
else
local->ext_capa[2] |=
WLAN_EXT_CAPA3_MULTI_BSSID_SUPPORT;
}
local->hw.wiphy->max_num_csa_counters = IEEE80211_MAX_CNTDWN_COUNTERS_NUM;
/*
* We use the number of queues for feature tests (QoS, HT) internally
* so restrict them appropriately.
*/
if (hw->queues > IEEE80211_MAX_QUEUES)
hw->queues = IEEE80211_MAX_QUEUES;
local->workqueue =
alloc_ordered_workqueue("%s", 0, wiphy_name(local->hw.wiphy));
if (!local->workqueue) {
result = -ENOMEM;
goto fail_workqueue;
}
/*
* The hardware needs headroom for sending the frame,
* and we need some headroom for passing the frame to monitor
* interfaces, but never both at the same time.
*/
	local->tx_headroom = max_t(unsigned int, local->hw.extra_tx_headroom,
IEEE80211_TX_STATUS_HEADROOM);
/*
* if the driver doesn't specify a max listen interval we
* use 5 which should be a safe default
*/
if (local->hw.max_listen_interval == 0)
local->hw.max_listen_interval = 5;
local->hw.conf.listen_interval = local->hw.max_listen_interval;
local->dynamic_ps_forced_timeout = -1;
if (!local->hw.max_nan_de_entries)
local->hw.max_nan_de_entries = IEEE80211_MAX_NAN_INSTANCE_ID;
if (!local->hw.weight_multiplier)
local->hw.weight_multiplier = 1;
ieee80211_wep_init(local);
local->hw.conf.flags = IEEE80211_CONF_IDLE;
ieee80211_led_init(local);
result = ieee80211_txq_setup_flows(local);
if (result)
goto fail_flows;
rtnl_lock();
result = ieee80211_init_rate_ctrl_alg(local,
hw->rate_control_algorithm);
rtnl_unlock();
if (result < 0) {
wiphy_debug(local->hw.wiphy,
"Failed to initialize rate control algorithm\n");
goto fail_rate;
}
if (local->rate_ctrl) {
clear_bit(IEEE80211_HW_SUPPORTS_VHT_EXT_NSS_BW, hw->flags);
if (local->rate_ctrl->ops->capa & RATE_CTRL_CAPA_VHT_EXT_NSS_BW)
ieee80211_hw_set(hw, SUPPORTS_VHT_EXT_NSS_BW);
}
/*
* If the VHT capabilities don't have IEEE80211_VHT_EXT_NSS_BW_CAPABLE,
* or have it when we don't, copy the sband structure and set/clear it.
* This is necessary because rate scaling algorithms could be switched
* and have different support values.
* Print a message so that in the common case the reallocation can be
* avoided.
*/
BUILD_BUG_ON(NUM_NL80211_BANDS > 8 * sizeof(local->sband_allocated));
for (band = 0; band < NUM_NL80211_BANDS; band++) {
struct ieee80211_supported_band *sband;
bool local_cap, ie_cap;
local_cap = ieee80211_hw_check(hw, SUPPORTS_VHT_EXT_NSS_BW);
sband = local->hw.wiphy->bands[band];
if (!sband || !sband->vht_cap.vht_supported)
continue;
ie_cap = !!(sband->vht_cap.vht_mcs.tx_highest &
cpu_to_le16(IEEE80211_VHT_EXT_NSS_BW_CAPABLE));
if (local_cap == ie_cap)
continue;
sband = kmemdup(sband, sizeof(*sband), GFP_KERNEL);
if (!sband) {
result = -ENOMEM;
goto fail_rate;
}
wiphy_dbg(hw->wiphy, "copying sband (band %d) due to VHT EXT NSS BW flag\n",
band);
sband->vht_cap.vht_mcs.tx_highest ^=
cpu_to_le16(IEEE80211_VHT_EXT_NSS_BW_CAPABLE);
local->hw.wiphy->bands[band] = sband;
local->sband_allocated |= BIT(band);
}
result = wiphy_register(local->hw.wiphy);
if (result < 0)
goto fail_wiphy_register;
debugfs_hw_add(local);
rate_control_add_debugfs(local);
rtnl_lock();
wiphy_lock(hw->wiphy);
/* add one default STA interface if supported */
if (local->hw.wiphy->interface_modes & BIT(NL80211_IFTYPE_STATION) &&
!ieee80211_hw_check(hw, NO_AUTO_VIF)) {
struct vif_params params = {0};
result = ieee80211_if_add(local, "wlan%d", NET_NAME_ENUM, NULL,
NL80211_IFTYPE_STATION, ¶ms);
if (result)
wiphy_warn(local->hw.wiphy,
"Failed to add default virtual iface\n");
}
wiphy_unlock(hw->wiphy);
rtnl_unlock();
#ifdef CONFIG_INET
local->ifa_notifier.notifier_call = ieee80211_ifa_changed;
result = register_inetaddr_notifier(&local->ifa_notifier);
if (result)
goto fail_ifa;
#endif
#if IS_ENABLED(CONFIG_IPV6)
local->ifa6_notifier.notifier_call = ieee80211_ifa6_changed;
result = register_inet6addr_notifier(&local->ifa6_notifier);
if (result)
goto fail_ifa6;
#endif
return 0;
#if IS_ENABLED(CONFIG_IPV6)
fail_ifa6:
#ifdef CONFIG_INET
unregister_inetaddr_notifier(&local->ifa_notifier);
#endif
#endif
#if defined(CONFIG_INET) || defined(CONFIG_IPV6)
fail_ifa:
#endif
wiphy_unregister(local->hw.wiphy);
fail_wiphy_register:
rtnl_lock();
rate_control_deinitialize(local);
ieee80211_remove_interfaces(local);
rtnl_unlock();
fail_rate:
fail_flows:
ieee80211_led_exit(local);
destroy_workqueue(local->workqueue);
fail_workqueue:
if (local->wiphy_ciphers_allocated) {
kfree(local->hw.wiphy->cipher_suites);
local->wiphy_ciphers_allocated = false;
}
kfree(local->int_scan_req);
return result;
}
EXPORT_SYMBOL(ieee80211_register_hw);
void ieee80211_unregister_hw(struct ieee80211_hw *hw)
{
struct ieee80211_local *local = hw_to_local(hw);
tasklet_kill(&local->tx_pending_tasklet);
tasklet_kill(&local->tasklet);
#ifdef CONFIG_INET
unregister_inetaddr_notifier(&local->ifa_notifier);
#endif
#if IS_ENABLED(CONFIG_IPV6)
unregister_inet6addr_notifier(&local->ifa6_notifier);
#endif
rtnl_lock();
/*
* At this point, interface list manipulations are fine
* because the driver cannot be handing us frames any
* more and the tasklet is killed.
*/
ieee80211_remove_interfaces(local);
rtnl_unlock();
cancel_delayed_work_sync(&local->roc_work);
cancel_work_sync(&local->restart_work);
cancel_work_sync(&local->reconfig_filter);
flush_work(&local->sched_scan_stopped_work);
flush_work(&local->radar_detected_work);
ieee80211_clear_tx_pending(local);
rate_control_deinitialize(local);
if (skb_queue_len(&local->skb_queue) ||
skb_queue_len(&local->skb_queue_unreliable))
wiphy_warn(local->hw.wiphy, "skb_queue not empty\n");
skb_queue_purge(&local->skb_queue);
skb_queue_purge(&local->skb_queue_unreliable);
wiphy_unregister(local->hw.wiphy);
destroy_workqueue(local->workqueue);
ieee80211_led_exit(local);
kfree(local->int_scan_req);
}
EXPORT_SYMBOL(ieee80211_unregister_hw);
static int ieee80211_free_ack_frame(int id, void *p, void *data)
{
WARN_ONCE(1, "Have pending ack frames!\n");
kfree_skb(p);
return 0;
}
void ieee80211_free_hw(struct ieee80211_hw *hw)
{
struct ieee80211_local *local = hw_to_local(hw);
enum nl80211_band band;
mutex_destroy(&local->iflist_mtx);
mutex_destroy(&local->mtx);
if (local->wiphy_ciphers_allocated) {
kfree(local->hw.wiphy->cipher_suites);
local->wiphy_ciphers_allocated = false;
}
idr_for_each(&local->ack_status_frames,
ieee80211_free_ack_frame, NULL);
idr_destroy(&local->ack_status_frames);
sta_info_stop(local);
ieee80211_free_led_names(local);
for (band = 0; band < NUM_NL80211_BANDS; band++) {
if (!(local->sband_allocated & BIT(band)))
continue;
kfree(local->hw.wiphy->bands[band]);
}
wiphy_free(local->hw.wiphy);
}
EXPORT_SYMBOL(ieee80211_free_hw);
static const char * const drop_reasons_monitor[] = {
#define V(x) #x,
[0] = "RX_DROP_MONITOR",
MAC80211_DROP_REASONS_MONITOR(V)
};
static struct drop_reason_list drop_reason_list_monitor = {
.reasons = drop_reasons_monitor,
.n_reasons = ARRAY_SIZE(drop_reasons_monitor),
};
static const char * const drop_reasons_unusable[] = {
[0] = "RX_DROP_UNUSABLE",
MAC80211_DROP_REASONS_UNUSABLE(V)
#undef V
};
static struct drop_reason_list drop_reason_list_unusable = {
.reasons = drop_reasons_unusable,
.n_reasons = ARRAY_SIZE(drop_reasons_unusable),
};
static int __init ieee80211_init(void)
{
struct sk_buff *skb;
int ret;
BUILD_BUG_ON(sizeof(struct ieee80211_tx_info) > sizeof(skb->cb));
BUILD_BUG_ON(offsetof(struct ieee80211_tx_info, driver_data) +
IEEE80211_TX_INFO_DRIVER_DATA_SIZE > sizeof(skb->cb));
ret = rc80211_minstrel_init();
if (ret)
return ret;
ret = ieee80211_iface_init();
if (ret)
goto err_netdev;
drop_reasons_register_subsys(SKB_DROP_REASON_SUBSYS_MAC80211_MONITOR,
&drop_reason_list_monitor);
drop_reasons_register_subsys(SKB_DROP_REASON_SUBSYS_MAC80211_UNUSABLE,
&drop_reason_list_unusable);
return 0;
err_netdev:
rc80211_minstrel_exit();
return ret;
}
static void __exit ieee80211_exit(void)
{
rc80211_minstrel_exit();
ieee80211s_stop();
ieee80211_iface_exit();
drop_reasons_unregister_subsys(SKB_DROP_REASON_SUBSYS_MAC80211_MONITOR);
drop_reasons_unregister_subsys(SKB_DROP_REASON_SUBSYS_MAC80211_UNUSABLE);
rcu_barrier();
}
subsys_initcall(ieee80211_init);
module_exit(ieee80211_exit);
MODULE_DESCRIPTION("IEEE 802.11 subsystem");
MODULE_LICENSE("GPL");
| linux-master | net/mac80211/main.c |
// SPDX-License-Identifier: GPL-2.0-only
/*
* Copyright 2002-2004, Instant802 Networks, Inc.
* Copyright 2008, Jouni Malinen <[email protected]>
* Copyright (C) 2016-2017 Intel Deutschland GmbH
* Copyright (C) 2020-2022 Intel Corporation
*/
#include <linux/netdevice.h>
#include <linux/types.h>
#include <linux/skbuff.h>
#include <linux/compiler.h>
#include <linux/ieee80211.h>
#include <linux/gfp.h>
#include <asm/unaligned.h>
#include <net/mac80211.h>
#include <crypto/aes.h>
#include <crypto/utils.h>
#include "ieee80211_i.h"
#include "michael.h"
#include "tkip.h"
#include "aes_ccm.h"
#include "aes_cmac.h"
#include "aes_gmac.h"
#include "aes_gcm.h"
#include "wpa.h"
ieee80211_tx_result
ieee80211_tx_h_michael_mic_add(struct ieee80211_tx_data *tx)
{
u8 *data, *key, *mic;
size_t data_len;
unsigned int hdrlen;
struct ieee80211_hdr *hdr;
struct sk_buff *skb = tx->skb;
struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb);
int tail;
hdr = (struct ieee80211_hdr *)skb->data;
if (!tx->key || tx->key->conf.cipher != WLAN_CIPHER_SUITE_TKIP ||
skb->len < 24 || !ieee80211_is_data_present(hdr->frame_control))
return TX_CONTINUE;
hdrlen = ieee80211_hdrlen(hdr->frame_control);
if (skb->len < hdrlen)
return TX_DROP;
data = skb->data + hdrlen;
data_len = skb->len - hdrlen;
if (unlikely(info->flags & IEEE80211_TX_INTFL_TKIP_MIC_FAILURE)) {
/* Need to use software crypto for the test */
info->control.hw_key = NULL;
}
if (info->control.hw_key &&
(info->flags & IEEE80211_TX_CTL_DONTFRAG ||
ieee80211_hw_check(&tx->local->hw, SUPPORTS_TX_FRAG)) &&
!(tx->key->conf.flags & (IEEE80211_KEY_FLAG_GENERATE_MMIC |
IEEE80211_KEY_FLAG_PUT_MIC_SPACE))) {
/* hwaccel - with no need for SW-generated MMIC or MIC space */
return TX_CONTINUE;
}
tail = MICHAEL_MIC_LEN;
if (!info->control.hw_key)
tail += IEEE80211_TKIP_ICV_LEN;
if (WARN(skb_tailroom(skb) < tail ||
skb_headroom(skb) < IEEE80211_TKIP_IV_LEN,
"mmic: not enough head/tail (%d/%d,%d/%d)\n",
skb_headroom(skb), IEEE80211_TKIP_IV_LEN,
skb_tailroom(skb), tail))
return TX_DROP;
mic = skb_put(skb, MICHAEL_MIC_LEN);
if (tx->key->conf.flags & IEEE80211_KEY_FLAG_PUT_MIC_SPACE) {
/* Zeroed MIC can help with debug */
memset(mic, 0, MICHAEL_MIC_LEN);
return TX_CONTINUE;
}
key = &tx->key->conf.key[NL80211_TKIP_DATA_OFFSET_TX_MIC_KEY];
michael_mic(key, hdr, data, data_len, mic);
if (unlikely(info->flags & IEEE80211_TX_INTFL_TKIP_MIC_FAILURE))
mic[0]++;
return TX_CONTINUE;
}
ieee80211_rx_result
ieee80211_rx_h_michael_mic_verify(struct ieee80211_rx_data *rx)
{
u8 *data, *key = NULL;
size_t data_len;
unsigned int hdrlen;
u8 mic[MICHAEL_MIC_LEN];
struct sk_buff *skb = rx->skb;
struct ieee80211_rx_status *status = IEEE80211_SKB_RXCB(skb);
struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)skb->data;
/*
* it makes no sense to check for MIC errors on anything other
* than data frames.
*/
if (!ieee80211_is_data_present(hdr->frame_control))
return RX_CONTINUE;
/*
* No way to verify the MIC if the hardware stripped it or
	 * the IV with the key index. In this case we have to rely
	 * solely on the driver to set RX_FLAG_MMIC_ERROR in the event
	 * of a MIC failure report.
*/
if (status->flag & (RX_FLAG_MMIC_STRIPPED | RX_FLAG_IV_STRIPPED)) {
if (status->flag & RX_FLAG_MMIC_ERROR)
goto mic_fail_no_key;
if (!(status->flag & RX_FLAG_IV_STRIPPED) && rx->key &&
rx->key->conf.cipher == WLAN_CIPHER_SUITE_TKIP)
goto update_iv;
return RX_CONTINUE;
}
/*
	 * Some hardware seems to generate Michael MIC failure reports even
	 * though the frame was not encrypted with TKIP and therefore has no
	 * MIC. Ignore the flag for such frames to avoid triggering
	 * countermeasures.
*/
if (!rx->key || rx->key->conf.cipher != WLAN_CIPHER_SUITE_TKIP ||
!(status->flag & RX_FLAG_DECRYPTED))
return RX_CONTINUE;
if (rx->sdata->vif.type == NL80211_IFTYPE_AP && rx->key->conf.keyidx) {
/*
* APs with pairwise keys should never receive Michael MIC
* errors for non-zero keyidx because these are reserved for
* group keys and only the AP is sending real multicast
* frames in the BSS.
*/
return RX_DROP_UNUSABLE;
}
if (status->flag & RX_FLAG_MMIC_ERROR)
goto mic_fail;
hdrlen = ieee80211_hdrlen(hdr->frame_control);
if (skb->len < hdrlen + MICHAEL_MIC_LEN)
return RX_DROP_UNUSABLE;
if (skb_linearize(rx->skb))
return RX_DROP_UNUSABLE;
hdr = (void *)skb->data;
data = skb->data + hdrlen;
data_len = skb->len - hdrlen - MICHAEL_MIC_LEN;
key = &rx->key->conf.key[NL80211_TKIP_DATA_OFFSET_RX_MIC_KEY];
michael_mic(key, hdr, data, data_len, mic);
if (crypto_memneq(mic, data + data_len, MICHAEL_MIC_LEN))
goto mic_fail;
/* remove Michael MIC from payload */
skb_trim(skb, skb->len - MICHAEL_MIC_LEN);
update_iv:
/* update IV in key information to be able to detect replays */
rx->key->u.tkip.rx[rx->security_idx].iv32 = rx->tkip.iv32;
rx->key->u.tkip.rx[rx->security_idx].iv16 = rx->tkip.iv16;
return RX_CONTINUE;
mic_fail:
rx->key->u.tkip.mic_failures++;
mic_fail_no_key:
/*
* In some cases the key can be unset - e.g. a multicast packet, in
* a driver that supports HW encryption. Send up the key idx only if
* the key is set.
*/
cfg80211_michael_mic_failure(rx->sdata->dev, hdr->addr2,
is_multicast_ether_addr(hdr->addr1) ?
NL80211_KEYTYPE_GROUP :
NL80211_KEYTYPE_PAIRWISE,
rx->key ? rx->key->conf.keyidx : -1,
NULL, GFP_ATOMIC);
return RX_DROP_UNUSABLE;
}
static int tkip_encrypt_skb(struct ieee80211_tx_data *tx, struct sk_buff *skb)
{
struct ieee80211_hdr *hdr = (struct ieee80211_hdr *) skb->data;
struct ieee80211_key *key = tx->key;
struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb);
unsigned int hdrlen;
int len, tail;
u64 pn;
u8 *pos;
if (info->control.hw_key &&
!(info->control.hw_key->flags & IEEE80211_KEY_FLAG_GENERATE_IV) &&
!(info->control.hw_key->flags & IEEE80211_KEY_FLAG_PUT_IV_SPACE)) {
/* hwaccel - with no need for software-generated IV */
return 0;
}
hdrlen = ieee80211_hdrlen(hdr->frame_control);
len = skb->len - hdrlen;
if (info->control.hw_key)
tail = 0;
else
tail = IEEE80211_TKIP_ICV_LEN;
if (WARN_ON(skb_tailroom(skb) < tail ||
skb_headroom(skb) < IEEE80211_TKIP_IV_LEN))
return -1;
pos = skb_push(skb, IEEE80211_TKIP_IV_LEN);
memmove(pos, pos + IEEE80211_TKIP_IV_LEN, hdrlen);
pos += hdrlen;
/* the HW only needs room for the IV, but not the actual IV */
if (info->control.hw_key &&
(info->control.hw_key->flags & IEEE80211_KEY_FLAG_PUT_IV_SPACE))
return 0;
/* Increase IV for the frame */
pn = atomic64_inc_return(&key->conf.tx_pn);
pos = ieee80211_tkip_add_iv(pos, &key->conf, pn);
/* hwaccel - with software IV */
if (info->control.hw_key)
return 0;
/* Add room for ICV */
skb_put(skb, IEEE80211_TKIP_ICV_LEN);
return ieee80211_tkip_encrypt_data(&tx->local->wep_tx_ctx,
key, skb, pos, len);
}
ieee80211_tx_result
ieee80211_crypto_tkip_encrypt(struct ieee80211_tx_data *tx)
{
struct sk_buff *skb;
ieee80211_tx_set_protected(tx);
skb_queue_walk(&tx->skbs, skb) {
if (tkip_encrypt_skb(tx, skb) < 0)
return TX_DROP;
}
return TX_CONTINUE;
}
ieee80211_rx_result
ieee80211_crypto_tkip_decrypt(struct ieee80211_rx_data *rx)
{
struct ieee80211_hdr *hdr = (struct ieee80211_hdr *) rx->skb->data;
int hdrlen, res, hwaccel = 0;
struct ieee80211_key *key = rx->key;
struct sk_buff *skb = rx->skb;
struct ieee80211_rx_status *status = IEEE80211_SKB_RXCB(skb);
hdrlen = ieee80211_hdrlen(hdr->frame_control);
if (!ieee80211_is_data(hdr->frame_control))
return RX_CONTINUE;
if (!rx->sta || skb->len - hdrlen < 12)
return RX_DROP_UNUSABLE;
/* it may be possible to optimize this a bit more */
if (skb_linearize(rx->skb))
return RX_DROP_UNUSABLE;
hdr = (void *)skb->data;
/*
* Let TKIP code verify IV, but skip decryption.
* In the case where hardware checks the IV as well,
* we don't even get here, see ieee80211_rx_h_decrypt()
*/
if (status->flag & RX_FLAG_DECRYPTED)
hwaccel = 1;
res = ieee80211_tkip_decrypt_data(&rx->local->wep_rx_ctx,
key, skb->data + hdrlen,
skb->len - hdrlen, rx->sta->sta.addr,
hdr->addr1, hwaccel, rx->security_idx,
&rx->tkip.iv32,
&rx->tkip.iv16);
if (res != TKIP_DECRYPT_OK)
return RX_DROP_UNUSABLE;
/* Trim ICV */
if (!(status->flag & RX_FLAG_ICV_STRIPPED))
skb_trim(skb, skb->len - IEEE80211_TKIP_ICV_LEN);
/* Remove IV */
memmove(skb->data + IEEE80211_TKIP_IV_LEN, skb->data, hdrlen);
skb_pull(skb, IEEE80211_TKIP_IV_LEN);
return RX_CONTINUE;
}
/*
* Calculate AAD for CCMP/GCMP, returning qos_tid since we
* need that in CCMP also for b_0.
*/
static u8 ccmp_gcmp_aad(struct sk_buff *skb, u8 *aad)
{
struct ieee80211_hdr *hdr = (void *)skb->data;
__le16 mask_fc;
int a4_included, mgmt;
u8 qos_tid;
u16 len_a = 22;
/*
* Mask FC: zero subtype b4 b5 b6 (if not mgmt)
* Retry, PwrMgt, MoreData, Order (if Qos Data); set Protected
*/
mgmt = ieee80211_is_mgmt(hdr->frame_control);
mask_fc = hdr->frame_control;
mask_fc &= ~cpu_to_le16(IEEE80211_FCTL_RETRY |
IEEE80211_FCTL_PM | IEEE80211_FCTL_MOREDATA);
if (!mgmt)
mask_fc &= ~cpu_to_le16(0x0070);
mask_fc |= cpu_to_le16(IEEE80211_FCTL_PROTECTED);
a4_included = ieee80211_has_a4(hdr->frame_control);
if (a4_included)
len_a += 6;
if (ieee80211_is_data_qos(hdr->frame_control)) {
qos_tid = ieee80211_get_tid(hdr);
mask_fc &= ~cpu_to_le16(IEEE80211_FCTL_ORDER);
len_a += 2;
} else {
qos_tid = 0;
}
/* AAD (extra authenticate-only data) / masked 802.11 header
* FC | A1 | A2 | A3 | SC | [A4] | [QC] */
put_unaligned_be16(len_a, &aad[0]);
put_unaligned(mask_fc, (__le16 *)&aad[2]);
memcpy(&aad[4], &hdr->addrs, 3 * ETH_ALEN);
/* Mask Seq#, leave Frag# */
aad[22] = *((u8 *) &hdr->seq_ctrl) & 0x0f;
aad[23] = 0;
if (a4_included) {
memcpy(&aad[24], hdr->addr4, ETH_ALEN);
aad[30] = qos_tid;
aad[31] = 0;
} else {
memset(&aad[24], 0, ETH_ALEN + IEEE80211_QOS_CTL_LEN);
aad[24] = qos_tid;
}
return qos_tid;
}
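/*
 * Worked example (editor-added illustration, not part of the original
 * mac80211 sources): the frame-control masking done by ccmp_gcmp_aad()
 * above, assuming a QoS Data frame (subtype 8) with Retry and PwrMgt set
 * and no A4 or Order bit:
 *
 *   fc         = 0x1888  (type = Data, subtype = QoS Data, Retry, PwrMgt)
 *   & ~0x3800  = 0x0088  (clear Retry, PwrMgt, MoreData)
 *   & ~0x0070  = 0x0088  (not mgmt: clear subtype bits b4-b6)
 *   | 0x4000   = 0x4088  (set Protected)
 *   & ~0x8000  = 0x4088  (QoS data: clear Order)
 *
 * so mask_fc ends up as 0x4088 and, with no A4, len_a = 22 + 2 = 24.
 */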
static void ccmp_special_blocks(struct sk_buff *skb, u8 *pn, u8 *b_0, u8 *aad)
{
struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)skb->data;
u8 qos_tid = ccmp_gcmp_aad(skb, aad);
/* In CCM, the initial vectors (IV) used for CTR mode encryption and CBC
* mode authentication are not allowed to collide, yet both are derived
* from this vector b_0. We only set L := 1 here to indicate that the
* data size can be represented in (L+1) bytes. The CCM layer will take
* care of storing the data length in the top (L+1) bytes and setting
* and clearing the other bits as is required to derive the two IVs.
*/
b_0[0] = 0x1;
/* Nonce: Nonce Flags | A2 | PN
* Nonce Flags: Priority (b0..b3) | Management (b4) | Reserved (b5..b7)
*/
b_0[1] = qos_tid | (ieee80211_is_mgmt(hdr->frame_control) << 4);
memcpy(&b_0[2], hdr->addr2, ETH_ALEN);
memcpy(&b_0[8], pn, IEEE80211_CCMP_PN_LEN);
}
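/*
 * Illustrative note (editor-added, not part of the original sources):
 * the resulting b_0 layout for a QoS data frame with TID 5, transmitter
 * address A2 and packet number PN (PN5 = most significant byte) is:
 *
 *   b_0[0]      = 0x01                     (flags: L = 1)
 *   b_0[1]      = 0x05                     (nonce flags: priority = TID 5)
 *   b_0[2..7]   = A2                       (transmitter address)
 *   b_0[8..13]  = PN5 PN4 PN3 PN2 PN1 PN0  (PN, most significant first)
 *   b_0[14..15]                            (length, filled by the CCM layer)
 */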
static inline void ccmp_pn2hdr(u8 *hdr, u8 *pn, int key_id)
{
hdr[0] = pn[5];
hdr[1] = pn[4];
hdr[2] = 0;
hdr[3] = 0x20 | (key_id << 6);
hdr[4] = pn[3];
hdr[5] = pn[2];
hdr[6] = pn[1];
hdr[7] = pn[0];
}
static inline void ccmp_hdr2pn(u8 *pn, u8 *hdr)
{
pn[0] = hdr[7];
pn[1] = hdr[6];
pn[2] = hdr[5];
pn[3] = hdr[4];
pn[4] = hdr[1];
pn[5] = hdr[0];
}
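/*
 * Illustrative sketch (editor-added example, not part of mac80211): a
 * hypothetical self-check showing the CCMP header byte layout produced
 * by ccmp_pn2hdr() above - the two low PN bytes, a reserved byte, the
 * ExtIV/KeyID byte, then the four high PN bytes - and that
 * ccmp_hdr2pn() recovers the original PN. The function name is made up
 * for the example and nothing calls it.
 */
static inline bool ccmp_pn_layout_example(void)
{
	u8 pn[6] = { 0x00, 0x00, 0x00, 0x00, 0x01, 0x02 }; /* PN = 0x0102 */
	u8 hdr[IEEE80211_CCMP_HDR_LEN];
	u8 out[6];
	ccmp_pn2hdr(hdr, pn, 1);
	/* hdr = { 02 01 00 60 00 00 00 00 }: PN0, PN1, rsvd, ExtIV|KeyID=1 */
	ccmp_hdr2pn(out, hdr);
	return memcmp(pn, out, sizeof(pn)) == 0 && hdr[3] == (0x20 | (1 << 6));
}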
static int ccmp_encrypt_skb(struct ieee80211_tx_data *tx, struct sk_buff *skb,
unsigned int mic_len)
{
struct ieee80211_hdr *hdr = (struct ieee80211_hdr *) skb->data;
struct ieee80211_key *key = tx->key;
struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb);
int hdrlen, len, tail;
u8 *pos;
u8 pn[6];
u64 pn64;
u8 aad[CCM_AAD_LEN];
u8 b_0[AES_BLOCK_SIZE];
if (info->control.hw_key &&
!(info->control.hw_key->flags & IEEE80211_KEY_FLAG_GENERATE_IV) &&
!(info->control.hw_key->flags & IEEE80211_KEY_FLAG_PUT_IV_SPACE) &&
!((info->control.hw_key->flags &
IEEE80211_KEY_FLAG_GENERATE_IV_MGMT) &&
ieee80211_is_mgmt(hdr->frame_control))) {
/*
* hwaccel has no need for preallocated room for CCMP
* header or MIC fields
*/
return 0;
}
hdrlen = ieee80211_hdrlen(hdr->frame_control);
len = skb->len - hdrlen;
if (info->control.hw_key)
tail = 0;
else
tail = mic_len;
if (WARN_ON(skb_tailroom(skb) < tail ||
skb_headroom(skb) < IEEE80211_CCMP_HDR_LEN))
return -1;
pos = skb_push(skb, IEEE80211_CCMP_HDR_LEN);
memmove(pos, pos + IEEE80211_CCMP_HDR_LEN, hdrlen);
/* the HW only needs room for the IV, but not the actual IV */
if (info->control.hw_key &&
(info->control.hw_key->flags & IEEE80211_KEY_FLAG_PUT_IV_SPACE))
return 0;
pos += hdrlen;
pn64 = atomic64_inc_return(&key->conf.tx_pn);
pn[5] = pn64;
pn[4] = pn64 >> 8;
pn[3] = pn64 >> 16;
pn[2] = pn64 >> 24;
pn[1] = pn64 >> 32;
pn[0] = pn64 >> 40;
ccmp_pn2hdr(pos, pn, key->conf.keyidx);
/* hwaccel - with software CCMP header */
if (info->control.hw_key)
return 0;
pos += IEEE80211_CCMP_HDR_LEN;
ccmp_special_blocks(skb, pn, b_0, aad);
return ieee80211_aes_ccm_encrypt(key->u.ccmp.tfm, b_0, aad, pos, len,
skb_put(skb, mic_len));
}
ieee80211_tx_result
ieee80211_crypto_ccmp_encrypt(struct ieee80211_tx_data *tx,
unsigned int mic_len)
{
struct sk_buff *skb;
ieee80211_tx_set_protected(tx);
skb_queue_walk(&tx->skbs, skb) {
if (ccmp_encrypt_skb(tx, skb, mic_len) < 0)
return TX_DROP;
}
return TX_CONTINUE;
}
ieee80211_rx_result
ieee80211_crypto_ccmp_decrypt(struct ieee80211_rx_data *rx,
unsigned int mic_len)
{
struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)rx->skb->data;
int hdrlen;
struct ieee80211_key *key = rx->key;
struct sk_buff *skb = rx->skb;
struct ieee80211_rx_status *status = IEEE80211_SKB_RXCB(skb);
u8 pn[IEEE80211_CCMP_PN_LEN];
int data_len;
int queue;
hdrlen = ieee80211_hdrlen(hdr->frame_control);
if (!ieee80211_is_data(hdr->frame_control) &&
!ieee80211_is_robust_mgmt_frame(skb))
return RX_CONTINUE;
if (status->flag & RX_FLAG_DECRYPTED) {
if (!pskb_may_pull(rx->skb, hdrlen + IEEE80211_CCMP_HDR_LEN))
return RX_DROP_UNUSABLE;
if (status->flag & RX_FLAG_MIC_STRIPPED)
mic_len = 0;
} else {
if (skb_linearize(rx->skb))
return RX_DROP_UNUSABLE;
}
/* reload hdr - skb might have been reallocated */
hdr = (void *)rx->skb->data;
data_len = skb->len - hdrlen - IEEE80211_CCMP_HDR_LEN - mic_len;
if (!rx->sta || data_len < 0)
return RX_DROP_UNUSABLE;
if (!(status->flag & RX_FLAG_PN_VALIDATED)) {
int res;
ccmp_hdr2pn(pn, skb->data + hdrlen);
queue = rx->security_idx;
res = memcmp(pn, key->u.ccmp.rx_pn[queue],
IEEE80211_CCMP_PN_LEN);
if (res < 0 ||
(!res && !(status->flag & RX_FLAG_ALLOW_SAME_PN))) {
key->u.ccmp.replays++;
return RX_DROP_U_REPLAY;
}
if (!(status->flag & RX_FLAG_DECRYPTED)) {
u8 aad[2 * AES_BLOCK_SIZE];
u8 b_0[AES_BLOCK_SIZE];
/* hardware didn't decrypt/verify MIC */
ccmp_special_blocks(skb, pn, b_0, aad);
if (ieee80211_aes_ccm_decrypt(
key->u.ccmp.tfm, b_0, aad,
skb->data + hdrlen + IEEE80211_CCMP_HDR_LEN,
data_len,
skb->data + skb->len - mic_len))
return RX_DROP_U_MIC_FAIL;
}
memcpy(key->u.ccmp.rx_pn[queue], pn, IEEE80211_CCMP_PN_LEN);
if (unlikely(ieee80211_is_frag(hdr)))
memcpy(rx->ccm_gcm.pn, pn, IEEE80211_CCMP_PN_LEN);
}
/* Remove CCMP header and MIC */
if (pskb_trim(skb, skb->len - mic_len))
return RX_DROP_UNUSABLE;
memmove(skb->data + IEEE80211_CCMP_HDR_LEN, skb->data, hdrlen);
skb_pull(skb, IEEE80211_CCMP_HDR_LEN);
return RX_CONTINUE;
}
static void gcmp_special_blocks(struct sk_buff *skb, u8 *pn, u8 *j_0, u8 *aad)
{
struct ieee80211_hdr *hdr = (void *)skb->data;
memcpy(j_0, hdr->addr2, ETH_ALEN);
memcpy(&j_0[ETH_ALEN], pn, IEEE80211_GCMP_PN_LEN);
j_0[13] = 0;
j_0[14] = 0;
j_0[AES_BLOCK_SIZE - 1] = 0x01;
ccmp_gcmp_aad(skb, aad);
}
static inline void gcmp_pn2hdr(u8 *hdr, const u8 *pn, int key_id)
{
hdr[0] = pn[5];
hdr[1] = pn[4];
hdr[2] = 0;
hdr[3] = 0x20 | (key_id << 6);
hdr[4] = pn[3];
hdr[5] = pn[2];
hdr[6] = pn[1];
hdr[7] = pn[0];
}
static inline void gcmp_hdr2pn(u8 *pn, const u8 *hdr)
{
pn[0] = hdr[7];
pn[1] = hdr[6];
pn[2] = hdr[5];
pn[3] = hdr[4];
pn[4] = hdr[1];
pn[5] = hdr[0];
}
static int gcmp_encrypt_skb(struct ieee80211_tx_data *tx, struct sk_buff *skb)
{
struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)skb->data;
struct ieee80211_key *key = tx->key;
struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb);
int hdrlen, len, tail;
u8 *pos;
u8 pn[6];
u64 pn64;
u8 aad[GCM_AAD_LEN];
u8 j_0[AES_BLOCK_SIZE];
if (info->control.hw_key &&
!(info->control.hw_key->flags & IEEE80211_KEY_FLAG_GENERATE_IV) &&
!(info->control.hw_key->flags & IEEE80211_KEY_FLAG_PUT_IV_SPACE) &&
!((info->control.hw_key->flags &
IEEE80211_KEY_FLAG_GENERATE_IV_MGMT) &&
ieee80211_is_mgmt(hdr->frame_control))) {
/* hwaccel has no need for preallocated room for GCMP
* header or MIC fields
*/
return 0;
}
hdrlen = ieee80211_hdrlen(hdr->frame_control);
len = skb->len - hdrlen;
if (info->control.hw_key)
tail = 0;
else
tail = IEEE80211_GCMP_MIC_LEN;
if (WARN_ON(skb_tailroom(skb) < tail ||
skb_headroom(skb) < IEEE80211_GCMP_HDR_LEN))
return -1;
pos = skb_push(skb, IEEE80211_GCMP_HDR_LEN);
memmove(pos, pos + IEEE80211_GCMP_HDR_LEN, hdrlen);
skb_set_network_header(skb, skb_network_offset(skb) +
IEEE80211_GCMP_HDR_LEN);
/* the HW only needs room for the IV, but not the actual IV */
if (info->control.hw_key &&
(info->control.hw_key->flags & IEEE80211_KEY_FLAG_PUT_IV_SPACE))
return 0;
pos += hdrlen;
pn64 = atomic64_inc_return(&key->conf.tx_pn);
pn[5] = pn64;
pn[4] = pn64 >> 8;
pn[3] = pn64 >> 16;
pn[2] = pn64 >> 24;
pn[1] = pn64 >> 32;
pn[0] = pn64 >> 40;
gcmp_pn2hdr(pos, pn, key->conf.keyidx);
/* hwaccel - with software GCMP header */
if (info->control.hw_key)
return 0;
pos += IEEE80211_GCMP_HDR_LEN;
gcmp_special_blocks(skb, pn, j_0, aad);
return ieee80211_aes_gcm_encrypt(key->u.gcmp.tfm, j_0, aad, pos, len,
skb_put(skb, IEEE80211_GCMP_MIC_LEN));
}
ieee80211_tx_result
ieee80211_crypto_gcmp_encrypt(struct ieee80211_tx_data *tx)
{
struct sk_buff *skb;
ieee80211_tx_set_protected(tx);
skb_queue_walk(&tx->skbs, skb) {
if (gcmp_encrypt_skb(tx, skb) < 0)
return TX_DROP;
}
return TX_CONTINUE;
}
ieee80211_rx_result
ieee80211_crypto_gcmp_decrypt(struct ieee80211_rx_data *rx)
{
struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)rx->skb->data;
int hdrlen;
struct ieee80211_key *key = rx->key;
struct sk_buff *skb = rx->skb;
struct ieee80211_rx_status *status = IEEE80211_SKB_RXCB(skb);
u8 pn[IEEE80211_GCMP_PN_LEN];
int data_len, queue, mic_len = IEEE80211_GCMP_MIC_LEN;
hdrlen = ieee80211_hdrlen(hdr->frame_control);
if (!ieee80211_is_data(hdr->frame_control) &&
!ieee80211_is_robust_mgmt_frame(skb))
return RX_CONTINUE;
if (status->flag & RX_FLAG_DECRYPTED) {
if (!pskb_may_pull(rx->skb, hdrlen + IEEE80211_GCMP_HDR_LEN))
return RX_DROP_UNUSABLE;
if (status->flag & RX_FLAG_MIC_STRIPPED)
mic_len = 0;
} else {
if (skb_linearize(rx->skb))
return RX_DROP_UNUSABLE;
}
/* reload hdr - skb might have been reallocated */
hdr = (void *)rx->skb->data;
data_len = skb->len - hdrlen - IEEE80211_GCMP_HDR_LEN - mic_len;
if (!rx->sta || data_len < 0)
return RX_DROP_UNUSABLE;
if (!(status->flag & RX_FLAG_PN_VALIDATED)) {
int res;
gcmp_hdr2pn(pn, skb->data + hdrlen);
queue = rx->security_idx;
res = memcmp(pn, key->u.gcmp.rx_pn[queue],
IEEE80211_GCMP_PN_LEN);
if (res < 0 ||
(!res && !(status->flag & RX_FLAG_ALLOW_SAME_PN))) {
key->u.gcmp.replays++;
return RX_DROP_U_REPLAY;
}
if (!(status->flag & RX_FLAG_DECRYPTED)) {
u8 aad[2 * AES_BLOCK_SIZE];
u8 j_0[AES_BLOCK_SIZE];
/* hardware didn't decrypt/verify MIC */
gcmp_special_blocks(skb, pn, j_0, aad);
if (ieee80211_aes_gcm_decrypt(
key->u.gcmp.tfm, j_0, aad,
skb->data + hdrlen + IEEE80211_GCMP_HDR_LEN,
data_len,
skb->data + skb->len -
IEEE80211_GCMP_MIC_LEN))
return RX_DROP_U_MIC_FAIL;
}
memcpy(key->u.gcmp.rx_pn[queue], pn, IEEE80211_GCMP_PN_LEN);
if (unlikely(ieee80211_is_frag(hdr)))
memcpy(rx->ccm_gcm.pn, pn, IEEE80211_CCMP_PN_LEN);
}
/* Remove GCMP header and MIC */
if (pskb_trim(skb, skb->len - mic_len))
return RX_DROP_UNUSABLE;
memmove(skb->data + IEEE80211_GCMP_HDR_LEN, skb->data, hdrlen);
skb_pull(skb, IEEE80211_GCMP_HDR_LEN);
return RX_CONTINUE;
}
static void bip_aad(struct sk_buff *skb, u8 *aad)
{
__le16 mask_fc;
struct ieee80211_hdr *hdr = (struct ieee80211_hdr *) skb->data;
/* BIP AAD: FC(masked) || A1 || A2 || A3 */
/* FC type/subtype */
/* Mask FC Retry, PwrMgt, MoreData flags to zero */
mask_fc = hdr->frame_control;
mask_fc &= ~cpu_to_le16(IEEE80211_FCTL_RETRY | IEEE80211_FCTL_PM |
IEEE80211_FCTL_MOREDATA);
put_unaligned(mask_fc, (__le16 *) &aad[0]);
/* A1 || A2 || A3 */
memcpy(aad + 2, &hdr->addrs, 3 * ETH_ALEN);
}
static inline void bip_ipn_set64(u8 *d, u64 pn)
{
*d++ = pn;
*d++ = pn >> 8;
*d++ = pn >> 16;
*d++ = pn >> 24;
*d++ = pn >> 32;
*d = pn >> 40;
}
static inline void bip_ipn_swap(u8 *d, const u8 *s)
{
*d++ = s[5];
*d++ = s[4];
*d++ = s[3];
*d++ = s[2];
*d++ = s[1];
*d = s[0];
}
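/*
 * Illustrative sketch (editor-added, not part of mac80211): the IPN in
 * the MMIE is carried little-endian, while the rx_pn copy kept in the
 * key is stored big-endian via bip_ipn_swap() above. That allows the
 * replay check to be a plain memcmp(), since for fixed-width big-endian
 * byte strings lexicographic order equals numeric order. A hypothetical
 * helper showing the pattern used by the decrypt handlers below:
 */
static inline bool bip_ipn_is_replay_example(const u8 *mmie_seq,
					     const u8 *last_rx_pn)
{
	u8 ipn[6];
	bip_ipn_swap(ipn, mmie_seq);	/* to big-endian byte order */
	/* replay unless the received IPN is strictly greater than the last */
	return memcmp(ipn, last_rx_pn, 6) <= 0;
}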
ieee80211_tx_result
ieee80211_crypto_aes_cmac_encrypt(struct ieee80211_tx_data *tx)
{
struct sk_buff *skb;
struct ieee80211_tx_info *info;
struct ieee80211_key *key = tx->key;
struct ieee80211_mmie *mmie;
u8 aad[20];
u64 pn64;
if (WARN_ON(skb_queue_len(&tx->skbs) != 1))
return TX_DROP;
skb = skb_peek(&tx->skbs);
info = IEEE80211_SKB_CB(skb);
if (info->control.hw_key &&
!(key->conf.flags & IEEE80211_KEY_FLAG_GENERATE_MMIE))
return TX_CONTINUE;
if (WARN_ON(skb_tailroom(skb) < sizeof(*mmie)))
return TX_DROP;
mmie = skb_put(skb, sizeof(*mmie));
mmie->element_id = WLAN_EID_MMIE;
mmie->length = sizeof(*mmie) - 2;
mmie->key_id = cpu_to_le16(key->conf.keyidx);
/* PN = PN + 1 */
pn64 = atomic64_inc_return(&key->conf.tx_pn);
bip_ipn_set64(mmie->sequence_number, pn64);
if (info->control.hw_key)
return TX_CONTINUE;
bip_aad(skb, aad);
/*
* MIC = AES-128-CMAC(IGTK, AAD || Management Frame Body || MMIE, 64)
*/
ieee80211_aes_cmac(key->u.aes_cmac.tfm, aad,
skb->data + 24, skb->len - 24, mmie->mic);
return TX_CONTINUE;
}
ieee80211_tx_result
ieee80211_crypto_aes_cmac_256_encrypt(struct ieee80211_tx_data *tx)
{
struct sk_buff *skb;
struct ieee80211_tx_info *info;
struct ieee80211_key *key = tx->key;
struct ieee80211_mmie_16 *mmie;
u8 aad[20];
u64 pn64;
if (WARN_ON(skb_queue_len(&tx->skbs) != 1))
return TX_DROP;
skb = skb_peek(&tx->skbs);
info = IEEE80211_SKB_CB(skb);
if (info->control.hw_key)
return TX_CONTINUE;
if (WARN_ON(skb_tailroom(skb) < sizeof(*mmie)))
return TX_DROP;
mmie = skb_put(skb, sizeof(*mmie));
mmie->element_id = WLAN_EID_MMIE;
mmie->length = sizeof(*mmie) - 2;
mmie->key_id = cpu_to_le16(key->conf.keyidx);
/* PN = PN + 1 */
pn64 = atomic64_inc_return(&key->conf.tx_pn);
bip_ipn_set64(mmie->sequence_number, pn64);
bip_aad(skb, aad);
/* MIC = AES-256-CMAC(IGTK, AAD || Management Frame Body || MMIE, 128)
*/
ieee80211_aes_cmac_256(key->u.aes_cmac.tfm, aad,
skb->data + 24, skb->len - 24, mmie->mic);
return TX_CONTINUE;
}
ieee80211_rx_result
ieee80211_crypto_aes_cmac_decrypt(struct ieee80211_rx_data *rx)
{
struct sk_buff *skb = rx->skb;
struct ieee80211_rx_status *status = IEEE80211_SKB_RXCB(skb);
struct ieee80211_key *key = rx->key;
struct ieee80211_mmie *mmie;
u8 aad[20], mic[8], ipn[6];
struct ieee80211_hdr *hdr = (struct ieee80211_hdr *) skb->data;
if (!ieee80211_is_mgmt(hdr->frame_control))
return RX_CONTINUE;
/* management frames are already linear */
if (skb->len < 24 + sizeof(*mmie))
return RX_DROP_UNUSABLE;
mmie = (struct ieee80211_mmie *)
(skb->data + skb->len - sizeof(*mmie));
if (mmie->element_id != WLAN_EID_MMIE ||
mmie->length != sizeof(*mmie) - 2)
return RX_DROP_U_BAD_MMIE; /* Invalid MMIE */
bip_ipn_swap(ipn, mmie->sequence_number);
if (memcmp(ipn, key->u.aes_cmac.rx_pn, 6) <= 0) {
key->u.aes_cmac.replays++;
return RX_DROP_U_REPLAY;
}
if (!(status->flag & RX_FLAG_DECRYPTED)) {
/* hardware didn't decrypt/verify MIC */
bip_aad(skb, aad);
ieee80211_aes_cmac(key->u.aes_cmac.tfm, aad,
skb->data + 24, skb->len - 24, mic);
if (crypto_memneq(mic, mmie->mic, sizeof(mmie->mic))) {
key->u.aes_cmac.icverrors++;
return RX_DROP_U_MIC_FAIL;
}
}
memcpy(key->u.aes_cmac.rx_pn, ipn, 6);
/* Remove MMIE */
skb_trim(skb, skb->len - sizeof(*mmie));
return RX_CONTINUE;
}
ieee80211_rx_result
ieee80211_crypto_aes_cmac_256_decrypt(struct ieee80211_rx_data *rx)
{
struct sk_buff *skb = rx->skb;
struct ieee80211_rx_status *status = IEEE80211_SKB_RXCB(skb);
struct ieee80211_key *key = rx->key;
struct ieee80211_mmie_16 *mmie;
u8 aad[20], mic[16], ipn[6];
struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)skb->data;
if (!ieee80211_is_mgmt(hdr->frame_control))
return RX_CONTINUE;
/* management frames are already linear */
if (skb->len < 24 + sizeof(*mmie))
return RX_DROP_UNUSABLE;
mmie = (struct ieee80211_mmie_16 *)
(skb->data + skb->len - sizeof(*mmie));
if (mmie->element_id != WLAN_EID_MMIE ||
mmie->length != sizeof(*mmie) - 2)
return RX_DROP_UNUSABLE; /* Invalid MMIE */
bip_ipn_swap(ipn, mmie->sequence_number);
if (memcmp(ipn, key->u.aes_cmac.rx_pn, 6) <= 0) {
key->u.aes_cmac.replays++;
return RX_DROP_U_REPLAY;
}
if (!(status->flag & RX_FLAG_DECRYPTED)) {
/* hardware didn't decrypt/verify MIC */
bip_aad(skb, aad);
ieee80211_aes_cmac_256(key->u.aes_cmac.tfm, aad,
skb->data + 24, skb->len - 24, mic);
if (crypto_memneq(mic, mmie->mic, sizeof(mmie->mic))) {
key->u.aes_cmac.icverrors++;
return RX_DROP_U_MIC_FAIL;
}
}
memcpy(key->u.aes_cmac.rx_pn, ipn, 6);
/* Remove MMIE */
skb_trim(skb, skb->len - sizeof(*mmie));
return RX_CONTINUE;
}
ieee80211_tx_result
ieee80211_crypto_aes_gmac_encrypt(struct ieee80211_tx_data *tx)
{
struct sk_buff *skb;
struct ieee80211_tx_info *info;
struct ieee80211_key *key = tx->key;
struct ieee80211_mmie_16 *mmie;
struct ieee80211_hdr *hdr;
u8 aad[GMAC_AAD_LEN];
u64 pn64;
u8 nonce[GMAC_NONCE_LEN];
if (WARN_ON(skb_queue_len(&tx->skbs) != 1))
return TX_DROP;
skb = skb_peek(&tx->skbs);
info = IEEE80211_SKB_CB(skb);
if (info->control.hw_key)
return TX_CONTINUE;
if (WARN_ON(skb_tailroom(skb) < sizeof(*mmie)))
return TX_DROP;
mmie = skb_put(skb, sizeof(*mmie));
mmie->element_id = WLAN_EID_MMIE;
mmie->length = sizeof(*mmie) - 2;
mmie->key_id = cpu_to_le16(key->conf.keyidx);
/* PN = PN + 1 */
pn64 = atomic64_inc_return(&key->conf.tx_pn);
bip_ipn_set64(mmie->sequence_number, pn64);
bip_aad(skb, aad);
hdr = (struct ieee80211_hdr *)skb->data;
memcpy(nonce, hdr->addr2, ETH_ALEN);
bip_ipn_swap(nonce + ETH_ALEN, mmie->sequence_number);
/* MIC = AES-GMAC(IGTK, AAD || Management Frame Body || MMIE, 128) */
if (ieee80211_aes_gmac(key->u.aes_gmac.tfm, aad, nonce,
skb->data + 24, skb->len - 24, mmie->mic) < 0)
return TX_DROP;
return TX_CONTINUE;
}
ieee80211_rx_result
ieee80211_crypto_aes_gmac_decrypt(struct ieee80211_rx_data *rx)
{
struct sk_buff *skb = rx->skb;
struct ieee80211_rx_status *status = IEEE80211_SKB_RXCB(skb);
struct ieee80211_key *key = rx->key;
struct ieee80211_mmie_16 *mmie;
u8 aad[GMAC_AAD_LEN], *mic, ipn[6], nonce[GMAC_NONCE_LEN];
struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)skb->data;
if (!ieee80211_is_mgmt(hdr->frame_control))
return RX_CONTINUE;
/* management frames are already linear */
if (skb->len < 24 + sizeof(*mmie))
return RX_DROP_UNUSABLE;
mmie = (struct ieee80211_mmie_16 *)
(skb->data + skb->len - sizeof(*mmie));
if (mmie->element_id != WLAN_EID_MMIE ||
mmie->length != sizeof(*mmie) - 2)
return RX_DROP_U_BAD_MMIE; /* Invalid MMIE */
bip_ipn_swap(ipn, mmie->sequence_number);
if (memcmp(ipn, key->u.aes_gmac.rx_pn, 6) <= 0) {
key->u.aes_gmac.replays++;
return RX_DROP_U_REPLAY;
}
if (!(status->flag & RX_FLAG_DECRYPTED)) {
/* hardware didn't decrypt/verify MIC */
bip_aad(skb, aad);
memcpy(nonce, hdr->addr2, ETH_ALEN);
memcpy(nonce + ETH_ALEN, ipn, 6);
mic = kmalloc(GMAC_MIC_LEN, GFP_ATOMIC);
if (!mic)
return RX_DROP_UNUSABLE;
if (ieee80211_aes_gmac(key->u.aes_gmac.tfm, aad, nonce,
skb->data + 24, skb->len - 24,
mic) < 0 ||
crypto_memneq(mic, mmie->mic, sizeof(mmie->mic))) {
key->u.aes_gmac.icverrors++;
kfree(mic);
return RX_DROP_U_MIC_FAIL;
}
kfree(mic);
}
memcpy(key->u.aes_gmac.rx_pn, ipn, 6);
/* Remove MMIE */
skb_trim(skb, skb->len - sizeof(*mmie));
return RX_CONTINUE;
}
| linux-master | net/mac80211/wpa.c |
// SPDX-License-Identifier: GPL-2.0-only
/*
* Copyright 2015 Intel Deutschland GmbH
* Copyright (C) 2022 Intel Corporation
*/
#include <net/mac80211.h>
#include "ieee80211_i.h"
#include "trace.h"
#include "driver-ops.h"
#include "debugfs_sta.h"
#include "debugfs_netdev.h"
int drv_start(struct ieee80211_local *local)
{
int ret;
might_sleep();
if (WARN_ON(local->started))
return -EALREADY;
trace_drv_start(local);
local->started = true;
/* allow rx frames */
smp_mb();
ret = local->ops->start(&local->hw);
trace_drv_return_int(local, ret);
if (ret)
local->started = false;
return ret;
}
void drv_stop(struct ieee80211_local *local)
{
might_sleep();
if (WARN_ON(!local->started))
return;
trace_drv_stop(local);
local->ops->stop(&local->hw);
trace_drv_return_void(local);
/* sync away all work on the tasklet before clearing started */
tasklet_disable(&local->tasklet);
tasklet_enable(&local->tasklet);
barrier();
local->started = false;
}
int drv_add_interface(struct ieee80211_local *local,
struct ieee80211_sub_if_data *sdata)
{
int ret;
might_sleep();
if (WARN_ON(sdata->vif.type == NL80211_IFTYPE_AP_VLAN ||
(sdata->vif.type == NL80211_IFTYPE_MONITOR &&
!ieee80211_hw_check(&local->hw, WANT_MONITOR_VIF) &&
!(sdata->u.mntr.flags & MONITOR_FLAG_ACTIVE))))
return -EINVAL;
trace_drv_add_interface(local, sdata);
ret = local->ops->add_interface(&local->hw, &sdata->vif);
trace_drv_return_int(local, ret);
if (ret == 0)
sdata->flags |= IEEE80211_SDATA_IN_DRIVER;
return ret;
}
int drv_change_interface(struct ieee80211_local *local,
struct ieee80211_sub_if_data *sdata,
enum nl80211_iftype type, bool p2p)
{
int ret;
might_sleep();
if (!check_sdata_in_driver(sdata))
return -EIO;
trace_drv_change_interface(local, sdata, type, p2p);
ret = local->ops->change_interface(&local->hw, &sdata->vif, type, p2p);
trace_drv_return_int(local, ret);
return ret;
}
void drv_remove_interface(struct ieee80211_local *local,
struct ieee80211_sub_if_data *sdata)
{
might_sleep();
if (!check_sdata_in_driver(sdata))
return;
trace_drv_remove_interface(local, sdata);
local->ops->remove_interface(&local->hw, &sdata->vif);
sdata->flags &= ~IEEE80211_SDATA_IN_DRIVER;
trace_drv_return_void(local);
}
__must_check
int drv_sta_state(struct ieee80211_local *local,
struct ieee80211_sub_if_data *sdata,
struct sta_info *sta,
enum ieee80211_sta_state old_state,
enum ieee80211_sta_state new_state)
{
int ret = 0;
might_sleep();
sdata = get_bss_sdata(sdata);
if (!check_sdata_in_driver(sdata))
return -EIO;
trace_drv_sta_state(local, sdata, &sta->sta, old_state, new_state);
if (local->ops->sta_state) {
ret = local->ops->sta_state(&local->hw, &sdata->vif, &sta->sta,
old_state, new_state);
} else if (old_state == IEEE80211_STA_AUTH &&
new_state == IEEE80211_STA_ASSOC) {
ret = drv_sta_add(local, sdata, &sta->sta);
if (ret == 0) {
sta->uploaded = true;
if (rcu_access_pointer(sta->sta.rates))
drv_sta_rate_tbl_update(local, sdata, &sta->sta);
}
} else if (old_state == IEEE80211_STA_ASSOC &&
new_state == IEEE80211_STA_AUTH) {
drv_sta_remove(local, sdata, &sta->sta);
}
trace_drv_return_int(local, ret);
return ret;
}
__must_check
int drv_sta_set_txpwr(struct ieee80211_local *local,
struct ieee80211_sub_if_data *sdata,
struct sta_info *sta)
{
int ret = -EOPNOTSUPP;
might_sleep();
sdata = get_bss_sdata(sdata);
if (!check_sdata_in_driver(sdata))
return -EIO;
trace_drv_sta_set_txpwr(local, sdata, &sta->sta);
if (local->ops->sta_set_txpwr)
ret = local->ops->sta_set_txpwr(&local->hw, &sdata->vif,
&sta->sta);
trace_drv_return_int(local, ret);
return ret;
}
void drv_sta_rc_update(struct ieee80211_local *local,
struct ieee80211_sub_if_data *sdata,
struct ieee80211_sta *sta, u32 changed)
{
sdata = get_bss_sdata(sdata);
if (!check_sdata_in_driver(sdata))
return;
WARN_ON(changed & IEEE80211_RC_SUPP_RATES_CHANGED &&
(sdata->vif.type != NL80211_IFTYPE_ADHOC &&
sdata->vif.type != NL80211_IFTYPE_MESH_POINT));
trace_drv_sta_rc_update(local, sdata, sta, changed);
if (local->ops->sta_rc_update)
local->ops->sta_rc_update(&local->hw, &sdata->vif,
sta, changed);
trace_drv_return_void(local);
}
int drv_conf_tx(struct ieee80211_local *local,
struct ieee80211_link_data *link, u16 ac,
const struct ieee80211_tx_queue_params *params)
{
struct ieee80211_sub_if_data *sdata = link->sdata;
int ret = -EOPNOTSUPP;
might_sleep();
if (!check_sdata_in_driver(sdata))
return -EIO;
if (sdata->vif.active_links &&
!(sdata->vif.active_links & BIT(link->link_id)))
return 0;
if (params->cw_min == 0 || params->cw_min > params->cw_max) {
/*
* If we can't configure hardware anyway, don't warn. We may
* never have initialized the CW parameters.
*/
WARN_ONCE(local->ops->conf_tx,
"%s: invalid CW_min/CW_max: %d/%d\n",
sdata->name, params->cw_min, params->cw_max);
return -EINVAL;
}
trace_drv_conf_tx(local, sdata, link->link_id, ac, params);
if (local->ops->conf_tx)
ret = local->ops->conf_tx(&local->hw, &sdata->vif,
link->link_id, ac, params);
trace_drv_return_int(local, ret);
return ret;
}
u64 drv_get_tsf(struct ieee80211_local *local,
struct ieee80211_sub_if_data *sdata)
{
u64 ret = -1ULL;
might_sleep();
if (!check_sdata_in_driver(sdata))
return ret;
trace_drv_get_tsf(local, sdata);
if (local->ops->get_tsf)
ret = local->ops->get_tsf(&local->hw, &sdata->vif);
trace_drv_return_u64(local, ret);
return ret;
}
void drv_set_tsf(struct ieee80211_local *local,
struct ieee80211_sub_if_data *sdata,
u64 tsf)
{
might_sleep();
if (!check_sdata_in_driver(sdata))
return;
trace_drv_set_tsf(local, sdata, tsf);
if (local->ops->set_tsf)
local->ops->set_tsf(&local->hw, &sdata->vif, tsf);
trace_drv_return_void(local);
}
void drv_offset_tsf(struct ieee80211_local *local,
struct ieee80211_sub_if_data *sdata,
s64 offset)
{
might_sleep();
if (!check_sdata_in_driver(sdata))
return;
trace_drv_offset_tsf(local, sdata, offset);
if (local->ops->offset_tsf)
local->ops->offset_tsf(&local->hw, &sdata->vif, offset);
trace_drv_return_void(local);
}
void drv_reset_tsf(struct ieee80211_local *local,
struct ieee80211_sub_if_data *sdata)
{
might_sleep();
if (!check_sdata_in_driver(sdata))
return;
trace_drv_reset_tsf(local, sdata);
if (local->ops->reset_tsf)
local->ops->reset_tsf(&local->hw, &sdata->vif);
trace_drv_return_void(local);
}
int drv_assign_vif_chanctx(struct ieee80211_local *local,
struct ieee80211_sub_if_data *sdata,
struct ieee80211_bss_conf *link_conf,
struct ieee80211_chanctx *ctx)
{
int ret = 0;
drv_verify_link_exists(sdata, link_conf);
if (!check_sdata_in_driver(sdata))
return -EIO;
if (sdata->vif.active_links &&
!(sdata->vif.active_links & BIT(link_conf->link_id)))
return 0;
trace_drv_assign_vif_chanctx(local, sdata, link_conf, ctx);
if (local->ops->assign_vif_chanctx) {
WARN_ON_ONCE(!ctx->driver_present);
ret = local->ops->assign_vif_chanctx(&local->hw,
&sdata->vif,
link_conf,
&ctx->conf);
}
trace_drv_return_int(local, ret);
return ret;
}
void drv_unassign_vif_chanctx(struct ieee80211_local *local,
struct ieee80211_sub_if_data *sdata,
struct ieee80211_bss_conf *link_conf,
struct ieee80211_chanctx *ctx)
{
might_sleep();
drv_verify_link_exists(sdata, link_conf);
if (!check_sdata_in_driver(sdata))
return;
if (sdata->vif.active_links &&
!(sdata->vif.active_links & BIT(link_conf->link_id)))
return;
trace_drv_unassign_vif_chanctx(local, sdata, link_conf, ctx);
if (local->ops->unassign_vif_chanctx) {
WARN_ON_ONCE(!ctx->driver_present);
local->ops->unassign_vif_chanctx(&local->hw,
&sdata->vif,
link_conf,
&ctx->conf);
}
trace_drv_return_void(local);
}
int drv_switch_vif_chanctx(struct ieee80211_local *local,
struct ieee80211_vif_chanctx_switch *vifs,
int n_vifs, enum ieee80211_chanctx_switch_mode mode)
{
int ret = 0;
int i;
might_sleep();
if (!local->ops->switch_vif_chanctx)
return -EOPNOTSUPP;
for (i = 0; i < n_vifs; i++) {
struct ieee80211_chanctx *new_ctx =
container_of(vifs[i].new_ctx,
struct ieee80211_chanctx,
conf);
struct ieee80211_chanctx *old_ctx =
container_of(vifs[i].old_ctx,
struct ieee80211_chanctx,
conf);
WARN_ON_ONCE(!old_ctx->driver_present);
WARN_ON_ONCE((mode == CHANCTX_SWMODE_SWAP_CONTEXTS &&
new_ctx->driver_present) ||
(mode == CHANCTX_SWMODE_REASSIGN_VIF &&
!new_ctx->driver_present));
}
trace_drv_switch_vif_chanctx(local, vifs, n_vifs, mode);
ret = local->ops->switch_vif_chanctx(&local->hw,
vifs, n_vifs, mode);
trace_drv_return_int(local, ret);
if (!ret && mode == CHANCTX_SWMODE_SWAP_CONTEXTS) {
for (i = 0; i < n_vifs; i++) {
struct ieee80211_chanctx *new_ctx =
container_of(vifs[i].new_ctx,
struct ieee80211_chanctx,
conf);
struct ieee80211_chanctx *old_ctx =
container_of(vifs[i].old_ctx,
struct ieee80211_chanctx,
conf);
new_ctx->driver_present = true;
old_ctx->driver_present = false;
}
}
return ret;
}
int drv_ampdu_action(struct ieee80211_local *local,
struct ieee80211_sub_if_data *sdata,
struct ieee80211_ampdu_params *params)
{
int ret = -EOPNOTSUPP;
might_sleep();
if (!sdata)
return -EIO;
sdata = get_bss_sdata(sdata);
if (!check_sdata_in_driver(sdata))
return -EIO;
trace_drv_ampdu_action(local, sdata, params);
if (local->ops->ampdu_action)
ret = local->ops->ampdu_action(&local->hw, &sdata->vif, params);
trace_drv_return_int(local, ret);
return ret;
}
void drv_link_info_changed(struct ieee80211_local *local,
struct ieee80211_sub_if_data *sdata,
struct ieee80211_bss_conf *info,
int link_id, u64 changed)
{
might_sleep();
if (WARN_ON_ONCE(changed & (BSS_CHANGED_BEACON |
BSS_CHANGED_BEACON_ENABLED) &&
sdata->vif.type != NL80211_IFTYPE_AP &&
sdata->vif.type != NL80211_IFTYPE_ADHOC &&
sdata->vif.type != NL80211_IFTYPE_MESH_POINT &&
sdata->vif.type != NL80211_IFTYPE_OCB))
return;
if (WARN_ON_ONCE(sdata->vif.type == NL80211_IFTYPE_P2P_DEVICE ||
sdata->vif.type == NL80211_IFTYPE_NAN ||
(sdata->vif.type == NL80211_IFTYPE_MONITOR &&
!sdata->vif.bss_conf.mu_mimo_owner &&
!(changed & BSS_CHANGED_TXPOWER))))
return;
if (!check_sdata_in_driver(sdata))
return;
if (sdata->vif.active_links &&
!(sdata->vif.active_links & BIT(link_id)))
return;
trace_drv_link_info_changed(local, sdata, info, changed);
if (local->ops->link_info_changed)
local->ops->link_info_changed(&local->hw, &sdata->vif,
info, changed);
else if (local->ops->bss_info_changed)
local->ops->bss_info_changed(&local->hw, &sdata->vif,
info, changed);
trace_drv_return_void(local);
}
int drv_set_key(struct ieee80211_local *local,
enum set_key_cmd cmd,
struct ieee80211_sub_if_data *sdata,
struct ieee80211_sta *sta,
struct ieee80211_key_conf *key)
{
int ret;
might_sleep();
sdata = get_bss_sdata(sdata);
if (!check_sdata_in_driver(sdata))
return -EIO;
if (WARN_ON(key->link_id >= 0 && sdata->vif.active_links &&
!(sdata->vif.active_links & BIT(key->link_id))))
return -ENOLINK;
trace_drv_set_key(local, cmd, sdata, sta, key);
ret = local->ops->set_key(&local->hw, cmd, &sdata->vif, sta, key);
trace_drv_return_int(local, ret);
return ret;
}
int drv_change_vif_links(struct ieee80211_local *local,
struct ieee80211_sub_if_data *sdata,
u16 old_links, u16 new_links,
struct ieee80211_bss_conf *old[IEEE80211_MLD_MAX_NUM_LINKS])
{
struct ieee80211_link_data *link;
unsigned long links_to_add;
unsigned long links_to_rem;
unsigned int link_id;
int ret = -EOPNOTSUPP;
might_sleep();
if (!check_sdata_in_driver(sdata))
return -EIO;
if (old_links == new_links)
return 0;
links_to_add = ~old_links & new_links;
links_to_rem = old_links & ~new_links;
for_each_set_bit(link_id, &links_to_rem, IEEE80211_MLD_MAX_NUM_LINKS) {
link = rcu_access_pointer(sdata->link[link_id]);
ieee80211_link_debugfs_drv_remove(link);
}
trace_drv_change_vif_links(local, sdata, old_links, new_links);
if (local->ops->change_vif_links)
ret = local->ops->change_vif_links(&local->hw, &sdata->vif,
old_links, new_links, old);
trace_drv_return_int(local, ret);
if (ret)
return ret;
for_each_set_bit(link_id, &links_to_add, IEEE80211_MLD_MAX_NUM_LINKS) {
link = rcu_access_pointer(sdata->link[link_id]);
ieee80211_link_debugfs_drv_add(link);
}
return 0;
}
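/*
 * Illustrative note (editor-added): the link bookkeeping above is plain
 * bitmask arithmetic. For example, with old_links = 0b0011 and
 * new_links = 0b0110:
 *
 *   links_to_add = ~old & new = 0b0100  (link 2 gains debugfs entries)
 *   links_to_rem = old & ~new = 0b0001  (link 0 loses its entries first)
 *
 * drv_change_sta_links() below follows the same pattern for per-STA links.
 */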
int drv_change_sta_links(struct ieee80211_local *local,
struct ieee80211_sub_if_data *sdata,
struct ieee80211_sta *sta,
u16 old_links, u16 new_links)
{
struct sta_info *info = container_of(sta, struct sta_info, sta);
struct link_sta_info *link_sta;
unsigned long links_to_add;
unsigned long links_to_rem;
unsigned int link_id;
int ret = -EOPNOTSUPP;
might_sleep();
if (!check_sdata_in_driver(sdata))
return -EIO;
old_links &= sdata->vif.active_links;
new_links &= sdata->vif.active_links;
if (old_links == new_links)
return 0;
links_to_add = ~old_links & new_links;
links_to_rem = old_links & ~new_links;
for_each_set_bit(link_id, &links_to_rem, IEEE80211_MLD_MAX_NUM_LINKS) {
link_sta = rcu_dereference_protected(info->link[link_id],
lockdep_is_held(&local->sta_mtx));
ieee80211_link_sta_debugfs_drv_remove(link_sta);
}
trace_drv_change_sta_links(local, sdata, sta, old_links, new_links);
if (local->ops->change_sta_links)
ret = local->ops->change_sta_links(&local->hw, &sdata->vif, sta,
old_links, new_links);
trace_drv_return_int(local, ret);
if (ret)
return ret;
for_each_set_bit(link_id, &links_to_add, IEEE80211_MLD_MAX_NUM_LINKS) {
link_sta = rcu_dereference_protected(info->link[link_id],
lockdep_is_held(&local->sta_mtx));
ieee80211_link_sta_debugfs_drv_add(link_sta);
}
return 0;
}
| linux-master | net/mac80211/driver-ops.c |
// SPDX-License-Identifier: ISC
/*
* Copyright (C) 2019 Felix Fietkau <[email protected]>
* Copyright (C) 2021-2022 Intel Corporation
*/
#include <net/mac80211.h>
#include "ieee80211_i.h"
#include "sta_info.h"
#define AVG_PKT_SIZE 1024
/* Number of bits for an average sized packet */
#define MCS_NBITS (AVG_PKT_SIZE << 3)
/* Number of kilo-symbols (symbols * 1024) for a packet with (bps) bits per
* symbol. We use k-symbols to avoid rounding in the _TIME macros below.
*/
#define MCS_N_KSYMS(bps) DIV_ROUND_UP(MCS_NBITS << 10, (bps))
/* Transmission time (in 1024 * usec) for a packet containing (ksyms) * 1024
* symbols.
*/
#define MCS_SYMBOL_TIME(sgi, ksyms) \
(sgi ? \
((ksyms) * 4 * 18) / 20 : /* 3.6 us per sym */ \
((ksyms) * 4) /* 4.0 us per sym */ \
)
/* Transmit duration for the raw data part of an average sized packet */
#define MCS_DURATION(streams, sgi, bps) \
((u32)MCS_SYMBOL_TIME(sgi, MCS_N_KSYMS((streams) * (bps))))
#define MCS_DURATION_S(shift, streams, sgi, bps) \
((u16)((MCS_DURATION(streams, sgi, bps) >> shift)))
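/*
 * Worked example (editor-added illustration): for 1 stream, long GI,
 * 20 MHz, MCS 0 (26 data bits per symbol):
 *
 *   MCS_NBITS        = 1024 * 8             = 8192 bits
 *   MCS_N_KSYMS(26)  = ceil(8192 * 1024/26) = 322639 k-symbols
 *   MCS_DURATION     = 322639 * 4           = 1290556 (/1024 ~ 1260 us)
 *   GROUP_SHIFT(...) = 5, so the stored u16 entry is 1290556 >> 5 = 40329
 *
 * i.e. the per-group shift exists only so each per-rate duration fits in
 * a u16 table entry; it is undone again where the duration is consumed.
 */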
/* These should match the values in enum nl80211_he_gi */
#define HE_GI_08 0
#define HE_GI_16 1
#define HE_GI_32 2
/* Transmission time (1024 usec) for a packet containing (ksyms) * k-symbols */
#define HE_SYMBOL_TIME(gi, ksyms) \
(gi == HE_GI_08 ? \
((ksyms) * 16 * 17) / 20 : /* 13.6 us per sym */ \
(gi == HE_GI_16 ? \
((ksyms) * 16 * 18) / 20 : /* 14.4 us per sym */ \
((ksyms) * 16) /* 16.0 us per sym */ \
))
/* Transmit duration for the raw data part of an average sized packet */
#define HE_DURATION(streams, gi, bps) \
((u32)HE_SYMBOL_TIME(gi, MCS_N_KSYMS((streams) * (bps))))
#define HE_DURATION_S(shift, streams, gi, bps) \
(HE_DURATION(streams, gi, bps) >> shift)
#define BW_20 0
#define BW_40 1
#define BW_80 2
#define BW_160 3
/*
* Define group sort order: HT40 -> SGI -> #streams
*/
#define IEEE80211_MAX_STREAMS 4
#define IEEE80211_HT_STREAM_GROUPS 4 /* BW(=2) * SGI(=2) */
#define IEEE80211_VHT_STREAM_GROUPS 8 /* BW(=4) * SGI(=2) */
#define IEEE80211_HE_MAX_STREAMS 8
#define IEEE80211_HT_GROUPS_NB (IEEE80211_MAX_STREAMS * \
IEEE80211_HT_STREAM_GROUPS)
#define IEEE80211_VHT_GROUPS_NB (IEEE80211_MAX_STREAMS * \
IEEE80211_VHT_STREAM_GROUPS)
#define IEEE80211_HT_GROUP_0 0
#define IEEE80211_VHT_GROUP_0 (IEEE80211_HT_GROUP_0 + IEEE80211_HT_GROUPS_NB)
#define IEEE80211_HE_GROUP_0 (IEEE80211_VHT_GROUP_0 + IEEE80211_VHT_GROUPS_NB)
#define MCS_GROUP_RATES 12
#define HT_GROUP_IDX(_streams, _sgi, _ht40) \
IEEE80211_HT_GROUP_0 + \
IEEE80211_MAX_STREAMS * 2 * _ht40 + \
IEEE80211_MAX_STREAMS * _sgi + \
_streams - 1
#define _MAX(a, b) (((a)>(b))?(a):(b))
#define GROUP_SHIFT(duration) \
_MAX(0, 16 - __builtin_clz(duration))
/* MCS rate information for an MCS group */
#define __MCS_GROUP(_streams, _sgi, _ht40, _s) \
[HT_GROUP_IDX(_streams, _sgi, _ht40)] = { \
.shift = _s, \
.duration = { \
MCS_DURATION_S(_s, _streams, _sgi, _ht40 ? 54 : 26), \
MCS_DURATION_S(_s, _streams, _sgi, _ht40 ? 108 : 52), \
MCS_DURATION_S(_s, _streams, _sgi, _ht40 ? 162 : 78), \
MCS_DURATION_S(_s, _streams, _sgi, _ht40 ? 216 : 104), \
MCS_DURATION_S(_s, _streams, _sgi, _ht40 ? 324 : 156), \
MCS_DURATION_S(_s, _streams, _sgi, _ht40 ? 432 : 208), \
MCS_DURATION_S(_s, _streams, _sgi, _ht40 ? 486 : 234), \
MCS_DURATION_S(_s, _streams, _sgi, _ht40 ? 540 : 260) \
} \
}
#define MCS_GROUP_SHIFT(_streams, _sgi, _ht40) \
GROUP_SHIFT(MCS_DURATION(_streams, _sgi, _ht40 ? 54 : 26))
#define MCS_GROUP(_streams, _sgi, _ht40) \
__MCS_GROUP(_streams, _sgi, _ht40, \
MCS_GROUP_SHIFT(_streams, _sgi, _ht40))
#define VHT_GROUP_IDX(_streams, _sgi, _bw) \
(IEEE80211_VHT_GROUP_0 + \
IEEE80211_MAX_STREAMS * 2 * (_bw) + \
IEEE80211_MAX_STREAMS * (_sgi) + \
(_streams) - 1)
#define BW2VBPS(_bw, r4, r3, r2, r1) \
(_bw == BW_160 ? r4 : _bw == BW_80 ? r3 : _bw == BW_40 ? r2 : r1)
#define __VHT_GROUP(_streams, _sgi, _bw, _s) \
[VHT_GROUP_IDX(_streams, _sgi, _bw)] = { \
.shift = _s, \
.duration = { \
MCS_DURATION_S(_s, _streams, _sgi, \
BW2VBPS(_bw, 234, 117, 54, 26)), \
MCS_DURATION_S(_s, _streams, _sgi, \
BW2VBPS(_bw, 468, 234, 108, 52)), \
MCS_DURATION_S(_s, _streams, _sgi, \
BW2VBPS(_bw, 702, 351, 162, 78)), \
MCS_DURATION_S(_s, _streams, _sgi, \
BW2VBPS(_bw, 936, 468, 216, 104)), \
MCS_DURATION_S(_s, _streams, _sgi, \
BW2VBPS(_bw, 1404, 702, 324, 156)), \
MCS_DURATION_S(_s, _streams, _sgi, \
BW2VBPS(_bw, 1872, 936, 432, 208)), \
MCS_DURATION_S(_s, _streams, _sgi, \
BW2VBPS(_bw, 2106, 1053, 486, 234)), \
MCS_DURATION_S(_s, _streams, _sgi, \
BW2VBPS(_bw, 2340, 1170, 540, 260)), \
MCS_DURATION_S(_s, _streams, _sgi, \
BW2VBPS(_bw, 2808, 1404, 648, 312)), \
MCS_DURATION_S(_s, _streams, _sgi, \
BW2VBPS(_bw, 3120, 1560, 720, 346)) \
} \
}
#define VHT_GROUP_SHIFT(_streams, _sgi, _bw) \
GROUP_SHIFT(MCS_DURATION(_streams, _sgi, \
BW2VBPS(_bw, 243, 117, 54, 26)))
#define VHT_GROUP(_streams, _sgi, _bw) \
__VHT_GROUP(_streams, _sgi, _bw, \
VHT_GROUP_SHIFT(_streams, _sgi, _bw))
#define HE_GROUP_IDX(_streams, _gi, _bw) \
(IEEE80211_HE_GROUP_0 + \
IEEE80211_HE_MAX_STREAMS * 3 * (_bw) + \
IEEE80211_HE_MAX_STREAMS * (_gi) + \
(_streams) - 1)
#define __HE_GROUP(_streams, _gi, _bw, _s) \
[HE_GROUP_IDX(_streams, _gi, _bw)] = { \
.shift = _s, \
.duration = { \
HE_DURATION_S(_s, _streams, _gi, \
BW2VBPS(_bw, 979, 489, 230, 115)), \
HE_DURATION_S(_s, _streams, _gi, \
BW2VBPS(_bw, 1958, 979, 475, 230)), \
HE_DURATION_S(_s, _streams, _gi, \
BW2VBPS(_bw, 2937, 1468, 705, 345)), \
HE_DURATION_S(_s, _streams, _gi, \
BW2VBPS(_bw, 3916, 1958, 936, 475)), \
HE_DURATION_S(_s, _streams, _gi, \
BW2VBPS(_bw, 5875, 2937, 1411, 705)), \
HE_DURATION_S(_s, _streams, _gi, \
BW2VBPS(_bw, 7833, 3916, 1872, 936)), \
HE_DURATION_S(_s, _streams, _gi, \
BW2VBPS(_bw, 8827, 4406, 2102, 1051)), \
HE_DURATION_S(_s, _streams, _gi, \
BW2VBPS(_bw, 9806, 4896, 2347, 1166)), \
HE_DURATION_S(_s, _streams, _gi, \
BW2VBPS(_bw, 11764, 5875, 2808, 1411)), \
HE_DURATION_S(_s, _streams, _gi, \
BW2VBPS(_bw, 13060, 6523, 3124, 1555)), \
HE_DURATION_S(_s, _streams, _gi, \
BW2VBPS(_bw, 14702, 7344, 3513, 1756)), \
HE_DURATION_S(_s, _streams, _gi, \
BW2VBPS(_bw, 16329, 8164, 3902, 1944)) \
} \
}
#define HE_GROUP_SHIFT(_streams, _gi, _bw) \
GROUP_SHIFT(HE_DURATION(_streams, _gi, \
BW2VBPS(_bw, 979, 489, 230, 115)))
#define HE_GROUP(_streams, _gi, _bw) \
__HE_GROUP(_streams, _gi, _bw, \
HE_GROUP_SHIFT(_streams, _gi, _bw))
struct mcs_group {
u8 shift;
u16 duration[MCS_GROUP_RATES];
};
static const struct mcs_group airtime_mcs_groups[] = {
MCS_GROUP(1, 0, BW_20),
MCS_GROUP(2, 0, BW_20),
MCS_GROUP(3, 0, BW_20),
MCS_GROUP(4, 0, BW_20),
MCS_GROUP(1, 1, BW_20),
MCS_GROUP(2, 1, BW_20),
MCS_GROUP(3, 1, BW_20),
MCS_GROUP(4, 1, BW_20),
MCS_GROUP(1, 0, BW_40),
MCS_GROUP(2, 0, BW_40),
MCS_GROUP(3, 0, BW_40),
MCS_GROUP(4, 0, BW_40),
MCS_GROUP(1, 1, BW_40),
MCS_GROUP(2, 1, BW_40),
MCS_GROUP(3, 1, BW_40),
MCS_GROUP(4, 1, BW_40),
VHT_GROUP(1, 0, BW_20),
VHT_GROUP(2, 0, BW_20),
VHT_GROUP(3, 0, BW_20),
VHT_GROUP(4, 0, BW_20),
VHT_GROUP(1, 1, BW_20),
VHT_GROUP(2, 1, BW_20),
VHT_GROUP(3, 1, BW_20),
VHT_GROUP(4, 1, BW_20),
VHT_GROUP(1, 0, BW_40),
VHT_GROUP(2, 0, BW_40),
VHT_GROUP(3, 0, BW_40),
VHT_GROUP(4, 0, BW_40),
VHT_GROUP(1, 1, BW_40),
VHT_GROUP(2, 1, BW_40),
VHT_GROUP(3, 1, BW_40),
VHT_GROUP(4, 1, BW_40),
VHT_GROUP(1, 0, BW_80),
VHT_GROUP(2, 0, BW_80),
VHT_GROUP(3, 0, BW_80),
VHT_GROUP(4, 0, BW_80),
VHT_GROUP(1, 1, BW_80),
VHT_GROUP(2, 1, BW_80),
VHT_GROUP(3, 1, BW_80),
VHT_GROUP(4, 1, BW_80),
VHT_GROUP(1, 0, BW_160),
VHT_GROUP(2, 0, BW_160),
VHT_GROUP(3, 0, BW_160),
VHT_GROUP(4, 0, BW_160),
VHT_GROUP(1, 1, BW_160),
VHT_GROUP(2, 1, BW_160),
VHT_GROUP(3, 1, BW_160),
VHT_GROUP(4, 1, BW_160),
HE_GROUP(1, HE_GI_08, BW_20),
HE_GROUP(2, HE_GI_08, BW_20),
HE_GROUP(3, HE_GI_08, BW_20),
HE_GROUP(4, HE_GI_08, BW_20),
HE_GROUP(5, HE_GI_08, BW_20),
HE_GROUP(6, HE_GI_08, BW_20),
HE_GROUP(7, HE_GI_08, BW_20),
HE_GROUP(8, HE_GI_08, BW_20),
HE_GROUP(1, HE_GI_16, BW_20),
HE_GROUP(2, HE_GI_16, BW_20),
HE_GROUP(3, HE_GI_16, BW_20),
HE_GROUP(4, HE_GI_16, BW_20),
HE_GROUP(5, HE_GI_16, BW_20),
HE_GROUP(6, HE_GI_16, BW_20),
HE_GROUP(7, HE_GI_16, BW_20),
HE_GROUP(8, HE_GI_16, BW_20),
HE_GROUP(1, HE_GI_32, BW_20),
HE_GROUP(2, HE_GI_32, BW_20),
HE_GROUP(3, HE_GI_32, BW_20),
HE_GROUP(4, HE_GI_32, BW_20),
HE_GROUP(5, HE_GI_32, BW_20),
HE_GROUP(6, HE_GI_32, BW_20),
HE_GROUP(7, HE_GI_32, BW_20),
HE_GROUP(8, HE_GI_32, BW_20),
HE_GROUP(1, HE_GI_08, BW_40),
HE_GROUP(2, HE_GI_08, BW_40),
HE_GROUP(3, HE_GI_08, BW_40),
HE_GROUP(4, HE_GI_08, BW_40),
HE_GROUP(5, HE_GI_08, BW_40),
HE_GROUP(6, HE_GI_08, BW_40),
HE_GROUP(7, HE_GI_08, BW_40),
HE_GROUP(8, HE_GI_08, BW_40),
HE_GROUP(1, HE_GI_16, BW_40),
HE_GROUP(2, HE_GI_16, BW_40),
HE_GROUP(3, HE_GI_16, BW_40),
HE_GROUP(4, HE_GI_16, BW_40),
HE_GROUP(5, HE_GI_16, BW_40),
HE_GROUP(6, HE_GI_16, BW_40),
HE_GROUP(7, HE_GI_16, BW_40),
HE_GROUP(8, HE_GI_16, BW_40),
HE_GROUP(1, HE_GI_32, BW_40),
HE_GROUP(2, HE_GI_32, BW_40),
HE_GROUP(3, HE_GI_32, BW_40),
HE_GROUP(4, HE_GI_32, BW_40),
HE_GROUP(5, HE_GI_32, BW_40),
HE_GROUP(6, HE_GI_32, BW_40),
HE_GROUP(7, HE_GI_32, BW_40),
HE_GROUP(8, HE_GI_32, BW_40),
HE_GROUP(1, HE_GI_08, BW_80),
HE_GROUP(2, HE_GI_08, BW_80),
HE_GROUP(3, HE_GI_08, BW_80),
HE_GROUP(4, HE_GI_08, BW_80),
HE_GROUP(5, HE_GI_08, BW_80),
HE_GROUP(6, HE_GI_08, BW_80),
HE_GROUP(7, HE_GI_08, BW_80),
HE_GROUP(8, HE_GI_08, BW_80),
HE_GROUP(1, HE_GI_16, BW_80),
HE_GROUP(2, HE_GI_16, BW_80),
HE_GROUP(3, HE_GI_16, BW_80),
HE_GROUP(4, HE_GI_16, BW_80),
HE_GROUP(5, HE_GI_16, BW_80),
HE_GROUP(6, HE_GI_16, BW_80),
HE_GROUP(7, HE_GI_16, BW_80),
HE_GROUP(8, HE_GI_16, BW_80),
HE_GROUP(1, HE_GI_32, BW_80),
HE_GROUP(2, HE_GI_32, BW_80),
HE_GROUP(3, HE_GI_32, BW_80),
HE_GROUP(4, HE_GI_32, BW_80),
HE_GROUP(5, HE_GI_32, BW_80),
HE_GROUP(6, HE_GI_32, BW_80),
HE_GROUP(7, HE_GI_32, BW_80),
HE_GROUP(8, HE_GI_32, BW_80),
HE_GROUP(1, HE_GI_08, BW_160),
HE_GROUP(2, HE_GI_08, BW_160),
HE_GROUP(3, HE_GI_08, BW_160),
HE_GROUP(4, HE_GI_08, BW_160),
HE_GROUP(5, HE_GI_08, BW_160),
HE_GROUP(6, HE_GI_08, BW_160),
HE_GROUP(7, HE_GI_08, BW_160),
HE_GROUP(8, HE_GI_08, BW_160),
HE_GROUP(1, HE_GI_16, BW_160),
HE_GROUP(2, HE_GI_16, BW_160),
HE_GROUP(3, HE_GI_16, BW_160),
HE_GROUP(4, HE_GI_16, BW_160),
HE_GROUP(5, HE_GI_16, BW_160),
HE_GROUP(6, HE_GI_16, BW_160),
HE_GROUP(7, HE_GI_16, BW_160),
HE_GROUP(8, HE_GI_16, BW_160),
HE_GROUP(1, HE_GI_32, BW_160),
HE_GROUP(2, HE_GI_32, BW_160),
HE_GROUP(3, HE_GI_32, BW_160),
HE_GROUP(4, HE_GI_32, BW_160),
HE_GROUP(5, HE_GI_32, BW_160),
HE_GROUP(6, HE_GI_32, BW_160),
HE_GROUP(7, HE_GI_32, BW_160),
HE_GROUP(8, HE_GI_32, BW_160),
};
static u32
ieee80211_calc_legacy_rate_duration(u16 bitrate, bool short_pre,
bool cck, int len)
{
u32 duration;
if (cck) {
duration = 144 + 48; /* preamble + PLCP */
if (short_pre)
duration >>= 1;
duration += 10; /* SIFS */
} else {
		duration = 20 + 16; /* preamble + SIFS */
}
len <<= 3;
duration += (len * 10) / bitrate;
return duration;
}
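/*
 * Worked example (editor-added illustration): for a 1024-byte frame at
 * 54 Mbps OFDM (bitrate = 540 in units of 100 kbps, non-CCK):
 *
 *   duration  = 20 + 16            = 36 us   (preamble + SIFS)
 *   len       = 1024 << 3          = 8192 bits
 *   duration += (8192 * 10) / 540  = 151 us
 *
 * giving roughly 187 us for the frame.
 */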
static u32 ieee80211_get_rate_duration(struct ieee80211_hw *hw,
struct ieee80211_rx_status *status,
u32 *overhead)
{
bool sgi = status->enc_flags & RX_ENC_FLAG_SHORT_GI;
int bw, streams;
int group, idx;
u32 duration;
switch (status->bw) {
case RATE_INFO_BW_20:
bw = BW_20;
break;
case RATE_INFO_BW_40:
bw = BW_40;
break;
case RATE_INFO_BW_80:
bw = BW_80;
break;
case RATE_INFO_BW_160:
bw = BW_160;
break;
default:
WARN_ON_ONCE(1);
return 0;
}
switch (status->encoding) {
case RX_ENC_VHT:
streams = status->nss;
idx = status->rate_idx;
group = VHT_GROUP_IDX(streams, sgi, bw);
break;
case RX_ENC_HT:
streams = ((status->rate_idx >> 3) & 3) + 1;
idx = status->rate_idx & 7;
group = HT_GROUP_IDX(streams, sgi, bw);
break;
case RX_ENC_HE:
streams = status->nss;
idx = status->rate_idx;
group = HE_GROUP_IDX(streams, status->he_gi, bw);
break;
default:
WARN_ON_ONCE(1);
return 0;
}
if (WARN_ON_ONCE((status->encoding != RX_ENC_HE && streams > 4) ||
(status->encoding == RX_ENC_HE && streams > 8)))
return 0;
if (idx >= MCS_GROUP_RATES)
return 0;
duration = airtime_mcs_groups[group].duration[idx];
duration <<= airtime_mcs_groups[group].shift;
*overhead = 36 + (streams << 2);
return duration;
}
u32 ieee80211_calc_rx_airtime(struct ieee80211_hw *hw,
struct ieee80211_rx_status *status,
int len)
{
struct ieee80211_supported_band *sband;
u32 duration, overhead = 0;
if (status->encoding == RX_ENC_LEGACY) {
const struct ieee80211_rate *rate;
bool sp = status->enc_flags & RX_ENC_FLAG_SHORTPRE;
bool cck;
/* on 60GHz or sub-1GHz band, there are no legacy rates */
if (WARN_ON_ONCE(status->band == NL80211_BAND_60GHZ ||
status->band == NL80211_BAND_S1GHZ))
return 0;
sband = hw->wiphy->bands[status->band];
if (!sband || status->rate_idx >= sband->n_bitrates)
return 0;
rate = &sband->bitrates[status->rate_idx];
cck = rate->flags & IEEE80211_RATE_MANDATORY_B;
return ieee80211_calc_legacy_rate_duration(rate->bitrate, sp,
cck, len);
}
duration = ieee80211_get_rate_duration(hw, status, &overhead);
if (!duration)
return 0;
duration *= len;
duration /= AVG_PKT_SIZE;
duration /= 1024;
return duration + overhead;
}
EXPORT_SYMBOL_GPL(ieee80211_calc_rx_airtime);
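/*
 * Usage sketch (editor-added, hypothetical): a driver that has already
 * filled in the ieee80211_rx_status for a received frame could account
 * its airtime roughly as follows; the helper name is made up for
 * illustration and nothing in mac80211 defines or calls it.
 */
static inline u32 example_rx_airtime_usec(struct ieee80211_hw *hw,
					  struct sk_buff *skb)
{
	struct ieee80211_rx_status *status = IEEE80211_SKB_RXCB(skb);
	/* returns the estimated time-on-air in microseconds, or 0 */
	return ieee80211_calc_rx_airtime(hw, status, skb->len);
}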
static bool ieee80211_fill_rate_info(struct ieee80211_hw *hw,
struct ieee80211_rx_status *stat, u8 band,
struct rate_info *ri)
{
struct ieee80211_supported_band *sband = hw->wiphy->bands[band];
int i;
if (!ri || !sband)
return false;
stat->bw = ri->bw;
stat->nss = ri->nss;
stat->rate_idx = ri->mcs;
if (ri->flags & RATE_INFO_FLAGS_HE_MCS)
stat->encoding = RX_ENC_HE;
else if (ri->flags & RATE_INFO_FLAGS_VHT_MCS)
stat->encoding = RX_ENC_VHT;
else if (ri->flags & RATE_INFO_FLAGS_MCS)
stat->encoding = RX_ENC_HT;
else
stat->encoding = RX_ENC_LEGACY;
if (ri->flags & RATE_INFO_FLAGS_SHORT_GI)
stat->enc_flags |= RX_ENC_FLAG_SHORT_GI;
stat->he_gi = ri->he_gi;
if (stat->encoding != RX_ENC_LEGACY)
return true;
stat->rate_idx = 0;
for (i = 0; i < sband->n_bitrates; i++) {
if (ri->legacy != sband->bitrates[i].bitrate)
continue;
stat->rate_idx = i;
return true;
}
return false;
}
static int ieee80211_fill_rx_status(struct ieee80211_rx_status *stat,
struct ieee80211_hw *hw,
struct ieee80211_tx_rate *rate,
struct rate_info *ri, u8 band, int len)
{
memset(stat, 0, sizeof(*stat));
stat->band = band;
if (ieee80211_fill_rate_info(hw, stat, band, ri))
return 0;
if (rate->idx < 0 || !rate->count)
return -1;
if (rate->flags & IEEE80211_TX_RC_160_MHZ_WIDTH)
stat->bw = RATE_INFO_BW_160;
else if (rate->flags & IEEE80211_TX_RC_80_MHZ_WIDTH)
stat->bw = RATE_INFO_BW_80;
else if (rate->flags & IEEE80211_TX_RC_40_MHZ_WIDTH)
stat->bw = RATE_INFO_BW_40;
else
stat->bw = RATE_INFO_BW_20;
stat->enc_flags = 0;
if (rate->flags & IEEE80211_TX_RC_USE_SHORT_PREAMBLE)
stat->enc_flags |= RX_ENC_FLAG_SHORTPRE;
if (rate->flags & IEEE80211_TX_RC_SHORT_GI)
stat->enc_flags |= RX_ENC_FLAG_SHORT_GI;
stat->rate_idx = rate->idx;
if (rate->flags & IEEE80211_TX_RC_VHT_MCS) {
stat->encoding = RX_ENC_VHT;
stat->rate_idx = ieee80211_rate_get_vht_mcs(rate);
stat->nss = ieee80211_rate_get_vht_nss(rate);
} else if (rate->flags & IEEE80211_TX_RC_MCS) {
stat->encoding = RX_ENC_HT;
} else {
stat->encoding = RX_ENC_LEGACY;
}
return 0;
}
static u32 ieee80211_calc_tx_airtime_rate(struct ieee80211_hw *hw,
struct ieee80211_tx_rate *rate,
struct rate_info *ri,
u8 band, int len)
{
struct ieee80211_rx_status stat;
if (ieee80211_fill_rx_status(&stat, hw, rate, ri, band, len))
return 0;
return ieee80211_calc_rx_airtime(hw, &stat, len);
}
u32 ieee80211_calc_tx_airtime(struct ieee80211_hw *hw,
struct ieee80211_tx_info *info,
int len)
{
u32 duration = 0;
int i;
for (i = 0; i < ARRAY_SIZE(info->status.rates); i++) {
struct ieee80211_tx_rate *rate = &info->status.rates[i];
u32 cur_duration;
cur_duration = ieee80211_calc_tx_airtime_rate(hw, rate, NULL,
info->band, len);
if (!cur_duration)
break;
duration += cur_duration * rate->count;
}
return duration;
}
EXPORT_SYMBOL_GPL(ieee80211_calc_tx_airtime);
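/*
 * Illustrative sketch (hypothetical driver code, not part of mac80211):
 * estimating the airtime of a completed transmission from its TX status.
 * A real driver would feed the result into its own airtime accounting;
 * that part is omitted here and the function name is made up.
 */
static inline u32 example_tx_airtime_usec(struct ieee80211_hw *hw,
					  struct ieee80211_tx_info *info,
					  struct sk_buff *skb)
{
	return ieee80211_calc_tx_airtime(hw, info, skb->len);
}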
u32 ieee80211_calc_expected_tx_airtime(struct ieee80211_hw *hw,
struct ieee80211_vif *vif,
struct ieee80211_sta *pubsta,
int len, bool ampdu)
{
struct ieee80211_supported_band *sband;
struct ieee80211_chanctx_conf *conf;
int rateidx, shift = 0;
bool cck, short_pream;
u32 basic_rates;
u8 band = 0;
u16 rate;
len += 38; /* Ethernet header length */
conf = rcu_dereference(vif->bss_conf.chanctx_conf);
if (conf) {
band = conf->def.chan->band;
shift = ieee80211_chandef_get_shift(&conf->def);
}
if (pubsta) {
struct sta_info *sta = container_of(pubsta, struct sta_info,
sta);
struct ieee80211_rx_status stat;
struct ieee80211_tx_rate *tx_rate = &sta->deflink.tx_stats.last_rate;
struct rate_info *ri = &sta->deflink.tx_stats.last_rate_info;
u32 duration, overhead;
u8 agg_shift;
if (ieee80211_fill_rx_status(&stat, hw, tx_rate, ri, band, len))
return 0;
if (stat.encoding == RX_ENC_LEGACY || !ampdu)
return ieee80211_calc_rx_airtime(hw, &stat, len);
duration = ieee80211_get_rate_duration(hw, &stat, &overhead);
/*
* Assume that HT/VHT transmission on any AC except VO will
* use aggregation. Since we don't have reliable reporting
* of aggregation length, assume an average size based on the
* tx rate.
* This will not be very accurate, but much better than simply
* assuming un-aggregated tx in all cases.
*/
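/*
 * Worked example (editorial note): a rate whose table duration is
 * 300 * 1024 (roughly 300 us per AVG_PKT_SIZE frame) is not above the
 * first threshold but is above the second, so agg_shift becomes 2 and
 * the per-PPDU overhead is divided by 4, i.e. it is spread over an
 * assumed A-MPDU of about 2^2 = 4 subframes.
 */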
if (duration > 400 * 1024) /* <= VHT20 MCS2 1S */
agg_shift = 1;
else if (duration > 250 * 1024) /* <= VHT20 MCS3 1S or MCS1 2S */
agg_shift = 2;
else if (duration > 150 * 1024) /* <= VHT20 MCS5 1S or MCS2 2S */
agg_shift = 3;
else if (duration > 70 * 1024) /* <= VHT20 MCS5 2S */
agg_shift = 4;
else if (stat.encoding != RX_ENC_HE ||
duration > 20 * 1024) /* <= HE40 MCS6 2S */
agg_shift = 5;
else
agg_shift = 6;
duration *= len;
duration /= AVG_PKT_SIZE;
duration /= 1024;
duration += (overhead >> agg_shift);
return max_t(u32, duration, 4);
}
if (!conf)
return 0;
/* No station to get latest rate from, so calculate the worst-case
* duration using the lowest configured basic rate.
*/
sband = hw->wiphy->bands[band];
basic_rates = vif->bss_conf.basic_rates;
short_pream = vif->bss_conf.use_short_preamble;
rateidx = basic_rates ? ffs(basic_rates) - 1 : 0;
rate = sband->bitrates[rateidx].bitrate << shift;
cck = sband->bitrates[rateidx].flags & IEEE80211_RATE_MANDATORY_B;
return ieee80211_calc_legacy_rate_duration(rate, short_pream, cck, len);
}
| linux-master | net/mac80211/airtime.c |
// SPDX-License-Identifier: GPL-2.0-only
/*
* Copyright 2003-2005 Devicescape Software, Inc.
* Copyright (c) 2006 Jiri Benc <[email protected]>
* Copyright 2007 Johannes Berg <[email protected]>
* Copyright 2013-2014 Intel Mobile Communications GmbH
* Copyright(c) 2016 Intel Deutschland GmbH
* Copyright (C) 2018 - 2022 Intel Corporation
*/
#include <linux/debugfs.h>
#include <linux/ieee80211.h>
#include "ieee80211_i.h"
#include "debugfs.h"
#include "debugfs_sta.h"
#include "sta_info.h"
#include "driver-ops.h"
/* sta attributes */
#define STA_READ(name, field, format_string) \
static ssize_t sta_ ##name## _read(struct file *file, \
char __user *userbuf, \
size_t count, loff_t *ppos) \
{ \
struct sta_info *sta = file->private_data; \
return mac80211_format_buffer(userbuf, count, ppos, \
format_string, sta->field); \
}
#define STA_READ_D(name, field) STA_READ(name, field, "%d\n")
#define STA_OPS(name) \
static const struct file_operations sta_ ##name## _ops = { \
.read = sta_##name##_read, \
.open = simple_open, \
.llseek = generic_file_llseek, \
}
#define STA_OPS_RW(name) \
static const struct file_operations sta_ ##name## _ops = { \
.read = sta_##name##_read, \
.write = sta_##name##_write, \
.open = simple_open, \
.llseek = generic_file_llseek, \
}
#define STA_FILE(name, field, format) \
STA_READ_##format(name, field) \
STA_OPS(name)
STA_FILE(aid, sta.aid, D);
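/*
 * For reference (expansion sketch, editorial note): the single
 * STA_FILE(aid, sta.aid, D) invocation above expands to roughly
 *
 *	static ssize_t sta_aid_read(struct file *file, char __user *userbuf,
 *				    size_t count, loff_t *ppos)
 *	{
 *		struct sta_info *sta = file->private_data;
 *		return mac80211_format_buffer(userbuf, count, ppos,
 *					      "%d\n", sta->sta.aid);
 *	}
 *	static const struct file_operations sta_aid_ops = { ... };
 *
 * so adding a simple read-only attribute needs only one macro line plus
 * a matching DEBUGFS_ADD() call further down.
 */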
static const char * const sta_flag_names[] = {
#define FLAG(F) [WLAN_STA_##F] = #F
FLAG(AUTH),
FLAG(ASSOC),
FLAG(PS_STA),
FLAG(AUTHORIZED),
FLAG(SHORT_PREAMBLE),
FLAG(WDS),
FLAG(CLEAR_PS_FILT),
FLAG(MFP),
FLAG(BLOCK_BA),
FLAG(PS_DRIVER),
FLAG(PSPOLL),
FLAG(TDLS_PEER),
FLAG(TDLS_PEER_AUTH),
FLAG(TDLS_INITIATOR),
FLAG(TDLS_CHAN_SWITCH),
FLAG(TDLS_OFF_CHANNEL),
FLAG(TDLS_WIDER_BW),
FLAG(UAPSD),
FLAG(SP),
FLAG(4ADDR_EVENT),
FLAG(INSERTED),
FLAG(RATE_CONTROL),
FLAG(TOFFSET_KNOWN),
FLAG(MPSP_OWNER),
FLAG(MPSP_RECIPIENT),
FLAG(PS_DELIVER),
FLAG(USES_ENCRYPTION),
FLAG(DECAP_OFFLOAD),
#undef FLAG
};
static ssize_t sta_flags_read(struct file *file, char __user *userbuf,
size_t count, loff_t *ppos)
{
char buf[16 * NUM_WLAN_STA_FLAGS], *pos = buf;
char *end = buf + sizeof(buf) - 1;
struct sta_info *sta = file->private_data;
unsigned int flg;
BUILD_BUG_ON(ARRAY_SIZE(sta_flag_names) != NUM_WLAN_STA_FLAGS);
for (flg = 0; flg < NUM_WLAN_STA_FLAGS; flg++) {
if (test_sta_flag(sta, flg))
pos += scnprintf(pos, end - pos, "%s\n",
sta_flag_names[flg]);
}
return simple_read_from_buffer(userbuf, count, ppos, buf, strlen(buf));
}
STA_OPS(flags);
static ssize_t sta_num_ps_buf_frames_read(struct file *file,
char __user *userbuf,
size_t count, loff_t *ppos)
{
struct sta_info *sta = file->private_data;
char buf[17*IEEE80211_NUM_ACS], *p = buf;
int ac;
for (ac = 0; ac < IEEE80211_NUM_ACS; ac++)
p += scnprintf(p, sizeof(buf)+buf-p, "AC%d: %d\n", ac,
skb_queue_len(&sta->ps_tx_buf[ac]) +
skb_queue_len(&sta->tx_filtered[ac]));
return simple_read_from_buffer(userbuf, count, ppos, buf, p - buf);
}
STA_OPS(num_ps_buf_frames);
static ssize_t sta_last_seq_ctrl_read(struct file *file, char __user *userbuf,
size_t count, loff_t *ppos)
{
char buf[15*IEEE80211_NUM_TIDS], *p = buf;
int i;
struct sta_info *sta = file->private_data;
for (i = 0; i < IEEE80211_NUM_TIDS; i++)
p += scnprintf(p, sizeof(buf)+buf-p, "%x ",
le16_to_cpu(sta->last_seq_ctrl[i]));
p += scnprintf(p, sizeof(buf)+buf-p, "\n");
return simple_read_from_buffer(userbuf, count, ppos, buf, p - buf);
}
STA_OPS(last_seq_ctrl);
#define AQM_TXQ_ENTRY_LEN 130
static ssize_t sta_aqm_read(struct file *file, char __user *userbuf,
size_t count, loff_t *ppos)
{
struct sta_info *sta = file->private_data;
struct ieee80211_local *local = sta->local;
size_t bufsz = AQM_TXQ_ENTRY_LEN * (IEEE80211_NUM_TIDS + 2);
char *buf = kzalloc(bufsz, GFP_KERNEL), *p = buf;
struct txq_info *txqi;
ssize_t rv;
int i;
if (!buf)
return -ENOMEM;
spin_lock_bh(&local->fq.lock);
rcu_read_lock();
p += scnprintf(p,
bufsz + buf - p,
"target %uus interval %uus ecn %s\n",
codel_time_to_us(sta->cparams.target),
codel_time_to_us(sta->cparams.interval),
sta->cparams.ecn ? "yes" : "no");
p += scnprintf(p,
bufsz + buf - p,
"tid ac backlog-bytes backlog-packets new-flows drops marks overlimit collisions tx-bytes tx-packets flags\n");
for (i = 0; i < ARRAY_SIZE(sta->sta.txq); i++) {
if (!sta->sta.txq[i])
continue;
txqi = to_txq_info(sta->sta.txq[i]);
p += scnprintf(p, bufsz + buf - p,
"%d %d %u %u %u %u %u %u %u %u %u 0x%lx(%s%s%s%s)\n",
txqi->txq.tid,
txqi->txq.ac,
txqi->tin.backlog_bytes,
txqi->tin.backlog_packets,
txqi->tin.flows,
txqi->cstats.drop_count,
txqi->cstats.ecn_mark,
txqi->tin.overlimit,
txqi->tin.collisions,
txqi->tin.tx_bytes,
txqi->tin.tx_packets,
txqi->flags,
test_bit(IEEE80211_TXQ_STOP, &txqi->flags) ? "STOP" : "RUN",
test_bit(IEEE80211_TXQ_AMPDU, &txqi->flags) ? " AMPDU" : "",
test_bit(IEEE80211_TXQ_NO_AMSDU, &txqi->flags) ? " NO-AMSDU" : "",
test_bit(IEEE80211_TXQ_DIRTY, &txqi->flags) ? " DIRTY" : "");
}
rcu_read_unlock();
spin_unlock_bh(&local->fq.lock);
rv = simple_read_from_buffer(userbuf, count, ppos, buf, p - buf);
kfree(buf);
return rv;
}
STA_OPS(aqm);
static ssize_t sta_airtime_read(struct file *file, char __user *userbuf,
size_t count, loff_t *ppos)
{
struct sta_info *sta = file->private_data;
struct ieee80211_local *local = sta->sdata->local;
size_t bufsz = 400;
char *buf = kzalloc(bufsz, GFP_KERNEL), *p = buf;
u64 rx_airtime = 0, tx_airtime = 0;
s32 deficit[IEEE80211_NUM_ACS];
ssize_t rv;
int ac;
if (!buf)
return -ENOMEM;
for (ac = 0; ac < IEEE80211_NUM_ACS; ac++) {
spin_lock_bh(&local->active_txq_lock[ac]);
rx_airtime += sta->airtime[ac].rx_airtime;
tx_airtime += sta->airtime[ac].tx_airtime;
deficit[ac] = sta->airtime[ac].deficit;
spin_unlock_bh(&local->active_txq_lock[ac]);
}
p += scnprintf(p, bufsz + buf - p,
"RX: %llu us\nTX: %llu us\nWeight: %u\n"
"Deficit: VO: %d us VI: %d us BE: %d us BK: %d us\n",
rx_airtime, tx_airtime, sta->airtime_weight,
deficit[0], deficit[1], deficit[2], deficit[3]);
rv = simple_read_from_buffer(userbuf, count, ppos, buf, p - buf);
kfree(buf);
return rv;
}
static ssize_t sta_airtime_write(struct file *file, const char __user *userbuf,
size_t count, loff_t *ppos)
{
struct sta_info *sta = file->private_data;
struct ieee80211_local *local = sta->sdata->local;
int ac;
for (ac = 0; ac < IEEE80211_NUM_ACS; ac++) {
spin_lock_bh(&local->active_txq_lock[ac]);
sta->airtime[ac].rx_airtime = 0;
sta->airtime[ac].tx_airtime = 0;
sta->airtime[ac].deficit = sta->airtime_weight;
spin_unlock_bh(&local->active_txq_lock[ac]);
}
return count;
}
STA_OPS_RW(airtime);
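/*
 * Usage sketch (illustrative, paths depend on the local debugfs layout):
 * writing anything to the per-station airtime file resets the accumulated
 * RX/TX airtime and restores each AC's deficit to the station weight, e.g.
 *
 *	echo 1 > /sys/kernel/debug/ieee80211/phy0/netdev:wlan0/stations/<mac>/airtime
 */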
static ssize_t sta_aql_read(struct file *file, char __user *userbuf,
size_t count, loff_t *ppos)
{
struct sta_info *sta = file->private_data;
struct ieee80211_local *local = sta->sdata->local;
size_t bufsz = 400;
char *buf = kzalloc(bufsz, GFP_KERNEL), *p = buf;
u32 q_depth[IEEE80211_NUM_ACS];
u32 q_limit_l[IEEE80211_NUM_ACS], q_limit_h[IEEE80211_NUM_ACS];
ssize_t rv;
int ac;
if (!buf)
return -ENOMEM;
for (ac = 0; ac < IEEE80211_NUM_ACS; ac++) {
spin_lock_bh(&local->active_txq_lock[ac]);
q_limit_l[ac] = sta->airtime[ac].aql_limit_low;
q_limit_h[ac] = sta->airtime[ac].aql_limit_high;
spin_unlock_bh(&local->active_txq_lock[ac]);
q_depth[ac] = atomic_read(&sta->airtime[ac].aql_tx_pending);
}
p += scnprintf(p, bufsz + buf - p,
"Q depth: VO: %u us VI: %u us BE: %u us BK: %u us\n"
"Q limit[low/high]: VO: %u/%u VI: %u/%u BE: %u/%u BK: %u/%u\n",
q_depth[0], q_depth[1], q_depth[2], q_depth[3],
q_limit_l[0], q_limit_h[0], q_limit_l[1], q_limit_h[1],
q_limit_l[2], q_limit_h[2], q_limit_l[3], q_limit_h[3]);
rv = simple_read_from_buffer(userbuf, count, ppos, buf, p - buf);
kfree(buf);
return rv;
}
static ssize_t sta_aql_write(struct file *file, const char __user *userbuf,
size_t count, loff_t *ppos)
{
struct sta_info *sta = file->private_data;
u32 ac, q_limit_l, q_limit_h;
char _buf[100] = {}, *buf = _buf;
if (count > sizeof(_buf))
return -EINVAL;
if (copy_from_user(buf, userbuf, count))
return -EFAULT;
buf[sizeof(_buf) - 1] = '\0';
if (sscanf(buf, "limit %u %u %u", &ac, &q_limit_l, &q_limit_h)
!= 3)
return -EINVAL;
if (ac >= IEEE80211_NUM_ACS)
return -EINVAL;
sta->airtime[ac].aql_limit_low = q_limit_l;
sta->airtime[ac].aql_limit_high = q_limit_h;
return count;
}
STA_OPS_RW(aql);
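/*
 * Usage sketch (illustrative, paths depend on the local debugfs layout):
 * the AQL limits can be tuned per AC from user space using the
 * "limit %u %u %u" format parsed above, e.g.
 *
 *	echo "limit 2 6000 18000" > .../stations/<mac>/aql
 *
 * where 2 selects AC_BE and the two values are the low/high queue limits.
 */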
static ssize_t sta_agg_status_read(struct file *file, char __user *userbuf,
size_t count, loff_t *ppos)
{
char *buf, *p;
ssize_t bufsz = 71 + IEEE80211_NUM_TIDS * 40;
int i;
struct sta_info *sta = file->private_data;
struct tid_ampdu_rx *tid_rx;
struct tid_ampdu_tx *tid_tx;
ssize_t ret;
buf = kzalloc(bufsz, GFP_KERNEL);
if (!buf)
return -ENOMEM;
p = buf;
rcu_read_lock();
p += scnprintf(p, bufsz + buf - p, "next dialog_token: %#02x\n",
sta->ampdu_mlme.dialog_token_allocator + 1);
p += scnprintf(p, bufsz + buf - p,
"TID\t\tRX\tDTKN\tSSN\t\tTX\tDTKN\tpending\n");
for (i = 0; i < IEEE80211_NUM_TIDS; i++) {
bool tid_rx_valid;
tid_rx = rcu_dereference(sta->ampdu_mlme.tid_rx[i]);
tid_tx = rcu_dereference(sta->ampdu_mlme.tid_tx[i]);
tid_rx_valid = test_bit(i, sta->ampdu_mlme.agg_session_valid);
p += scnprintf(p, bufsz + buf - p, "%02d", i);
p += scnprintf(p, bufsz + buf - p, "\t\t%x",
tid_rx_valid);
p += scnprintf(p, bufsz + buf - p, "\t%#.2x",
tid_rx_valid ?
sta->ampdu_mlme.tid_rx_token[i] : 0);
p += scnprintf(p, bufsz + buf - p, "\t%#.3x",
tid_rx ? tid_rx->ssn : 0);
p += scnprintf(p, bufsz + buf - p, "\t\t%x", !!tid_tx);
p += scnprintf(p, bufsz + buf - p, "\t%#.2x",
tid_tx ? tid_tx->dialog_token : 0);
p += scnprintf(p, bufsz + buf - p, "\t%03d",
tid_tx ? skb_queue_len(&tid_tx->pending) : 0);
p += scnprintf(p, bufsz + buf - p, "\n");
}
rcu_read_unlock();
ret = simple_read_from_buffer(userbuf, count, ppos, buf, p - buf);
kfree(buf);
return ret;
}
static ssize_t sta_agg_status_write(struct file *file, const char __user *userbuf,
size_t count, loff_t *ppos)
{
char _buf[25] = {}, *buf = _buf;
struct sta_info *sta = file->private_data;
bool start, tx;
unsigned long tid;
char *pos;
int ret, timeout = 5000;
if (count > sizeof(_buf))
return -EINVAL;
if (copy_from_user(buf, userbuf, count))
return -EFAULT;
buf[sizeof(_buf) - 1] = '\0';
pos = buf;
buf = strsep(&pos, " ");
if (!buf)
return -EINVAL;
if (!strcmp(buf, "tx"))
tx = true;
else if (!strcmp(buf, "rx"))
tx = false;
else
return -EINVAL;
buf = strsep(&pos, " ");
if (!buf)
return -EINVAL;
if (!strcmp(buf, "start")) {
start = true;
if (!tx)
return -EINVAL;
} else if (!strcmp(buf, "stop")) {
start = false;
} else {
return -EINVAL;
}
buf = strsep(&pos, " ");
if (!buf)
return -EINVAL;
if (sscanf(buf, "timeout=%d", &timeout) == 1) {
buf = strsep(&pos, " ");
if (!buf || !tx || !start)
return -EINVAL;
}
ret = kstrtoul(buf, 0, &tid);
if (ret || tid >= IEEE80211_NUM_TIDS)
return -EINVAL;
if (tx) {
if (start)
ret = ieee80211_start_tx_ba_session(&sta->sta, tid,
timeout);
else
ret = ieee80211_stop_tx_ba_session(&sta->sta, tid);
} else {
__ieee80211_stop_rx_ba_session(sta, tid, WLAN_BACK_RECIPIENT,
3, true);
ret = 0;
}
return ret ?: count;
}
STA_OPS_RW(agg_status);
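/*
 * Usage sketch (illustrative): the agg_status file accepts the commands
 * parsed above, written into .../stations/<mac>/agg_status, e.g.
 *
 *	echo "tx start 0"               - start a TX BA session on TID 0
 *	echo "tx start timeout=5000 0"  - same, with an explicit timeout
 *	echo "tx stop 0"                - tear the TX session down
 *	echo "rx stop 0"                - stop the RX session
 *
 * ("rx start" is rejected; RX sessions are only set up by the peer.)
 */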
/* link sta attributes */
#define LINK_STA_OPS(name) \
static const struct file_operations link_sta_ ##name## _ops = { \
.read = link_sta_##name##_read, \
.open = simple_open, \
.llseek = generic_file_llseek, \
}
static ssize_t link_sta_addr_read(struct file *file, char __user *userbuf,
size_t count, loff_t *ppos)
{
struct link_sta_info *link_sta = file->private_data;
u8 mac[3 * ETH_ALEN + 1];
snprintf(mac, sizeof(mac), "%pM\n", link_sta->pub->addr);
return simple_read_from_buffer(userbuf, count, ppos, mac, 3 * ETH_ALEN);
}
LINK_STA_OPS(addr);
static ssize_t link_sta_ht_capa_read(struct file *file, char __user *userbuf,
size_t count, loff_t *ppos)
{
#define PRINT_HT_CAP(_cond, _str) \
do { \
if (_cond) \
p += scnprintf(p, bufsz + buf - p, "\t" _str "\n"); \
} while (0)
char *buf, *p;
int i;
ssize_t bufsz = 512;
struct link_sta_info *link_sta = file->private_data;
struct ieee80211_sta_ht_cap *htc = &link_sta->pub->ht_cap;
ssize_t ret;
buf = kzalloc(bufsz, GFP_KERNEL);
if (!buf)
return -ENOMEM;
p = buf;
p += scnprintf(p, bufsz + buf - p, "ht %ssupported\n",
htc->ht_supported ? "" : "not ");
if (htc->ht_supported) {
p += scnprintf(p, bufsz + buf - p, "cap: %#.4x\n", htc->cap);
PRINT_HT_CAP((htc->cap & BIT(0)), "RX LDPC");
PRINT_HT_CAP((htc->cap & BIT(1)), "HT20/HT40");
PRINT_HT_CAP(!(htc->cap & BIT(1)), "HT20");
PRINT_HT_CAP(((htc->cap >> 2) & 0x3) == 0, "Static SM Power Save");
PRINT_HT_CAP(((htc->cap >> 2) & 0x3) == 1, "Dynamic SM Power Save");
PRINT_HT_CAP(((htc->cap >> 2) & 0x3) == 3, "SM Power Save disabled");
PRINT_HT_CAP((htc->cap & BIT(4)), "RX Greenfield");
PRINT_HT_CAP((htc->cap & BIT(5)), "RX HT20 SGI");
PRINT_HT_CAP((htc->cap & BIT(6)), "RX HT40 SGI");
PRINT_HT_CAP((htc->cap & BIT(7)), "TX STBC");
PRINT_HT_CAP(((htc->cap >> 8) & 0x3) == 0, "No RX STBC");
PRINT_HT_CAP(((htc->cap >> 8) & 0x3) == 1, "RX STBC 1-stream");
PRINT_HT_CAP(((htc->cap >> 8) & 0x3) == 2, "RX STBC 2-streams");
PRINT_HT_CAP(((htc->cap >> 8) & 0x3) == 3, "RX STBC 3-streams");
PRINT_HT_CAP((htc->cap & BIT(10)), "HT Delayed Block Ack");
PRINT_HT_CAP(!(htc->cap & BIT(11)), "Max AMSDU length: "
"3839 bytes");
PRINT_HT_CAP((htc->cap & BIT(11)), "Max AMSDU length: "
"7935 bytes");
/*
* For beacons and probe response this would mean the BSS
* does or does not allow the usage of DSSS/CCK HT40.
* Otherwise it means the STA does or does not use
* DSSS/CCK HT40.
*/
PRINT_HT_CAP((htc->cap & BIT(12)), "DSSS/CCK HT40");
PRINT_HT_CAP(!(htc->cap & BIT(12)), "No DSSS/CCK HT40");
/* BIT(13) is reserved */
PRINT_HT_CAP((htc->cap & BIT(14)), "40 MHz Intolerant");
PRINT_HT_CAP((htc->cap & BIT(15)), "L-SIG TXOP protection");
p += scnprintf(p, bufsz + buf - p, "ampdu factor/density: %d/%d\n",
htc->ampdu_factor, htc->ampdu_density);
p += scnprintf(p, bufsz + buf - p, "MCS mask:");
for (i = 0; i < IEEE80211_HT_MCS_MASK_LEN; i++)
p += scnprintf(p, bufsz + buf - p, " %.2x",
htc->mcs.rx_mask[i]);
p += scnprintf(p, bufsz + buf - p, "\n");
/* If not set this is meaningless */
if (le16_to_cpu(htc->mcs.rx_highest)) {
p += scnprintf(p, bufsz + buf - p,
"MCS rx highest: %d Mbps\n",
le16_to_cpu(htc->mcs.rx_highest));
}
p += scnprintf(p, bufsz + buf - p, "MCS tx params: %x\n",
htc->mcs.tx_params);
}
ret = simple_read_from_buffer(userbuf, count, ppos, buf, p - buf);
kfree(buf);
return ret;
}
LINK_STA_OPS(ht_capa);
static ssize_t link_sta_vht_capa_read(struct file *file, char __user *userbuf,
size_t count, loff_t *ppos)
{
char *buf, *p;
struct link_sta_info *link_sta = file->private_data;
struct ieee80211_sta_vht_cap *vhtc = &link_sta->pub->vht_cap;
ssize_t ret;
ssize_t bufsz = 512;
buf = kzalloc(bufsz, GFP_KERNEL);
if (!buf)
return -ENOMEM;
p = buf;
p += scnprintf(p, bufsz + buf - p, "VHT %ssupported\n",
vhtc->vht_supported ? "" : "not ");
if (vhtc->vht_supported) {
p += scnprintf(p, bufsz + buf - p, "cap: %#.8x\n",
vhtc->cap);
#define PFLAG(a, b) \
do { \
if (vhtc->cap & IEEE80211_VHT_CAP_ ## a) \
p += scnprintf(p, bufsz + buf - p, \
"\t\t%s\n", b); \
} while (0)
switch (vhtc->cap & 0x3) {
case IEEE80211_VHT_CAP_MAX_MPDU_LENGTH_3895:
p += scnprintf(p, bufsz + buf - p,
"\t\tMAX-MPDU-3895\n");
break;
case IEEE80211_VHT_CAP_MAX_MPDU_LENGTH_7991:
p += scnprintf(p, bufsz + buf - p,
"\t\tMAX-MPDU-7991\n");
break;
case IEEE80211_VHT_CAP_MAX_MPDU_LENGTH_11454:
p += scnprintf(p, bufsz + buf - p,
"\t\tMAX-MPDU-11454\n");
break;
default:
p += scnprintf(p, bufsz + buf - p,
"\t\tMAX-MPDU-UNKNOWN\n");
}
switch (vhtc->cap & IEEE80211_VHT_CAP_SUPP_CHAN_WIDTH_MASK) {
case 0:
p += scnprintf(p, bufsz + buf - p,
"\t\t80Mhz\n");
break;
case IEEE80211_VHT_CAP_SUPP_CHAN_WIDTH_160MHZ:
p += scnprintf(p, bufsz + buf - p,
"\t\t160Mhz\n");
break;
case IEEE80211_VHT_CAP_SUPP_CHAN_WIDTH_160_80PLUS80MHZ:
p += scnprintf(p, bufsz + buf - p,
"\t\t80+80Mhz\n");
break;
default:
p += scnprintf(p, bufsz + buf - p,
"\t\tUNKNOWN-MHZ: 0x%x\n",
(vhtc->cap >> 2) & 0x3);
}
PFLAG(RXLDPC, "RXLDPC");
PFLAG(SHORT_GI_80, "SHORT-GI-80");
PFLAG(SHORT_GI_160, "SHORT-GI-160");
PFLAG(TXSTBC, "TXSTBC");
p += scnprintf(p, bufsz + buf - p,
"\t\tRXSTBC_%d\n", (vhtc->cap >> 8) & 0x7);
PFLAG(SU_BEAMFORMER_CAPABLE, "SU-BEAMFORMER-CAPABLE");
PFLAG(SU_BEAMFORMEE_CAPABLE, "SU-BEAMFORMEE-CAPABLE");
p += scnprintf(p, bufsz + buf - p,
"\t\tBEAMFORMEE-STS: 0x%x\n",
(vhtc->cap & IEEE80211_VHT_CAP_BEAMFORMEE_STS_MASK) >>
IEEE80211_VHT_CAP_BEAMFORMEE_STS_SHIFT);
p += scnprintf(p, bufsz + buf - p,
"\t\tSOUNDING-DIMENSIONS: 0x%x\n",
(vhtc->cap & IEEE80211_VHT_CAP_SOUNDING_DIMENSIONS_MASK)
>> IEEE80211_VHT_CAP_SOUNDING_DIMENSIONS_SHIFT);
PFLAG(MU_BEAMFORMER_CAPABLE, "MU-BEAMFORMER-CAPABLE");
PFLAG(MU_BEAMFORMEE_CAPABLE, "MU-BEAMFORMEE-CAPABLE");
PFLAG(VHT_TXOP_PS, "TXOP-PS");
PFLAG(HTC_VHT, "HTC-VHT");
p += scnprintf(p, bufsz + buf - p,
"\t\tMPDU-LENGTH-EXPONENT: 0x%x\n",
(vhtc->cap & IEEE80211_VHT_CAP_MAX_A_MPDU_LENGTH_EXPONENT_MASK) >>
IEEE80211_VHT_CAP_MAX_A_MPDU_LENGTH_EXPONENT_SHIFT);
PFLAG(VHT_LINK_ADAPTATION_VHT_UNSOL_MFB,
"LINK-ADAPTATION-VHT-UNSOL-MFB");
p += scnprintf(p, bufsz + buf - p,
"\t\tLINK-ADAPTATION-VHT-MRQ-MFB: 0x%x\n",
(vhtc->cap & IEEE80211_VHT_CAP_VHT_LINK_ADAPTATION_VHT_MRQ_MFB) >> 26);
PFLAG(RX_ANTENNA_PATTERN, "RX-ANTENNA-PATTERN");
PFLAG(TX_ANTENNA_PATTERN, "TX-ANTENNA-PATTERN");
p += scnprintf(p, bufsz + buf - p, "RX MCS: %.4x\n",
le16_to_cpu(vhtc->vht_mcs.rx_mcs_map));
if (vhtc->vht_mcs.rx_highest)
p += scnprintf(p, bufsz + buf - p,
"MCS RX highest: %d Mbps\n",
le16_to_cpu(vhtc->vht_mcs.rx_highest));
p += scnprintf(p, bufsz + buf - p, "TX MCS: %.4x\n",
le16_to_cpu(vhtc->vht_mcs.tx_mcs_map));
if (vhtc->vht_mcs.tx_highest)
p += scnprintf(p, bufsz + buf - p,
"MCS TX highest: %d Mbps\n",
le16_to_cpu(vhtc->vht_mcs.tx_highest));
#undef PFLAG
}
ret = simple_read_from_buffer(userbuf, count, ppos, buf, p - buf);
kfree(buf);
return ret;
}
LINK_STA_OPS(vht_capa);
static ssize_t link_sta_he_capa_read(struct file *file, char __user *userbuf,
size_t count, loff_t *ppos)
{
char *buf, *p;
size_t buf_sz = PAGE_SIZE;
struct link_sta_info *link_sta = file->private_data;
struct ieee80211_sta_he_cap *hec = &link_sta->pub->he_cap;
struct ieee80211_he_mcs_nss_supp *nss = &hec->he_mcs_nss_supp;
u8 ppe_size;
u8 *cap;
int i;
ssize_t ret;
buf = kmalloc(buf_sz, GFP_KERNEL);
if (!buf)
return -ENOMEM;
p = buf;
p += scnprintf(p, buf_sz + buf - p, "HE %ssupported\n",
hec->has_he ? "" : "not ");
if (!hec->has_he)
goto out;
cap = hec->he_cap_elem.mac_cap_info;
p += scnprintf(p, buf_sz + buf - p,
"MAC-CAP: %#.2x %#.2x %#.2x %#.2x %#.2x %#.2x\n",
cap[0], cap[1], cap[2], cap[3], cap[4], cap[5]);
#define PRINT(fmt, ...) \
p += scnprintf(p, buf_sz + buf - p, "\t\t" fmt "\n", \
##__VA_ARGS__)
#define PFLAG(t, n, a, b) \
do { \
if (cap[n] & IEEE80211_HE_##t##_CAP##n##_##a) \
PRINT("%s", b); \
} while (0)
#define PFLAG_RANGE(t, i, n, s, m, off, fmt) \
do { \
u8 msk = IEEE80211_HE_##t##_CAP##i##_##n##_MASK; \
u8 idx = ((cap[i] & msk) >> (ffs(msk) - 1)) + off; \
PRINT(fmt, (s << idx) + (m * idx)); \
} while (0)
#define PFLAG_RANGE_DEFAULT(t, i, n, s, m, off, fmt, a, b) \
do { \
if (cap[i] == IEEE80211_HE_##t ##_CAP##i##_##n##_##a) { \
PRINT("%s", b); \
break; \
} \
PFLAG_RANGE(t, i, n, s, m, off, fmt); \
} while (0)
PFLAG(MAC, 0, HTC_HE, "HTC-HE");
PFLAG(MAC, 0, TWT_REQ, "TWT-REQ");
PFLAG(MAC, 0, TWT_RES, "TWT-RES");
PFLAG_RANGE_DEFAULT(MAC, 0, DYNAMIC_FRAG, 0, 1, 0,
"DYNAMIC-FRAG-LEVEL-%d", NOT_SUPP, "NOT-SUPP");
PFLAG_RANGE_DEFAULT(MAC, 0, MAX_NUM_FRAG_MSDU, 1, 0, 0,
"MAX-NUM-FRAG-MSDU-%d", UNLIMITED, "UNLIMITED");
PFLAG_RANGE_DEFAULT(MAC, 1, MIN_FRAG_SIZE, 128, 0, -1,
"MIN-FRAG-SIZE-%d", UNLIMITED, "UNLIMITED");
PFLAG_RANGE_DEFAULT(MAC, 1, TF_MAC_PAD_DUR, 0, 8, 0,
"TF-MAC-PAD-DUR-%dUS", MASK, "UNKNOWN");
PFLAG_RANGE(MAC, 1, MULTI_TID_AGG_RX_QOS, 0, 1, 1,
"MULTI-TID-AGG-RX-QOS-%d");
if (cap[0] & IEEE80211_HE_MAC_CAP0_HTC_HE) {
switch (((cap[2] << 1) | (cap[1] >> 7)) & 0x3) {
case 0:
PRINT("LINK-ADAPTATION-NO-FEEDBACK");
break;
case 1:
PRINT("LINK-ADAPTATION-RESERVED");
break;
case 2:
PRINT("LINK-ADAPTATION-UNSOLICITED-FEEDBACK");
break;
case 3:
PRINT("LINK-ADAPTATION-BOTH");
break;
}
}
PFLAG(MAC, 2, ALL_ACK, "ALL-ACK");
PFLAG(MAC, 2, TRS, "TRS");
PFLAG(MAC, 2, BSR, "BSR");
PFLAG(MAC, 2, BCAST_TWT, "BCAST-TWT");
PFLAG(MAC, 2, 32BIT_BA_BITMAP, "32BIT-BA-BITMAP");
PFLAG(MAC, 2, MU_CASCADING, "MU-CASCADING");
PFLAG(MAC, 2, ACK_EN, "ACK-EN");
PFLAG(MAC, 3, OMI_CONTROL, "OMI-CONTROL");
PFLAG(MAC, 3, OFDMA_RA, "OFDMA-RA");
switch (cap[3] & IEEE80211_HE_MAC_CAP3_MAX_AMPDU_LEN_EXP_MASK) {
case IEEE80211_HE_MAC_CAP3_MAX_AMPDU_LEN_EXP_EXT_0:
PRINT("MAX-AMPDU-LEN-EXP-USE-EXT-0");
break;
case IEEE80211_HE_MAC_CAP3_MAX_AMPDU_LEN_EXP_EXT_1:
PRINT("MAX-AMPDU-LEN-EXP-VHT-EXT-1");
break;
case IEEE80211_HE_MAC_CAP3_MAX_AMPDU_LEN_EXP_EXT_2:
PRINT("MAX-AMPDU-LEN-EXP-VHT-EXT-2");
break;
case IEEE80211_HE_MAC_CAP3_MAX_AMPDU_LEN_EXP_EXT_3:
PRINT("MAX-AMPDU-LEN-EXP-VHT-EXT-3");
break;
}
PFLAG(MAC, 3, AMSDU_FRAG, "AMSDU-FRAG");
PFLAG(MAC, 3, FLEX_TWT_SCHED, "FLEX-TWT-SCHED");
PFLAG(MAC, 3, RX_CTRL_FRAME_TO_MULTIBSS, "RX-CTRL-FRAME-TO-MULTIBSS");
PFLAG(MAC, 4, BSRP_BQRP_A_MPDU_AGG, "BSRP-BQRP-A-MPDU-AGG");
PFLAG(MAC, 4, QTP, "QTP");
PFLAG(MAC, 4, BQR, "BQR");
PFLAG(MAC, 4, PSR_RESP, "PSR-RESP");
PFLAG(MAC, 4, NDP_FB_REP, "NDP-FB-REP");
PFLAG(MAC, 4, OPS, "OPS");
PFLAG(MAC, 4, AMSDU_IN_AMPDU, "AMSDU-IN-AMPDU");
PRINT("MULTI-TID-AGG-TX-QOS-%d", ((cap[5] << 1) | (cap[4] >> 7)) & 0x7);
PFLAG(MAC, 5, SUBCHAN_SELECTIVE_TRANSMISSION,
"SUBCHAN-SELECTIVE-TRANSMISSION");
PFLAG(MAC, 5, UL_2x996_TONE_RU, "UL-2x996-TONE-RU");
PFLAG(MAC, 5, OM_CTRL_UL_MU_DATA_DIS_RX, "OM-CTRL-UL-MU-DATA-DIS-RX");
PFLAG(MAC, 5, HE_DYNAMIC_SM_PS, "HE-DYNAMIC-SM-PS");
PFLAG(MAC, 5, PUNCTURED_SOUNDING, "PUNCTURED-SOUNDING");
PFLAG(MAC, 5, HT_VHT_TRIG_FRAME_RX, "HT-VHT-TRIG-FRAME-RX");
cap = hec->he_cap_elem.phy_cap_info;
p += scnprintf(p, buf_sz + buf - p,
"PHY CAP: %#.2x %#.2x %#.2x %#.2x %#.2x %#.2x %#.2x %#.2x %#.2x %#.2x %#.2x\n",
cap[0], cap[1], cap[2], cap[3], cap[4], cap[5], cap[6],
cap[7], cap[8], cap[9], cap[10]);
PFLAG(PHY, 0, CHANNEL_WIDTH_SET_40MHZ_IN_2G,
"CHANNEL-WIDTH-SET-40MHZ-IN-2G");
PFLAG(PHY, 0, CHANNEL_WIDTH_SET_40MHZ_80MHZ_IN_5G,
"CHANNEL-WIDTH-SET-40MHZ-80MHZ-IN-5G");
PFLAG(PHY, 0, CHANNEL_WIDTH_SET_160MHZ_IN_5G,
"CHANNEL-WIDTH-SET-160MHZ-IN-5G");
PFLAG(PHY, 0, CHANNEL_WIDTH_SET_80PLUS80_MHZ_IN_5G,
"CHANNEL-WIDTH-SET-80PLUS80-MHZ-IN-5G");
PFLAG(PHY, 0, CHANNEL_WIDTH_SET_RU_MAPPING_IN_2G,
"CHANNEL-WIDTH-SET-RU-MAPPING-IN-2G");
PFLAG(PHY, 0, CHANNEL_WIDTH_SET_RU_MAPPING_IN_5G,
"CHANNEL-WIDTH-SET-RU-MAPPING-IN-5G");
switch (cap[1] & IEEE80211_HE_PHY_CAP1_PREAMBLE_PUNC_RX_MASK) {
case IEEE80211_HE_PHY_CAP1_PREAMBLE_PUNC_RX_80MHZ_ONLY_SECOND_20MHZ:
PRINT("PREAMBLE-PUNC-RX-80MHZ-ONLY-SECOND-20MHZ");
break;
case IEEE80211_HE_PHY_CAP1_PREAMBLE_PUNC_RX_80MHZ_ONLY_SECOND_40MHZ:
PRINT("PREAMBLE-PUNC-RX-80MHZ-ONLY-SECOND-40MHZ");
break;
case IEEE80211_HE_PHY_CAP1_PREAMBLE_PUNC_RX_160MHZ_ONLY_SECOND_20MHZ:
PRINT("PREAMBLE-PUNC-RX-160MHZ-ONLY-SECOND-20MHZ");
break;
case IEEE80211_HE_PHY_CAP1_PREAMBLE_PUNC_RX_160MHZ_ONLY_SECOND_40MHZ:
PRINT("PREAMBLE-PUNC-RX-160MHZ-ONLY-SECOND-40MHZ");
break;
}
PFLAG(PHY, 1, DEVICE_CLASS_A,
"IEEE80211-HE-PHY-CAP1-DEVICE-CLASS-A");
PFLAG(PHY, 1, LDPC_CODING_IN_PAYLOAD,
"LDPC-CODING-IN-PAYLOAD");
PFLAG(PHY, 1, HE_LTF_AND_GI_FOR_HE_PPDUS_0_8US,
"HY-CAP1-HE-LTF-AND-GI-FOR-HE-PPDUS-0-8US");
PRINT("MIDAMBLE-RX-MAX-NSTS-%d", ((cap[2] << 1) | (cap[1] >> 7)) & 0x3);
PFLAG(PHY, 2, NDP_4x_LTF_AND_3_2US, "NDP-4X-LTF-AND-3-2US");
PFLAG(PHY, 2, STBC_TX_UNDER_80MHZ, "STBC-TX-UNDER-80MHZ");
PFLAG(PHY, 2, STBC_RX_UNDER_80MHZ, "STBC-RX-UNDER-80MHZ");
PFLAG(PHY, 2, DOPPLER_TX, "DOPPLER-TX");
PFLAG(PHY, 2, DOPPLER_RX, "DOPPLER-RX");
PFLAG(PHY, 2, UL_MU_FULL_MU_MIMO, "UL-MU-FULL-MU-MIMO");
PFLAG(PHY, 2, UL_MU_PARTIAL_MU_MIMO, "UL-MU-PARTIAL-MU-MIMO");
switch (cap[3] & IEEE80211_HE_PHY_CAP3_DCM_MAX_CONST_TX_MASK) {
case IEEE80211_HE_PHY_CAP3_DCM_MAX_CONST_TX_NO_DCM:
PRINT("DCM-MAX-CONST-TX-NO-DCM");
break;
case IEEE80211_HE_PHY_CAP3_DCM_MAX_CONST_TX_BPSK:
PRINT("DCM-MAX-CONST-TX-BPSK");
break;
case IEEE80211_HE_PHY_CAP3_DCM_MAX_CONST_TX_QPSK:
PRINT("DCM-MAX-CONST-TX-QPSK");
break;
case IEEE80211_HE_PHY_CAP3_DCM_MAX_CONST_TX_16_QAM:
PRINT("DCM-MAX-CONST-TX-16-QAM");
break;
}
PFLAG(PHY, 3, DCM_MAX_TX_NSS_1, "DCM-MAX-TX-NSS-1");
PFLAG(PHY, 3, DCM_MAX_TX_NSS_2, "DCM-MAX-TX-NSS-2");
switch (cap[3] & IEEE80211_HE_PHY_CAP3_DCM_MAX_CONST_RX_MASK) {
case IEEE80211_HE_PHY_CAP3_DCM_MAX_CONST_RX_NO_DCM:
PRINT("DCM-MAX-CONST-RX-NO-DCM");
break;
case IEEE80211_HE_PHY_CAP3_DCM_MAX_CONST_RX_BPSK:
PRINT("DCM-MAX-CONST-RX-BPSK");
break;
case IEEE80211_HE_PHY_CAP3_DCM_MAX_CONST_RX_QPSK:
PRINT("DCM-MAX-CONST-RX-QPSK");
break;
case IEEE80211_HE_PHY_CAP3_DCM_MAX_CONST_RX_16_QAM:
PRINT("DCM-MAX-CONST-RX-16-QAM");
break;
}
PFLAG(PHY, 3, DCM_MAX_RX_NSS_1, "DCM-MAX-RX-NSS-1");
PFLAG(PHY, 3, DCM_MAX_RX_NSS_2, "DCM-MAX-RX-NSS-2");
PFLAG(PHY, 3, RX_PARTIAL_BW_SU_IN_20MHZ_MU,
"RX-PARTIAL-BW-SU-IN-20MHZ-MU");
PFLAG(PHY, 3, SU_BEAMFORMER, "SU-BEAMFORMER");
PFLAG(PHY, 4, SU_BEAMFORMEE, "SU-BEAMFORMEE");
PFLAG(PHY, 4, MU_BEAMFORMER, "MU-BEAMFORMER");
PFLAG_RANGE(PHY, 4, BEAMFORMEE_MAX_STS_UNDER_80MHZ, 0, 1, 4,
"BEAMFORMEE-MAX-STS-UNDER-%d");
PFLAG_RANGE(PHY, 4, BEAMFORMEE_MAX_STS_ABOVE_80MHZ, 0, 1, 4,
"BEAMFORMEE-MAX-STS-ABOVE-%d");
PFLAG_RANGE(PHY, 5, BEAMFORMEE_NUM_SND_DIM_UNDER_80MHZ, 0, 1, 1,
"NUM-SND-DIM-UNDER-80MHZ-%d");
PFLAG_RANGE(PHY, 5, BEAMFORMEE_NUM_SND_DIM_ABOVE_80MHZ, 0, 1, 1,
"NUM-SND-DIM-ABOVE-80MHZ-%d");
PFLAG(PHY, 5, NG16_SU_FEEDBACK, "NG16-SU-FEEDBACK");
PFLAG(PHY, 5, NG16_MU_FEEDBACK, "NG16-MU-FEEDBACK");
PFLAG(PHY, 6, CODEBOOK_SIZE_42_SU, "CODEBOOK-SIZE-42-SU");
PFLAG(PHY, 6, CODEBOOK_SIZE_75_MU, "CODEBOOK-SIZE-75-MU");
PFLAG(PHY, 6, TRIG_SU_BEAMFORMING_FB, "TRIG-SU-BEAMFORMING-FB");
PFLAG(PHY, 6, TRIG_MU_BEAMFORMING_PARTIAL_BW_FB,
"MU-BEAMFORMING-PARTIAL-BW-FB");
PFLAG(PHY, 6, TRIG_CQI_FB, "TRIG-CQI-FB");
PFLAG(PHY, 6, PARTIAL_BW_EXT_RANGE, "PARTIAL-BW-EXT-RANGE");
PFLAG(PHY, 6, PARTIAL_BANDWIDTH_DL_MUMIMO,
"PARTIAL-BANDWIDTH-DL-MUMIMO");
PFLAG(PHY, 6, PPE_THRESHOLD_PRESENT, "PPE-THRESHOLD-PRESENT");
PFLAG(PHY, 7, PSR_BASED_SR, "PSR-BASED-SR");
PFLAG(PHY, 7, POWER_BOOST_FACTOR_SUPP, "POWER-BOOST-FACTOR-SUPP");
PFLAG(PHY, 7, HE_SU_MU_PPDU_4XLTF_AND_08_US_GI,
"HE-SU-MU-PPDU-4XLTF-AND-08-US-GI");
PFLAG_RANGE(PHY, 7, MAX_NC, 0, 1, 1, "MAX-NC-%d");
PFLAG(PHY, 7, STBC_TX_ABOVE_80MHZ, "STBC-TX-ABOVE-80MHZ");
PFLAG(PHY, 7, STBC_RX_ABOVE_80MHZ, "STBC-RX-ABOVE-80MHZ");
PFLAG(PHY, 8, HE_ER_SU_PPDU_4XLTF_AND_08_US_GI,
"HE-ER-SU-PPDU-4XLTF-AND-08-US-GI");
PFLAG(PHY, 8, 20MHZ_IN_40MHZ_HE_PPDU_IN_2G,
"20MHZ-IN-40MHZ-HE-PPDU-IN-2G");
PFLAG(PHY, 8, 20MHZ_IN_160MHZ_HE_PPDU, "20MHZ-IN-160MHZ-HE-PPDU");
PFLAG(PHY, 8, 80MHZ_IN_160MHZ_HE_PPDU, "80MHZ-IN-160MHZ-HE-PPDU");
PFLAG(PHY, 8, HE_ER_SU_1XLTF_AND_08_US_GI,
"HE-ER-SU-1XLTF-AND-08-US-GI");
PFLAG(PHY, 8, MIDAMBLE_RX_TX_2X_AND_1XLTF,
"MIDAMBLE-RX-TX-2X-AND-1XLTF");
switch (cap[8] & IEEE80211_HE_PHY_CAP8_DCM_MAX_RU_MASK) {
case IEEE80211_HE_PHY_CAP8_DCM_MAX_RU_242:
PRINT("DCM-MAX-RU-242");
break;
case IEEE80211_HE_PHY_CAP8_DCM_MAX_RU_484:
PRINT("DCM-MAX-RU-484");
break;
case IEEE80211_HE_PHY_CAP8_DCM_MAX_RU_996:
PRINT("DCM-MAX-RU-996");
break;
case IEEE80211_HE_PHY_CAP8_DCM_MAX_RU_2x996:
PRINT("DCM-MAX-RU-2x996");
break;
}
PFLAG(PHY, 9, LONGER_THAN_16_SIGB_OFDM_SYM,
"LONGER-THAN-16-SIGB-OFDM-SYM");
PFLAG(PHY, 9, NON_TRIGGERED_CQI_FEEDBACK,
"NON-TRIGGERED-CQI-FEEDBACK");
PFLAG(PHY, 9, TX_1024_QAM_LESS_THAN_242_TONE_RU,
"TX-1024-QAM-LESS-THAN-242-TONE-RU");
PFLAG(PHY, 9, RX_1024_QAM_LESS_THAN_242_TONE_RU,
"RX-1024-QAM-LESS-THAN-242-TONE-RU");
PFLAG(PHY, 9, RX_FULL_BW_SU_USING_MU_WITH_COMP_SIGB,
"RX-FULL-BW-SU-USING-MU-WITH-COMP-SIGB");
PFLAG(PHY, 9, RX_FULL_BW_SU_USING_MU_WITH_NON_COMP_SIGB,
"RX-FULL-BW-SU-USING-MU-WITH-NON-COMP-SIGB");
switch (u8_get_bits(cap[9],
IEEE80211_HE_PHY_CAP9_NOMINAL_PKT_PADDING_MASK)) {
case IEEE80211_HE_PHY_CAP9_NOMINAL_PKT_PADDING_0US:
PRINT("NOMINAL-PACKET-PADDING-0US");
break;
case IEEE80211_HE_PHY_CAP9_NOMINAL_PKT_PADDING_8US:
PRINT("NOMINAL-PACKET-PADDING-8US");
break;
case IEEE80211_HE_PHY_CAP9_NOMINAL_PKT_PADDING_16US:
PRINT("NOMINAL-PACKET-PADDING-16US");
break;
}
#undef PFLAG_RANGE_DEFAULT
#undef PFLAG_RANGE
#undef PFLAG
#define PRINT_NSS_SUPP(f, n) \
do { \
int _i; \
u16 v = le16_to_cpu(nss->f); \
p += scnprintf(p, buf_sz + buf - p, n ": %#.4x\n", v); \
for (_i = 0; _i < 8; _i += 2) { \
switch ((v >> _i) & 0x3) { \
case 0: \
PRINT(n "-%d-SUPPORT-0-7", _i / 2); \
break; \
case 1: \
PRINT(n "-%d-SUPPORT-0-9", _i / 2); \
break; \
case 2: \
PRINT(n "-%d-SUPPORT-0-11", _i / 2); \
break; \
case 3: \
PRINT(n "-%d-NOT-SUPPORTED", _i / 2); \
break; \
} \
} \
} while (0)
PRINT_NSS_SUPP(rx_mcs_80, "RX-MCS-80");
PRINT_NSS_SUPP(tx_mcs_80, "TX-MCS-80");
if (cap[0] & IEEE80211_HE_PHY_CAP0_CHANNEL_WIDTH_SET_160MHZ_IN_5G) {
PRINT_NSS_SUPP(rx_mcs_160, "RX-MCS-160");
PRINT_NSS_SUPP(tx_mcs_160, "TX-MCS-160");
}
if (cap[0] &
IEEE80211_HE_PHY_CAP0_CHANNEL_WIDTH_SET_80PLUS80_MHZ_IN_5G) {
PRINT_NSS_SUPP(rx_mcs_80p80, "RX-MCS-80P80");
PRINT_NSS_SUPP(tx_mcs_80p80, "TX-MCS-80P80");
}
#undef PRINT_NSS_SUPP
#undef PRINT
if (!(cap[6] & IEEE80211_HE_PHY_CAP6_PPE_THRESHOLD_PRESENT))
goto out;
p += scnprintf(p, buf_sz + buf - p, "PPE-THRESHOLDS: %#.2x",
hec->ppe_thres[0]);
ppe_size = ieee80211_he_ppe_size(hec->ppe_thres[0], cap);
for (i = 1; i < ppe_size; i++) {
p += scnprintf(p, buf_sz + buf - p, " %#.2x",
hec->ppe_thres[i]);
}
p += scnprintf(p, buf_sz + buf - p, "\n");
out:
ret = simple_read_from_buffer(userbuf, count, ppos, buf, p - buf);
kfree(buf);
return ret;
}
LINK_STA_OPS(he_capa);
static ssize_t link_sta_eht_capa_read(struct file *file, char __user *userbuf,
size_t count, loff_t *ppos)
{
char *buf, *p;
size_t buf_sz = PAGE_SIZE;
struct link_sta_info *link_sta = file->private_data;
struct ieee80211_sta_eht_cap *bec = &link_sta->pub->eht_cap;
struct ieee80211_eht_cap_elem_fixed *fixed = &bec->eht_cap_elem;
struct ieee80211_eht_mcs_nss_supp *nss = &bec->eht_mcs_nss_supp;
u8 *cap;
int i;
ssize_t ret;
static const char *mcs_desc[] = { "0-7", "8-9", "10-11", "12-13"};
buf = kmalloc(buf_sz, GFP_KERNEL);
if (!buf)
return -ENOMEM;
p = buf;
p += scnprintf(p, buf_sz + buf - p, "EHT %ssupported\n",
bec->has_eht ? "" : "not ");
if (!bec->has_eht)
goto out;
p += scnprintf(p, buf_sz + buf - p,
"MAC-CAP: %#.2x %#.2x\n",
fixed->mac_cap_info[0], fixed->mac_cap_info[1]);
p += scnprintf(p, buf_sz + buf - p,
"PHY-CAP: %#.2x %#.2x %#.2x %#.2x %#.2x %#.2x %#.2x %#.2x %#.2x\n",
fixed->phy_cap_info[0], fixed->phy_cap_info[1],
fixed->phy_cap_info[2], fixed->phy_cap_info[3],
fixed->phy_cap_info[4], fixed->phy_cap_info[5],
fixed->phy_cap_info[6], fixed->phy_cap_info[7],
fixed->phy_cap_info[8]);
#define PRINT(fmt, ...) \
p += scnprintf(p, buf_sz + buf - p, "\t\t" fmt "\n", \
##__VA_ARGS__)
#define PFLAG(t, n, a, b) \
do { \
if (cap[n] & IEEE80211_EHT_##t##_CAP##n##_##a) \
PRINT("%s", b); \
} while (0)
cap = fixed->mac_cap_info;
PFLAG(MAC, 0, EPCS_PRIO_ACCESS, "EPCS-PRIO-ACCESS");
PFLAG(MAC, 0, OM_CONTROL, "OM-CONTROL");
PFLAG(MAC, 0, TRIG_TXOP_SHARING_MODE1, "TRIG-TXOP-SHARING-MODE1");
PFLAG(MAC, 0, TRIG_TXOP_SHARING_MODE2, "TRIG-TXOP-SHARING-MODE2");
PFLAG(MAC, 0, RESTRICTED_TWT, "RESTRICTED-TWT");
PFLAG(MAC, 0, SCS_TRAFFIC_DESC, "SCS-TRAFFIC-DESC");
switch ((cap[0] & 0xc0) >> 6) {
case IEEE80211_EHT_MAC_CAP0_MAX_MPDU_LEN_3895:
PRINT("MAX-MPDU-LEN: 3985");
break;
case IEEE80211_EHT_MAC_CAP0_MAX_MPDU_LEN_7991:
PRINT("MAX-MPDU-LEN: 7991");
break;
case IEEE80211_EHT_MAC_CAP0_MAX_MPDU_LEN_11454:
PRINT("MAX-MPDU-LEN: 11454");
break;
}
cap = fixed->phy_cap_info;
PFLAG(PHY, 0, 320MHZ_IN_6GHZ, "320MHZ-IN-6GHZ");
PFLAG(PHY, 0, 242_TONE_RU_GT20MHZ, "242-TONE-RU-GT20MHZ");
PFLAG(PHY, 0, NDP_4_EHT_LFT_32_GI, "NDP-4-EHT-LFT-32-GI");
PFLAG(PHY, 0, PARTIAL_BW_UL_MU_MIMO, "PARTIAL-BW-UL-MU-MIMO");
PFLAG(PHY, 0, SU_BEAMFORMER, "SU-BEAMFORMER");
PFLAG(PHY, 0, SU_BEAMFORMEE, "SU-BEAMFORMEE");
i = cap[0] >> 7;
i |= (cap[1] & 0x3) << 1;
PRINT("BEAMFORMEE-80-NSS: %i", i);
PRINT("BEAMFORMEE-160-NSS: %i", (cap[1] >> 2) & 0x7);
PRINT("BEAMFORMEE-320-NSS: %i", (cap[1] >> 5) & 0x7);
PRINT("SOUNDING-DIM-80-NSS: %i", (cap[2] & 0x7));
PRINT("SOUNDING-DIM-160-NSS: %i", (cap[2] >> 3) & 0x7);
i = cap[2] >> 6;
i |= (cap[3] & 0x1) << 3;
PRINT("SOUNDING-DIM-320-NSS: %i", i);
PFLAG(PHY, 3, NG_16_SU_FEEDBACK, "NG-16-SU-FEEDBACK");
PFLAG(PHY, 3, NG_16_MU_FEEDBACK, "NG-16-MU-FEEDBACK");
PFLAG(PHY, 3, CODEBOOK_4_2_SU_FDBK, "CODEBOOK-4-2-SU-FDBK");
PFLAG(PHY, 3, CODEBOOK_7_5_MU_FDBK, "CODEBOOK-7-5-MU-FDBK");
PFLAG(PHY, 3, TRIG_SU_BF_FDBK, "TRIG-SU-BF-FDBK");
PFLAG(PHY, 3, TRIG_MU_BF_PART_BW_FDBK, "TRIG-MU-BF-PART-BW-FDBK");
PFLAG(PHY, 3, TRIG_CQI_FDBK, "TRIG-CQI-FDBK");
PFLAG(PHY, 4, PART_BW_DL_MU_MIMO, "PART-BW-DL-MU-MIMO");
PFLAG(PHY, 4, PSR_SR_SUPP, "PSR-SR-SUPP");
PFLAG(PHY, 4, POWER_BOOST_FACT_SUPP, "POWER-BOOST-FACT-SUPP");
PFLAG(PHY, 4, EHT_MU_PPDU_4_EHT_LTF_08_GI, "EHT-MU-PPDU-4-EHT-LTF-08-GI");
PRINT("MAX_NC: %i", cap[4] >> 4);
PFLAG(PHY, 5, NON_TRIG_CQI_FEEDBACK, "NON-TRIG-CQI-FEEDBACK");
PFLAG(PHY, 5, TX_LESS_242_TONE_RU_SUPP, "TX-LESS-242-TONE-RU-SUPP");
PFLAG(PHY, 5, RX_LESS_242_TONE_RU_SUPP, "RX-LESS-242-TONE-RU-SUPP");
PFLAG(PHY, 5, PPE_THRESHOLD_PRESENT, "PPE_THRESHOLD_PRESENT");
switch (cap[5] >> 4 & 0x3) {
case IEEE80211_EHT_PHY_CAP5_COMMON_NOMINAL_PKT_PAD_0US:
PRINT("NOMINAL_PKT_PAD: 0us");
break;
case IEEE80211_EHT_PHY_CAP5_COMMON_NOMINAL_PKT_PAD_8US:
PRINT("NOMINAL_PKT_PAD: 8us");
break;
case IEEE80211_EHT_PHY_CAP5_COMMON_NOMINAL_PKT_PAD_16US:
PRINT("NOMINAL_PKT_PAD: 16us");
break;
case IEEE80211_EHT_PHY_CAP5_COMMON_NOMINAL_PKT_PAD_20US:
PRINT("NOMINAL_PKT_PAD: 20us");
break;
}
i = cap[5] >> 6;
i |= cap[6] & 0x7;
PRINT("MAX-NUM-SUPP-EHT-LTF: %i", i);
PFLAG(PHY, 5, SUPP_EXTRA_EHT_LTF, "SUPP-EXTRA-EHT-LTF");
i = (cap[6] >> 3) & 0xf;
PRINT("MCS15-SUPP-MASK: %i", i);
PFLAG(PHY, 6, EHT_DUP_6GHZ_SUPP, "EHT-DUP-6GHZ-SUPP");
PFLAG(PHY, 7, 20MHZ_STA_RX_NDP_WIDER_BW, "20MHZ-STA-RX-NDP-WIDER-BW");
PFLAG(PHY, 7, NON_OFDMA_UL_MU_MIMO_80MHZ, "NON-OFDMA-UL-MU-MIMO-80MHZ");
PFLAG(PHY, 7, NON_OFDMA_UL_MU_MIMO_160MHZ, "NON-OFDMA-UL-MU-MIMO-160MHZ");
PFLAG(PHY, 7, NON_OFDMA_UL_MU_MIMO_320MHZ, "NON-OFDMA-UL-MU-MIMO-320MHZ");
PFLAG(PHY, 7, MU_BEAMFORMER_80MHZ, "MU-BEAMFORMER-80MHZ");
PFLAG(PHY, 7, MU_BEAMFORMER_160MHZ, "MU-BEAMFORMER-160MHZ");
PFLAG(PHY, 7, MU_BEAMFORMER_320MHZ, "MU-BEAMFORMER-320MHZ");
PFLAG(PHY, 7, TB_SOUNDING_FDBK_RATE_LIMIT, "TB-SOUNDING-FDBK-RATE-LIMIT");
PFLAG(PHY, 8, RX_1024QAM_WIDER_BW_DL_OFDMA, "RX-1024QAM-WIDER-BW-DL-OFDMA");
PFLAG(PHY, 8, RX_4096QAM_WIDER_BW_DL_OFDMA, "RX-4096QAM-WIDER-BW-DL-OFDMA");
#undef PFLAG
PRINT(""); /* newline */
if (!(link_sta->pub->he_cap.he_cap_elem.phy_cap_info[0] &
IEEE80211_HE_PHY_CAP0_CHANNEL_WIDTH_SET_MASK_ALL)) {
u8 *mcs_vals = (u8 *)(&nss->only_20mhz);
for (i = 0; i < 4; i++)
PRINT("EHT bw=20 MHz, max NSS for MCS %s: Rx=%u, Tx=%u",
mcs_desc[i],
mcs_vals[i] & 0xf, mcs_vals[i] >> 4);
} else {
u8 *mcs_vals = (u8 *)(&nss->bw._80);
for (i = 0; i < 3; i++)
PRINT("EHT bw <= 80 MHz, max NSS for MCS %s: Rx=%u, Tx=%u",
mcs_desc[i + 1],
mcs_vals[i] & 0xf, mcs_vals[i] >> 4);
mcs_vals = (u8 *)(&nss->bw._160);
for (i = 0; i < 3; i++)
PRINT("EHT bw <= 160 MHz, max NSS for MCS %s: Rx=%u, Tx=%u",
mcs_desc[i + 1],
mcs_vals[i] & 0xf, mcs_vals[i] >> 4);
mcs_vals = (u8 *)(&nss->bw._320);
for (i = 0; i < 3; i++)
PRINT("EHT bw <= 320 MHz, max NSS for MCS %s: Rx=%u, Tx=%u",
mcs_desc[i + 1],
mcs_vals[i] & 0xf, mcs_vals[i] >> 4);
}
if (cap[5] & IEEE80211_EHT_PHY_CAP5_PPE_THRESHOLD_PRESENT) {
u8 ppe_size = ieee80211_eht_ppe_size(bec->eht_ppe_thres[0], cap);
p += scnprintf(p, buf_sz + buf - p, "EHT PPE Thresholds: ");
for (i = 0; i < ppe_size; i++)
p += scnprintf(p, buf_sz + buf - p, "0x%02x ",
bec->eht_ppe_thres[i]);
PRINT(""); /* newline */
}
out:
ret = simple_read_from_buffer(userbuf, count, ppos, buf, p - buf);
kfree(buf);
return ret;
}
LINK_STA_OPS(eht_capa);
#define DEBUGFS_ADD(name) \
debugfs_create_file(#name, 0400, \
sta->debugfs_dir, sta, &sta_ ##name## _ops)
#define DEBUGFS_ADD_COUNTER(name, field) \
debugfs_create_ulong(#name, 0400, sta->debugfs_dir, &sta->field);
void ieee80211_sta_debugfs_add(struct sta_info *sta)
{
struct ieee80211_local *local = sta->local;
struct ieee80211_sub_if_data *sdata = sta->sdata;
struct dentry *stations_dir = sta->sdata->debugfs.subdir_stations;
u8 mac[3*ETH_ALEN];
if (!stations_dir)
return;
snprintf(mac, sizeof(mac), "%pM", sta->sta.addr);
/*
* This might fail due to a race condition:
* When mac80211 unlinks a station, the debugfs entries
* remain, but it is already possible to link a new
* station with the same address which triggers adding
* it to debugfs; therefore, if the old station isn't
* destroyed quickly enough the old station's debugfs
* dir might still be around.
*/
sta->debugfs_dir = debugfs_create_dir(mac, stations_dir);
DEBUGFS_ADD(flags);
DEBUGFS_ADD(aid);
DEBUGFS_ADD(num_ps_buf_frames);
DEBUGFS_ADD(last_seq_ctrl);
DEBUGFS_ADD(agg_status);
/* FIXME: Kept here as the statistics are only done on the deflink */
DEBUGFS_ADD_COUNTER(tx_filtered, deflink.status_stats.filtered);
DEBUGFS_ADD(aqm);
DEBUGFS_ADD(airtime);
if (wiphy_ext_feature_isset(local->hw.wiphy,
NL80211_EXT_FEATURE_AQL))
DEBUGFS_ADD(aql);
debugfs_create_xul("driver_buffered_tids", 0400, sta->debugfs_dir,
&sta->driver_buffered_tids);
drv_sta_add_debugfs(local, sdata, &sta->sta, sta->debugfs_dir);
}
void ieee80211_sta_debugfs_remove(struct sta_info *sta)
{
debugfs_remove_recursive(sta->debugfs_dir);
sta->debugfs_dir = NULL;
}
#undef DEBUGFS_ADD
#undef DEBUGFS_ADD_COUNTER
#define DEBUGFS_ADD(name) \
debugfs_create_file(#name, 0400, \
link_sta->debugfs_dir, link_sta, &link_sta_ ##name## _ops)
#define DEBUGFS_ADD_COUNTER(name, field) \
debugfs_create_ulong(#name, 0400, link_sta->debugfs_dir, &link_sta->field)
void ieee80211_link_sta_debugfs_add(struct link_sta_info *link_sta)
{
if (WARN_ON(!link_sta->sta->debugfs_dir))
return;
/* For non-MLO, leave the files in the main directory. */
if (link_sta->sta->sta.valid_links) {
char link_dir_name[10];
snprintf(link_dir_name, sizeof(link_dir_name),
"link-%d", link_sta->link_id);
link_sta->debugfs_dir =
debugfs_create_dir(link_dir_name,
link_sta->sta->debugfs_dir);
DEBUGFS_ADD(addr);
} else {
if (WARN_ON(link_sta != &link_sta->sta->deflink))
return;
link_sta->debugfs_dir = link_sta->sta->debugfs_dir;
}
DEBUGFS_ADD(ht_capa);
DEBUGFS_ADD(vht_capa);
DEBUGFS_ADD(he_capa);
DEBUGFS_ADD(eht_capa);
DEBUGFS_ADD_COUNTER(rx_duplicates, rx_stats.num_duplicates);
DEBUGFS_ADD_COUNTER(rx_fragments, rx_stats.fragments);
}
void ieee80211_link_sta_debugfs_remove(struct link_sta_info *link_sta)
{
if (!link_sta->debugfs_dir || !link_sta->sta->debugfs_dir) {
link_sta->debugfs_dir = NULL;
return;
}
if (link_sta->debugfs_dir == link_sta->sta->debugfs_dir) {
WARN_ON(link_sta != &link_sta->sta->deflink);
link_sta->sta->debugfs_dir = NULL;
return;
}
debugfs_remove_recursive(link_sta->debugfs_dir);
link_sta->debugfs_dir = NULL;
}
void ieee80211_link_sta_debugfs_drv_add(struct link_sta_info *link_sta)
{
if (WARN_ON(!link_sta->debugfs_dir))
return;
drv_link_sta_add_debugfs(link_sta->sta->local, link_sta->sta->sdata,
link_sta->pub, link_sta->debugfs_dir);
}
void ieee80211_link_sta_debugfs_drv_remove(struct link_sta_info *link_sta)
{
if (!link_sta->debugfs_dir)
return;
if (WARN_ON(link_sta->debugfs_dir == link_sta->sta->debugfs_dir))
return;
/* Recreate the directory excluding the driver data */
debugfs_remove_recursive(link_sta->debugfs_dir);
link_sta->debugfs_dir = NULL;
ieee80211_link_sta_debugfs_add(link_sta);
}
| linux-master | net/mac80211/debugfs_sta.c |
// SPDX-License-Identifier: GPL-2.0-only
/*
* HT handling
*
* Copyright 2003, Jouni Malinen <[email protected]>
* Copyright 2002-2005, Instant802 Networks, Inc.
* Copyright 2005-2006, Devicescape Software, Inc.
* Copyright 2006-2007 Jiri Benc <[email protected]>
* Copyright 2007, Michael Wu <[email protected]>
* Copyright 2007-2010, Intel Corporation
* Copyright(c) 2015-2017 Intel Deutschland GmbH
* Copyright (C) 2018-2022 Intel Corporation
*/
/**
* DOC: RX A-MPDU aggregation
*
* Aggregation on the RX side requires only implementing the
* @ampdu_action callback that is invoked to start/stop any
* block-ack sessions for RX aggregation.
*
* When RX aggregation is started by the peer, the driver is
* notified via @ampdu_action function, with the
* %IEEE80211_AMPDU_RX_START action, and may reject the request
* in which case a negative response is sent to the peer, if it
* accepts it a positive response is sent.
*
* While the session is active, the device/driver are required
* to de-aggregate frames and pass them up one by one to mac80211,
* which will handle the reorder buffer.
*
* When the aggregation session is stopped again by the peer or
* ourselves, the driver's @ampdu_action function will be called
* with the action %IEEE80211_AMPDU_RX_STOP. In this case, the
* call must not fail.
*/
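/*
 * Illustrative sketch (hypothetical driver, not part of mac80211): the
 * minimal ampdu_action handling described above for a driver that lets
 * mac80211 do the reordering. The example_hw_*() helpers are made up.
 */
#if 0	/* example only, never built */
static int example_drv_ampdu_action(struct ieee80211_hw *hw,
				    struct ieee80211_vif *vif,
				    struct ieee80211_ampdu_params *params)
{
	switch (params->action) {
	case IEEE80211_AMPDU_RX_START:
		/* returning an error declines the peer's ADDBA request */
		return example_hw_enable_rx_agg(hw, params->sta, params->tid);
	case IEEE80211_AMPDU_RX_STOP:
		/* must not fail, as noted in the DOC section above */
		example_hw_disable_rx_agg(hw, params->sta, params->tid);
		return 0;
	default:
		return -EOPNOTSUPP;
	}
}
#endif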
#include <linux/ieee80211.h>
#include <linux/slab.h>
#include <linux/export.h>
#include <net/mac80211.h>
#include "ieee80211_i.h"
#include "driver-ops.h"
static void ieee80211_free_tid_rx(struct rcu_head *h)
{
struct tid_ampdu_rx *tid_rx =
container_of(h, struct tid_ampdu_rx, rcu_head);
int i;
for (i = 0; i < tid_rx->buf_size; i++)
__skb_queue_purge(&tid_rx->reorder_buf[i]);
kfree(tid_rx->reorder_buf);
kfree(tid_rx->reorder_time);
kfree(tid_rx);
}
void ___ieee80211_stop_rx_ba_session(struct sta_info *sta, u16 tid,
u16 initiator, u16 reason, bool tx)
{
struct ieee80211_local *local = sta->local;
struct tid_ampdu_rx *tid_rx;
struct ieee80211_ampdu_params params = {
.sta = &sta->sta,
.action = IEEE80211_AMPDU_RX_STOP,
.tid = tid,
.amsdu = false,
.timeout = 0,
.ssn = 0,
};
lockdep_assert_held(&sta->ampdu_mlme.mtx);
tid_rx = rcu_dereference_protected(sta->ampdu_mlme.tid_rx[tid],
lockdep_is_held(&sta->ampdu_mlme.mtx));
if (!test_bit(tid, sta->ampdu_mlme.agg_session_valid))
return;
RCU_INIT_POINTER(sta->ampdu_mlme.tid_rx[tid], NULL);
__clear_bit(tid, sta->ampdu_mlme.agg_session_valid);
ht_dbg(sta->sdata,
"Rx BA session stop requested for %pM tid %u %s reason: %d\n",
sta->sta.addr, tid,
initiator == WLAN_BACK_RECIPIENT ? "recipient" : "initiator",
(int)reason);
if (drv_ampdu_action(local, sta->sdata, &params))
sdata_info(sta->sdata,
"HW problem - can not stop rx aggregation for %pM tid %d\n",
sta->sta.addr, tid);
/* check if this is a self generated aggregation halt */
if (initiator == WLAN_BACK_RECIPIENT && tx)
ieee80211_send_delba(sta->sdata, sta->sta.addr,
tid, WLAN_BACK_RECIPIENT, reason);
/*
* return here in case tid_rx is not assigned - which will happen if
* IEEE80211_HW_SUPPORTS_REORDERING_BUFFER is set.
*/
if (!tid_rx)
return;
del_timer_sync(&tid_rx->session_timer);
/* make sure ieee80211_sta_reorder_release() doesn't re-arm the timer */
spin_lock_bh(&tid_rx->reorder_lock);
tid_rx->removed = true;
spin_unlock_bh(&tid_rx->reorder_lock);
del_timer_sync(&tid_rx->reorder_timer);
call_rcu(&tid_rx->rcu_head, ieee80211_free_tid_rx);
}
void __ieee80211_stop_rx_ba_session(struct sta_info *sta, u16 tid,
u16 initiator, u16 reason, bool tx)
{
mutex_lock(&sta->ampdu_mlme.mtx);
___ieee80211_stop_rx_ba_session(sta, tid, initiator, reason, tx);
mutex_unlock(&sta->ampdu_mlme.mtx);
}
void ieee80211_stop_rx_ba_session(struct ieee80211_vif *vif, u16 ba_rx_bitmap,
const u8 *addr)
{
struct ieee80211_sub_if_data *sdata = vif_to_sdata(vif);
struct sta_info *sta;
int i;
rcu_read_lock();
sta = sta_info_get_bss(sdata, addr);
if (!sta) {
rcu_read_unlock();
return;
}
for (i = 0; i < IEEE80211_NUM_TIDS; i++)
if (ba_rx_bitmap & BIT(i))
set_bit(i, sta->ampdu_mlme.tid_rx_stop_requested);
ieee80211_queue_work(&sta->local->hw, &sta->ampdu_mlme.work);
rcu_read_unlock();
}
EXPORT_SYMBOL(ieee80211_stop_rx_ba_session);
/*
* After accepting the AddBA Request we activated a timer,
* resetting it after each frame that arrives from the originator.
*/
static void sta_rx_agg_session_timer_expired(struct timer_list *t)
{
struct tid_ampdu_rx *tid_rx = from_timer(tid_rx, t, session_timer);
struct sta_info *sta = tid_rx->sta;
u8 tid = tid_rx->tid;
unsigned long timeout;
timeout = tid_rx->last_rx + TU_TO_JIFFIES(tid_rx->timeout);
if (time_is_after_jiffies(timeout)) {
mod_timer(&tid_rx->session_timer, timeout);
return;
}
ht_dbg(sta->sdata, "RX session timer expired on %pM tid %d\n",
sta->sta.addr, tid);
set_bit(tid, sta->ampdu_mlme.tid_rx_timer_expired);
ieee80211_queue_work(&sta->local->hw, &sta->ampdu_mlme.work);
}
static void sta_rx_agg_reorder_timer_expired(struct timer_list *t)
{
struct tid_ampdu_rx *tid_rx = from_timer(tid_rx, t, reorder_timer);
rcu_read_lock();
ieee80211_release_reorder_timeout(tid_rx->sta, tid_rx->tid);
rcu_read_unlock();
}
static void ieee80211_add_addbaext(struct ieee80211_sub_if_data *sdata,
struct sk_buff *skb,
const struct ieee80211_addba_ext_ie *req,
u16 buf_size)
{
struct ieee80211_addba_ext_ie *resp;
u8 *pos;
pos = skb_put_zero(skb, 2 + sizeof(struct ieee80211_addba_ext_ie));
*pos++ = WLAN_EID_ADDBA_EXT;
*pos++ = sizeof(struct ieee80211_addba_ext_ie);
resp = (struct ieee80211_addba_ext_ie *)pos;
resp->data = req->data & IEEE80211_ADDBA_EXT_NO_FRAG;
resp->data |= u8_encode_bits(buf_size >> IEEE80211_ADDBA_EXT_BUF_SIZE_SHIFT,
IEEE80211_ADDBA_EXT_BUF_SIZE_MASK);
}
static void ieee80211_send_addba_resp(struct sta_info *sta, u8 *da, u16 tid,
u8 dialog_token, u16 status, u16 policy,
u16 buf_size, u16 timeout,
const struct ieee80211_addba_ext_ie *addbaext)
{
struct ieee80211_sub_if_data *sdata = sta->sdata;
struct ieee80211_local *local = sdata->local;
struct sk_buff *skb;
struct ieee80211_mgmt *mgmt;
bool amsdu = ieee80211_hw_check(&local->hw, SUPPORTS_AMSDU_IN_AMPDU);
u16 capab;
skb = dev_alloc_skb(sizeof(*mgmt) +
2 + sizeof(struct ieee80211_addba_ext_ie) +
local->hw.extra_tx_headroom);
if (!skb)
return;
skb_reserve(skb, local->hw.extra_tx_headroom);
mgmt = skb_put_zero(skb, 24);
memcpy(mgmt->da, da, ETH_ALEN);
memcpy(mgmt->sa, sdata->vif.addr, ETH_ALEN);
if (sdata->vif.type == NL80211_IFTYPE_AP ||
sdata->vif.type == NL80211_IFTYPE_AP_VLAN ||
sdata->vif.type == NL80211_IFTYPE_MESH_POINT)
memcpy(mgmt->bssid, sdata->vif.addr, ETH_ALEN);
else if (sdata->vif.type == NL80211_IFTYPE_STATION)
memcpy(mgmt->bssid, sdata->vif.cfg.ap_addr, ETH_ALEN);
else if (sdata->vif.type == NL80211_IFTYPE_ADHOC)
memcpy(mgmt->bssid, sdata->u.ibss.bssid, ETH_ALEN);
mgmt->frame_control = cpu_to_le16(IEEE80211_FTYPE_MGMT |
IEEE80211_STYPE_ACTION);
skb_put(skb, 1 + sizeof(mgmt->u.action.u.addba_resp));
mgmt->u.action.category = WLAN_CATEGORY_BACK;
mgmt->u.action.u.addba_resp.action_code = WLAN_ACTION_ADDBA_RESP;
mgmt->u.action.u.addba_resp.dialog_token = dialog_token;
capab = u16_encode_bits(amsdu, IEEE80211_ADDBA_PARAM_AMSDU_MASK);
capab |= u16_encode_bits(policy, IEEE80211_ADDBA_PARAM_POLICY_MASK);
capab |= u16_encode_bits(tid, IEEE80211_ADDBA_PARAM_TID_MASK);
capab |= u16_encode_bits(buf_size, IEEE80211_ADDBA_PARAM_BUF_SIZE_MASK);
mgmt->u.action.u.addba_resp.capab = cpu_to_le16(capab);
mgmt->u.action.u.addba_resp.timeout = cpu_to_le16(timeout);
mgmt->u.action.u.addba_resp.status = cpu_to_le16(status);
if (sta->sta.deflink.he_cap.has_he && addbaext)
ieee80211_add_addbaext(sdata, skb, addbaext, buf_size);
ieee80211_tx_skb(sdata, skb);
}
void ___ieee80211_start_rx_ba_session(struct sta_info *sta,
u8 dialog_token, u16 timeout,
u16 start_seq_num, u16 ba_policy, u16 tid,
u16 buf_size, bool tx, bool auto_seq,
const struct ieee80211_addba_ext_ie *addbaext)
{
struct ieee80211_local *local = sta->sdata->local;
struct tid_ampdu_rx *tid_agg_rx;
struct ieee80211_ampdu_params params = {
.sta = &sta->sta,
.action = IEEE80211_AMPDU_RX_START,
.tid = tid,
.amsdu = false,
.timeout = timeout,
.ssn = start_seq_num,
};
int i, ret = -EOPNOTSUPP;
u16 status = WLAN_STATUS_REQUEST_DECLINED;
u16 max_buf_size;
if (tid >= IEEE80211_FIRST_TSPEC_TSID) {
ht_dbg(sta->sdata,
"STA %pM requests BA session on unsupported tid %d\n",
sta->sta.addr, tid);
goto end;
}
if (!sta->sta.deflink.ht_cap.ht_supported &&
!sta->sta.deflink.he_cap.has_he) {
ht_dbg(sta->sdata,
"STA %pM erroneously requests BA session on tid %d w/o HT\n",
sta->sta.addr, tid);
/* send a response anyway, it's an error case if we get here */
goto end;
}
if (test_sta_flag(sta, WLAN_STA_BLOCK_BA)) {
ht_dbg(sta->sdata,
"Suspend in progress - Denying ADDBA request (%pM tid %d)\n",
sta->sta.addr, tid);
goto end;
}
if (sta->sta.deflink.eht_cap.has_eht)
max_buf_size = IEEE80211_MAX_AMPDU_BUF_EHT;
else if (sta->sta.deflink.he_cap.has_he)
max_buf_size = IEEE80211_MAX_AMPDU_BUF_HE;
else
max_buf_size = IEEE80211_MAX_AMPDU_BUF_HT;
/* sanity check for incoming parameters:
* check if configuration can support the BA policy
* and if the buffer size does not exceed the max value */
/* XXX: check own ht delayed BA capability?? */
if (((ba_policy != 1) &&
(!(sta->sta.deflink.ht_cap.cap & IEEE80211_HT_CAP_DELAY_BA))) ||
(buf_size > max_buf_size)) {
status = WLAN_STATUS_INVALID_QOS_PARAM;
ht_dbg_ratelimited(sta->sdata,
"AddBA Req with bad params from %pM on tid %u. policy %d, buffer size %d\n",
sta->sta.addr, tid, ba_policy, buf_size);
goto end;
}
/* determine default buffer size */
if (buf_size == 0)
buf_size = max_buf_size;
/* make sure the size doesn't exceed the maximum supported by the hw */
if (buf_size > sta->sta.max_rx_aggregation_subframes)
buf_size = sta->sta.max_rx_aggregation_subframes;
params.buf_size = buf_size;
ht_dbg(sta->sdata, "AddBA Req buf_size=%d for %pM\n",
buf_size, sta->sta.addr);
/* examine state machine */
lockdep_assert_held(&sta->ampdu_mlme.mtx);
if (test_bit(tid, sta->ampdu_mlme.agg_session_valid)) {
if (sta->ampdu_mlme.tid_rx_token[tid] == dialog_token) {
struct tid_ampdu_rx *tid_rx;
ht_dbg_ratelimited(sta->sdata,
"updated AddBA Req from %pM on tid %u\n",
sta->sta.addr, tid);
/* We have no API to update the timeout value in the
* driver so reject the timeout update if the timeout
* changed. If it did not change, i.e., no real update,
* just reply with success.
*/
rcu_read_lock();
tid_rx = rcu_dereference(sta->ampdu_mlme.tid_rx[tid]);
if (tid_rx && tid_rx->timeout == timeout)
status = WLAN_STATUS_SUCCESS;
else
status = WLAN_STATUS_REQUEST_DECLINED;
rcu_read_unlock();
goto end;
}
ht_dbg_ratelimited(sta->sdata,
"unexpected AddBA Req from %pM on tid %u\n",
sta->sta.addr, tid);
/* delete existing Rx BA session on the same tid */
___ieee80211_stop_rx_ba_session(sta, tid, WLAN_BACK_RECIPIENT,
WLAN_STATUS_UNSPECIFIED_QOS,
false);
}
if (ieee80211_hw_check(&local->hw, SUPPORTS_REORDERING_BUFFER)) {
ret = drv_ampdu_action(local, sta->sdata, &params);
ht_dbg(sta->sdata,
"Rx A-MPDU request on %pM tid %d result %d\n",
sta->sta.addr, tid, ret);
if (!ret)
status = WLAN_STATUS_SUCCESS;
goto end;
}
/* prepare A-MPDU MLME for Rx aggregation */
tid_agg_rx = kzalloc(sizeof(*tid_agg_rx), GFP_KERNEL);
if (!tid_agg_rx)
goto end;
spin_lock_init(&tid_agg_rx->reorder_lock);
/* rx timer */
timer_setup(&tid_agg_rx->session_timer,
sta_rx_agg_session_timer_expired, TIMER_DEFERRABLE);
/* rx reorder timer */
timer_setup(&tid_agg_rx->reorder_timer,
sta_rx_agg_reorder_timer_expired, 0);
/* prepare reordering buffer */
tid_agg_rx->reorder_buf =
kcalloc(buf_size, sizeof(struct sk_buff_head), GFP_KERNEL);
tid_agg_rx->reorder_time =
kcalloc(buf_size, sizeof(unsigned long), GFP_KERNEL);
if (!tid_agg_rx->reorder_buf || !tid_agg_rx->reorder_time) {
kfree(tid_agg_rx->reorder_buf);
kfree(tid_agg_rx->reorder_time);
kfree(tid_agg_rx);
goto end;
}
for (i = 0; i < buf_size; i++)
__skb_queue_head_init(&tid_agg_rx->reorder_buf[i]);
ret = drv_ampdu_action(local, sta->sdata, &params);
ht_dbg(sta->sdata, "Rx A-MPDU request on %pM tid %d result %d\n",
sta->sta.addr, tid, ret);
if (ret) {
kfree(tid_agg_rx->reorder_buf);
kfree(tid_agg_rx->reorder_time);
kfree(tid_agg_rx);
goto end;
}
/* update data */
tid_agg_rx->ssn = start_seq_num;
tid_agg_rx->head_seq_num = start_seq_num;
tid_agg_rx->buf_size = buf_size;
tid_agg_rx->timeout = timeout;
tid_agg_rx->stored_mpdu_num = 0;
tid_agg_rx->auto_seq = auto_seq;
tid_agg_rx->started = false;
tid_agg_rx->reorder_buf_filtered = 0;
tid_agg_rx->tid = tid;
tid_agg_rx->sta = sta;
status = WLAN_STATUS_SUCCESS;
/* activate it for RX */
rcu_assign_pointer(sta->ampdu_mlme.tid_rx[tid], tid_agg_rx);
if (timeout) {
mod_timer(&tid_agg_rx->session_timer, TU_TO_EXP_TIME(timeout));
tid_agg_rx->last_rx = jiffies;
}
end:
if (status == WLAN_STATUS_SUCCESS) {
__set_bit(tid, sta->ampdu_mlme.agg_session_valid);
__clear_bit(tid, sta->ampdu_mlme.unexpected_agg);
sta->ampdu_mlme.tid_rx_token[tid] = dialog_token;
}
if (tx)
ieee80211_send_addba_resp(sta, sta->sta.addr, tid,
dialog_token, status, 1, buf_size,
timeout, addbaext);
}
static void __ieee80211_start_rx_ba_session(struct sta_info *sta,
u8 dialog_token, u16 timeout,
u16 start_seq_num, u16 ba_policy,
u16 tid, u16 buf_size, bool tx,
bool auto_seq,
const struct ieee80211_addba_ext_ie *addbaext)
{
mutex_lock(&sta->ampdu_mlme.mtx);
___ieee80211_start_rx_ba_session(sta, dialog_token, timeout,
start_seq_num, ba_policy, tid,
buf_size, tx, auto_seq, addbaext);
mutex_unlock(&sta->ampdu_mlme.mtx);
}
void ieee80211_process_addba_request(struct ieee80211_local *local,
struct sta_info *sta,
struct ieee80211_mgmt *mgmt,
size_t len)
{
u16 capab, tid, timeout, ba_policy, buf_size, start_seq_num;
struct ieee802_11_elems *elems = NULL;
u8 dialog_token;
int ies_len;
/* extract session parameters from addba request frame */
dialog_token = mgmt->u.action.u.addba_req.dialog_token;
timeout = le16_to_cpu(mgmt->u.action.u.addba_req.timeout);
start_seq_num =
le16_to_cpu(mgmt->u.action.u.addba_req.start_seq_num) >> 4;
capab = le16_to_cpu(mgmt->u.action.u.addba_req.capab);
ba_policy = (capab & IEEE80211_ADDBA_PARAM_POLICY_MASK) >> 1;
tid = (capab & IEEE80211_ADDBA_PARAM_TID_MASK) >> 2;
buf_size = (capab & IEEE80211_ADDBA_PARAM_BUF_SIZE_MASK) >> 6;
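/*
* The ADDBA capability field packs, from bit 0 upwards: A-MSDU support,
* the block-ack policy (1 = immediate BA), the TID (4 bits) and the
* buffer size (10 bits) - hence the >> 1, >> 2 and >> 6 shifts above.
*/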
ies_len = len - offsetof(struct ieee80211_mgmt,
u.action.u.addba_req.variable);
if (ies_len) {
elems = ieee802_11_parse_elems(mgmt->u.action.u.addba_req.variable,
ies_len, true, NULL);
if (!elems || elems->parse_error)
goto free;
}
if (sta->sta.deflink.eht_cap.has_eht && elems && elems->addba_ext_ie) {
u8 buf_size_1k = u8_get_bits(elems->addba_ext_ie->data,
IEEE80211_ADDBA_EXT_BUF_SIZE_MASK);
buf_size |= buf_size_1k << IEEE80211_ADDBA_EXT_BUF_SIZE_SHIFT;
}
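/*
* For EHT peers the ADDBA Extension element carries the high-order bits
* of the buffer size, so together with the 10-bit field above buffer
* sizes up to 1024 subframes can be negotiated.
*/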
__ieee80211_start_rx_ba_session(sta, dialog_token, timeout,
start_seq_num, ba_policy, tid,
buf_size, true, false,
elems ? elems->addba_ext_ie : NULL);
free:
kfree(elems);
}
void ieee80211_manage_rx_ba_offl(struct ieee80211_vif *vif,
const u8 *addr, unsigned int tid)
{
struct ieee80211_sub_if_data *sdata = vif_to_sdata(vif);
struct ieee80211_local *local = sdata->local;
struct sta_info *sta;
rcu_read_lock();
sta = sta_info_get_bss(sdata, addr);
if (!sta)
goto unlock;
set_bit(tid, sta->ampdu_mlme.tid_rx_manage_offl);
ieee80211_queue_work(&local->hw, &sta->ampdu_mlme.work);
unlock:
rcu_read_unlock();
}
EXPORT_SYMBOL(ieee80211_manage_rx_ba_offl);
void ieee80211_rx_ba_timer_expired(struct ieee80211_vif *vif,
const u8 *addr, unsigned int tid)
{
struct ieee80211_sub_if_data *sdata = vif_to_sdata(vif);
struct ieee80211_local *local = sdata->local;
struct sta_info *sta;
rcu_read_lock();
sta = sta_info_get_bss(sdata, addr);
if (!sta)
goto unlock;
set_bit(tid, sta->ampdu_mlme.tid_rx_timer_expired);
ieee80211_queue_work(&local->hw, &sta->ampdu_mlme.work);
unlock:
rcu_read_unlock();
}
EXPORT_SYMBOL(ieee80211_rx_ba_timer_expired);
| linux-master | net/mac80211/agg-rx.c |
// SPDX-License-Identifier: GPL-2.0-only
/*
* Copyright 2004, Instant802 Networks, Inc.
* Copyright 2013-2014 Intel Mobile Communications GmbH
* Copyright (C) 2022 Intel Corporation
*/
#include <linux/netdevice.h>
#include <linux/skbuff.h>
#include <linux/module.h>
#include <linux/if_arp.h>
#include <linux/types.h>
#include <net/ip.h>
#include <net/pkt_sched.h>
#include <net/mac80211.h>
#include "ieee80211_i.h"
#include "wme.h"
/* Default mapping in classifier to work with default
* queue setup.
*/
const int ieee802_1d_to_ac[8] = {
IEEE80211_AC_BE,
IEEE80211_AC_BK,
IEEE80211_AC_BK,
IEEE80211_AC_BE,
IEEE80211_AC_VI,
IEEE80211_AC_VI,
IEEE80211_AC_VO,
IEEE80211_AC_VO
};
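/*
* i.e. 802.1D user priorities 0 and 3 map to best effort, 1 and 2 to
* background, 4 and 5 to video and 6 and 7 to voice, matching the
* UP-to-AC mapping defined by WMM.
*/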
static int wme_downgrade_ac(struct sk_buff *skb)
{
switch (skb->priority) {
case 6:
case 7:
skb->priority = 5; /* VO -> VI */
return 0;
case 4:
case 5:
skb->priority = 3; /* VI -> BE */
return 0;
case 0:
case 3:
skb->priority = 2; /* BE -> BK */
return 0;
default:
return -1;
}
}
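/*
* Each call downgrades by one access category (VO -> VI -> BE -> BK);
* once the priority is already background (1 or 2) there is nothing
* lower to fall back to and -1 is returned.
*/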
/**
* ieee80211_fix_reserved_tid - return the TID to use if this one is reserved
* @tid: the assumed-reserved TID
*
* Returns: the alternative TID to use, or 0 on error
*/
static inline u8 ieee80211_fix_reserved_tid(u8 tid)
{
switch (tid) {
case 0:
return 3;
case 1:
return 2;
case 2:
return 1;
case 3:
return 0;
case 4:
return 5;
case 5:
return 4;
case 6:
return 7;
case 7:
return 6;
}
return 0;
}
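/*
* The replacement is simply the other TID that maps to the same access
* category (0 <-> 3, 1 <-> 2, 4 <-> 5, 6 <-> 7), so the frame keeps its
* QoS treatment while avoiding the reserved TID.
*/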
static u16 ieee80211_downgrade_queue(struct ieee80211_sub_if_data *sdata,
struct sta_info *sta, struct sk_buff *skb)
{
struct ieee80211_if_managed *ifmgd = &sdata->u.mgd;
/* in case we are a client, verify ACM is not set for this AC */
while (sdata->wmm_acm & BIT(skb->priority)) {
int ac = ieee802_1d_to_ac[skb->priority];
if (ifmgd->tx_tspec[ac].admitted_time &&
skb->priority == ifmgd->tx_tspec[ac].up)
return ac;
if (wme_downgrade_ac(skb)) {
/*
* This should not really happen. The AP has marked all
* lower ACs to require admission control which is not
* a reasonable configuration. Allow the frame to be
* transmitted using AC_BK as a workaround.
*/
break;
}
}
/* Check to see if this is a reserved TID */
if (sta && sta->reserved_tid == skb->priority)
skb->priority = ieee80211_fix_reserved_tid(skb->priority);
/* look up which queue to use for frames with this 1d tag */
return ieee802_1d_to_ac[skb->priority];
}
/* Indicate which queue to use for this fully formed 802.11 frame */
u16 ieee80211_select_queue_80211(struct ieee80211_sub_if_data *sdata,
struct sk_buff *skb,
struct ieee80211_hdr *hdr)
{
struct ieee80211_local *local = sdata->local;
struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb);
u8 *p;
/* Ensure hash is set prior to potential SW encryption */
skb_get_hash(skb);
if ((info->control.flags & IEEE80211_TX_CTRL_DONT_REORDER) ||
local->hw.queues < IEEE80211_NUM_ACS)
return 0;
if (!ieee80211_is_data(hdr->frame_control)) {
skb->priority = 7;
return ieee802_1d_to_ac[skb->priority];
}
if (!ieee80211_is_data_qos(hdr->frame_control)) {
skb->priority = 0;
return ieee802_1d_to_ac[skb->priority];
}
p = ieee80211_get_qos_ctl(hdr);
skb->priority = *p & IEEE80211_QOS_CTL_TAG1D_MASK;
return ieee80211_downgrade_queue(sdata, NULL, skb);
}
u16 ieee80211_select_queue(struct ieee80211_sub_if_data *sdata,
struct sta_info *sta, struct sk_buff *skb)
{
const struct ethhdr *eth = (void *)skb->data;
struct mac80211_qos_map *qos_map;
bool qos;
/* Ensure hash is set prior to potential SW encryption */
skb_get_hash(skb);
/* all mesh/ocb stations are required to support WME */
if ((sdata->vif.type == NL80211_IFTYPE_MESH_POINT &&
!is_multicast_ether_addr(eth->h_dest)) ||
(sdata->vif.type == NL80211_IFTYPE_OCB && sta))
qos = true;
else if (sta)
qos = sta->sta.wme;
else
qos = false;
if (!qos) {
skb->priority = 0; /* required for correct WPA/11i MIC */
return IEEE80211_AC_BE;
}
if (skb->protocol == sdata->control_port_protocol) {
skb->priority = 7;
goto downgrade;
}
/* use the data classifier to determine what 802.1d tag the
* data frame has */
qos_map = rcu_dereference(sdata->qos_map);
skb->priority = cfg80211_classify8021d(skb, qos_map ?
&qos_map->qos_map : NULL);
downgrade:
return ieee80211_downgrade_queue(sdata, sta, skb);
}
/**
* ieee80211_set_qos_hdr - Fill in the QoS header if there is one.
*
* @sdata: local subif
* @skb: packet to be updated
*/
void ieee80211_set_qos_hdr(struct ieee80211_sub_if_data *sdata,
struct sk_buff *skb)
{
struct ieee80211_hdr *hdr = (void *)skb->data;
struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb);
u8 tid = skb->priority & IEEE80211_QOS_CTL_TAG1D_MASK;
u8 flags;
u8 *p;
if (!ieee80211_is_data_qos(hdr->frame_control))
return;
p = ieee80211_get_qos_ctl(hdr);
/* don't overwrite the QoS field of injected frames */
if (info->flags & IEEE80211_TX_CTL_INJECTED) {
/* do take into account Ack policy of injected frames */
if (*p & IEEE80211_QOS_CTL_ACK_POLICY_NOACK)
info->flags |= IEEE80211_TX_CTL_NO_ACK;
return;
}
/* set up the first byte */
/*
* preserve everything but the TID and ACK policy
* (both of which we write here)
*/
flags = *p & ~(IEEE80211_QOS_CTL_TID_MASK |
IEEE80211_QOS_CTL_ACK_POLICY_MASK);
if (is_multicast_ether_addr(hdr->addr1) ||
sdata->noack_map & BIT(tid)) {
flags |= IEEE80211_QOS_CTL_ACK_POLICY_NOACK;
info->flags |= IEEE80211_TX_CTL_NO_ACK;
}
*p = flags | tid;
/* set up the second byte */
p++;
if (ieee80211_vif_is_mesh(&sdata->vif)) {
/* preserve RSPI and Mesh PS Level bit */
*p &= ((IEEE80211_QOS_CTL_RSPI |
IEEE80211_QOS_CTL_MESH_PS_LEVEL) >> 8);
/* Nulls don't have a mesh header (frame body) */
if (!ieee80211_is_qos_nullfunc(hdr->frame_control))
*p |= (IEEE80211_QOS_CTL_MESH_CONTROL_PRESENT >> 8);
} else {
*p = 0;
}
}
| linux-master | net/mac80211/wme.c |
// SPDX-License-Identifier: GPL-2.0-only
/*
* Copyright (c) 2006 Jiri Benc <[email protected]>
* Copyright 2007 Johannes Berg <[email protected]>
* Copyright (C) 2020-2023 Intel Corporation
*/
#include <linux/kernel.h>
#include <linux/device.h>
#include <linux/if.h>
#include <linux/if_ether.h>
#include <linux/interrupt.h>
#include <linux/netdevice.h>
#include <linux/rtnetlink.h>
#include <linux/slab.h>
#include <linux/notifier.h>
#include <net/mac80211.h>
#include <net/cfg80211.h>
#include "ieee80211_i.h"
#include "rate.h"
#include "debugfs.h"
#include "debugfs_netdev.h"
#include "driver-ops.h"
static ssize_t ieee80211_if_read(
void *data,
char __user *userbuf,
size_t count, loff_t *ppos,
ssize_t (*format)(const void *, char *, int))
{
char buf[200];
ssize_t ret = -EINVAL;
read_lock(&dev_base_lock);
ret = (*format)(data, buf, sizeof(buf));
read_unlock(&dev_base_lock);
if (ret >= 0)
ret = simple_read_from_buffer(userbuf, count, ppos, buf, ret);
return ret;
}
static ssize_t ieee80211_if_write(
void *data,
const char __user *userbuf,
size_t count, loff_t *ppos,
ssize_t (*write)(void *, const char *, int))
{
char buf[64];
ssize_t ret;
if (count >= sizeof(buf))
return -E2BIG;
if (copy_from_user(buf, userbuf, count))
return -EFAULT;
buf[count] = '\0';
rtnl_lock();
ret = (*write)(data, buf, count);
rtnl_unlock();
return ret;
}
#define IEEE80211_IF_FMT(name, type, field, format_string) \
static ssize_t ieee80211_if_fmt_##name( \
const type *data, char *buf, \
int buflen) \
{ \
return scnprintf(buf, buflen, format_string, data->field); \
}
#define IEEE80211_IF_FMT_DEC(name, type, field) \
IEEE80211_IF_FMT(name, type, field, "%d\n")
#define IEEE80211_IF_FMT_HEX(name, type, field) \
IEEE80211_IF_FMT(name, type, field, "%#x\n")
#define IEEE80211_IF_FMT_LHEX(name, type, field) \
IEEE80211_IF_FMT(name, type, field, "%#lx\n")
#define IEEE80211_IF_FMT_HEXARRAY(name, type, field) \
static ssize_t ieee80211_if_fmt_##name( \
const type *data, \
char *buf, int buflen) \
{ \
char *p = buf; \
int i; \
for (i = 0; i < sizeof(data->field); i++) { \
p += scnprintf(p, buflen + buf - p, "%.2x ", \
data->field[i]); \
} \
p += scnprintf(p, buflen + buf - p, "\n"); \
return p - buf; \
}
#define IEEE80211_IF_FMT_ATOMIC(name, type, field) \
static ssize_t ieee80211_if_fmt_##name( \
const type *data, \
char *buf, int buflen) \
{ \
return scnprintf(buf, buflen, "%d\n", atomic_read(&data->field));\
}
#define IEEE80211_IF_FMT_MAC(name, type, field) \
static ssize_t ieee80211_if_fmt_##name( \
const type *data, char *buf, \
int buflen) \
{ \
return scnprintf(buf, buflen, "%pM\n", data->field); \
}
#define IEEE80211_IF_FMT_JIFFIES_TO_MS(name, type, field) \
static ssize_t ieee80211_if_fmt_##name( \
const type *data, \
char *buf, int buflen) \
{ \
return scnprintf(buf, buflen, "%d\n", \
jiffies_to_msecs(data->field)); \
}
#define _IEEE80211_IF_FILE_OPS(name, _read, _write) \
static const struct file_operations name##_ops = { \
.read = (_read), \
.write = (_write), \
.open = simple_open, \
.llseek = generic_file_llseek, \
}
#define _IEEE80211_IF_FILE_R_FN(name, type) \
static ssize_t ieee80211_if_read_##name(struct file *file, \
char __user *userbuf, \
size_t count, loff_t *ppos) \
{ \
ssize_t (*fn)(const void *, char *, int) = (void *) \
((ssize_t (*)(const type, char *, int)) \
ieee80211_if_fmt_##name); \
return ieee80211_if_read(file->private_data, \
userbuf, count, ppos, fn); \
}
#define _IEEE80211_IF_FILE_W_FN(name, type) \
static ssize_t ieee80211_if_write_##name(struct file *file, \
const char __user *userbuf, \
size_t count, loff_t *ppos) \
{ \
ssize_t (*fn)(void *, const char *, int) = (void *) \
((ssize_t (*)(type, const char *, int)) \
ieee80211_if_parse_##name); \
return ieee80211_if_write(file->private_data, userbuf, count, \
ppos, fn); \
}
#define IEEE80211_IF_FILE_R(name) \
_IEEE80211_IF_FILE_R_FN(name, struct ieee80211_sub_if_data *) \
_IEEE80211_IF_FILE_OPS(name, ieee80211_if_read_##name, NULL)
#define IEEE80211_IF_FILE_W(name) \
_IEEE80211_IF_FILE_W_FN(name, struct ieee80211_sub_if_data *) \
_IEEE80211_IF_FILE_OPS(name, NULL, ieee80211_if_write_##name)
#define IEEE80211_IF_FILE_RW(name) \
_IEEE80211_IF_FILE_R_FN(name, struct ieee80211_sub_if_data *) \
_IEEE80211_IF_FILE_W_FN(name, struct ieee80211_sub_if_data *) \
_IEEE80211_IF_FILE_OPS(name, ieee80211_if_read_##name, \
ieee80211_if_write_##name)
#define IEEE80211_IF_FILE(name, field, format) \
IEEE80211_IF_FMT_##format(name, struct ieee80211_sub_if_data, field) \
IEEE80211_IF_FILE_R(name)
/* Same but with a link_ prefix in the ops variable name and different type */
#define IEEE80211_IF_LINK_FILE_R(name) \
_IEEE80211_IF_FILE_R_FN(name, struct ieee80211_link_data *) \
_IEEE80211_IF_FILE_OPS(link_##name, ieee80211_if_read_##name, NULL)
#define IEEE80211_IF_LINK_FILE_W(name) \
_IEEE80211_IF_FILE_W_FN(name, struct ieee80211_link_data *) \
_IEEE80211_IF_FILE_OPS(link_##name, NULL, ieee80211_if_write_##name)
#define IEEE80211_IF_LINK_FILE_RW(name) \
_IEEE80211_IF_FILE_R_FN(name, struct ieee80211_link_data *) \
_IEEE80211_IF_FILE_W_FN(name, struct ieee80211_link_data *) \
_IEEE80211_IF_FILE_OPS(link_##name, ieee80211_if_read_##name, \
ieee80211_if_write_##name)
#define IEEE80211_IF_LINK_FILE(name, field, format) \
IEEE80211_IF_FMT_##format(name, struct ieee80211_link_data, field) \
IEEE80211_IF_LINK_FILE_R(name)
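/*
* Example of how these macros compose: IEEE80211_IF_FILE(flags, flags,
* HEX) below expands to a formatter (ieee80211_if_fmt_flags) printing
* sdata->flags in hex plus read-only file operations named flags_ops;
* the _RW variants additionally hook up an ieee80211_if_parse_* handler
* for writes.
*/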
/* common attributes */
IEEE80211_IF_FILE(rc_rateidx_mask_2ghz, rc_rateidx_mask[NL80211_BAND_2GHZ],
HEX);
IEEE80211_IF_FILE(rc_rateidx_mask_5ghz, rc_rateidx_mask[NL80211_BAND_5GHZ],
HEX);
IEEE80211_IF_FILE(rc_rateidx_mcs_mask_2ghz,
rc_rateidx_mcs_mask[NL80211_BAND_2GHZ], HEXARRAY);
IEEE80211_IF_FILE(rc_rateidx_mcs_mask_5ghz,
rc_rateidx_mcs_mask[NL80211_BAND_5GHZ], HEXARRAY);
static ssize_t ieee80211_if_fmt_rc_rateidx_vht_mcs_mask_2ghz(
const struct ieee80211_sub_if_data *sdata,
char *buf, int buflen)
{
int i, len = 0;
const u16 *mask = sdata->rc_rateidx_vht_mcs_mask[NL80211_BAND_2GHZ];
for (i = 0; i < NL80211_VHT_NSS_MAX; i++)
len += scnprintf(buf + len, buflen - len, "%04x ", mask[i]);
len += scnprintf(buf + len, buflen - len, "\n");
return len;
}
IEEE80211_IF_FILE_R(rc_rateidx_vht_mcs_mask_2ghz);
static ssize_t ieee80211_if_fmt_rc_rateidx_vht_mcs_mask_5ghz(
const struct ieee80211_sub_if_data *sdata,
char *buf, int buflen)
{
int i, len = 0;
const u16 *mask = sdata->rc_rateidx_vht_mcs_mask[NL80211_BAND_5GHZ];
for (i = 0; i < NL80211_VHT_NSS_MAX; i++)
len += scnprintf(buf + len, buflen - len, "%04x ", mask[i]);
len += scnprintf(buf + len, buflen - len, "\n");
return len;
}
IEEE80211_IF_FILE_R(rc_rateidx_vht_mcs_mask_5ghz);
IEEE80211_IF_FILE(flags, flags, HEX);
IEEE80211_IF_FILE(state, state, LHEX);
IEEE80211_IF_LINK_FILE(txpower, conf->txpower, DEC);
IEEE80211_IF_LINK_FILE(ap_power_level, ap_power_level, DEC);
IEEE80211_IF_LINK_FILE(user_power_level, user_power_level, DEC);
static ssize_t
ieee80211_if_fmt_hw_queues(const struct ieee80211_sub_if_data *sdata,
char *buf, int buflen)
{
int len;
len = scnprintf(buf, buflen, "AC queues: VO:%d VI:%d BE:%d BK:%d\n",
sdata->vif.hw_queue[IEEE80211_AC_VO],
sdata->vif.hw_queue[IEEE80211_AC_VI],
sdata->vif.hw_queue[IEEE80211_AC_BE],
sdata->vif.hw_queue[IEEE80211_AC_BK]);
if (sdata->vif.type == NL80211_IFTYPE_AP)
len += scnprintf(buf + len, buflen - len, "cab queue: %d\n",
sdata->vif.cab_queue);
return len;
}
IEEE80211_IF_FILE_R(hw_queues);
/* STA attributes */
IEEE80211_IF_FILE(bssid, deflink.u.mgd.bssid, MAC);
IEEE80211_IF_FILE(aid, vif.cfg.aid, DEC);
IEEE80211_IF_FILE(beacon_timeout, u.mgd.beacon_timeout, JIFFIES_TO_MS);
static int ieee80211_set_smps(struct ieee80211_link_data *link,
enum ieee80211_smps_mode smps_mode)
{
struct ieee80211_sub_if_data *sdata = link->sdata;
struct ieee80211_local *local = sdata->local;
int err;
if (sdata->vif.driver_flags & IEEE80211_VIF_DISABLE_SMPS_OVERRIDE)
return -EOPNOTSUPP;
if (!(local->hw.wiphy->features & NL80211_FEATURE_STATIC_SMPS) &&
smps_mode == IEEE80211_SMPS_STATIC)
return -EINVAL;
/* auto should be dynamic if in PS mode */
if (!(local->hw.wiphy->features & NL80211_FEATURE_DYNAMIC_SMPS) &&
(smps_mode == IEEE80211_SMPS_DYNAMIC ||
smps_mode == IEEE80211_SMPS_AUTOMATIC))
return -EINVAL;
if (sdata->vif.type != NL80211_IFTYPE_STATION)
return -EOPNOTSUPP;
sdata_lock(sdata);
err = __ieee80211_request_smps_mgd(link->sdata, link, smps_mode);
sdata_unlock(sdata);
return err;
}
static const char *smps_modes[IEEE80211_SMPS_NUM_MODES] = {
[IEEE80211_SMPS_AUTOMATIC] = "auto",
[IEEE80211_SMPS_OFF] = "off",
[IEEE80211_SMPS_STATIC] = "static",
[IEEE80211_SMPS_DYNAMIC] = "dynamic",
};
static ssize_t ieee80211_if_fmt_smps(const struct ieee80211_link_data *link,
char *buf, int buflen)
{
if (link->sdata->vif.type == NL80211_IFTYPE_STATION)
return snprintf(buf, buflen, "request: %s\nused: %s\n",
smps_modes[link->u.mgd.req_smps],
smps_modes[link->smps_mode]);
return -EINVAL;
}
static ssize_t ieee80211_if_parse_smps(struct ieee80211_link_data *link,
const char *buf, int buflen)
{
enum ieee80211_smps_mode mode;
for (mode = 0; mode < IEEE80211_SMPS_NUM_MODES; mode++) {
if (strncmp(buf, smps_modes[mode], buflen) == 0) {
int err = ieee80211_set_smps(link, mode);
if (!err)
return buflen;
return err;
}
}
return -EINVAL;
}
IEEE80211_IF_LINK_FILE_RW(smps);
static ssize_t ieee80211_if_parse_tkip_mic_test(
struct ieee80211_sub_if_data *sdata, const char *buf, int buflen)
{
struct ieee80211_local *local = sdata->local;
u8 addr[ETH_ALEN];
struct sk_buff *skb;
struct ieee80211_hdr *hdr;
__le16 fc;
if (!mac_pton(buf, addr))
return -EINVAL;
if (!ieee80211_sdata_running(sdata))
return -ENOTCONN;
skb = dev_alloc_skb(local->hw.extra_tx_headroom + 24 + 100);
if (!skb)
return -ENOMEM;
skb_reserve(skb, local->hw.extra_tx_headroom);
hdr = skb_put_zero(skb, 24);
fc = cpu_to_le16(IEEE80211_FTYPE_DATA | IEEE80211_STYPE_DATA);
switch (sdata->vif.type) {
case NL80211_IFTYPE_AP:
fc |= cpu_to_le16(IEEE80211_FCTL_FROMDS);
/* DA BSSID SA */
memcpy(hdr->addr1, addr, ETH_ALEN);
memcpy(hdr->addr2, sdata->vif.addr, ETH_ALEN);
memcpy(hdr->addr3, sdata->vif.addr, ETH_ALEN);
break;
case NL80211_IFTYPE_STATION:
fc |= cpu_to_le16(IEEE80211_FCTL_TODS);
/* BSSID SA DA */
sdata_lock(sdata);
if (!sdata->u.mgd.associated) {
sdata_unlock(sdata);
dev_kfree_skb(skb);
return -ENOTCONN;
}
memcpy(hdr->addr1, sdata->deflink.u.mgd.bssid, ETH_ALEN);
memcpy(hdr->addr2, sdata->vif.addr, ETH_ALEN);
memcpy(hdr->addr3, addr, ETH_ALEN);
sdata_unlock(sdata);
break;
default:
dev_kfree_skb(skb);
return -EOPNOTSUPP;
}
hdr->frame_control = fc;
/*
* Add some length to the test frame to make it look a bit more valid.
* The exact contents do not matter since the recipient is required
* to drop this frame because of the Michael MIC failure.
*/
skb_put_zero(skb, 50);
IEEE80211_SKB_CB(skb)->flags |= IEEE80211_TX_INTFL_TKIP_MIC_FAILURE;
ieee80211_tx_skb(sdata, skb);
return buflen;
}
IEEE80211_IF_FILE_W(tkip_mic_test);
static ssize_t ieee80211_if_parse_beacon_loss(
struct ieee80211_sub_if_data *sdata, const char *buf, int buflen)
{
if (!ieee80211_sdata_running(sdata) || !sdata->vif.cfg.assoc)
return -ENOTCONN;
ieee80211_beacon_loss(&sdata->vif);
return buflen;
}
IEEE80211_IF_FILE_W(beacon_loss);
static ssize_t ieee80211_if_fmt_uapsd_queues(
const struct ieee80211_sub_if_data *sdata, char *buf, int buflen)
{
const struct ieee80211_if_managed *ifmgd = &sdata->u.mgd;
return snprintf(buf, buflen, "0x%x\n", ifmgd->uapsd_queues);
}
static ssize_t ieee80211_if_parse_uapsd_queues(
struct ieee80211_sub_if_data *sdata, const char *buf, int buflen)
{
struct ieee80211_if_managed *ifmgd = &sdata->u.mgd;
u8 val;
int ret;
ret = kstrtou8(buf, 0, &val);
if (ret)
return ret;
if (val & ~IEEE80211_WMM_IE_STA_QOSINFO_AC_MASK)
return -ERANGE;
ifmgd->uapsd_queues = val;
return buflen;
}
IEEE80211_IF_FILE_RW(uapsd_queues);
static ssize_t ieee80211_if_fmt_uapsd_max_sp_len(
const struct ieee80211_sub_if_data *sdata, char *buf, int buflen)
{
const struct ieee80211_if_managed *ifmgd = &sdata->u.mgd;
return snprintf(buf, buflen, "0x%x\n", ifmgd->uapsd_max_sp_len);
}
static ssize_t ieee80211_if_parse_uapsd_max_sp_len(
struct ieee80211_sub_if_data *sdata, const char *buf, int buflen)
{
struct ieee80211_if_managed *ifmgd = &sdata->u.mgd;
unsigned long val;
int ret;
ret = kstrtoul(buf, 0, &val);
if (ret)
return -EINVAL;
if (val & ~IEEE80211_WMM_IE_STA_QOSINFO_SP_MASK)
return -ERANGE;
ifmgd->uapsd_max_sp_len = val;
return buflen;
}
IEEE80211_IF_FILE_RW(uapsd_max_sp_len);
static ssize_t ieee80211_if_fmt_tdls_wider_bw(
const struct ieee80211_sub_if_data *sdata, char *buf, int buflen)
{
const struct ieee80211_if_managed *ifmgd = &sdata->u.mgd;
bool tdls_wider_bw;
tdls_wider_bw = ieee80211_hw_check(&sdata->local->hw, TDLS_WIDER_BW) &&
!ifmgd->tdls_wider_bw_prohibited;
return snprintf(buf, buflen, "%d\n", tdls_wider_bw);
}
static ssize_t ieee80211_if_parse_tdls_wider_bw(
struct ieee80211_sub_if_data *sdata, const char *buf, int buflen)
{
struct ieee80211_if_managed *ifmgd = &sdata->u.mgd;
u8 val;
int ret;
ret = kstrtou8(buf, 0, &val);
if (ret)
return ret;
ifmgd->tdls_wider_bw_prohibited = !val;
return buflen;
}
IEEE80211_IF_FILE_RW(tdls_wider_bw);
/* AP attributes */
IEEE80211_IF_FILE(num_mcast_sta, u.ap.num_mcast_sta, ATOMIC);
IEEE80211_IF_FILE(num_sta_ps, u.ap.ps.num_sta_ps, ATOMIC);
IEEE80211_IF_FILE(dtim_count, u.ap.ps.dtim_count, DEC);
IEEE80211_IF_FILE(num_mcast_sta_vlan, u.vlan.num_mcast_sta, ATOMIC);
static ssize_t ieee80211_if_fmt_num_buffered_multicast(
const struct ieee80211_sub_if_data *sdata, char *buf, int buflen)
{
return scnprintf(buf, buflen, "%u\n",
skb_queue_len(&sdata->u.ap.ps.bc_buf));
}
IEEE80211_IF_FILE_R(num_buffered_multicast);
static ssize_t ieee80211_if_fmt_aqm(
const struct ieee80211_sub_if_data *sdata, char *buf, int buflen)
{
struct ieee80211_local *local = sdata->local;
struct txq_info *txqi;
int len;
if (!sdata->vif.txq)
return 0;
txqi = to_txq_info(sdata->vif.txq);
spin_lock_bh(&local->fq.lock);
rcu_read_lock();
len = scnprintf(buf,
buflen,
"ac backlog-bytes backlog-packets new-flows drops marks overlimit collisions tx-bytes tx-packets\n"
"%u %u %u %u %u %u %u %u %u %u\n",
txqi->txq.ac,
txqi->tin.backlog_bytes,
txqi->tin.backlog_packets,
txqi->tin.flows,
txqi->cstats.drop_count,
txqi->cstats.ecn_mark,
txqi->tin.overlimit,
txqi->tin.collisions,
txqi->tin.tx_bytes,
txqi->tin.tx_packets);
rcu_read_unlock();
spin_unlock_bh(&local->fq.lock);
return len;
}
IEEE80211_IF_FILE_R(aqm);
IEEE80211_IF_FILE(multicast_to_unicast, u.ap.multicast_to_unicast, HEX);
/* IBSS attributes */
static ssize_t ieee80211_if_fmt_tsf(
const struct ieee80211_sub_if_data *sdata, char *buf, int buflen)
{
struct ieee80211_local *local = sdata->local;
u64 tsf;
tsf = drv_get_tsf(local, (struct ieee80211_sub_if_data *)sdata);
return scnprintf(buf, buflen, "0x%016llx\n", (unsigned long long) tsf);
}
static ssize_t ieee80211_if_parse_tsf(
struct ieee80211_sub_if_data *sdata, const char *buf, int buflen)
{
struct ieee80211_local *local = sdata->local;
unsigned long long tsf;
int ret;
int tsf_is_delta = 0;
if (strncmp(buf, "reset", 5) == 0) {
if (local->ops->reset_tsf) {
drv_reset_tsf(local, sdata);
wiphy_info(local->hw.wiphy, "debugfs reset TSF\n");
}
} else {
if (buflen > 10 && buf[1] == '=') {
if (buf[0] == '+')
tsf_is_delta = 1;
else if (buf[0] == '-')
tsf_is_delta = -1;
else
return -EINVAL;
buf += 2;
}
ret = kstrtoull(buf, 10, &tsf);
if (ret < 0)
return ret;
if (tsf_is_delta && local->ops->offset_tsf) {
drv_offset_tsf(local, sdata, tsf_is_delta * tsf);
wiphy_info(local->hw.wiphy,
"debugfs offset TSF by %018lld\n",
tsf_is_delta * tsf);
} else if (local->ops->set_tsf) {
if (tsf_is_delta)
tsf = drv_get_tsf(local, sdata) +
tsf_is_delta * tsf;
drv_set_tsf(local, sdata, tsf);
wiphy_info(local->hw.wiphy,
"debugfs set TSF to %#018llx\n", tsf);
}
}
ieee80211_recalc_dtim(local, sdata);
return buflen;
}
IEEE80211_IF_FILE_RW(tsf);
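/*
* Writes to the tsf file accept "reset" (if the driver implements
* reset_tsf), "+=<delta>"/"-=<delta>" to offset the current TSF, or a
* plain decimal value to set it absolutely; reads print the TSF in hex.
*/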
static ssize_t ieee80211_if_fmt_valid_links(const struct ieee80211_sub_if_data *sdata,
char *buf, int buflen)
{
return snprintf(buf, buflen, "0x%x\n", sdata->vif.valid_links);
}
IEEE80211_IF_FILE_R(valid_links);
static ssize_t ieee80211_if_fmt_active_links(const struct ieee80211_sub_if_data *sdata,
char *buf, int buflen)
{
return snprintf(buf, buflen, "0x%x\n", sdata->vif.active_links);
}
static ssize_t ieee80211_if_parse_active_links(struct ieee80211_sub_if_data *sdata,
const char *buf, int buflen)
{
u16 active_links;
if (kstrtou16(buf, 0, &active_links))
return -EINVAL;
return ieee80211_set_active_links(&sdata->vif, active_links) ?: buflen;
}
IEEE80211_IF_FILE_RW(active_links);
IEEE80211_IF_LINK_FILE(addr, conf->addr, MAC);
#ifdef CONFIG_MAC80211_MESH
IEEE80211_IF_FILE(estab_plinks, u.mesh.estab_plinks, ATOMIC);
/* Mesh stats attributes */
IEEE80211_IF_FILE(fwded_mcast, u.mesh.mshstats.fwded_mcast, DEC);
IEEE80211_IF_FILE(fwded_unicast, u.mesh.mshstats.fwded_unicast, DEC);
IEEE80211_IF_FILE(fwded_frames, u.mesh.mshstats.fwded_frames, DEC);
IEEE80211_IF_FILE(dropped_frames_ttl, u.mesh.mshstats.dropped_frames_ttl, DEC);
IEEE80211_IF_FILE(dropped_frames_no_route,
u.mesh.mshstats.dropped_frames_no_route, DEC);
/* Mesh parameters */
IEEE80211_IF_FILE(dot11MeshMaxRetries,
u.mesh.mshcfg.dot11MeshMaxRetries, DEC);
IEEE80211_IF_FILE(dot11MeshRetryTimeout,
u.mesh.mshcfg.dot11MeshRetryTimeout, DEC);
IEEE80211_IF_FILE(dot11MeshConfirmTimeout,
u.mesh.mshcfg.dot11MeshConfirmTimeout, DEC);
IEEE80211_IF_FILE(dot11MeshHoldingTimeout,
u.mesh.mshcfg.dot11MeshHoldingTimeout, DEC);
IEEE80211_IF_FILE(dot11MeshTTL, u.mesh.mshcfg.dot11MeshTTL, DEC);
IEEE80211_IF_FILE(element_ttl, u.mesh.mshcfg.element_ttl, DEC);
IEEE80211_IF_FILE(auto_open_plinks, u.mesh.mshcfg.auto_open_plinks, DEC);
IEEE80211_IF_FILE(dot11MeshMaxPeerLinks,
u.mesh.mshcfg.dot11MeshMaxPeerLinks, DEC);
IEEE80211_IF_FILE(dot11MeshHWMPactivePathTimeout,
u.mesh.mshcfg.dot11MeshHWMPactivePathTimeout, DEC);
IEEE80211_IF_FILE(dot11MeshHWMPpreqMinInterval,
u.mesh.mshcfg.dot11MeshHWMPpreqMinInterval, DEC);
IEEE80211_IF_FILE(dot11MeshHWMPperrMinInterval,
u.mesh.mshcfg.dot11MeshHWMPperrMinInterval, DEC);
IEEE80211_IF_FILE(dot11MeshHWMPnetDiameterTraversalTime,
u.mesh.mshcfg.dot11MeshHWMPnetDiameterTraversalTime, DEC);
IEEE80211_IF_FILE(dot11MeshHWMPmaxPREQretries,
u.mesh.mshcfg.dot11MeshHWMPmaxPREQretries, DEC);
IEEE80211_IF_FILE(path_refresh_time,
u.mesh.mshcfg.path_refresh_time, DEC);
IEEE80211_IF_FILE(min_discovery_timeout,
u.mesh.mshcfg.min_discovery_timeout, DEC);
IEEE80211_IF_FILE(dot11MeshHWMPRootMode,
u.mesh.mshcfg.dot11MeshHWMPRootMode, DEC);
IEEE80211_IF_FILE(dot11MeshGateAnnouncementProtocol,
u.mesh.mshcfg.dot11MeshGateAnnouncementProtocol, DEC);
IEEE80211_IF_FILE(dot11MeshHWMPRannInterval,
u.mesh.mshcfg.dot11MeshHWMPRannInterval, DEC);
IEEE80211_IF_FILE(dot11MeshForwarding, u.mesh.mshcfg.dot11MeshForwarding, DEC);
IEEE80211_IF_FILE(rssi_threshold, u.mesh.mshcfg.rssi_threshold, DEC);
IEEE80211_IF_FILE(ht_opmode, u.mesh.mshcfg.ht_opmode, DEC);
IEEE80211_IF_FILE(dot11MeshHWMPactivePathToRootTimeout,
u.mesh.mshcfg.dot11MeshHWMPactivePathToRootTimeout, DEC);
IEEE80211_IF_FILE(dot11MeshHWMProotInterval,
u.mesh.mshcfg.dot11MeshHWMProotInterval, DEC);
IEEE80211_IF_FILE(dot11MeshHWMPconfirmationInterval,
u.mesh.mshcfg.dot11MeshHWMPconfirmationInterval, DEC);
IEEE80211_IF_FILE(power_mode, u.mesh.mshcfg.power_mode, DEC);
IEEE80211_IF_FILE(dot11MeshAwakeWindowDuration,
u.mesh.mshcfg.dot11MeshAwakeWindowDuration, DEC);
IEEE80211_IF_FILE(dot11MeshConnectedToMeshGate,
u.mesh.mshcfg.dot11MeshConnectedToMeshGate, DEC);
IEEE80211_IF_FILE(dot11MeshNolearn, u.mesh.mshcfg.dot11MeshNolearn, DEC);
IEEE80211_IF_FILE(dot11MeshConnectedToAuthServer,
u.mesh.mshcfg.dot11MeshConnectedToAuthServer, DEC);
#endif
#define DEBUGFS_ADD_MODE(name, mode) \
debugfs_create_file(#name, mode, sdata->vif.debugfs_dir, \
sdata, &name##_ops)
#define DEBUGFS_ADD_X(_bits, _name, _mode) \
debugfs_create_x##_bits(#_name, _mode, sdata->vif.debugfs_dir, \
&sdata->vif._name)
#define DEBUGFS_ADD_X8(_name, _mode) \
DEBUGFS_ADD_X(8, _name, _mode)
#define DEBUGFS_ADD_X16(_name, _mode) \
DEBUGFS_ADD_X(16, _name, _mode)
#define DEBUGFS_ADD_X32(_name, _mode) \
DEBUGFS_ADD_X(32, _name, _mode)
#define DEBUGFS_ADD(name) DEBUGFS_ADD_MODE(name, 0400)
static void add_common_files(struct ieee80211_sub_if_data *sdata)
{
DEBUGFS_ADD(rc_rateidx_mask_2ghz);
DEBUGFS_ADD(rc_rateidx_mask_5ghz);
DEBUGFS_ADD(rc_rateidx_mcs_mask_2ghz);
DEBUGFS_ADD(rc_rateidx_mcs_mask_5ghz);
DEBUGFS_ADD(rc_rateidx_vht_mcs_mask_2ghz);
DEBUGFS_ADD(rc_rateidx_vht_mcs_mask_5ghz);
DEBUGFS_ADD(hw_queues);
if (sdata->vif.type != NL80211_IFTYPE_P2P_DEVICE &&
sdata->vif.type != NL80211_IFTYPE_NAN)
DEBUGFS_ADD(aqm);
}
static void add_sta_files(struct ieee80211_sub_if_data *sdata)
{
DEBUGFS_ADD(bssid);
DEBUGFS_ADD(aid);
DEBUGFS_ADD(beacon_timeout);
DEBUGFS_ADD_MODE(tkip_mic_test, 0200);
DEBUGFS_ADD_MODE(beacon_loss, 0200);
DEBUGFS_ADD_MODE(uapsd_queues, 0600);
DEBUGFS_ADD_MODE(uapsd_max_sp_len, 0600);
DEBUGFS_ADD_MODE(tdls_wider_bw, 0600);
DEBUGFS_ADD_MODE(valid_links, 0400);
DEBUGFS_ADD_MODE(active_links, 0600);
DEBUGFS_ADD_X16(dormant_links, 0400);
}
static void add_ap_files(struct ieee80211_sub_if_data *sdata)
{
DEBUGFS_ADD(num_mcast_sta);
DEBUGFS_ADD(num_sta_ps);
DEBUGFS_ADD(dtim_count);
DEBUGFS_ADD(num_buffered_multicast);
DEBUGFS_ADD_MODE(tkip_mic_test, 0200);
DEBUGFS_ADD_MODE(multicast_to_unicast, 0600);
}
static void add_vlan_files(struct ieee80211_sub_if_data *sdata)
{
/* add num_mcast_sta_vlan using name num_mcast_sta */
debugfs_create_file("num_mcast_sta", 0400, sdata->vif.debugfs_dir,
sdata, &num_mcast_sta_vlan_ops);
}
static void add_ibss_files(struct ieee80211_sub_if_data *sdata)
{
DEBUGFS_ADD_MODE(tsf, 0600);
}
#ifdef CONFIG_MAC80211_MESH
static void add_mesh_files(struct ieee80211_sub_if_data *sdata)
{
DEBUGFS_ADD_MODE(tsf, 0600);
DEBUGFS_ADD_MODE(estab_plinks, 0400);
}
static void add_mesh_stats(struct ieee80211_sub_if_data *sdata)
{
struct dentry *dir = debugfs_create_dir("mesh_stats",
sdata->vif.debugfs_dir);
#define MESHSTATS_ADD(name)\
debugfs_create_file(#name, 0400, dir, sdata, &name##_ops)
MESHSTATS_ADD(fwded_mcast);
MESHSTATS_ADD(fwded_unicast);
MESHSTATS_ADD(fwded_frames);
MESHSTATS_ADD(dropped_frames_ttl);
MESHSTATS_ADD(dropped_frames_no_route);
#undef MESHSTATS_ADD
}
static void add_mesh_config(struct ieee80211_sub_if_data *sdata)
{
struct dentry *dir = debugfs_create_dir("mesh_config",
sdata->vif.debugfs_dir);
#define MESHPARAMS_ADD(name) \
debugfs_create_file(#name, 0600, dir, sdata, &name##_ops)
MESHPARAMS_ADD(dot11MeshMaxRetries);
MESHPARAMS_ADD(dot11MeshRetryTimeout);
MESHPARAMS_ADD(dot11MeshConfirmTimeout);
MESHPARAMS_ADD(dot11MeshHoldingTimeout);
MESHPARAMS_ADD(dot11MeshTTL);
MESHPARAMS_ADD(element_ttl);
MESHPARAMS_ADD(auto_open_plinks);
MESHPARAMS_ADD(dot11MeshMaxPeerLinks);
MESHPARAMS_ADD(dot11MeshHWMPactivePathTimeout);
MESHPARAMS_ADD(dot11MeshHWMPpreqMinInterval);
MESHPARAMS_ADD(dot11MeshHWMPperrMinInterval);
MESHPARAMS_ADD(dot11MeshHWMPnetDiameterTraversalTime);
MESHPARAMS_ADD(dot11MeshHWMPmaxPREQretries);
MESHPARAMS_ADD(path_refresh_time);
MESHPARAMS_ADD(min_discovery_timeout);
MESHPARAMS_ADD(dot11MeshHWMPRootMode);
MESHPARAMS_ADD(dot11MeshHWMPRannInterval);
MESHPARAMS_ADD(dot11MeshForwarding);
MESHPARAMS_ADD(dot11MeshGateAnnouncementProtocol);
MESHPARAMS_ADD(rssi_threshold);
MESHPARAMS_ADD(ht_opmode);
MESHPARAMS_ADD(dot11MeshHWMPactivePathToRootTimeout);
MESHPARAMS_ADD(dot11MeshHWMProotInterval);
MESHPARAMS_ADD(dot11MeshHWMPconfirmationInterval);
MESHPARAMS_ADD(power_mode);
MESHPARAMS_ADD(dot11MeshAwakeWindowDuration);
MESHPARAMS_ADD(dot11MeshConnectedToMeshGate);
MESHPARAMS_ADD(dot11MeshNolearn);
MESHPARAMS_ADD(dot11MeshConnectedToAuthServer);
#undef MESHPARAMS_ADD
}
#endif
static void add_files(struct ieee80211_sub_if_data *sdata)
{
if (!sdata->vif.debugfs_dir)
return;
DEBUGFS_ADD(flags);
DEBUGFS_ADD(state);
if (sdata->vif.type != NL80211_IFTYPE_MONITOR)
add_common_files(sdata);
switch (sdata->vif.type) {
case NL80211_IFTYPE_MESH_POINT:
#ifdef CONFIG_MAC80211_MESH
add_mesh_files(sdata);
add_mesh_stats(sdata);
add_mesh_config(sdata);
#endif
break;
case NL80211_IFTYPE_STATION:
add_sta_files(sdata);
break;
case NL80211_IFTYPE_ADHOC:
add_ibss_files(sdata);
break;
case NL80211_IFTYPE_AP:
add_ap_files(sdata);
break;
case NL80211_IFTYPE_AP_VLAN:
add_vlan_files(sdata);
break;
default:
break;
}
}
#undef DEBUGFS_ADD_MODE
#undef DEBUGFS_ADD
#define DEBUGFS_ADD_MODE(dentry, name, mode) \
debugfs_create_file(#name, mode, dentry, \
link, &link_##name##_ops)
#define DEBUGFS_ADD(dentry, name) DEBUGFS_ADD_MODE(dentry, name, 0400)
static void add_link_files(struct ieee80211_link_data *link,
struct dentry *dentry)
{
DEBUGFS_ADD(dentry, txpower);
DEBUGFS_ADD(dentry, user_power_level);
DEBUGFS_ADD(dentry, ap_power_level);
switch (link->sdata->vif.type) {
case NL80211_IFTYPE_STATION:
DEBUGFS_ADD_MODE(dentry, smps, 0600);
break;
default:
break;
}
}
void ieee80211_debugfs_add_netdev(struct ieee80211_sub_if_data *sdata)
{
char buf[10+IFNAMSIZ];
sprintf(buf, "netdev:%s", sdata->name);
sdata->vif.debugfs_dir = debugfs_create_dir(buf,
sdata->local->hw.wiphy->debugfsdir);
sdata->debugfs.subdir_stations = debugfs_create_dir("stations",
sdata->vif.debugfs_dir);
add_files(sdata);
if (!(sdata->local->hw.wiphy->flags & WIPHY_FLAG_SUPPORTS_MLO))
add_link_files(&sdata->deflink, sdata->vif.debugfs_dir);
}
void ieee80211_debugfs_remove_netdev(struct ieee80211_sub_if_data *sdata)
{
if (!sdata->vif.debugfs_dir)
return;
debugfs_remove_recursive(sdata->vif.debugfs_dir);
sdata->vif.debugfs_dir = NULL;
sdata->debugfs.subdir_stations = NULL;
}
void ieee80211_debugfs_rename_netdev(struct ieee80211_sub_if_data *sdata)
{
struct dentry *dir;
char buf[10 + IFNAMSIZ];
dir = sdata->vif.debugfs_dir;
if (IS_ERR_OR_NULL(dir))
return;
sprintf(buf, "netdev:%s", sdata->name);
debugfs_rename(dir->d_parent, dir, dir->d_parent, buf);
}
void ieee80211_link_debugfs_add(struct ieee80211_link_data *link)
{
char link_dir_name[10];
if (WARN_ON(!link->sdata->vif.debugfs_dir))
return;
/* For now, this should not be called for non-MLO capable drivers */
if (WARN_ON(!(link->sdata->local->hw.wiphy->flags & WIPHY_FLAG_SUPPORTS_MLO)))
return;
snprintf(link_dir_name, sizeof(link_dir_name),
"link-%d", link->link_id);
link->debugfs_dir =
debugfs_create_dir(link_dir_name,
link->sdata->vif.debugfs_dir);
DEBUGFS_ADD(link->debugfs_dir, addr);
add_link_files(link, link->debugfs_dir);
}
void ieee80211_link_debugfs_remove(struct ieee80211_link_data *link)
{
if (!link->sdata->vif.debugfs_dir || !link->debugfs_dir) {
link->debugfs_dir = NULL;
return;
}
if (link->debugfs_dir == link->sdata->vif.debugfs_dir) {
WARN_ON(link != &link->sdata->deflink);
link->debugfs_dir = NULL;
return;
}
debugfs_remove_recursive(link->debugfs_dir);
link->debugfs_dir = NULL;
}
void ieee80211_link_debugfs_drv_add(struct ieee80211_link_data *link)
{
if (WARN_ON(!link->debugfs_dir))
return;
drv_link_add_debugfs(link->sdata->local, link->sdata,
link->conf, link->debugfs_dir);
}
void ieee80211_link_debugfs_drv_remove(struct ieee80211_link_data *link)
{
if (!link || !link->debugfs_dir)
return;
if (WARN_ON(link->debugfs_dir == link->sdata->vif.debugfs_dir))
return;
/* Recreate the directory excluding the driver data */
debugfs_remove_recursive(link->debugfs_dir);
link->debugfs_dir = NULL;
ieee80211_link_debugfs_add(link);
}
| linux-master | net/mac80211/debugfs_netdev.c |
// SPDX-License-Identifier: GPL-2.0-only
/*
* BSS client mode implementation
* Copyright 2003-2008, Jouni Malinen <[email protected]>
* Copyright 2004, Instant802 Networks, Inc.
* Copyright 2005, Devicescape Software, Inc.
* Copyright 2006-2007 Jiri Benc <[email protected]>
* Copyright 2007, Michael Wu <[email protected]>
* Copyright 2013-2014 Intel Mobile Communications GmbH
* Copyright (C) 2015 - 2017 Intel Deutschland GmbH
* Copyright (C) 2018 - 2023 Intel Corporation
*/
#include <linux/delay.h>
#include <linux/fips.h>
#include <linux/if_ether.h>
#include <linux/skbuff.h>
#include <linux/if_arp.h>
#include <linux/etherdevice.h>
#include <linux/moduleparam.h>
#include <linux/rtnetlink.h>
#include <linux/crc32.h>
#include <linux/slab.h>
#include <linux/export.h>
#include <net/mac80211.h>
#include <asm/unaligned.h>
#include "ieee80211_i.h"
#include "driver-ops.h"
#include "rate.h"
#include "led.h"
#include "fils_aead.h"
#define IEEE80211_AUTH_TIMEOUT (HZ / 5)
#define IEEE80211_AUTH_TIMEOUT_LONG (HZ / 2)
#define IEEE80211_AUTH_TIMEOUT_SHORT (HZ / 10)
#define IEEE80211_AUTH_TIMEOUT_SAE (HZ * 2)
#define IEEE80211_AUTH_MAX_TRIES 3
#define IEEE80211_AUTH_WAIT_ASSOC (HZ * 5)
#define IEEE80211_AUTH_WAIT_SAE_RETRY (HZ * 2)
#define IEEE80211_ASSOC_TIMEOUT (HZ / 5)
#define IEEE80211_ASSOC_TIMEOUT_LONG (HZ / 2)
#define IEEE80211_ASSOC_TIMEOUT_SHORT (HZ / 10)
#define IEEE80211_ASSOC_MAX_TRIES 3
static int max_nullfunc_tries = 2;
module_param(max_nullfunc_tries, int, 0644);
MODULE_PARM_DESC(max_nullfunc_tries,
"Maximum nullfunc tx tries before disconnecting (reason 4).");
static int max_probe_tries = 5;
module_param(max_probe_tries, int, 0644);
MODULE_PARM_DESC(max_probe_tries,
"Maximum probe tries before disconnecting (reason 4).");
/*
* Beacon loss timeout is calculated as N frames times the
* advertised beacon interval. This may need to be somewhat
* higher than what hardware might detect to account for
* delays in the host processing frames. But since we also
* probe on beacon miss before declaring the connection lost,
* default to what we want.
*/
static int beacon_loss_count = 7;
module_param(beacon_loss_count, int, 0644);
MODULE_PARM_DESC(beacon_loss_count,
"Number of beacon intervals before we decide beacon was lost.");
/*
* Time the connection can be idle before we probe
* it to see if we can still talk to the AP.
*/
#define IEEE80211_CONNECTION_IDLE_TIME (30 * HZ)
/*
* Time we wait for a probe response after sending
* a probe request because of beacon loss or for
* checking the connection still works.
*/
static int probe_wait_ms = 500;
module_param(probe_wait_ms, int, 0644);
MODULE_PARM_DESC(probe_wait_ms,
"Maximum time(ms) to wait for probe response"
" before disconnecting (reason 4).");
/*
* How many Beacon frames need to have been used in average signal strength
* before starting to indicate signal change events.
*/
#define IEEE80211_SIGNAL_AVE_MIN_COUNT 4
/*
* Extract from the given disabled subchannel bitmap (raw format
* from the EHT Operation Element) the bits for the subchannel
* we're using right now.
*/
static u16
ieee80211_extract_dis_subch_bmap(const struct ieee80211_eht_operation *eht_oper,
struct cfg80211_chan_def *chandef, u16 bitmap)
{
struct ieee80211_eht_operation_info *info = (void *)eht_oper->optional;
struct cfg80211_chan_def ap_chandef = *chandef;
u32 ap_center_freq, local_center_freq;
u32 ap_bw, local_bw;
int ap_start_freq, local_start_freq;
u16 shift, mask;
if (!(eht_oper->params & IEEE80211_EHT_OPER_INFO_PRESENT) ||
!(eht_oper->params &
IEEE80211_EHT_OPER_DISABLED_SUBCHANNEL_BITMAP_PRESENT))
return 0;
/* set 160/320 supported to get the full AP definition */
ieee80211_chandef_eht_oper(eht_oper, true, true, &ap_chandef);
ap_center_freq = ap_chandef.center_freq1;
ap_bw = 20 * BIT(u8_get_bits(info->control,
IEEE80211_EHT_OPER_CHAN_WIDTH));
ap_start_freq = ap_center_freq - ap_bw / 2;
local_center_freq = chandef->center_freq1;
local_bw = 20 * BIT(ieee80211_chan_width_to_rx_bw(chandef->width));
local_start_freq = local_center_freq - local_bw / 2;
shift = (local_start_freq - ap_start_freq) / 20;
mask = BIT(local_bw / 20) - 1;
return (bitmap >> shift) & mask;
}
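/*
* Worked example: if the AP operates a 160 MHz channel and we use the
* upper 80 MHz of it, then ap_bw = 160, local_bw = 80, shift =
* (local_start_freq - ap_start_freq) / 20 = 4 and mask = 0xf, so the
* upper four bits of the AP's bitmap end up in the low bits returned.
*/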
/*
* Handle the puncturing bitmap, possibly downgrading bandwidth to get a
* valid bitmap.
*/
static void
ieee80211_handle_puncturing_bitmap(struct ieee80211_link_data *link,
const struct ieee80211_eht_operation *eht_oper,
u16 bitmap, u64 *changed)
{
struct cfg80211_chan_def *chandef = &link->conf->chandef;
u16 extracted;
u64 _changed = 0;
if (!changed)
changed = &_changed;
while (chandef->width > NL80211_CHAN_WIDTH_40) {
extracted =
ieee80211_extract_dis_subch_bmap(eht_oper, chandef,
bitmap);
if (cfg80211_valid_disable_subchannel_bitmap(&bitmap,
chandef))
break;
link->u.mgd.conn_flags |=
ieee80211_chandef_downgrade(chandef);
*changed |= BSS_CHANGED_BANDWIDTH;
}
if (chandef->width <= NL80211_CHAN_WIDTH_40)
extracted = 0;
if (link->conf->eht_puncturing != extracted) {
link->conf->eht_puncturing = extracted;
*changed |= BSS_CHANGED_EHT_PUNCTURING;
}
}
/*
* We can have multiple work items (and connection probing)
* scheduling this timer, but we need to take care to only
* reschedule it when it should fire _earlier_ than it was
* asked for before, or if it's not pending right now. This
* function ensures that. Note that it is then required to
* run this function for all timeouts after the first one
* has happened -- the work that runs from this timer will
* do that.
*/
static void run_again(struct ieee80211_sub_if_data *sdata,
unsigned long timeout)
{
sdata_assert_lock(sdata);
if (!timer_pending(&sdata->u.mgd.timer) ||
time_before(timeout, sdata->u.mgd.timer.expires))
mod_timer(&sdata->u.mgd.timer, timeout);
}
void ieee80211_sta_reset_beacon_monitor(struct ieee80211_sub_if_data *sdata)
{
if (sdata->vif.driver_flags & IEEE80211_VIF_BEACON_FILTER)
return;
if (ieee80211_hw_check(&sdata->local->hw, CONNECTION_MONITOR))
return;
mod_timer(&sdata->u.mgd.bcn_mon_timer,
round_jiffies_up(jiffies + sdata->u.mgd.beacon_timeout));
}
void ieee80211_sta_reset_conn_monitor(struct ieee80211_sub_if_data *sdata)
{
struct ieee80211_if_managed *ifmgd = &sdata->u.mgd;
if (unlikely(!ifmgd->associated))
return;
if (ifmgd->probe_send_count)
ifmgd->probe_send_count = 0;
if (ieee80211_hw_check(&sdata->local->hw, CONNECTION_MONITOR))
return;
mod_timer(&ifmgd->conn_mon_timer,
round_jiffies_up(jiffies + IEEE80211_CONNECTION_IDLE_TIME));
}
static int ecw2cw(int ecw)
{
return (1 << ecw) - 1;
}
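/* e.g. ECW 4 gives CW 15 and ECW 10 gives CW 1023, i.e. 2^ECW - 1 */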
static ieee80211_conn_flags_t
ieee80211_determine_chantype(struct ieee80211_sub_if_data *sdata,
struct ieee80211_link_data *link,
ieee80211_conn_flags_t conn_flags,
struct ieee80211_supported_band *sband,
struct ieee80211_channel *channel,
u32 vht_cap_info,
const struct ieee80211_ht_operation *ht_oper,
const struct ieee80211_vht_operation *vht_oper,
const struct ieee80211_he_operation *he_oper,
const struct ieee80211_eht_operation *eht_oper,
const struct ieee80211_s1g_oper_ie *s1g_oper,
struct cfg80211_chan_def *chandef, bool tracking)
{
struct cfg80211_chan_def vht_chandef;
struct ieee80211_sta_ht_cap sta_ht_cap;
ieee80211_conn_flags_t ret;
u32 ht_cfreq;
memset(chandef, 0, sizeof(struct cfg80211_chan_def));
chandef->chan = channel;
chandef->width = NL80211_CHAN_WIDTH_20_NOHT;
chandef->center_freq1 = channel->center_freq;
chandef->freq1_offset = channel->freq_offset;
if (channel->band == NL80211_BAND_6GHZ) {
if (!ieee80211_chandef_he_6ghz_oper(sdata, he_oper, eht_oper,
chandef)) {
mlme_dbg(sdata,
"bad 6 GHz operation, disabling HT/VHT/HE/EHT\n");
ret = IEEE80211_CONN_DISABLE_HT |
IEEE80211_CONN_DISABLE_VHT |
IEEE80211_CONN_DISABLE_HE |
IEEE80211_CONN_DISABLE_EHT;
} else {
ret = 0;
}
vht_chandef = *chandef;
goto out;
} else if (sband->band == NL80211_BAND_S1GHZ) {
if (!ieee80211_chandef_s1g_oper(s1g_oper, chandef)) {
sdata_info(sdata,
"Missing S1G Operation Element? Trying operating == primary\n");
chandef->width = ieee80211_s1g_channel_width(channel);
}
ret = IEEE80211_CONN_DISABLE_HT | IEEE80211_CONN_DISABLE_40MHZ |
IEEE80211_CONN_DISABLE_VHT |
IEEE80211_CONN_DISABLE_80P80MHZ |
IEEE80211_CONN_DISABLE_160MHZ;
goto out;
}
memcpy(&sta_ht_cap, &sband->ht_cap, sizeof(sta_ht_cap));
ieee80211_apply_htcap_overrides(sdata, &sta_ht_cap);
if (!ht_oper || !sta_ht_cap.ht_supported) {
mlme_dbg(sdata, "HT operation missing / HT not supported\n");
ret = IEEE80211_CONN_DISABLE_HT |
IEEE80211_CONN_DISABLE_VHT |
IEEE80211_CONN_DISABLE_HE |
IEEE80211_CONN_DISABLE_EHT;
goto out;
}
chandef->width = NL80211_CHAN_WIDTH_20;
ht_cfreq = ieee80211_channel_to_frequency(ht_oper->primary_chan,
channel->band);
/* check that channel matches the right operating channel */
if (!tracking && channel->center_freq != ht_cfreq) {
/*
* It's possible that some APs are confused here;
* Netgear WNDR3700 sometimes reports 4 higher than
* the actual channel in association responses, but
* since we look at probe response/beacon data here
* it should be OK.
*/
sdata_info(sdata,
"Wrong control channel: center-freq: %d ht-cfreq: %d ht->primary_chan: %d band: %d - Disabling HT\n",
channel->center_freq, ht_cfreq,
ht_oper->primary_chan, channel->band);
ret = IEEE80211_CONN_DISABLE_HT |
IEEE80211_CONN_DISABLE_VHT |
IEEE80211_CONN_DISABLE_HE |
IEEE80211_CONN_DISABLE_EHT;
goto out;
}
/* check 40 MHz support, if we have it */
if (sta_ht_cap.cap & IEEE80211_HT_CAP_SUP_WIDTH_20_40) {
ieee80211_chandef_ht_oper(ht_oper, chandef);
} else {
mlme_dbg(sdata, "40 MHz not supported\n");
/* 40 MHz (and 80 MHz) must be supported for VHT */
ret = IEEE80211_CONN_DISABLE_VHT;
/* also mark 40 MHz disabled */
ret |= IEEE80211_CONN_DISABLE_40MHZ;
goto out;
}
if (!vht_oper || !sband->vht_cap.vht_supported) {
mlme_dbg(sdata, "VHT operation missing / VHT not supported\n");
ret = IEEE80211_CONN_DISABLE_VHT;
goto out;
}
vht_chandef = *chandef;
if (!(conn_flags & IEEE80211_CONN_DISABLE_HE) &&
he_oper &&
(le32_to_cpu(he_oper->he_oper_params) &
IEEE80211_HE_OPERATION_VHT_OPER_INFO)) {
struct ieee80211_vht_operation he_oper_vht_cap;
/*
* Set only first 3 bytes (other 2 aren't used in
* ieee80211_chandef_vht_oper() anyway)
*/
memcpy(&he_oper_vht_cap, he_oper->optional, 3);
he_oper_vht_cap.basic_mcs_set = cpu_to_le16(0);
if (!ieee80211_chandef_vht_oper(&sdata->local->hw, vht_cap_info,
&he_oper_vht_cap, ht_oper,
&vht_chandef)) {
if (!(conn_flags & IEEE80211_CONN_DISABLE_HE))
sdata_info(sdata,
"HE AP VHT information is invalid, disabling HE\n");
ret = IEEE80211_CONN_DISABLE_HE | IEEE80211_CONN_DISABLE_EHT;
goto out;
}
} else if (!ieee80211_chandef_vht_oper(&sdata->local->hw,
vht_cap_info,
vht_oper, ht_oper,
&vht_chandef)) {
if (!(conn_flags & IEEE80211_CONN_DISABLE_VHT))
sdata_info(sdata,
"AP VHT information is invalid, disabling VHT\n");
ret = IEEE80211_CONN_DISABLE_VHT;
goto out;
}
if (!cfg80211_chandef_valid(&vht_chandef)) {
if (!(conn_flags & IEEE80211_CONN_DISABLE_VHT))
sdata_info(sdata,
"AP VHT information is invalid, disabling VHT\n");
ret = IEEE80211_CONN_DISABLE_VHT;
goto out;
}
if (cfg80211_chandef_identical(chandef, &vht_chandef)) {
ret = 0;
goto out;
}
if (!cfg80211_chandef_compatible(chandef, &vht_chandef)) {
if (!(conn_flags & IEEE80211_CONN_DISABLE_VHT))
sdata_info(sdata,
"AP VHT information doesn't match HT, disabling VHT\n");
ret = IEEE80211_CONN_DISABLE_VHT;
goto out;
}
*chandef = vht_chandef;
/*
* handle the case where the EHT operation element carries EHT
* operation information (e.g. when the channel width differs from
* the channel width reported in HT/VHT/HE).
*/
if (eht_oper && (eht_oper->params & IEEE80211_EHT_OPER_INFO_PRESENT)) {
struct cfg80211_chan_def eht_chandef = *chandef;
ieee80211_chandef_eht_oper(eht_oper,
eht_chandef.width ==
NL80211_CHAN_WIDTH_160,
false, &eht_chandef);
if (!cfg80211_chandef_valid(&eht_chandef)) {
if (!(conn_flags & IEEE80211_CONN_DISABLE_EHT))
sdata_info(sdata,
"AP EHT information is invalid, disabling EHT\n");
ret = IEEE80211_CONN_DISABLE_EHT;
goto out;
}
if (!cfg80211_chandef_compatible(chandef, &eht_chandef)) {
if (!(conn_flags & IEEE80211_CONN_DISABLE_EHT))
sdata_info(sdata,
"AP EHT information is incompatible, disabling EHT\n");
ret = IEEE80211_CONN_DISABLE_EHT;
goto out;
}
*chandef = eht_chandef;
}
ret = 0;
out:
/*
* When tracking the current AP, don't do any further checks if the
* new chandef is identical to the one we're currently using for the
* connection. This keeps us from playing ping-pong with regulatory,
* without it the following can happen (for example):
* - connect to an AP with 80 MHz, world regdom allows 80 MHz
* - AP advertises regdom US
* - CRDA loads regdom US with 80 MHz prohibited (old database)
* - the code below detects an unsupported channel, downgrades, and
* we disconnect from the AP in the caller
* - disconnect causes CRDA to reload world regdomain and the game
* starts anew.
* (see https://bugzilla.kernel.org/show_bug.cgi?id=70881)
*
* It seems possible that there are still scenarios with CSA or real
* bandwidth changes where this could happen, but those cases are
* less common and wouldn't completely prevent using the AP.
*/
if (tracking &&
cfg80211_chandef_identical(chandef, &link->conf->chandef))
return ret;
/* don't print the message below for VHT mismatch if VHT is disabled */
if (ret & IEEE80211_CONN_DISABLE_VHT)
vht_chandef = *chandef;
/*
* Ignore the DISABLED flag when we're already connected and only
* tracking the AP's beacon for bandwidth changes - otherwise we
* might get disconnected here if we connect to an AP, update our
* regulatory information based on the AP's country IE and the
* information we have is wrong/outdated and disables the channel
* that we're actually using for the connection to the AP.
*/
while (!cfg80211_chandef_usable(sdata->local->hw.wiphy, chandef,
tracking ? 0 :
IEEE80211_CHAN_DISABLED)) {
if (WARN_ON(chandef->width == NL80211_CHAN_WIDTH_20_NOHT)) {
ret = IEEE80211_CONN_DISABLE_HT |
IEEE80211_CONN_DISABLE_VHT |
IEEE80211_CONN_DISABLE_HE |
IEEE80211_CONN_DISABLE_EHT;
break;
}
ret |= ieee80211_chandef_downgrade(chandef);
}
if (!he_oper || !cfg80211_chandef_usable(sdata->wdev.wiphy, chandef,
IEEE80211_CHAN_NO_HE))
ret |= IEEE80211_CONN_DISABLE_HE | IEEE80211_CONN_DISABLE_EHT;
if (!eht_oper || !cfg80211_chandef_usable(sdata->wdev.wiphy, chandef,
IEEE80211_CHAN_NO_EHT))
ret |= IEEE80211_CONN_DISABLE_EHT;
if (chandef->width != vht_chandef.width && !tracking)
sdata_info(sdata,
"capabilities/regulatory prevented using AP HT/VHT configuration, downgraded\n");
WARN_ON_ONCE(!cfg80211_chandef_valid(chandef));
return ret;
}
static int ieee80211_config_bw(struct ieee80211_link_data *link,
const struct ieee80211_ht_cap *ht_cap,
const struct ieee80211_vht_cap *vht_cap,
const struct ieee80211_ht_operation *ht_oper,
const struct ieee80211_vht_operation *vht_oper,
const struct ieee80211_he_operation *he_oper,
const struct ieee80211_eht_operation *eht_oper,
const struct ieee80211_s1g_oper_ie *s1g_oper,
const u8 *bssid, u64 *changed)
{
struct ieee80211_sub_if_data *sdata = link->sdata;
struct ieee80211_local *local = sdata->local;
struct ieee80211_if_managed *ifmgd = &sdata->u.mgd;
struct ieee80211_channel *chan = link->conf->chandef.chan;
struct ieee80211_supported_band *sband =
local->hw.wiphy->bands[chan->band];
struct cfg80211_chan_def chandef;
u16 ht_opmode;
ieee80211_conn_flags_t flags;
u32 vht_cap_info = 0;
int ret;
/* if HT was/is disabled, don't track any bandwidth changes */
if (link->u.mgd.conn_flags & IEEE80211_CONN_DISABLE_HT || !ht_oper)
return 0;
/* don't check VHT if we associated as non-VHT station */
if (link->u.mgd.conn_flags & IEEE80211_CONN_DISABLE_VHT)
vht_oper = NULL;
/* don't check HE if we associated as non-HE station */
if (link->u.mgd.conn_flags & IEEE80211_CONN_DISABLE_HE ||
!ieee80211_get_he_iftype_cap_vif(sband, &sdata->vif)) {
he_oper = NULL;
eht_oper = NULL;
}
/* don't check EHT if we associated as non-EHT station */
if (link->u.mgd.conn_flags & IEEE80211_CONN_DISABLE_EHT ||
!ieee80211_get_eht_iftype_cap_vif(sband, &sdata->vif))
eht_oper = NULL;
/*
* if bss configuration changed store the new one -
* this may be applicable even if channel is identical
*/
ht_opmode = le16_to_cpu(ht_oper->operation_mode);
if (link->conf->ht_operation_mode != ht_opmode) {
*changed |= BSS_CHANGED_HT;
link->conf->ht_operation_mode = ht_opmode;
}
if (vht_cap)
vht_cap_info = le32_to_cpu(vht_cap->vht_cap_info);
/* calculate new channel (type) based on HT/VHT/HE operation IEs */
flags = ieee80211_determine_chantype(sdata, link,
link->u.mgd.conn_flags,
sband, chan, vht_cap_info,
ht_oper, vht_oper,
he_oper, eht_oper,
s1g_oper, &chandef, true);
/*
* Downgrade the new channel if we associated with restricted
* capabilities. For example, if we associated as a 20 MHz STA
* to a 40 MHz AP (due to regulatory, capabilities or config
* reasons) then switching to a 40 MHz channel now won't do us
* any good -- we couldn't use it with the AP.
*/
if (link->u.mgd.conn_flags & IEEE80211_CONN_DISABLE_80P80MHZ &&
chandef.width == NL80211_CHAN_WIDTH_80P80)
flags |= ieee80211_chandef_downgrade(&chandef);
if (link->u.mgd.conn_flags & IEEE80211_CONN_DISABLE_160MHZ &&
chandef.width == NL80211_CHAN_WIDTH_160)
flags |= ieee80211_chandef_downgrade(&chandef);
if (link->u.mgd.conn_flags & IEEE80211_CONN_DISABLE_40MHZ &&
chandef.width > NL80211_CHAN_WIDTH_20)
flags |= ieee80211_chandef_downgrade(&chandef);
if (cfg80211_chandef_identical(&chandef, &link->conf->chandef))
return 0;
link_info(link,
"AP %pM changed bandwidth, new config is %d.%03d MHz, width %d (%d.%03d/%d MHz)\n",
link->u.mgd.bssid, chandef.chan->center_freq,
chandef.chan->freq_offset, chandef.width,
chandef.center_freq1, chandef.freq1_offset,
chandef.center_freq2);
if (flags != (link->u.mgd.conn_flags &
(IEEE80211_CONN_DISABLE_HT |
IEEE80211_CONN_DISABLE_VHT |
IEEE80211_CONN_DISABLE_HE |
IEEE80211_CONN_DISABLE_EHT |
IEEE80211_CONN_DISABLE_40MHZ |
IEEE80211_CONN_DISABLE_80P80MHZ |
IEEE80211_CONN_DISABLE_160MHZ |
IEEE80211_CONN_DISABLE_320MHZ)) ||
!cfg80211_chandef_valid(&chandef)) {
sdata_info(sdata,
"AP %pM changed caps/bw in a way we can't support (0x%x/0x%x) - disconnect\n",
link->u.mgd.bssid, flags, ifmgd->flags);
return -EINVAL;
}
ret = ieee80211_link_change_bandwidth(link, &chandef, changed);
if (ret) {
sdata_info(sdata,
"AP %pM changed bandwidth to incompatible one - disconnect\n",
link->u.mgd.bssid);
return ret;
}
return 0;
}
/* frame sending functions */
static void ieee80211_add_ht_ie(struct ieee80211_sub_if_data *sdata,
struct sk_buff *skb, u8 ap_ht_param,
struct ieee80211_supported_band *sband,
struct ieee80211_channel *channel,
enum ieee80211_smps_mode smps,
ieee80211_conn_flags_t conn_flags)
{
u8 *pos;
u32 flags = channel->flags;
u16 cap;
struct ieee80211_sta_ht_cap ht_cap;
BUILD_BUG_ON(sizeof(ht_cap) != sizeof(sband->ht_cap));
memcpy(&ht_cap, &sband->ht_cap, sizeof(ht_cap));
ieee80211_apply_htcap_overrides(sdata, &ht_cap);
/* determine capability flags */
cap = ht_cap.cap;
switch (ap_ht_param & IEEE80211_HT_PARAM_CHA_SEC_OFFSET) {
case IEEE80211_HT_PARAM_CHA_SEC_ABOVE:
if (flags & IEEE80211_CHAN_NO_HT40PLUS) {
cap &= ~IEEE80211_HT_CAP_SUP_WIDTH_20_40;
cap &= ~IEEE80211_HT_CAP_SGI_40;
}
break;
case IEEE80211_HT_PARAM_CHA_SEC_BELOW:
if (flags & IEEE80211_CHAN_NO_HT40MINUS) {
cap &= ~IEEE80211_HT_CAP_SUP_WIDTH_20_40;
cap &= ~IEEE80211_HT_CAP_SGI_40;
}
break;
}
/*
* If 40 MHz was disabled associate as though we weren't
* capable of 40 MHz -- some broken APs will never fall
* back to trying to transmit in 20 MHz.
*/
if (conn_flags & IEEE80211_CONN_DISABLE_40MHZ) {
cap &= ~IEEE80211_HT_CAP_SUP_WIDTH_20_40;
cap &= ~IEEE80211_HT_CAP_SGI_40;
}
/* set SM PS mode properly */
cap &= ~IEEE80211_HT_CAP_SM_PS;
switch (smps) {
case IEEE80211_SMPS_AUTOMATIC:
case IEEE80211_SMPS_NUM_MODES:
WARN_ON(1);
fallthrough;
case IEEE80211_SMPS_OFF:
cap |= WLAN_HT_CAP_SM_PS_DISABLED <<
IEEE80211_HT_CAP_SM_PS_SHIFT;
break;
case IEEE80211_SMPS_STATIC:
cap |= WLAN_HT_CAP_SM_PS_STATIC <<
IEEE80211_HT_CAP_SM_PS_SHIFT;
break;
case IEEE80211_SMPS_DYNAMIC:
cap |= WLAN_HT_CAP_SM_PS_DYNAMIC <<
IEEE80211_HT_CAP_SM_PS_SHIFT;
break;
}
/* reserve and fill IE */
pos = skb_put(skb, sizeof(struct ieee80211_ht_cap) + 2);
ieee80211_ie_build_ht_cap(pos, &ht_cap, cap);
}
/* This function determines the VHT capability flags for the association
* and builds the IE.
* Note - the function returns true if we become the MU-MIMO owner.
*/
static bool ieee80211_add_vht_ie(struct ieee80211_sub_if_data *sdata,
struct sk_buff *skb,
struct ieee80211_supported_band *sband,
struct ieee80211_vht_cap *ap_vht_cap,
ieee80211_conn_flags_t conn_flags)
{
struct ieee80211_local *local = sdata->local;
u8 *pos;
u32 cap;
struct ieee80211_sta_vht_cap vht_cap;
u32 mask, ap_bf_sts, our_bf_sts;
bool mu_mimo_owner = false;
BUILD_BUG_ON(sizeof(vht_cap) != sizeof(sband->vht_cap));
memcpy(&vht_cap, &sband->vht_cap, sizeof(vht_cap));
ieee80211_apply_vhtcap_overrides(sdata, &vht_cap);
/* determine capability flags */
cap = vht_cap.cap;
if (conn_flags & IEEE80211_CONN_DISABLE_80P80MHZ) {
u32 bw = cap & IEEE80211_VHT_CAP_SUPP_CHAN_WIDTH_MASK;
cap &= ~IEEE80211_VHT_CAP_SUPP_CHAN_WIDTH_MASK;
if (bw == IEEE80211_VHT_CAP_SUPP_CHAN_WIDTH_160MHZ ||
bw == IEEE80211_VHT_CAP_SUPP_CHAN_WIDTH_160_80PLUS80MHZ)
cap |= IEEE80211_VHT_CAP_SUPP_CHAN_WIDTH_160MHZ;
}
if (conn_flags & IEEE80211_CONN_DISABLE_160MHZ) {
cap &= ~IEEE80211_VHT_CAP_SHORT_GI_160;
cap &= ~IEEE80211_VHT_CAP_SUPP_CHAN_WIDTH_MASK;
}
/*
* Some APs apparently get confused if our capabilities are better
* than theirs, so restrict what we advertise in the assoc request.
*/
if (!(ap_vht_cap->vht_cap_info &
cpu_to_le32(IEEE80211_VHT_CAP_SU_BEAMFORMER_CAPABLE)))
cap &= ~(IEEE80211_VHT_CAP_SU_BEAMFORMEE_CAPABLE |
IEEE80211_VHT_CAP_MU_BEAMFORMEE_CAPABLE);
else if (!(ap_vht_cap->vht_cap_info &
cpu_to_le32(IEEE80211_VHT_CAP_MU_BEAMFORMER_CAPABLE)))
cap &= ~IEEE80211_VHT_CAP_MU_BEAMFORMEE_CAPABLE;
/*
* If some other vif is using the MU-MIMO capability we cannot associate
* using MU-MIMO - this will lead to contradictions in the group-id
* mechanism.
* Ownership is established at association request time, in order to
* avoid simultaneous MU-MIMO associations.
*/
if (cap & IEEE80211_VHT_CAP_MU_BEAMFORMEE_CAPABLE) {
bool disable_mu_mimo = false;
struct ieee80211_sub_if_data *other;
list_for_each_entry_rcu(other, &local->interfaces, list) {
if (other->vif.bss_conf.mu_mimo_owner) {
disable_mu_mimo = true;
break;
}
}
if (disable_mu_mimo)
cap &= ~IEEE80211_VHT_CAP_MU_BEAMFORMEE_CAPABLE;
else
mu_mimo_owner = true;
}
mask = IEEE80211_VHT_CAP_BEAMFORMEE_STS_MASK;
ap_bf_sts = le32_to_cpu(ap_vht_cap->vht_cap_info) & mask;
our_bf_sts = cap & mask;
if (ap_bf_sts < our_bf_sts) {
cap &= ~mask;
cap |= ap_bf_sts;
}
/* reserve and fill IE */
pos = skb_put(skb, sizeof(struct ieee80211_vht_cap) + 2);
ieee80211_ie_build_vht_cap(pos, &vht_cap, cap);
return mu_mimo_owner;
}
/* This function determines HE capability flags for the association
* and builds the IE.
*/
static void ieee80211_add_he_ie(struct ieee80211_sub_if_data *sdata,
struct sk_buff *skb,
struct ieee80211_supported_band *sband,
enum ieee80211_smps_mode smps_mode,
ieee80211_conn_flags_t conn_flags)
{
u8 *pos, *pre_he_pos;
const struct ieee80211_sta_he_cap *he_cap;
u8 he_cap_size;
he_cap = ieee80211_get_he_iftype_cap_vif(sband, &sdata->vif);
if (WARN_ON(!he_cap))
return;
/* get a max size estimate */
he_cap_size =
2 + 1 + sizeof(he_cap->he_cap_elem) +
ieee80211_he_mcs_nss_size(&he_cap->he_cap_elem) +
ieee80211_he_ppe_size(he_cap->ppe_thres[0],
he_cap->he_cap_elem.phy_cap_info);
pos = skb_put(skb, he_cap_size);
pre_he_pos = pos;
pos = ieee80211_ie_build_he_cap(conn_flags,
pos, he_cap, pos + he_cap_size);
/* trim excess if any */
skb_trim(skb, skb->len - (pre_he_pos + he_cap_size - pos));
ieee80211_ie_build_he_6ghz_cap(sdata, smps_mode, skb);
}
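/* This function builds the EHT capabilities element for the association,
* sized according to the EHT MCS/NSS set and the PPE thresholds.
*/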
static void ieee80211_add_eht_ie(struct ieee80211_sub_if_data *sdata,
struct sk_buff *skb,
struct ieee80211_supported_band *sband)
{
u8 *pos;
const struct ieee80211_sta_he_cap *he_cap;
const struct ieee80211_sta_eht_cap *eht_cap;
u8 eht_cap_size;
he_cap = ieee80211_get_he_iftype_cap_vif(sband, &sdata->vif);
eht_cap = ieee80211_get_eht_iftype_cap_vif(sband, &sdata->vif);
/*
* The EHT capabilities element is only added if the HE capabilities
* element was added, so assume that 'he_cap' is valid and don't check it.
*/
if (WARN_ON(!he_cap || !eht_cap))
return;
eht_cap_size =
2 + 1 + sizeof(eht_cap->eht_cap_elem) +
ieee80211_eht_mcs_nss_size(&he_cap->he_cap_elem,
&eht_cap->eht_cap_elem,
false) +
ieee80211_eht_ppe_size(eht_cap->eht_ppe_thres[0],
eht_cap->eht_cap_elem.phy_cap_info);
pos = skb_put(skb, eht_cap_size);
ieee80211_ie_build_eht_cap(pos, he_cap, eht_cap, pos + eht_cap_size,
false);
}
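/* Add the Supported Rates element (and, if needed, the Extended Supported
* Rates element) to the association request, advertising only rates that
* both we and the AP support, or all of our rates if the AP advertised none.
*/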
static void ieee80211_assoc_add_rates(struct sk_buff *skb,
enum nl80211_chan_width width,
struct ieee80211_supported_band *sband,
struct ieee80211_mgd_assoc_data *assoc_data)
{
unsigned int shift = ieee80211_chanwidth_get_shift(width);
unsigned int rates_len, supp_rates_len;
u32 rates = 0;
int i, count;
u8 *pos;
if (assoc_data->supp_rates_len) {
/*
* Get all rates supported by the device and the AP as
* some APs don't like getting a superset of their rates
* in the association request (e.g. D-Link DAP 1353 in
* b-only mode)...
*/
rates_len = ieee80211_parse_bitrates(width, sband,
assoc_data->supp_rates,
assoc_data->supp_rates_len,
&rates);
} else {
/*
* In case the AP does not provide any supported rates information
* before association, we send information element(s) with
* all rates that we support.
*/
rates_len = sband->n_bitrates;
for (i = 0; i < sband->n_bitrates; i++)
rates |= BIT(i);
}
supp_rates_len = rates_len;
if (supp_rates_len > 8)
supp_rates_len = 8;
pos = skb_put(skb, supp_rates_len + 2);
*pos++ = WLAN_EID_SUPP_RATES;
*pos++ = supp_rates_len;
count = 0;
for (i = 0; i < sband->n_bitrates; i++) {
if (BIT(i) & rates) {
int rate = DIV_ROUND_UP(sband->bitrates[i].bitrate,
5 * (1 << shift));
*pos++ = (u8)rate;
if (++count == 8)
break;
}
}
if (rates_len > count) {
pos = skb_put(skb, rates_len - count + 2);
*pos++ = WLAN_EID_EXT_SUPP_RATES;
*pos++ = rates_len - count;
for (i++; i < sband->n_bitrates; i++) {
if (BIT(i) & rates) {
int rate;
rate = DIV_ROUND_UP(sband->bitrates[i].bitrate,
5 * (1 << shift));
*pos++ = (u8)rate;
}
}
}
}
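/* Copy the caller-supplied extra elements that must appear before the HT
* capabilities element into the frame and return the new offset into the
* extra-elements buffer.
*/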
static size_t ieee80211_add_before_ht_elems(struct sk_buff *skb,
const u8 *elems,
size_t elems_len,
size_t offset)
{
size_t noffset;
static const u8 before_ht[] = {
WLAN_EID_SSID,
WLAN_EID_SUPP_RATES,
WLAN_EID_EXT_SUPP_RATES,
WLAN_EID_PWR_CAPABILITY,
WLAN_EID_SUPPORTED_CHANNELS,
WLAN_EID_RSN,
WLAN_EID_QOS_CAPA,
WLAN_EID_RRM_ENABLED_CAPABILITIES,
WLAN_EID_MOBILITY_DOMAIN,
WLAN_EID_FAST_BSS_TRANSITION, /* reassoc only */
WLAN_EID_RIC_DATA, /* reassoc only */
WLAN_EID_SUPPORTED_REGULATORY_CLASSES,
};
static const u8 after_ric[] = {
WLAN_EID_SUPPORTED_REGULATORY_CLASSES,
WLAN_EID_HT_CAPABILITY,
WLAN_EID_BSS_COEX_2040,
/* luckily this is almost always there */
WLAN_EID_EXT_CAPABILITY,
WLAN_EID_QOS_TRAFFIC_CAPA,
WLAN_EID_TIM_BCAST_REQ,
WLAN_EID_INTERWORKING,
/* 60 GHz (Multi-band, DMG, MMS) can't happen */
WLAN_EID_VHT_CAPABILITY,
WLAN_EID_OPMODE_NOTIF,
};
if (!elems_len)
return offset;
noffset = ieee80211_ie_split_ric(elems, elems_len,
before_ht,
ARRAY_SIZE(before_ht),
after_ric,
ARRAY_SIZE(after_ric),
offset);
skb_put_data(skb, elems + offset, noffset - offset);
return noffset;
}
static size_t ieee80211_add_before_vht_elems(struct sk_buff *skb,
const u8 *elems,
size_t elems_len,
size_t offset)
{
static const u8 before_vht[] = {
/*
* no need to list the ones split off before HT
* or generated here
*/
WLAN_EID_BSS_COEX_2040,
WLAN_EID_EXT_CAPABILITY,
WLAN_EID_QOS_TRAFFIC_CAPA,
WLAN_EID_TIM_BCAST_REQ,
WLAN_EID_INTERWORKING,
/* 60 GHz (Multi-band, DMG, MMS) can't happen */
};
size_t noffset;
if (!elems_len)
return offset;
/* RIC already taken care of in ieee80211_add_before_ht_elems() */
noffset = ieee80211_ie_split(elems, elems_len,
before_vht, ARRAY_SIZE(before_vht),
offset);
skb_put_data(skb, elems + offset, noffset - offset);
return noffset;
}
static size_t ieee80211_add_before_he_elems(struct sk_buff *skb,
const u8 *elems,
size_t elems_len,
size_t offset)
{
static const u8 before_he[] = {
/*
* no need to list the ones split off before VHT
* or generated here
*/
WLAN_EID_OPMODE_NOTIF,
WLAN_EID_EXTENSION, WLAN_EID_EXT_FUTURE_CHAN_GUIDANCE,
/* 11ai elements */
WLAN_EID_EXTENSION, WLAN_EID_EXT_FILS_SESSION,
WLAN_EID_EXTENSION, WLAN_EID_EXT_FILS_PUBLIC_KEY,
WLAN_EID_EXTENSION, WLAN_EID_EXT_FILS_KEY_CONFIRM,
WLAN_EID_EXTENSION, WLAN_EID_EXT_FILS_HLP_CONTAINER,
WLAN_EID_EXTENSION, WLAN_EID_EXT_FILS_IP_ADDR_ASSIGN,
/* TODO: add 11ah/11aj/11ak elements */
};
size_t noffset;
if (!elems_len)
return offset;
/* RIC already taken care of in ieee80211_add_before_ht_elems() */
noffset = ieee80211_ie_split(elems, elems_len,
before_he, ARRAY_SIZE(before_he),
offset);
skb_put_data(skb, elems + offset, noffset - offset);
return noffset;
}
#define PRESENT_ELEMS_MAX 8
#define PRESENT_ELEM_EXT_OFFS 0x100
static void ieee80211_assoc_add_ml_elem(struct ieee80211_sub_if_data *sdata,
struct sk_buff *skb, u16 capab,
const struct element *ext_capa,
const u16 *present_elems);
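/* Build the per-link body of the (re)association request: rates, power
* capability, supported channels, any extra elements in their proper
* positions, and the HT/VHT/HE/EHT capabilities as permitted by the
* connection flags. Records the IDs of the elements added in
* present_elems and may update *capab and the link's conn_flags.
*/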
static size_t ieee80211_assoc_link_elems(struct ieee80211_sub_if_data *sdata,
struct sk_buff *skb, u16 *capab,
const struct element *ext_capa,
const u8 *extra_elems,
size_t extra_elems_len,
unsigned int link_id,
struct ieee80211_link_data *link,
u16 *present_elems)
{
enum nl80211_iftype iftype = ieee80211_vif_type_p2p(&sdata->vif);
struct ieee80211_if_managed *ifmgd = &sdata->u.mgd;
struct ieee80211_mgd_assoc_data *assoc_data = ifmgd->assoc_data;
struct cfg80211_bss *cbss = assoc_data->link[link_id].bss;
struct ieee80211_channel *chan = cbss->channel;
const struct ieee80211_sband_iftype_data *iftd;
struct ieee80211_local *local = sdata->local;
struct ieee80211_supported_band *sband;
enum nl80211_chan_width width = NL80211_CHAN_WIDTH_20;
struct ieee80211_chanctx_conf *chanctx_conf;
enum ieee80211_smps_mode smps_mode;
u16 orig_capab = *capab;
size_t offset = 0;
int present_elems_len = 0;
u8 *pos;
int i;
#define ADD_PRESENT_ELEM(id) do { \
/* need a last for termination - we use 0 == SSID */ \
if (!WARN_ON(present_elems_len >= PRESENT_ELEMS_MAX - 1)) \
present_elems[present_elems_len++] = (id); \
} while (0)
#define ADD_PRESENT_EXT_ELEM(id) ADD_PRESENT_ELEM(PRESENT_ELEM_EXT_OFFS | (id))
if (link)
smps_mode = link->smps_mode;
else if (sdata->u.mgd.powersave)
smps_mode = IEEE80211_SMPS_DYNAMIC;
else
smps_mode = IEEE80211_SMPS_OFF;
if (link) {
/*
* 5/10 MHz scenarios are only viable without MLO, in which
* case this pointer should be used ... All of this is a bit
* unclear though, not sure this even works at all.
*/
rcu_read_lock();
chanctx_conf = rcu_dereference(link->conf->chanctx_conf);
if (chanctx_conf)
width = chanctx_conf->def.width;
rcu_read_unlock();
}
sband = local->hw.wiphy->bands[chan->band];
iftd = ieee80211_get_sband_iftype_data(sband, iftype);
if (sband->band == NL80211_BAND_2GHZ) {
*capab |= WLAN_CAPABILITY_SHORT_SLOT_TIME;
*capab |= WLAN_CAPABILITY_SHORT_PREAMBLE;
}
if ((cbss->capability & WLAN_CAPABILITY_SPECTRUM_MGMT) &&
ieee80211_hw_check(&local->hw, SPECTRUM_MGMT))
*capab |= WLAN_CAPABILITY_SPECTRUM_MGMT;
if (sband->band != NL80211_BAND_S1GHZ)
ieee80211_assoc_add_rates(skb, width, sband, assoc_data);
if (*capab & WLAN_CAPABILITY_SPECTRUM_MGMT ||
*capab & WLAN_CAPABILITY_RADIO_MEASURE) {
struct cfg80211_chan_def chandef = {
.width = width,
.chan = chan,
};
pos = skb_put(skb, 4);
*pos++ = WLAN_EID_PWR_CAPABILITY;
*pos++ = 2;
*pos++ = 0; /* min tx power */
/* max tx power */
*pos++ = ieee80211_chandef_max_power(&chandef);
ADD_PRESENT_ELEM(WLAN_EID_PWR_CAPABILITY);
}
/*
* Per spec, we shouldn't include the list of channels if we advertise
* support for extended channel switching, but we've always done that;
* (for now?) apply this restriction only on the (new) 6 GHz band.
*/
if (*capab & WLAN_CAPABILITY_SPECTRUM_MGMT &&
(sband->band != NL80211_BAND_6GHZ ||
!ext_capa || ext_capa->datalen < 1 ||
!(ext_capa->data[0] & WLAN_EXT_CAPA1_EXT_CHANNEL_SWITCHING))) {
/* TODO: get this in reg domain format */
pos = skb_put(skb, 2 * sband->n_channels + 2);
*pos++ = WLAN_EID_SUPPORTED_CHANNELS;
*pos++ = 2 * sband->n_channels;
for (i = 0; i < sband->n_channels; i++) {
int cf = sband->channels[i].center_freq;
*pos++ = ieee80211_frequency_to_channel(cf);
*pos++ = 1; /* one channel in the subband */
}
ADD_PRESENT_ELEM(WLAN_EID_SUPPORTED_CHANNELS);
}
/* if present, add any custom IEs that go before HT */
offset = ieee80211_add_before_ht_elems(skb, extra_elems,
extra_elems_len,
offset);
if (sband->band != NL80211_BAND_6GHZ &&
!(assoc_data->link[link_id].conn_flags & IEEE80211_CONN_DISABLE_HT)) {
ieee80211_add_ht_ie(sdata, skb,
assoc_data->link[link_id].ap_ht_param,
sband, chan, smps_mode,
assoc_data->link[link_id].conn_flags);
ADD_PRESENT_ELEM(WLAN_EID_HT_CAPABILITY);
}
/* if present, add any custom IEs that go before VHT */
offset = ieee80211_add_before_vht_elems(skb, extra_elems,
extra_elems_len,
offset);
if (sband->band != NL80211_BAND_6GHZ &&
!(assoc_data->link[link_id].conn_flags & IEEE80211_CONN_DISABLE_VHT)) {
bool mu_mimo_owner =
ieee80211_add_vht_ie(sdata, skb, sband,
&assoc_data->link[link_id].ap_vht_cap,
assoc_data->link[link_id].conn_flags);
if (link)
link->conf->mu_mimo_owner = mu_mimo_owner;
ADD_PRESENT_ELEM(WLAN_EID_VHT_CAPABILITY);
}
/*
* If the AP doesn't support HT, mark HE and EHT as disabled.
* On the 5 GHz band, additionally require VHT support.
*/
if (assoc_data->link[link_id].conn_flags & IEEE80211_CONN_DISABLE_HT ||
(sband->band == NL80211_BAND_5GHZ &&
assoc_data->link[link_id].conn_flags & IEEE80211_CONN_DISABLE_VHT))
assoc_data->link[link_id].conn_flags |=
IEEE80211_CONN_DISABLE_HE |
IEEE80211_CONN_DISABLE_EHT;
/* if present, add any custom IEs that go before HE */
offset = ieee80211_add_before_he_elems(skb, extra_elems,
extra_elems_len,
offset);
if (!(assoc_data->link[link_id].conn_flags & IEEE80211_CONN_DISABLE_HE)) {
ieee80211_add_he_ie(sdata, skb, sband, smps_mode,
assoc_data->link[link_id].conn_flags);
ADD_PRESENT_EXT_ELEM(WLAN_EID_EXT_HE_CAPABILITY);
}
/*
* careful - need to know about all the present elems before
* calling ieee80211_assoc_add_ml_elem(), so add this one if
* we're going to put it after the ML element
*/
if (!(assoc_data->link[link_id].conn_flags & IEEE80211_CONN_DISABLE_EHT))
ADD_PRESENT_EXT_ELEM(WLAN_EID_EXT_EHT_CAPABILITY);
if (link_id == assoc_data->assoc_link_id)
ieee80211_assoc_add_ml_elem(sdata, skb, orig_capab, ext_capa,
present_elems);
/* crash if somebody gets it wrong */
present_elems = NULL;
if (!(assoc_data->link[link_id].conn_flags & IEEE80211_CONN_DISABLE_EHT))
ieee80211_add_eht_ie(sdata, skb, sband);
if (sband->band == NL80211_BAND_S1GHZ) {
ieee80211_add_aid_request_ie(sdata, skb);
ieee80211_add_s1g_capab_ie(sdata, &sband->s1g_cap, skb);
}
if (iftd && iftd->vendor_elems.data && iftd->vendor_elems.len)
skb_put_data(skb, iftd->vendor_elems.data, iftd->vendor_elems.len);
if (link)
link->u.mgd.conn_flags = assoc_data->link[link_id].conn_flags;
return offset;
}
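/* Build the Non-Inheritance element listing the elements that are present
* in the outer frame but not in the per-STA profile; if nothing needs to
* be listed, the element is removed again entirely.
*/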
static void ieee80211_add_non_inheritance_elem(struct sk_buff *skb,
const u16 *outer,
const u16 *inner)
{
unsigned int skb_len = skb->len;
bool at_extension = false;
bool added = false;
int i, j;
u8 *len, *list_len = NULL;
skb_put_u8(skb, WLAN_EID_EXTENSION);
len = skb_put(skb, 1);
skb_put_u8(skb, WLAN_EID_EXT_NON_INHERITANCE);
for (i = 0; i < PRESENT_ELEMS_MAX && outer[i]; i++) {
u16 elem = outer[i];
bool have_inner = false;
/* should at least be sorted in the sense of normal -> ext */
WARN_ON(at_extension && elem < PRESENT_ELEM_EXT_OFFS);
/* switch to extension list */
if (!at_extension && elem >= PRESENT_ELEM_EXT_OFFS) {
at_extension = true;
if (!list_len)
skb_put_u8(skb, 0);
list_len = NULL;
}
for (j = 0; j < PRESENT_ELEMS_MAX && inner[j]; j++) {
if (elem == inner[j]) {
have_inner = true;
break;
}
}
if (have_inner)
continue;
if (!list_len) {
list_len = skb_put(skb, 1);
*list_len = 0;
}
*list_len += 1;
skb_put_u8(skb, (u8)elem);
added = true;
}
/* if we added a list but no extension list, make a zero-len one */
if (added && (!at_extension || !list_len))
skb_put_u8(skb, 0);
/* if nothing added remove extension element completely */
if (!added)
skb_trim(skb, skb_len);
else
*len = skb->len - skb_len - 2;
}
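/* Add the Basic Multi-Link element for MLO association, containing the MLD
* MAC address, the MLD capabilities/operations (and EML capabilities when
* needed), plus a fragmented per-STA profile sub-element for every link
* other than the association link.
*/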
static void ieee80211_assoc_add_ml_elem(struct ieee80211_sub_if_data *sdata,
struct sk_buff *skb, u16 capab,
const struct element *ext_capa,
const u16 *outer_present_elems)
{
struct ieee80211_local *local = sdata->local;
struct ieee80211_if_managed *ifmgd = &sdata->u.mgd;
struct ieee80211_mgd_assoc_data *assoc_data = ifmgd->assoc_data;
struct ieee80211_multi_link_elem *ml_elem;
struct ieee80211_mle_basic_common_info *common;
const struct wiphy_iftype_ext_capab *ift_ext_capa;
__le16 eml_capa = 0, mld_capa_ops = 0;
unsigned int link_id;
u8 *ml_elem_len;
void *capab_pos;
if (!ieee80211_vif_is_mld(&sdata->vif))
return;
ift_ext_capa = cfg80211_get_iftype_ext_capa(local->hw.wiphy,
ieee80211_vif_type_p2p(&sdata->vif));
if (ift_ext_capa) {
eml_capa = cpu_to_le16(ift_ext_capa->eml_capabilities);
mld_capa_ops = cpu_to_le16(ift_ext_capa->mld_capa_and_ops);
}
skb_put_u8(skb, WLAN_EID_EXTENSION);
ml_elem_len = skb_put(skb, 1);
skb_put_u8(skb, WLAN_EID_EXT_EHT_MULTI_LINK);
ml_elem = skb_put(skb, sizeof(*ml_elem));
ml_elem->control =
cpu_to_le16(IEEE80211_ML_CONTROL_TYPE_BASIC |
IEEE80211_MLC_BASIC_PRES_MLD_CAPA_OP);
common = skb_put(skb, sizeof(*common));
common->len = sizeof(*common) +
2; /* MLD capa/ops */
memcpy(common->mld_mac_addr, sdata->vif.addr, ETH_ALEN);
/* add EML_CAPA only if needed, see Draft P802.11be_D2.1, 35.3.17 */
if (eml_capa &
cpu_to_le16((IEEE80211_EML_CAP_EMLSR_SUPP |
IEEE80211_EML_CAP_EMLMR_SUPPORT))) {
common->len += 2; /* EML capabilities */
ml_elem->control |=
cpu_to_le16(IEEE80211_MLC_BASIC_PRES_EML_CAPA);
skb_put_data(skb, &eml_capa, sizeof(eml_capa));
}
/* need indication from userspace to support this */
mld_capa_ops &= ~cpu_to_le16(IEEE80211_MLD_CAP_OP_TID_TO_LINK_MAP_NEG_SUPP);
skb_put_data(skb, &mld_capa_ops, sizeof(mld_capa_ops));
for (link_id = 0; link_id < IEEE80211_MLD_MAX_NUM_LINKS; link_id++) {
u16 link_present_elems[PRESENT_ELEMS_MAX] = {};
const u8 *extra_elems;
size_t extra_elems_len;
size_t extra_used;
u8 *subelem_len = NULL;
__le16 ctrl;
if (!assoc_data->link[link_id].bss ||
link_id == assoc_data->assoc_link_id)
continue;
extra_elems = assoc_data->link[link_id].elems;
extra_elems_len = assoc_data->link[link_id].elems_len;
skb_put_u8(skb, IEEE80211_MLE_SUBELEM_PER_STA_PROFILE);
subelem_len = skb_put(skb, 1);
ctrl = cpu_to_le16(link_id |
IEEE80211_MLE_STA_CONTROL_COMPLETE_PROFILE |
IEEE80211_MLE_STA_CONTROL_STA_MAC_ADDR_PRESENT);
skb_put_data(skb, &ctrl, sizeof(ctrl));
skb_put_u8(skb, 1 + ETH_ALEN); /* STA Info Length */
skb_put_data(skb, assoc_data->link[link_id].addr,
ETH_ALEN);
/*
* Now add the contents of the (re)association request,
* but the "listen interval" and "current AP address"
* (if applicable) are skipped. So we only have
* the capability field (remember the position and fill
* later), followed by the elements added below by
* calling ieee80211_assoc_link_elems().
*/
capab_pos = skb_put(skb, 2);
extra_used = ieee80211_assoc_link_elems(sdata, skb, &capab,
ext_capa,
extra_elems,
extra_elems_len,
link_id, NULL,
link_present_elems);
if (extra_elems)
skb_put_data(skb, extra_elems + extra_used,
extra_elems_len - extra_used);
put_unaligned_le16(capab, capab_pos);
ieee80211_add_non_inheritance_elem(skb, outer_present_elems,
link_present_elems);
ieee80211_fragment_element(skb, subelem_len,
IEEE80211_MLE_SUBELEM_FRAGMENT);
}
ieee80211_fragment_element(skb, ml_elem_len, WLAN_EID_FRAGMENT);
}
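/* Build and transmit the (re)association request frame, including the
* per-link elements, the multi-link element for MLO, WMM information and
* optional FILS encryption.
*/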
static int ieee80211_send_assoc(struct ieee80211_sub_if_data *sdata)
{
struct ieee80211_local *local = sdata->local;
struct ieee80211_if_managed *ifmgd = &sdata->u.mgd;
struct ieee80211_mgd_assoc_data *assoc_data = ifmgd->assoc_data;
struct ieee80211_link_data *link;
struct sk_buff *skb;
struct ieee80211_mgmt *mgmt;
u8 *pos, qos_info, *ie_start;
size_t offset, noffset;
u16 capab = WLAN_CAPABILITY_ESS, link_capab;
__le16 listen_int;
struct element *ext_capa = NULL;
enum nl80211_iftype iftype = ieee80211_vif_type_p2p(&sdata->vif);
struct ieee80211_prep_tx_info info = {};
unsigned int link_id, n_links = 0;
u16 present_elems[PRESENT_ELEMS_MAX] = {};
void *capab_pos;
size_t size;
int ret;
/* we know it's writable, cast away the const */
if (assoc_data->ie_len)
ext_capa = (void *)cfg80211_find_elem(WLAN_EID_EXT_CAPABILITY,
assoc_data->ie,
assoc_data->ie_len);
sdata_assert_lock(sdata);
size = local->hw.extra_tx_headroom +
sizeof(*mgmt) + /* bit too much but doesn't matter */
2 + assoc_data->ssid_len + /* SSID */
assoc_data->ie_len + /* extra IEs */
(assoc_data->fils_kek_len ? 16 /* AES-SIV */ : 0) +
9; /* WMM */
for (link_id = 0; link_id < IEEE80211_MLD_MAX_NUM_LINKS; link_id++) {
struct cfg80211_bss *cbss = assoc_data->link[link_id].bss;
const struct ieee80211_sband_iftype_data *iftd;
struct ieee80211_supported_band *sband;
if (!cbss)
continue;
sband = local->hw.wiphy->bands[cbss->channel->band];
n_links++;
/* add STA profile elements length */
size += assoc_data->link[link_id].elems_len;
/* and supported rates length */
size += 4 + sband->n_bitrates;
/* supported channels */
size += 2 + 2 * sband->n_channels;
iftd = ieee80211_get_sband_iftype_data(sband, iftype);
if (iftd)
size += iftd->vendor_elems.len;
/* power capability */
size += 4;
/* HT, VHT, HE, EHT */
size += 2 + sizeof(struct ieee80211_ht_cap);
size += 2 + sizeof(struct ieee80211_vht_cap);
size += 2 + 1 + sizeof(struct ieee80211_he_cap_elem) +
sizeof(struct ieee80211_he_mcs_nss_supp) +
IEEE80211_HE_PPE_THRES_MAX_LEN;
if (sband->band == NL80211_BAND_6GHZ)
size += 2 + 1 + sizeof(struct ieee80211_he_6ghz_capa);
size += 2 + 1 + sizeof(struct ieee80211_eht_cap_elem) +
sizeof(struct ieee80211_eht_mcs_nss_supp) +
IEEE80211_EHT_PPE_THRES_MAX_LEN;
/* non-inheritance element */
size += 2 + 2 + PRESENT_ELEMS_MAX;
/* should be the same across all BSSes */
if (cbss->capability & WLAN_CAPABILITY_PRIVACY)
capab |= WLAN_CAPABILITY_PRIVACY;
}
if (ieee80211_vif_is_mld(&sdata->vif)) {
/* consider the multi-link element with STA profile */
size += sizeof(struct ieee80211_multi_link_elem);
/* max common info field in basic multi-link element */
size += sizeof(struct ieee80211_mle_basic_common_info) +
2 + /* capa & op */
2; /* EML capa */
/*
* The capability elements were already considered above;
* note this over-estimates a bit because there's no
* STA profile for the assoc link.
*/
size += (n_links - 1) *
(1 + 1 + /* subelement ID/length */
2 + /* STA control */
1 + ETH_ALEN + 2 /* STA Info field */);
}
link = sdata_dereference(sdata->link[assoc_data->assoc_link_id], sdata);
if (WARN_ON(!link))
return -EINVAL;
if (WARN_ON(!assoc_data->link[assoc_data->assoc_link_id].bss))
return -EINVAL;
skb = alloc_skb(size, GFP_KERNEL);
if (!skb)
return -ENOMEM;
skb_reserve(skb, local->hw.extra_tx_headroom);
if (ifmgd->flags & IEEE80211_STA_ENABLE_RRM)
capab |= WLAN_CAPABILITY_RADIO_MEASURE;
/* Set MBSSID support for HE AP if needed */
if (ieee80211_hw_check(&local->hw, SUPPORTS_ONLY_HE_MULTI_BSSID) &&
!(link->u.mgd.conn_flags & IEEE80211_CONN_DISABLE_HE) &&
ext_capa && ext_capa->datalen >= 3)
ext_capa->data[2] |= WLAN_EXT_CAPA3_MULTI_BSSID_SUPPORT;
mgmt = skb_put_zero(skb, 24);
memcpy(mgmt->da, sdata->vif.cfg.ap_addr, ETH_ALEN);
memcpy(mgmt->sa, sdata->vif.addr, ETH_ALEN);
memcpy(mgmt->bssid, sdata->vif.cfg.ap_addr, ETH_ALEN);
listen_int = cpu_to_le16(assoc_data->s1g ?
ieee80211_encode_usf(local->hw.conf.listen_interval) :
local->hw.conf.listen_interval);
if (!is_zero_ether_addr(assoc_data->prev_ap_addr)) {
skb_put(skb, 10);
mgmt->frame_control = cpu_to_le16(IEEE80211_FTYPE_MGMT |
IEEE80211_STYPE_REASSOC_REQ);
capab_pos = &mgmt->u.reassoc_req.capab_info;
mgmt->u.reassoc_req.listen_interval = listen_int;
memcpy(mgmt->u.reassoc_req.current_ap,
assoc_data->prev_ap_addr, ETH_ALEN);
info.subtype = IEEE80211_STYPE_REASSOC_REQ;
} else {
skb_put(skb, 4);
mgmt->frame_control = cpu_to_le16(IEEE80211_FTYPE_MGMT |
IEEE80211_STYPE_ASSOC_REQ);
capab_pos = &mgmt->u.assoc_req.capab_info;
mgmt->u.assoc_req.listen_interval = listen_int;
info.subtype = IEEE80211_STYPE_ASSOC_REQ;
}
/* SSID */
pos = skb_put(skb, 2 + assoc_data->ssid_len);
ie_start = pos;
*pos++ = WLAN_EID_SSID;
*pos++ = assoc_data->ssid_len;
memcpy(pos, assoc_data->ssid, assoc_data->ssid_len);
/* add the elements for the assoc (main) link */
link_capab = capab;
offset = ieee80211_assoc_link_elems(sdata, skb, &link_capab,
ext_capa,
assoc_data->ie,
assoc_data->ie_len,
assoc_data->assoc_link_id, link,
present_elems);
put_unaligned_le16(link_capab, capab_pos);
/* if present, add any custom non-vendor IEs */
if (assoc_data->ie_len) {
noffset = ieee80211_ie_split_vendor(assoc_data->ie,
assoc_data->ie_len,
offset);
skb_put_data(skb, assoc_data->ie + offset, noffset - offset);
offset = noffset;
}
if (assoc_data->wmm) {
if (assoc_data->uapsd) {
qos_info = ifmgd->uapsd_queues;
qos_info |= (ifmgd->uapsd_max_sp_len <<
IEEE80211_WMM_IE_STA_QOSINFO_SP_SHIFT);
} else {
qos_info = 0;
}
pos = ieee80211_add_wmm_info_ie(skb_put(skb, 9), qos_info);
}
/* add any remaining custom (i.e. vendor specific here) IEs */
if (assoc_data->ie_len) {
noffset = assoc_data->ie_len;
skb_put_data(skb, assoc_data->ie + offset, noffset - offset);
}
if (assoc_data->fils_kek_len) {
ret = fils_encrypt_assoc_req(skb, assoc_data);
if (ret < 0) {
dev_kfree_skb(skb);
return ret;
}
}
pos = skb_tail_pointer(skb);
kfree(ifmgd->assoc_req_ies);
ifmgd->assoc_req_ies = kmemdup(ie_start, pos - ie_start, GFP_ATOMIC);
if (!ifmgd->assoc_req_ies) {
dev_kfree_skb(skb);
return -ENOMEM;
}
ifmgd->assoc_req_ies_len = pos - ie_start;
drv_mgd_prepare_tx(local, sdata, &info);
IEEE80211_SKB_CB(skb)->flags |= IEEE80211_TX_INTFL_DONT_ENCRYPT;
if (ieee80211_hw_check(&local->hw, REPORTS_TX_ACK_STATUS))
IEEE80211_SKB_CB(skb)->flags |= IEEE80211_TX_CTL_REQ_TX_STATUS |
IEEE80211_TX_INTFL_MLME_CONN_TX;
ieee80211_tx_skb(sdata, skb);
return 0;
}
void ieee80211_send_pspoll(struct ieee80211_local *local,
struct ieee80211_sub_if_data *sdata)
{
struct ieee80211_pspoll *pspoll;
struct sk_buff *skb;
skb = ieee80211_pspoll_get(&local->hw, &sdata->vif);
if (!skb)
return;
pspoll = (struct ieee80211_pspoll *) skb->data;
pspoll->frame_control |= cpu_to_le16(IEEE80211_FCTL_PM);
IEEE80211_SKB_CB(skb)->flags |= IEEE80211_TX_INTFL_DONT_ENCRYPT;
ieee80211_tx_skb(sdata, skb);
}
void ieee80211_send_nullfunc(struct ieee80211_local *local,
struct ieee80211_sub_if_data *sdata,
bool powersave)
{
struct sk_buff *skb;
struct ieee80211_hdr_3addr *nullfunc;
struct ieee80211_if_managed *ifmgd = &sdata->u.mgd;
skb = ieee80211_nullfunc_get(&local->hw, &sdata->vif, -1,
!ieee80211_hw_check(&local->hw,
DOESNT_SUPPORT_QOS_NDP));
if (!skb)
return;
nullfunc = (struct ieee80211_hdr_3addr *) skb->data;
if (powersave)
nullfunc->frame_control |= cpu_to_le16(IEEE80211_FCTL_PM);
IEEE80211_SKB_CB(skb)->flags |= IEEE80211_TX_INTFL_DONT_ENCRYPT |
IEEE80211_TX_INTFL_OFFCHAN_TX_OK;
if (ieee80211_hw_check(&local->hw, REPORTS_TX_ACK_STATUS))
IEEE80211_SKB_CB(skb)->flags |= IEEE80211_TX_CTL_REQ_TX_STATUS;
if (ifmgd->flags & IEEE80211_STA_CONNECTION_POLL)
IEEE80211_SKB_CB(skb)->flags |= IEEE80211_TX_CTL_USE_MINRATE;
ieee80211_tx_skb(sdata, skb);
}
void ieee80211_send_4addr_nullfunc(struct ieee80211_local *local,
struct ieee80211_sub_if_data *sdata)
{
struct sk_buff *skb;
struct ieee80211_hdr *nullfunc;
__le16 fc;
if (WARN_ON(sdata->vif.type != NL80211_IFTYPE_STATION))
return;
skb = dev_alloc_skb(local->hw.extra_tx_headroom + 30);
if (!skb)
return;
skb_reserve(skb, local->hw.extra_tx_headroom);
nullfunc = skb_put_zero(skb, 30);
fc = cpu_to_le16(IEEE80211_FTYPE_DATA | IEEE80211_STYPE_NULLFUNC |
IEEE80211_FCTL_FROMDS | IEEE80211_FCTL_TODS);
nullfunc->frame_control = fc;
memcpy(nullfunc->addr1, sdata->deflink.u.mgd.bssid, ETH_ALEN);
memcpy(nullfunc->addr2, sdata->vif.addr, ETH_ALEN);
memcpy(nullfunc->addr3, sdata->deflink.u.mgd.bssid, ETH_ALEN);
memcpy(nullfunc->addr4, sdata->vif.addr, ETH_ALEN);
IEEE80211_SKB_CB(skb)->flags |= IEEE80211_TX_INTFL_DONT_ENCRYPT;
IEEE80211_SKB_CB(skb)->flags |= IEEE80211_TX_CTL_USE_MINRATE;
ieee80211_tx_skb(sdata, skb);
}
/* spectrum management related things */
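/* Work item that finalizes a channel switch: switch to the reserved channel
* context (if any), verify the resulting chandef and start waiting for the
* first beacon on the new channel; disconnects on failure.
*/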
static void ieee80211_chswitch_work(struct wiphy *wiphy,
struct wiphy_work *work)
{
struct ieee80211_link_data *link =
container_of(work, struct ieee80211_link_data,
u.mgd.chswitch_work.work);
struct ieee80211_sub_if_data *sdata = link->sdata;
struct ieee80211_local *local = sdata->local;
struct ieee80211_if_managed *ifmgd = &sdata->u.mgd;
int ret;
if (!ieee80211_sdata_running(sdata))
return;
sdata_lock(sdata);
mutex_lock(&local->mtx);
mutex_lock(&local->chanctx_mtx);
if (!ifmgd->associated)
goto out;
if (!link->conf->csa_active)
goto out;
/*
* Using the reservation isn't immediate, as it may be deferred until later
* with multi-vif. Once the reservation is complete, the work will be
* re-scheduled with no reserved_chanctx, so verify the chandef to check
* whether the switch completed successfully.
*/
if (link->reserved_chanctx) {
/*
* With multi-vif CSA, the driver may call ieee80211_csa_finish()
* many times while waiting for other interfaces to use their
* reservations.
*/
if (link->reserved_ready)
goto out;
ret = ieee80211_link_use_reserved_context(link);
if (ret) {
sdata_info(sdata,
"failed to use reserved channel context, disconnecting (err=%d)\n",
ret);
wiphy_work_queue(sdata->local->hw.wiphy,
&ifmgd->csa_connection_drop_work);
goto out;
}
goto out;
}
if (!cfg80211_chandef_identical(&link->conf->chandef,
&link->csa_chandef)) {
sdata_info(sdata,
"failed to finalize channel switch, disconnecting\n");
wiphy_work_queue(sdata->local->hw.wiphy,
&ifmgd->csa_connection_drop_work);
goto out;
}
link->u.mgd.csa_waiting_bcn = true;
ieee80211_sta_reset_beacon_monitor(sdata);
ieee80211_sta_reset_conn_monitor(sdata);
out:
mutex_unlock(&local->chanctx_mtx);
mutex_unlock(&local->mtx);
sdata_unlock(sdata);
}
static void ieee80211_chswitch_post_beacon(struct ieee80211_link_data *link)
{
struct ieee80211_sub_if_data *sdata = link->sdata;
struct ieee80211_local *local = sdata->local;
struct ieee80211_if_managed *ifmgd = &sdata->u.mgd;
int ret;
sdata_assert_lock(sdata);
WARN_ON(!link->conf->csa_active);
if (link->csa_block_tx) {
ieee80211_wake_vif_queues(local, sdata,
IEEE80211_QUEUE_STOP_REASON_CSA);
link->csa_block_tx = false;
}
link->conf->csa_active = false;
link->u.mgd.csa_waiting_bcn = false;
/*
* If the CSA IE is still present on the beacon after the switch,
* we need to consider it as a new CSA (possibly to self).
*/
link->u.mgd.beacon_crc_valid = false;
ret = drv_post_channel_switch(sdata);
if (ret) {
sdata_info(sdata,
"driver post channel switch failed, disconnecting\n");
wiphy_work_queue(sdata->local->hw.wiphy,
&ifmgd->csa_connection_drop_work);
return;
}
cfg80211_ch_switch_notify(sdata->dev, &link->reserved_chandef, 0, 0);
}
void ieee80211_chswitch_done(struct ieee80211_vif *vif, bool success)
{
struct ieee80211_sub_if_data *sdata = vif_to_sdata(vif);
struct ieee80211_if_managed *ifmgd = &sdata->u.mgd;
if (WARN_ON(ieee80211_vif_is_mld(&sdata->vif)))
success = false;
trace_api_chswitch_done(sdata, success);
if (!success) {
sdata_info(sdata,
"driver channel switch failed, disconnecting\n");
wiphy_work_queue(sdata->local->hw.wiphy,
&ifmgd->csa_connection_drop_work);
} else {
wiphy_delayed_work_queue(sdata->local->hw.wiphy,
&sdata->deflink.u.mgd.chswitch_work,
0);
}
}
EXPORT_SYMBOL(ieee80211_chswitch_done);
static void
ieee80211_sta_abort_chanswitch(struct ieee80211_link_data *link)
{
struct ieee80211_sub_if_data *sdata = link->sdata;
struct ieee80211_local *local = sdata->local;
if (!local->ops->abort_channel_switch)
return;
mutex_lock(&local->mtx);
mutex_lock(&local->chanctx_mtx);
ieee80211_link_unreserve_chanctx(link);
mutex_unlock(&local->chanctx_mtx);
if (link->csa_block_tx)
ieee80211_wake_vif_queues(local, sdata,
IEEE80211_QUEUE_STOP_REASON_CSA);
link->csa_block_tx = false;
link->conf->csa_active = false;
mutex_unlock(&local->mtx);
drv_abort_channel_switch(sdata);
}
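/* Handle a channel switch announcement from the AP (from a beacon or action
* frame): validate the target channel, reserve a channel context for it and
* either let the driver perform the switch or schedule the software switch
* work; on any failure the connection is dropped.
*/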
static void
ieee80211_sta_process_chanswitch(struct ieee80211_link_data *link,
u64 timestamp, u32 device_timestamp,
struct ieee802_11_elems *elems,
bool beacon)
{
struct ieee80211_sub_if_data *sdata = link->sdata;
struct ieee80211_local *local = sdata->local;
struct ieee80211_if_managed *ifmgd = &sdata->u.mgd;
struct cfg80211_bss *cbss = link->u.mgd.bss;
struct ieee80211_chanctx_conf *conf;
struct ieee80211_chanctx *chanctx;
enum nl80211_band current_band;
struct ieee80211_csa_ie csa_ie;
struct ieee80211_channel_switch ch_switch;
struct ieee80211_bss *bss;
unsigned long timeout;
int res;
sdata_assert_lock(sdata);
if (!cbss)
return;
current_band = cbss->channel->band;
bss = (void *)cbss->priv;
res = ieee80211_parse_ch_switch_ie(sdata, elems, current_band,
bss->vht_cap_info,
link->u.mgd.conn_flags,
link->u.mgd.bssid, &csa_ie);
if (!res) {
ch_switch.timestamp = timestamp;
ch_switch.device_timestamp = device_timestamp;
ch_switch.block_tx = csa_ie.mode;
ch_switch.chandef = csa_ie.chandef;
ch_switch.count = csa_ie.count;
ch_switch.delay = csa_ie.max_switch_time;
}
if (res < 0)
goto lock_and_drop_connection;
if (beacon && link->conf->csa_active &&
!link->u.mgd.csa_waiting_bcn) {
if (res)
ieee80211_sta_abort_chanswitch(link);
else
drv_channel_switch_rx_beacon(sdata, &ch_switch);
return;
} else if (link->conf->csa_active || res) {
/* disregard subsequent announcements if already processing */
return;
}
if (link->conf->chandef.chan->band !=
csa_ie.chandef.chan->band) {
sdata_info(sdata,
"AP %pM switches to different band (%d MHz, width:%d, CF1/2: %d/%d MHz), disconnecting\n",
link->u.mgd.bssid,
csa_ie.chandef.chan->center_freq,
csa_ie.chandef.width, csa_ie.chandef.center_freq1,
csa_ie.chandef.center_freq2);
goto lock_and_drop_connection;
}
if (!cfg80211_chandef_usable(local->hw.wiphy, &csa_ie.chandef,
IEEE80211_CHAN_DISABLED)) {
sdata_info(sdata,
"AP %pM switches to unsupported channel "
"(%d.%03d MHz, width:%d, CF1/2: %d.%03d/%d MHz), "
"disconnecting\n",
link->u.mgd.bssid,
csa_ie.chandef.chan->center_freq,
csa_ie.chandef.chan->freq_offset,
csa_ie.chandef.width, csa_ie.chandef.center_freq1,
csa_ie.chandef.freq1_offset,
csa_ie.chandef.center_freq2);
goto lock_and_drop_connection;
}
if (cfg80211_chandef_identical(&csa_ie.chandef,
&link->conf->chandef) &&
(!csa_ie.mode || !beacon)) {
if (link->u.mgd.csa_ignored_same_chan)
return;
sdata_info(sdata,
"AP %pM tries to chanswitch to same channel, ignore\n",
link->u.mgd.bssid);
link->u.mgd.csa_ignored_same_chan = true;
return;
}
/*
* Drop all TDLS peers - either we disconnect or move to a different
* channel from this point on. There's no telling what our peer will do.
* The TDLS WIDER_BW scenario is also problematic, as peers might now
* have an incompatible wider chandef.
*/
ieee80211_teardown_tdls_peers(sdata);
mutex_lock(&local->mtx);
mutex_lock(&local->chanctx_mtx);
conf = rcu_dereference_protected(link->conf->chanctx_conf,
lockdep_is_held(&local->chanctx_mtx));
if (!conf) {
sdata_info(sdata,
"no channel context assigned to vif?, disconnecting\n");
goto drop_connection;
}
chanctx = container_of(conf, struct ieee80211_chanctx, conf);
if (local->use_chanctx &&
!ieee80211_hw_check(&local->hw, CHANCTX_STA_CSA)) {
sdata_info(sdata,
"driver doesn't support chan-switch with channel contexts\n");
goto drop_connection;
}
if (drv_pre_channel_switch(sdata, &ch_switch)) {
sdata_info(sdata,
"preparing for channel switch failed, disconnecting\n");
goto drop_connection;
}
res = ieee80211_link_reserve_chanctx(link, &csa_ie.chandef,
chanctx->mode, false);
if (res) {
sdata_info(sdata,
"failed to reserve channel context for channel switch, disconnecting (err=%d)\n",
res);
goto drop_connection;
}
mutex_unlock(&local->chanctx_mtx);
link->conf->csa_active = true;
link->csa_chandef = csa_ie.chandef;
link->csa_block_tx = csa_ie.mode;
link->u.mgd.csa_ignored_same_chan = false;
link->u.mgd.beacon_crc_valid = false;
if (link->csa_block_tx)
ieee80211_stop_vif_queues(local, sdata,
IEEE80211_QUEUE_STOP_REASON_CSA);
mutex_unlock(&local->mtx);
cfg80211_ch_switch_started_notify(sdata->dev, &csa_ie.chandef,
link->link_id, csa_ie.count,
csa_ie.mode, 0);
if (local->ops->channel_switch) {
/* use driver's channel switch callback */
drv_channel_switch(local, sdata, &ch_switch);
return;
}
/* channel switch handled in software */
timeout = TU_TO_JIFFIES((max_t(int, csa_ie.count, 1) - 1) *
cbss->beacon_interval);
wiphy_delayed_work_queue(local->hw.wiphy,
&link->u.mgd.chswitch_work,
timeout);
return;
lock_and_drop_connection:
mutex_lock(&local->mtx);
mutex_lock(&local->chanctx_mtx);
drop_connection:
/*
* This is just so that the disconnect flow will know that
* we were trying to switch channel and failed. In case the
* mode is 1 (we are not allowed to Tx), we will know not to
* send a deauthentication frame. Those two fields will be
* reset when the disconnection worker runs.
*/
link->conf->csa_active = true;
link->csa_block_tx = csa_ie.mode;
wiphy_work_queue(sdata->local->hw.wiphy,
&ifmgd->csa_connection_drop_work);
mutex_unlock(&local->chanctx_mtx);
mutex_unlock(&local->mtx);
}
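/* Look up the regulatory channel power for the operating channel in the
* Country element triplets and return the 802.11h power constraint
* reduction; returns true if a matching channel power was found.
*/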
static bool
ieee80211_find_80211h_pwr_constr(struct ieee80211_sub_if_data *sdata,
struct ieee80211_channel *channel,
const u8 *country_ie, u8 country_ie_len,
const u8 *pwr_constr_elem,
int *chan_pwr, int *pwr_reduction)
{
struct ieee80211_country_ie_triplet *triplet;
int chan = ieee80211_frequency_to_channel(channel->center_freq);
int i, chan_increment;
bool have_chan_pwr = false;
/* Invalid IE */
if (country_ie_len % 2 || country_ie_len < IEEE80211_COUNTRY_IE_MIN_LEN)
return false;
triplet = (void *)(country_ie + 3);
country_ie_len -= 3;
switch (channel->band) {
default:
WARN_ON_ONCE(1);
fallthrough;
case NL80211_BAND_2GHZ:
case NL80211_BAND_60GHZ:
case NL80211_BAND_LC:
chan_increment = 1;
break;
case NL80211_BAND_5GHZ:
chan_increment = 4;
break;
case NL80211_BAND_6GHZ:
/*
* In the 6 GHz band, the "maximum transmit power level"
* field in the triplets is reserved, and thus will be
* zero and we shouldn't use it to control TX power.
* The actual TX power will be given in the transmit
* power envelope element instead.
*/
return false;
}
/* find channel */
while (country_ie_len >= 3) {
u8 first_channel = triplet->chans.first_channel;
if (first_channel >= IEEE80211_COUNTRY_EXTENSION_ID)
goto next;
for (i = 0; i < triplet->chans.num_channels; i++) {
if (first_channel + i * chan_increment == chan) {
have_chan_pwr = true;
*chan_pwr = triplet->chans.max_power;
break;
}
}
if (have_chan_pwr)
break;
next:
triplet++;
country_ie_len -= 3;
}
if (have_chan_pwr && pwr_constr_elem)
*pwr_reduction = *pwr_constr_elem;
else
*pwr_reduction = 0;
return have_chan_pwr;
}
static void ieee80211_find_cisco_dtpc(struct ieee80211_sub_if_data *sdata,
struct ieee80211_channel *channel,
const u8 *cisco_dtpc_ie,
int *pwr_level)
{
/* From practical testing, the first data byte of the DTPC element
* seems to contain the requested dBm level, and the CLI on Cisco
* APs clearly state the range is -127 to 127 dBm, which indicates
* a signed byte, although it seemingly never actually goes negative.
* The other byte seems to always be zero.
*/
*pwr_level = (__s8)cisco_dtpc_ie[4];
}
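/* Combine the 802.11h (country/power-constraint) and Cisco DTPC TX power
* limits, picking the lower of the two, store it as the AP power level and
* recalculate TX power; returns BSS_CHANGED_TXPOWER if it changed.
*/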
static u64 ieee80211_handle_pwr_constr(struct ieee80211_link_data *link,
struct ieee80211_channel *channel,
struct ieee80211_mgmt *mgmt,
const u8 *country_ie, u8 country_ie_len,
const u8 *pwr_constr_ie,
const u8 *cisco_dtpc_ie)
{
struct ieee80211_sub_if_data *sdata = link->sdata;
bool has_80211h_pwr = false, has_cisco_pwr = false;
int chan_pwr = 0, pwr_reduction_80211h = 0;
int pwr_level_cisco, pwr_level_80211h;
int new_ap_level;
__le16 capab = mgmt->u.probe_resp.capab_info;
if (ieee80211_is_s1g_beacon(mgmt->frame_control))
return 0; /* TODO */
if (country_ie &&
(capab & cpu_to_le16(WLAN_CAPABILITY_SPECTRUM_MGMT) ||
capab & cpu_to_le16(WLAN_CAPABILITY_RADIO_MEASURE))) {
has_80211h_pwr = ieee80211_find_80211h_pwr_constr(
sdata, channel, country_ie, country_ie_len,
pwr_constr_ie, &chan_pwr, &pwr_reduction_80211h);
pwr_level_80211h =
max_t(int, 0, chan_pwr - pwr_reduction_80211h);
}
if (cisco_dtpc_ie) {
ieee80211_find_cisco_dtpc(
sdata, channel, cisco_dtpc_ie, &pwr_level_cisco);
has_cisco_pwr = true;
}
if (!has_80211h_pwr && !has_cisco_pwr)
return 0;
/* If we have both 802.11h and Cisco DTPC, apply both limits
* by picking the smallest of the two power levels advertised.
*/
if (has_80211h_pwr &&
(!has_cisco_pwr || pwr_level_80211h <= pwr_level_cisco)) {
new_ap_level = pwr_level_80211h;
if (link->ap_power_level == new_ap_level)
return 0;
sdata_dbg(sdata,
"Limiting TX power to %d (%d - %d) dBm as advertised by %pM\n",
pwr_level_80211h, chan_pwr, pwr_reduction_80211h,
link->u.mgd.bssid);
} else { /* has_cisco_pwr is always true here. */
new_ap_level = pwr_level_cisco;
if (link->ap_power_level == new_ap_level)
return 0;
sdata_dbg(sdata,
"Limiting TX power to %d dBm as advertised by %pM\n",
pwr_level_cisco, link->u.mgd.bssid);
}
link->ap_power_level = new_ap_level;
if (__ieee80211_recalc_txpower(sdata))
return BSS_CHANGED_TXPOWER;
return 0;
}
/* powersave */
static void ieee80211_enable_ps(struct ieee80211_local *local,
struct ieee80211_sub_if_data *sdata)
{
struct ieee80211_conf *conf = &local->hw.conf;
/*
* If we are scanning right now then the parameters will
* take effect when scan finishes.
*/
if (local->scanning)
return;
if (conf->dynamic_ps_timeout > 0 &&
!ieee80211_hw_check(&local->hw, SUPPORTS_DYNAMIC_PS)) {
mod_timer(&local->dynamic_ps_timer, jiffies +
msecs_to_jiffies(conf->dynamic_ps_timeout));
} else {
if (ieee80211_hw_check(&local->hw, PS_NULLFUNC_STACK))
ieee80211_send_nullfunc(local, sdata, true);
if (ieee80211_hw_check(&local->hw, PS_NULLFUNC_STACK) &&
ieee80211_hw_check(&local->hw, REPORTS_TX_ACK_STATUS))
return;
conf->flags |= IEEE80211_CONF_PS;
ieee80211_hw_config(local, IEEE80211_CONF_CHANGE_PS);
}
}
static void ieee80211_change_ps(struct ieee80211_local *local)
{
struct ieee80211_conf *conf = &local->hw.conf;
if (local->ps_sdata) {
ieee80211_enable_ps(local, local->ps_sdata);
} else if (conf->flags & IEEE80211_CONF_PS) {
conf->flags &= ~IEEE80211_CONF_PS;
ieee80211_hw_config(local, IEEE80211_CONF_CHANGE_PS);
del_timer_sync(&local->dynamic_ps_timer);
cancel_work_sync(&local->dynamic_ps_enable_work);
}
}
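/* Check whether powersave may be enabled on this managed interface: it must
* be associated to a non-broken AP, not be polling the connection, have
* received a beacon (in the non-MLO case) and have an authorized station
* entry for the AP.
*/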
static bool ieee80211_powersave_allowed(struct ieee80211_sub_if_data *sdata)
{
struct ieee80211_local *local = sdata->local;
struct ieee80211_if_managed *mgd = &sdata->u.mgd;
struct sta_info *sta = NULL;
bool authorized = false;
if (!mgd->powersave)
return false;
if (mgd->broken_ap)
return false;
if (!mgd->associated)
return false;
if (mgd->flags & IEEE80211_STA_CONNECTION_POLL)
return false;
if (!(local->hw.wiphy->flags & WIPHY_FLAG_SUPPORTS_MLO) &&
!sdata->deflink.u.mgd.have_beacon)
return false;
rcu_read_lock();
sta = sta_info_get(sdata, sdata->vif.cfg.ap_addr);
if (sta)
authorized = test_sta_flag(sta, WLAN_STA_AUTHORIZED);
rcu_read_unlock();
return authorized;
}
/* need to hold RTNL or interface lock */
void ieee80211_recalc_ps(struct ieee80211_local *local)
{
struct ieee80211_sub_if_data *sdata, *found = NULL;
int count = 0;
int timeout;
if (!ieee80211_hw_check(&local->hw, SUPPORTS_PS) ||
ieee80211_hw_check(&local->hw, SUPPORTS_DYNAMIC_PS)) {
local->ps_sdata = NULL;
return;
}
list_for_each_entry(sdata, &local->interfaces, list) {
if (!ieee80211_sdata_running(sdata))
continue;
if (sdata->vif.type == NL80211_IFTYPE_AP) {
/* If an AP vif is found, disable PS by setting the count
* to zero, which in turn leads to ps_sdata being set to
* NULL below.
*/
count = 0;
break;
}
if (sdata->vif.type != NL80211_IFTYPE_STATION)
continue;
found = sdata;
count++;
}
if (count == 1 && ieee80211_powersave_allowed(found)) {
u8 dtimper = found->deflink.u.mgd.dtim_period;
timeout = local->dynamic_ps_forced_timeout;
if (timeout < 0)
timeout = 100;
local->hw.conf.dynamic_ps_timeout = timeout;
/* If the TIM IE is invalid, pretend the value is 1 */
if (!dtimper)
dtimper = 1;
local->hw.conf.ps_dtim_period = dtimper;
local->ps_sdata = found;
} else {
local->ps_sdata = NULL;
}
ieee80211_change_ps(local);
}
void ieee80211_recalc_ps_vif(struct ieee80211_sub_if_data *sdata)
{
bool ps_allowed = ieee80211_powersave_allowed(sdata);
if (sdata->vif.cfg.ps != ps_allowed) {
sdata->vif.cfg.ps = ps_allowed;
ieee80211_vif_cfg_change_notify(sdata, BSS_CHANGED_PS);
}
}
void ieee80211_dynamic_ps_disable_work(struct work_struct *work)
{
struct ieee80211_local *local =
container_of(work, struct ieee80211_local,
dynamic_ps_disable_work);
if (local->hw.conf.flags & IEEE80211_CONF_PS) {
local->hw.conf.flags &= ~IEEE80211_CONF_PS;
ieee80211_hw_config(local, IEEE80211_CONF_CHANGE_PS);
}
ieee80211_wake_queues_by_reason(&local->hw,
IEEE80211_MAX_QUEUE_MAP,
IEEE80211_QUEUE_STOP_REASON_PS,
false);
}
void ieee80211_dynamic_ps_enable_work(struct work_struct *work)
{
struct ieee80211_local *local =
container_of(work, struct ieee80211_local,
dynamic_ps_enable_work);
struct ieee80211_sub_if_data *sdata = local->ps_sdata;
struct ieee80211_if_managed *ifmgd;
unsigned long flags;
int q;
/* can only happen when PS was just disabled anyway */
if (!sdata)
return;
ifmgd = &sdata->u.mgd;
if (local->hw.conf.flags & IEEE80211_CONF_PS)
return;
if (local->hw.conf.dynamic_ps_timeout > 0) {
/* don't enter PS if TX frames are pending */
if (drv_tx_frames_pending(local)) {
mod_timer(&local->dynamic_ps_timer, jiffies +
msecs_to_jiffies(
local->hw.conf.dynamic_ps_timeout));
return;
}
/*
* Transmission can be stopped by others, which leads to the
* dynamic_ps_timer expiring. Postpone the PS timer if we are
* not actually idle, i.e. if any queue is still stopped.
*/
spin_lock_irqsave(&local->queue_stop_reason_lock, flags);
for (q = 0; q < local->hw.queues; q++) {
if (local->queue_stop_reasons[q]) {
spin_unlock_irqrestore(&local->queue_stop_reason_lock,
flags);
mod_timer(&local->dynamic_ps_timer, jiffies +
msecs_to_jiffies(
local->hw.conf.dynamic_ps_timeout));
return;
}
}
spin_unlock_irqrestore(&local->queue_stop_reason_lock, flags);
}
if (ieee80211_hw_check(&local->hw, PS_NULLFUNC_STACK) &&
!(ifmgd->flags & IEEE80211_STA_NULLFUNC_ACKED)) {
if (drv_tx_frames_pending(local)) {
mod_timer(&local->dynamic_ps_timer, jiffies +
msecs_to_jiffies(
local->hw.conf.dynamic_ps_timeout));
} else {
ieee80211_send_nullfunc(local, sdata, true);
/* Flush to get the tx status of nullfunc frame */
ieee80211_flush_queues(local, sdata, false);
}
}
if (!(ieee80211_hw_check(&local->hw, REPORTS_TX_ACK_STATUS) &&
ieee80211_hw_check(&local->hw, PS_NULLFUNC_STACK)) ||
(ifmgd->flags & IEEE80211_STA_NULLFUNC_ACKED)) {
ifmgd->flags &= ~IEEE80211_STA_NULLFUNC_ACKED;
local->hw.conf.flags |= IEEE80211_CONF_PS;
ieee80211_hw_config(local, IEEE80211_CONF_CHANGE_PS);
}
}
void ieee80211_dynamic_ps_timer(struct timer_list *t)
{
struct ieee80211_local *local = from_timer(local, t, dynamic_ps_timer);
ieee80211_queue_work(&local->hw, &local->dynamic_ps_enable_work);
}
void ieee80211_dfs_cac_timer_work(struct work_struct *work)
{
struct delayed_work *delayed_work = to_delayed_work(work);
struct ieee80211_link_data *link =
container_of(delayed_work, struct ieee80211_link_data,
dfs_cac_timer_work);
struct cfg80211_chan_def chandef = link->conf->chandef;
struct ieee80211_sub_if_data *sdata = link->sdata;
mutex_lock(&sdata->local->mtx);
if (sdata->wdev.cac_started) {
ieee80211_link_release_channel(link);
cfg80211_cac_event(sdata->dev, &chandef,
NL80211_RADAR_CAC_FINISHED,
GFP_KERNEL);
}
mutex_unlock(&sdata->local->mtx);
}
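/* Handle TSPEC-based admission control for the TX queues: restore the
* original queue parameters once a time slice ends, or downgrade an
* admission-controlled AC to the next lower non-ACM AC; returns true if
* any queue configuration changed.
*/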
static bool
__ieee80211_sta_handle_tspec_ac_params(struct ieee80211_sub_if_data *sdata)
{
struct ieee80211_local *local = sdata->local;
struct ieee80211_if_managed *ifmgd = &sdata->u.mgd;
bool ret = false;
int ac;
if (local->hw.queues < IEEE80211_NUM_ACS)
return false;
for (ac = 0; ac < IEEE80211_NUM_ACS; ac++) {
struct ieee80211_sta_tx_tspec *tx_tspec = &ifmgd->tx_tspec[ac];
int non_acm_ac;
unsigned long now = jiffies;
if (tx_tspec->action == TX_TSPEC_ACTION_NONE &&
tx_tspec->admitted_time &&
time_after(now, tx_tspec->time_slice_start + HZ)) {
tx_tspec->consumed_tx_time = 0;
tx_tspec->time_slice_start = now;
if (tx_tspec->downgraded)
tx_tspec->action =
TX_TSPEC_ACTION_STOP_DOWNGRADE;
}
switch (tx_tspec->action) {
case TX_TSPEC_ACTION_STOP_DOWNGRADE:
/* take the original parameters */
if (drv_conf_tx(local, &sdata->deflink, ac,
&sdata->deflink.tx_conf[ac]))
link_err(&sdata->deflink,
"failed to set TX queue parameters for queue %d\n",
ac);
tx_tspec->action = TX_TSPEC_ACTION_NONE;
tx_tspec->downgraded = false;
ret = true;
break;
case TX_TSPEC_ACTION_DOWNGRADE:
if (time_after(now, tx_tspec->time_slice_start + HZ)) {
tx_tspec->action = TX_TSPEC_ACTION_NONE;
ret = true;
break;
}
/* downgrade next lower non-ACM AC */
for (non_acm_ac = ac + 1;
non_acm_ac < IEEE80211_NUM_ACS;
non_acm_ac++)
if (!(sdata->wmm_acm & BIT(7 - 2 * non_acm_ac)))
break;
/* Usually the loop will result in using BK even if it
* requires admission control, but such a configuration
* makes no sense and we have to transmit somehow - the
* AC selection does the same thing.
* If we started out trying to downgrade from BK, then
* the extra condition here might be needed.
*/
if (non_acm_ac >= IEEE80211_NUM_ACS)
non_acm_ac = IEEE80211_AC_BK;
if (drv_conf_tx(local, &sdata->deflink, ac,
&sdata->deflink.tx_conf[non_acm_ac]))
link_err(&sdata->deflink,
"failed to set TX queue parameters for queue %d\n",
ac);
tx_tspec->action = TX_TSPEC_ACTION_NONE;
ret = true;
schedule_delayed_work(&ifmgd->tx_tspec_wk,
tx_tspec->time_slice_start + HZ - now + 1);
break;
case TX_TSPEC_ACTION_NONE:
/* nothing now */
break;
}
}
return ret;
}
void ieee80211_sta_handle_tspec_ac_params(struct ieee80211_sub_if_data *sdata)
{
if (__ieee80211_sta_handle_tspec_ac_params(sdata))
ieee80211_link_info_change_notify(sdata, &sdata->deflink,
BSS_CHANGED_QOS);
}
static void ieee80211_sta_handle_tspec_ac_params_wk(struct work_struct *work)
{
struct ieee80211_sub_if_data *sdata;
sdata = container_of(work, struct ieee80211_sub_if_data,
u.mgd.tx_tspec_wk.work);
ieee80211_sta_handle_tspec_ac_params(sdata);
}
void ieee80211_mgd_set_link_qos_params(struct ieee80211_link_data *link)
{
struct ieee80211_sub_if_data *sdata = link->sdata;
struct ieee80211_local *local = sdata->local;
struct ieee80211_if_managed *ifmgd = &sdata->u.mgd;
struct ieee80211_tx_queue_params *params = link->tx_conf;
u8 ac;
for (ac = 0; ac < IEEE80211_NUM_ACS; ac++) {
mlme_dbg(sdata,
"WMM AC=%d acm=%d aifs=%d cWmin=%d cWmax=%d txop=%d uapsd=%d, downgraded=%d\n",
ac, params[ac].acm,
params[ac].aifs, params[ac].cw_min, params[ac].cw_max,
params[ac].txop, params[ac].uapsd,
ifmgd->tx_tspec[ac].downgraded);
if (!ifmgd->tx_tspec[ac].downgraded &&
drv_conf_tx(local, link, ac, &params[ac]))
link_err(link,
"failed to set TX queue parameters for AC %d\n",
ac);
}
}
/* MLME */
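/* Parse the WMM parameter element (and optional MU EDCA parameters) from the
* AP and program the per-AC TX queue parameters; returns false if the
* element is invalid or unchanged since the last update.
*/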
static bool
ieee80211_sta_wmm_params(struct ieee80211_local *local,
struct ieee80211_link_data *link,
const u8 *wmm_param, size_t wmm_param_len,
const struct ieee80211_mu_edca_param_set *mu_edca)
{
struct ieee80211_sub_if_data *sdata = link->sdata;
struct ieee80211_tx_queue_params params[IEEE80211_NUM_ACS];
struct ieee80211_if_managed *ifmgd = &sdata->u.mgd;
size_t left;
int count, mu_edca_count, ac;
const u8 *pos;
u8 uapsd_queues = 0;
if (!local->ops->conf_tx)
return false;
if (local->hw.queues < IEEE80211_NUM_ACS)
return false;
if (!wmm_param)
return false;
if (wmm_param_len < 8 || wmm_param[5] /* version */ != 1)
return false;
if (ifmgd->flags & IEEE80211_STA_UAPSD_ENABLED)
uapsd_queues = ifmgd->uapsd_queues;
count = wmm_param[6] & 0x0f;
/* -1 is the initial value of ifmgd->mu_edca_last_param_set.
* If MU EDCA was present before and has now disappeared, tell
* the driver about it.
*/
mu_edca_count = mu_edca ? mu_edca->mu_qos_info & 0x0f : -1;
if (count == link->u.mgd.wmm_last_param_set &&
mu_edca_count == link->u.mgd.mu_edca_last_param_set)
return false;
link->u.mgd.wmm_last_param_set = count;
link->u.mgd.mu_edca_last_param_set = mu_edca_count;
pos = wmm_param + 8;
left = wmm_param_len - 8;
memset(&params, 0, sizeof(params));
sdata->wmm_acm = 0;
for (; left >= 4; left -= 4, pos += 4) {
int aci = (pos[0] >> 5) & 0x03;
int acm = (pos[0] >> 4) & 0x01;
bool uapsd = false;
switch (aci) {
case 1: /* AC_BK */
ac = IEEE80211_AC_BK;
if (acm)
sdata->wmm_acm |= BIT(1) | BIT(2); /* BK/- */
if (uapsd_queues & IEEE80211_WMM_IE_STA_QOSINFO_AC_BK)
uapsd = true;
params[ac].mu_edca = !!mu_edca;
if (mu_edca)
params[ac].mu_edca_param_rec = mu_edca->ac_bk;
break;
case 2: /* AC_VI */
ac = IEEE80211_AC_VI;
if (acm)
sdata->wmm_acm |= BIT(4) | BIT(5); /* CL/VI */
if (uapsd_queues & IEEE80211_WMM_IE_STA_QOSINFO_AC_VI)
uapsd = true;
params[ac].mu_edca = !!mu_edca;
if (mu_edca)
params[ac].mu_edca_param_rec = mu_edca->ac_vi;
break;
case 3: /* AC_VO */
ac = IEEE80211_AC_VO;
if (acm)
sdata->wmm_acm |= BIT(6) | BIT(7); /* VO/NC */
if (uapsd_queues & IEEE80211_WMM_IE_STA_QOSINFO_AC_VO)
uapsd = true;
params[ac].mu_edca = !!mu_edca;
if (mu_edca)
params[ac].mu_edca_param_rec = mu_edca->ac_vo;
break;
case 0: /* AC_BE */
default:
ac = IEEE80211_AC_BE;
if (acm)
sdata->wmm_acm |= BIT(0) | BIT(3); /* BE/EE */
if (uapsd_queues & IEEE80211_WMM_IE_STA_QOSINFO_AC_BE)
uapsd = true;
params[ac].mu_edca = !!mu_edca;
if (mu_edca)
params[ac].mu_edca_param_rec = mu_edca->ac_be;
break;
}
params[ac].aifs = pos[0] & 0x0f;
if (params[ac].aifs < 2) {
link_info(link,
"AP has invalid WMM params (AIFSN=%d for ACI %d), will use 2\n",
params[ac].aifs, aci);
params[ac].aifs = 2;
}
params[ac].cw_max = ecw2cw((pos[1] & 0xf0) >> 4);
params[ac].cw_min = ecw2cw(pos[1] & 0x0f);
params[ac].txop = get_unaligned_le16(pos + 2);
params[ac].acm = acm;
params[ac].uapsd = uapsd;
if (params[ac].cw_min == 0 ||
params[ac].cw_min > params[ac].cw_max) {
link_info(link,
"AP has invalid WMM params (CWmin/max=%d/%d for ACI %d), using defaults\n",
params[ac].cw_min, params[ac].cw_max, aci);
return false;
}
ieee80211_regulatory_limit_wmm_params(sdata, &params[ac], ac);
}
/* WMM specification requires all 4 ACIs. */
for (ac = 0; ac < IEEE80211_NUM_ACS; ac++) {
if (params[ac].cw_min == 0) {
link_info(link,
"AP has invalid WMM params (missing AC %d), using defaults\n",
ac);
return false;
}
}
for (ac = 0; ac < IEEE80211_NUM_ACS; ac++)
link->tx_conf[ac] = params[ac];
ieee80211_mgd_set_link_qos_params(link);
/* enable WMM or activate new settings */
link->conf->qos = true;
return true;
}
static void __ieee80211_stop_poll(struct ieee80211_sub_if_data *sdata)
{
lockdep_assert_held(&sdata->local->mtx);
sdata->u.mgd.flags &= ~IEEE80211_STA_CONNECTION_POLL;
ieee80211_run_deferred_scan(sdata->local);
}
static void ieee80211_stop_poll(struct ieee80211_sub_if_data *sdata)
{
mutex_lock(&sdata->local->mtx);
__ieee80211_stop_poll(sdata);
mutex_unlock(&sdata->local->mtx);
}
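/* Update ERP protection, short preamble and short slot time settings from
* the capability field and ERP information element, returning the
* BSS_CHANGED_* bits for whatever changed.
*/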
static u64 ieee80211_handle_bss_capability(struct ieee80211_link_data *link,
u16 capab, bool erp_valid, u8 erp)
{
struct ieee80211_bss_conf *bss_conf = link->conf;
struct ieee80211_supported_band *sband;
u64 changed = 0;
bool use_protection;
bool use_short_preamble;
bool use_short_slot;
sband = ieee80211_get_link_sband(link);
if (!sband)
return changed;
if (erp_valid) {
use_protection = (erp & WLAN_ERP_USE_PROTECTION) != 0;
use_short_preamble = (erp & WLAN_ERP_BARKER_PREAMBLE) == 0;
} else {
use_protection = false;
use_short_preamble = !!(capab & WLAN_CAPABILITY_SHORT_PREAMBLE);
}
use_short_slot = !!(capab & WLAN_CAPABILITY_SHORT_SLOT_TIME);
if (sband->band == NL80211_BAND_5GHZ ||
sband->band == NL80211_BAND_6GHZ)
use_short_slot = true;
if (use_protection != bss_conf->use_cts_prot) {
bss_conf->use_cts_prot = use_protection;
changed |= BSS_CHANGED_ERP_CTS_PROT;
}
if (use_short_preamble != bss_conf->use_short_preamble) {
bss_conf->use_short_preamble = use_short_preamble;
changed |= BSS_CHANGED_ERP_PREAMBLE;
}
if (use_short_slot != bss_conf->use_short_slot) {
bss_conf->use_short_slot = use_short_slot;
changed |= BSS_CHANGED_ERP_SLOT;
}
return changed;
}
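/* Apply per-link association state: beacon timeout, BSS capability handling,
* rate mask, P2P NoA attribute, beacon rate and CQM; returns the accumulated
* BSS_CHANGED_* flags for the link.
*/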
static u64 ieee80211_link_set_associated(struct ieee80211_link_data *link,
struct cfg80211_bss *cbss)
{
struct ieee80211_sub_if_data *sdata = link->sdata;
struct ieee80211_bss_conf *bss_conf = link->conf;
struct ieee80211_bss *bss = (void *)cbss->priv;
u64 changed = BSS_CHANGED_QOS;
/* not really used in MLO */
sdata->u.mgd.beacon_timeout =
usecs_to_jiffies(ieee80211_tu_to_usec(beacon_loss_count *
bss_conf->beacon_int));
changed |= ieee80211_handle_bss_capability(link,
bss_conf->assoc_capability,
bss->has_erp_value,
bss->erp_value);
ieee80211_check_rate_mask(link);
link->u.mgd.bss = cbss;
memcpy(link->u.mgd.bssid, cbss->bssid, ETH_ALEN);
if (sdata->vif.p2p ||
sdata->vif.driver_flags & IEEE80211_VIF_GET_NOA_UPDATE) {
const struct cfg80211_bss_ies *ies;
rcu_read_lock();
ies = rcu_dereference(cbss->ies);
if (ies) {
int ret;
ret = cfg80211_get_p2p_attr(
ies->data, ies->len,
IEEE80211_P2P_ATTR_ABSENCE_NOTICE,
(u8 *) &bss_conf->p2p_noa_attr,
sizeof(bss_conf->p2p_noa_attr));
if (ret >= 2) {
link->u.mgd.p2p_noa_index =
bss_conf->p2p_noa_attr.index;
changed |= BSS_CHANGED_P2P_PS;
}
}
rcu_read_unlock();
}
if (link->u.mgd.have_beacon) {
bss_conf->beacon_rate = bss->beacon_rate;
changed |= BSS_CHANGED_BEACON_INFO;
} else {
bss_conf->beacon_rate = NULL;
}
/* Tell the driver to monitor connection quality (if supported) */
if (sdata->vif.driver_flags & IEEE80211_VIF_SUPPORTS_CQM_RSSI &&
bss_conf->cqm_rssi_thold)
changed |= BSS_CHANGED_CQM;
return changed;
}
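/* Mark the interface as associated: update every successfully associated
* link, notify the driver of the changed vif and link configuration,
* recalculate SMPS and powersave, and bring the carrier up.
*/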
static void ieee80211_set_associated(struct ieee80211_sub_if_data *sdata,
struct ieee80211_mgd_assoc_data *assoc_data,
u64 changed[IEEE80211_MLD_MAX_NUM_LINKS])
{
struct ieee80211_local *local = sdata->local;
struct ieee80211_vif_cfg *vif_cfg = &sdata->vif.cfg;
u64 vif_changed = BSS_CHANGED_ASSOC;
unsigned int link_id;
sdata->u.mgd.associated = true;
for (link_id = 0; link_id < IEEE80211_MLD_MAX_NUM_LINKS; link_id++) {
struct cfg80211_bss *cbss = assoc_data->link[link_id].bss;
struct ieee80211_link_data *link;
if (!cbss ||
assoc_data->link[link_id].status != WLAN_STATUS_SUCCESS)
continue;
if (ieee80211_vif_is_mld(&sdata->vif) &&
!(ieee80211_vif_usable_links(&sdata->vif) & BIT(link_id)))
continue;
link = sdata_dereference(sdata->link[link_id], sdata);
if (WARN_ON(!link))
return;
changed[link_id] |= ieee80211_link_set_associated(link, cbss);
}
/* just to be sure */
ieee80211_stop_poll(sdata);
ieee80211_led_assoc(local, 1);
vif_cfg->assoc = 1;
/* Enable ARP filtering */
if (vif_cfg->arp_addr_cnt)
vif_changed |= BSS_CHANGED_ARP_FILTER;
if (ieee80211_vif_is_mld(&sdata->vif)) {
for (link_id = 0;
link_id < IEEE80211_MLD_MAX_NUM_LINKS;
link_id++) {
struct ieee80211_link_data *link;
struct cfg80211_bss *cbss = assoc_data->link[link_id].bss;
if (!cbss ||
!(BIT(link_id) &
ieee80211_vif_usable_links(&sdata->vif)) ||
assoc_data->link[link_id].status != WLAN_STATUS_SUCCESS)
continue;
link = sdata_dereference(sdata->link[link_id], sdata);
if (WARN_ON(!link))
return;
ieee80211_link_info_change_notify(sdata, link,
changed[link_id]);
ieee80211_recalc_smps(sdata, link);
}
ieee80211_vif_cfg_change_notify(sdata, vif_changed);
} else {
ieee80211_bss_info_change_notify(sdata,
vif_changed | changed[0]);
}
mutex_lock(&local->iflist_mtx);
ieee80211_recalc_ps(local);
mutex_unlock(&local->iflist_mtx);
/* leave this here to not change ordering in non-MLO cases */
if (!ieee80211_vif_is_mld(&sdata->vif))
ieee80211_recalc_smps(sdata, &sdata->deflink);
ieee80211_recalc_ps_vif(sdata);
netif_carrier_on(sdata->dev);
}
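/*
 * Tear down the association: leave powersave, flush queues, send (or
 * only build) the deauth/disassoc frame, remove the AP and TDLS
 * stations and reset all BSS, link and QoS configuration to defaults.
 */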
static void ieee80211_set_disassoc(struct ieee80211_sub_if_data *sdata,
u16 stype, u16 reason, bool tx,
u8 *frame_buf)
{
struct ieee80211_if_managed *ifmgd = &sdata->u.mgd;
struct ieee80211_local *local = sdata->local;
unsigned int link_id;
u64 changed = 0;
struct ieee80211_prep_tx_info info = {
.subtype = stype,
};
sdata_assert_lock(sdata);
if (WARN_ON_ONCE(tx && !frame_buf))
return;
if (WARN_ON(!ifmgd->associated))
return;
ieee80211_stop_poll(sdata);
ifmgd->associated = false;
/* other links will be destroyed */
sdata->deflink.u.mgd.bss = NULL;
netif_carrier_off(sdata->dev);
/*
* if we want to get out of ps before disassoc (why?) we have
* to do it before sending disassoc, as otherwise the null-packet
* won't be valid.
*/
if (local->hw.conf.flags & IEEE80211_CONF_PS) {
local->hw.conf.flags &= ~IEEE80211_CONF_PS;
ieee80211_hw_config(local, IEEE80211_CONF_CHANGE_PS);
}
local->ps_sdata = NULL;
/* disable per-vif ps */
ieee80211_recalc_ps_vif(sdata);
/* make sure ongoing transmission finishes */
synchronize_net();
/*
* drop any frame before deauth/disassoc, this can be data or
* management frame. Since we are disconnecting, we should not
* insist on sending these frames, which can take time and delay
* the disconnection and possibly the roaming.
*/
if (tx)
ieee80211_flush_queues(local, sdata, true);
/* deauthenticate/disassociate now */
if (tx || frame_buf) {
/*
* In multi channel scenarios guarantee that the virtual
* interface is granted immediate airtime to transmit the
* deauthentication frame by calling mgd_prepare_tx, if the
* driver requested it.
*/
if (ieee80211_hw_check(&local->hw, DEAUTH_NEED_MGD_TX_PREP) &&
!sdata->deflink.u.mgd.have_beacon) {
drv_mgd_prepare_tx(sdata->local, sdata, &info);
}
ieee80211_send_deauth_disassoc(sdata, sdata->vif.cfg.ap_addr,
sdata->vif.cfg.ap_addr, stype,
reason, tx, frame_buf);
}
/* flush out frame - make sure the deauth was actually sent */
if (tx)
ieee80211_flush_queues(local, sdata, false);
drv_mgd_complete_tx(sdata->local, sdata, &info);
/* clear AP addr only after building the needed mgmt frames */
eth_zero_addr(sdata->deflink.u.mgd.bssid);
eth_zero_addr(sdata->vif.cfg.ap_addr);
sdata->vif.cfg.ssid_len = 0;
/* remove AP and TDLS peers */
sta_info_flush(sdata);
/* finally reset all BSS / config parameters */
if (!ieee80211_vif_is_mld(&sdata->vif))
changed |= ieee80211_reset_erp_info(sdata);
ieee80211_led_assoc(local, 0);
changed |= BSS_CHANGED_ASSOC;
sdata->vif.cfg.assoc = false;
sdata->deflink.u.mgd.p2p_noa_index = -1;
memset(&sdata->vif.bss_conf.p2p_noa_attr, 0,
sizeof(sdata->vif.bss_conf.p2p_noa_attr));
/* on the next assoc, re-program HT/VHT parameters */
memset(&ifmgd->ht_capa, 0, sizeof(ifmgd->ht_capa));
memset(&ifmgd->ht_capa_mask, 0, sizeof(ifmgd->ht_capa_mask));
memset(&ifmgd->vht_capa, 0, sizeof(ifmgd->vht_capa));
memset(&ifmgd->vht_capa_mask, 0, sizeof(ifmgd->vht_capa_mask));
/*
* reset MU-MIMO ownership and group data in default link,
* if used, other links are destroyed
*/
memset(sdata->vif.bss_conf.mu_group.membership, 0,
sizeof(sdata->vif.bss_conf.mu_group.membership));
memset(sdata->vif.bss_conf.mu_group.position, 0,
sizeof(sdata->vif.bss_conf.mu_group.position));
if (!ieee80211_vif_is_mld(&sdata->vif))
changed |= BSS_CHANGED_MU_GROUPS;
sdata->vif.bss_conf.mu_mimo_owner = false;
sdata->deflink.ap_power_level = IEEE80211_UNSET_POWER_LEVEL;
del_timer_sync(&local->dynamic_ps_timer);
cancel_work_sync(&local->dynamic_ps_enable_work);
/* Disable ARP filtering */
if (sdata->vif.cfg.arp_addr_cnt)
changed |= BSS_CHANGED_ARP_FILTER;
sdata->vif.bss_conf.qos = false;
if (!ieee80211_vif_is_mld(&sdata->vif)) {
changed |= BSS_CHANGED_QOS;
/* The BSSID (not really interesting) and HT changed */
changed |= BSS_CHANGED_BSSID | BSS_CHANGED_HT;
ieee80211_bss_info_change_notify(sdata, changed);
} else {
ieee80211_vif_cfg_change_notify(sdata, changed);
}
/* disassociated - set to defaults now */
ieee80211_set_wmm_default(&sdata->deflink, false, false);
del_timer_sync(&sdata->u.mgd.conn_mon_timer);
del_timer_sync(&sdata->u.mgd.bcn_mon_timer);
del_timer_sync(&sdata->u.mgd.timer);
sdata->vif.bss_conf.dtim_period = 0;
sdata->vif.bss_conf.beacon_rate = NULL;
sdata->deflink.u.mgd.have_beacon = false;
sdata->deflink.u.mgd.tracking_signal_avg = false;
sdata->deflink.u.mgd.disable_wmm_tracking = false;
ifmgd->flags = 0;
sdata->deflink.u.mgd.conn_flags = 0;
mutex_lock(&local->mtx);
for (link_id = 0; link_id < ARRAY_SIZE(sdata->link); link_id++) {
struct ieee80211_link_data *link;
link = sdata_dereference(sdata->link[link_id], sdata);
if (!link)
continue;
ieee80211_link_release_channel(link);
}
sdata->vif.bss_conf.csa_active = false;
sdata->deflink.u.mgd.csa_waiting_bcn = false;
sdata->deflink.u.mgd.csa_ignored_same_chan = false;
if (sdata->deflink.csa_block_tx) {
ieee80211_wake_vif_queues(local, sdata,
IEEE80211_QUEUE_STOP_REASON_CSA);
sdata->deflink.csa_block_tx = false;
}
mutex_unlock(&local->mtx);
/* existing TX TSPEC sessions no longer exist */
memset(ifmgd->tx_tspec, 0, sizeof(ifmgd->tx_tspec));
cancel_delayed_work_sync(&ifmgd->tx_tspec_wk);
sdata->vif.bss_conf.pwr_reduction = 0;
sdata->vif.bss_conf.tx_pwr_env_num = 0;
memset(sdata->vif.bss_conf.tx_pwr_env, 0,
sizeof(sdata->vif.bss_conf.tx_pwr_env));
ieee80211_vif_set_links(sdata, 0, 0);
}
static void ieee80211_reset_ap_probe(struct ieee80211_sub_if_data *sdata)
{
struct ieee80211_if_managed *ifmgd = &sdata->u.mgd;
struct ieee80211_local *local = sdata->local;
mutex_lock(&local->mtx);
if (!(ifmgd->flags & IEEE80211_STA_CONNECTION_POLL))
goto out;
__ieee80211_stop_poll(sdata);
mutex_lock(&local->iflist_mtx);
ieee80211_recalc_ps(local);
mutex_unlock(&local->iflist_mtx);
if (ieee80211_hw_check(&sdata->local->hw, CONNECTION_MONITOR))
goto out;
/*
* We've received a probe response, but are not sure whether
* we have or will be receiving any beacons or data, so let's
* schedule the timers again, just in case.
*/
ieee80211_sta_reset_beacon_monitor(sdata);
mod_timer(&ifmgd->conn_mon_timer,
round_jiffies_up(jiffies +
IEEE80211_CONNECTION_IDLE_TIME));
out:
mutex_unlock(&local->mtx);
}
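/*
 * Account transmitted airtime against the admitted time of an active
 * TX TSPEC for the frame's AC, using one-second time slices: schedule
 * an AC downgrade once the admitted time is used up, and undo it when
 * a fresh time slice starts.
 */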
static void ieee80211_sta_tx_wmm_ac_notify(struct ieee80211_sub_if_data *sdata,
struct ieee80211_hdr *hdr,
u16 tx_time)
{
struct ieee80211_if_managed *ifmgd = &sdata->u.mgd;
u16 tid;
int ac;
struct ieee80211_sta_tx_tspec *tx_tspec;
unsigned long now = jiffies;
if (!ieee80211_is_data_qos(hdr->frame_control))
return;
tid = ieee80211_get_tid(hdr);
ac = ieee80211_ac_from_tid(tid);
tx_tspec = &ifmgd->tx_tspec[ac];
if (likely(!tx_tspec->admitted_time))
return;
if (time_after(now, tx_tspec->time_slice_start + HZ)) {
tx_tspec->consumed_tx_time = 0;
tx_tspec->time_slice_start = now;
if (tx_tspec->downgraded) {
tx_tspec->action = TX_TSPEC_ACTION_STOP_DOWNGRADE;
schedule_delayed_work(&ifmgd->tx_tspec_wk, 0);
}
}
if (tx_tspec->downgraded)
return;
tx_tspec->consumed_tx_time += tx_time;
if (tx_tspec->consumed_tx_time >= tx_tspec->admitted_time) {
tx_tspec->downgraded = true;
tx_tspec->action = TX_TSPEC_ACTION_DOWNGRADE;
schedule_delayed_work(&ifmgd->tx_tspec_wk, 0);
}
}
void ieee80211_sta_tx_notify(struct ieee80211_sub_if_data *sdata,
struct ieee80211_hdr *hdr, bool ack, u16 tx_time)
{
ieee80211_sta_tx_wmm_ac_notify(sdata, hdr, tx_time);
if (!ieee80211_is_any_nullfunc(hdr->frame_control) ||
!sdata->u.mgd.probe_send_count)
return;
if (ack)
sdata->u.mgd.probe_send_count = 0;
else
sdata->u.mgd.nullfunc_failed = true;
wiphy_work_queue(sdata->local->hw.wiphy, &sdata->work);
}
static void ieee80211_mlme_send_probe_req(struct ieee80211_sub_if_data *sdata,
const u8 *src, const u8 *dst,
const u8 *ssid, size_t ssid_len,
struct ieee80211_channel *channel)
{
struct sk_buff *skb;
skb = ieee80211_build_probe_req(sdata, src, dst, (u32)-1, channel,
ssid, ssid_len, NULL, 0,
IEEE80211_PROBE_FLAG_DIRECTED);
if (skb)
ieee80211_tx_skb(sdata, skb);
}
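/*
 * Send a single connection-monitoring probe: a nullfunc frame if the
 * hardware reports accurate TX ACK status, otherwise a directed (or,
 * after repeated failures, broadcast) probe request, and re-arm the
 * probe timeout.
 */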
static void ieee80211_mgd_probe_ap_send(struct ieee80211_sub_if_data *sdata)
{
struct ieee80211_if_managed *ifmgd = &sdata->u.mgd;
u8 *dst = sdata->vif.cfg.ap_addr;
u8 unicast_limit = max(1, max_probe_tries - 3);
struct sta_info *sta;
if (WARN_ON(ieee80211_vif_is_mld(&sdata->vif)))
return;
/*
* Try sending broadcast probe requests for the last three
* probe requests after the first ones failed since some
* buggy APs only support broadcast probe requests.
*/
if (ifmgd->probe_send_count >= unicast_limit)
dst = NULL;
/*
* When the hardware reports an accurate Tx ACK status, it's
* better to send a nullfunc frame instead of a probe request,
* as it will kick us off the AP quickly if we aren't associated
* anymore. The timeout will be reset if the frame is ACKed by
* the AP.
*/
ifmgd->probe_send_count++;
if (dst) {
mutex_lock(&sdata->local->sta_mtx);
sta = sta_info_get(sdata, dst);
if (!WARN_ON(!sta))
ieee80211_check_fast_rx(sta);
mutex_unlock(&sdata->local->sta_mtx);
}
if (ieee80211_hw_check(&sdata->local->hw, REPORTS_TX_ACK_STATUS)) {
ifmgd->nullfunc_failed = false;
ieee80211_send_nullfunc(sdata->local, sdata, false);
} else {
ieee80211_mlme_send_probe_req(sdata, sdata->vif.addr, dst,
sdata->vif.cfg.ssid,
sdata->vif.cfg.ssid_len,
sdata->deflink.u.mgd.bss->channel);
}
ifmgd->probe_timeout = jiffies + msecs_to_jiffies(probe_wait_ms);
run_again(sdata, ifmgd->probe_timeout);
}
static void ieee80211_mgd_probe_ap(struct ieee80211_sub_if_data *sdata,
bool beacon)
{
struct ieee80211_if_managed *ifmgd = &sdata->u.mgd;
bool already = false;
if (WARN_ON_ONCE(ieee80211_vif_is_mld(&sdata->vif)))
return;
if (!ieee80211_sdata_running(sdata))
return;
sdata_lock(sdata);
if (!ifmgd->associated)
goto out;
mutex_lock(&sdata->local->mtx);
if (sdata->local->tmp_channel || sdata->local->scanning) {
mutex_unlock(&sdata->local->mtx);
goto out;
}
if (sdata->local->suspending) {
/* reschedule after resume */
mutex_unlock(&sdata->local->mtx);
ieee80211_reset_ap_probe(sdata);
goto out;
}
if (beacon) {
mlme_dbg_ratelimited(sdata,
"detected beacon loss from AP (missed %d beacons) - probing\n",
beacon_loss_count);
ieee80211_cqm_beacon_loss_notify(&sdata->vif, GFP_KERNEL);
}
/*
* The driver/our work has already reported this event or the
* connection monitoring has kicked in and we have already sent
* a probe request. Or maybe the AP died and the driver keeps
* reporting until we disassociate...
*
* In either case we have to ignore the current call to this
* function (except for setting the correct probe reason bit)
* because otherwise we would reset the timer every time and
* never check whether we received a probe response!
*/
if (ifmgd->flags & IEEE80211_STA_CONNECTION_POLL)
already = true;
ifmgd->flags |= IEEE80211_STA_CONNECTION_POLL;
mutex_unlock(&sdata->local->mtx);
if (already)
goto out;
mutex_lock(&sdata->local->iflist_mtx);
ieee80211_recalc_ps(sdata->local);
mutex_unlock(&sdata->local->iflist_mtx);
ifmgd->probe_send_count = 0;
ieee80211_mgd_probe_ap_send(sdata);
out:
sdata_unlock(sdata);
}
struct sk_buff *ieee80211_ap_probereq_get(struct ieee80211_hw *hw,
struct ieee80211_vif *vif)
{
struct ieee80211_sub_if_data *sdata = vif_to_sdata(vif);
struct ieee80211_if_managed *ifmgd = &sdata->u.mgd;
struct cfg80211_bss *cbss;
struct sk_buff *skb;
const struct element *ssid;
int ssid_len;
if (WARN_ON(sdata->vif.type != NL80211_IFTYPE_STATION ||
ieee80211_vif_is_mld(&sdata->vif)))
return NULL;
sdata_assert_lock(sdata);
if (ifmgd->associated)
cbss = sdata->deflink.u.mgd.bss;
else if (ifmgd->auth_data)
cbss = ifmgd->auth_data->bss;
else if (ifmgd->assoc_data && ifmgd->assoc_data->link[0].bss)
cbss = ifmgd->assoc_data->link[0].bss;
else
return NULL;
rcu_read_lock();
ssid = ieee80211_bss_get_elem(cbss, WLAN_EID_SSID);
if (WARN_ONCE(!ssid || ssid->datalen > IEEE80211_MAX_SSID_LEN,
"invalid SSID element (len=%d)",
ssid ? ssid->datalen : -1))
ssid_len = 0;
else
ssid_len = ssid->datalen;
skb = ieee80211_build_probe_req(sdata, sdata->vif.addr, cbss->bssid,
(u32) -1, cbss->channel,
ssid->data, ssid_len,
NULL, 0, IEEE80211_PROBE_FLAG_DIRECTED);
rcu_read_unlock();
return skb;
}
EXPORT_SYMBOL(ieee80211_ap_probereq_get);
static void ieee80211_report_disconnect(struct ieee80211_sub_if_data *sdata,
const u8 *buf, size_t len, bool tx,
u16 reason, bool reconnect)
{
struct ieee80211_event event = {
.type = MLME_EVENT,
.u.mlme.data = tx ? DEAUTH_TX_EVENT : DEAUTH_RX_EVENT,
.u.mlme.reason = reason,
};
if (tx)
cfg80211_tx_mlme_mgmt(sdata->dev, buf, len, reconnect);
else
cfg80211_rx_mlme_mgmt(sdata->dev, buf, len);
drv_event_callback(sdata->local, sdata, &event);
}
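/*
 * Common disconnect path for beacon/connection loss and
 * driver-requested disconnects: unlink the now-stale BSS entries
 * (unless the driver asked for the disconnect), send/build the deauth
 * frame and report the disconnection to cfg80211.
 */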
static void ___ieee80211_disconnect(struct ieee80211_sub_if_data *sdata)
{
struct ieee80211_local *local = sdata->local;
struct ieee80211_if_managed *ifmgd = &sdata->u.mgd;
u8 frame_buf[IEEE80211_DEAUTH_FRAME_LEN];
bool tx;
if (!ifmgd->associated)
return;
/* in MLO assume we have a link where we can TX the frame */
tx = ieee80211_vif_is_mld(&sdata->vif) ||
!sdata->deflink.csa_block_tx;
if (!ifmgd->driver_disconnect) {
unsigned int link_id;
/*
* AP is probably out of range (or not reachable for another
* reason) so remove the bss structs for that AP. In the case
* of multi-link, it's not clear that all of them really are
* out of range, but if they weren't, the driver would likely
* have switched to just a single active link anyway.
*/
for (link_id = 0;
link_id < ARRAY_SIZE(sdata->link);
link_id++) {
struct ieee80211_link_data *link;
link = sdata_dereference(sdata->link[link_id], sdata);
if (!link)
continue;
cfg80211_unlink_bss(local->hw.wiphy, link->u.mgd.bss);
link->u.mgd.bss = NULL;
}
}
ieee80211_set_disassoc(sdata, IEEE80211_STYPE_DEAUTH,
ifmgd->driver_disconnect ?
WLAN_REASON_DEAUTH_LEAVING :
WLAN_REASON_DISASSOC_DUE_TO_INACTIVITY,
tx, frame_buf);
mutex_lock(&local->mtx);
/* the other links will be destroyed */
sdata->vif.bss_conf.csa_active = false;
sdata->deflink.u.mgd.csa_waiting_bcn = false;
if (sdata->deflink.csa_block_tx) {
ieee80211_wake_vif_queues(local, sdata,
IEEE80211_QUEUE_STOP_REASON_CSA);
sdata->deflink.csa_block_tx = false;
}
mutex_unlock(&local->mtx);
ieee80211_report_disconnect(sdata, frame_buf, sizeof(frame_buf), tx,
WLAN_REASON_DISASSOC_DUE_TO_INACTIVITY,
ifmgd->reconnect);
ifmgd->reconnect = false;
}
static void __ieee80211_disconnect(struct ieee80211_sub_if_data *sdata)
{
sdata_lock(sdata);
___ieee80211_disconnect(sdata);
sdata_unlock(sdata);
}
static void ieee80211_beacon_connection_loss_work(struct wiphy *wiphy,
struct wiphy_work *work)
{
struct ieee80211_sub_if_data *sdata =
container_of(work, struct ieee80211_sub_if_data,
u.mgd.beacon_connection_loss_work);
struct ieee80211_if_managed *ifmgd = &sdata->u.mgd;
if (ifmgd->connection_loss) {
sdata_info(sdata, "Connection to AP %pM lost\n",
sdata->vif.cfg.ap_addr);
__ieee80211_disconnect(sdata);
ifmgd->connection_loss = false;
} else if (ifmgd->driver_disconnect) {
sdata_info(sdata,
"Driver requested disconnection from AP %pM\n",
sdata->vif.cfg.ap_addr);
__ieee80211_disconnect(sdata);
ifmgd->driver_disconnect = false;
} else {
if (ifmgd->associated)
sdata->deflink.u.mgd.beacon_loss_count++;
ieee80211_mgd_probe_ap(sdata, true);
}
}
static void ieee80211_csa_connection_drop_work(struct wiphy *wiphy,
struct wiphy_work *work)
{
struct ieee80211_sub_if_data *sdata =
container_of(work, struct ieee80211_sub_if_data,
u.mgd.csa_connection_drop_work);
__ieee80211_disconnect(sdata);
}
void ieee80211_beacon_loss(struct ieee80211_vif *vif)
{
struct ieee80211_sub_if_data *sdata = vif_to_sdata(vif);
struct ieee80211_hw *hw = &sdata->local->hw;
trace_api_beacon_loss(sdata);
sdata->u.mgd.connection_loss = false;
wiphy_work_queue(hw->wiphy, &sdata->u.mgd.beacon_connection_loss_work);
}
EXPORT_SYMBOL(ieee80211_beacon_loss);
void ieee80211_connection_loss(struct ieee80211_vif *vif)
{
struct ieee80211_sub_if_data *sdata = vif_to_sdata(vif);
struct ieee80211_hw *hw = &sdata->local->hw;
trace_api_connection_loss(sdata);
sdata->u.mgd.connection_loss = true;
wiphy_work_queue(hw->wiphy, &sdata->u.mgd.beacon_connection_loss_work);
}
EXPORT_SYMBOL(ieee80211_connection_loss);
void ieee80211_disconnect(struct ieee80211_vif *vif, bool reconnect)
{
struct ieee80211_sub_if_data *sdata = vif_to_sdata(vif);
struct ieee80211_hw *hw = &sdata->local->hw;
trace_api_disconnect(sdata, reconnect);
if (WARN_ON(sdata->vif.type != NL80211_IFTYPE_STATION))
return;
sdata->u.mgd.driver_disconnect = true;
sdata->u.mgd.reconnect = reconnect;
wiphy_work_queue(hw->wiphy, &sdata->u.mgd.beacon_connection_loss_work);
}
EXPORT_SYMBOL(ieee80211_disconnect);
static void ieee80211_destroy_auth_data(struct ieee80211_sub_if_data *sdata,
bool assoc)
{
struct ieee80211_mgd_auth_data *auth_data = sdata->u.mgd.auth_data;
sdata_assert_lock(sdata);
if (!assoc) {
/*
* we are not authenticated yet, the only timer that could be
* running is the timeout for the authentication response,
* which is not relevant anymore.
*/
del_timer_sync(&sdata->u.mgd.timer);
sta_info_destroy_addr(sdata, auth_data->ap_addr);
/* other links are destroyed */
sdata->deflink.u.mgd.conn_flags = 0;
eth_zero_addr(sdata->deflink.u.mgd.bssid);
ieee80211_link_info_change_notify(sdata, &sdata->deflink,
BSS_CHANGED_BSSID);
sdata->u.mgd.flags = 0;
mutex_lock(&sdata->local->mtx);
ieee80211_link_release_channel(&sdata->deflink);
ieee80211_vif_set_links(sdata, 0, 0);
mutex_unlock(&sdata->local->mtx);
}
cfg80211_put_bss(sdata->local->hw.wiphy, auth_data->bss);
kfree(auth_data);
sdata->u.mgd.auth_data = NULL;
}
enum assoc_status {
ASSOC_SUCCESS,
ASSOC_REJECTED,
ASSOC_TIMEOUT,
ASSOC_ABANDON,
};
static void ieee80211_destroy_assoc_data(struct ieee80211_sub_if_data *sdata,
enum assoc_status status)
{
struct ieee80211_mgd_assoc_data *assoc_data = sdata->u.mgd.assoc_data;
sdata_assert_lock(sdata);
if (status != ASSOC_SUCCESS) {
/*
* we are not associated yet, the only timer that could be
* running is the timeout for the association response,
* which is not relevant anymore.
*/
del_timer_sync(&sdata->u.mgd.timer);
sta_info_destroy_addr(sdata, assoc_data->ap_addr);
sdata->deflink.u.mgd.conn_flags = 0;
eth_zero_addr(sdata->deflink.u.mgd.bssid);
ieee80211_link_info_change_notify(sdata, &sdata->deflink,
BSS_CHANGED_BSSID);
sdata->u.mgd.flags = 0;
sdata->vif.bss_conf.mu_mimo_owner = false;
if (status != ASSOC_REJECTED) {
struct cfg80211_assoc_failure data = {
.timeout = status == ASSOC_TIMEOUT,
};
int i;
BUILD_BUG_ON(ARRAY_SIZE(data.bss) !=
ARRAY_SIZE(assoc_data->link));
for (i = 0; i < ARRAY_SIZE(data.bss); i++)
data.bss[i] = assoc_data->link[i].bss;
if (ieee80211_vif_is_mld(&sdata->vif))
data.ap_mld_addr = assoc_data->ap_addr;
cfg80211_assoc_failure(sdata->dev, &data);
}
mutex_lock(&sdata->local->mtx);
ieee80211_link_release_channel(&sdata->deflink);
ieee80211_vif_set_links(sdata, 0, 0);
mutex_unlock(&sdata->local->mtx);
}
kfree(assoc_data);
sdata->u.mgd.assoc_data = NULL;
}
static void ieee80211_auth_challenge(struct ieee80211_sub_if_data *sdata,
struct ieee80211_mgmt *mgmt, size_t len)
{
struct ieee80211_local *local = sdata->local;
struct ieee80211_mgd_auth_data *auth_data = sdata->u.mgd.auth_data;
const struct element *challenge;
u8 *pos;
u32 tx_flags = 0;
struct ieee80211_prep_tx_info info = {
.subtype = IEEE80211_STYPE_AUTH,
};
pos = mgmt->u.auth.variable;
challenge = cfg80211_find_elem(WLAN_EID_CHALLENGE, pos,
len - (pos - (u8 *)mgmt));
if (!challenge)
return;
auth_data->expected_transaction = 4;
drv_mgd_prepare_tx(sdata->local, sdata, &info);
if (ieee80211_hw_check(&local->hw, REPORTS_TX_ACK_STATUS))
tx_flags = IEEE80211_TX_CTL_REQ_TX_STATUS |
IEEE80211_TX_INTFL_MLME_CONN_TX;
ieee80211_send_auth(sdata, 3, auth_data->algorithm, 0,
(void *)challenge,
challenge->datalen + sizeof(*challenge),
auth_data->ap_addr, auth_data->ap_addr,
auth_data->key, auth_data->key_len,
auth_data->key_idx, tx_flags);
}
static bool ieee80211_mark_sta_auth(struct ieee80211_sub_if_data *sdata)
{
struct ieee80211_if_managed *ifmgd = &sdata->u.mgd;
const u8 *ap_addr = ifmgd->auth_data->ap_addr;
struct sta_info *sta;
bool result = true;
sdata_info(sdata, "authenticated\n");
ifmgd->auth_data->done = true;
ifmgd->auth_data->timeout = jiffies + IEEE80211_AUTH_WAIT_ASSOC;
ifmgd->auth_data->timeout_started = true;
run_again(sdata, ifmgd->auth_data->timeout);
/* move station state to auth */
mutex_lock(&sdata->local->sta_mtx);
sta = sta_info_get(sdata, ap_addr);
if (!sta) {
WARN_ONCE(1, "%s: STA %pM not found", sdata->name, ap_addr);
result = false;
goto out;
}
if (sta_info_move_state(sta, IEEE80211_STA_AUTH)) {
sdata_info(sdata, "failed moving %pM to auth\n", ap_addr);
result = false;
goto out;
}
out:
mutex_unlock(&sdata->local->sta_mtx);
return result;
}
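/*
 * Handle a received Authentication frame: validate the algorithm and
 * transaction state, handle SAE retry/anti-clogging status codes, run
 * the shared-key challenge exchange if needed and move the AP station
 * to AUTH state on success before reporting the frame to cfg80211.
 */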
static void ieee80211_rx_mgmt_auth(struct ieee80211_sub_if_data *sdata,
struct ieee80211_mgmt *mgmt, size_t len)
{
struct ieee80211_if_managed *ifmgd = &sdata->u.mgd;
u16 auth_alg, auth_transaction, status_code;
struct ieee80211_event event = {
.type = MLME_EVENT,
.u.mlme.data = AUTH_EVENT,
};
struct ieee80211_prep_tx_info info = {
.subtype = IEEE80211_STYPE_AUTH,
};
sdata_assert_lock(sdata);
if (len < 24 + 6)
return;
if (!ifmgd->auth_data || ifmgd->auth_data->done)
return;
if (!ether_addr_equal(ifmgd->auth_data->ap_addr, mgmt->bssid))
return;
auth_alg = le16_to_cpu(mgmt->u.auth.auth_alg);
auth_transaction = le16_to_cpu(mgmt->u.auth.auth_transaction);
status_code = le16_to_cpu(mgmt->u.auth.status_code);
if (auth_alg != ifmgd->auth_data->algorithm ||
(auth_alg != WLAN_AUTH_SAE &&
auth_transaction != ifmgd->auth_data->expected_transaction) ||
(auth_alg == WLAN_AUTH_SAE &&
(auth_transaction < ifmgd->auth_data->expected_transaction ||
auth_transaction > 2))) {
sdata_info(sdata, "%pM unexpected authentication state: alg %d (expected %d) transact %d (expected %d)\n",
mgmt->sa, auth_alg, ifmgd->auth_data->algorithm,
auth_transaction,
ifmgd->auth_data->expected_transaction);
goto notify_driver;
}
if (status_code != WLAN_STATUS_SUCCESS) {
cfg80211_rx_mlme_mgmt(sdata->dev, (u8 *)mgmt, len);
if (auth_alg == WLAN_AUTH_SAE &&
(status_code == WLAN_STATUS_ANTI_CLOG_REQUIRED ||
(auth_transaction == 1 &&
(status_code == WLAN_STATUS_SAE_HASH_TO_ELEMENT ||
status_code == WLAN_STATUS_SAE_PK)))) {
/* waiting for userspace now */
ifmgd->auth_data->waiting = true;
ifmgd->auth_data->timeout =
jiffies + IEEE80211_AUTH_WAIT_SAE_RETRY;
ifmgd->auth_data->timeout_started = true;
run_again(sdata, ifmgd->auth_data->timeout);
goto notify_driver;
}
sdata_info(sdata, "%pM denied authentication (status %d)\n",
mgmt->sa, status_code);
ieee80211_destroy_auth_data(sdata, false);
event.u.mlme.status = MLME_DENIED;
event.u.mlme.reason = status_code;
drv_event_callback(sdata->local, sdata, &event);
goto notify_driver;
}
switch (ifmgd->auth_data->algorithm) {
case WLAN_AUTH_OPEN:
case WLAN_AUTH_LEAP:
case WLAN_AUTH_FT:
case WLAN_AUTH_SAE:
case WLAN_AUTH_FILS_SK:
case WLAN_AUTH_FILS_SK_PFS:
case WLAN_AUTH_FILS_PK:
break;
case WLAN_AUTH_SHARED_KEY:
if (ifmgd->auth_data->expected_transaction != 4) {
ieee80211_auth_challenge(sdata, mgmt, len);
/* need another frame */
return;
}
break;
default:
WARN_ONCE(1, "invalid auth alg %d",
ifmgd->auth_data->algorithm);
goto notify_driver;
}
event.u.mlme.status = MLME_SUCCESS;
info.success = 1;
drv_event_callback(sdata->local, sdata, &event);
if (ifmgd->auth_data->algorithm != WLAN_AUTH_SAE ||
(auth_transaction == 2 &&
ifmgd->auth_data->expected_transaction == 2)) {
if (!ieee80211_mark_sta_auth(sdata))
return; /* ignore frame -- wait for timeout */
} else if (ifmgd->auth_data->algorithm == WLAN_AUTH_SAE &&
auth_transaction == 2) {
sdata_info(sdata, "SAE peer confirmed\n");
ifmgd->auth_data->peer_confirmed = true;
}
cfg80211_rx_mlme_mgmt(sdata->dev, (u8 *)mgmt, len);
notify_driver:
drv_mgd_complete_tx(sdata->local, sdata, &info);
}
#define case_WLAN(type) \
case WLAN_REASON_##type: return #type
const char *ieee80211_get_reason_code_string(u16 reason_code)
{
switch (reason_code) {
case_WLAN(UNSPECIFIED);
case_WLAN(PREV_AUTH_NOT_VALID);
case_WLAN(DEAUTH_LEAVING);
case_WLAN(DISASSOC_DUE_TO_INACTIVITY);
case_WLAN(DISASSOC_AP_BUSY);
case_WLAN(CLASS2_FRAME_FROM_NONAUTH_STA);
case_WLAN(CLASS3_FRAME_FROM_NONASSOC_STA);
case_WLAN(DISASSOC_STA_HAS_LEFT);
case_WLAN(STA_REQ_ASSOC_WITHOUT_AUTH);
case_WLAN(DISASSOC_BAD_POWER);
case_WLAN(DISASSOC_BAD_SUPP_CHAN);
case_WLAN(INVALID_IE);
case_WLAN(MIC_FAILURE);
case_WLAN(4WAY_HANDSHAKE_TIMEOUT);
case_WLAN(GROUP_KEY_HANDSHAKE_TIMEOUT);
case_WLAN(IE_DIFFERENT);
case_WLAN(INVALID_GROUP_CIPHER);
case_WLAN(INVALID_PAIRWISE_CIPHER);
case_WLAN(INVALID_AKMP);
case_WLAN(UNSUPP_RSN_VERSION);
case_WLAN(INVALID_RSN_IE_CAP);
case_WLAN(IEEE8021X_FAILED);
case_WLAN(CIPHER_SUITE_REJECTED);
case_WLAN(DISASSOC_UNSPECIFIED_QOS);
case_WLAN(DISASSOC_QAP_NO_BANDWIDTH);
case_WLAN(DISASSOC_LOW_ACK);
case_WLAN(DISASSOC_QAP_EXCEED_TXOP);
case_WLAN(QSTA_LEAVE_QBSS);
case_WLAN(QSTA_NOT_USE);
case_WLAN(QSTA_REQUIRE_SETUP);
case_WLAN(QSTA_TIMEOUT);
case_WLAN(QSTA_CIPHER_NOT_SUPP);
case_WLAN(MESH_PEER_CANCELED);
case_WLAN(MESH_MAX_PEERS);
case_WLAN(MESH_CONFIG);
case_WLAN(MESH_CLOSE);
case_WLAN(MESH_MAX_RETRIES);
case_WLAN(MESH_CONFIRM_TIMEOUT);
case_WLAN(MESH_INVALID_GTK);
case_WLAN(MESH_INCONSISTENT_PARAM);
case_WLAN(MESH_INVALID_SECURITY);
case_WLAN(MESH_PATH_ERROR);
case_WLAN(MESH_PATH_NOFORWARD);
case_WLAN(MESH_PATH_DEST_UNREACHABLE);
case_WLAN(MAC_EXISTS_IN_MBSS);
case_WLAN(MESH_CHAN_REGULATORY);
case_WLAN(MESH_CHAN);
default: return "<unknown>";
}
}
static void ieee80211_rx_mgmt_deauth(struct ieee80211_sub_if_data *sdata,
struct ieee80211_mgmt *mgmt, size_t len)
{
struct ieee80211_if_managed *ifmgd = &sdata->u.mgd;
u16 reason_code = le16_to_cpu(mgmt->u.deauth.reason_code);
sdata_assert_lock(sdata);
if (len < 24 + 2)
return;
if (!ether_addr_equal(mgmt->bssid, mgmt->sa)) {
ieee80211_tdls_handle_disconnect(sdata, mgmt->sa, reason_code);
return;
}
if (ifmgd->associated &&
ether_addr_equal(mgmt->bssid, sdata->vif.cfg.ap_addr)) {
sdata_info(sdata, "deauthenticated from %pM (Reason: %u=%s)\n",
sdata->vif.cfg.ap_addr, reason_code,
ieee80211_get_reason_code_string(reason_code));
ieee80211_set_disassoc(sdata, 0, 0, false, NULL);
ieee80211_report_disconnect(sdata, (u8 *)mgmt, len, false,
reason_code, false);
return;
}
if (ifmgd->assoc_data &&
ether_addr_equal(mgmt->bssid, ifmgd->assoc_data->ap_addr)) {
sdata_info(sdata,
"deauthenticated from %pM while associating (Reason: %u=%s)\n",
ifmgd->assoc_data->ap_addr, reason_code,
ieee80211_get_reason_code_string(reason_code));
ieee80211_destroy_assoc_data(sdata, ASSOC_ABANDON);
cfg80211_rx_mlme_mgmt(sdata->dev, (u8 *)mgmt, len);
return;
}
}
static void ieee80211_rx_mgmt_disassoc(struct ieee80211_sub_if_data *sdata,
struct ieee80211_mgmt *mgmt, size_t len)
{
struct ieee80211_if_managed *ifmgd = &sdata->u.mgd;
u16 reason_code;
sdata_assert_lock(sdata);
if (len < 24 + 2)
return;
if (!ifmgd->associated ||
!ether_addr_equal(mgmt->bssid, sdata->vif.cfg.ap_addr))
return;
reason_code = le16_to_cpu(mgmt->u.disassoc.reason_code);
if (!ether_addr_equal(mgmt->bssid, mgmt->sa)) {
ieee80211_tdls_handle_disconnect(sdata, mgmt->sa, reason_code);
return;
}
sdata_info(sdata, "disassociated from %pM (Reason: %u=%s)\n",
sdata->vif.cfg.ap_addr, reason_code,
ieee80211_get_reason_code_string(reason_code));
ieee80211_set_disassoc(sdata, 0, 0, false, NULL);
ieee80211_report_disconnect(sdata, (u8 *)mgmt, len, false, reason_code,
false);
}
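/*
 * Walk the AP's (extended) supported rates: each legacy rate (low 7
 * bits, in 500 kbps units) is mapped onto the sband bitrate table,
 * BSS membership selectors are skipped, and the supported/basic rate
 * masks plus the minimum rate are collected for the caller.
 */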
static void ieee80211_get_rates(struct ieee80211_supported_band *sband,
u8 *supp_rates, unsigned int supp_rates_len,
u32 *rates, u32 *basic_rates,
bool *have_higher_than_11mbit,
int *min_rate, int *min_rate_index,
int shift)
{
int i, j;
for (i = 0; i < supp_rates_len; i++) {
int rate = supp_rates[i] & 0x7f;
bool is_basic = !!(supp_rates[i] & 0x80);
if ((rate * 5 * (1 << shift)) > 110)
*have_higher_than_11mbit = true;
/*
* Skip HT, VHT, HE, EHT and SAE H2E only BSS membership
* selectors since they're not rates.
*
* Note: Even though the membership selector and the basic
* rate flag share the same bit, they are not exactly
* the same.
*/
if (supp_rates[i] == (0x80 | BSS_MEMBERSHIP_SELECTOR_HT_PHY) ||
supp_rates[i] == (0x80 | BSS_MEMBERSHIP_SELECTOR_VHT_PHY) ||
supp_rates[i] == (0x80 | BSS_MEMBERSHIP_SELECTOR_HE_PHY) ||
supp_rates[i] == (0x80 | BSS_MEMBERSHIP_SELECTOR_EHT_PHY) ||
supp_rates[i] == (0x80 | BSS_MEMBERSHIP_SELECTOR_SAE_H2E))
continue;
for (j = 0; j < sband->n_bitrates; j++) {
struct ieee80211_rate *br;
int brate;
br = &sband->bitrates[j];
brate = DIV_ROUND_UP(br->bitrate, (1 << shift) * 5);
if (brate == rate) {
*rates |= BIT(j);
if (is_basic)
*basic_rates |= BIT(j);
if ((rate * 5) < *min_rate) {
*min_rate = rate * 5;
*min_rate_index = j;
}
break;
}
}
}
}
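/*
 * TWT requester support requires both sides: the AP must advertise TWT
 * responder support in its extended capabilities and HE MAC
 * capabilities, and we must advertise TWT requester support ourselves.
 */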
static bool ieee80211_twt_req_supported(struct ieee80211_sub_if_data *sdata,
struct ieee80211_supported_band *sband,
const struct link_sta_info *link_sta,
const struct ieee802_11_elems *elems)
{
const struct ieee80211_sta_he_cap *own_he_cap =
ieee80211_get_he_iftype_cap_vif(sband, &sdata->vif);
if (elems->ext_capab_len < 10)
return false;
if (!(elems->ext_capab[9] & WLAN_EXT_CAPA10_TWT_RESPONDER_SUPPORT))
return false;
return link_sta->pub->he_cap.he_cap_elem.mac_cap_info[0] &
IEEE80211_HE_MAC_CAP0_TWT_RES &&
own_he_cap &&
(own_he_cap->he_cap_elem.mac_cap_info[0] &
IEEE80211_HE_MAC_CAP0_TWT_REQ);
}
static u64 ieee80211_recalc_twt_req(struct ieee80211_sub_if_data *sdata,
struct ieee80211_supported_band *sband,
struct ieee80211_link_data *link,
struct link_sta_info *link_sta,
struct ieee802_11_elems *elems)
{
bool twt = ieee80211_twt_req_supported(sdata, sband, link_sta, elems);
if (link->conf->twt_requester != twt) {
link->conf->twt_requester = twt;
return BSS_CHANGED_TWT;
}
return 0;
}
static bool ieee80211_twt_bcast_support(struct ieee80211_sub_if_data *sdata,
struct ieee80211_bss_conf *bss_conf,
struct ieee80211_supported_band *sband,
struct link_sta_info *link_sta)
{
const struct ieee80211_sta_he_cap *own_he_cap =
ieee80211_get_he_iftype_cap_vif(sband, &sdata->vif);
return bss_conf->he_support &&
(link_sta->pub->he_cap.he_cap_elem.mac_cap_info[2] &
IEEE80211_HE_MAC_CAP2_BCAST_TWT) &&
own_he_cap &&
(own_he_cap->he_cap_elem.mac_cap_info[2] &
IEEE80211_HE_MAC_CAP2_BCAST_TWT);
}
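/*
 * Parse the (re)association response elements for one link, working
 * around APs that omit WMM/HT/VHT information, and configure the
 * link's HT/VHT/HE/EHT station capabilities, TWT, BSS color, WMM and
 * related BSS parameters. Returns false if the response is unusable.
 */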
static bool ieee80211_assoc_config_link(struct ieee80211_link_data *link,
struct link_sta_info *link_sta,
struct cfg80211_bss *cbss,
struct ieee80211_mgmt *mgmt,
const u8 *elem_start,
unsigned int elem_len,
u64 *changed)
{
struct ieee80211_sub_if_data *sdata = link->sdata;
struct ieee80211_mgd_assoc_data *assoc_data = sdata->u.mgd.assoc_data;
struct ieee80211_bss_conf *bss_conf = link->conf;
struct ieee80211_local *local = sdata->local;
unsigned int link_id = link->link_id;
struct ieee80211_elems_parse_params parse_params = {
.start = elem_start,
.len = elem_len,
.link_id = link_id == assoc_data->assoc_link_id ? -1 : link_id,
.from_ap = true,
};
bool is_6ghz = cbss->channel->band == NL80211_BAND_6GHZ;
bool is_s1g = cbss->channel->band == NL80211_BAND_S1GHZ;
const struct cfg80211_bss_ies *bss_ies = NULL;
struct ieee80211_supported_band *sband;
struct ieee802_11_elems *elems;
const __le16 prof_bss_param_ch_present =
cpu_to_le16(IEEE80211_MLE_STA_CONTROL_BSS_PARAM_CHANGE_CNT_PRESENT);
u16 capab_info;
bool ret;
elems = ieee802_11_parse_elems_full(&parse_params);
if (!elems)
return false;
if (link_id == assoc_data->assoc_link_id) {
capab_info = le16_to_cpu(mgmt->u.assoc_resp.capab_info);
/*
* we should not get to this flow unless the association was
* successful, so set the status directly to success
*/
assoc_data->link[link_id].status = WLAN_STATUS_SUCCESS;
if (elems->ml_basic) {
if (!(elems->ml_basic->control &
cpu_to_le16(IEEE80211_MLC_BASIC_PRES_BSS_PARAM_CH_CNT))) {
ret = false;
goto out;
}
link->u.mgd.bss_param_ch_cnt =
ieee80211_mle_get_bss_param_ch_cnt(elems->ml_basic);
}
} else if (!elems->prof ||
!(elems->prof->control & prof_bss_param_ch_present)) {
ret = false;
goto out;
} else {
const u8 *ptr = elems->prof->variable +
elems->prof->sta_info_len - 1;
/*
* During parsing, we validated that these fields exist,
* otherwise elems->prof would have been set to NULL.
*/
capab_info = get_unaligned_le16(ptr);
assoc_data->link[link_id].status = get_unaligned_le16(ptr + 2);
link->u.mgd.bss_param_ch_cnt =
ieee80211_mle_basic_sta_prof_bss_param_ch_cnt(elems->prof);
if (assoc_data->link[link_id].status != WLAN_STATUS_SUCCESS) {
link_info(link, "association response status code=%u\n",
assoc_data->link[link_id].status);
ret = true;
goto out;
}
}
if (!is_s1g && !elems->supp_rates) {
sdata_info(sdata, "no SuppRates element in AssocResp\n");
ret = false;
goto out;
}
link->u.mgd.tdls_chan_switch_prohibited =
elems->ext_capab && elems->ext_capab_len >= 5 &&
(elems->ext_capab[4] & WLAN_EXT_CAPA5_TDLS_CH_SW_PROHIBITED);
/*
* Some APs are erroneously not including some information in their
* (re)association response frames. Try to recover by using the data
* from the beacon or probe response. This seems to afflict mobile
* 2G/3G/4G wifi routers, reported models include the "Onda PN51T",
* "Vodafone PocketWiFi 2", "ZTE MF60" and a similar T-Mobile device.
*/
if (!is_6ghz &&
((assoc_data->wmm && !elems->wmm_param) ||
(!(link->u.mgd.conn_flags & IEEE80211_CONN_DISABLE_HT) &&
(!elems->ht_cap_elem || !elems->ht_operation)) ||
(!(link->u.mgd.conn_flags & IEEE80211_CONN_DISABLE_VHT) &&
(!elems->vht_cap_elem || !elems->vht_operation)))) {
const struct cfg80211_bss_ies *ies;
struct ieee802_11_elems *bss_elems;
rcu_read_lock();
ies = rcu_dereference(cbss->ies);
if (ies)
bss_ies = kmemdup(ies, sizeof(*ies) + ies->len,
GFP_ATOMIC);
rcu_read_unlock();
if (!bss_ies) {
ret = false;
goto out;
}
parse_params.start = bss_ies->data;
parse_params.len = bss_ies->len;
parse_params.bss = cbss;
bss_elems = ieee802_11_parse_elems_full(&parse_params);
if (!bss_elems) {
ret = false;
goto out;
}
if (assoc_data->wmm &&
!elems->wmm_param && bss_elems->wmm_param) {
elems->wmm_param = bss_elems->wmm_param;
sdata_info(sdata,
"AP bug: WMM param missing from AssocResp\n");
}
/*
* Also check if we requested HT/VHT, otherwise the AP doesn't
* have to include the IEs in the (re)association response.
*/
if (!elems->ht_cap_elem && bss_elems->ht_cap_elem &&
!(link->u.mgd.conn_flags & IEEE80211_CONN_DISABLE_HT)) {
elems->ht_cap_elem = bss_elems->ht_cap_elem;
sdata_info(sdata,
"AP bug: HT capability missing from AssocResp\n");
}
if (!elems->ht_operation && bss_elems->ht_operation &&
!(link->u.mgd.conn_flags & IEEE80211_CONN_DISABLE_HT)) {
elems->ht_operation = bss_elems->ht_operation;
sdata_info(sdata,
"AP bug: HT operation missing from AssocResp\n");
}
if (!elems->vht_cap_elem && bss_elems->vht_cap_elem &&
!(link->u.mgd.conn_flags & IEEE80211_CONN_DISABLE_VHT)) {
elems->vht_cap_elem = bss_elems->vht_cap_elem;
sdata_info(sdata,
"AP bug: VHT capa missing from AssocResp\n");
}
if (!elems->vht_operation && bss_elems->vht_operation &&
!(link->u.mgd.conn_flags & IEEE80211_CONN_DISABLE_VHT)) {
elems->vht_operation = bss_elems->vht_operation;
sdata_info(sdata,
"AP bug: VHT operation missing from AssocResp\n");
}
kfree(bss_elems);
}
/*
* We previously checked these in the beacon/probe response, so
* they should be present here. This is just a safety net.
*/
if (!is_6ghz && !(link->u.mgd.conn_flags & IEEE80211_CONN_DISABLE_HT) &&
(!elems->wmm_param || !elems->ht_cap_elem || !elems->ht_operation)) {
sdata_info(sdata,
"HT AP is missing WMM params or HT capability/operation\n");
ret = false;
goto out;
}
if (!is_6ghz && !(link->u.mgd.conn_flags & IEEE80211_CONN_DISABLE_VHT) &&
(!elems->vht_cap_elem || !elems->vht_operation)) {
sdata_info(sdata,
"VHT AP is missing VHT capability/operation\n");
ret = false;
goto out;
}
if (is_6ghz && !(link->u.mgd.conn_flags & IEEE80211_CONN_DISABLE_HE) &&
!elems->he_6ghz_capa) {
sdata_info(sdata,
"HE 6 GHz AP is missing HE 6 GHz band capability\n");
ret = false;
goto out;
}
if (WARN_ON(!link->conf->chandef.chan)) {
ret = false;
goto out;
}
sband = local->hw.wiphy->bands[link->conf->chandef.chan->band];
if (!(link->u.mgd.conn_flags & IEEE80211_CONN_DISABLE_HE) &&
(!elems->he_cap || !elems->he_operation)) {
sdata_info(sdata,
"HE AP is missing HE capability/operation\n");
ret = false;
goto out;
}
/* Set up internal HT/VHT capabilities */
if (elems->ht_cap_elem && !(link->u.mgd.conn_flags & IEEE80211_CONN_DISABLE_HT))
ieee80211_ht_cap_ie_to_sta_ht_cap(sdata, sband,
elems->ht_cap_elem,
link_sta);
if (elems->vht_cap_elem && !(link->u.mgd.conn_flags & IEEE80211_CONN_DISABLE_VHT))
ieee80211_vht_cap_ie_to_sta_vht_cap(sdata, sband,
elems->vht_cap_elem,
link_sta);
if (elems->he_operation && !(link->u.mgd.conn_flags & IEEE80211_CONN_DISABLE_HE) &&
elems->he_cap) {
ieee80211_he_cap_ie_to_sta_he_cap(sdata, sband,
elems->he_cap,
elems->he_cap_len,
elems->he_6ghz_capa,
link_sta);
bss_conf->he_support = link_sta->pub->he_cap.has_he;
if (elems->rsnx && elems->rsnx_len &&
(elems->rsnx[0] & WLAN_RSNX_CAPA_PROTECTED_TWT) &&
wiphy_ext_feature_isset(local->hw.wiphy,
NL80211_EXT_FEATURE_PROTECTED_TWT))
bss_conf->twt_protected = true;
else
bss_conf->twt_protected = false;
*changed |= ieee80211_recalc_twt_req(sdata, sband, link,
link_sta, elems);
if (elems->eht_operation && elems->eht_cap &&
!(link->u.mgd.conn_flags & IEEE80211_CONN_DISABLE_EHT)) {
ieee80211_eht_cap_ie_to_sta_eht_cap(sdata, sband,
elems->he_cap,
elems->he_cap_len,
elems->eht_cap,
elems->eht_cap_len,
link_sta);
bss_conf->eht_support = link_sta->pub->eht_cap.has_eht;
*changed |= BSS_CHANGED_EHT_PUNCTURING;
} else {
bss_conf->eht_support = false;
}
} else {
bss_conf->he_support = false;
bss_conf->twt_requester = false;
bss_conf->twt_protected = false;
bss_conf->eht_support = false;
}
bss_conf->twt_broadcast =
ieee80211_twt_bcast_support(sdata, bss_conf, sband, link_sta);
if (bss_conf->he_support) {
bss_conf->he_bss_color.color =
le32_get_bits(elems->he_operation->he_oper_params,
IEEE80211_HE_OPERATION_BSS_COLOR_MASK);
bss_conf->he_bss_color.partial =
le32_get_bits(elems->he_operation->he_oper_params,
IEEE80211_HE_OPERATION_PARTIAL_BSS_COLOR);
bss_conf->he_bss_color.enabled =
!le32_get_bits(elems->he_operation->he_oper_params,
IEEE80211_HE_OPERATION_BSS_COLOR_DISABLED);
if (bss_conf->he_bss_color.enabled)
*changed |= BSS_CHANGED_HE_BSS_COLOR;
bss_conf->htc_trig_based_pkt_ext =
le32_get_bits(elems->he_operation->he_oper_params,
IEEE80211_HE_OPERATION_DFLT_PE_DURATION_MASK);
bss_conf->frame_time_rts_th =
le32_get_bits(elems->he_operation->he_oper_params,
IEEE80211_HE_OPERATION_RTS_THRESHOLD_MASK);
bss_conf->uora_exists = !!elems->uora_element;
if (elems->uora_element)
bss_conf->uora_ocw_range = elems->uora_element[0];
ieee80211_he_op_ie_to_bss_conf(&sdata->vif, elems->he_operation);
ieee80211_he_spr_ie_to_bss_conf(&sdata->vif, elems->he_spr);
/* TODO: OPEN: what happens if BSS color disable is set? */
}
if (cbss->transmitted_bss) {
bss_conf->nontransmitted = true;
ether_addr_copy(bss_conf->transmitter_bssid,
cbss->transmitted_bss->bssid);
bss_conf->bssid_indicator = cbss->max_bssid_indicator;
bss_conf->bssid_index = cbss->bssid_index;
}
/*
* Some APs, e.g. Netgear WNDR3700, report invalid HT operation data
* in their association response, so ignore that data for our own
* configuration. If it changed since the last beacon, we'll get the
* next beacon and update then.
*/
/*
* If an operating mode notification IE is present, override the
* NSS calculation (that would be done in rate_control_rate_init())
* and use the # of streams from that element.
*/
if (elems->opmode_notif &&
!(*elems->opmode_notif & IEEE80211_OPMODE_NOTIF_RX_NSS_TYPE_BF)) {
u8 nss;
nss = *elems->opmode_notif & IEEE80211_OPMODE_NOTIF_RX_NSS_MASK;
nss >>= IEEE80211_OPMODE_NOTIF_RX_NSS_SHIFT;
nss += 1;
link_sta->pub->rx_nss = nss;
}
/*
* Always handle WMM once after association regardless
* of the first value the AP uses. Setting -1 here has
* that effect because the AP's value is an unsigned
* 4-bit field.
*/
link->u.mgd.wmm_last_param_set = -1;
link->u.mgd.mu_edca_last_param_set = -1;
if (link->u.mgd.disable_wmm_tracking) {
ieee80211_set_wmm_default(link, false, false);
} else if (!ieee80211_sta_wmm_params(local, link, elems->wmm_param,
elems->wmm_param_len,
elems->mu_edca_param_set)) {
/* still enable QoS since we might have HT/VHT */
ieee80211_set_wmm_default(link, false, true);
/* disable WMM tracking in this case so we stop
* tracking WMM parameter changes in the beacon if
* the parameters weren't actually valid. Doing so
* avoids changing parameters very strangely when
* the AP is going back and forth between valid and
* invalid parameters.
*/
link->u.mgd.disable_wmm_tracking = true;
}
if (elems->max_idle_period_ie) {
bss_conf->max_idle_period =
le16_to_cpu(elems->max_idle_period_ie->max_idle_period);
bss_conf->protected_keep_alive =
!!(elems->max_idle_period_ie->idle_options &
WLAN_IDLE_OPTIONS_PROTECTED_KEEP_ALIVE);
*changed |= BSS_CHANGED_KEEP_ALIVE;
} else {
bss_conf->max_idle_period = 0;
bss_conf->protected_keep_alive = false;
}
/* set assoc capability (AID was already set earlier),
* ieee80211_set_associated() will tell the driver */
bss_conf->assoc_capability = capab_info;
ret = true;
out:
kfree(elems);
kfree(bss_ies);
return ret;
}
static int ieee80211_mgd_setup_link_sta(struct ieee80211_link_data *link,
struct sta_info *sta,
struct link_sta_info *link_sta,
struct cfg80211_bss *cbss)
{
struct ieee80211_sub_if_data *sdata = link->sdata;
struct ieee80211_local *local = sdata->local;
struct ieee80211_bss *bss = (void *)cbss->priv;
u32 rates = 0, basic_rates = 0;
bool have_higher_than_11mbit = false;
int min_rate = INT_MAX, min_rate_index = -1;
/* this is clearly wrong for MLO but we'll just remove it later */
int shift = ieee80211_vif_get_shift(&sdata->vif);
struct ieee80211_supported_band *sband;
memcpy(link_sta->addr, cbss->bssid, ETH_ALEN);
memcpy(link_sta->pub->addr, cbss->bssid, ETH_ALEN);
/* TODO: S1G Basic Rate Set is expressed elsewhere */
if (cbss->channel->band == NL80211_BAND_S1GHZ) {
ieee80211_s1g_sta_rate_init(sta);
return 0;
}
sband = local->hw.wiphy->bands[cbss->channel->band];
ieee80211_get_rates(sband, bss->supp_rates, bss->supp_rates_len,
&rates, &basic_rates, &have_higher_than_11mbit,
&min_rate, &min_rate_index, shift);
/*
* This used to be a workaround for basic rates missing
* in the association response frame. Now that we no
* longer use the basic rates from there, it probably
* doesn't happen any more, but keep the workaround so that,
* in case some *other* APs are buggy in different ways,
* we can still connect -- with a warning.
* Allow this workaround only in case the AP provided at least
* one rate.
*/
if (min_rate_index < 0) {
link_info(link, "No legacy rates in association response\n");
return -EINVAL;
} else if (!basic_rates) {
link_info(link, "No basic rates, using min rate instead\n");
basic_rates = BIT(min_rate_index);
}
if (rates)
link_sta->pub->supp_rates[cbss->channel->band] = rates;
else
link_info(link, "No rates found, keeping mandatory only\n");
link->conf->basic_rates = basic_rates;
/* cf. IEEE 802.11 9.2.12 */
link->operating_11g_mode = sband->band == NL80211_BAND_2GHZ &&
have_higher_than_11mbit;
return 0;
}
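/*
 * Derive how many RX chains are worth using towards this BSS from the
 * AP's advertised HT/VHT/HE TX MCS/NSS maps; the caller further caps
 * this by the number of RX chains the hardware has.
 */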
static u8 ieee80211_max_rx_chains(struct ieee80211_link_data *link,
struct cfg80211_bss *cbss)
{
struct ieee80211_he_mcs_nss_supp *he_mcs_nss_supp;
const struct element *ht_cap_elem, *vht_cap_elem;
const struct cfg80211_bss_ies *ies;
const struct ieee80211_ht_cap *ht_cap;
const struct ieee80211_vht_cap *vht_cap;
const struct ieee80211_he_cap_elem *he_cap;
const struct element *he_cap_elem;
u16 mcs_80_map, mcs_160_map;
int i, mcs_nss_size;
bool support_160;
u8 chains = 1;
if (link->u.mgd.conn_flags & IEEE80211_CONN_DISABLE_HT)
return chains;
ht_cap_elem = ieee80211_bss_get_elem(cbss, WLAN_EID_HT_CAPABILITY);
if (ht_cap_elem && ht_cap_elem->datalen >= sizeof(*ht_cap)) {
ht_cap = (void *)ht_cap_elem->data;
chains = ieee80211_mcs_to_chains(&ht_cap->mcs);
/*
* TODO: use "Tx Maximum Number Spatial Streams Supported" and
* "Tx Unequal Modulation Supported" fields.
*/
}
if (link->u.mgd.conn_flags & IEEE80211_CONN_DISABLE_VHT)
return chains;
vht_cap_elem = ieee80211_bss_get_elem(cbss, WLAN_EID_VHT_CAPABILITY);
if (vht_cap_elem && vht_cap_elem->datalen >= sizeof(*vht_cap)) {
u8 nss;
u16 tx_mcs_map;
vht_cap = (void *)vht_cap_elem->data;
tx_mcs_map = le16_to_cpu(vht_cap->supp_mcs.tx_mcs_map);
for (nss = 8; nss > 0; nss--) {
if (((tx_mcs_map >> (2 * (nss - 1))) & 3) !=
IEEE80211_VHT_MCS_NOT_SUPPORTED)
break;
}
/* TODO: use "Tx Highest Supported Long GI Data Rate" field? */
chains = max(chains, nss);
}
if (link->u.mgd.conn_flags & IEEE80211_CONN_DISABLE_HE)
return chains;
ies = rcu_dereference(cbss->ies);
he_cap_elem = cfg80211_find_ext_elem(WLAN_EID_EXT_HE_CAPABILITY,
ies->data, ies->len);
if (!he_cap_elem || he_cap_elem->datalen < sizeof(*he_cap))
return chains;
/* skip one byte ext_tag_id */
he_cap = (void *)(he_cap_elem->data + 1);
mcs_nss_size = ieee80211_he_mcs_nss_size(he_cap);
/* invalid HE IE */
if (he_cap_elem->datalen < 1 + mcs_nss_size + sizeof(*he_cap))
return chains;
/* mcs_nss is right after he_cap info */
he_mcs_nss_supp = (void *)(he_cap + 1);
mcs_80_map = le16_to_cpu(he_mcs_nss_supp->tx_mcs_80);
for (i = 7; i >= 0; i--) {
u8 mcs_80 = mcs_80_map >> (2 * i) & 3;
if (mcs_80 != IEEE80211_VHT_MCS_NOT_SUPPORTED) {
chains = max_t(u8, chains, i + 1);
break;
}
}
support_160 = he_cap->phy_cap_info[0] &
IEEE80211_HE_PHY_CAP0_CHANNEL_WIDTH_SET_160MHZ_IN_5G;
if (!support_160)
return chains;
mcs_160_map = le16_to_cpu(he_mcs_nss_supp->tx_mcs_160);
for (i = 7; i >= 0; i--) {
u8 mcs_160 = mcs_160_map >> (2 * i) & 3;
if (mcs_160 != IEEE80211_VHT_MCS_NOT_SUPPORTED) {
chains = max_t(u8, chains, i + 1);
break;
}
}
return chains;
}
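/*
 * Sanity-check the AP's own HE capabilities against its Basic HE-MCS
 * And NSS Set: the AP must support single-stream MCS 0-7 in both
 * directions and must itself meet whatever it requires of associating
 * stations; otherwise HE is disabled for this connection.
 */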
static bool
ieee80211_verify_peer_he_mcs_support(struct ieee80211_sub_if_data *sdata,
const struct cfg80211_bss_ies *ies,
const struct ieee80211_he_operation *he_op)
{
const struct element *he_cap_elem;
const struct ieee80211_he_cap_elem *he_cap;
struct ieee80211_he_mcs_nss_supp *he_mcs_nss_supp;
u16 mcs_80_map_tx, mcs_80_map_rx;
u16 ap_min_req_set;
int mcs_nss_size;
int nss;
he_cap_elem = cfg80211_find_ext_elem(WLAN_EID_EXT_HE_CAPABILITY,
ies->data, ies->len);
if (!he_cap_elem)
return false;
/* invalid HE IE */
if (he_cap_elem->datalen < 1 + sizeof(*he_cap)) {
sdata_info(sdata,
"Invalid HE elem, Disable HE\n");
return false;
}
/* skip one byte ext_tag_id */
he_cap = (void *)(he_cap_elem->data + 1);
mcs_nss_size = ieee80211_he_mcs_nss_size(he_cap);
/* invalid HE IE */
if (he_cap_elem->datalen < 1 + sizeof(*he_cap) + mcs_nss_size) {
sdata_info(sdata,
"Invalid HE elem with nss size, Disable HE\n");
return false;
}
/* mcs_nss is right after he_cap info */
he_mcs_nss_supp = (void *)(he_cap + 1);
mcs_80_map_tx = le16_to_cpu(he_mcs_nss_supp->tx_mcs_80);
mcs_80_map_rx = le16_to_cpu(he_mcs_nss_supp->rx_mcs_80);
/* P802.11-REVme/D0.3
* 27.1.1 Introduction to the HE PHY
* ...
* An HE STA shall support the following features:
* ...
* Single spatial stream HE-MCSs 0 to 7 (transmit and receive) in all
* supported channel widths for HE SU PPDUs
*/
if ((mcs_80_map_tx & 0x3) == IEEE80211_HE_MCS_NOT_SUPPORTED ||
(mcs_80_map_rx & 0x3) == IEEE80211_HE_MCS_NOT_SUPPORTED) {
sdata_info(sdata,
"Missing mandatory rates for 1 Nss, rx 0x%x, tx 0x%x, disable HE\n",
mcs_80_map_rx, mcs_80_map_tx);
return false;
}
if (!he_op)
return true;
ap_min_req_set = le16_to_cpu(he_op->he_mcs_nss_set);
/*
* Apparently iPhone 13 (at least iOS version 15.3.1) sets this to all
* zeroes, which is nonsense, and completely inconsistent with itself
* (it doesn't have 8 streams). Accept the settings in this case anyway.
*/
if (!ap_min_req_set)
return true;
/* make sure the AP is consistent with itself
*
* P802.11-REVme/D0.3
* 26.17.1 Basic HE BSS operation
*
* A STA that is operating in an HE BSS shall be able to receive and
* transmit at each of the <HE-MCS, NSS> tuple values indicated by the
* Basic HE-MCS And NSS Set field of the HE Operation parameter of the
* MLME-START.request primitive and shall be able to receive at each of
* the <HE-MCS, NSS> tuple values indicated by the Supported HE-MCS and
* NSS Set field in the HE Capabilities parameter of the MLME-START.request
* primitive
*/
for (nss = 8; nss > 0; nss--) {
u8 ap_op_val = (ap_min_req_set >> (2 * (nss - 1))) & 3;
u8 ap_rx_val;
u8 ap_tx_val;
if (ap_op_val == IEEE80211_HE_MCS_NOT_SUPPORTED)
continue;
ap_rx_val = (mcs_80_map_rx >> (2 * (nss - 1))) & 3;
ap_tx_val = (mcs_80_map_tx >> (2 * (nss - 1))) & 3;
if (ap_rx_val == IEEE80211_HE_MCS_NOT_SUPPORTED ||
ap_tx_val == IEEE80211_HE_MCS_NOT_SUPPORTED ||
ap_rx_val < ap_op_val || ap_tx_val < ap_op_val) {
sdata_info(sdata,
"Invalid rates for %d Nss, rx %d, tx %d oper %d, disable HE\n",
nss, ap_rx_val, ap_tx_val, ap_op_val);
return false;
}
}
return true;
}
static bool
ieee80211_verify_sta_he_mcs_support(struct ieee80211_sub_if_data *sdata,
struct ieee80211_supported_band *sband,
const struct ieee80211_he_operation *he_op)
{
const struct ieee80211_sta_he_cap *sta_he_cap =
ieee80211_get_he_iftype_cap_vif(sband, &sdata->vif);
u16 ap_min_req_set;
int i;
if (!sta_he_cap || !he_op)
return false;
ap_min_req_set = le16_to_cpu(he_op->he_mcs_nss_set);
/*
* Apparently iPhone 13 (at least iOS version 15.3.1) sets this to all
* zeroes, which is nonsense, and completely inconsistent with itself
* (it doesn't have 8 streams). Accept the settings in this case anyway.
*/
if (!ap_min_req_set)
return true;
/* Need to go over for 80MHz, 160MHz and for 80+80 */
for (i = 0; i < 3; i++) {
const struct ieee80211_he_mcs_nss_supp *sta_mcs_nss_supp =
&sta_he_cap->he_mcs_nss_supp;
u16 sta_mcs_map_rx =
le16_to_cpu(((__le16 *)sta_mcs_nss_supp)[2 * i]);
u16 sta_mcs_map_tx =
le16_to_cpu(((__le16 *)sta_mcs_nss_supp)[2 * i + 1]);
u8 nss;
bool verified = true;
/*
* For each band there is a maximum of 8 spatial streams
* possible. Each of the sta_mcs_map_* is a 16-bit struct built
* of 2 bits per NSS (1-8), with the values defined in enum
* ieee80211_he_mcs_support. Need to make sure STA TX and RX
* capabilities aren't less than the AP's minimum requirements
* for this HE BSS per SS.
* It is enough to find one such band that meets the reqs.
*/
for (nss = 8; nss > 0; nss--) {
u8 sta_rx_val = (sta_mcs_map_rx >> (2 * (nss - 1))) & 3;
u8 sta_tx_val = (sta_mcs_map_tx >> (2 * (nss - 1))) & 3;
u8 ap_val = (ap_min_req_set >> (2 * (nss - 1))) & 3;
if (ap_val == IEEE80211_HE_MCS_NOT_SUPPORTED)
continue;
/*
* Make sure the HE AP doesn't require MCSs that aren't
* supported by the client as required by spec
*
* P802.11-REVme/D0.3
* 26.17.1 Basic HE BSS operation
*
* An HE STA shall not attempt to join (MLME-JOIN.request primitive)
* a BSS, unless it supports (i.e., is able to both transmit and
* receive using) all of the <HE-MCS, NSS> tuples in the basic
* HE-MCS and NSS set.
*/
if (sta_rx_val == IEEE80211_HE_MCS_NOT_SUPPORTED ||
sta_tx_val == IEEE80211_HE_MCS_NOT_SUPPORTED ||
(ap_val > sta_rx_val) || (ap_val > sta_tx_val)) {
verified = false;
break;
}
}
if (verified)
return true;
}
/* If here, STA doesn't meet AP's HE min requirements */
return false;
}
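/*
 * Return our own EHT MCS/NSS support byte for the given MCS range
 * index and bandwidth, or 0xff (no constraint) for bandwidths we do
 * not support at all.
 */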
static u8
ieee80211_get_eht_cap_mcs_nss(const struct ieee80211_sta_he_cap *sta_he_cap,
const struct ieee80211_sta_eht_cap *sta_eht_cap,
unsigned int idx, int bw)
{
u8 he_phy_cap0 = sta_he_cap->he_cap_elem.phy_cap_info[0];
u8 eht_phy_cap0 = sta_eht_cap->eht_cap_elem.phy_cap_info[0];
/* handle us being a 20 MHz-only EHT STA - with four values
* for MCS 0-7, 8-9, 10-11, 12-13.
*/
if (!(he_phy_cap0 & IEEE80211_HE_PHY_CAP0_CHANNEL_WIDTH_SET_MASK_ALL))
return sta_eht_cap->eht_mcs_nss_supp.only_20mhz.rx_tx_max_nss[idx];
/* the others have MCS 0-9 together, rather than separately from 0-7 */
if (idx > 0)
idx--;
switch (bw) {
case 0:
return sta_eht_cap->eht_mcs_nss_supp.bw._80.rx_tx_max_nss[idx];
case 1:
if (!(he_phy_cap0 &
(IEEE80211_HE_PHY_CAP0_CHANNEL_WIDTH_SET_160MHZ_IN_5G |
IEEE80211_HE_PHY_CAP0_CHANNEL_WIDTH_SET_80PLUS80_MHZ_IN_5G)))
return 0xff; /* pass check */
return sta_eht_cap->eht_mcs_nss_supp.bw._160.rx_tx_max_nss[idx];
case 2:
if (!(eht_phy_cap0 & IEEE80211_EHT_PHY_CAP0_320MHZ_IN_6GHZ))
return 0xff; /* pass check */
return sta_eht_cap->eht_mcs_nss_supp.bw._320.rx_tx_max_nss[idx];
}
WARN_ON(1);
return 0;
}
static bool
ieee80211_verify_sta_eht_mcs_support(struct ieee80211_sub_if_data *sdata,
struct ieee80211_supported_band *sband,
const struct ieee80211_eht_operation *eht_op)
{
const struct ieee80211_sta_he_cap *sta_he_cap =
ieee80211_get_he_iftype_cap_vif(sband, &sdata->vif);
const struct ieee80211_sta_eht_cap *sta_eht_cap =
ieee80211_get_eht_iftype_cap_vif(sband, &sdata->vif);
const struct ieee80211_eht_mcs_nss_supp_20mhz_only *req;
unsigned int i;
if (!sta_he_cap || !sta_eht_cap || !eht_op)
return false;
req = &eht_op->basic_mcs_nss;
for (i = 0; i < ARRAY_SIZE(req->rx_tx_max_nss); i++) {
u8 req_rx_nss, req_tx_nss;
unsigned int bw;
req_rx_nss = u8_get_bits(req->rx_tx_max_nss[i],
IEEE80211_EHT_MCS_NSS_RX);
req_tx_nss = u8_get_bits(req->rx_tx_max_nss[i],
IEEE80211_EHT_MCS_NSS_TX);
for (bw = 0; bw < 3; bw++) {
u8 have, have_rx_nss, have_tx_nss;
have = ieee80211_get_eht_cap_mcs_nss(sta_he_cap,
sta_eht_cap,
i, bw);
have_rx_nss = u8_get_bits(have,
IEEE80211_EHT_MCS_NSS_RX);
have_tx_nss = u8_get_bits(have,
IEEE80211_EHT_MCS_NSS_TX);
if (req_rx_nss > have_rx_nss ||
req_tx_nss > have_tx_nss)
return false;
}
}
return true;
}
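/*
 * Parse the BSS elements to decide which of HT/VHT/HE/EHT can be used
 * for this connection, copy the 6 GHz power constraint/envelopes into
 * the link config, determine the channel definition and the number of
 * RX chains needed, and reject non-HE connections on 6 GHz.
 */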
static int ieee80211_prep_channel(struct ieee80211_sub_if_data *sdata,
struct ieee80211_link_data *link,
struct cfg80211_bss *cbss,
ieee80211_conn_flags_t *conn_flags)
{
struct ieee80211_local *local = sdata->local;
const struct ieee80211_ht_cap *ht_cap = NULL;
const struct ieee80211_ht_operation *ht_oper = NULL;
const struct ieee80211_vht_operation *vht_oper = NULL;
const struct ieee80211_he_operation *he_oper = NULL;
const struct ieee80211_eht_operation *eht_oper = NULL;
const struct ieee80211_s1g_oper_ie *s1g_oper = NULL;
struct ieee80211_supported_band *sband;
struct cfg80211_chan_def chandef;
bool is_6ghz = cbss->channel->band == NL80211_BAND_6GHZ;
bool is_5ghz = cbss->channel->band == NL80211_BAND_5GHZ;
struct ieee80211_bss *bss = (void *)cbss->priv;
struct ieee80211_elems_parse_params parse_params = {
.link_id = -1,
.from_ap = true,
};
struct ieee802_11_elems *elems;
const struct cfg80211_bss_ies *ies;
int ret;
u32 i;
bool have_80mhz;
rcu_read_lock();
ies = rcu_dereference(cbss->ies);
parse_params.start = ies->data;
parse_params.len = ies->len;
elems = ieee802_11_parse_elems_full(&parse_params);
if (!elems) {
rcu_read_unlock();
return -ENOMEM;
}
sband = local->hw.wiphy->bands[cbss->channel->band];
*conn_flags &= ~(IEEE80211_CONN_DISABLE_40MHZ |
IEEE80211_CONN_DISABLE_80P80MHZ |
IEEE80211_CONN_DISABLE_160MHZ);
/* disable HT/VHT/HE if we don't support them */
if (!sband->ht_cap.ht_supported && !is_6ghz) {
mlme_dbg(sdata, "HT not supported, disabling HT/VHT/HE/EHT\n");
*conn_flags |= IEEE80211_CONN_DISABLE_HT;
*conn_flags |= IEEE80211_CONN_DISABLE_VHT;
*conn_flags |= IEEE80211_CONN_DISABLE_HE;
*conn_flags |= IEEE80211_CONN_DISABLE_EHT;
}
if (!sband->vht_cap.vht_supported && is_5ghz) {
mlme_dbg(sdata, "VHT not supported, disabling VHT/HE/EHT\n");
*conn_flags |= IEEE80211_CONN_DISABLE_VHT;
*conn_flags |= IEEE80211_CONN_DISABLE_HE;
*conn_flags |= IEEE80211_CONN_DISABLE_EHT;
}
if (!ieee80211_get_he_iftype_cap_vif(sband, &sdata->vif)) {
mlme_dbg(sdata, "HE not supported, disabling HE and EHT\n");
*conn_flags |= IEEE80211_CONN_DISABLE_HE;
*conn_flags |= IEEE80211_CONN_DISABLE_EHT;
}
if (!ieee80211_get_eht_iftype_cap_vif(sband, &sdata->vif)) {
mlme_dbg(sdata, "EHT not supported, disabling EHT\n");
*conn_flags |= IEEE80211_CONN_DISABLE_EHT;
}
if (!(*conn_flags & IEEE80211_CONN_DISABLE_HT) && !is_6ghz) {
ht_oper = elems->ht_operation;
ht_cap = elems->ht_cap_elem;
if (!ht_cap) {
*conn_flags |= IEEE80211_CONN_DISABLE_HT;
ht_oper = NULL;
}
}
if (!(*conn_flags & IEEE80211_CONN_DISABLE_VHT) && !is_6ghz) {
vht_oper = elems->vht_operation;
if (vht_oper && !ht_oper) {
vht_oper = NULL;
sdata_info(sdata,
"AP advertised VHT without HT, disabling HT/VHT/HE\n");
*conn_flags |= IEEE80211_CONN_DISABLE_HT;
*conn_flags |= IEEE80211_CONN_DISABLE_VHT;
*conn_flags |= IEEE80211_CONN_DISABLE_HE;
*conn_flags |= IEEE80211_CONN_DISABLE_EHT;
}
if (!elems->vht_cap_elem) {
*conn_flags |= IEEE80211_CONN_DISABLE_VHT;
vht_oper = NULL;
}
}
if (!(*conn_flags & IEEE80211_CONN_DISABLE_HE)) {
he_oper = elems->he_operation;
if (link && is_6ghz) {
struct ieee80211_bss_conf *bss_conf;
u8 j = 0;
bss_conf = link->conf;
if (elems->pwr_constr_elem)
bss_conf->pwr_reduction = *elems->pwr_constr_elem;
BUILD_BUG_ON(ARRAY_SIZE(bss_conf->tx_pwr_env) !=
ARRAY_SIZE(elems->tx_pwr_env));
for (i = 0; i < elems->tx_pwr_env_num; i++) {
if (elems->tx_pwr_env_len[i] >
sizeof(bss_conf->tx_pwr_env[j]))
continue;
bss_conf->tx_pwr_env_num++;
memcpy(&bss_conf->tx_pwr_env[j], elems->tx_pwr_env[i],
elems->tx_pwr_env_len[i]);
j++;
}
}
if (!ieee80211_verify_peer_he_mcs_support(sdata, ies, he_oper) ||
!ieee80211_verify_sta_he_mcs_support(sdata, sband, he_oper))
*conn_flags |= IEEE80211_CONN_DISABLE_HE |
IEEE80211_CONN_DISABLE_EHT;
}
/*
* EHT requires HE to be supported as well. Specifically for 6 GHz
* channels, the operation channel information can only be deduced from
* both the 6 GHz operation information (from the HE operation IE) and
* EHT operation.
*/
if (!(*conn_flags &
(IEEE80211_CONN_DISABLE_HE |
IEEE80211_CONN_DISABLE_EHT)) &&
he_oper) {
const struct cfg80211_bss_ies *cbss_ies;
const struct element *eht_ml_elem;
const u8 *eht_oper_ie;
cbss_ies = rcu_dereference(cbss->ies);
eht_oper_ie = cfg80211_find_ext_ie(WLAN_EID_EXT_EHT_OPERATION,
cbss_ies->data, cbss_ies->len);
if (eht_oper_ie && eht_oper_ie[1] >=
1 + sizeof(struct ieee80211_eht_operation))
eht_oper = (void *)(eht_oper_ie + 3);
else
eht_oper = NULL;
if (!ieee80211_verify_sta_eht_mcs_support(sdata, sband, eht_oper))
*conn_flags |= IEEE80211_CONN_DISABLE_EHT;
eht_ml_elem = cfg80211_find_ext_elem(WLAN_EID_EXT_EHT_MULTI_LINK,
cbss_ies->data, cbss_ies->len);
/* data + 1 / datalen - 1 since it's an extended element */
if (!(*conn_flags & IEEE80211_CONN_DISABLE_EHT) &&
eht_ml_elem &&
ieee80211_mle_type_ok(eht_ml_elem->data + 1,
IEEE80211_ML_CONTROL_TYPE_BASIC,
eht_ml_elem->datalen - 1)) {
sdata->vif.cfg.eml_cap =
ieee80211_mle_get_eml_cap(eht_ml_elem->data + 1);
sdata->vif.cfg.eml_med_sync_delay =
ieee80211_mle_get_eml_med_sync_delay(eht_ml_elem->data + 1);
}
}
/* Allow VHT if at least one channel on the sband supports 80 MHz */
have_80mhz = false;
for (i = 0; i < sband->n_channels; i++) {
if (sband->channels[i].flags & (IEEE80211_CHAN_DISABLED |
IEEE80211_CHAN_NO_80MHZ))
continue;
have_80mhz = true;
break;
}
if (!have_80mhz) {
sdata_info(sdata, "80 MHz not supported, disabling VHT\n");
*conn_flags |= IEEE80211_CONN_DISABLE_VHT;
}
if (sband->band == NL80211_BAND_S1GHZ) {
s1g_oper = elems->s1g_oper;
if (!s1g_oper)
sdata_info(sdata,
"AP missing S1G operation element?\n");
}
*conn_flags |=
ieee80211_determine_chantype(sdata, link, *conn_flags,
sband,
cbss->channel,
bss->vht_cap_info,
ht_oper, vht_oper,
he_oper, eht_oper,
s1g_oper,
&chandef, false);
if (link)
link->needed_rx_chains =
min(ieee80211_max_rx_chains(link, cbss),
local->rx_chains);
rcu_read_unlock();
/* the element data was RCU protected so no longer valid anyway */
kfree(elems);
elems = NULL;
if (*conn_flags & IEEE80211_CONN_DISABLE_HE && is_6ghz) {
sdata_info(sdata, "Rejecting non-HE 6/7 GHz connection");
return -EINVAL;
}
if (!link)
return 0;
/* will change later if needed */
link->smps_mode = IEEE80211_SMPS_OFF;
mutex_lock(&local->mtx);
/*
* If this fails (possibly due to channel context sharing
* on incompatible channels, e.g. 80+80 and 160 sharing the
* same control channel) try to use a smaller bandwidth.
*/
ret = ieee80211_link_use_channel(link, &chandef,
IEEE80211_CHANCTX_SHARED);
/* don't downgrade for 5 and 10 MHz channels, though. */
if (chandef.width == NL80211_CHAN_WIDTH_5 ||
chandef.width == NL80211_CHAN_WIDTH_10)
goto out;
while (ret && chandef.width != NL80211_CHAN_WIDTH_20_NOHT) {
*conn_flags |=
ieee80211_chandef_downgrade(&chandef);
ret = ieee80211_link_use_channel(link, &chandef,
IEEE80211_CHANCTX_SHARED);
}
out:
mutex_unlock(&local->mtx);
return ret;
}
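
/*
 * Extract the DTIM count and period from the TIM element in the given
 * IEs; a Multiple BSSID Index element, if present, overrides both
 * values for nontransmitted profiles.
 */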
static bool ieee80211_get_dtim(const struct cfg80211_bss_ies *ies,
u8 *dtim_count, u8 *dtim_period)
{
const u8 *tim_ie = cfg80211_find_ie(WLAN_EID_TIM, ies->data, ies->len);
const u8 *idx_ie = cfg80211_find_ie(WLAN_EID_MULTI_BSSID_IDX, ies->data,
ies->len);
const struct ieee80211_tim_ie *tim = NULL;
const struct ieee80211_bssid_index *idx;
bool valid = tim_ie && tim_ie[1] >= 2;
if (valid)
tim = (void *)(tim_ie + 2);
if (dtim_count)
*dtim_count = valid ? tim->dtim_count : 0;
if (dtim_period)
*dtim_period = valid ? tim->dtim_period : 0;
/* Check if value is overridden by non-transmitted profile */
if (!idx_ie || idx_ie[1] < 3)
return valid;
idx = (void *)(idx_ie + 2);
if (dtim_count)
*dtim_count = idx->dtim_count;
if (dtim_period)
*dtim_period = idx->dtim_period;
return true;
}
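
/*
 * Finish a successful association: set up the (MLO) links and their
 * link station entries, apply the per-link association configuration
 * and move the station to ASSOC (and, without 802.1X port control,
 * AUTHORIZED). Returns false if anything fails.
 */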
static bool ieee80211_assoc_success(struct ieee80211_sub_if_data *sdata,
struct ieee80211_mgmt *mgmt,
struct ieee802_11_elems *elems,
const u8 *elem_start, unsigned int elem_len)
{
struct ieee80211_if_managed *ifmgd = &sdata->u.mgd;
struct ieee80211_mgd_assoc_data *assoc_data = ifmgd->assoc_data;
struct ieee80211_local *local = sdata->local;
unsigned int link_id;
struct sta_info *sta;
u64 changed[IEEE80211_MLD_MAX_NUM_LINKS] = {};
u16 valid_links = 0, dormant_links = 0;
int err;
mutex_lock(&sdata->local->sta_mtx);
/*
* station info was already allocated and inserted before
* the association and should be available to us
*/
sta = sta_info_get(sdata, assoc_data->ap_addr);
if (WARN_ON(!sta))
goto out_err;
if (ieee80211_vif_is_mld(&sdata->vif)) {
for (link_id = 0; link_id < IEEE80211_MLD_MAX_NUM_LINKS; link_id++) {
if (!assoc_data->link[link_id].bss)
continue;
valid_links |= BIT(link_id);
if (assoc_data->link[link_id].disabled) {
dormant_links |= BIT(link_id);
} else if (link_id != assoc_data->assoc_link_id) {
err = ieee80211_sta_allocate_link(sta, link_id);
if (err)
goto out_err;
}
}
ieee80211_vif_set_links(sdata, valid_links, dormant_links);
}
for (link_id = 0; link_id < IEEE80211_MLD_MAX_NUM_LINKS; link_id++) {
struct cfg80211_bss *cbss = assoc_data->link[link_id].bss;
struct ieee80211_link_data *link;
struct link_sta_info *link_sta;
if (!cbss || assoc_data->link[link_id].disabled)
continue;
link = sdata_dereference(sdata->link[link_id], sdata);
if (WARN_ON(!link))
goto out_err;
if (ieee80211_vif_is_mld(&sdata->vif))
link_info(link,
"local address %pM, AP link address %pM%s\n",
link->conf->addr,
assoc_data->link[link_id].bss->bssid,
link_id == assoc_data->assoc_link_id ?
" (assoc)" : "");
link_sta = rcu_dereference_protected(sta->link[link_id],
lockdep_is_held(&local->sta_mtx));
if (WARN_ON(!link_sta))
goto out_err;
if (!link->u.mgd.have_beacon) {
const struct cfg80211_bss_ies *ies;
rcu_read_lock();
ies = rcu_dereference(cbss->beacon_ies);
if (ies)
link->u.mgd.have_beacon = true;
else
ies = rcu_dereference(cbss->ies);
ieee80211_get_dtim(ies,
&link->conf->sync_dtim_count,
&link->u.mgd.dtim_period);
link->conf->beacon_int = cbss->beacon_interval;
rcu_read_unlock();
}
link->conf->dtim_period = link->u.mgd.dtim_period ?: 1;
if (link_id != assoc_data->assoc_link_id) {
err = ieee80211_prep_channel(sdata, link, cbss,
&link->u.mgd.conn_flags);
if (err) {
link_info(link, "prep_channel failed\n");
goto out_err;
}
}
err = ieee80211_mgd_setup_link_sta(link, sta, link_sta,
assoc_data->link[link_id].bss);
if (err)
goto out_err;
if (!ieee80211_assoc_config_link(link, link_sta,
assoc_data->link[link_id].bss,
mgmt, elem_start, elem_len,
&changed[link_id]))
goto out_err;
if (assoc_data->link[link_id].status != WLAN_STATUS_SUCCESS) {
valid_links &= ~BIT(link_id);
ieee80211_sta_remove_link(sta, link_id);
continue;
}
if (link_id != assoc_data->assoc_link_id) {
err = ieee80211_sta_activate_link(sta, link_id);
if (err)
goto out_err;
}
}
/* links might have changed due to rejected ones, set them again */
ieee80211_vif_set_links(sdata, valid_links, dormant_links);
rate_control_rate_init(sta);
if (ifmgd->flags & IEEE80211_STA_MFP_ENABLED) {
set_sta_flag(sta, WLAN_STA_MFP);
sta->sta.mfp = true;
} else {
sta->sta.mfp = false;
}
ieee80211_sta_set_max_amsdu_subframes(sta, elems->ext_capab,
elems->ext_capab_len);
sta->sta.wme = (elems->wmm_param || elems->s1g_capab) &&
local->hw.queues >= IEEE80211_NUM_ACS;
err = sta_info_move_state(sta, IEEE80211_STA_ASSOC);
if (!err && !(ifmgd->flags & IEEE80211_STA_CONTROL_PORT))
err = sta_info_move_state(sta, IEEE80211_STA_AUTHORIZED);
if (err) {
sdata_info(sdata,
"failed to move station %pM to desired state\n",
sta->sta.addr);
WARN_ON(__sta_info_destroy(sta));
goto out_err;
}
if (sdata->wdev.use_4addr)
drv_sta_set_4addr(local, sdata, &sta->sta, true);
mutex_unlock(&sdata->local->sta_mtx);
ieee80211_set_associated(sdata, assoc_data, changed);
/*
* If we're using 4-addr mode, let the AP know that we're
* doing so, so that it can create the STA VLAN on its side
*/
if (ifmgd->use_4addr)
ieee80211_send_4addr_nullfunc(local, sdata);
/*
* Start timer to probe the connection to the AP now.
* Also start the timer that will detect beacon loss.
*/
ieee80211_sta_reset_beacon_monitor(sdata);
ieee80211_sta_reset_conn_monitor(sdata);
return true;
out_err:
eth_zero_addr(sdata->vif.cfg.ap_addr);
mutex_unlock(&sdata->local->sta_mtx);
return false;
}
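
/*
 * Handle a received (Re)Association Response: validate it against the
 * pending association data, parse its elements, complete the
 * association on success and report the outcome to cfg80211 and the
 * driver.
 */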
static void ieee80211_rx_mgmt_assoc_resp(struct ieee80211_sub_if_data *sdata,
struct ieee80211_mgmt *mgmt,
size_t len)
{
struct ieee80211_if_managed *ifmgd = &sdata->u.mgd;
struct ieee80211_mgd_assoc_data *assoc_data = ifmgd->assoc_data;
u16 capab_info, status_code, aid;
struct ieee80211_elems_parse_params parse_params = {
.bss = NULL,
.link_id = -1,
.from_ap = true,
};
struct ieee802_11_elems *elems;
int ac;
const u8 *elem_start;
unsigned int elem_len;
bool reassoc;
struct ieee80211_event event = {
.type = MLME_EVENT,
.u.mlme.data = ASSOC_EVENT,
};
struct ieee80211_prep_tx_info info = {};
struct cfg80211_rx_assoc_resp resp = {
.uapsd_queues = -1,
};
u8 ap_mld_addr[ETH_ALEN] __aligned(2);
unsigned int link_id;
sdata_assert_lock(sdata);
if (!assoc_data)
return;
if (!ether_addr_equal(assoc_data->ap_addr, mgmt->bssid) ||
!ether_addr_equal(assoc_data->ap_addr, mgmt->sa))
return;
/*
* AssocResp and ReassocResp have identical structure, so process both
* of them in this function.
*/
if (len < 24 + 6)
return;
reassoc = ieee80211_is_reassoc_resp(mgmt->frame_control);
capab_info = le16_to_cpu(mgmt->u.assoc_resp.capab_info);
status_code = le16_to_cpu(mgmt->u.assoc_resp.status_code);
if (assoc_data->s1g)
elem_start = mgmt->u.s1g_assoc_resp.variable;
else
elem_start = mgmt->u.assoc_resp.variable;
/*
* Note: this may not be perfect, AP might misbehave - if
* anyone needs to rely on perfect complete notification
* with the exact right subtype, then we need to track what
* we actually transmitted.
*/
info.subtype = reassoc ? IEEE80211_STYPE_REASSOC_REQ :
IEEE80211_STYPE_ASSOC_REQ;
if (assoc_data->fils_kek_len &&
fils_decrypt_assoc_resp(sdata, (u8 *)mgmt, &len, assoc_data) < 0)
return;
elem_len = len - (elem_start - (u8 *)mgmt);
parse_params.start = elem_start;
parse_params.len = elem_len;
elems = ieee802_11_parse_elems_full(&parse_params);
if (!elems)
goto notify_driver;
if (elems->aid_resp)
aid = le16_to_cpu(elems->aid_resp->aid);
else if (assoc_data->s1g)
aid = 0; /* TODO */
else
aid = le16_to_cpu(mgmt->u.assoc_resp.aid);
/*
* The 5 MSB of the AID field are reserved
* (802.11-2016 9.4.1.8 AID field)
*/
aid &= 0x7ff;
sdata_info(sdata,
"RX %sssocResp from %pM (capab=0x%x status=%d aid=%d)\n",
reassoc ? "Rea" : "A", assoc_data->ap_addr,
capab_info, status_code, (u16)(aid & ~(BIT(15) | BIT(14))));
ifmgd->broken_ap = false;
if (status_code == WLAN_STATUS_ASSOC_REJECTED_TEMPORARILY &&
elems->timeout_int &&
elems->timeout_int->type == WLAN_TIMEOUT_ASSOC_COMEBACK) {
u32 tu, ms;
cfg80211_assoc_comeback(sdata->dev, assoc_data->ap_addr,
le32_to_cpu(elems->timeout_int->value));
tu = le32_to_cpu(elems->timeout_int->value);
ms = tu * 1024 / 1000;
sdata_info(sdata,
"%pM rejected association temporarily; comeback duration %u TU (%u ms)\n",
assoc_data->ap_addr, tu, ms);
assoc_data->timeout = jiffies + msecs_to_jiffies(ms);
assoc_data->timeout_started = true;
if (ms > IEEE80211_ASSOC_TIMEOUT)
run_again(sdata, assoc_data->timeout);
goto notify_driver;
}
if (status_code != WLAN_STATUS_SUCCESS) {
sdata_info(sdata, "%pM denied association (code=%d)\n",
assoc_data->ap_addr, status_code);
event.u.mlme.status = MLME_DENIED;
event.u.mlme.reason = status_code;
drv_event_callback(sdata->local, sdata, &event);
} else {
if (aid == 0 || aid > IEEE80211_MAX_AID) {
sdata_info(sdata,
"invalid AID value %d (out of range), turn off PS\n",
aid);
aid = 0;
ifmgd->broken_ap = true;
}
if (ieee80211_vif_is_mld(&sdata->vif)) {
if (!elems->ml_basic) {
sdata_info(sdata,
"MLO association with %pM but no multi-link element in response!\n",
assoc_data->ap_addr);
goto abandon_assoc;
}
if (le16_get_bits(elems->ml_basic->control,
IEEE80211_ML_CONTROL_TYPE) !=
IEEE80211_ML_CONTROL_TYPE_BASIC) {
sdata_info(sdata,
"bad multi-link element (control=0x%x)\n",
le16_to_cpu(elems->ml_basic->control));
goto abandon_assoc;
} else {
struct ieee80211_mle_basic_common_info *common;
common = (void *)elems->ml_basic->variable;
if (memcmp(assoc_data->ap_addr,
common->mld_mac_addr, ETH_ALEN)) {
sdata_info(sdata,
"AP MLD MAC address mismatch: got %pM expected %pM\n",
common->mld_mac_addr,
assoc_data->ap_addr);
goto abandon_assoc;
}
}
}
sdata->vif.cfg.aid = aid;
if (!ieee80211_assoc_success(sdata, mgmt, elems,
elem_start, elem_len)) {
/* oops -- internal error -- send timeout for now */
ieee80211_destroy_assoc_data(sdata, ASSOC_TIMEOUT);
goto notify_driver;
}
event.u.mlme.status = MLME_SUCCESS;
drv_event_callback(sdata->local, sdata, &event);
sdata_info(sdata, "associated\n");
info.success = 1;
}
for (link_id = 0; link_id < IEEE80211_MLD_MAX_NUM_LINKS; link_id++) {
struct ieee80211_link_data *link;
link = sdata_dereference(sdata->link[link_id], sdata);
if (!link)
continue;
if (!assoc_data->link[link_id].bss)
continue;
resp.links[link_id].bss = assoc_data->link[link_id].bss;
resp.links[link_id].addr = link->conf->addr;
resp.links[link_id].status = assoc_data->link[link_id].status;
/* get uapsd queues configuration - same for all links */
resp.uapsd_queues = 0;
for (ac = 0; ac < IEEE80211_NUM_ACS; ac++)
if (link->tx_conf[ac].uapsd)
resp.uapsd_queues |= ieee80211_ac_to_qos_mask[ac];
}
if (ieee80211_vif_is_mld(&sdata->vif)) {
ether_addr_copy(ap_mld_addr, sdata->vif.cfg.ap_addr);
resp.ap_mld_addr = ap_mld_addr;
}
ieee80211_destroy_assoc_data(sdata,
status_code == WLAN_STATUS_SUCCESS ?
ASSOC_SUCCESS :
ASSOC_REJECTED);
resp.buf = (u8 *)mgmt;
resp.len = len;
resp.req_ies = ifmgd->assoc_req_ies;
resp.req_ies_len = ifmgd->assoc_req_ies_len;
cfg80211_rx_assoc_resp(sdata->dev, &resp);
notify_driver:
drv_mgd_complete_tx(sdata->local, sdata, &info);
kfree(elems);
return;
abandon_assoc:
ieee80211_destroy_assoc_data(sdata, ASSOC_ABANDON);
goto notify_driver;
}
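
/* Update the BSS table from a received frame and record the AP's beacon rate. */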
static void ieee80211_rx_bss_info(struct ieee80211_link_data *link,
struct ieee80211_mgmt *mgmt, size_t len,
struct ieee80211_rx_status *rx_status)
{
struct ieee80211_sub_if_data *sdata = link->sdata;
struct ieee80211_local *local = sdata->local;
struct ieee80211_bss *bss;
struct ieee80211_channel *channel;
sdata_assert_lock(sdata);
channel = ieee80211_get_channel_khz(local->hw.wiphy,
ieee80211_rx_status_to_khz(rx_status));
if (!channel)
return;
bss = ieee80211_bss_info_update(local, rx_status, mgmt, len, channel);
if (bss) {
link->conf->beacon_rate = bss->beacon_rate;
ieee80211_rx_bss_put(local, bss);
}
}
static void ieee80211_rx_mgmt_probe_resp(struct ieee80211_link_data *link,
struct sk_buff *skb)
{
struct ieee80211_sub_if_data *sdata = link->sdata;
struct ieee80211_mgmt *mgmt = (void *)skb->data;
struct ieee80211_if_managed *ifmgd;
struct ieee80211_rx_status *rx_status = (void *) skb->cb;
struct ieee80211_channel *channel;
size_t baselen, len = skb->len;
ifmgd = &sdata->u.mgd;
sdata_assert_lock(sdata);
/*
* According to Draft P802.11ax D6.0 clause 26.17.2.3.2:
* "If a 6 GHz AP receives a Probe Request frame and responds with
* a Probe Response frame [..], the Address 1 field of the Probe
* Response frame shall be set to the broadcast address [..]"
* So, on 6GHz band we should also accept broadcast responses.
*/
channel = ieee80211_get_channel(sdata->local->hw.wiphy,
rx_status->freq);
if (!channel)
return;
if (!ether_addr_equal(mgmt->da, sdata->vif.addr) &&
(channel->band != NL80211_BAND_6GHZ ||
!is_broadcast_ether_addr(mgmt->da)))
return; /* ignore ProbeResp to foreign address */
baselen = (u8 *) mgmt->u.probe_resp.variable - (u8 *) mgmt;
if (baselen > len)
return;
ieee80211_rx_bss_info(link, mgmt, len, rx_status);
if (ifmgd->associated &&
ether_addr_equal(mgmt->bssid, link->u.mgd.bssid))
ieee80211_reset_ap_probe(sdata);
}
/*
* This is the canonical list of information elements we care about,
* the filter code also gives us all changes to the Microsoft OUI
* (00:50:F2) vendor IE which is used for WMM which we need to track,
* as well as the DTPC IE (part of the Cisco OUI) used for signaling
* changes to requested client power.
*
* We implement beacon filtering in software since that means we can
* avoid processing the frame here and in cfg80211, and userspace
* will not be able to tell whether the hardware supports it or not.
*
* XXX: This list needs to be dynamic -- userspace needs to be able to
* add items it requires. It also needs to be able to tell us to
* look out for other vendor IEs.
*/
static const u64 care_about_ies =
(1ULL << WLAN_EID_COUNTRY) |
(1ULL << WLAN_EID_ERP_INFO) |
(1ULL << WLAN_EID_CHANNEL_SWITCH) |
(1ULL << WLAN_EID_PWR_CONSTRAINT) |
(1ULL << WLAN_EID_HT_CAPABILITY) |
(1ULL << WLAN_EID_HT_OPERATION) |
(1ULL << WLAN_EID_EXT_CHANSWITCH_ANN);
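
/*
 * Track the average signal of the current AP's beacons and generate
 * RSSI (driver event) and CQM notifications when the averaged value
 * crosses the configured thresholds.
 */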
static void ieee80211_handle_beacon_sig(struct ieee80211_link_data *link,
struct ieee80211_if_managed *ifmgd,
struct ieee80211_bss_conf *bss_conf,
struct ieee80211_local *local,
struct ieee80211_rx_status *rx_status)
{
struct ieee80211_sub_if_data *sdata = link->sdata;
/* Track average RSSI from the Beacon frames of the current AP */
if (!link->u.mgd.tracking_signal_avg) {
link->u.mgd.tracking_signal_avg = true;
ewma_beacon_signal_init(&link->u.mgd.ave_beacon_signal);
link->u.mgd.last_cqm_event_signal = 0;
link->u.mgd.count_beacon_signal = 1;
link->u.mgd.last_ave_beacon_signal = 0;
} else {
link->u.mgd.count_beacon_signal++;
}
ewma_beacon_signal_add(&link->u.mgd.ave_beacon_signal,
-rx_status->signal);
if (ifmgd->rssi_min_thold != ifmgd->rssi_max_thold &&
link->u.mgd.count_beacon_signal >= IEEE80211_SIGNAL_AVE_MIN_COUNT) {
int sig = -ewma_beacon_signal_read(&link->u.mgd.ave_beacon_signal);
int last_sig = link->u.mgd.last_ave_beacon_signal;
struct ieee80211_event event = {
.type = RSSI_EVENT,
};
/*
* if signal crosses either of the boundaries, invoke callback
* with appropriate parameters
*/
if (sig > ifmgd->rssi_max_thold &&
(last_sig <= ifmgd->rssi_min_thold || last_sig == 0)) {
link->u.mgd.last_ave_beacon_signal = sig;
event.u.rssi.data = RSSI_EVENT_HIGH;
drv_event_callback(local, sdata, &event);
} else if (sig < ifmgd->rssi_min_thold &&
(last_sig >= ifmgd->rssi_max_thold ||
last_sig == 0)) {
link->u.mgd.last_ave_beacon_signal = sig;
event.u.rssi.data = RSSI_EVENT_LOW;
drv_event_callback(local, sdata, &event);
}
}
if (bss_conf->cqm_rssi_thold &&
link->u.mgd.count_beacon_signal >= IEEE80211_SIGNAL_AVE_MIN_COUNT &&
!(sdata->vif.driver_flags & IEEE80211_VIF_SUPPORTS_CQM_RSSI)) {
int sig = -ewma_beacon_signal_read(&link->u.mgd.ave_beacon_signal);
int last_event = link->u.mgd.last_cqm_event_signal;
int thold = bss_conf->cqm_rssi_thold;
int hyst = bss_conf->cqm_rssi_hyst;
if (sig < thold &&
(last_event == 0 || sig < last_event - hyst)) {
link->u.mgd.last_cqm_event_signal = sig;
ieee80211_cqm_rssi_notify(
&sdata->vif,
NL80211_CQM_RSSI_THRESHOLD_EVENT_LOW,
sig, GFP_KERNEL);
} else if (sig > thold &&
(last_event == 0 || sig > last_event + hyst)) {
link->u.mgd.last_cqm_event_signal = sig;
ieee80211_cqm_rssi_notify(
&sdata->vif,
NL80211_CQM_RSSI_THRESHOLD_EVENT_HIGH,
sig, GFP_KERNEL);
}
}
if (bss_conf->cqm_rssi_low &&
link->u.mgd.count_beacon_signal >= IEEE80211_SIGNAL_AVE_MIN_COUNT) {
int sig = -ewma_beacon_signal_read(&link->u.mgd.ave_beacon_signal);
int last_event = link->u.mgd.last_cqm_event_signal;
int low = bss_conf->cqm_rssi_low;
int high = bss_conf->cqm_rssi_high;
if (sig < low &&
(last_event == 0 || last_event >= low)) {
link->u.mgd.last_cqm_event_signal = sig;
ieee80211_cqm_rssi_notify(
&sdata->vif,
NL80211_CQM_RSSI_THRESHOLD_EVENT_LOW,
sig, GFP_KERNEL);
} else if (sig > high &&
(last_event == 0 || last_event <= high)) {
link->u.mgd.last_cqm_event_signal = sig;
ieee80211_cqm_rssi_notify(
&sdata->vif,
NL80211_CQM_RSSI_THRESHOLD_EVENT_HIGH,
sig, GFP_KERNEL);
}
}
}
static bool ieee80211_rx_our_beacon(const u8 *tx_bssid,
struct cfg80211_bss *bss)
{
if (ether_addr_equal(tx_bssid, bss->bssid))
return true;
if (!bss->transmitted_bss)
return false;
return ether_addr_equal(tx_bssid, bss->transmitted_bss->bssid);
}
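
/*
 * Extract the disabled-subchannel (puncturing) bitmap from the EHT
 * operation element and apply it; returns false if the bitmap is not
 * valid for the current bandwidth, in which case the caller
 * disconnects.
 */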
static bool ieee80211_config_puncturing(struct ieee80211_link_data *link,
const struct ieee80211_eht_operation *eht_oper,
u64 *changed)
{
u16 bitmap = 0, extracted;
if ((eht_oper->params & IEEE80211_EHT_OPER_INFO_PRESENT) &&
(eht_oper->params &
IEEE80211_EHT_OPER_DISABLED_SUBCHANNEL_BITMAP_PRESENT)) {
const struct ieee80211_eht_operation_info *info =
(void *)eht_oper->optional;
const u8 *disable_subchannel_bitmap = info->optional;
bitmap = get_unaligned_le16(disable_subchannel_bitmap);
}
extracted = ieee80211_extract_dis_subch_bmap(eht_oper,
&link->conf->chandef,
bitmap);
/* accept if there are no changes */
if (!(*changed & BSS_CHANGED_BANDWIDTH) &&
extracted == link->conf->eht_puncturing)
return true;
if (!cfg80211_valid_disable_subchannel_bitmap(&bitmap,
&link->conf->chandef)) {
link_info(link,
"Got an invalid disable subchannel bitmap from AP %pM: bitmap = 0x%x, bw = 0x%x. disconnect\n",
link->u.mgd.bssid,
bitmap,
link->conf->chandef.width);
return false;
}
ieee80211_handle_puncturing_bitmap(link, eht_oper, bitmap, changed);
return true;
}
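
/*
 * Deferred ML reconfiguration: drop the links the AP MLD announced for
 * removal, updating the active/valid/dormant link sets, and disconnect
 * if no usable link would remain.
 */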
static void ieee80211_ml_reconf_work(struct wiphy *wiphy,
struct wiphy_work *work)
{
struct ieee80211_sub_if_data *sdata =
container_of(work, struct ieee80211_sub_if_data,
u.mgd.ml_reconf_work.work);
u16 new_valid_links, new_active_links, new_dormant_links;
int ret;
sdata_lock(sdata);
if (!sdata->u.mgd.removed_links) {
sdata_unlock(sdata);
return;
}
sdata_info(sdata,
"MLO Reconfiguration: work: valid=0x%x, removed=0x%x\n",
sdata->vif.valid_links, sdata->u.mgd.removed_links);
new_valid_links = sdata->vif.valid_links & ~sdata->u.mgd.removed_links;
if (new_valid_links == sdata->vif.valid_links) {
sdata_unlock(sdata);
return;
}
if (!new_valid_links ||
!(new_valid_links & ~sdata->vif.dormant_links)) {
sdata_info(sdata, "No valid links after reconfiguration\n");
ret = -EINVAL;
goto out;
}
new_active_links = sdata->vif.active_links & ~sdata->u.mgd.removed_links;
if (new_active_links != sdata->vif.active_links) {
if (!new_active_links)
new_active_links =
BIT(ffs(new_valid_links &
~sdata->vif.dormant_links) - 1);
ret = __ieee80211_set_active_links(&sdata->vif,
new_active_links);
if (ret) {
sdata_info(sdata,
"Failed setting active links\n");
goto out;
}
}
new_dormant_links = sdata->vif.dormant_links & ~sdata->u.mgd.removed_links;
ret = ieee80211_vif_set_links(sdata, new_valid_links,
new_dormant_links);
if (ret)
sdata_info(sdata, "Failed setting valid links\n");
out:
if (!ret)
cfg80211_links_removed(sdata->dev, sdata->u.mgd.removed_links);
else
___ieee80211_disconnect(sdata);
sdata->u.mgd.removed_links = 0;
sdata_unlock(sdata);
}
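
/*
 * Parse a Reconfiguration Multi-Link element from a beacon, determine
 * which of our valid links are being removed and after which delay, and
 * schedule (or cancel) the ML reconfiguration work accordingly.
 */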
static void ieee80211_ml_reconfiguration(struct ieee80211_sub_if_data *sdata,
struct ieee802_11_elems *elems)
{
const struct ieee80211_multi_link_elem *ml;
const struct element *sub;
size_t ml_len;
unsigned long removed_links = 0;
u16 link_removal_timeout[IEEE80211_MLD_MAX_NUM_LINKS] = {};
u8 link_id;
u32 delay;
if (!ieee80211_vif_is_mld(&sdata->vif) || !elems->ml_reconf)
return;
ml_len = cfg80211_defragment_element(elems->ml_reconf_elem,
elems->ie_start,
elems->total_len,
elems->scratch_pos,
elems->scratch + elems->scratch_len -
elems->scratch_pos,
WLAN_EID_FRAGMENT);
elems->ml_reconf = (const void *)elems->scratch_pos;
elems->ml_reconf_len = ml_len;
ml = elems->ml_reconf;
	/* Parse the subelements directly, as the common info field
	 * doesn't carry anything useful here.
	 */
for_each_mle_subelement(sub, (u8 *)ml, ml_len) {
struct ieee80211_mle_per_sta_profile *prof = (void *)sub->data;
u8 *pos = prof->variable;
u16 control;
if (sub->id != IEEE80211_MLE_SUBELEM_PER_STA_PROFILE)
continue;
if (!ieee80211_mle_reconf_sta_prof_size_ok(sub->data,
sub->datalen))
return;
control = le16_to_cpu(prof->control);
link_id = control & IEEE80211_MLE_STA_RECONF_CONTROL_LINK_ID;
removed_links |= BIT(link_id);
/* the MAC address should not be included, but handle it */
if (control &
IEEE80211_MLE_STA_RECONF_CONTROL_STA_MAC_ADDR_PRESENT)
pos += 6;
		/* According to Draft P802.11be_D3.0, the control should
		 * have the AP Removal Timer marked as present. If the AP
		 * Removal Timer is not present, assume immediate removal.
		 */
if (control &
IEEE80211_MLE_STA_RECONF_CONTROL_AP_REM_TIMER_PRESENT)
link_removal_timeout[link_id] = le16_to_cpu(*(__le16 *)pos);
}
removed_links &= sdata->vif.valid_links;
if (!removed_links) {
/* In case the removal was cancelled, abort it */
if (sdata->u.mgd.removed_links) {
sdata->u.mgd.removed_links = 0;
wiphy_delayed_work_cancel(sdata->local->hw.wiphy,
&sdata->u.mgd.ml_reconf_work);
}
return;
}
delay = 0;
for_each_set_bit(link_id, &removed_links, IEEE80211_MLD_MAX_NUM_LINKS) {
struct ieee80211_bss_conf *link_conf =
sdata_dereference(sdata->vif.link_conf[link_id], sdata);
u32 link_delay;
if (!link_conf) {
removed_links &= ~BIT(link_id);
continue;
}
link_delay = link_conf->beacon_int *
link_removal_timeout[link_id];
if (!delay)
delay = link_delay;
else
delay = min(delay, link_delay);
}
sdata->u.mgd.removed_links = removed_links;
wiphy_delayed_work_queue(sdata->local->hw.wiphy,
&sdata->u.mgd.ml_reconf_work,
TU_TO_JIFFIES(delay));
}
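
/*
 * Process a beacon (or S1G beacon) from the current/target AP: complete
 * an association that was waiting for a beacon, track signal quality,
 * and - when the filtered element CRC changes - follow QoS, channel
 * switch, bandwidth, power constraint, puncturing and ML
 * reconfiguration updates.
 */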
static void ieee80211_rx_mgmt_beacon(struct ieee80211_link_data *link,
struct ieee80211_hdr *hdr, size_t len,
struct ieee80211_rx_status *rx_status)
{
struct ieee80211_sub_if_data *sdata = link->sdata;
struct ieee80211_if_managed *ifmgd = &sdata->u.mgd;
struct ieee80211_bss_conf *bss_conf = &sdata->vif.bss_conf;
struct ieee80211_vif_cfg *vif_cfg = &sdata->vif.cfg;
struct ieee80211_mgmt *mgmt = (void *) hdr;
size_t baselen;
struct ieee802_11_elems *elems;
struct ieee80211_local *local = sdata->local;
struct ieee80211_chanctx_conf *chanctx_conf;
struct ieee80211_supported_band *sband;
struct ieee80211_channel *chan;
struct link_sta_info *link_sta;
struct sta_info *sta;
u64 changed = 0;
bool erp_valid;
u8 erp_value = 0;
u32 ncrc = 0;
u8 *bssid, *variable = mgmt->u.beacon.variable;
u8 deauth_buf[IEEE80211_DEAUTH_FRAME_LEN];
struct ieee80211_elems_parse_params parse_params = {
.link_id = -1,
.from_ap = true,
};
sdata_assert_lock(sdata);
/* Process beacon from the current BSS */
bssid = ieee80211_get_bssid(hdr, len, sdata->vif.type);
if (ieee80211_is_s1g_beacon(mgmt->frame_control)) {
struct ieee80211_ext *ext = (void *) mgmt;
if (ieee80211_is_s1g_short_beacon(ext->frame_control))
variable = ext->u.s1g_short_beacon.variable;
else
variable = ext->u.s1g_beacon.variable;
}
baselen = (u8 *) variable - (u8 *) mgmt;
if (baselen > len)
return;
parse_params.start = variable;
parse_params.len = len - baselen;
rcu_read_lock();
chanctx_conf = rcu_dereference(link->conf->chanctx_conf);
if (!chanctx_conf) {
rcu_read_unlock();
return;
}
if (ieee80211_rx_status_to_khz(rx_status) !=
ieee80211_channel_to_khz(chanctx_conf->def.chan)) {
rcu_read_unlock();
return;
}
chan = chanctx_conf->def.chan;
rcu_read_unlock();
if (ifmgd->assoc_data && ifmgd->assoc_data->need_beacon &&
!WARN_ON(ieee80211_vif_is_mld(&sdata->vif)) &&
ieee80211_rx_our_beacon(bssid, ifmgd->assoc_data->link[0].bss)) {
parse_params.bss = ifmgd->assoc_data->link[0].bss;
elems = ieee802_11_parse_elems_full(&parse_params);
if (!elems)
return;
ieee80211_rx_bss_info(link, mgmt, len, rx_status);
if (elems->dtim_period)
link->u.mgd.dtim_period = elems->dtim_period;
link->u.mgd.have_beacon = true;
ifmgd->assoc_data->need_beacon = false;
if (ieee80211_hw_check(&local->hw, TIMING_BEACON_ONLY)) {
link->conf->sync_tsf =
le64_to_cpu(mgmt->u.beacon.timestamp);
link->conf->sync_device_ts =
rx_status->device_timestamp;
link->conf->sync_dtim_count = elems->dtim_count;
}
if (elems->mbssid_config_ie)
bss_conf->profile_periodicity =
elems->mbssid_config_ie->profile_periodicity;
else
bss_conf->profile_periodicity = 0;
if (elems->ext_capab_len >= 11 &&
(elems->ext_capab[10] & WLAN_EXT_CAPA11_EMA_SUPPORT))
bss_conf->ema_ap = true;
else
bss_conf->ema_ap = false;
/* continue assoc process */
ifmgd->assoc_data->timeout = jiffies;
ifmgd->assoc_data->timeout_started = true;
run_again(sdata, ifmgd->assoc_data->timeout);
kfree(elems);
return;
}
if (!ifmgd->associated ||
!ieee80211_rx_our_beacon(bssid, link->u.mgd.bss))
return;
bssid = link->u.mgd.bssid;
if (!(rx_status->flag & RX_FLAG_NO_SIGNAL_VAL))
ieee80211_handle_beacon_sig(link, ifmgd, bss_conf,
local, rx_status);
if (ifmgd->flags & IEEE80211_STA_CONNECTION_POLL) {
mlme_dbg_ratelimited(sdata,
"cancelling AP probe due to a received beacon\n");
ieee80211_reset_ap_probe(sdata);
}
/*
* Push the beacon loss detection into the future since
* we are processing a beacon from the AP just now.
*/
ieee80211_sta_reset_beacon_monitor(sdata);
	/* TODO: CRC currently not calculated on S1G Beacon Compatibility
* element (which carries the beacon interval). Don't forget to add a
* bit to care_about_ies[] above if mac80211 is interested in a
* changing S1G element.
*/
if (!ieee80211_is_s1g_beacon(hdr->frame_control))
ncrc = crc32_be(0, (void *)&mgmt->u.beacon.beacon_int, 4);
parse_params.bss = link->u.mgd.bss;
parse_params.filter = care_about_ies;
parse_params.crc = ncrc;
elems = ieee802_11_parse_elems_full(&parse_params);
if (!elems)
return;
ncrc = elems->crc;
if (ieee80211_hw_check(&local->hw, PS_NULLFUNC_STACK) &&
ieee80211_check_tim(elems->tim, elems->tim_len, vif_cfg->aid)) {
if (local->hw.conf.dynamic_ps_timeout > 0) {
if (local->hw.conf.flags & IEEE80211_CONF_PS) {
local->hw.conf.flags &= ~IEEE80211_CONF_PS;
ieee80211_hw_config(local,
IEEE80211_CONF_CHANGE_PS);
}
ieee80211_send_nullfunc(local, sdata, false);
} else if (!local->pspolling && sdata->u.mgd.powersave) {
local->pspolling = true;
			/*
			 * It is assumed here that the driver is able to
			 * send a PS-Poll frame and receive a response
			 * even though power save mode is enabled, but
			 * some drivers might require power save to be
			 * disabled here. This needs to be investigated.
			 */
ieee80211_send_pspoll(local, sdata);
}
}
if (sdata->vif.p2p ||
sdata->vif.driver_flags & IEEE80211_VIF_GET_NOA_UPDATE) {
struct ieee80211_p2p_noa_attr noa = {};
int ret;
ret = cfg80211_get_p2p_attr(variable,
len - baselen,
IEEE80211_P2P_ATTR_ABSENCE_NOTICE,
(u8 *) &noa, sizeof(noa));
if (ret >= 2) {
if (link->u.mgd.p2p_noa_index != noa.index) {
/* valid noa_attr and index changed */
link->u.mgd.p2p_noa_index = noa.index;
memcpy(&bss_conf->p2p_noa_attr, &noa, sizeof(noa));
changed |= BSS_CHANGED_P2P_PS;
/*
* make sure we update all information, the CRC
* mechanism doesn't look at P2P attributes.
*/
link->u.mgd.beacon_crc_valid = false;
}
} else if (link->u.mgd.p2p_noa_index != -1) {
/* noa_attr not found and we had valid noa_attr before */
link->u.mgd.p2p_noa_index = -1;
memset(&bss_conf->p2p_noa_attr, 0, sizeof(bss_conf->p2p_noa_attr));
changed |= BSS_CHANGED_P2P_PS;
link->u.mgd.beacon_crc_valid = false;
}
}
if (link->u.mgd.csa_waiting_bcn)
ieee80211_chswitch_post_beacon(link);
/*
* Update beacon timing and dtim count on every beacon appearance. This
* will allow the driver to use the most updated values. Do it before
* comparing this one with last received beacon.
* IMPORTANT: These parameters would possibly be out of sync by the time
* the driver will use them. The synchronized view is currently
* guaranteed only in certain callbacks.
*/
if (ieee80211_hw_check(&local->hw, TIMING_BEACON_ONLY) &&
!ieee80211_is_s1g_beacon(hdr->frame_control)) {
link->conf->sync_tsf =
le64_to_cpu(mgmt->u.beacon.timestamp);
link->conf->sync_device_ts =
rx_status->device_timestamp;
link->conf->sync_dtim_count = elems->dtim_count;
}
if ((ncrc == link->u.mgd.beacon_crc && link->u.mgd.beacon_crc_valid) ||
ieee80211_is_s1g_short_beacon(mgmt->frame_control))
goto free;
link->u.mgd.beacon_crc = ncrc;
link->u.mgd.beacon_crc_valid = true;
ieee80211_rx_bss_info(link, mgmt, len, rx_status);
ieee80211_sta_process_chanswitch(link, rx_status->mactime,
rx_status->device_timestamp,
elems, true);
if (!link->u.mgd.disable_wmm_tracking &&
ieee80211_sta_wmm_params(local, link, elems->wmm_param,
elems->wmm_param_len,
elems->mu_edca_param_set))
changed |= BSS_CHANGED_QOS;
/*
* If we haven't had a beacon before, tell the driver about the
* DTIM period (and beacon timing if desired) now.
*/
if (!link->u.mgd.have_beacon) {
		/* a few bogus APs send dtim_period = 0 or no TIM IE */
bss_conf->dtim_period = elems->dtim_period ?: 1;
changed |= BSS_CHANGED_BEACON_INFO;
link->u.mgd.have_beacon = true;
mutex_lock(&local->iflist_mtx);
ieee80211_recalc_ps(local);
mutex_unlock(&local->iflist_mtx);
ieee80211_recalc_ps_vif(sdata);
}
if (elems->erp_info) {
erp_valid = true;
erp_value = elems->erp_info[0];
} else {
erp_valid = false;
}
if (!ieee80211_is_s1g_beacon(hdr->frame_control))
changed |= ieee80211_handle_bss_capability(link,
le16_to_cpu(mgmt->u.beacon.capab_info),
erp_valid, erp_value);
mutex_lock(&local->sta_mtx);
sta = sta_info_get(sdata, sdata->vif.cfg.ap_addr);
if (WARN_ON(!sta)) {
mutex_unlock(&local->sta_mtx);
goto free;
}
link_sta = rcu_dereference_protected(sta->link[link->link_id],
lockdep_is_held(&local->sta_mtx));
if (WARN_ON(!link_sta)) {
mutex_unlock(&local->sta_mtx);
goto free;
}
	if (WARN_ON(!link->conf->chandef.chan)) {
		mutex_unlock(&local->sta_mtx);
		goto free;
	}
sband = local->hw.wiphy->bands[link->conf->chandef.chan->band];
changed |= ieee80211_recalc_twt_req(sdata, sband, link, link_sta, elems);
if (ieee80211_config_bw(link, elems->ht_cap_elem,
elems->vht_cap_elem, elems->ht_operation,
elems->vht_operation, elems->he_operation,
elems->eht_operation,
elems->s1g_oper, bssid, &changed)) {
mutex_unlock(&local->sta_mtx);
sdata_info(sdata,
"failed to follow AP %pM bandwidth change, disconnect\n",
bssid);
ieee80211_set_disassoc(sdata, IEEE80211_STYPE_DEAUTH,
WLAN_REASON_DEAUTH_LEAVING,
true, deauth_buf);
ieee80211_report_disconnect(sdata, deauth_buf,
sizeof(deauth_buf), true,
WLAN_REASON_DEAUTH_LEAVING,
false);
goto free;
}
if (elems->opmode_notif)
ieee80211_vht_handle_opmode(sdata, link_sta,
*elems->opmode_notif,
rx_status->band);
mutex_unlock(&local->sta_mtx);
changed |= ieee80211_handle_pwr_constr(link, chan, mgmt,
elems->country_elem,
elems->country_elem_len,
elems->pwr_constr_elem,
elems->cisco_dtpc_elem);
if (elems->eht_operation &&
!(link->u.mgd.conn_flags & IEEE80211_CONN_DISABLE_EHT)) {
if (!ieee80211_config_puncturing(link, elems->eht_operation,
&changed)) {
ieee80211_set_disassoc(sdata, IEEE80211_STYPE_DEAUTH,
WLAN_REASON_DEAUTH_LEAVING,
true, deauth_buf);
ieee80211_report_disconnect(sdata, deauth_buf,
sizeof(deauth_buf), true,
WLAN_REASON_DEAUTH_LEAVING,
false);
goto free;
}
}
ieee80211_ml_reconfiguration(sdata, elems);
ieee80211_link_info_change_notify(sdata, link, changed);
free:
kfree(elems);
}
void ieee80211_sta_rx_queued_ext(struct ieee80211_sub_if_data *sdata,
struct sk_buff *skb)
{
struct ieee80211_link_data *link = &sdata->deflink;
struct ieee80211_rx_status *rx_status;
struct ieee80211_hdr *hdr;
u16 fc;
rx_status = (struct ieee80211_rx_status *) skb->cb;
hdr = (struct ieee80211_hdr *) skb->data;
fc = le16_to_cpu(hdr->frame_control);
sdata_lock(sdata);
switch (fc & IEEE80211_FCTL_STYPE) {
case IEEE80211_STYPE_S1G_BEACON:
ieee80211_rx_mgmt_beacon(link, hdr, skb->len, rx_status);
break;
}
sdata_unlock(sdata);
}
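
/*
 * Dispatch queued management frames for a managed interface to the
 * per-subtype handlers, resolving the receiving link first if the RX
 * status carries a valid link ID.
 */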
void ieee80211_sta_rx_queued_mgmt(struct ieee80211_sub_if_data *sdata,
struct sk_buff *skb)
{
struct ieee80211_link_data *link = &sdata->deflink;
struct ieee80211_rx_status *rx_status;
struct ieee80211_mgmt *mgmt;
u16 fc;
int ies_len;
rx_status = (struct ieee80211_rx_status *) skb->cb;
mgmt = (struct ieee80211_mgmt *) skb->data;
fc = le16_to_cpu(mgmt->frame_control);
sdata_lock(sdata);
if (rx_status->link_valid) {
link = sdata_dereference(sdata->link[rx_status->link_id],
sdata);
if (!link)
goto out;
}
switch (fc & IEEE80211_FCTL_STYPE) {
case IEEE80211_STYPE_BEACON:
ieee80211_rx_mgmt_beacon(link, (void *)mgmt,
skb->len, rx_status);
break;
case IEEE80211_STYPE_PROBE_RESP:
ieee80211_rx_mgmt_probe_resp(link, skb);
break;
case IEEE80211_STYPE_AUTH:
ieee80211_rx_mgmt_auth(sdata, mgmt, skb->len);
break;
case IEEE80211_STYPE_DEAUTH:
ieee80211_rx_mgmt_deauth(sdata, mgmt, skb->len);
break;
case IEEE80211_STYPE_DISASSOC:
ieee80211_rx_mgmt_disassoc(sdata, mgmt, skb->len);
break;
case IEEE80211_STYPE_ASSOC_RESP:
case IEEE80211_STYPE_REASSOC_RESP:
ieee80211_rx_mgmt_assoc_resp(sdata, mgmt, skb->len);
break;
case IEEE80211_STYPE_ACTION:
if (!sdata->u.mgd.associated ||
!ether_addr_equal(mgmt->bssid, sdata->vif.cfg.ap_addr))
break;
if (mgmt->u.action.category == WLAN_CATEGORY_SPECTRUM_MGMT) {
struct ieee802_11_elems *elems;
ies_len = skb->len -
offsetof(struct ieee80211_mgmt,
u.action.u.chan_switch.variable);
if (ies_len < 0)
break;
/* CSA IE cannot be overridden, no need for BSSID */
elems = ieee802_11_parse_elems(
mgmt->u.action.u.chan_switch.variable,
ies_len, true, NULL);
if (elems && !elems->parse_error)
ieee80211_sta_process_chanswitch(link,
rx_status->mactime,
rx_status->device_timestamp,
elems, false);
kfree(elems);
} else if (mgmt->u.action.category == WLAN_CATEGORY_PUBLIC) {
struct ieee802_11_elems *elems;
ies_len = skb->len -
offsetof(struct ieee80211_mgmt,
u.action.u.ext_chan_switch.variable);
if (ies_len < 0)
break;
/*
* extended CSA IE can't be overridden, no need for
* BSSID
*/
elems = ieee802_11_parse_elems(
mgmt->u.action.u.ext_chan_switch.variable,
ies_len, true, NULL);
if (elems && !elems->parse_error) {
/* for the handling code pretend it was an IE */
elems->ext_chansw_ie =
&mgmt->u.action.u.ext_chan_switch.data;
ieee80211_sta_process_chanswitch(link,
rx_status->mactime,
rx_status->device_timestamp,
elems, false);
}
kfree(elems);
}
break;
}
out:
sdata_unlock(sdata);
}
static void ieee80211_sta_timer(struct timer_list *t)
{
struct ieee80211_sub_if_data *sdata =
from_timer(sdata, t, u.mgd.timer);
wiphy_work_queue(sdata->local->hw.wiphy, &sdata->work);
}
void ieee80211_sta_connection_lost(struct ieee80211_sub_if_data *sdata,
u8 reason, bool tx)
{
u8 frame_buf[IEEE80211_DEAUTH_FRAME_LEN];
ieee80211_set_disassoc(sdata, IEEE80211_STYPE_DEAUTH, reason,
tx, frame_buf);
ieee80211_report_disconnect(sdata, frame_buf, sizeof(frame_buf), true,
reason, false);
}
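
/*
 * (Re)send an Authentication frame for the pending authentication,
 * giving up with -ETIMEDOUT after IEEE80211_AUTH_MAX_TRIES attempts and
 * arming the retry timeout depending on whether TX status reports are
 * available.
 */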
static int ieee80211_auth(struct ieee80211_sub_if_data *sdata)
{
struct ieee80211_local *local = sdata->local;
struct ieee80211_if_managed *ifmgd = &sdata->u.mgd;
struct ieee80211_mgd_auth_data *auth_data = ifmgd->auth_data;
u32 tx_flags = 0;
u16 trans = 1;
u16 status = 0;
struct ieee80211_prep_tx_info info = {
.subtype = IEEE80211_STYPE_AUTH,
};
sdata_assert_lock(sdata);
if (WARN_ON_ONCE(!auth_data))
return -EINVAL;
auth_data->tries++;
if (auth_data->tries > IEEE80211_AUTH_MAX_TRIES) {
sdata_info(sdata, "authentication with %pM timed out\n",
auth_data->ap_addr);
		/*
		 * Most likely the AP is not in range, so remove the
		 * BSS entry for that AP.
		 */
cfg80211_unlink_bss(local->hw.wiphy, auth_data->bss);
return -ETIMEDOUT;
}
if (auth_data->algorithm == WLAN_AUTH_SAE)
info.duration = jiffies_to_msecs(IEEE80211_AUTH_TIMEOUT_SAE);
drv_mgd_prepare_tx(local, sdata, &info);
sdata_info(sdata, "send auth to %pM (try %d/%d)\n",
auth_data->ap_addr, auth_data->tries,
IEEE80211_AUTH_MAX_TRIES);
auth_data->expected_transaction = 2;
if (auth_data->algorithm == WLAN_AUTH_SAE) {
trans = auth_data->sae_trans;
status = auth_data->sae_status;
auth_data->expected_transaction = trans;
}
if (ieee80211_hw_check(&local->hw, REPORTS_TX_ACK_STATUS))
tx_flags = IEEE80211_TX_CTL_REQ_TX_STATUS |
IEEE80211_TX_INTFL_MLME_CONN_TX;
ieee80211_send_auth(sdata, trans, auth_data->algorithm, status,
auth_data->data, auth_data->data_len,
auth_data->ap_addr, auth_data->ap_addr,
NULL, 0, 0, tx_flags);
if (tx_flags == 0) {
if (auth_data->algorithm == WLAN_AUTH_SAE)
auth_data->timeout = jiffies +
IEEE80211_AUTH_TIMEOUT_SAE;
else
auth_data->timeout = jiffies + IEEE80211_AUTH_TIMEOUT;
} else {
auth_data->timeout =
round_jiffies_up(jiffies + IEEE80211_AUTH_TIMEOUT_LONG);
}
auth_data->timeout_started = true;
run_again(sdata, auth_data->timeout);
return 0;
}
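
/*
 * (Re)send the (Re)Association Request for the pending association,
 * giving up with -ETIMEDOUT after IEEE80211_ASSOC_MAX_TRIES attempts.
 */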
static int ieee80211_do_assoc(struct ieee80211_sub_if_data *sdata)
{
struct ieee80211_mgd_assoc_data *assoc_data = sdata->u.mgd.assoc_data;
struct ieee80211_local *local = sdata->local;
int ret;
sdata_assert_lock(sdata);
assoc_data->tries++;
if (assoc_data->tries > IEEE80211_ASSOC_MAX_TRIES) {
sdata_info(sdata, "association with %pM timed out\n",
assoc_data->ap_addr);
		/*
		 * Most likely the AP is not in range, so remove the
		 * BSS entry for that AP.
		 */
cfg80211_unlink_bss(local->hw.wiphy,
assoc_data->link[assoc_data->assoc_link_id].bss);
return -ETIMEDOUT;
}
sdata_info(sdata, "associate with %pM (try %d/%d)\n",
assoc_data->ap_addr, assoc_data->tries,
IEEE80211_ASSOC_MAX_TRIES);
ret = ieee80211_send_assoc(sdata);
if (ret)
return ret;
if (!ieee80211_hw_check(&local->hw, REPORTS_TX_ACK_STATUS)) {
assoc_data->timeout = jiffies + IEEE80211_ASSOC_TIMEOUT;
assoc_data->timeout_started = true;
run_again(sdata, assoc_data->timeout);
} else {
assoc_data->timeout =
round_jiffies_up(jiffies +
IEEE80211_ASSOC_TIMEOUT_LONG);
assoc_data->timeout_started = true;
run_again(sdata, assoc_data->timeout);
}
return 0;
}
void ieee80211_mgd_conn_tx_status(struct ieee80211_sub_if_data *sdata,
__le16 fc, bool acked)
{
struct ieee80211_local *local = sdata->local;
sdata->u.mgd.status_fc = fc;
sdata->u.mgd.status_acked = acked;
sdata->u.mgd.status_received = true;
wiphy_work_queue(local->hw.wiphy, &sdata->work);
}
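
/*
 * Main MLME work for a managed interface: react to TX status of
 * auth/assoc frames, drive the authentication and association retry
 * state machines, and run the connection-poll (nullfunc/probe) logic
 * while associated.
 */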
void ieee80211_sta_work(struct ieee80211_sub_if_data *sdata)
{
struct ieee80211_local *local = sdata->local;
struct ieee80211_if_managed *ifmgd = &sdata->u.mgd;
sdata_lock(sdata);
if (ifmgd->status_received) {
__le16 fc = ifmgd->status_fc;
bool status_acked = ifmgd->status_acked;
ifmgd->status_received = false;
if (ifmgd->auth_data && ieee80211_is_auth(fc)) {
if (status_acked) {
if (ifmgd->auth_data->algorithm ==
WLAN_AUTH_SAE)
ifmgd->auth_data->timeout =
jiffies +
IEEE80211_AUTH_TIMEOUT_SAE;
else
ifmgd->auth_data->timeout =
jiffies +
IEEE80211_AUTH_TIMEOUT_SHORT;
run_again(sdata, ifmgd->auth_data->timeout);
} else {
ifmgd->auth_data->timeout = jiffies - 1;
}
ifmgd->auth_data->timeout_started = true;
} else if (ifmgd->assoc_data &&
(ieee80211_is_assoc_req(fc) ||
ieee80211_is_reassoc_req(fc))) {
if (status_acked) {
ifmgd->assoc_data->timeout =
jiffies + IEEE80211_ASSOC_TIMEOUT_SHORT;
run_again(sdata, ifmgd->assoc_data->timeout);
} else {
ifmgd->assoc_data->timeout = jiffies - 1;
}
ifmgd->assoc_data->timeout_started = true;
}
}
if (ifmgd->auth_data && ifmgd->auth_data->timeout_started &&
time_after(jiffies, ifmgd->auth_data->timeout)) {
if (ifmgd->auth_data->done || ifmgd->auth_data->waiting) {
/*
* ok ... we waited for assoc or continuation but
* userspace didn't do it, so kill the auth data
*/
ieee80211_destroy_auth_data(sdata, false);
} else if (ieee80211_auth(sdata)) {
u8 ap_addr[ETH_ALEN];
struct ieee80211_event event = {
.type = MLME_EVENT,
.u.mlme.data = AUTH_EVENT,
.u.mlme.status = MLME_TIMEOUT,
};
memcpy(ap_addr, ifmgd->auth_data->ap_addr, ETH_ALEN);
ieee80211_destroy_auth_data(sdata, false);
cfg80211_auth_timeout(sdata->dev, ap_addr);
drv_event_callback(sdata->local, sdata, &event);
}
} else if (ifmgd->auth_data && ifmgd->auth_data->timeout_started)
run_again(sdata, ifmgd->auth_data->timeout);
if (ifmgd->assoc_data && ifmgd->assoc_data->timeout_started &&
time_after(jiffies, ifmgd->assoc_data->timeout)) {
if ((ifmgd->assoc_data->need_beacon &&
!sdata->deflink.u.mgd.have_beacon) ||
ieee80211_do_assoc(sdata)) {
struct ieee80211_event event = {
.type = MLME_EVENT,
.u.mlme.data = ASSOC_EVENT,
.u.mlme.status = MLME_TIMEOUT,
};
ieee80211_destroy_assoc_data(sdata, ASSOC_TIMEOUT);
drv_event_callback(sdata->local, sdata, &event);
}
} else if (ifmgd->assoc_data && ifmgd->assoc_data->timeout_started)
run_again(sdata, ifmgd->assoc_data->timeout);
if (ifmgd->flags & IEEE80211_STA_CONNECTION_POLL &&
ifmgd->associated) {
u8 *bssid = sdata->deflink.u.mgd.bssid;
int max_tries;
if (ieee80211_hw_check(&local->hw, REPORTS_TX_ACK_STATUS))
max_tries = max_nullfunc_tries;
else
max_tries = max_probe_tries;
/* ACK received for nullfunc probing frame */
if (!ifmgd->probe_send_count)
ieee80211_reset_ap_probe(sdata);
else if (ifmgd->nullfunc_failed) {
if (ifmgd->probe_send_count < max_tries) {
mlme_dbg(sdata,
"No ack for nullfunc frame to AP %pM, try %d/%i\n",
bssid, ifmgd->probe_send_count,
max_tries);
ieee80211_mgd_probe_ap_send(sdata);
} else {
mlme_dbg(sdata,
"No ack for nullfunc frame to AP %pM, disconnecting.\n",
bssid);
ieee80211_sta_connection_lost(sdata,
WLAN_REASON_DISASSOC_DUE_TO_INACTIVITY,
false);
}
} else if (time_is_after_jiffies(ifmgd->probe_timeout))
run_again(sdata, ifmgd->probe_timeout);
else if (ieee80211_hw_check(&local->hw, REPORTS_TX_ACK_STATUS)) {
mlme_dbg(sdata,
"Failed to send nullfunc to AP %pM after %dms, disconnecting\n",
bssid, probe_wait_ms);
ieee80211_sta_connection_lost(sdata,
WLAN_REASON_DISASSOC_DUE_TO_INACTIVITY, false);
} else if (ifmgd->probe_send_count < max_tries) {
mlme_dbg(sdata,
"No probe response from AP %pM after %dms, try %d/%i\n",
bssid, probe_wait_ms,
ifmgd->probe_send_count, max_tries);
ieee80211_mgd_probe_ap_send(sdata);
} else {
/*
* We actually lost the connection ... or did we?
* Let's make sure!
*/
mlme_dbg(sdata,
"No probe response from AP %pM after %dms, disconnecting.\n",
bssid, probe_wait_ms);
ieee80211_sta_connection_lost(sdata,
WLAN_REASON_DISASSOC_DUE_TO_INACTIVITY, false);
}
}
sdata_unlock(sdata);
}
static void ieee80211_sta_bcn_mon_timer(struct timer_list *t)
{
struct ieee80211_sub_if_data *sdata =
from_timer(sdata, t, u.mgd.bcn_mon_timer);
if (WARN_ON(ieee80211_vif_is_mld(&sdata->vif)))
return;
if (sdata->vif.bss_conf.csa_active &&
!sdata->deflink.u.mgd.csa_waiting_bcn)
return;
if (sdata->vif.driver_flags & IEEE80211_VIF_BEACON_FILTER)
return;
sdata->u.mgd.connection_loss = false;
wiphy_work_queue(sdata->local->hw.wiphy,
&sdata->u.mgd.beacon_connection_loss_work);
}
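
/*
 * Connection monitor timer: if neither a frame nor an ACK was received
 * from the AP within IEEE80211_CONNECTION_IDLE_TIME, queue the monitor
 * work to probe the AP; otherwise re-arm the timer for the remaining
 * time.
 */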
static void ieee80211_sta_conn_mon_timer(struct timer_list *t)
{
struct ieee80211_sub_if_data *sdata =
from_timer(sdata, t, u.mgd.conn_mon_timer);
struct ieee80211_if_managed *ifmgd = &sdata->u.mgd;
struct ieee80211_local *local = sdata->local;
struct sta_info *sta;
unsigned long timeout;
if (WARN_ON(ieee80211_vif_is_mld(&sdata->vif)))
return;
if (sdata->vif.bss_conf.csa_active &&
!sdata->deflink.u.mgd.csa_waiting_bcn)
return;
sta = sta_info_get(sdata, sdata->vif.cfg.ap_addr);
if (!sta)
return;
timeout = sta->deflink.status_stats.last_ack;
if (time_before(sta->deflink.status_stats.last_ack, sta->deflink.rx_stats.last_rx))
timeout = sta->deflink.rx_stats.last_rx;
timeout += IEEE80211_CONNECTION_IDLE_TIME;
/* If timeout is after now, then update timer to fire at
* the later date, but do not actually probe at this time.
*/
if (time_is_after_jiffies(timeout)) {
mod_timer(&ifmgd->conn_mon_timer, round_jiffies_up(timeout));
return;
}
ieee80211_queue_work(&local->hw, &ifmgd->monitor_work);
}
static void ieee80211_sta_monitor_work(struct work_struct *work)
{
struct ieee80211_sub_if_data *sdata =
container_of(work, struct ieee80211_sub_if_data,
u.mgd.monitor_work);
ieee80211_mgd_probe_ap(sdata, false);
}
static void ieee80211_restart_sta_timer(struct ieee80211_sub_if_data *sdata)
{
if (sdata->vif.type == NL80211_IFTYPE_STATION) {
__ieee80211_stop_poll(sdata);
/* let's probe the connection once */
if (!ieee80211_hw_check(&sdata->local->hw, CONNECTION_MONITOR))
ieee80211_queue_work(&sdata->local->hw,
&sdata->u.mgd.monitor_work);
}
}
#ifdef CONFIG_PM
void ieee80211_mgd_quiesce(struct ieee80211_sub_if_data *sdata)
{
struct ieee80211_if_managed *ifmgd = &sdata->u.mgd;
u8 frame_buf[IEEE80211_DEAUTH_FRAME_LEN];
sdata_lock(sdata);
if (ifmgd->auth_data || ifmgd->assoc_data) {
const u8 *ap_addr = ifmgd->auth_data ?
ifmgd->auth_data->ap_addr :
ifmgd->assoc_data->ap_addr;
/*
* If we are trying to authenticate / associate while suspending,
* cfg80211 won't know and won't actually abort those attempts,
* thus we need to do that ourselves.
*/
ieee80211_send_deauth_disassoc(sdata, ap_addr, ap_addr,
IEEE80211_STYPE_DEAUTH,
WLAN_REASON_DEAUTH_LEAVING,
false, frame_buf);
if (ifmgd->assoc_data)
ieee80211_destroy_assoc_data(sdata, ASSOC_ABANDON);
if (ifmgd->auth_data)
ieee80211_destroy_auth_data(sdata, false);
cfg80211_tx_mlme_mgmt(sdata->dev, frame_buf,
IEEE80211_DEAUTH_FRAME_LEN,
false);
}
/* This is a bit of a hack - we should find a better and more generic
* solution to this. Normally when suspending, cfg80211 will in fact
* deauthenticate. However, it doesn't (and cannot) stop an ongoing
* auth (not so important) or assoc (this is the problem) process.
*
* As a consequence, it can happen that we are in the process of both
* associating and suspending, and receive an association response
* after cfg80211 has checked if it needs to disconnect, but before
* we actually set the flag to drop incoming frames. This will then
* cause the workqueue flush to process the association response in
* the suspend, resulting in a successful association just before it
* tries to remove the interface from the driver, which now though
* has a channel context assigned ... this results in issues.
*
* To work around this (for now) simply deauth here again if we're
* now connected.
*/
if (ifmgd->associated && !sdata->local->wowlan) {
u8 bssid[ETH_ALEN];
struct cfg80211_deauth_request req = {
.reason_code = WLAN_REASON_DEAUTH_LEAVING,
.bssid = bssid,
};
memcpy(bssid, sdata->vif.cfg.ap_addr, ETH_ALEN);
ieee80211_mgd_deauth(sdata, &req);
}
sdata_unlock(sdata);
}
#endif
void ieee80211_sta_restart(struct ieee80211_sub_if_data *sdata)
{
struct ieee80211_if_managed *ifmgd = &sdata->u.mgd;
sdata_lock(sdata);
if (!ifmgd->associated) {
sdata_unlock(sdata);
return;
}
if (sdata->flags & IEEE80211_SDATA_DISCONNECT_RESUME) {
sdata->flags &= ~IEEE80211_SDATA_DISCONNECT_RESUME;
mlme_dbg(sdata, "driver requested disconnect after resume\n");
ieee80211_sta_connection_lost(sdata,
WLAN_REASON_UNSPECIFIED,
true);
sdata_unlock(sdata);
return;
}
if (sdata->flags & IEEE80211_SDATA_DISCONNECT_HW_RESTART) {
sdata->flags &= ~IEEE80211_SDATA_DISCONNECT_HW_RESTART;
mlme_dbg(sdata, "driver requested disconnect after hardware restart\n");
ieee80211_sta_connection_lost(sdata,
WLAN_REASON_UNSPECIFIED,
true);
sdata_unlock(sdata);
return;
}
sdata_unlock(sdata);
}
static void ieee80211_request_smps_mgd_work(struct wiphy *wiphy,
struct wiphy_work *work)
{
struct ieee80211_link_data *link =
container_of(work, struct ieee80211_link_data,
u.mgd.request_smps_work);
sdata_lock(link->sdata);
__ieee80211_request_smps_mgd(link->sdata, link,
link->u.mgd.driver_smps_mode);
sdata_unlock(link->sdata);
}
/* interface setup */
void ieee80211_sta_setup_sdata(struct ieee80211_sub_if_data *sdata)
{
struct ieee80211_if_managed *ifmgd = &sdata->u.mgd;
INIT_WORK(&ifmgd->monitor_work, ieee80211_sta_monitor_work);
wiphy_work_init(&ifmgd->beacon_connection_loss_work,
ieee80211_beacon_connection_loss_work);
wiphy_work_init(&ifmgd->csa_connection_drop_work,
ieee80211_csa_connection_drop_work);
INIT_DELAYED_WORK(&ifmgd->tdls_peer_del_work,
ieee80211_tdls_peer_del_work);
wiphy_delayed_work_init(&ifmgd->ml_reconf_work,
ieee80211_ml_reconf_work);
timer_setup(&ifmgd->timer, ieee80211_sta_timer, 0);
timer_setup(&ifmgd->bcn_mon_timer, ieee80211_sta_bcn_mon_timer, 0);
timer_setup(&ifmgd->conn_mon_timer, ieee80211_sta_conn_mon_timer, 0);
INIT_DELAYED_WORK(&ifmgd->tx_tspec_wk,
ieee80211_sta_handle_tspec_ac_params_wk);
ifmgd->flags = 0;
ifmgd->powersave = sdata->wdev.ps;
ifmgd->uapsd_queues = sdata->local->hw.uapsd_queues;
ifmgd->uapsd_max_sp_len = sdata->local->hw.uapsd_max_sp_len;
/* Setup TDLS data */
spin_lock_init(&ifmgd->teardown_lock);
ifmgd->teardown_skb = NULL;
ifmgd->orig_teardown_skb = NULL;
}
void ieee80211_mgd_setup_link(struct ieee80211_link_data *link)
{
struct ieee80211_sub_if_data *sdata = link->sdata;
struct ieee80211_local *local = sdata->local;
unsigned int link_id = link->link_id;
link->u.mgd.p2p_noa_index = -1;
link->u.mgd.conn_flags = 0;
link->conf->bssid = link->u.mgd.bssid;
wiphy_work_init(&link->u.mgd.request_smps_work,
ieee80211_request_smps_mgd_work);
if (local->hw.wiphy->features & NL80211_FEATURE_DYNAMIC_SMPS)
link->u.mgd.req_smps = IEEE80211_SMPS_AUTOMATIC;
else
link->u.mgd.req_smps = IEEE80211_SMPS_OFF;
wiphy_delayed_work_init(&link->u.mgd.chswitch_work,
ieee80211_chswitch_work);
if (sdata->u.mgd.assoc_data)
ether_addr_copy(link->conf->addr,
sdata->u.mgd.assoc_data->link[link_id].addr);
else if (!is_valid_ether_addr(link->conf->addr))
eth_random_addr(link->conf->addr);
}
/* scan finished notification */
void ieee80211_mlme_notify_scan_completed(struct ieee80211_local *local)
{
struct ieee80211_sub_if_data *sdata;
/* Restart STA timers */
rcu_read_lock();
list_for_each_entry_rcu(sdata, &local->interfaces, list) {
if (ieee80211_sdata_running(sdata))
ieee80211_restart_sta_timer(sdata);
}
rcu_read_unlock();
}
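
/*
 * Prepare the connection to the given BSS (optionally as one link of an
 * MLD): set up the link and, if needed, a station entry for the AP,
 * record timing information, assign the channel, and finally insert the
 * station entry and cancel any ongoing scan.
 */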
static int ieee80211_prep_connection(struct ieee80211_sub_if_data *sdata,
struct cfg80211_bss *cbss, s8 link_id,
const u8 *ap_mld_addr, bool assoc,
bool override)
{
struct ieee80211_local *local = sdata->local;
struct ieee80211_if_managed *ifmgd = &sdata->u.mgd;
struct ieee80211_bss *bss = (void *)cbss->priv;
struct sta_info *new_sta = NULL;
struct ieee80211_link_data *link;
bool have_sta = false;
bool mlo;
int err;
if (link_id >= 0) {
mlo = true;
if (WARN_ON(!ap_mld_addr))
return -EINVAL;
err = ieee80211_vif_set_links(sdata, BIT(link_id), 0);
} else {
if (WARN_ON(ap_mld_addr))
return -EINVAL;
ap_mld_addr = cbss->bssid;
err = ieee80211_vif_set_links(sdata, 0, 0);
link_id = 0;
mlo = false;
}
if (err)
return err;
link = sdata_dereference(sdata->link[link_id], sdata);
if (WARN_ON(!link)) {
err = -ENOLINK;
goto out_err;
}
if (WARN_ON(!ifmgd->auth_data && !ifmgd->assoc_data)) {
err = -EINVAL;
goto out_err;
}
/* If a reconfig is happening, bail out */
if (local->in_reconfig) {
err = -EBUSY;
goto out_err;
}
if (assoc) {
rcu_read_lock();
have_sta = sta_info_get(sdata, ap_mld_addr);
rcu_read_unlock();
}
if (!have_sta) {
if (mlo)
new_sta = sta_info_alloc_with_link(sdata, ap_mld_addr,
link_id, cbss->bssid,
GFP_KERNEL);
else
new_sta = sta_info_alloc(sdata, ap_mld_addr, GFP_KERNEL);
if (!new_sta) {
err = -ENOMEM;
goto out_err;
}
new_sta->sta.mlo = mlo;
}
/*
* Set up the information for the new channel before setting the
* new channel. We can't - completely race-free - change the basic
* rates bitmap and the channel (sband) that it refers to, but if
* we set it up before we at least avoid calling into the driver's
* bss_info_changed() method with invalid information (since we do
* call that from changing the channel - only for IDLE and perhaps
* some others, but ...).
*
* So to avoid that, just set up all the new information before the
* channel, but tell the driver to apply it only afterwards, since
* it might need the new channel for that.
*/
if (new_sta) {
const struct cfg80211_bss_ies *ies;
struct link_sta_info *link_sta;
rcu_read_lock();
link_sta = rcu_dereference(new_sta->link[link_id]);
if (WARN_ON(!link_sta)) {
rcu_read_unlock();
sta_info_free(local, new_sta);
err = -EINVAL;
goto out_err;
}
err = ieee80211_mgd_setup_link_sta(link, new_sta,
link_sta, cbss);
if (err) {
rcu_read_unlock();
sta_info_free(local, new_sta);
goto out_err;
}
memcpy(link->u.mgd.bssid, cbss->bssid, ETH_ALEN);
/* set timing information */
link->conf->beacon_int = cbss->beacon_interval;
ies = rcu_dereference(cbss->beacon_ies);
if (ies) {
link->conf->sync_tsf = ies->tsf;
link->conf->sync_device_ts =
bss->device_ts_beacon;
ieee80211_get_dtim(ies,
&link->conf->sync_dtim_count,
NULL);
} else if (!ieee80211_hw_check(&sdata->local->hw,
TIMING_BEACON_ONLY)) {
ies = rcu_dereference(cbss->proberesp_ies);
/* must be non-NULL since beacon IEs were NULL */
link->conf->sync_tsf = ies->tsf;
link->conf->sync_device_ts =
bss->device_ts_presp;
link->conf->sync_dtim_count = 0;
} else {
link->conf->sync_tsf = 0;
link->conf->sync_device_ts = 0;
link->conf->sync_dtim_count = 0;
}
rcu_read_unlock();
}
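	/*
	 * Prepare (or re-prepare) the channel when a new STA was allocated,
	 * or when the caller requested an override because the connection
	 * flags changed.
	 */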
if (new_sta || override) {
err = ieee80211_prep_channel(sdata, link, cbss,
&link->u.mgd.conn_flags);
if (err) {
if (new_sta)
sta_info_free(local, new_sta);
goto out_err;
}
}
if (new_sta) {
/*
* tell driver about BSSID, basic rates and timing
* this was set up above, before setting the channel
*/
ieee80211_link_info_change_notify(sdata, link,
BSS_CHANGED_BSSID |
BSS_CHANGED_BASIC_RATES |
BSS_CHANGED_BEACON_INT);
if (assoc)
sta_info_pre_move_state(new_sta, IEEE80211_STA_AUTH);
err = sta_info_insert(new_sta);
new_sta = NULL;
if (err) {
sdata_info(sdata,
"failed to insert STA entry for the AP (error %d)\n",
err);
goto out_err;
}
} else
WARN_ON_ONCE(!ether_addr_equal(link->u.mgd.bssid, cbss->bssid));
/* Cancel scan to ensure that nothing interferes with connection */
if (local->scanning)
ieee80211_scan_cancel(local);
return 0;
out_err:
ieee80211_link_release_channel(&sdata->deflink);
ieee80211_vif_set_links(sdata, 0, 0);
return err;
}
/* config hooks */
int ieee80211_mgd_auth(struct ieee80211_sub_if_data *sdata,
struct cfg80211_auth_request *req)
{
struct ieee80211_local *local = sdata->local;
struct ieee80211_if_managed *ifmgd = &sdata->u.mgd;
struct ieee80211_mgd_auth_data *auth_data;
u16 auth_alg;
int err;
bool cont_auth;
/* prepare auth data structure */
switch (req->auth_type) {
case NL80211_AUTHTYPE_OPEN_SYSTEM:
auth_alg = WLAN_AUTH_OPEN;
break;
case NL80211_AUTHTYPE_SHARED_KEY:
if (fips_enabled)
return -EOPNOTSUPP;
auth_alg = WLAN_AUTH_SHARED_KEY;
break;
case NL80211_AUTHTYPE_FT:
auth_alg = WLAN_AUTH_FT;
break;
case NL80211_AUTHTYPE_NETWORK_EAP:
auth_alg = WLAN_AUTH_LEAP;
break;
case NL80211_AUTHTYPE_SAE:
auth_alg = WLAN_AUTH_SAE;
break;
case NL80211_AUTHTYPE_FILS_SK:
auth_alg = WLAN_AUTH_FILS_SK;
break;
case NL80211_AUTHTYPE_FILS_SK_PFS:
auth_alg = WLAN_AUTH_FILS_SK_PFS;
break;
case NL80211_AUTHTYPE_FILS_PK:
auth_alg = WLAN_AUTH_FILS_PK;
break;
default:
return -EOPNOTSUPP;
}
if (ifmgd->assoc_data)
return -EBUSY;
auth_data = kzalloc(sizeof(*auth_data) + req->auth_data_len +
req->ie_len, GFP_KERNEL);
if (!auth_data)
return -ENOMEM;
memcpy(auth_data->ap_addr,
req->ap_mld_addr ?: req->bss->bssid,
ETH_ALEN);
auth_data->bss = req->bss;
auth_data->link_id = req->link_id;
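	/*
	 * The first four bytes of the auth data carry the transaction
	 * sequence number and status code; for SAE track them separately,
	 * and copy only the payload that follows.
	 */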
if (req->auth_data_len >= 4) {
if (req->auth_type == NL80211_AUTHTYPE_SAE) {
__le16 *pos = (__le16 *) req->auth_data;
auth_data->sae_trans = le16_to_cpu(pos[0]);
auth_data->sae_status = le16_to_cpu(pos[1]);
}
memcpy(auth_data->data, req->auth_data + 4,
req->auth_data_len - 4);
auth_data->data_len += req->auth_data_len - 4;
}
	/* Check whether this request continues an ongoing authentication with
	 * the same BSS (and link) we were already authenticating with; if so,
	 * avoid removing and re-adding the STA entry in
	 * ieee80211_prep_connection().
	 */
cont_auth = ifmgd->auth_data && req->bss == ifmgd->auth_data->bss &&
ifmgd->auth_data->link_id == req->link_id;
if (req->ie && req->ie_len) {
memcpy(&auth_data->data[auth_data->data_len],
req->ie, req->ie_len);
auth_data->data_len += req->ie_len;
}
if (req->key && req->key_len) {
auth_data->key_len = req->key_len;
auth_data->key_idx = req->key_idx;
memcpy(auth_data->key, req->key, req->key_len);
}
auth_data->algorithm = auth_alg;
/* try to authenticate/probe */
if (ifmgd->auth_data) {
if (cont_auth && req->auth_type == NL80211_AUTHTYPE_SAE) {
auth_data->peer_confirmed =
ifmgd->auth_data->peer_confirmed;
}
ieee80211_destroy_auth_data(sdata, cont_auth);
}
/* prep auth_data so we don't go into idle on disassoc */
ifmgd->auth_data = auth_data;
/* If this is continuation of an ongoing SAE authentication exchange
* (i.e., request to send SAE Confirm) and the peer has already
* confirmed, mark authentication completed since we are about to send
* out SAE Confirm.
*/
if (cont_auth && req->auth_type == NL80211_AUTHTYPE_SAE &&
auth_data->peer_confirmed && auth_data->sae_trans == 2)
ieee80211_mark_sta_auth(sdata);
if (ifmgd->associated) {
u8 frame_buf[IEEE80211_DEAUTH_FRAME_LEN];
sdata_info(sdata,
"disconnect from AP %pM for new auth to %pM\n",
sdata->vif.cfg.ap_addr, auth_data->ap_addr);
ieee80211_set_disassoc(sdata, IEEE80211_STYPE_DEAUTH,
WLAN_REASON_UNSPECIFIED,
false, frame_buf);
ieee80211_report_disconnect(sdata, frame_buf,
sizeof(frame_buf), true,
WLAN_REASON_UNSPECIFIED,
false);
}
sdata_info(sdata, "authenticate with %pM\n", auth_data->ap_addr);
/* needed for transmitting the auth frame(s) properly */
memcpy(sdata->vif.cfg.ap_addr, auth_data->ap_addr, ETH_ALEN);
err = ieee80211_prep_connection(sdata, req->bss, req->link_id,
req->ap_mld_addr, cont_auth, false);
if (err)
goto err_clear;
err = ieee80211_auth(sdata);
if (err) {
sta_info_destroy_addr(sdata, auth_data->ap_addr);
goto err_clear;
}
/* hold our own reference */
cfg80211_ref_bss(local->hw.wiphy, auth_data->bss);
return 0;
err_clear:
if (!ieee80211_vif_is_mld(&sdata->vif)) {
eth_zero_addr(sdata->deflink.u.mgd.bssid);
ieee80211_link_info_change_notify(sdata, &sdata->deflink,
BSS_CHANGED_BSSID);
mutex_lock(&sdata->local->mtx);
ieee80211_link_release_channel(&sdata->deflink);
mutex_unlock(&sdata->local->mtx);
}
ifmgd->auth_data = NULL;
kfree(auth_data);
return err;
}
static ieee80211_conn_flags_t
ieee80211_setup_assoc_link(struct ieee80211_sub_if_data *sdata,
struct ieee80211_mgd_assoc_data *assoc_data,
struct cfg80211_assoc_request *req,
ieee80211_conn_flags_t conn_flags,
unsigned int link_id)
{
struct ieee80211_local *local = sdata->local;
const struct cfg80211_bss_ies *beacon_ies;
struct ieee80211_supported_band *sband;
const struct element *ht_elem, *vht_elem;
struct ieee80211_link_data *link;
struct cfg80211_bss *cbss;
struct ieee80211_bss *bss;
bool is_5ghz, is_6ghz;
cbss = assoc_data->link[link_id].bss;
if (WARN_ON(!cbss))
return 0;
bss = (void *)cbss->priv;
sband = local->hw.wiphy->bands[cbss->channel->band];
if (WARN_ON(!sband))
return 0;
link = sdata_dereference(sdata->link[link_id], sdata);
if (WARN_ON(!link))
return 0;
is_5ghz = cbss->channel->band == NL80211_BAND_5GHZ;
is_6ghz = cbss->channel->band == NL80211_BAND_6GHZ;
/* for MLO connections assume advertising all rates is OK */
if (!req->ap_mld_addr) {
assoc_data->supp_rates = bss->supp_rates;
assoc_data->supp_rates_len = bss->supp_rates_len;
}
/* copy and link elems for the STA profile */
if (req->links[link_id].elems_len) {
memcpy(assoc_data->ie_pos, req->links[link_id].elems,
req->links[link_id].elems_len);
assoc_data->link[link_id].elems = assoc_data->ie_pos;
assoc_data->link[link_id].elems_len = req->links[link_id].elems_len;
assoc_data->ie_pos += req->links[link_id].elems_len;
}
rcu_read_lock();
ht_elem = ieee80211_bss_get_elem(cbss, WLAN_EID_HT_OPERATION);
if (ht_elem && ht_elem->datalen >= sizeof(struct ieee80211_ht_operation))
assoc_data->link[link_id].ap_ht_param =
((struct ieee80211_ht_operation *)(ht_elem->data))->ht_param;
else if (!is_6ghz)
conn_flags |= IEEE80211_CONN_DISABLE_HT;
vht_elem = ieee80211_bss_get_elem(cbss, WLAN_EID_VHT_CAPABILITY);
if (vht_elem && vht_elem->datalen >= sizeof(struct ieee80211_vht_cap)) {
memcpy(&assoc_data->link[link_id].ap_vht_cap, vht_elem->data,
sizeof(struct ieee80211_vht_cap));
} else if (is_5ghz) {
link_info(link,
"VHT capa missing/short, disabling VHT/HE/EHT\n");
conn_flags |= IEEE80211_CONN_DISABLE_VHT |
IEEE80211_CONN_DISABLE_HE |
IEEE80211_CONN_DISABLE_EHT;
}
rcu_read_unlock();
link->u.mgd.beacon_crc_valid = false;
link->u.mgd.dtim_period = 0;
link->u.mgd.have_beacon = false;
/* override HT/VHT configuration only if the AP and we support it */
if (!(conn_flags & IEEE80211_CONN_DISABLE_HT)) {
struct ieee80211_sta_ht_cap sta_ht_cap;
memcpy(&sta_ht_cap, &sband->ht_cap, sizeof(sta_ht_cap));
ieee80211_apply_htcap_overrides(sdata, &sta_ht_cap);
}
link->conf->eht_puncturing = 0;
rcu_read_lock();
beacon_ies = rcu_dereference(cbss->beacon_ies);
if (beacon_ies) {
const struct ieee80211_eht_operation *eht_oper;
const struct element *elem;
u8 dtim_count = 0;
ieee80211_get_dtim(beacon_ies, &dtim_count,
&link->u.mgd.dtim_period);
sdata->deflink.u.mgd.have_beacon = true;
if (ieee80211_hw_check(&local->hw, TIMING_BEACON_ONLY)) {
link->conf->sync_tsf = beacon_ies->tsf;
link->conf->sync_device_ts = bss->device_ts_beacon;
link->conf->sync_dtim_count = dtim_count;
}
elem = cfg80211_find_ext_elem(WLAN_EID_EXT_MULTIPLE_BSSID_CONFIGURATION,
beacon_ies->data, beacon_ies->len);
if (elem && elem->datalen >= 3)
link->conf->profile_periodicity = elem->data[2];
else
link->conf->profile_periodicity = 0;
elem = cfg80211_find_elem(WLAN_EID_EXT_CAPABILITY,
beacon_ies->data, beacon_ies->len);
if (elem && elem->datalen >= 11 &&
(elem->data[10] & WLAN_EXT_CAPA11_EMA_SUPPORT))
link->conf->ema_ap = true;
else
link->conf->ema_ap = false;
		elem = cfg80211_find_ext_elem(WLAN_EID_EXT_EHT_OPERATION,
					      beacon_ies->data, beacon_ies->len);
		/* don't dereference elem->data before checking elem for NULL */
		eht_oper = elem ? (const void *)(elem->data + 1) : NULL;
		if (elem &&
		    ieee80211_eht_oper_size_ok((const void *)(elem->data + 1),
					       elem->datalen - 1) &&
(eht_oper->params & IEEE80211_EHT_OPER_INFO_PRESENT) &&
(eht_oper->params & IEEE80211_EHT_OPER_DISABLED_SUBCHANNEL_BITMAP_PRESENT)) {
const struct ieee80211_eht_operation_info *info =
(void *)eht_oper->optional;
const u8 *disable_subchannel_bitmap = info->optional;
u16 bitmap;
bitmap = get_unaligned_le16(disable_subchannel_bitmap);
if (cfg80211_valid_disable_subchannel_bitmap(&bitmap,
&link->conf->chandef))
ieee80211_handle_puncturing_bitmap(link,
eht_oper,
bitmap,
NULL);
else
conn_flags |= IEEE80211_CONN_DISABLE_EHT;
}
}
rcu_read_unlock();
if (bss->corrupt_data) {
char *corrupt_type = "data";
if (bss->corrupt_data & IEEE80211_BSS_CORRUPT_BEACON) {
if (bss->corrupt_data & IEEE80211_BSS_CORRUPT_PROBE_RESP)
corrupt_type = "beacon and probe response";
else
corrupt_type = "beacon";
} else if (bss->corrupt_data & IEEE80211_BSS_CORRUPT_PROBE_RESP) {
corrupt_type = "probe response";
}
sdata_info(sdata, "associating to AP %pM with corrupt %s\n",
cbss->bssid, corrupt_type);
}
if (link->u.mgd.req_smps == IEEE80211_SMPS_AUTOMATIC) {
if (sdata->u.mgd.powersave)
link->smps_mode = IEEE80211_SMPS_DYNAMIC;
else
link->smps_mode = IEEE80211_SMPS_OFF;
} else {
link->smps_mode = link->u.mgd.req_smps;
}
return conn_flags;
}
int ieee80211_mgd_assoc(struct ieee80211_sub_if_data *sdata,
struct cfg80211_assoc_request *req)
{
unsigned int assoc_link_id = req->link_id < 0 ? 0 : req->link_id;
struct ieee80211_local *local = sdata->local;
struct ieee80211_if_managed *ifmgd = &sdata->u.mgd;
struct ieee80211_mgd_assoc_data *assoc_data;
const struct element *ssid_elem;
struct ieee80211_vif_cfg *vif_cfg = &sdata->vif.cfg;
ieee80211_conn_flags_t conn_flags = 0;
struct ieee80211_link_data *link;
struct cfg80211_bss *cbss;
struct ieee80211_bss *bss;
bool override;
int i, err;
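	/*
	 * A single allocation holds assoc_data, the request IEs and all
	 * per-link element blobs; ie_pos tracks the write position in it.
	 */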
size_t size = sizeof(*assoc_data) + req->ie_len;
for (i = 0; i < IEEE80211_MLD_MAX_NUM_LINKS; i++)
size += req->links[i].elems_len;
/* FIXME: no support for 4-addr MLO yet */
if (sdata->u.mgd.use_4addr && req->link_id >= 0)
return -EOPNOTSUPP;
assoc_data = kzalloc(size, GFP_KERNEL);
if (!assoc_data)
return -ENOMEM;
cbss = req->link_id < 0 ? req->bss : req->links[req->link_id].bss;
rcu_read_lock();
ssid_elem = ieee80211_bss_get_elem(cbss, WLAN_EID_SSID);
if (!ssid_elem || ssid_elem->datalen > sizeof(assoc_data->ssid)) {
rcu_read_unlock();
kfree(assoc_data);
return -EINVAL;
}
memcpy(assoc_data->ssid, ssid_elem->data, ssid_elem->datalen);
assoc_data->ssid_len = ssid_elem->datalen;
memcpy(vif_cfg->ssid, assoc_data->ssid, assoc_data->ssid_len);
vif_cfg->ssid_len = assoc_data->ssid_len;
rcu_read_unlock();
if (req->ap_mld_addr) {
for (i = 0; i < IEEE80211_MLD_MAX_NUM_LINKS; i++) {
if (!req->links[i].bss)
continue;
link = sdata_dereference(sdata->link[i], sdata);
if (link)
ether_addr_copy(assoc_data->link[i].addr,
link->conf->addr);
else
eth_random_addr(assoc_data->link[i].addr);
}
} else {
memcpy(assoc_data->link[0].addr, sdata->vif.addr, ETH_ALEN);
}
assoc_data->s1g = cbss->channel->band == NL80211_BAND_S1GHZ;
memcpy(assoc_data->ap_addr,
req->ap_mld_addr ?: req->bss->bssid,
ETH_ALEN);
if (ifmgd->associated) {
u8 frame_buf[IEEE80211_DEAUTH_FRAME_LEN];
sdata_info(sdata,
"disconnect from AP %pM for new assoc to %pM\n",
sdata->vif.cfg.ap_addr, assoc_data->ap_addr);
ieee80211_set_disassoc(sdata, IEEE80211_STYPE_DEAUTH,
WLAN_REASON_UNSPECIFIED,
false, frame_buf);
ieee80211_report_disconnect(sdata, frame_buf,
sizeof(frame_buf), true,
WLAN_REASON_UNSPECIFIED,
false);
}
if (ifmgd->auth_data && !ifmgd->auth_data->done) {
err = -EBUSY;
goto err_free;
}
if (ifmgd->assoc_data) {
err = -EBUSY;
goto err_free;
}
if (ifmgd->auth_data) {
bool match;
/* keep sta info, bssid if matching */
match = ether_addr_equal(ifmgd->auth_data->ap_addr,
assoc_data->ap_addr) &&
ifmgd->auth_data->link_id == req->link_id;
ieee80211_destroy_auth_data(sdata, match);
}
/* prepare assoc data */
bss = (void *)cbss->priv;
assoc_data->wmm = bss->wmm_used &&
(local->hw.queues >= IEEE80211_NUM_ACS);
/*
* IEEE802.11n does not allow TKIP/WEP as pairwise ciphers in HT mode.
* We still associate in non-HT mode (11a/b/g) if any one of these
* ciphers is configured as pairwise.
* We can set this to true for non-11n hardware, that'll be checked
* separately along with the peer capabilities.
*/
for (i = 0; i < req->crypto.n_ciphers_pairwise; i++) {
if (req->crypto.ciphers_pairwise[i] == WLAN_CIPHER_SUITE_WEP40 ||
req->crypto.ciphers_pairwise[i] == WLAN_CIPHER_SUITE_TKIP ||
req->crypto.ciphers_pairwise[i] == WLAN_CIPHER_SUITE_WEP104) {
conn_flags |= IEEE80211_CONN_DISABLE_HT;
conn_flags |= IEEE80211_CONN_DISABLE_VHT;
conn_flags |= IEEE80211_CONN_DISABLE_HE;
conn_flags |= IEEE80211_CONN_DISABLE_EHT;
netdev_info(sdata->dev,
"disabling HT/VHT/HE due to WEP/TKIP use\n");
}
}
/* also disable HT/VHT/HE/EHT if the AP doesn't use WMM */
if (!bss->wmm_used) {
conn_flags |= IEEE80211_CONN_DISABLE_HT;
conn_flags |= IEEE80211_CONN_DISABLE_VHT;
conn_flags |= IEEE80211_CONN_DISABLE_HE;
conn_flags |= IEEE80211_CONN_DISABLE_EHT;
netdev_info(sdata->dev,
"disabling HT/VHT/HE as WMM/QoS is not supported by the AP\n");
}
if (req->flags & ASSOC_REQ_DISABLE_HT) {
mlme_dbg(sdata, "HT disabled by flag, disabling HT/VHT/HE\n");
conn_flags |= IEEE80211_CONN_DISABLE_HT;
conn_flags |= IEEE80211_CONN_DISABLE_VHT;
conn_flags |= IEEE80211_CONN_DISABLE_HE;
conn_flags |= IEEE80211_CONN_DISABLE_EHT;
}
if (req->flags & ASSOC_REQ_DISABLE_VHT) {
mlme_dbg(sdata, "VHT disabled by flag, disabling VHT\n");
conn_flags |= IEEE80211_CONN_DISABLE_VHT;
}
if (req->flags & ASSOC_REQ_DISABLE_HE) {
mlme_dbg(sdata, "HE disabled by flag, disabling HE/EHT\n");
conn_flags |= IEEE80211_CONN_DISABLE_HE;
conn_flags |= IEEE80211_CONN_DISABLE_EHT;
}
if (req->flags & ASSOC_REQ_DISABLE_EHT)
conn_flags |= IEEE80211_CONN_DISABLE_EHT;
memcpy(&ifmgd->ht_capa, &req->ht_capa, sizeof(ifmgd->ht_capa));
memcpy(&ifmgd->ht_capa_mask, &req->ht_capa_mask,
sizeof(ifmgd->ht_capa_mask));
memcpy(&ifmgd->vht_capa, &req->vht_capa, sizeof(ifmgd->vht_capa));
memcpy(&ifmgd->vht_capa_mask, &req->vht_capa_mask,
sizeof(ifmgd->vht_capa_mask));
memcpy(&ifmgd->s1g_capa, &req->s1g_capa, sizeof(ifmgd->s1g_capa));
memcpy(&ifmgd->s1g_capa_mask, &req->s1g_capa_mask,
sizeof(ifmgd->s1g_capa_mask));
if (req->ie && req->ie_len) {
memcpy(assoc_data->ie, req->ie, req->ie_len);
assoc_data->ie_len = req->ie_len;
assoc_data->ie_pos = assoc_data->ie + assoc_data->ie_len;
} else {
assoc_data->ie_pos = assoc_data->ie;
}
if (req->fils_kek) {
/* should already be checked in cfg80211 - so warn */
if (WARN_ON(req->fils_kek_len > FILS_MAX_KEK_LEN)) {
err = -EINVAL;
goto err_free;
}
memcpy(assoc_data->fils_kek, req->fils_kek,
req->fils_kek_len);
assoc_data->fils_kek_len = req->fils_kek_len;
}
if (req->fils_nonces)
memcpy(assoc_data->fils_nonces, req->fils_nonces,
2 * FILS_NONCE_LEN);
/* default timeout */
assoc_data->timeout = jiffies;
assoc_data->timeout_started = true;
assoc_data->assoc_link_id = assoc_link_id;
if (req->ap_mld_addr) {
for (i = 0; i < ARRAY_SIZE(assoc_data->link); i++) {
assoc_data->link[i].conn_flags = conn_flags;
assoc_data->link[i].bss = req->links[i].bss;
assoc_data->link[i].disabled = req->links[i].disabled;
}
/* if there was no authentication, set up the link */
err = ieee80211_vif_set_links(sdata, BIT(assoc_link_id), 0);
if (err)
goto err_clear;
} else {
assoc_data->link[0].conn_flags = conn_flags;
assoc_data->link[0].bss = cbss;
}
link = sdata_dereference(sdata->link[assoc_link_id], sdata);
if (WARN_ON(!link)) {
err = -EINVAL;
goto err_clear;
}
/* keep old conn_flags from ieee80211_prep_channel() from auth */
conn_flags |= link->u.mgd.conn_flags;
conn_flags |= ieee80211_setup_assoc_link(sdata, assoc_data, req,
conn_flags, assoc_link_id);
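	/*
	 * If the effective flags now differ from what authentication set up,
	 * the channel must be prepared again (override).
	 */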
override = link->u.mgd.conn_flags != conn_flags;
link->u.mgd.conn_flags |= conn_flags;
if (WARN((sdata->vif.driver_flags & IEEE80211_VIF_SUPPORTS_UAPSD) &&
ieee80211_hw_check(&local->hw, PS_NULLFUNC_STACK),
"U-APSD not supported with HW_PS_NULLFUNC_STACK\n"))
sdata->vif.driver_flags &= ~IEEE80211_VIF_SUPPORTS_UAPSD;
if (bss->wmm_used && bss->uapsd_supported &&
(sdata->vif.driver_flags & IEEE80211_VIF_SUPPORTS_UAPSD)) {
assoc_data->uapsd = true;
ifmgd->flags |= IEEE80211_STA_UAPSD_ENABLED;
} else {
assoc_data->uapsd = false;
ifmgd->flags &= ~IEEE80211_STA_UAPSD_ENABLED;
}
if (req->prev_bssid)
memcpy(assoc_data->prev_ap_addr, req->prev_bssid, ETH_ALEN);
if (req->use_mfp) {
ifmgd->mfp = IEEE80211_MFP_REQUIRED;
ifmgd->flags |= IEEE80211_STA_MFP_ENABLED;
} else {
ifmgd->mfp = IEEE80211_MFP_DISABLED;
ifmgd->flags &= ~IEEE80211_STA_MFP_ENABLED;
}
if (req->flags & ASSOC_REQ_USE_RRM)
ifmgd->flags |= IEEE80211_STA_ENABLE_RRM;
else
ifmgd->flags &= ~IEEE80211_STA_ENABLE_RRM;
if (req->crypto.control_port)
ifmgd->flags |= IEEE80211_STA_CONTROL_PORT;
else
ifmgd->flags &= ~IEEE80211_STA_CONTROL_PORT;
sdata->control_port_protocol = req->crypto.control_port_ethertype;
sdata->control_port_no_encrypt = req->crypto.control_port_no_encrypt;
sdata->control_port_over_nl80211 =
req->crypto.control_port_over_nl80211;
sdata->control_port_no_preauth = req->crypto.control_port_no_preauth;
/* kick off associate process */
ifmgd->assoc_data = assoc_data;
for (i = 0; i < ARRAY_SIZE(assoc_data->link); i++) {
if (!assoc_data->link[i].bss)
continue;
if (i == assoc_data->assoc_link_id)
continue;
/* only calculate the flags, hence link == NULL */
err = ieee80211_prep_channel(sdata, NULL, assoc_data->link[i].bss,
&assoc_data->link[i].conn_flags);
if (err)
goto err_clear;
}
/* needed for transmitting the assoc frames properly */
memcpy(sdata->vif.cfg.ap_addr, assoc_data->ap_addr, ETH_ALEN);
err = ieee80211_prep_connection(sdata, cbss, req->link_id,
req->ap_mld_addr, true, override);
if (err)
goto err_clear;
assoc_data->link[assoc_data->assoc_link_id].conn_flags =
link->u.mgd.conn_flags;
if (ieee80211_hw_check(&sdata->local->hw, NEED_DTIM_BEFORE_ASSOC)) {
const struct cfg80211_bss_ies *beacon_ies;
rcu_read_lock();
beacon_ies = rcu_dereference(req->bss->beacon_ies);
if (beacon_ies) {
/*
* Wait up to one beacon interval ...
* should this be more if we miss one?
*/
sdata_info(sdata, "waiting for beacon from %pM\n",
link->u.mgd.bssid);
assoc_data->timeout = TU_TO_EXP_TIME(req->bss->beacon_interval);
assoc_data->timeout_started = true;
assoc_data->need_beacon = true;
}
rcu_read_unlock();
}
run_again(sdata, assoc_data->timeout);
return 0;
err_clear:
eth_zero_addr(sdata->deflink.u.mgd.bssid);
ieee80211_link_info_change_notify(sdata, &sdata->deflink,
BSS_CHANGED_BSSID);
ifmgd->assoc_data = NULL;
err_free:
kfree(assoc_data);
return err;
}
int ieee80211_mgd_deauth(struct ieee80211_sub_if_data *sdata,
struct cfg80211_deauth_request *req)
{
struct ieee80211_if_managed *ifmgd = &sdata->u.mgd;
u8 frame_buf[IEEE80211_DEAUTH_FRAME_LEN];
bool tx = !req->local_state_change;
struct ieee80211_prep_tx_info info = {
.subtype = IEEE80211_STYPE_DEAUTH,
};
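	/*
	 * Handle, in order: aborting an authentication in progress, aborting
	 * an association in progress, or deauthenticating from the AP we are
	 * currently associated with.
	 */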
if (ifmgd->auth_data &&
ether_addr_equal(ifmgd->auth_data->ap_addr, req->bssid)) {
sdata_info(sdata,
"aborting authentication with %pM by local choice (Reason: %u=%s)\n",
req->bssid, req->reason_code,
ieee80211_get_reason_code_string(req->reason_code));
drv_mgd_prepare_tx(sdata->local, sdata, &info);
ieee80211_send_deauth_disassoc(sdata, req->bssid, req->bssid,
IEEE80211_STYPE_DEAUTH,
req->reason_code, tx,
frame_buf);
ieee80211_destroy_auth_data(sdata, false);
ieee80211_report_disconnect(sdata, frame_buf,
sizeof(frame_buf), true,
req->reason_code, false);
drv_mgd_complete_tx(sdata->local, sdata, &info);
return 0;
}
if (ifmgd->assoc_data &&
ether_addr_equal(ifmgd->assoc_data->ap_addr, req->bssid)) {
sdata_info(sdata,
"aborting association with %pM by local choice (Reason: %u=%s)\n",
req->bssid, req->reason_code,
ieee80211_get_reason_code_string(req->reason_code));
drv_mgd_prepare_tx(sdata->local, sdata, &info);
ieee80211_send_deauth_disassoc(sdata, req->bssid, req->bssid,
IEEE80211_STYPE_DEAUTH,
req->reason_code, tx,
frame_buf);
ieee80211_destroy_assoc_data(sdata, ASSOC_ABANDON);
ieee80211_report_disconnect(sdata, frame_buf,
sizeof(frame_buf), true,
req->reason_code, false);
return 0;
}
if (ifmgd->associated &&
ether_addr_equal(sdata->vif.cfg.ap_addr, req->bssid)) {
sdata_info(sdata,
"deauthenticating from %pM by local choice (Reason: %u=%s)\n",
req->bssid, req->reason_code,
ieee80211_get_reason_code_string(req->reason_code));
ieee80211_set_disassoc(sdata, IEEE80211_STYPE_DEAUTH,
req->reason_code, tx, frame_buf);
ieee80211_report_disconnect(sdata, frame_buf,
sizeof(frame_buf), true,
req->reason_code, false);
drv_mgd_complete_tx(sdata->local, sdata, &info);
return 0;
}
return -ENOTCONN;
}
int ieee80211_mgd_disassoc(struct ieee80211_sub_if_data *sdata,
struct cfg80211_disassoc_request *req)
{
u8 frame_buf[IEEE80211_DEAUTH_FRAME_LEN];
if (!sdata->u.mgd.associated ||
memcmp(sdata->vif.cfg.ap_addr, req->ap_addr, ETH_ALEN))
return -ENOTCONN;
sdata_info(sdata,
"disassociating from %pM by local choice (Reason: %u=%s)\n",
req->ap_addr, req->reason_code,
ieee80211_get_reason_code_string(req->reason_code));
ieee80211_set_disassoc(sdata, IEEE80211_STYPE_DISASSOC,
req->reason_code, !req->local_state_change,
frame_buf);
ieee80211_report_disconnect(sdata, frame_buf, sizeof(frame_buf), true,
req->reason_code, false);
return 0;
}
void ieee80211_mgd_stop_link(struct ieee80211_link_data *link)
{
wiphy_work_cancel(link->sdata->local->hw.wiphy,
&link->u.mgd.request_smps_work);
wiphy_delayed_work_cancel(link->sdata->local->hw.wiphy,
&link->u.mgd.chswitch_work);
}
void ieee80211_mgd_stop(struct ieee80211_sub_if_data *sdata)
{
struct ieee80211_if_managed *ifmgd = &sdata->u.mgd;
/*
* Make sure some work items will not run after this,
* they will not do anything but might not have been
* cancelled when disconnecting.
*/
cancel_work_sync(&ifmgd->monitor_work);
wiphy_work_cancel(sdata->local->hw.wiphy,
&ifmgd->beacon_connection_loss_work);
wiphy_work_cancel(sdata->local->hw.wiphy,
&ifmgd->csa_connection_drop_work);
cancel_delayed_work_sync(&ifmgd->tdls_peer_del_work);
wiphy_delayed_work_cancel(sdata->local->hw.wiphy,
&ifmgd->ml_reconf_work);
sdata_lock(sdata);
if (ifmgd->assoc_data)
ieee80211_destroy_assoc_data(sdata, ASSOC_TIMEOUT);
if (ifmgd->auth_data)
ieee80211_destroy_auth_data(sdata, false);
spin_lock_bh(&ifmgd->teardown_lock);
if (ifmgd->teardown_skb) {
kfree_skb(ifmgd->teardown_skb);
ifmgd->teardown_skb = NULL;
ifmgd->orig_teardown_skb = NULL;
}
kfree(ifmgd->assoc_req_ies);
ifmgd->assoc_req_ies = NULL;
ifmgd->assoc_req_ies_len = 0;
spin_unlock_bh(&ifmgd->teardown_lock);
del_timer_sync(&ifmgd->timer);
sdata_unlock(sdata);
}
void ieee80211_cqm_rssi_notify(struct ieee80211_vif *vif,
enum nl80211_cqm_rssi_threshold_event rssi_event,
s32 rssi_level,
gfp_t gfp)
{
struct ieee80211_sub_if_data *sdata = vif_to_sdata(vif);
trace_api_cqm_rssi_notify(sdata, rssi_event, rssi_level);
cfg80211_cqm_rssi_notify(sdata->dev, rssi_event, rssi_level, gfp);
}
EXPORT_SYMBOL(ieee80211_cqm_rssi_notify);
void ieee80211_cqm_beacon_loss_notify(struct ieee80211_vif *vif, gfp_t gfp)
{
struct ieee80211_sub_if_data *sdata = vif_to_sdata(vif);
trace_api_cqm_beacon_loss_notify(sdata->local, sdata);
cfg80211_cqm_beacon_loss_notify(sdata->dev, gfp);
}
EXPORT_SYMBOL(ieee80211_cqm_beacon_loss_notify);
static void _ieee80211_enable_rssi_reports(struct ieee80211_sub_if_data *sdata,
int rssi_min_thold,
int rssi_max_thold)
{
trace_api_enable_rssi_reports(sdata, rssi_min_thold, rssi_max_thold);
if (WARN_ON(sdata->vif.type != NL80211_IFTYPE_STATION))
return;
/*
* Scale up threshold values before storing it, as the RSSI averaging
* algorithm uses a scaled up value as well. Change this scaling
* factor if the RSSI averaging algorithm changes.
*/
sdata->u.mgd.rssi_min_thold = rssi_min_thold*16;
sdata->u.mgd.rssi_max_thold = rssi_max_thold*16;
}
void ieee80211_enable_rssi_reports(struct ieee80211_vif *vif,
int rssi_min_thold,
int rssi_max_thold)
{
struct ieee80211_sub_if_data *sdata = vif_to_sdata(vif);
WARN_ON(rssi_min_thold == rssi_max_thold ||
rssi_min_thold > rssi_max_thold);
_ieee80211_enable_rssi_reports(sdata, rssi_min_thold,
rssi_max_thold);
}
EXPORT_SYMBOL(ieee80211_enable_rssi_reports);
void ieee80211_disable_rssi_reports(struct ieee80211_vif *vif)
{
struct ieee80211_sub_if_data *sdata = vif_to_sdata(vif);
_ieee80211_enable_rssi_reports(sdata, 0, 0);
}
EXPORT_SYMBOL(ieee80211_disable_rssi_reports);
| linux-master | net/mac80211/mlme.c |
// SPDX-License-Identifier: GPL-2.0-only
/*
* Copyright 2002-2005, Instant802 Networks, Inc.
* Copyright 2005-2006, Devicescape Software, Inc.
* Copyright 2006-2007 Jiri Benc <[email protected]>
* Copyright 2008-2010 Johannes Berg <[email protected]>
* Copyright 2013-2014 Intel Mobile Communications GmbH
* Copyright 2021-2023 Intel Corporation
*/
#include <linux/export.h>
#include <linux/etherdevice.h>
#include <net/mac80211.h>
#include <asm/unaligned.h>
#include "ieee80211_i.h"
#include "rate.h"
#include "mesh.h"
#include "led.h"
#include "wme.h"
void ieee80211_tx_status_irqsafe(struct ieee80211_hw *hw,
struct sk_buff *skb)
{
struct ieee80211_local *local = hw_to_local(hw);
struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb);
int tmp;
skb->pkt_type = IEEE80211_TX_STATUS_MSG;
skb_queue_tail(info->flags & IEEE80211_TX_CTL_REQ_TX_STATUS ?
&local->skb_queue : &local->skb_queue_unreliable, skb);
tmp = skb_queue_len(&local->skb_queue) +
skb_queue_len(&local->skb_queue_unreliable);
while (tmp > IEEE80211_IRQSAFE_QUEUE_LIMIT &&
(skb = skb_dequeue(&local->skb_queue_unreliable))) {
ieee80211_free_txskb(hw, skb);
tmp--;
I802_DEBUG_INC(local->tx_status_drop);
}
tasklet_schedule(&local->tasklet);
}
EXPORT_SYMBOL(ieee80211_tx_status_irqsafe);
static void ieee80211_handle_filtered_frame(struct ieee80211_local *local,
struct sta_info *sta,
struct sk_buff *skb)
{
struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb);
struct ieee80211_hdr *hdr = (void *)skb->data;
int ac;
if (info->flags & (IEEE80211_TX_CTL_NO_PS_BUFFER |
IEEE80211_TX_CTL_AMPDU |
IEEE80211_TX_CTL_HW_80211_ENCAP)) {
ieee80211_free_txskb(&local->hw, skb);
return;
}
/*
* This skb 'survived' a round-trip through the driver, and
* hopefully the driver didn't mangle it too badly. However,
* we can definitely not rely on the control information
* being correct. Clear it so we don't get junk there, and
* indicate that it needs new processing, but must not be
* modified/encrypted again.
*/
memset(&info->control, 0, sizeof(info->control));
info->control.jiffies = jiffies;
info->control.vif = &sta->sdata->vif;
info->control.flags |= IEEE80211_TX_INTCFL_NEED_TXPROCESSING;
info->flags |= IEEE80211_TX_INTFL_RETRANSMISSION;
info->flags &= ~IEEE80211_TX_TEMPORARY_FLAGS;
sta->deflink.status_stats.filtered++;
/*
* Clear more-data bit on filtered frames, it might be set
* but later frames might time out so it might have to be
* clear again ... It's all rather unlikely (this frame
* should time out first, right?) but let's not confuse
* peers unnecessarily.
*/
if (hdr->frame_control & cpu_to_le16(IEEE80211_FCTL_MOREDATA))
hdr->frame_control &= ~cpu_to_le16(IEEE80211_FCTL_MOREDATA);
if (ieee80211_is_data_qos(hdr->frame_control)) {
u8 *p = ieee80211_get_qos_ctl(hdr);
int tid = *p & IEEE80211_QOS_CTL_TID_MASK;
/*
* Clear EOSP if set, this could happen e.g.
* if an absence period (us being a P2P GO)
* shortens the SP.
*/
if (*p & IEEE80211_QOS_CTL_EOSP)
*p &= ~IEEE80211_QOS_CTL_EOSP;
ac = ieee80211_ac_from_tid(tid);
} else {
ac = IEEE80211_AC_BE;
}
/*
* Clear the TX filter mask for this STA when sending the next
* packet. If the STA went to power save mode, this will happen
* when it wakes up for the next time.
*/
set_sta_flag(sta, WLAN_STA_CLEAR_PS_FILT);
ieee80211_clear_fast_xmit(sta);
/*
* This code races in the following way:
*
* (1) STA sends frame indicating it will go to sleep and does so
* (2) hardware/firmware adds STA to filter list, passes frame up
* (3) hardware/firmware processes TX fifo and suppresses a frame
* (4) we get TX status before having processed the frame and
* knowing that the STA has gone to sleep.
*
* This is actually quite unlikely even when both those events are
* processed from interrupts coming in quickly after one another or
* even at the same time because we queue both TX status events and
* RX frames to be processed by a tasklet and process them in the
* same order that they were received or TX status last. Hence, there
* is no race as long as the frame RX is processed before the next TX
* status, which drivers can ensure, see below.
*
* Note that this can only happen if the hardware or firmware can
* actually add STAs to the filter list, if this is done by the
* driver in response to set_tim() (which will only reduce the race
* this whole filtering tries to solve, not completely solve it)
* this situation cannot happen.
*
* To completely solve this race drivers need to make sure that they
* (a) don't mix the irq-safe/not irq-safe TX status/RX processing
* functions and
* (b) always process RX events before TX status events if ordering
* can be unknown, for example with different interrupt status
* bits.
* (c) if PS mode transitions are manual (i.e. the flag
* %IEEE80211_HW_AP_LINK_PS is set), always process PS state
* changes before calling TX status events if ordering can be
* unknown.
*/
if (test_sta_flag(sta, WLAN_STA_PS_STA) &&
skb_queue_len(&sta->tx_filtered[ac]) < STA_MAX_TX_BUFFER) {
skb_queue_tail(&sta->tx_filtered[ac], skb);
sta_info_recalc_tim(sta);
if (!timer_pending(&local->sta_cleanup))
mod_timer(&local->sta_cleanup,
round_jiffies(jiffies +
STA_INFO_CLEANUP_INTERVAL));
return;
}
if (!test_sta_flag(sta, WLAN_STA_PS_STA) &&
!(info->flags & IEEE80211_TX_INTFL_RETRIED)) {
/* Software retry the packet once */
info->flags |= IEEE80211_TX_INTFL_RETRIED;
ieee80211_add_pending_skb(local, skb);
return;
}
ps_dbg_ratelimited(sta->sdata,
"dropped TX filtered frame, queue_len=%d PS=%d @%lu\n",
skb_queue_len(&sta->tx_filtered[ac]),
!!test_sta_flag(sta, WLAN_STA_PS_STA), jiffies);
ieee80211_free_txskb(&local->hw, skb);
}
static void ieee80211_check_pending_bar(struct sta_info *sta, u8 *addr, u8 tid)
{
struct tid_ampdu_tx *tid_tx;
tid_tx = rcu_dereference(sta->ampdu_mlme.tid_tx[tid]);
if (!tid_tx || !tid_tx->bar_pending)
return;
tid_tx->bar_pending = false;
ieee80211_send_bar(&sta->sdata->vif, addr, tid, tid_tx->failed_bar_ssn);
}
static void ieee80211_frame_acked(struct sta_info *sta, struct sk_buff *skb)
{
struct ieee80211_mgmt *mgmt = (void *) skb->data;
struct ieee80211_local *local = sta->local;
struct ieee80211_sub_if_data *sdata = sta->sdata;
if (ieee80211_is_data_qos(mgmt->frame_control)) {
struct ieee80211_hdr *hdr = (void *) skb->data;
u8 *qc = ieee80211_get_qos_ctl(hdr);
u16 tid = qc[0] & 0xf;
ieee80211_check_pending_bar(sta, hdr->addr1, tid);
}
if (ieee80211_is_action(mgmt->frame_control) &&
!ieee80211_has_protected(mgmt->frame_control) &&
mgmt->u.action.category == WLAN_CATEGORY_HT &&
mgmt->u.action.u.ht_smps.action == WLAN_HT_ACTION_SMPS &&
ieee80211_sdata_running(sdata)) {
enum ieee80211_smps_mode smps_mode;
switch (mgmt->u.action.u.ht_smps.smps_control) {
case WLAN_HT_SMPS_CONTROL_DYNAMIC:
smps_mode = IEEE80211_SMPS_DYNAMIC;
break;
case WLAN_HT_SMPS_CONTROL_STATIC:
smps_mode = IEEE80211_SMPS_STATIC;
break;
case WLAN_HT_SMPS_CONTROL_DISABLED:
default: /* shouldn't happen since we don't send that */
smps_mode = IEEE80211_SMPS_OFF;
break;
}
if (sdata->vif.type == NL80211_IFTYPE_STATION) {
/*
* This update looks racy, but isn't -- if we come
* here we've definitely got a station that we're
* talking to, and on a managed interface that can
* only be the AP. And the only other place updating
* this variable in managed mode is before association.
*/
sdata->deflink.smps_mode = smps_mode;
ieee80211_queue_work(&local->hw, &sdata->recalc_smps);
}
}
}
static void ieee80211_set_bar_pending(struct sta_info *sta, u8 tid, u16 ssn)
{
struct tid_ampdu_tx *tid_tx;
tid_tx = rcu_dereference(sta->ampdu_mlme.tid_tx[tid]);
if (!tid_tx)
return;
tid_tx->failed_bar_ssn = ssn;
tid_tx->bar_pending = true;
}
static int ieee80211_tx_radiotap_len(struct ieee80211_tx_info *info,
struct ieee80211_tx_status *status)
{
struct ieee80211_rate_status *status_rate = NULL;
int len = sizeof(struct ieee80211_radiotap_header);
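	/* must match what ieee80211_add_tx_radiotap_header() writes below */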
if (status && status->n_rates)
status_rate = &status->rates[status->n_rates - 1];
/* IEEE80211_RADIOTAP_RATE rate */
if (status_rate && !(status_rate->rate_idx.flags &
(RATE_INFO_FLAGS_MCS |
RATE_INFO_FLAGS_DMG |
RATE_INFO_FLAGS_EDMG |
RATE_INFO_FLAGS_VHT_MCS |
RATE_INFO_FLAGS_HE_MCS)))
len += 2;
else if (info->status.rates[0].idx >= 0 &&
!(info->status.rates[0].flags &
(IEEE80211_TX_RC_MCS | IEEE80211_TX_RC_VHT_MCS)))
len += 2;
/* IEEE80211_RADIOTAP_TX_FLAGS */
len += 2;
/* IEEE80211_RADIOTAP_DATA_RETRIES */
len += 1;
/* IEEE80211_RADIOTAP_MCS
* IEEE80211_RADIOTAP_VHT */
if (status_rate) {
if (status_rate->rate_idx.flags & RATE_INFO_FLAGS_MCS)
len += 3;
else if (status_rate->rate_idx.flags & RATE_INFO_FLAGS_VHT_MCS)
len = ALIGN(len, 2) + 12;
else if (status_rate->rate_idx.flags & RATE_INFO_FLAGS_HE_MCS)
len = ALIGN(len, 2) + 12;
} else if (info->status.rates[0].idx >= 0) {
if (info->status.rates[0].flags & IEEE80211_TX_RC_MCS)
len += 3;
else if (info->status.rates[0].flags & IEEE80211_TX_RC_VHT_MCS)
len = ALIGN(len, 2) + 12;
}
return len;
}
static void
ieee80211_add_tx_radiotap_header(struct ieee80211_local *local,
struct sk_buff *skb, int retry_count,
int rtap_len, int shift,
struct ieee80211_tx_status *status)
{
struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb);
struct ieee80211_hdr *hdr = (struct ieee80211_hdr *) skb->data;
struct ieee80211_radiotap_header *rthdr;
struct ieee80211_rate_status *status_rate = NULL;
unsigned char *pos;
u16 legacy_rate = 0;
u16 txflags;
if (status && status->n_rates)
status_rate = &status->rates[status->n_rates - 1];
rthdr = skb_push(skb, rtap_len);
memset(rthdr, 0, rtap_len);
rthdr->it_len = cpu_to_le16(rtap_len);
rthdr->it_present =
cpu_to_le32(BIT(IEEE80211_RADIOTAP_TX_FLAGS) |
BIT(IEEE80211_RADIOTAP_DATA_RETRIES));
pos = (unsigned char *)(rthdr + 1);
/*
* XXX: Once radiotap gets the bitmap reset thing the vendor
* extensions proposal contains, we can actually report
* the whole set of tries we did.
*/
/* IEEE80211_RADIOTAP_RATE */
if (status_rate) {
if (!(status_rate->rate_idx.flags &
(RATE_INFO_FLAGS_MCS |
RATE_INFO_FLAGS_DMG |
RATE_INFO_FLAGS_EDMG |
RATE_INFO_FLAGS_VHT_MCS |
RATE_INFO_FLAGS_HE_MCS)))
legacy_rate = status_rate->rate_idx.legacy;
} else if (info->status.rates[0].idx >= 0 &&
!(info->status.rates[0].flags & (IEEE80211_TX_RC_MCS |
IEEE80211_TX_RC_VHT_MCS))) {
struct ieee80211_supported_band *sband;
sband = local->hw.wiphy->bands[info->band];
legacy_rate =
sband->bitrates[info->status.rates[0].idx].bitrate;
}
if (legacy_rate) {
rthdr->it_present |= cpu_to_le32(BIT(IEEE80211_RADIOTAP_RATE));
*pos = DIV_ROUND_UP(legacy_rate, 5 * (1 << shift));
/* padding for tx flags */
pos += 2;
}
/* IEEE80211_RADIOTAP_TX_FLAGS */
txflags = 0;
if (!(info->flags & IEEE80211_TX_STAT_ACK) &&
!is_multicast_ether_addr(hdr->addr1))
txflags |= IEEE80211_RADIOTAP_F_TX_FAIL;
if (info->status.rates[0].flags & IEEE80211_TX_RC_USE_CTS_PROTECT)
txflags |= IEEE80211_RADIOTAP_F_TX_CTS;
if (info->status.rates[0].flags & IEEE80211_TX_RC_USE_RTS_CTS)
txflags |= IEEE80211_RADIOTAP_F_TX_RTS;
put_unaligned_le16(txflags, pos);
pos += 2;
/* IEEE80211_RADIOTAP_DATA_RETRIES */
/* for now report the total retry_count */
*pos = retry_count;
pos++;
if (status_rate && (status_rate->rate_idx.flags & RATE_INFO_FLAGS_MCS))
{
rthdr->it_present |= cpu_to_le32(BIT(IEEE80211_RADIOTAP_MCS));
pos[0] = IEEE80211_RADIOTAP_MCS_HAVE_MCS |
IEEE80211_RADIOTAP_MCS_HAVE_GI |
IEEE80211_RADIOTAP_MCS_HAVE_BW;
if (status_rate->rate_idx.flags & RATE_INFO_FLAGS_SHORT_GI)
pos[1] |= IEEE80211_RADIOTAP_MCS_SGI;
if (status_rate->rate_idx.bw == RATE_INFO_BW_40)
pos[1] |= IEEE80211_RADIOTAP_MCS_BW_40;
pos[2] = status_rate->rate_idx.mcs;
pos += 3;
} else if (status_rate && (status_rate->rate_idx.flags &
RATE_INFO_FLAGS_VHT_MCS))
{
u16 known = local->hw.radiotap_vht_details &
(IEEE80211_RADIOTAP_VHT_KNOWN_GI |
IEEE80211_RADIOTAP_VHT_KNOWN_BANDWIDTH);
rthdr->it_present |= cpu_to_le32(BIT(IEEE80211_RADIOTAP_VHT));
/* required alignment from rthdr */
pos = (u8 *)rthdr + ALIGN(pos - (u8 *)rthdr, 2);
/* u16 known - IEEE80211_RADIOTAP_VHT_KNOWN_* */
put_unaligned_le16(known, pos);
pos += 2;
/* u8 flags - IEEE80211_RADIOTAP_VHT_FLAG_* */
if (status_rate->rate_idx.flags & RATE_INFO_FLAGS_SHORT_GI)
*pos |= IEEE80211_RADIOTAP_VHT_FLAG_SGI;
pos++;
/* u8 bandwidth */
switch (status_rate->rate_idx.bw) {
case RATE_INFO_BW_160:
*pos = 11;
break;
case RATE_INFO_BW_80:
*pos = 4;
break;
case RATE_INFO_BW_40:
*pos = 1;
break;
default:
*pos = 0;
break;
}
pos++;
/* u8 mcs_nss[4] */
*pos = (status_rate->rate_idx.mcs << 4) |
status_rate->rate_idx.nss;
pos += 4;
/* u8 coding */
pos++;
/* u8 group_id */
pos++;
/* u16 partial_aid */
pos += 2;
} else if (status_rate && (status_rate->rate_idx.flags &
RATE_INFO_FLAGS_HE_MCS))
{
struct ieee80211_radiotap_he *he;
rthdr->it_present |= cpu_to_le32(BIT(IEEE80211_RADIOTAP_HE));
/* required alignment from rthdr */
pos = (u8 *)rthdr + ALIGN(pos - (u8 *)rthdr, 2);
he = (struct ieee80211_radiotap_he *)pos;
he->data1 = cpu_to_le16(IEEE80211_RADIOTAP_HE_DATA1_FORMAT_SU |
IEEE80211_RADIOTAP_HE_DATA1_DATA_MCS_KNOWN |
IEEE80211_RADIOTAP_HE_DATA1_DATA_DCM_KNOWN |
IEEE80211_RADIOTAP_HE_DATA1_BW_RU_ALLOC_KNOWN);
he->data2 = cpu_to_le16(IEEE80211_RADIOTAP_HE_DATA2_GI_KNOWN);
#define HE_PREP(f, val) le16_encode_bits(val, IEEE80211_RADIOTAP_HE_##f)
he->data6 |= HE_PREP(DATA6_NSTS, status_rate->rate_idx.nss);
#define CHECK_GI(s) \
BUILD_BUG_ON(IEEE80211_RADIOTAP_HE_DATA5_GI_##s != \
(int)NL80211_RATE_INFO_HE_GI_##s)
CHECK_GI(0_8);
CHECK_GI(1_6);
CHECK_GI(3_2);
he->data3 |= HE_PREP(DATA3_DATA_MCS, status_rate->rate_idx.mcs);
he->data3 |= HE_PREP(DATA3_DATA_DCM, status_rate->rate_idx.he_dcm);
he->data5 |= HE_PREP(DATA5_GI, status_rate->rate_idx.he_gi);
switch (status_rate->rate_idx.bw) {
case RATE_INFO_BW_20:
he->data5 |= HE_PREP(DATA5_DATA_BW_RU_ALLOC,
IEEE80211_RADIOTAP_HE_DATA5_DATA_BW_RU_ALLOC_20MHZ);
break;
case RATE_INFO_BW_40:
he->data5 |= HE_PREP(DATA5_DATA_BW_RU_ALLOC,
IEEE80211_RADIOTAP_HE_DATA5_DATA_BW_RU_ALLOC_40MHZ);
break;
case RATE_INFO_BW_80:
he->data5 |= HE_PREP(DATA5_DATA_BW_RU_ALLOC,
IEEE80211_RADIOTAP_HE_DATA5_DATA_BW_RU_ALLOC_80MHZ);
break;
case RATE_INFO_BW_160:
he->data5 |= HE_PREP(DATA5_DATA_BW_RU_ALLOC,
IEEE80211_RADIOTAP_HE_DATA5_DATA_BW_RU_ALLOC_160MHZ);
break;
case RATE_INFO_BW_HE_RU:
#define CHECK_RU_ALLOC(s) \
BUILD_BUG_ON(IEEE80211_RADIOTAP_HE_DATA5_DATA_BW_RU_ALLOC_##s##T != \
NL80211_RATE_INFO_HE_RU_ALLOC_##s + 4)
CHECK_RU_ALLOC(26);
CHECK_RU_ALLOC(52);
CHECK_RU_ALLOC(106);
CHECK_RU_ALLOC(242);
CHECK_RU_ALLOC(484);
CHECK_RU_ALLOC(996);
CHECK_RU_ALLOC(2x996);
he->data5 |= HE_PREP(DATA5_DATA_BW_RU_ALLOC,
status_rate->rate_idx.he_ru_alloc + 4);
break;
default:
WARN_ONCE(1, "Invalid SU BW %d\n", status_rate->rate_idx.bw);
}
pos += sizeof(struct ieee80211_radiotap_he);
}
if (status_rate || info->status.rates[0].idx < 0)
return;
/* IEEE80211_RADIOTAP_MCS
* IEEE80211_RADIOTAP_VHT */
if (info->status.rates[0].flags & IEEE80211_TX_RC_MCS) {
rthdr->it_present |= cpu_to_le32(BIT(IEEE80211_RADIOTAP_MCS));
pos[0] = IEEE80211_RADIOTAP_MCS_HAVE_MCS |
IEEE80211_RADIOTAP_MCS_HAVE_GI |
IEEE80211_RADIOTAP_MCS_HAVE_BW;
if (info->status.rates[0].flags & IEEE80211_TX_RC_SHORT_GI)
pos[1] |= IEEE80211_RADIOTAP_MCS_SGI;
if (info->status.rates[0].flags & IEEE80211_TX_RC_40_MHZ_WIDTH)
pos[1] |= IEEE80211_RADIOTAP_MCS_BW_40;
if (info->status.rates[0].flags & IEEE80211_TX_RC_GREEN_FIELD)
pos[1] |= IEEE80211_RADIOTAP_MCS_FMT_GF;
pos[2] = info->status.rates[0].idx;
pos += 3;
} else if (info->status.rates[0].flags & IEEE80211_TX_RC_VHT_MCS) {
u16 known = local->hw.radiotap_vht_details &
(IEEE80211_RADIOTAP_VHT_KNOWN_GI |
IEEE80211_RADIOTAP_VHT_KNOWN_BANDWIDTH);
rthdr->it_present |= cpu_to_le32(BIT(IEEE80211_RADIOTAP_VHT));
/* required alignment from rthdr */
pos = (u8 *)rthdr + ALIGN(pos - (u8 *)rthdr, 2);
/* u16 known - IEEE80211_RADIOTAP_VHT_KNOWN_* */
put_unaligned_le16(known, pos);
pos += 2;
/* u8 flags - IEEE80211_RADIOTAP_VHT_FLAG_* */
if (info->status.rates[0].flags & IEEE80211_TX_RC_SHORT_GI)
*pos |= IEEE80211_RADIOTAP_VHT_FLAG_SGI;
pos++;
/* u8 bandwidth */
if (info->status.rates[0].flags & IEEE80211_TX_RC_40_MHZ_WIDTH)
*pos = 1;
else if (info->status.rates[0].flags & IEEE80211_TX_RC_80_MHZ_WIDTH)
*pos = 4;
else if (info->status.rates[0].flags & IEEE80211_TX_RC_160_MHZ_WIDTH)
*pos = 11;
else /* IEEE80211_TX_RC_{20_MHZ_WIDTH,FIXME:DUP_DATA} */
*pos = 0;
pos++;
/* u8 mcs_nss[4] */
*pos = (ieee80211_rate_get_vht_mcs(&info->status.rates[0]) << 4) |
ieee80211_rate_get_vht_nss(&info->status.rates[0]);
pos += 4;
/* u8 coding */
pos++;
/* u8 group_id */
pos++;
/* u16 partial_aid */
pos += 2;
}
}
/*
 * Handle the TX status of TDLS teardown frames.
 * If the frame wasn't ACKed by the peer, it is re-sent through the AP.
 */
static void ieee80211_tdls_td_tx_handle(struct ieee80211_local *local,
struct ieee80211_sub_if_data *sdata,
struct sk_buff *skb, u32 flags)
{
struct sk_buff *teardown_skb;
struct sk_buff *orig_teardown_skb;
bool is_teardown = false;
/* Get the teardown data we need and free the lock */
spin_lock(&sdata->u.mgd.teardown_lock);
teardown_skb = sdata->u.mgd.teardown_skb;
orig_teardown_skb = sdata->u.mgd.orig_teardown_skb;
if ((skb == orig_teardown_skb) && teardown_skb) {
sdata->u.mgd.teardown_skb = NULL;
sdata->u.mgd.orig_teardown_skb = NULL;
is_teardown = true;
}
spin_unlock(&sdata->u.mgd.teardown_lock);
if (is_teardown) {
/* This mechanism relies on being able to get ACKs */
WARN_ON(!ieee80211_hw_check(&local->hw, REPORTS_TX_ACK_STATUS));
/* Check if peer has ACKed */
if (flags & IEEE80211_TX_STAT_ACK) {
dev_kfree_skb_any(teardown_skb);
} else {
tdls_dbg(sdata,
"TDLS Resending teardown through AP\n");
ieee80211_subif_start_xmit(teardown_skb, skb->dev);
}
}
}
static struct ieee80211_sub_if_data *
ieee80211_sdata_from_skb(struct ieee80211_local *local, struct sk_buff *skb)
{
struct ieee80211_sub_if_data *sdata;
if (skb->dev) {
list_for_each_entry_rcu(sdata, &local->interfaces, list) {
if (!sdata->dev)
continue;
if (skb->dev == sdata->dev)
return sdata;
}
return NULL;
}
return rcu_dereference(local->p2p_sdata);
}
static void ieee80211_report_ack_skb(struct ieee80211_local *local,
struct sk_buff *orig_skb,
bool acked, bool dropped,
ktime_t ack_hwtstamp)
{
struct ieee80211_tx_info *info = IEEE80211_SKB_CB(orig_skb);
struct sk_buff *skb;
unsigned long flags;
spin_lock_irqsave(&local->ack_status_lock, flags);
skb = idr_remove(&local->ack_status_frames, info->ack_frame_id);
spin_unlock_irqrestore(&local->ack_status_lock, flags);
if (!skb)
return;
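	/*
	 * Report the status either through the cfg80211 APIs (for
	 * nl80211-requested frames) or via the socket wifi-ack mechanism.
	 */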
if (info->flags & IEEE80211_TX_INTFL_NL80211_FRAME_TX) {
u64 cookie = IEEE80211_SKB_CB(skb)->ack.cookie;
struct ieee80211_sub_if_data *sdata;
struct ieee80211_hdr *hdr = (void *)skb->data;
bool is_valid_ack_signal =
!!(info->status.flags & IEEE80211_TX_STATUS_ACK_SIGNAL_VALID);
struct cfg80211_tx_status status = {
.cookie = cookie,
.buf = skb->data,
.len = skb->len,
.ack = acked,
};
if (ieee80211_is_timing_measurement(orig_skb) ||
ieee80211_is_ftm(orig_skb)) {
status.tx_tstamp =
ktime_to_ns(skb_hwtstamps(orig_skb)->hwtstamp);
status.ack_tstamp = ktime_to_ns(ack_hwtstamp);
}
rcu_read_lock();
sdata = ieee80211_sdata_from_skb(local, skb);
if (sdata) {
if (skb->protocol == sdata->control_port_protocol ||
skb->protocol == cpu_to_be16(ETH_P_PREAUTH))
cfg80211_control_port_tx_status(&sdata->wdev,
cookie,
skb->data,
skb->len,
acked,
GFP_ATOMIC);
else if (ieee80211_is_any_nullfunc(hdr->frame_control))
cfg80211_probe_status(sdata->dev, hdr->addr1,
cookie, acked,
info->status.ack_signal,
is_valid_ack_signal,
GFP_ATOMIC);
else if (ieee80211_is_mgmt(hdr->frame_control))
cfg80211_mgmt_tx_status_ext(&sdata->wdev,
&status,
GFP_ATOMIC);
else
pr_warn("Unknown status report in ack skb\n");
}
rcu_read_unlock();
dev_kfree_skb_any(skb);
} else if (dropped) {
dev_kfree_skb_any(skb);
} else {
/* consumes skb */
skb_complete_wifi_ack(skb, acked);
}
}
static void ieee80211_report_used_skb(struct ieee80211_local *local,
struct sk_buff *skb, bool dropped,
ktime_t ack_hwtstamp)
{
struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb);
u16 tx_time_est = ieee80211_info_get_tx_time_est(info);
struct ieee80211_hdr *hdr = (void *)skb->data;
bool acked = info->flags & IEEE80211_TX_STAT_ACK;
if (dropped)
acked = false;
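	/* release the airtime (AQL) that was accounted as pending for this frame */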
if (tx_time_est) {
struct sta_info *sta;
rcu_read_lock();
sta = sta_info_get_by_addrs(local, hdr->addr1, hdr->addr2);
ieee80211_sta_update_pending_airtime(local, sta,
skb_get_queue_mapping(skb),
tx_time_est,
true);
rcu_read_unlock();
}
if (info->flags & IEEE80211_TX_INTFL_MLME_CONN_TX) {
struct ieee80211_sub_if_data *sdata;
rcu_read_lock();
sdata = ieee80211_sdata_from_skb(local, skb);
if (!sdata) {
skb->dev = NULL;
} else if (!dropped) {
unsigned int hdr_size =
ieee80211_hdrlen(hdr->frame_control);
/* Check to see if packet is a TDLS teardown packet */
if (ieee80211_is_data(hdr->frame_control) &&
(ieee80211_get_tdls_action(skb, hdr_size) ==
WLAN_TDLS_TEARDOWN)) {
ieee80211_tdls_td_tx_handle(local, sdata, skb,
info->flags);
} else if (ieee80211_s1g_is_twt_setup(skb)) {
if (!acked) {
struct sk_buff *qskb;
qskb = skb_clone(skb, GFP_ATOMIC);
if (qskb) {
skb_queue_tail(&sdata->status_queue,
qskb);
wiphy_work_queue(local->hw.wiphy,
&sdata->work);
}
}
} else {
ieee80211_mgd_conn_tx_status(sdata,
hdr->frame_control,
acked);
}
}
rcu_read_unlock();
} else if (info->ack_frame_id) {
ieee80211_report_ack_skb(local, skb, acked, dropped,
ack_hwtstamp);
}
if (!dropped && skb->destructor) {
skb->wifi_acked_valid = 1;
skb->wifi_acked = acked;
}
ieee80211_led_tx(local);
if (skb_has_frag_list(skb)) {
kfree_skb_list(skb_shinfo(skb)->frag_list);
skb_shinfo(skb)->frag_list = NULL;
}
}
/*
 * Use a static threshold for now; the best value is to be determined
 * by testing ...
 * Should it depend on:
 * - the number of retransmissions
 * - the current throughput (higher value for higher throughput)?
 */
#define STA_LOST_PKT_THRESHOLD 50
#define STA_LOST_PKT_TIME HZ /* 1 sec since last ACK */
#define STA_LOST_TDLS_PKT_TIME (10*HZ) /* 10secs since last ACK */
static void ieee80211_lost_packet(struct sta_info *sta,
struct ieee80211_tx_info *info)
{
unsigned long pkt_time = STA_LOST_PKT_TIME;
unsigned int pkt_thr = STA_LOST_PKT_THRESHOLD;
/* If driver relies on its own algorithm for station kickout, skip
* mac80211 packet loss mechanism.
*/
if (ieee80211_hw_check(&sta->local->hw, REPORTS_LOW_ACK))
return;
/* This packet was aggregated but doesn't carry status info */
if ((info->flags & IEEE80211_TX_CTL_AMPDU) &&
!(info->flags & IEEE80211_TX_STAT_AMPDU))
return;
sta->deflink.status_stats.lost_packets++;
if (sta->sta.tdls) {
pkt_time = STA_LOST_TDLS_PKT_TIME;
pkt_thr = STA_LOST_PKT_THRESHOLD;
}
	/*
	 * For TDLS, only trigger the CQM packet-loss mechanism if the last
	 * STA_LOST_PKT_THRESHOLD packets were all lost and no ACK has been
	 * received within the last STA_LOST_TDLS_PKT_TIME (10 seconds).
	 * For non-TDLS, use STA_LOST_PKT_THRESHOLD and STA_LOST_PKT_TIME.
	 */
if (sta->deflink.status_stats.lost_packets < pkt_thr ||
!time_after(jiffies, sta->deflink.status_stats.last_pkt_time + pkt_time))
return;
cfg80211_cqm_pktloss_notify(sta->sdata->dev, sta->sta.addr,
sta->deflink.status_stats.lost_packets,
GFP_ATOMIC);
sta->deflink.status_stats.lost_packets = 0;
}
static int ieee80211_tx_get_rates(struct ieee80211_hw *hw,
struct ieee80211_tx_info *info,
int *retry_count)
{
int count = -1;
int i;
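	/*
	 * Walk the rate table: the return value is the index of the last
	 * rate actually used, and *retry_count becomes the total number of
	 * attempts minus the initial transmission.
	 */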
for (i = 0; i < IEEE80211_TX_MAX_RATES; i++) {
if ((info->flags & IEEE80211_TX_CTL_AMPDU) &&
!(info->flags & IEEE80211_TX_STAT_AMPDU)) {
/* just the first aggr frame carry status info */
info->status.rates[i].idx = -1;
info->status.rates[i].count = 0;
break;
} else if (info->status.rates[i].idx < 0) {
break;
} else if (i >= hw->max_report_rates) {
/* the HW cannot have attempted that rate */
info->status.rates[i].idx = -1;
info->status.rates[i].count = 0;
break;
}
count += info->status.rates[i].count;
}
if (count < 0)
count = 0;
*retry_count = count;
return i - 1;
}
void ieee80211_tx_monitor(struct ieee80211_local *local, struct sk_buff *skb,
int retry_count, int shift, bool send_to_cooked,
struct ieee80211_tx_status *status)
{
struct sk_buff *skb2;
struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb);
struct ieee80211_sub_if_data *sdata;
struct net_device *prev_dev = NULL;
int rtap_len;
/* send frame to monitor interfaces now */
rtap_len = ieee80211_tx_radiotap_len(info, status);
if (WARN_ON_ONCE(skb_headroom(skb) < rtap_len)) {
pr_err("ieee80211_tx_status: headroom too small\n");
dev_kfree_skb(skb);
return;
}
ieee80211_add_tx_radiotap_header(local, skb, retry_count,
rtap_len, shift, status);
/* XXX: is this sufficient for BPF? */
skb_reset_mac_header(skb);
skb->ip_summed = CHECKSUM_UNNECESSARY;
skb->pkt_type = PACKET_OTHERHOST;
skb->protocol = htons(ETH_P_802_2);
memset(skb->cb, 0, sizeof(skb->cb));
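	/*
	 * Hand a clone to every running monitor interface and the original
	 * skb to the last one; free it if no monitor interface took it.
	 */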
rcu_read_lock();
list_for_each_entry_rcu(sdata, &local->interfaces, list) {
if (sdata->vif.type == NL80211_IFTYPE_MONITOR) {
if (!ieee80211_sdata_running(sdata))
continue;
if ((sdata->u.mntr.flags & MONITOR_FLAG_COOK_FRAMES) &&
!send_to_cooked)
continue;
if (prev_dev) {
skb2 = skb_clone(skb, GFP_ATOMIC);
if (skb2) {
skb2->dev = prev_dev;
netif_rx(skb2);
}
}
prev_dev = sdata->dev;
}
}
if (prev_dev) {
skb->dev = prev_dev;
netif_rx(skb);
skb = NULL;
}
rcu_read_unlock();
dev_kfree_skb(skb);
}
static void __ieee80211_tx_status(struct ieee80211_hw *hw,
struct ieee80211_tx_status *status,
int rates_idx, int retry_count)
{
struct sk_buff *skb = status->skb;
struct ieee80211_hdr *hdr = (struct ieee80211_hdr *) skb->data;
struct ieee80211_local *local = hw_to_local(hw);
struct ieee80211_tx_info *info = status->info;
struct sta_info *sta;
__le16 fc;
bool send_to_cooked;
bool acked;
bool noack_success;
struct ieee80211_bar *bar;
int shift = 0;
int tid = IEEE80211_NUM_TIDS;
fc = hdr->frame_control;
if (status->sta) {
sta = container_of(status->sta, struct sta_info, sta);
shift = ieee80211_vif_get_shift(&sta->sdata->vif);
if (info->flags & IEEE80211_TX_STATUS_EOSP)
clear_sta_flag(sta, WLAN_STA_SP);
acked = !!(info->flags & IEEE80211_TX_STAT_ACK);
noack_success = !!(info->flags &
IEEE80211_TX_STAT_NOACK_TRANSMITTED);
/* mesh Peer Service Period support */
if (ieee80211_vif_is_mesh(&sta->sdata->vif) &&
ieee80211_is_data_qos(fc))
ieee80211_mpsp_trigger_process(
ieee80211_get_qos_ctl(hdr), sta, true, acked);
if (ieee80211_hw_check(&local->hw, HAS_RATE_CONTROL) &&
(ieee80211_is_data(hdr->frame_control)) &&
(rates_idx != -1))
sta->deflink.tx_stats.last_rate =
info->status.rates[rates_idx];
if ((info->flags & IEEE80211_TX_STAT_AMPDU_NO_BACK) &&
(ieee80211_is_data_qos(fc))) {
u16 ssn;
u8 *qc;
qc = ieee80211_get_qos_ctl(hdr);
tid = qc[0] & 0xf;
ssn = ((le16_to_cpu(hdr->seq_ctrl) + 0x10)
& IEEE80211_SCTL_SEQ);
ieee80211_send_bar(&sta->sdata->vif, hdr->addr1,
tid, ssn);
} else if (ieee80211_is_data_qos(fc)) {
u8 *qc = ieee80211_get_qos_ctl(hdr);
tid = qc[0] & 0xf;
}
if (!acked && ieee80211_is_back_req(fc)) {
u16 control;
/*
* BAR failed, store the last SSN and retry sending
* the BAR when the next unicast transmission on the
* same TID succeeds.
*/
bar = (struct ieee80211_bar *) skb->data;
control = le16_to_cpu(bar->control);
if (!(control & IEEE80211_BAR_CTRL_MULTI_TID)) {
u16 ssn = le16_to_cpu(bar->start_seq_num);
tid = (control &
IEEE80211_BAR_CTRL_TID_INFO_MASK) >>
IEEE80211_BAR_CTRL_TID_INFO_SHIFT;
ieee80211_set_bar_pending(sta, tid, ssn);
}
}
if (info->flags & IEEE80211_TX_STAT_TX_FILTERED) {
ieee80211_handle_filtered_frame(local, sta, skb);
return;
} else if (ieee80211_is_data_present(fc)) {
if (!acked && !noack_success)
sta->deflink.status_stats.msdu_failed[tid]++;
sta->deflink.status_stats.msdu_retries[tid] +=
retry_count;
}
if (!(info->flags & IEEE80211_TX_CTL_INJECTED) && acked)
ieee80211_frame_acked(sta, skb);
}
/* SNMP counters
* Fragments are passed to low-level drivers as separate skbs, so these
* are actually fragments, not frames. Update frame counters only for
* the first fragment of the frame. */
if ((info->flags & IEEE80211_TX_STAT_ACK) ||
(info->flags & IEEE80211_TX_STAT_NOACK_TRANSMITTED)) {
if (ieee80211_is_first_frag(hdr->seq_ctrl)) {
I802_DEBUG_INC(local->dot11TransmittedFrameCount);
if (is_multicast_ether_addr(ieee80211_get_DA(hdr)))
I802_DEBUG_INC(local->dot11MulticastTransmittedFrameCount);
if (retry_count > 0)
I802_DEBUG_INC(local->dot11RetryCount);
if (retry_count > 1)
I802_DEBUG_INC(local->dot11MultipleRetryCount);
}
/* This counter shall be incremented for an acknowledged MPDU
* with an individual address in the address 1 field or an MPDU
* with a multicast address in the address 1 field of type Data
* or Management. */
if (!is_multicast_ether_addr(hdr->addr1) ||
ieee80211_is_data(fc) ||
ieee80211_is_mgmt(fc))
I802_DEBUG_INC(local->dot11TransmittedFragmentCount);
} else {
if (ieee80211_is_first_frag(hdr->seq_ctrl))
I802_DEBUG_INC(local->dot11FailedCount);
}
if (ieee80211_is_any_nullfunc(fc) &&
ieee80211_has_pm(fc) &&
ieee80211_hw_check(&local->hw, REPORTS_TX_ACK_STATUS) &&
!(info->flags & IEEE80211_TX_CTL_INJECTED) &&
local->ps_sdata && !(local->scanning)) {
if (info->flags & IEEE80211_TX_STAT_ACK)
local->ps_sdata->u.mgd.flags |=
IEEE80211_STA_NULLFUNC_ACKED;
mod_timer(&local->dynamic_ps_timer,
jiffies + msecs_to_jiffies(10));
}
ieee80211_report_used_skb(local, skb, false, status->ack_hwtstamp);
/* this was a transmitted frame, but now we want to reuse it */
skb_orphan(skb);
/* Need to make a copy before skb->cb gets cleared */
send_to_cooked = !!(info->flags & IEEE80211_TX_CTL_INJECTED) ||
!(ieee80211_is_data(fc));
/*
* This is a bit racy but we can avoid a lot of work
* with this test...
*/
if (!local->monitors && (!send_to_cooked || !local->cooked_mntrs)) {
if (status->free_list)
list_add_tail(&skb->list, status->free_list);
else
dev_kfree_skb(skb);
return;
}
/* send to monitor interfaces */
ieee80211_tx_monitor(local, skb, retry_count, shift,
send_to_cooked, status);
}
void ieee80211_tx_status(struct ieee80211_hw *hw, struct sk_buff *skb)
{
struct ieee80211_hdr *hdr = (struct ieee80211_hdr *) skb->data;
struct ieee80211_local *local = hw_to_local(hw);
struct ieee80211_tx_status status = {
.skb = skb,
.info = IEEE80211_SKB_CB(skb),
};
struct sta_info *sta;
rcu_read_lock();
sta = sta_info_get_by_addrs(local, hdr->addr1, hdr->addr2);
if (sta)
status.sta = &sta->sta;
ieee80211_tx_status_ext(hw, &status);
rcu_read_unlock();
}
EXPORT_SYMBOL(ieee80211_tx_status);
void ieee80211_tx_status_ext(struct ieee80211_hw *hw,
struct ieee80211_tx_status *status)
{
struct ieee80211_local *local = hw_to_local(hw);
struct ieee80211_tx_info *info = status->info;
struct ieee80211_sta *pubsta = status->sta;
struct sk_buff *skb = status->skb;
struct sta_info *sta = NULL;
int rates_idx, retry_count;
bool acked, noack_success, ack_signal_valid;
u16 tx_time_est;
if (pubsta) {
sta = container_of(pubsta, struct sta_info, sta);
if (status->n_rates)
sta->deflink.tx_stats.last_rate_info =
status->rates[status->n_rates - 1].rate_idx;
}
if (skb && (tx_time_est =
ieee80211_info_get_tx_time_est(IEEE80211_SKB_CB(skb))) > 0) {
/* Do this here to avoid the expensive lookup of the sta
* in ieee80211_report_used_skb().
*/
ieee80211_sta_update_pending_airtime(local, sta,
skb_get_queue_mapping(skb),
tx_time_est,
true);
ieee80211_info_set_tx_time_est(IEEE80211_SKB_CB(skb), 0);
}
if (!status->info)
goto free;
rates_idx = ieee80211_tx_get_rates(hw, info, &retry_count);
acked = !!(info->flags & IEEE80211_TX_STAT_ACK);
noack_success = !!(info->flags & IEEE80211_TX_STAT_NOACK_TRANSMITTED);
ack_signal_valid =
!!(info->status.flags & IEEE80211_TX_STATUS_ACK_SIGNAL_VALID);
if (pubsta) {
struct ieee80211_sub_if_data *sdata = sta->sdata;
if (!acked && !noack_success)
sta->deflink.status_stats.retry_failed++;
sta->deflink.status_stats.retry_count += retry_count;
if (ieee80211_hw_check(&local->hw, REPORTS_TX_ACK_STATUS)) {
if (sdata->vif.type == NL80211_IFTYPE_STATION &&
skb && !(info->flags & IEEE80211_TX_CTL_HW_80211_ENCAP))
ieee80211_sta_tx_notify(sdata, (void *) skb->data,
acked, info->status.tx_time);
if (acked) {
sta->deflink.status_stats.last_ack = jiffies;
if (sta->deflink.status_stats.lost_packets)
sta->deflink.status_stats.lost_packets = 0;
/* Track when last packet was ACKed */
sta->deflink.status_stats.last_pkt_time = jiffies;
/* Reset connection monitor */
if (sdata->vif.type == NL80211_IFTYPE_STATION &&
unlikely(sdata->u.mgd.probe_send_count > 0))
sdata->u.mgd.probe_send_count = 0;
if (ack_signal_valid) {
sta->deflink.status_stats.last_ack_signal =
(s8)info->status.ack_signal;
sta->deflink.status_stats.ack_signal_filled = true;
ewma_avg_signal_add(&sta->deflink.status_stats.avg_ack_signal,
-info->status.ack_signal);
}
} else if (test_sta_flag(sta, WLAN_STA_PS_STA)) {
/*
* The STA is in power save mode, so assume
* that this TX packet failed because of that.
*/
if (skb)
ieee80211_handle_filtered_frame(local, sta, skb);
return;
} else if (noack_success) {
/* nothing to do here, do not account as lost */
} else {
ieee80211_lost_packet(sta, info);
}
}
rate_control_tx_status(local, status);
if (ieee80211_vif_is_mesh(&sta->sdata->vif))
ieee80211s_update_metric(local, sta, status);
}
if (skb && !(info->flags & IEEE80211_TX_CTL_HW_80211_ENCAP))
return __ieee80211_tx_status(hw, status, rates_idx,
retry_count);
if (acked || noack_success) {
I802_DEBUG_INC(local->dot11TransmittedFrameCount);
if (!pubsta)
I802_DEBUG_INC(local->dot11MulticastTransmittedFrameCount);
if (retry_count > 0)
I802_DEBUG_INC(local->dot11RetryCount);
if (retry_count > 1)
I802_DEBUG_INC(local->dot11MultipleRetryCount);
} else {
I802_DEBUG_INC(local->dot11FailedCount);
}
free:
if (!skb)
return;
ieee80211_report_used_skb(local, skb, false, status->ack_hwtstamp);
if (status->free_list)
list_add_tail(&skb->list, status->free_list);
else
dev_kfree_skb(skb);
}
EXPORT_SYMBOL(ieee80211_tx_status_ext);
void ieee80211_tx_rate_update(struct ieee80211_hw *hw,
struct ieee80211_sta *pubsta,
struct ieee80211_tx_info *info)
{
struct ieee80211_local *local = hw_to_local(hw);
struct sta_info *sta = container_of(pubsta, struct sta_info, sta);
struct ieee80211_tx_status status = {
.info = info,
.sta = pubsta,
};
rate_control_tx_status(local, &status);
if (ieee80211_hw_check(&local->hw, HAS_RATE_CONTROL))
sta->deflink.tx_stats.last_rate = info->status.rates[0];
}
EXPORT_SYMBOL(ieee80211_tx_rate_update);
void ieee80211_report_low_ack(struct ieee80211_sta *pubsta, u32 num_packets)
{
struct sta_info *sta = container_of(pubsta, struct sta_info, sta);
cfg80211_cqm_pktloss_notify(sta->sdata->dev, sta->sta.addr,
num_packets, GFP_ATOMIC);
}
EXPORT_SYMBOL(ieee80211_report_low_ack);
void ieee80211_free_txskb(struct ieee80211_hw *hw, struct sk_buff *skb)
{
struct ieee80211_local *local = hw_to_local(hw);
ktime_t kt = ktime_set(0, 0);
ieee80211_report_used_skb(local, skb, true, kt);
dev_kfree_skb_any(skb);
}
EXPORT_SYMBOL(ieee80211_free_txskb);
void ieee80211_purge_tx_queue(struct ieee80211_hw *hw,
struct sk_buff_head *skbs)
{
struct sk_buff *skb;
while ((skb = __skb_dequeue(skbs)))
ieee80211_free_txskb(hw, skb);
}
| linux-master | net/mac80211/status.c |
// SPDX-License-Identifier: GPL-2.0-only
/*
* Copyright 2003-2004, Instant802 Networks, Inc.
* Copyright 2005-2006, Devicescape Software, Inc.
* Copyright 2014-2015, Qualcomm Atheros, Inc.
*
* Rewrite: Copyright (C) 2013 Linaro Ltd <[email protected]>
*/
#include <linux/kernel.h>
#include <linux/types.h>
#include <linux/err.h>
#include <linux/scatterlist.h>
#include <crypto/aead.h>
#include "aead_api.h"
int aead_encrypt(struct crypto_aead *tfm, u8 *b_0, u8 *aad, size_t aad_len,
u8 *data, size_t data_len, u8 *mic)
{
size_t mic_len = crypto_aead_authsize(tfm);
struct scatterlist sg[3];
struct aead_request *aead_req;
int reqsize = sizeof(*aead_req) + crypto_aead_reqsize(tfm);
u8 *__aad;
int ret;
aead_req = kzalloc(reqsize + aad_len, GFP_ATOMIC);
if (!aead_req)
return -ENOMEM;
__aad = (u8 *)aead_req + reqsize;
memcpy(__aad, aad, aad_len);
sg_init_table(sg, 3);
sg_set_buf(&sg[0], __aad, aad_len);
sg_set_buf(&sg[1], data, data_len);
sg_set_buf(&sg[2], mic, mic_len);
aead_request_set_tfm(aead_req, tfm);
aead_request_set_crypt(aead_req, sg, sg, data_len, b_0);
aead_request_set_ad(aead_req, sg[0].length);
ret = crypto_aead_encrypt(aead_req);
kfree_sensitive(aead_req);
return ret;
}
int aead_decrypt(struct crypto_aead *tfm, u8 *b_0, u8 *aad, size_t aad_len,
u8 *data, size_t data_len, u8 *mic)
{
size_t mic_len = crypto_aead_authsize(tfm);
struct scatterlist sg[3];
struct aead_request *aead_req;
int reqsize = sizeof(*aead_req) + crypto_aead_reqsize(tfm);
u8 *__aad;
int err;
if (data_len == 0)
return -EINVAL;
aead_req = kzalloc(reqsize + aad_len, GFP_ATOMIC);
if (!aead_req)
return -ENOMEM;
__aad = (u8 *)aead_req + reqsize;
memcpy(__aad, aad, aad_len);
sg_init_table(sg, 3);
sg_set_buf(&sg[0], __aad, aad_len);
sg_set_buf(&sg[1], data, data_len);
sg_set_buf(&sg[2], mic, mic_len);
aead_request_set_tfm(aead_req, tfm);
aead_request_set_crypt(aead_req, sg, sg, data_len + mic_len, b_0);
aead_request_set_ad(aead_req, sg[0].length);
err = crypto_aead_decrypt(aead_req);
kfree_sensitive(aead_req);
return err;
}
struct crypto_aead *
aead_key_setup_encrypt(const char *alg, const u8 key[],
size_t key_len, size_t mic_len)
{
struct crypto_aead *tfm;
int err;
tfm = crypto_alloc_aead(alg, 0, CRYPTO_ALG_ASYNC);
if (IS_ERR(tfm))
return tfm;
err = crypto_aead_setkey(tfm, key, key_len);
if (err)
goto free_aead;
err = crypto_aead_setauthsize(tfm, mic_len);
if (err)
goto free_aead;
return tfm;
free_aead:
crypto_free_aead(tfm);
return ERR_PTR(err);
}
void aead_key_free(struct crypto_aead *tfm)
{
crypto_free_aead(tfm);
}
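/*
 * Illustrative usage sketch: the CCMP and GCMP code wrap these helpers
 * with a fixed algorithm name and MIC length, roughly as follows (the
 * real wrappers live in aes_ccm.h/aes_gcm.h):
 *
 *	tfm = aead_key_setup_encrypt("ccm(aes)", key, key_len, mic_len);
 *	if (IS_ERR(tfm))
 *		return tfm;
 *	...
 *	ret = aead_encrypt(tfm, b_0, aad, aad_len, data, data_len, mic);
 *	...
 *	aead_key_free(tfm);
 */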
| linux-master | net/mac80211/aead_api.c |
// SPDX-License-Identifier: GPL-2.0-only
/*
* Copyright 2003-2005 Devicescape Software, Inc.
* Copyright (c) 2006 Jiri Benc <[email protected]>
* Copyright 2007 Johannes Berg <[email protected]>
* Copyright (C) 2015 Intel Deutschland GmbH
* Copyright (C) 2021-2022 Intel Corporation
*/
#include <linux/kobject.h>
#include <linux/slab.h>
#include "ieee80211_i.h"
#include "key.h"
#include "debugfs.h"
#include "debugfs_key.h"
#define KEY_READ(name, prop, format_string) \
static ssize_t key_##name##_read(struct file *file, \
char __user *userbuf, \
size_t count, loff_t *ppos) \
{ \
struct ieee80211_key *key = file->private_data; \
return mac80211_format_buffer(userbuf, count, ppos, \
format_string, key->prop); \
}
#define KEY_READ_X(name) KEY_READ(name, name, "0x%x\n")
#define KEY_OPS(name) \
static const struct file_operations key_ ##name## _ops = { \
.read = key_##name##_read, \
.open = simple_open, \
.llseek = generic_file_llseek, \
}
#define KEY_OPS_W(name) \
static const struct file_operations key_ ##name## _ops = { \
.read = key_##name##_read, \
.write = key_##name##_write, \
.open = simple_open, \
.llseek = generic_file_llseek, \
}
#define KEY_FILE(name, format) \
KEY_READ_##format(name) \
KEY_OPS(name)
#define KEY_CONF_READ(name, format_string) \
KEY_READ(conf_##name, conf.name, format_string)
#define KEY_CONF_READ_D(name) KEY_CONF_READ(name, "%d\n")
#define KEY_CONF_OPS(name) \
static const struct file_operations key_ ##name## _ops = { \
.read = key_conf_##name##_read, \
.open = simple_open, \
.llseek = generic_file_llseek, \
}
#define KEY_CONF_FILE(name, format) \
KEY_CONF_READ_##format(name) \
KEY_CONF_OPS(name)
KEY_CONF_FILE(keylen, D);
KEY_CONF_FILE(keyidx, D);
KEY_CONF_FILE(hw_key_idx, D);
KEY_FILE(flags, X);
KEY_READ(ifindex, sdata->name, "%s\n");
KEY_OPS(ifindex);
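/*
 * For reference, a single KEY_CONF_FILE(keylen, D) above expands to
 * roughly the following:
 *
 *	static ssize_t key_conf_keylen_read(struct file *file,
 *					    char __user *userbuf,
 *					    size_t count, loff_t *ppos)
 *	{
 *		struct ieee80211_key *key = file->private_data;
 *		return mac80211_format_buffer(userbuf, count, ppos,
 *					      "%d\n", key->conf.keylen);
 *	}
 *	static const struct file_operations key_keylen_ops = {
 *		.read = key_conf_keylen_read,
 *		.open = simple_open,
 *		.llseek = generic_file_llseek,
 *	};
 */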
static ssize_t key_algorithm_read(struct file *file,
char __user *userbuf,
size_t count, loff_t *ppos)
{
char buf[15];
struct ieee80211_key *key = file->private_data;
u32 c = key->conf.cipher;
sprintf(buf, "%.2x-%.2x-%.2x:%d\n",
c >> 24, (c >> 16) & 0xff, (c >> 8) & 0xff, c & 0xff);
return simple_read_from_buffer(userbuf, count, ppos, buf, strlen(buf));
}
KEY_OPS(algorithm);
static ssize_t key_tx_spec_write(struct file *file, const char __user *userbuf,
size_t count, loff_t *ppos)
{
struct ieee80211_key *key = file->private_data;
u64 pn;
int ret;
switch (key->conf.cipher) {
case WLAN_CIPHER_SUITE_WEP40:
case WLAN_CIPHER_SUITE_WEP104:
return -EINVAL;
case WLAN_CIPHER_SUITE_TKIP:
/* not supported yet */
return -EOPNOTSUPP;
case WLAN_CIPHER_SUITE_CCMP:
case WLAN_CIPHER_SUITE_CCMP_256:
case WLAN_CIPHER_SUITE_AES_CMAC:
case WLAN_CIPHER_SUITE_BIP_CMAC_256:
case WLAN_CIPHER_SUITE_BIP_GMAC_128:
case WLAN_CIPHER_SUITE_BIP_GMAC_256:
case WLAN_CIPHER_SUITE_GCMP:
case WLAN_CIPHER_SUITE_GCMP_256:
ret = kstrtou64_from_user(userbuf, count, 16, &pn);
if (ret)
return ret;
/* PN is a 48-bit counter */
if (pn >= (1ULL << 48))
return -ERANGE;
atomic64_set(&key->conf.tx_pn, pn);
return count;
default:
return 0;
}
}
static ssize_t key_tx_spec_read(struct file *file, char __user *userbuf,
size_t count, loff_t *ppos)
{
u64 pn;
char buf[20];
int len;
struct ieee80211_key *key = file->private_data;
switch (key->conf.cipher) {
case WLAN_CIPHER_SUITE_WEP40:
case WLAN_CIPHER_SUITE_WEP104:
len = scnprintf(buf, sizeof(buf), "\n");
break;
case WLAN_CIPHER_SUITE_TKIP:
pn = atomic64_read(&key->conf.tx_pn);
len = scnprintf(buf, sizeof(buf), "%08x %04x\n",
TKIP_PN_TO_IV32(pn),
TKIP_PN_TO_IV16(pn));
break;
case WLAN_CIPHER_SUITE_CCMP:
case WLAN_CIPHER_SUITE_CCMP_256:
case WLAN_CIPHER_SUITE_AES_CMAC:
case WLAN_CIPHER_SUITE_BIP_CMAC_256:
case WLAN_CIPHER_SUITE_BIP_GMAC_128:
case WLAN_CIPHER_SUITE_BIP_GMAC_256:
case WLAN_CIPHER_SUITE_GCMP:
case WLAN_CIPHER_SUITE_GCMP_256:
pn = atomic64_read(&key->conf.tx_pn);
len = scnprintf(buf, sizeof(buf), "%02x%02x%02x%02x%02x%02x\n",
(u8)(pn >> 40), (u8)(pn >> 32), (u8)(pn >> 24),
(u8)(pn >> 16), (u8)(pn >> 8), (u8)pn);
break;
default:
return 0;
}
return simple_read_from_buffer(userbuf, count, ppos, buf, len);
}
KEY_OPS_W(tx_spec);
static ssize_t key_rx_spec_read(struct file *file, char __user *userbuf,
size_t count, loff_t *ppos)
{
struct ieee80211_key *key = file->private_data;
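/* 14 chars per TID covers the longest per-line format, TKIP's
 * "%08x %04x\n"; the 13-char PN lines for CCMP/GCMP fit as well.
 */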
char buf[14*IEEE80211_NUM_TIDS+1], *p = buf;
int i, len;
const u8 *rpn;
switch (key->conf.cipher) {
case WLAN_CIPHER_SUITE_WEP40:
case WLAN_CIPHER_SUITE_WEP104:
len = scnprintf(buf, sizeof(buf), "\n");
break;
case WLAN_CIPHER_SUITE_TKIP:
for (i = 0; i < IEEE80211_NUM_TIDS; i++)
p += scnprintf(p, sizeof(buf)+buf-p,
"%08x %04x\n",
key->u.tkip.rx[i].iv32,
key->u.tkip.rx[i].iv16);
len = p - buf;
break;
case WLAN_CIPHER_SUITE_CCMP:
case WLAN_CIPHER_SUITE_CCMP_256:
for (i = 0; i < IEEE80211_NUM_TIDS + 1; i++) {
rpn = key->u.ccmp.rx_pn[i];
p += scnprintf(p, sizeof(buf)+buf-p,
"%02x%02x%02x%02x%02x%02x\n",
rpn[0], rpn[1], rpn[2],
rpn[3], rpn[4], rpn[5]);
}
len = p - buf;
break;
case WLAN_CIPHER_SUITE_AES_CMAC:
case WLAN_CIPHER_SUITE_BIP_CMAC_256:
rpn = key->u.aes_cmac.rx_pn;
p += scnprintf(p, sizeof(buf)+buf-p,
"%02x%02x%02x%02x%02x%02x\n",
rpn[0], rpn[1], rpn[2],
rpn[3], rpn[4], rpn[5]);
len = p - buf;
break;
case WLAN_CIPHER_SUITE_BIP_GMAC_128:
case WLAN_CIPHER_SUITE_BIP_GMAC_256:
rpn = key->u.aes_gmac.rx_pn;
p += scnprintf(p, sizeof(buf)+buf-p,
"%02x%02x%02x%02x%02x%02x\n",
rpn[0], rpn[1], rpn[2],
rpn[3], rpn[4], rpn[5]);
len = p - buf;
break;
case WLAN_CIPHER_SUITE_GCMP:
case WLAN_CIPHER_SUITE_GCMP_256:
for (i = 0; i < IEEE80211_NUM_TIDS + 1; i++) {
rpn = key->u.gcmp.rx_pn[i];
p += scnprintf(p, sizeof(buf)+buf-p,
"%02x%02x%02x%02x%02x%02x\n",
rpn[0], rpn[1], rpn[2],
rpn[3], rpn[4], rpn[5]);
}
len = p - buf;
break;
default:
return 0;
}
return simple_read_from_buffer(userbuf, count, ppos, buf, len);
}
KEY_OPS(rx_spec);
static ssize_t key_replays_read(struct file *file, char __user *userbuf,
size_t count, loff_t *ppos)
{
struct ieee80211_key *key = file->private_data;
char buf[20];
int len;
switch (key->conf.cipher) {
case WLAN_CIPHER_SUITE_CCMP:
case WLAN_CIPHER_SUITE_CCMP_256:
len = scnprintf(buf, sizeof(buf), "%u\n", key->u.ccmp.replays);
break;
case WLAN_CIPHER_SUITE_AES_CMAC:
case WLAN_CIPHER_SUITE_BIP_CMAC_256:
len = scnprintf(buf, sizeof(buf), "%u\n",
key->u.aes_cmac.replays);
break;
case WLAN_CIPHER_SUITE_BIP_GMAC_128:
case WLAN_CIPHER_SUITE_BIP_GMAC_256:
len = scnprintf(buf, sizeof(buf), "%u\n",
key->u.aes_gmac.replays);
break;
case WLAN_CIPHER_SUITE_GCMP:
case WLAN_CIPHER_SUITE_GCMP_256:
len = scnprintf(buf, sizeof(buf), "%u\n", key->u.gcmp.replays);
break;
default:
return 0;
}
return simple_read_from_buffer(userbuf, count, ppos, buf, len);
}
KEY_OPS(replays);
static ssize_t key_icverrors_read(struct file *file, char __user *userbuf,
size_t count, loff_t *ppos)
{
struct ieee80211_key *key = file->private_data;
char buf[20];
int len;
switch (key->conf.cipher) {
case WLAN_CIPHER_SUITE_AES_CMAC:
case WLAN_CIPHER_SUITE_BIP_CMAC_256:
len = scnprintf(buf, sizeof(buf), "%u\n",
key->u.aes_cmac.icverrors);
break;
case WLAN_CIPHER_SUITE_BIP_GMAC_128:
case WLAN_CIPHER_SUITE_BIP_GMAC_256:
len = scnprintf(buf, sizeof(buf), "%u\n",
key->u.aes_gmac.icverrors);
break;
default:
return 0;
}
return simple_read_from_buffer(userbuf, count, ppos, buf, len);
}
KEY_OPS(icverrors);
static ssize_t key_mic_failures_read(struct file *file, char __user *userbuf,
size_t count, loff_t *ppos)
{
struct ieee80211_key *key = file->private_data;
char buf[20];
int len;
if (key->conf.cipher != WLAN_CIPHER_SUITE_TKIP)
return -EINVAL;
len = scnprintf(buf, sizeof(buf), "%u\n", key->u.tkip.mic_failures);
return simple_read_from_buffer(userbuf, count, ppos, buf, len);
}
KEY_OPS(mic_failures);
static ssize_t key_key_read(struct file *file, char __user *userbuf,
size_t count, loff_t *ppos)
{
struct ieee80211_key *key = file->private_data;
int i, bufsize = 2 * key->conf.keylen + 2;
char *buf = kmalloc(bufsize, GFP_KERNEL);
char *p = buf;
ssize_t res;
if (!buf)
return -ENOMEM;
for (i = 0; i < key->conf.keylen; i++)
p += scnprintf(p, bufsize + buf - p, "%02x", key->conf.key[i]);
p += scnprintf(p, bufsize+buf-p, "\n");
res = simple_read_from_buffer(userbuf, count, ppos, buf, p - buf);
kfree(buf);
return res;
}
KEY_OPS(key);
#define DEBUGFS_ADD(name) \
debugfs_create_file(#name, 0400, key->debugfs.dir, \
key, &key_##name##_ops)
#define DEBUGFS_ADD_W(name) \
debugfs_create_file(#name, 0600, key->debugfs.dir, \
key, &key_##name##_ops);
void ieee80211_debugfs_key_add(struct ieee80211_key *key)
{
static int keycount;
char buf[100];
struct sta_info *sta;
if (!key->local->debugfs.keys)
return;
sprintf(buf, "%d", keycount);
key->debugfs.cnt = keycount;
keycount++;
key->debugfs.dir = debugfs_create_dir(buf,
key->local->debugfs.keys);
sta = key->sta;
if (sta) {
sprintf(buf, "../../netdev:%s/stations/%pM",
sta->sdata->name, sta->sta.addr);
key->debugfs.stalink =
debugfs_create_symlink("station", key->debugfs.dir, buf);
}
DEBUGFS_ADD(keylen);
DEBUGFS_ADD(flags);
DEBUGFS_ADD(keyidx);
DEBUGFS_ADD(hw_key_idx);
DEBUGFS_ADD(algorithm);
DEBUGFS_ADD_W(tx_spec);
DEBUGFS_ADD(rx_spec);
DEBUGFS_ADD(replays);
DEBUGFS_ADD(icverrors);
DEBUGFS_ADD(mic_failures);
DEBUGFS_ADD(key);
DEBUGFS_ADD(ifindex);
}
void ieee80211_debugfs_key_remove(struct ieee80211_key *key)
{
if (!key)
return;
debugfs_remove_recursive(key->debugfs.dir);
key->debugfs.dir = NULL;
}
void ieee80211_debugfs_key_update_default(struct ieee80211_sub_if_data *sdata)
{
char buf[50];
struct ieee80211_key *key;
if (!sdata->vif.debugfs_dir)
return;
lockdep_assert_held(&sdata->local->key_mtx);
debugfs_remove(sdata->debugfs.default_unicast_key);
sdata->debugfs.default_unicast_key = NULL;
if (sdata->default_unicast_key) {
key = key_mtx_dereference(sdata->local,
sdata->default_unicast_key);
sprintf(buf, "../keys/%d", key->debugfs.cnt);
sdata->debugfs.default_unicast_key =
debugfs_create_symlink("default_unicast_key",
sdata->vif.debugfs_dir, buf);
}
debugfs_remove(sdata->debugfs.default_multicast_key);
sdata->debugfs.default_multicast_key = NULL;
if (sdata->deflink.default_multicast_key) {
key = key_mtx_dereference(sdata->local,
sdata->deflink.default_multicast_key);
sprintf(buf, "../keys/%d", key->debugfs.cnt);
sdata->debugfs.default_multicast_key =
debugfs_create_symlink("default_multicast_key",
sdata->vif.debugfs_dir, buf);
}
}
void ieee80211_debugfs_key_add_mgmt_default(struct ieee80211_sub_if_data *sdata)
{
char buf[50];
struct ieee80211_key *key;
if (!sdata->vif.debugfs_dir)
return;
key = key_mtx_dereference(sdata->local,
sdata->deflink.default_mgmt_key);
if (key) {
sprintf(buf, "../keys/%d", key->debugfs.cnt);
sdata->debugfs.default_mgmt_key =
debugfs_create_symlink("default_mgmt_key",
sdata->vif.debugfs_dir, buf);
} else
ieee80211_debugfs_key_remove_mgmt_default(sdata);
}
void ieee80211_debugfs_key_remove_mgmt_default(struct ieee80211_sub_if_data *sdata)
{
if (!sdata)
return;
debugfs_remove(sdata->debugfs.default_mgmt_key);
sdata->debugfs.default_mgmt_key = NULL;
}
void
ieee80211_debugfs_key_add_beacon_default(struct ieee80211_sub_if_data *sdata)
{
char buf[50];
struct ieee80211_key *key;
if (!sdata->vif.debugfs_dir)
return;
key = key_mtx_dereference(sdata->local,
sdata->deflink.default_beacon_key);
if (key) {
sprintf(buf, "../keys/%d", key->debugfs.cnt);
sdata->debugfs.default_beacon_key =
debugfs_create_symlink("default_beacon_key",
sdata->vif.debugfs_dir, buf);
} else {
ieee80211_debugfs_key_remove_beacon_default(sdata);
}
}
void
ieee80211_debugfs_key_remove_beacon_default(struct ieee80211_sub_if_data *sdata)
{
if (!sdata)
return;
debugfs_remove(sdata->debugfs.default_beacon_key);
sdata->debugfs.default_beacon_key = NULL;
}
void ieee80211_debugfs_key_sta_del(struct ieee80211_key *key,
struct sta_info *sta)
{
debugfs_remove(key->debugfs.stalink);
key->debugfs.stalink = NULL;
}
| linux-master | net/mac80211/debugfs_key.c |
// SPDX-License-Identifier: GPL-2.0-only
/*
* Copyright 2002-2005, Instant802 Networks, Inc.
* Copyright 2005-2006, Devicescape Software, Inc.
* Copyright 2006-2007 Jiri Benc <[email protected]>
* Copyright 2007-2008 Johannes Berg <[email protected]>
* Copyright 2013-2014 Intel Mobile Communications GmbH
* Copyright 2015-2017 Intel Deutschland GmbH
* Copyright 2018-2020, 2022-2023 Intel Corporation
*/
#include <crypto/utils.h>
#include <linux/if_ether.h>
#include <linux/etherdevice.h>
#include <linux/list.h>
#include <linux/rcupdate.h>
#include <linux/rtnetlink.h>
#include <linux/slab.h>
#include <linux/export.h>
#include <net/mac80211.h>
#include <asm/unaligned.h>
#include "ieee80211_i.h"
#include "driver-ops.h"
#include "debugfs_key.h"
#include "aes_ccm.h"
#include "aes_cmac.h"
#include "aes_gmac.h"
#include "aes_gcm.h"
/**
* DOC: Key handling basics
*
* Key handling in mac80211 is done based on per-interface (sub_if_data)
* keys and per-station keys. Since each station belongs to an interface,
* each station key also belongs to that interface.
*
* Hardware acceleration is done on a best-effort basis for algorithms
* that are implemented in software, for each key the hardware is asked
* to enable that key for offloading but if it cannot do that the key is
* simply kept for software encryption (unless it is for an algorithm
* that isn't implemented in software).
* There is currently no way of knowing whether a key is handled in SW
* or HW except by looking into debugfs.
*
* All key management is internally protected by a mutex. Within all
* other parts of mac80211, key references are, just as STA structure
* references, protected by RCU. Note, however, that some things are
* unprotected, namely the key->sta dereferences within the hardware
* acceleration functions. This means that sta_info_destroy() must
* remove the key which waits for an RCU grace period.
*/
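/*
 * Illustrative flow (sketch): the cfg80211 add_key path allocates a key
 * and then links it here, which in turn attempts hardware offload:
 *
 *	key = ieee80211_key_alloc(cipher, idx, key_len, key_data,
 *				  seq_len, seq);
 *	if (IS_ERR(key))
 *		return PTR_ERR(key);
 *	err = ieee80211_key_link(key, link, sta);
 *
 * ieee80211_key_link() takes ownership of the key, replaces any existing
 * key with the same index and tries to program it via drv_set_key().
 */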
static const u8 bcast_addr[ETH_ALEN] = { 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF };
static void assert_key_lock(struct ieee80211_local *local)
{
lockdep_assert_held(&local->key_mtx);
}
static void
update_vlan_tailroom_need_count(struct ieee80211_sub_if_data *sdata, int delta)
{
struct ieee80211_sub_if_data *vlan;
if (sdata->vif.type != NL80211_IFTYPE_AP)
return;
/* crypto_tx_tailroom_needed_cnt is protected by this */
assert_key_lock(sdata->local);
rcu_read_lock();
list_for_each_entry_rcu(vlan, &sdata->u.ap.vlans, u.vlan.list)
vlan->crypto_tx_tailroom_needed_cnt += delta;
rcu_read_unlock();
}
static void increment_tailroom_need_count(struct ieee80211_sub_if_data *sdata)
{
/*
* When this count is zero, SKB resizing for allocating tailroom
* for IV or MMIC is skipped. But, this check has created two race
* cases in xmit path while transiting from zero count to one:
*
* 1. SKB resize was skipped because no key was added but just before
* the xmit key is added and SW encryption kicks off.
*
* 2. SKB resize was skipped because all the keys were hw planted but
* just before xmit one of the key is deleted and SW encryption kicks
* off.
*
* In both the above case SW encryption will find not enough space for
* tailroom and exits with WARN_ON. (See WARN_ONs at wpa.c)
*
* Solution has been explained at
* http://mid.gmane.org/[email protected]
*/
assert_key_lock(sdata->local);
update_vlan_tailroom_need_count(sdata, 1);
if (!sdata->crypto_tx_tailroom_needed_cnt++) {
/*
* Flush all XMIT packets currently using HW encryption or no
* encryption at all if the count transition is from 0 -> 1.
*/
synchronize_net();
}
}
static void decrease_tailroom_need_count(struct ieee80211_sub_if_data *sdata,
int delta)
{
assert_key_lock(sdata->local);
WARN_ON_ONCE(sdata->crypto_tx_tailroom_needed_cnt < delta);
update_vlan_tailroom_need_count(sdata, -delta);
sdata->crypto_tx_tailroom_needed_cnt -= delta;
}
static int ieee80211_key_enable_hw_accel(struct ieee80211_key *key)
{
struct ieee80211_sub_if_data *sdata = key->sdata;
struct sta_info *sta;
int ret = -EOPNOTSUPP;
might_sleep();
if (key->flags & KEY_FLAG_TAINTED) {
/* If we get here, it's during resume and the key is
* tainted so shouldn't be used/programmed any more.
* However, its flags may still indicate that it was
* programmed into the device (since we're in resume)
* so clear that flag now to avoid trying to remove
* it again later.
*/
if (key->flags & KEY_FLAG_UPLOADED_TO_HARDWARE &&
!(key->conf.flags & (IEEE80211_KEY_FLAG_GENERATE_MMIC |
IEEE80211_KEY_FLAG_PUT_MIC_SPACE |
IEEE80211_KEY_FLAG_RESERVE_TAILROOM)))
increment_tailroom_need_count(sdata);
key->flags &= ~KEY_FLAG_UPLOADED_TO_HARDWARE;
return -EINVAL;
}
if (!key->local->ops->set_key)
goto out_unsupported;
assert_key_lock(key->local);
sta = key->sta;
/*
* If this is a per-STA GTK, check if it
* is supported; if not, return.
*/
if (sta && !(key->conf.flags & IEEE80211_KEY_FLAG_PAIRWISE) &&
!ieee80211_hw_check(&key->local->hw, SUPPORTS_PER_STA_GTK))
goto out_unsupported;
if (sta && !sta->uploaded)
goto out_unsupported;
if (sdata->vif.type == NL80211_IFTYPE_AP_VLAN) {
/*
* The driver doesn't know anything about VLAN interfaces.
* Hence, don't send GTKs for VLAN interfaces to the driver.
*/
if (!(key->conf.flags & IEEE80211_KEY_FLAG_PAIRWISE)) {
ret = 1;
goto out_unsupported;
}
}
if (key->conf.link_id >= 0 && sdata->vif.active_links &&
!(sdata->vif.active_links & BIT(key->conf.link_id)))
return 0;
ret = drv_set_key(key->local, SET_KEY, sdata,
sta ? &sta->sta : NULL, &key->conf);
if (!ret) {
key->flags |= KEY_FLAG_UPLOADED_TO_HARDWARE;
if (!(key->conf.flags & (IEEE80211_KEY_FLAG_GENERATE_MMIC |
IEEE80211_KEY_FLAG_PUT_MIC_SPACE |
IEEE80211_KEY_FLAG_RESERVE_TAILROOM)))
decrease_tailroom_need_count(sdata, 1);
WARN_ON((key->conf.flags & IEEE80211_KEY_FLAG_PUT_IV_SPACE) &&
(key->conf.flags & IEEE80211_KEY_FLAG_GENERATE_IV));
WARN_ON((key->conf.flags & IEEE80211_KEY_FLAG_PUT_MIC_SPACE) &&
(key->conf.flags & IEEE80211_KEY_FLAG_GENERATE_MMIC));
return 0;
}
if (ret != -ENOSPC && ret != -EOPNOTSUPP && ret != 1)
sdata_err(sdata,
"failed to set key (%d, %pM) to hardware (%d)\n",
key->conf.keyidx,
sta ? sta->sta.addr : bcast_addr, ret);
out_unsupported:
switch (key->conf.cipher) {
case WLAN_CIPHER_SUITE_WEP40:
case WLAN_CIPHER_SUITE_WEP104:
case WLAN_CIPHER_SUITE_TKIP:
case WLAN_CIPHER_SUITE_CCMP:
case WLAN_CIPHER_SUITE_CCMP_256:
case WLAN_CIPHER_SUITE_GCMP:
case WLAN_CIPHER_SUITE_GCMP_256:
case WLAN_CIPHER_SUITE_AES_CMAC:
case WLAN_CIPHER_SUITE_BIP_CMAC_256:
case WLAN_CIPHER_SUITE_BIP_GMAC_128:
case WLAN_CIPHER_SUITE_BIP_GMAC_256:
/* all of these we can do in software - if driver can */
if (ret == 1)
return 0;
if (ieee80211_hw_check(&key->local->hw, SW_CRYPTO_CONTROL))
return -EINVAL;
return 0;
default:
return -EINVAL;
}
}
static void ieee80211_key_disable_hw_accel(struct ieee80211_key *key)
{
struct ieee80211_sub_if_data *sdata;
struct sta_info *sta;
int ret;
might_sleep();
if (!key || !key->local->ops->set_key)
return;
assert_key_lock(key->local);
if (!(key->flags & KEY_FLAG_UPLOADED_TO_HARDWARE))
return;
sta = key->sta;
sdata = key->sdata;
if (key->conf.link_id >= 0 && sdata->vif.active_links &&
!(sdata->vif.active_links & BIT(key->conf.link_id)))
return;
if (!(key->conf.flags & (IEEE80211_KEY_FLAG_GENERATE_MMIC |
IEEE80211_KEY_FLAG_PUT_MIC_SPACE |
IEEE80211_KEY_FLAG_RESERVE_TAILROOM)))
increment_tailroom_need_count(sdata);
key->flags &= ~KEY_FLAG_UPLOADED_TO_HARDWARE;
ret = drv_set_key(key->local, DISABLE_KEY, sdata,
sta ? &sta->sta : NULL, &key->conf);
if (ret)
sdata_err(sdata,
"failed to remove key (%d, %pM) from hardware (%d)\n",
key->conf.keyidx,
sta ? sta->sta.addr : bcast_addr, ret);
}
static int _ieee80211_set_tx_key(struct ieee80211_key *key, bool force)
{
struct sta_info *sta = key->sta;
struct ieee80211_local *local = key->local;
assert_key_lock(local);
set_sta_flag(sta, WLAN_STA_USES_ENCRYPTION);
sta->ptk_idx = key->conf.keyidx;
if (force || !ieee80211_hw_check(&local->hw, AMPDU_KEYBORDER_SUPPORT))
clear_sta_flag(sta, WLAN_STA_BLOCK_BA);
ieee80211_check_fast_xmit(sta);
return 0;
}
int ieee80211_set_tx_key(struct ieee80211_key *key)
{
return _ieee80211_set_tx_key(key, false);
}
static void ieee80211_pairwise_rekey(struct ieee80211_key *old,
struct ieee80211_key *new)
{
struct ieee80211_local *local = new->local;
struct sta_info *sta = new->sta;
int i;
assert_key_lock(local);
if (new->conf.flags & IEEE80211_KEY_FLAG_NO_AUTO_TX) {
/* Extended Key ID key install, initial one or rekey */
if (sta->ptk_idx != INVALID_PTK_KEYIDX &&
!ieee80211_hw_check(&local->hw, AMPDU_KEYBORDER_SUPPORT)) {
/* Aggregation Sessions with Extended Key ID must not
* mix MPDUs with different keyIDs within one A-MPDU.
* Tear down running Tx aggregation sessions and block
* new Rx/Tx aggregation requests during rekey to
* ensure there are no A-MPDUs when the driver is not
* supporting A-MPDU key borders. (Blocking Tx only
* would be sufficient but WLAN_STA_BLOCK_BA gets the
* job done for the few ms we need it.)
*/
set_sta_flag(sta, WLAN_STA_BLOCK_BA);
mutex_lock(&sta->ampdu_mlme.mtx);
for (i = 0; i < IEEE80211_NUM_TIDS; i++)
___ieee80211_stop_tx_ba_session(sta, i,
AGG_STOP_LOCAL_REQUEST);
mutex_unlock(&sta->ampdu_mlme.mtx);
}
} else if (old) {
/* Rekey without Extended Key ID.
* Aggregation sessions are OK when running on SW crypto.
* A broken remote STA may cause issues not observed with HW
* crypto, though.
*/
if (!(old->flags & KEY_FLAG_UPLOADED_TO_HARDWARE))
return;
/* Stop Tx till we are on the new key */
old->flags |= KEY_FLAG_TAINTED;
ieee80211_clear_fast_xmit(sta);
if (ieee80211_hw_check(&local->hw, AMPDU_AGGREGATION)) {
set_sta_flag(sta, WLAN_STA_BLOCK_BA);
ieee80211_sta_tear_down_BA_sessions(sta,
AGG_STOP_LOCAL_REQUEST);
}
if (!wiphy_ext_feature_isset(local->hw.wiphy,
NL80211_EXT_FEATURE_CAN_REPLACE_PTK0)) {
pr_warn_ratelimited("Rekeying PTK for STA %pM but driver can't safely do that.",
sta->sta.addr);
/* Flushing the driver queues *may* help prevent
* the clear text leaks and freezes.
*/
ieee80211_flush_queues(local, old->sdata, false);
}
}
}
static void __ieee80211_set_default_key(struct ieee80211_link_data *link,
int idx, bool uni, bool multi)
{
struct ieee80211_sub_if_data *sdata = link->sdata;
struct ieee80211_key *key = NULL;
assert_key_lock(sdata->local);
if (idx >= 0 && idx < NUM_DEFAULT_KEYS) {
key = key_mtx_dereference(sdata->local, sdata->keys[idx]);
if (!key)
key = key_mtx_dereference(sdata->local, link->gtk[idx]);
}
if (uni) {
rcu_assign_pointer(sdata->default_unicast_key, key);
ieee80211_check_fast_xmit_iface(sdata);
if (sdata->vif.type != NL80211_IFTYPE_AP_VLAN)
drv_set_default_unicast_key(sdata->local, sdata, idx);
}
if (multi)
rcu_assign_pointer(link->default_multicast_key, key);
ieee80211_debugfs_key_update_default(sdata);
}
void ieee80211_set_default_key(struct ieee80211_link_data *link, int idx,
bool uni, bool multi)
{
mutex_lock(&link->sdata->local->key_mtx);
__ieee80211_set_default_key(link, idx, uni, multi);
mutex_unlock(&link->sdata->local->key_mtx);
}
static void
__ieee80211_set_default_mgmt_key(struct ieee80211_link_data *link, int idx)
{
struct ieee80211_sub_if_data *sdata = link->sdata;
struct ieee80211_key *key = NULL;
assert_key_lock(sdata->local);
if (idx >= NUM_DEFAULT_KEYS &&
idx < NUM_DEFAULT_KEYS + NUM_DEFAULT_MGMT_KEYS)
key = key_mtx_dereference(sdata->local, link->gtk[idx]);
rcu_assign_pointer(link->default_mgmt_key, key);
ieee80211_debugfs_key_update_default(sdata);
}
void ieee80211_set_default_mgmt_key(struct ieee80211_link_data *link,
int idx)
{
mutex_lock(&link->sdata->local->key_mtx);
__ieee80211_set_default_mgmt_key(link, idx);
mutex_unlock(&link->sdata->local->key_mtx);
}
static void
__ieee80211_set_default_beacon_key(struct ieee80211_link_data *link, int idx)
{
struct ieee80211_sub_if_data *sdata = link->sdata;
struct ieee80211_key *key = NULL;
assert_key_lock(sdata->local);
if (idx >= NUM_DEFAULT_KEYS + NUM_DEFAULT_MGMT_KEYS &&
idx < NUM_DEFAULT_KEYS + NUM_DEFAULT_MGMT_KEYS +
NUM_DEFAULT_BEACON_KEYS)
key = key_mtx_dereference(sdata->local, link->gtk[idx]);
rcu_assign_pointer(link->default_beacon_key, key);
ieee80211_debugfs_key_update_default(sdata);
}
void ieee80211_set_default_beacon_key(struct ieee80211_link_data *link,
int idx)
{
mutex_lock(&link->sdata->local->key_mtx);
__ieee80211_set_default_beacon_key(link, idx);
mutex_unlock(&link->sdata->local->key_mtx);
}
static int ieee80211_key_replace(struct ieee80211_sub_if_data *sdata,
struct ieee80211_link_data *link,
struct sta_info *sta,
bool pairwise,
struct ieee80211_key *old,
struct ieee80211_key *new)
{
struct link_sta_info *link_sta = sta ? &sta->deflink : NULL;
int link_id;
int idx;
int ret = 0;
bool defunikey, defmultikey, defmgmtkey, defbeaconkey;
bool is_wep;
/* caller must provide at least one old/new */
if (WARN_ON(!new && !old))
return 0;
if (new) {
idx = new->conf.keyidx;
is_wep = new->conf.cipher == WLAN_CIPHER_SUITE_WEP40 ||
new->conf.cipher == WLAN_CIPHER_SUITE_WEP104;
link_id = new->conf.link_id;
} else {
idx = old->conf.keyidx;
is_wep = old->conf.cipher == WLAN_CIPHER_SUITE_WEP40 ||
old->conf.cipher == WLAN_CIPHER_SUITE_WEP104;
link_id = old->conf.link_id;
}
if (WARN(old && old->conf.link_id != link_id,
"old link ID %d doesn't match new link ID %d\n",
old->conf.link_id, link_id))
return -EINVAL;
if (link_id >= 0) {
if (!link) {
link = sdata_dereference(sdata->link[link_id], sdata);
if (!link)
return -ENOLINK;
}
if (sta) {
link_sta = rcu_dereference_protected(sta->link[link_id],
lockdep_is_held(&sta->local->sta_mtx));
if (!link_sta)
return -ENOLINK;
}
} else {
link = &sdata->deflink;
}
if ((is_wep || pairwise) && idx >= NUM_DEFAULT_KEYS)
return -EINVAL;
WARN_ON(new && old && new->conf.keyidx != old->conf.keyidx);
if (new && sta && pairwise) {
/* Unicast rekey needs special handling. With Extended Key ID
* old is still NULL for the first rekey.
*/
ieee80211_pairwise_rekey(old, new);
}
if (old) {
if (old->flags & KEY_FLAG_UPLOADED_TO_HARDWARE) {
ieee80211_key_disable_hw_accel(old);
if (new)
ret = ieee80211_key_enable_hw_accel(new);
}
} else {
if (!new->local->wowlan) {
ret = ieee80211_key_enable_hw_accel(new);
} else {
assert_key_lock(new->local);
new->flags |= KEY_FLAG_UPLOADED_TO_HARDWARE;
}
}
if (ret)
return ret;
if (new)
list_add_tail_rcu(&new->list, &sdata->key_list);
if (sta) {
if (pairwise) {
rcu_assign_pointer(sta->ptk[idx], new);
if (new &&
!(new->conf.flags & IEEE80211_KEY_FLAG_NO_AUTO_TX))
_ieee80211_set_tx_key(new, true);
} else {
rcu_assign_pointer(link_sta->gtk[idx], new);
}
/* Only needed for transition from no key -> key.
* Still triggers unnecessary when using Extended Key ID
* and installing the second key ID the first time.
*/
if (new && !old)
ieee80211_check_fast_rx(sta);
} else {
defunikey = old &&
old == key_mtx_dereference(sdata->local,
sdata->default_unicast_key);
defmultikey = old &&
old == key_mtx_dereference(sdata->local,
link->default_multicast_key);
defmgmtkey = old &&
old == key_mtx_dereference(sdata->local,
link->default_mgmt_key);
defbeaconkey = old &&
old == key_mtx_dereference(sdata->local,
link->default_beacon_key);
if (defunikey && !new)
__ieee80211_set_default_key(link, -1, true, false);
if (defmultikey && !new)
__ieee80211_set_default_key(link, -1, false, true);
if (defmgmtkey && !new)
__ieee80211_set_default_mgmt_key(link, -1);
if (defbeaconkey && !new)
__ieee80211_set_default_beacon_key(link, -1);
if (is_wep || pairwise)
rcu_assign_pointer(sdata->keys[idx], new);
else
rcu_assign_pointer(link->gtk[idx], new);
if (defunikey && new)
__ieee80211_set_default_key(link, new->conf.keyidx,
true, false);
if (defmultikey && new)
__ieee80211_set_default_key(link, new->conf.keyidx,
false, true);
if (defmgmtkey && new)
__ieee80211_set_default_mgmt_key(link,
new->conf.keyidx);
if (defbeaconkey && new)
__ieee80211_set_default_beacon_key(link,
new->conf.keyidx);
}
if (old)
list_del_rcu(&old->list);
return 0;
}
struct ieee80211_key *
ieee80211_key_alloc(u32 cipher, int idx, size_t key_len,
const u8 *key_data,
size_t seq_len, const u8 *seq)
{
struct ieee80211_key *key;
int i, j, err;
if (WARN_ON(idx < 0 ||
idx >= NUM_DEFAULT_KEYS + NUM_DEFAULT_MGMT_KEYS +
NUM_DEFAULT_BEACON_KEYS))
return ERR_PTR(-EINVAL);
key = kzalloc(sizeof(struct ieee80211_key) + key_len, GFP_KERNEL);
if (!key)
return ERR_PTR(-ENOMEM);
/*
* Default to software encryption; we'll later upload the
* key to the hardware if possible.
*/
key->conf.flags = 0;
key->flags = 0;
key->conf.link_id = -1;
key->conf.cipher = cipher;
key->conf.keyidx = idx;
key->conf.keylen = key_len;
switch (cipher) {
case WLAN_CIPHER_SUITE_WEP40:
case WLAN_CIPHER_SUITE_WEP104:
key->conf.iv_len = IEEE80211_WEP_IV_LEN;
key->conf.icv_len = IEEE80211_WEP_ICV_LEN;
break;
case WLAN_CIPHER_SUITE_TKIP:
key->conf.iv_len = IEEE80211_TKIP_IV_LEN;
key->conf.icv_len = IEEE80211_TKIP_ICV_LEN;
if (seq) {
for (i = 0; i < IEEE80211_NUM_TIDS; i++) {
key->u.tkip.rx[i].iv32 =
get_unaligned_le32(&seq[2]);
key->u.tkip.rx[i].iv16 =
get_unaligned_le16(seq);
}
}
spin_lock_init(&key->u.tkip.txlock);
break;
case WLAN_CIPHER_SUITE_CCMP:
key->conf.iv_len = IEEE80211_CCMP_HDR_LEN;
key->conf.icv_len = IEEE80211_CCMP_MIC_LEN;
if (seq) {
for (i = 0; i < IEEE80211_NUM_TIDS + 1; i++)
for (j = 0; j < IEEE80211_CCMP_PN_LEN; j++)
key->u.ccmp.rx_pn[i][j] =
seq[IEEE80211_CCMP_PN_LEN - j - 1];
}
/*
* Initialize AES key state here as an optimization so that
* it does not need to be initialized for every packet.
*/
key->u.ccmp.tfm = ieee80211_aes_key_setup_encrypt(
key_data, key_len, IEEE80211_CCMP_MIC_LEN);
if (IS_ERR(key->u.ccmp.tfm)) {
err = PTR_ERR(key->u.ccmp.tfm);
kfree(key);
return ERR_PTR(err);
}
break;
case WLAN_CIPHER_SUITE_CCMP_256:
key->conf.iv_len = IEEE80211_CCMP_256_HDR_LEN;
key->conf.icv_len = IEEE80211_CCMP_256_MIC_LEN;
for (i = 0; seq && i < IEEE80211_NUM_TIDS + 1; i++)
for (j = 0; j < IEEE80211_CCMP_256_PN_LEN; j++)
key->u.ccmp.rx_pn[i][j] =
seq[IEEE80211_CCMP_256_PN_LEN - j - 1];
/* Initialize AES key state here as an optimization so that
* it does not need to be initialized for every packet.
*/
key->u.ccmp.tfm = ieee80211_aes_key_setup_encrypt(
key_data, key_len, IEEE80211_CCMP_256_MIC_LEN);
if (IS_ERR(key->u.ccmp.tfm)) {
err = PTR_ERR(key->u.ccmp.tfm);
kfree(key);
return ERR_PTR(err);
}
break;
case WLAN_CIPHER_SUITE_AES_CMAC:
case WLAN_CIPHER_SUITE_BIP_CMAC_256:
key->conf.iv_len = 0;
if (cipher == WLAN_CIPHER_SUITE_AES_CMAC)
key->conf.icv_len = sizeof(struct ieee80211_mmie);
else
key->conf.icv_len = sizeof(struct ieee80211_mmie_16);
if (seq)
for (j = 0; j < IEEE80211_CMAC_PN_LEN; j++)
key->u.aes_cmac.rx_pn[j] =
seq[IEEE80211_CMAC_PN_LEN - j - 1];
/*
* Initialize AES key state here as an optimization so that
* it does not need to be initialized for every packet.
*/
key->u.aes_cmac.tfm =
ieee80211_aes_cmac_key_setup(key_data, key_len);
if (IS_ERR(key->u.aes_cmac.tfm)) {
err = PTR_ERR(key->u.aes_cmac.tfm);
kfree(key);
return ERR_PTR(err);
}
break;
case WLAN_CIPHER_SUITE_BIP_GMAC_128:
case WLAN_CIPHER_SUITE_BIP_GMAC_256:
key->conf.iv_len = 0;
key->conf.icv_len = sizeof(struct ieee80211_mmie_16);
if (seq)
for (j = 0; j < IEEE80211_GMAC_PN_LEN; j++)
key->u.aes_gmac.rx_pn[j] =
seq[IEEE80211_GMAC_PN_LEN - j - 1];
/* Initialize AES key state here as an optimization so that
* it does not need to be initialized for every packet.
*/
key->u.aes_gmac.tfm =
ieee80211_aes_gmac_key_setup(key_data, key_len);
if (IS_ERR(key->u.aes_gmac.tfm)) {
err = PTR_ERR(key->u.aes_gmac.tfm);
kfree(key);
return ERR_PTR(err);
}
break;
case WLAN_CIPHER_SUITE_GCMP:
case WLAN_CIPHER_SUITE_GCMP_256:
key->conf.iv_len = IEEE80211_GCMP_HDR_LEN;
key->conf.icv_len = IEEE80211_GCMP_MIC_LEN;
for (i = 0; seq && i < IEEE80211_NUM_TIDS + 1; i++)
for (j = 0; j < IEEE80211_GCMP_PN_LEN; j++)
key->u.gcmp.rx_pn[i][j] =
seq[IEEE80211_GCMP_PN_LEN - j - 1];
/* Initialize AES key state here as an optimization so that
* it does not need to be initialized for every packet.
*/
key->u.gcmp.tfm = ieee80211_aes_gcm_key_setup_encrypt(key_data,
key_len);
if (IS_ERR(key->u.gcmp.tfm)) {
err = PTR_ERR(key->u.gcmp.tfm);
kfree(key);
return ERR_PTR(err);
}
break;
}
memcpy(key->conf.key, key_data, key_len);
INIT_LIST_HEAD(&key->list);
return key;
}
static void ieee80211_key_free_common(struct ieee80211_key *key)
{
switch (key->conf.cipher) {
case WLAN_CIPHER_SUITE_CCMP:
case WLAN_CIPHER_SUITE_CCMP_256:
ieee80211_aes_key_free(key->u.ccmp.tfm);
break;
case WLAN_CIPHER_SUITE_AES_CMAC:
case WLAN_CIPHER_SUITE_BIP_CMAC_256:
ieee80211_aes_cmac_key_free(key->u.aes_cmac.tfm);
break;
case WLAN_CIPHER_SUITE_BIP_GMAC_128:
case WLAN_CIPHER_SUITE_BIP_GMAC_256:
ieee80211_aes_gmac_key_free(key->u.aes_gmac.tfm);
break;
case WLAN_CIPHER_SUITE_GCMP:
case WLAN_CIPHER_SUITE_GCMP_256:
ieee80211_aes_gcm_key_free(key->u.gcmp.tfm);
break;
}
kfree_sensitive(key);
}
static void __ieee80211_key_destroy(struct ieee80211_key *key,
bool delay_tailroom)
{
if (key->local) {
struct ieee80211_sub_if_data *sdata = key->sdata;
ieee80211_debugfs_key_remove(key);
if (delay_tailroom) {
/* see ieee80211_delayed_tailroom_dec */
sdata->crypto_tx_tailroom_pending_dec++;
schedule_delayed_work(&sdata->dec_tailroom_needed_wk,
HZ/2);
} else {
decrease_tailroom_need_count(sdata, 1);
}
}
ieee80211_key_free_common(key);
}
static void ieee80211_key_destroy(struct ieee80211_key *key,
bool delay_tailroom)
{
if (!key)
return;
/*
* Synchronize so the TX path and rcu key iterators
* can no longer be using this key before we free/remove it.
*/
synchronize_net();
__ieee80211_key_destroy(key, delay_tailroom);
}
void ieee80211_key_free_unused(struct ieee80211_key *key)
{
WARN_ON(key->sdata || key->local);
ieee80211_key_free_common(key);
}
static bool ieee80211_key_identical(struct ieee80211_sub_if_data *sdata,
struct ieee80211_key *old,
struct ieee80211_key *new)
{
u8 tkip_old[WLAN_KEY_LEN_TKIP], tkip_new[WLAN_KEY_LEN_TKIP];
u8 *tk_old, *tk_new;
if (!old || new->conf.keylen != old->conf.keylen)
return false;
tk_old = old->conf.key;
tk_new = new->conf.key;
/*
* In station mode, don't compare the TX MIC key, as it's never used
* and offloaded rekeying may not care to send it to the host. This
* is the case in iwlwifi, for example.
*/
if (sdata->vif.type == NL80211_IFTYPE_STATION &&
new->conf.cipher == WLAN_CIPHER_SUITE_TKIP &&
new->conf.keylen == WLAN_KEY_LEN_TKIP &&
!(new->conf.flags & IEEE80211_KEY_FLAG_PAIRWISE)) {
memcpy(tkip_old, tk_old, WLAN_KEY_LEN_TKIP);
memcpy(tkip_new, tk_new, WLAN_KEY_LEN_TKIP);
memset(tkip_old + NL80211_TKIP_DATA_OFFSET_TX_MIC_KEY, 0, 8);
memset(tkip_new + NL80211_TKIP_DATA_OFFSET_TX_MIC_KEY, 0, 8);
tk_old = tkip_old;
tk_new = tkip_new;
}
return !crypto_memneq(tk_old, tk_new, new->conf.keylen);
}
int ieee80211_key_link(struct ieee80211_key *key,
struct ieee80211_link_data *link,
struct sta_info *sta)
{
struct ieee80211_sub_if_data *sdata = link->sdata;
static atomic_t key_color = ATOMIC_INIT(0);
struct ieee80211_key *old_key = NULL;
int idx = key->conf.keyidx;
bool pairwise = key->conf.flags & IEEE80211_KEY_FLAG_PAIRWISE;
/*
* We want to delay tailroom updates only for station - in that
* case it helps roaming speed, but in other cases it hurts and
* can cause warnings to appear.
*/
bool delay_tailroom = sdata->vif.type == NL80211_IFTYPE_STATION;
int ret = -EOPNOTSUPP;
mutex_lock(&sdata->local->key_mtx);
if (sta && pairwise) {
struct ieee80211_key *alt_key;
old_key = key_mtx_dereference(sdata->local, sta->ptk[idx]);
alt_key = key_mtx_dereference(sdata->local, sta->ptk[idx ^ 1]);
/* The rekey code assumes that the old and new key are using
* the same cipher. Enforce the assumption for pairwise keys.
*/
if ((alt_key && alt_key->conf.cipher != key->conf.cipher) ||
(old_key && old_key->conf.cipher != key->conf.cipher))
goto out;
} else if (sta) {
struct link_sta_info *link_sta = &sta->deflink;
int link_id = key->conf.link_id;
if (link_id >= 0) {
link_sta = rcu_dereference_protected(sta->link[link_id],
lockdep_is_held(&sta->local->sta_mtx));
if (!link_sta) {
ret = -ENOLINK;
goto out;
}
}
old_key = key_mtx_dereference(sdata->local, link_sta->gtk[idx]);
} else {
if (idx < NUM_DEFAULT_KEYS)
old_key = key_mtx_dereference(sdata->local,
sdata->keys[idx]);
if (!old_key)
old_key = key_mtx_dereference(sdata->local,
link->gtk[idx]);
}
/* Non-pairwise keys must also not switch the cipher on rekey */
if (!pairwise) {
if (old_key && old_key->conf.cipher != key->conf.cipher)
goto out;
}
/*
* Silently accept key re-installation without really installing the
* new version of the key to avoid nonce reuse or replay issues.
*/
if (ieee80211_key_identical(sdata, old_key, key)) {
ieee80211_key_free_unused(key);
ret = 0;
goto out;
}
key->local = sdata->local;
key->sdata = sdata;
key->sta = sta;
/*
* Assign a unique ID to every key so we can easily prevent mixed
* key and fragment cache attacks.
*/
key->color = atomic_inc_return(&key_color);
increment_tailroom_need_count(sdata);
ret = ieee80211_key_replace(sdata, link, sta, pairwise, old_key, key);
if (!ret) {
ieee80211_debugfs_key_add(key);
ieee80211_key_destroy(old_key, delay_tailroom);
} else {
ieee80211_key_free(key, delay_tailroom);
}
out:
mutex_unlock(&sdata->local->key_mtx);
return ret;
}
void ieee80211_key_free(struct ieee80211_key *key, bool delay_tailroom)
{
if (!key)
return;
/*
* Replace key with nothingness if it was ever used.
*/
if (key->sdata)
ieee80211_key_replace(key->sdata, NULL, key->sta,
key->conf.flags & IEEE80211_KEY_FLAG_PAIRWISE,
key, NULL);
ieee80211_key_destroy(key, delay_tailroom);
}
void ieee80211_reenable_keys(struct ieee80211_sub_if_data *sdata)
{
struct ieee80211_key *key;
struct ieee80211_sub_if_data *vlan;
lockdep_assert_wiphy(sdata->local->hw.wiphy);
mutex_lock(&sdata->local->key_mtx);
sdata->crypto_tx_tailroom_needed_cnt = 0;
sdata->crypto_tx_tailroom_pending_dec = 0;
if (sdata->vif.type == NL80211_IFTYPE_AP) {
list_for_each_entry(vlan, &sdata->u.ap.vlans, u.vlan.list) {
vlan->crypto_tx_tailroom_needed_cnt = 0;
vlan->crypto_tx_tailroom_pending_dec = 0;
}
}
if (ieee80211_sdata_running(sdata)) {
list_for_each_entry(key, &sdata->key_list, list) {
increment_tailroom_need_count(sdata);
ieee80211_key_enable_hw_accel(key);
}
}
mutex_unlock(&sdata->local->key_mtx);
}
void ieee80211_iter_keys(struct ieee80211_hw *hw,
struct ieee80211_vif *vif,
void (*iter)(struct ieee80211_hw *hw,
struct ieee80211_vif *vif,
struct ieee80211_sta *sta,
struct ieee80211_key_conf *key,
void *data),
void *iter_data)
{
struct ieee80211_local *local = hw_to_local(hw);
struct ieee80211_key *key, *tmp;
struct ieee80211_sub_if_data *sdata;
lockdep_assert_wiphy(hw->wiphy);
mutex_lock(&local->key_mtx);
if (vif) {
sdata = vif_to_sdata(vif);
list_for_each_entry_safe(key, tmp, &sdata->key_list, list)
iter(hw, &sdata->vif,
key->sta ? &key->sta->sta : NULL,
&key->conf, iter_data);
} else {
list_for_each_entry(sdata, &local->interfaces, list)
list_for_each_entry_safe(key, tmp,
&sdata->key_list, list)
iter(hw, &sdata->vif,
key->sta ? &key->sta->sta : NULL,
&key->conf, iter_data);
}
mutex_unlock(&local->key_mtx);
}
EXPORT_SYMBOL(ieee80211_iter_keys);
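/*
 * Illustrative driver-side sketch: a driver that lost its hardware key
 * state (e.g. across a firmware restart) can walk all keys and program
 * them again; my_hw_program_key() is a hypothetical driver helper.
 *
 *	static void my_reprogram_key(struct ieee80211_hw *hw,
 *				     struct ieee80211_vif *vif,
 *				     struct ieee80211_sta *sta,
 *				     struct ieee80211_key_conf *key,
 *				     void *data)
 *	{
 *		my_hw_program_key(hw->priv, vif, sta, key);
 *	}
 *
 *	ieee80211_iter_keys(hw, vif, my_reprogram_key, NULL);
 */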
static void
_ieee80211_iter_keys_rcu(struct ieee80211_hw *hw,
struct ieee80211_sub_if_data *sdata,
void (*iter)(struct ieee80211_hw *hw,
struct ieee80211_vif *vif,
struct ieee80211_sta *sta,
struct ieee80211_key_conf *key,
void *data),
void *iter_data)
{
struct ieee80211_key *key;
list_for_each_entry_rcu(key, &sdata->key_list, list) {
/* skip keys of station in removal process */
if (key->sta && key->sta->removed)
continue;
if (!(key->flags & KEY_FLAG_UPLOADED_TO_HARDWARE))
continue;
iter(hw, &sdata->vif,
key->sta ? &key->sta->sta : NULL,
&key->conf, iter_data);
}
}
void ieee80211_iter_keys_rcu(struct ieee80211_hw *hw,
struct ieee80211_vif *vif,
void (*iter)(struct ieee80211_hw *hw,
struct ieee80211_vif *vif,
struct ieee80211_sta *sta,
struct ieee80211_key_conf *key,
void *data),
void *iter_data)
{
struct ieee80211_local *local = hw_to_local(hw);
struct ieee80211_sub_if_data *sdata;
if (vif) {
sdata = vif_to_sdata(vif);
_ieee80211_iter_keys_rcu(hw, sdata, iter, iter_data);
} else {
list_for_each_entry_rcu(sdata, &local->interfaces, list)
_ieee80211_iter_keys_rcu(hw, sdata, iter, iter_data);
}
}
EXPORT_SYMBOL(ieee80211_iter_keys_rcu);
static void ieee80211_free_keys_iface(struct ieee80211_sub_if_data *sdata,
struct list_head *keys)
{
struct ieee80211_key *key, *tmp;
decrease_tailroom_need_count(sdata,
sdata->crypto_tx_tailroom_pending_dec);
sdata->crypto_tx_tailroom_pending_dec = 0;
ieee80211_debugfs_key_remove_mgmt_default(sdata);
ieee80211_debugfs_key_remove_beacon_default(sdata);
list_for_each_entry_safe(key, tmp, &sdata->key_list, list) {
ieee80211_key_replace(key->sdata, NULL, key->sta,
key->conf.flags & IEEE80211_KEY_FLAG_PAIRWISE,
key, NULL);
list_add_tail(&key->list, keys);
}
ieee80211_debugfs_key_update_default(sdata);
}
void ieee80211_remove_link_keys(struct ieee80211_link_data *link,
struct list_head *keys)
{
struct ieee80211_sub_if_data *sdata = link->sdata;
struct ieee80211_local *local = sdata->local;
struct ieee80211_key *key, *tmp;
mutex_lock(&local->key_mtx);
list_for_each_entry_safe(key, tmp, &sdata->key_list, list) {
if (key->conf.link_id != link->link_id)
continue;
ieee80211_key_replace(key->sdata, link, key->sta,
key->conf.flags & IEEE80211_KEY_FLAG_PAIRWISE,
key, NULL);
list_add_tail(&key->list, keys);
}
mutex_unlock(&local->key_mtx);
}
void ieee80211_free_key_list(struct ieee80211_local *local,
struct list_head *keys)
{
struct ieee80211_key *key, *tmp;
mutex_lock(&local->key_mtx);
list_for_each_entry_safe(key, tmp, keys, list)
__ieee80211_key_destroy(key, false);
mutex_unlock(&local->key_mtx);
}
void ieee80211_free_keys(struct ieee80211_sub_if_data *sdata,
bool force_synchronize)
{
struct ieee80211_local *local = sdata->local;
struct ieee80211_sub_if_data *vlan;
struct ieee80211_sub_if_data *master;
struct ieee80211_key *key, *tmp;
LIST_HEAD(keys);
cancel_delayed_work_sync(&sdata->dec_tailroom_needed_wk);
mutex_lock(&local->key_mtx);
ieee80211_free_keys_iface(sdata, &keys);
if (sdata->vif.type == NL80211_IFTYPE_AP) {
list_for_each_entry(vlan, &sdata->u.ap.vlans, u.vlan.list)
ieee80211_free_keys_iface(vlan, &keys);
}
if (!list_empty(&keys) || force_synchronize)
synchronize_net();
list_for_each_entry_safe(key, tmp, &keys, list)
__ieee80211_key_destroy(key, false);
if (sdata->vif.type == NL80211_IFTYPE_AP_VLAN) {
if (sdata->bss) {
master = container_of(sdata->bss,
struct ieee80211_sub_if_data,
u.ap);
WARN_ON_ONCE(sdata->crypto_tx_tailroom_needed_cnt !=
master->crypto_tx_tailroom_needed_cnt);
}
} else {
WARN_ON_ONCE(sdata->crypto_tx_tailroom_needed_cnt ||
sdata->crypto_tx_tailroom_pending_dec);
}
if (sdata->vif.type == NL80211_IFTYPE_AP) {
list_for_each_entry(vlan, &sdata->u.ap.vlans, u.vlan.list)
WARN_ON_ONCE(vlan->crypto_tx_tailroom_needed_cnt ||
vlan->crypto_tx_tailroom_pending_dec);
}
mutex_unlock(&local->key_mtx);
}
void ieee80211_free_sta_keys(struct ieee80211_local *local,
struct sta_info *sta)
{
struct ieee80211_key *key;
int i;
mutex_lock(&local->key_mtx);
for (i = 0; i < ARRAY_SIZE(sta->deflink.gtk); i++) {
key = key_mtx_dereference(local, sta->deflink.gtk[i]);
if (!key)
continue;
ieee80211_key_replace(key->sdata, NULL, key->sta,
key->conf.flags & IEEE80211_KEY_FLAG_PAIRWISE,
key, NULL);
__ieee80211_key_destroy(key, key->sdata->vif.type ==
NL80211_IFTYPE_STATION);
}
for (i = 0; i < NUM_DEFAULT_KEYS; i++) {
key = key_mtx_dereference(local, sta->ptk[i]);
if (!key)
continue;
ieee80211_key_replace(key->sdata, NULL, key->sta,
key->conf.flags & IEEE80211_KEY_FLAG_PAIRWISE,
key, NULL);
__ieee80211_key_destroy(key, key->sdata->vif.type ==
NL80211_IFTYPE_STATION);
}
mutex_unlock(&local->key_mtx);
}
void ieee80211_delayed_tailroom_dec(struct work_struct *wk)
{
struct ieee80211_sub_if_data *sdata;
sdata = container_of(wk, struct ieee80211_sub_if_data,
dec_tailroom_needed_wk.work);
/*
* The reason for the delayed tailroom needed decrementing is to
* make roaming faster: during roaming, all keys are first deleted
* and then new keys are installed. The first new key causes the
* crypto_tx_tailroom_needed_cnt to go from 0 to 1, which invokes
* the cost of synchronize_net() (which can be slow). Avoid this
* by deferring the crypto_tx_tailroom_needed_cnt decrementing on
* key removal for a while, so if we roam the value is larger than
* zero and no 0->1 transition happens.
*
* The cost is that if the AP switching was from an AP with keys
* to one without, we still allocate tailroom while it would no
* longer be needed. However, in the typical (fast) roaming case
* within an ESS this usually won't happen.
*/
mutex_lock(&sdata->local->key_mtx);
decrease_tailroom_need_count(sdata,
sdata->crypto_tx_tailroom_pending_dec);
sdata->crypto_tx_tailroom_pending_dec = 0;
mutex_unlock(&sdata->local->key_mtx);
}
void ieee80211_gtk_rekey_notify(struct ieee80211_vif *vif, const u8 *bssid,
const u8 *replay_ctr, gfp_t gfp)
{
struct ieee80211_sub_if_data *sdata = vif_to_sdata(vif);
trace_api_gtk_rekey_notify(sdata, bssid, replay_ctr);
cfg80211_gtk_rekey_notify(sdata->dev, bssid, replay_ctr, gfp);
}
EXPORT_SYMBOL_GPL(ieee80211_gtk_rekey_notify);
void ieee80211_get_key_rx_seq(struct ieee80211_key_conf *keyconf,
int tid, struct ieee80211_key_seq *seq)
{
struct ieee80211_key *key;
const u8 *pn;
key = container_of(keyconf, struct ieee80211_key, conf);
switch (key->conf.cipher) {
case WLAN_CIPHER_SUITE_TKIP:
if (WARN_ON(tid < 0 || tid >= IEEE80211_NUM_TIDS))
return;
seq->tkip.iv32 = key->u.tkip.rx[tid].iv32;
seq->tkip.iv16 = key->u.tkip.rx[tid].iv16;
break;
case WLAN_CIPHER_SUITE_CCMP:
case WLAN_CIPHER_SUITE_CCMP_256:
if (WARN_ON(tid < -1 || tid >= IEEE80211_NUM_TIDS))
return;
if (tid < 0)
pn = key->u.ccmp.rx_pn[IEEE80211_NUM_TIDS];
else
pn = key->u.ccmp.rx_pn[tid];
memcpy(seq->ccmp.pn, pn, IEEE80211_CCMP_PN_LEN);
break;
case WLAN_CIPHER_SUITE_AES_CMAC:
case WLAN_CIPHER_SUITE_BIP_CMAC_256:
if (WARN_ON(tid != 0))
return;
pn = key->u.aes_cmac.rx_pn;
memcpy(seq->aes_cmac.pn, pn, IEEE80211_CMAC_PN_LEN);
break;
case WLAN_CIPHER_SUITE_BIP_GMAC_128:
case WLAN_CIPHER_SUITE_BIP_GMAC_256:
if (WARN_ON(tid != 0))
return;
pn = key->u.aes_gmac.rx_pn;
memcpy(seq->aes_gmac.pn, pn, IEEE80211_GMAC_PN_LEN);
break;
case WLAN_CIPHER_SUITE_GCMP:
case WLAN_CIPHER_SUITE_GCMP_256:
if (WARN_ON(tid < -1 || tid >= IEEE80211_NUM_TIDS))
return;
if (tid < 0)
pn = key->u.gcmp.rx_pn[IEEE80211_NUM_TIDS];
else
pn = key->u.gcmp.rx_pn[tid];
memcpy(seq->gcmp.pn, pn, IEEE80211_GCMP_PN_LEN);
break;
}
}
EXPORT_SYMBOL(ieee80211_get_key_rx_seq);
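/*
 * Illustrative driver-side sketch: a driver can snapshot the RX PN of a
 * CCMP key per TID (for example before entering firmware-based WoWLAN)
 * and restore it later with ieee80211_set_key_rx_seq():
 *
 *	struct ieee80211_key_seq seq;
 *	int tid;
 *
 *	for (tid = 0; tid < IEEE80211_NUM_TIDS; tid++)
 *		ieee80211_get_key_rx_seq(keyconf, tid, &seq);
 *
 * Passing tid == -1 selects the extra counter kept for management
 * frames (the IEEE80211_NUM_TIDS slot above).
 */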
void ieee80211_set_key_rx_seq(struct ieee80211_key_conf *keyconf,
int tid, struct ieee80211_key_seq *seq)
{
struct ieee80211_key *key;
u8 *pn;
key = container_of(keyconf, struct ieee80211_key, conf);
switch (key->conf.cipher) {
case WLAN_CIPHER_SUITE_TKIP:
if (WARN_ON(tid < 0 || tid >= IEEE80211_NUM_TIDS))
return;
key->u.tkip.rx[tid].iv32 = seq->tkip.iv32;
key->u.tkip.rx[tid].iv16 = seq->tkip.iv16;
break;
case WLAN_CIPHER_SUITE_CCMP:
case WLAN_CIPHER_SUITE_CCMP_256:
if (WARN_ON(tid < -1 || tid >= IEEE80211_NUM_TIDS))
return;
if (tid < 0)
pn = key->u.ccmp.rx_pn[IEEE80211_NUM_TIDS];
else
pn = key->u.ccmp.rx_pn[tid];
memcpy(pn, seq->ccmp.pn, IEEE80211_CCMP_PN_LEN);
break;
case WLAN_CIPHER_SUITE_AES_CMAC:
case WLAN_CIPHER_SUITE_BIP_CMAC_256:
if (WARN_ON(tid != 0))
return;
pn = key->u.aes_cmac.rx_pn;
memcpy(pn, seq->aes_cmac.pn, IEEE80211_CMAC_PN_LEN);
break;
case WLAN_CIPHER_SUITE_BIP_GMAC_128:
case WLAN_CIPHER_SUITE_BIP_GMAC_256:
if (WARN_ON(tid != 0))
return;
pn = key->u.aes_gmac.rx_pn;
memcpy(pn, seq->aes_gmac.pn, IEEE80211_GMAC_PN_LEN);
break;
case WLAN_CIPHER_SUITE_GCMP:
case WLAN_CIPHER_SUITE_GCMP_256:
if (WARN_ON(tid < -1 || tid >= IEEE80211_NUM_TIDS))
return;
if (tid < 0)
pn = key->u.gcmp.rx_pn[IEEE80211_NUM_TIDS];
else
pn = key->u.gcmp.rx_pn[tid];
memcpy(pn, seq->gcmp.pn, IEEE80211_GCMP_PN_LEN);
break;
default:
WARN_ON(1);
break;
}
}
EXPORT_SYMBOL_GPL(ieee80211_set_key_rx_seq);
void ieee80211_remove_key(struct ieee80211_key_conf *keyconf)
{
struct ieee80211_key *key;
key = container_of(keyconf, struct ieee80211_key, conf);
assert_key_lock(key->local);
/*
* if key was uploaded, we assume the driver will/has remove(d)
* it, so adjust bookkeeping accordingly
*/
if (key->flags & KEY_FLAG_UPLOADED_TO_HARDWARE) {
key->flags &= ~KEY_FLAG_UPLOADED_TO_HARDWARE;
if (!(key->conf.flags & (IEEE80211_KEY_FLAG_GENERATE_MMIC |
IEEE80211_KEY_FLAG_PUT_MIC_SPACE |
IEEE80211_KEY_FLAG_RESERVE_TAILROOM)))
increment_tailroom_need_count(key->sdata);
}
ieee80211_key_free(key, false);
}
EXPORT_SYMBOL_GPL(ieee80211_remove_key);
struct ieee80211_key_conf *
ieee80211_gtk_rekey_add(struct ieee80211_vif *vif,
struct ieee80211_key_conf *keyconf)
{
struct ieee80211_sub_if_data *sdata = vif_to_sdata(vif);
struct ieee80211_local *local = sdata->local;
struct ieee80211_key *key;
int err;
if (WARN_ON(!local->wowlan))
return ERR_PTR(-EINVAL);
if (WARN_ON(vif->type != NL80211_IFTYPE_STATION))
return ERR_PTR(-EINVAL);
key = ieee80211_key_alloc(keyconf->cipher, keyconf->keyidx,
keyconf->keylen, keyconf->key,
0, NULL);
if (IS_ERR(key))
return ERR_CAST(key);
if (sdata->u.mgd.mfp != IEEE80211_MFP_DISABLED)
key->conf.flags |= IEEE80211_KEY_FLAG_RX_MGMT;
/* FIXME: this function needs to get a link ID */
err = ieee80211_key_link(key, &sdata->deflink, NULL);
if (err)
return ERR_PTR(err);
return &key->conf;
}
EXPORT_SYMBOL_GPL(ieee80211_gtk_rekey_add);
void ieee80211_key_mic_failure(struct ieee80211_key_conf *keyconf)
{
struct ieee80211_key *key;
key = container_of(keyconf, struct ieee80211_key, conf);
switch (key->conf.cipher) {
case WLAN_CIPHER_SUITE_AES_CMAC:
case WLAN_CIPHER_SUITE_BIP_CMAC_256:
key->u.aes_cmac.icverrors++;
break;
case WLAN_CIPHER_SUITE_BIP_GMAC_128:
case WLAN_CIPHER_SUITE_BIP_GMAC_256:
key->u.aes_gmac.icverrors++;
break;
default:
/* ignore the others for now, we don't keep counters now */
break;
}
}
EXPORT_SYMBOL_GPL(ieee80211_key_mic_failure);
void ieee80211_key_replay(struct ieee80211_key_conf *keyconf)
{
struct ieee80211_key *key;
key = container_of(keyconf, struct ieee80211_key, conf);
switch (key->conf.cipher) {
case WLAN_CIPHER_SUITE_CCMP:
case WLAN_CIPHER_SUITE_CCMP_256:
key->u.ccmp.replays++;
break;
case WLAN_CIPHER_SUITE_AES_CMAC:
case WLAN_CIPHER_SUITE_BIP_CMAC_256:
key->u.aes_cmac.replays++;
break;
case WLAN_CIPHER_SUITE_BIP_GMAC_128:
case WLAN_CIPHER_SUITE_BIP_GMAC_256:
key->u.aes_gmac.replays++;
break;
case WLAN_CIPHER_SUITE_GCMP:
case WLAN_CIPHER_SUITE_GCMP_256:
key->u.gcmp.replays++;
break;
}
}
EXPORT_SYMBOL_GPL(ieee80211_key_replay);
int ieee80211_key_switch_links(struct ieee80211_sub_if_data *sdata,
unsigned long del_links_mask,
unsigned long add_links_mask)
{
struct ieee80211_key *key;
int ret;
list_for_each_entry(key, &sdata->key_list, list) {
if (key->conf.link_id < 0 ||
!(del_links_mask & BIT(key->conf.link_id)))
continue;
/* shouldn't happen for per-link keys */
WARN_ON(key->sta);
ieee80211_key_disable_hw_accel(key);
}
list_for_each_entry(key, &sdata->key_list, list) {
if (key->conf.link_id < 0 ||
!(add_links_mask & BIT(key->conf.link_id)))
continue;
/* shouldn't happen for per-link keys */
WARN_ON(key->sta);
ret = ieee80211_key_enable_hw_accel(key);
if (ret)
return ret;
}
return 0;
}
| linux-master | net/mac80211/key.c |
// SPDX-License-Identifier: GPL-2.0-only
/*
* Copyright 2011-2012, Pavel Zubarev <[email protected]>
* Copyright 2011-2012, Marco Porsch <[email protected]>
* Copyright 2011-2012, cozybit Inc.
* Copyright (C) 2021 Intel Corporation
*/
#include "ieee80211_i.h"
#include "mesh.h"
#include "driver-ops.h"
/* This is not in the standard. It represents a tolerable tsf drift below
* which we do no TSF adjustment.
*/
#define TOFFSET_MINIMUM_ADJUSTMENT 10
/* This is not in the standard. It is a margin added to the
* Toffset setpoint to mitigate TSF overcorrection
* introduced by TSF adjustment latency.
*/
#define TOFFSET_SET_MARGIN 20
/* This is not in the standard. It represents the maximum Toffset jump above
* which we'll invalidate the Toffset setpoint and choose a new setpoint. This
* could be, for instance, in case a neighbor is restarted and its TSF counter
* reset.
*/
#define TOFFSET_MAXIMUM_ADJUSTMENT 800 /* 0.8 ms */
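/*
* Taken together (all values in microseconds of TSF time): drift below
* TOFFSET_MINIMUM_ADJUSTMENT is ignored, drift beyond
* TOFFSET_MAXIMUM_ADJUSTMENT invalidates the setpoint, and the setpoint
* itself sits TOFFSET_SET_MARGIN below the measured offset to mitigate
* overcorrection.
*/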
struct sync_method {
u8 method;
struct ieee80211_mesh_sync_ops ops;
};
/**
* mesh_peer_tbtt_adjusting - check if an mp is currently adjusting its TBTT
*
* @cfg: mesh config element from the mesh peer (or %NULL)
*/
static bool mesh_peer_tbtt_adjusting(const struct ieee80211_meshconf_ie *cfg)
{
return cfg &&
(cfg->meshconf_cap & IEEE80211_MESHCONF_CAPAB_TBTT_ADJUSTING);
}
void mesh_sync_adjust_tsf(struct ieee80211_sub_if_data *sdata)
{
struct ieee80211_local *local = sdata->local;
struct ieee80211_if_mesh *ifmsh = &sdata->u.mesh;
/* sdata->vif.bss_conf.beacon_int in 1024us units, 0.04% */
u64 beacon_int_fraction = sdata->vif.bss_conf.beacon_int * 1024 / 2500;
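/*
* beacon_int is in TU (1024 us), so the expression above yields roughly
* 0.04% of the beacon interval, in microseconds.  Worked example
* (illustrative): beacon_int = 100 TU gives an interval of 102400 us and
* beacon_int_fraction = 100 * 1024 / 2500 = 40 us, which caps each TSF
* adjustment below.
*/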
u64 tsf;
u64 tsfdelta;
spin_lock_bh(&ifmsh->sync_offset_lock);
if (ifmsh->sync_offset_clockdrift_max < beacon_int_fraction) {
msync_dbg(sdata, "TSF : max clockdrift=%lld; adjusting\n",
(long long) ifmsh->sync_offset_clockdrift_max);
tsfdelta = -ifmsh->sync_offset_clockdrift_max;
ifmsh->sync_offset_clockdrift_max = 0;
} else {
msync_dbg(sdata, "TSF : max clockdrift=%lld; adjusting by %llu\n",
(long long) ifmsh->sync_offset_clockdrift_max,
(unsigned long long) beacon_int_fraction);
tsfdelta = -beacon_int_fraction;
ifmsh->sync_offset_clockdrift_max -= beacon_int_fraction;
}
spin_unlock_bh(&ifmsh->sync_offset_lock);
if (local->ops->offset_tsf) {
drv_offset_tsf(local, sdata, tsfdelta);
} else {
tsf = drv_get_tsf(local, sdata);
if (tsf != -1ULL)
drv_set_tsf(local, sdata, tsf + tsfdelta);
}
}
static void
mesh_sync_offset_rx_bcn_presp(struct ieee80211_sub_if_data *sdata, u16 stype,
struct ieee80211_mgmt *mgmt, unsigned int len,
const struct ieee80211_meshconf_ie *mesh_cfg,
struct ieee80211_rx_status *rx_status)
{
struct ieee80211_if_mesh *ifmsh = &sdata->u.mesh;
struct ieee80211_local *local = sdata->local;
struct sta_info *sta;
u64 t_t, t_r;
WARN_ON(ifmsh->mesh_sp_id != IEEE80211_SYNC_METHOD_NEIGHBOR_OFFSET);
/* standard mentions only beacons */
if (stype != IEEE80211_STYPE_BEACON)
return;
/*
* Get time when timestamp field was received. If we don't
* have rx timestamps, then use current tsf as an approximation.
* drv_get_tsf() must be called before entering the rcu-read
* section.
*/
if (ieee80211_have_rx_timestamp(rx_status))
t_r = ieee80211_calculate_rx_timestamp(local, rx_status,
len + FCS_LEN, 24);
else
t_r = drv_get_tsf(local, sdata);
rcu_read_lock();
sta = sta_info_get(sdata, mgmt->sa);
if (!sta)
goto no_sync;
/* check offset sync conditions (13.13.2.2.1)
*
* TODO also sync to
* dot11MeshNbrOffsetMaxNeighbor non-peer non-MBSS neighbors
*/
if (mesh_peer_tbtt_adjusting(mesh_cfg)) {
msync_dbg(sdata, "STA %pM : is adjusting TBTT\n",
sta->sta.addr);
goto no_sync;
}
/* Timing offset calculation (see 13.13.2.2.2) */
t_t = le64_to_cpu(mgmt->u.beacon.timestamp);
sta->mesh->t_offset = t_t - t_r;
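/*
* t_offset is how far the neighbour's TSF was ahead of ours at reception
* time.  If our clock runs fast relative to this neighbour, t_offset
* shrinks over time and t_clockdrift (setpoint - t_offset) goes positive,
* which mesh_sync_adjust_tsf() later compensates by rewinding our TSF.
*/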
if (test_sta_flag(sta, WLAN_STA_TOFFSET_KNOWN)) {
s64 t_clockdrift = sta->mesh->t_offset_setpoint - sta->mesh->t_offset;
msync_dbg(sdata,
"STA %pM : t_offset=%lld, t_offset_setpoint=%lld, t_clockdrift=%lld\n",
sta->sta.addr, (long long) sta->mesh->t_offset,
(long long) sta->mesh->t_offset_setpoint,
(long long) t_clockdrift);
if (t_clockdrift > TOFFSET_MAXIMUM_ADJUSTMENT ||
t_clockdrift < -TOFFSET_MAXIMUM_ADJUSTMENT) {
msync_dbg(sdata,
"STA %pM : t_clockdrift=%lld too large, setpoint reset\n",
sta->sta.addr,
(long long) t_clockdrift);
clear_sta_flag(sta, WLAN_STA_TOFFSET_KNOWN);
goto no_sync;
}
spin_lock_bh(&ifmsh->sync_offset_lock);
if (t_clockdrift > ifmsh->sync_offset_clockdrift_max)
ifmsh->sync_offset_clockdrift_max = t_clockdrift;
spin_unlock_bh(&ifmsh->sync_offset_lock);
} else {
sta->mesh->t_offset_setpoint = sta->mesh->t_offset - TOFFSET_SET_MARGIN;
set_sta_flag(sta, WLAN_STA_TOFFSET_KNOWN);
msync_dbg(sdata,
"STA %pM : offset was invalid, t_offset=%lld\n",
sta->sta.addr,
(long long) sta->mesh->t_offset);
}
no_sync:
rcu_read_unlock();
}
static void mesh_sync_offset_adjust_tsf(struct ieee80211_sub_if_data *sdata,
struct beacon_data *beacon)
{
struct ieee80211_if_mesh *ifmsh = &sdata->u.mesh;
WARN_ON(ifmsh->mesh_sp_id != IEEE80211_SYNC_METHOD_NEIGHBOR_OFFSET);
WARN_ON(!rcu_read_lock_held());
spin_lock_bh(&ifmsh->sync_offset_lock);
if (ifmsh->sync_offset_clockdrift_max > TOFFSET_MINIMUM_ADJUSTMENT) {
/* Since adjusting the TSF here would
* require a possibly blocking call
* to the driver tsf setter, we punt
* the tsf adjustment to the mesh tasklet
*/
msync_dbg(sdata,
"TSF : kicking off TSF adjustment with clockdrift_max=%lld\n",
ifmsh->sync_offset_clockdrift_max);
set_bit(MESH_WORK_DRIFT_ADJUST, &ifmsh->wrkq_flags);
} else {
msync_dbg(sdata,
"TSF : max clockdrift=%lld; too small to adjust\n",
(long long)ifmsh->sync_offset_clockdrift_max);
ifmsh->sync_offset_clockdrift_max = 0;
}
spin_unlock_bh(&ifmsh->sync_offset_lock);
}
static const struct sync_method sync_methods[] = {
{
.method = IEEE80211_SYNC_METHOD_NEIGHBOR_OFFSET,
.ops = {
.rx_bcn_presp = &mesh_sync_offset_rx_bcn_presp,
.adjust_tsf = &mesh_sync_offset_adjust_tsf,
}
},
};
const struct ieee80211_mesh_sync_ops *ieee80211_mesh_sync_ops_get(u8 method)
{
int i;
for (i = 0 ; i < ARRAY_SIZE(sync_methods); ++i) {
if (sync_methods[i].method == method)
return &sync_methods[i].ops;
}
return NULL;
}
| linux-master | net/mac80211/mesh_sync.c |
// SPDX-License-Identifier: GPL-2.0-only
/*
* EHT handling
*
* Copyright(c) 2021-2023 Intel Corporation
*/
#include "ieee80211_i.h"
void
ieee80211_eht_cap_ie_to_sta_eht_cap(struct ieee80211_sub_if_data *sdata,
struct ieee80211_supported_band *sband,
const u8 *he_cap_ie, u8 he_cap_len,
const struct ieee80211_eht_cap_elem *eht_cap_ie_elem,
u8 eht_cap_len,
struct link_sta_info *link_sta)
{
struct ieee80211_sta_eht_cap *eht_cap = &link_sta->pub->eht_cap;
struct ieee80211_he_cap_elem *he_cap_ie_elem = (void *)he_cap_ie;
u8 eht_ppe_size = 0;
u8 mcs_nss_size;
u8 eht_total_size = sizeof(eht_cap->eht_cap_elem);
u8 *pos = (u8 *)eht_cap_ie_elem;
memset(eht_cap, 0, sizeof(*eht_cap));
if (!eht_cap_ie_elem ||
!ieee80211_get_eht_iftype_cap_vif(sband, &sdata->vif))
return;
mcs_nss_size = ieee80211_eht_mcs_nss_size(he_cap_ie_elem,
&eht_cap_ie_elem->fixed,
sdata->vif.type ==
NL80211_IFTYPE_STATION);
eht_total_size += mcs_nss_size;
/* Calculate the PPE thresholds length only if the header is present */
if (eht_cap_ie_elem->fixed.phy_cap_info[5] &
IEEE80211_EHT_PHY_CAP5_PPE_THRESHOLD_PRESENT) {
u16 eht_ppe_hdr;
if (eht_cap_len < eht_total_size + sizeof(u16))
return;
eht_ppe_hdr = get_unaligned_le16(eht_cap_ie_elem->optional + mcs_nss_size);
eht_ppe_size =
ieee80211_eht_ppe_size(eht_ppe_hdr,
eht_cap_ie_elem->fixed.phy_cap_info);
eht_total_size += eht_ppe_size;
/* we calculate as if NSS > 8 are valid, but don't handle that */
if (eht_ppe_size > sizeof(eht_cap->eht_ppe_thres))
return;
}
if (eht_cap_len < eht_total_size)
return;
/* Copy the static portion of the EHT capabilities */
memcpy(&eht_cap->eht_cap_elem, pos, sizeof(eht_cap->eht_cap_elem));
pos += sizeof(eht_cap->eht_cap_elem);
/* Copy MCS/NSS which depends on the peer capabilities */
memset(&eht_cap->eht_mcs_nss_supp, 0,
sizeof(eht_cap->eht_mcs_nss_supp));
memcpy(&eht_cap->eht_mcs_nss_supp, pos, mcs_nss_size);
if (eht_ppe_size)
memcpy(eht_cap->eht_ppe_thres,
&eht_cap_ie_elem->optional[mcs_nss_size],
eht_ppe_size);
eht_cap->has_eht = true;
link_sta->cur_max_bandwidth = ieee80211_sta_cap_rx_bw(link_sta);
link_sta->pub->bandwidth = ieee80211_sta_cur_vht_bw(link_sta);
}
| linux-master | net/mac80211/eht.c |
// SPDX-License-Identifier: GPL-2.0-only
/*
* Copyright (c) 2008, 2009 open80211s Ltd.
* Copyright (C) 2019, 2021-2023 Intel Corporation
* Author: Luis Carlos Cobo <[email protected]>
*/
#include <linux/slab.h>
#include <linux/etherdevice.h>
#include <asm/unaligned.h>
#include "wme.h"
#include "mesh.h"
#define TEST_FRAME_LEN 8192
#define MAX_METRIC 0xffffffff
#define ARITH_SHIFT 8
#define LINK_FAIL_THRESH 95
#define MAX_PREQ_QUEUE_LEN 64
static void mesh_queue_preq(struct mesh_path *, u8);
static inline u32 u32_field_get(const u8 *preq_elem, int offset, bool ae)
{
if (ae)
offset += 6;
return get_unaligned_le32(preq_elem + offset);
}
static inline u16 u16_field_get(const u8 *preq_elem, int offset, bool ae)
{
if (ae)
offset += 6;
return get_unaligned_le16(preq_elem + offset);
}
/* HWMP IE processing macros */
#define AE_F (1<<6)
#define AE_F_SET(x) (*x & AE_F)
#define PREQ_IE_FLAGS(x) (*(x))
#define PREQ_IE_HOPCOUNT(x) (*(x + 1))
#define PREQ_IE_TTL(x) (*(x + 2))
#define PREQ_IE_PREQ_ID(x) u32_field_get(x, 3, 0)
#define PREQ_IE_ORIG_ADDR(x) (x + 7)
#define PREQ_IE_ORIG_SN(x) u32_field_get(x, 13, 0)
#define PREQ_IE_LIFETIME(x) u32_field_get(x, 17, AE_F_SET(x))
#define PREQ_IE_METRIC(x) u32_field_get(x, 21, AE_F_SET(x))
#define PREQ_IE_TARGET_F(x) (*(AE_F_SET(x) ? x + 32 : x + 26))
#define PREQ_IE_TARGET_ADDR(x) (AE_F_SET(x) ? x + 33 : x + 27)
#define PREQ_IE_TARGET_SN(x) u32_field_get(x, 33, AE_F_SET(x))
#define PREP_IE_FLAGS(x) PREQ_IE_FLAGS(x)
#define PREP_IE_HOPCOUNT(x) PREQ_IE_HOPCOUNT(x)
#define PREP_IE_TTL(x) PREQ_IE_TTL(x)
#define PREP_IE_ORIG_ADDR(x) (AE_F_SET(x) ? x + 27 : x + 21)
#define PREP_IE_ORIG_SN(x) u32_field_get(x, 27, AE_F_SET(x))
#define PREP_IE_LIFETIME(x) u32_field_get(x, 13, AE_F_SET(x))
#define PREP_IE_METRIC(x) u32_field_get(x, 17, AE_F_SET(x))
#define PREP_IE_TARGET_ADDR(x) (x + 3)
#define PREP_IE_TARGET_SN(x) u32_field_get(x, 9, 0)
#define PERR_IE_TTL(x) (*(x))
#define PERR_IE_TARGET_FLAGS(x) (*(x + 2))
#define PERR_IE_TARGET_ADDR(x) (x + 3)
#define PERR_IE_TARGET_SN(x) u32_field_get(x, 9, 0)
#define PERR_IE_TARGET_RCODE(x) u16_field_get(x, 13, 0)
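/*
* For reference, the fixed offsets above correspond to the following PREQ
* element layout (no AE, single target, 37 octets total): flags(1),
* hop count(1), TTL(1), PREQ ID(4), originator address(6), originator SN(4),
* lifetime(4), metric(4), target count(1), per-target flags(1),
* target address(6), target SN(4).  When the AE flag is set, an external
* address adds 6 octets and shifts the later fields accordingly.
*/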
#define MSEC_TO_TU(x) (x*1000/1024)
#define SN_GT(x, y) ((s32)(y - x) < 0)
#define SN_LT(x, y) ((s32)(x - y) < 0)
#define MAX_SANE_SN_DELTA 32
static inline u32 SN_DELTA(u32 x, u32 y)
{
return x >= y ? x - y : y - x;
}
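/*
* SN_GT()/SN_LT() compare HWMP sequence numbers modulo 2^32, so wraparound
* is handled naturally: e.g. SN_GT(1, 0xffffffff) is true because
* (s32)(0xffffffff - 1) is negative, i.e. 1 is "newer" than 0xffffffff.
*/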
#define net_traversal_jiffies(s) \
msecs_to_jiffies(s->u.mesh.mshcfg.dot11MeshHWMPnetDiameterTraversalTime)
#define default_lifetime(s) \
MSEC_TO_TU(s->u.mesh.mshcfg.dot11MeshHWMPactivePathTimeout)
#define min_preq_int_jiff(s) \
(msecs_to_jiffies(s->u.mesh.mshcfg.dot11MeshHWMPpreqMinInterval))
#define max_preq_retries(s) (s->u.mesh.mshcfg.dot11MeshHWMPmaxPREQretries)
#define disc_timeout_jiff(s) \
msecs_to_jiffies(sdata->u.mesh.mshcfg.min_discovery_timeout)
#define root_path_confirmation_jiffies(s) \
msecs_to_jiffies(sdata->u.mesh.mshcfg.dot11MeshHWMPconfirmationInterval)
enum mpath_frame_type {
MPATH_PREQ = 0,
MPATH_PREP,
MPATH_PERR,
MPATH_RANN
};
static const u8 broadcast_addr[ETH_ALEN] = {0xff, 0xff, 0xff, 0xff, 0xff, 0xff};
static int mesh_path_sel_frame_tx(enum mpath_frame_type action, u8 flags,
const u8 *orig_addr, u32 orig_sn,
u8 target_flags, const u8 *target,
u32 target_sn, const u8 *da,
u8 hop_count, u8 ttl,
u32 lifetime, u32 metric, u32 preq_id,
struct ieee80211_sub_if_data *sdata)
{
struct ieee80211_local *local = sdata->local;
struct sk_buff *skb;
struct ieee80211_mgmt *mgmt;
u8 *pos, ie_len;
int hdr_len = offsetofend(struct ieee80211_mgmt,
u.action.u.mesh_action);
skb = dev_alloc_skb(local->tx_headroom +
hdr_len +
2 + 37); /* max HWMP IE */
if (!skb)
return -1;
skb_reserve(skb, local->tx_headroom);
mgmt = skb_put_zero(skb, hdr_len);
mgmt->frame_control = cpu_to_le16(IEEE80211_FTYPE_MGMT |
IEEE80211_STYPE_ACTION);
memcpy(mgmt->da, da, ETH_ALEN);
memcpy(mgmt->sa, sdata->vif.addr, ETH_ALEN);
/* BSSID == SA */
memcpy(mgmt->bssid, sdata->vif.addr, ETH_ALEN);
mgmt->u.action.category = WLAN_CATEGORY_MESH_ACTION;
mgmt->u.action.u.mesh_action.action_code =
WLAN_MESH_ACTION_HWMP_PATH_SELECTION;
switch (action) {
case MPATH_PREQ:
mhwmp_dbg(sdata, "sending PREQ to %pM\n", target);
ie_len = 37;
pos = skb_put(skb, 2 + ie_len);
*pos++ = WLAN_EID_PREQ;
break;
case MPATH_PREP:
mhwmp_dbg(sdata, "sending PREP to %pM\n", orig_addr);
ie_len = 31;
pos = skb_put(skb, 2 + ie_len);
*pos++ = WLAN_EID_PREP;
break;
case MPATH_RANN:
mhwmp_dbg(sdata, "sending RANN from %pM\n", orig_addr);
ie_len = sizeof(struct ieee80211_rann_ie);
pos = skb_put(skb, 2 + ie_len);
*pos++ = WLAN_EID_RANN;
break;
default:
kfree_skb(skb);
return -ENOTSUPP;
}
*pos++ = ie_len;
*pos++ = flags;
*pos++ = hop_count;
*pos++ = ttl;
if (action == MPATH_PREP) {
memcpy(pos, target, ETH_ALEN);
pos += ETH_ALEN;
put_unaligned_le32(target_sn, pos);
pos += 4;
} else {
if (action == MPATH_PREQ) {
put_unaligned_le32(preq_id, pos);
pos += 4;
}
memcpy(pos, orig_addr, ETH_ALEN);
pos += ETH_ALEN;
put_unaligned_le32(orig_sn, pos);
pos += 4;
}
put_unaligned_le32(lifetime, pos); /* interval for RANN */
pos += 4;
put_unaligned_le32(metric, pos);
pos += 4;
if (action == MPATH_PREQ) {
*pos++ = 1; /* destination count */
*pos++ = target_flags;
memcpy(pos, target, ETH_ALEN);
pos += ETH_ALEN;
put_unaligned_le32(target_sn, pos);
pos += 4;
} else if (action == MPATH_PREP) {
memcpy(pos, orig_addr, ETH_ALEN);
pos += ETH_ALEN;
put_unaligned_le32(orig_sn, pos);
pos += 4;
}
ieee80211_tx_skb(sdata, skb);
return 0;
}
/* Headroom is not adjusted. Caller should ensure that skb has sufficient
* headroom in case the frame is encrypted. */
static void prepare_frame_for_deferred_tx(struct ieee80211_sub_if_data *sdata,
struct sk_buff *skb)
{
struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb);
struct ieee80211_hdr *hdr = (struct ieee80211_hdr *) skb->data;
skb_reset_mac_header(skb);
skb_reset_network_header(skb);
skb_reset_transport_header(skb);
/* Send all internal mgmt frames on VO. Accordingly set TID to 7. */
skb_set_queue_mapping(skb, IEEE80211_AC_VO);
skb->priority = 7;
info->control.vif = &sdata->vif;
info->control.flags |= IEEE80211_TX_INTCFL_NEED_TXPROCESSING;
ieee80211_set_qos_hdr(sdata, skb);
ieee80211_mps_set_frame_flags(sdata, NULL, hdr);
}
/**
* mesh_path_error_tx - Sends a PERR mesh management frame
*
* @ttl: allowed remaining hops
* @target: broken destination
* @target_sn: SN of the broken destination
* @target_rcode: reason code for this PERR
* @ra: node this frame is addressed to
* @sdata: local mesh subif
*
* Note: This function may be called with driver locks taken that the driver
* also acquires in the TX path. To avoid a deadlock we don't transmit the
* frame directly but add it to the pending queue instead.
*/
int mesh_path_error_tx(struct ieee80211_sub_if_data *sdata,
u8 ttl, const u8 *target, u32 target_sn,
u16 target_rcode, const u8 *ra)
{
struct ieee80211_local *local = sdata->local;
struct sk_buff *skb;
struct ieee80211_if_mesh *ifmsh = &sdata->u.mesh;
struct ieee80211_mgmt *mgmt;
u8 *pos, ie_len;
int hdr_len = offsetofend(struct ieee80211_mgmt,
u.action.u.mesh_action);
if (time_before(jiffies, ifmsh->next_perr))
return -EAGAIN;
skb = dev_alloc_skb(local->tx_headroom +
IEEE80211_ENCRYPT_HEADROOM +
IEEE80211_ENCRYPT_TAILROOM +
hdr_len +
2 + 15 /* PERR IE */);
if (!skb)
return -1;
skb_reserve(skb, local->tx_headroom + IEEE80211_ENCRYPT_HEADROOM);
mgmt = skb_put_zero(skb, hdr_len);
mgmt->frame_control = cpu_to_le16(IEEE80211_FTYPE_MGMT |
IEEE80211_STYPE_ACTION);
memcpy(mgmt->da, ra, ETH_ALEN);
memcpy(mgmt->sa, sdata->vif.addr, ETH_ALEN);
/* BSSID == SA */
memcpy(mgmt->bssid, sdata->vif.addr, ETH_ALEN);
mgmt->u.action.category = WLAN_CATEGORY_MESH_ACTION;
mgmt->u.action.u.mesh_action.action_code =
WLAN_MESH_ACTION_HWMP_PATH_SELECTION;
ie_len = 15;
pos = skb_put(skb, 2 + ie_len);
*pos++ = WLAN_EID_PERR;
*pos++ = ie_len;
/* ttl */
*pos++ = ttl;
/* number of destinations */
*pos++ = 1;
/* Flags field has AE bit only as defined in
* sec 8.4.2.117 IEEE802.11-2012
*/
*pos = 0;
pos++;
memcpy(pos, target, ETH_ALEN);
pos += ETH_ALEN;
put_unaligned_le32(target_sn, pos);
pos += 4;
put_unaligned_le16(target_rcode, pos);
/* see note in function header */
prepare_frame_for_deferred_tx(sdata, skb);
ifmsh->next_perr = TU_TO_EXP_TIME(
ifmsh->mshcfg.dot11MeshHWMPperrMinInterval);
ieee80211_add_pending_skb(local, skb);
return 0;
}
void ieee80211s_update_metric(struct ieee80211_local *local,
struct sta_info *sta,
struct ieee80211_tx_status *st)
{
struct ieee80211_tx_info *txinfo = st->info;
int failed;
struct rate_info rinfo;
failed = !(txinfo->flags & IEEE80211_TX_STAT_ACK);
/* moving average, scaled to 100.
* feed failure as 100 and success as 0
*/
ewma_mesh_fail_avg_add(&sta->mesh->fail_avg, failed * 100);
if (ewma_mesh_fail_avg_read(&sta->mesh->fail_avg) >
LINK_FAIL_THRESH)
mesh_plink_broken(sta);
/* use rate info set by the driver directly if present */
if (st->n_rates)
rinfo = sta->deflink.tx_stats.last_rate_info;
else
sta_set_rate_info_tx(sta, &sta->deflink.tx_stats.last_rate, &rinfo);
ewma_mesh_tx_rate_avg_add(&sta->mesh->tx_rate_avg,
cfg80211_calculate_bitrate(&rinfo));
}
u32 airtime_link_metric_get(struct ieee80211_local *local,
struct sta_info *sta)
{
/* This should be adjusted for each device */
int device_constant = 1 << ARITH_SHIFT;
int test_frame_len = TEST_FRAME_LEN << ARITH_SHIFT;
int s_unit = 1 << ARITH_SHIFT;
int rate, err;
u32 tx_time, estimated_retx;
u64 result;
unsigned long fail_avg =
ewma_mesh_fail_avg_read(&sta->mesh->fail_avg);
if (sta->mesh->plink_state != NL80211_PLINK_ESTAB)
return MAX_METRIC;
/* Try to get rate based on HW/SW RC algorithm.
* Rate is returned in units of Kbps, correct this
* to comply with airtime calculation units
* Round up in case we get rate < 100Kbps
*/
rate = DIV_ROUND_UP(sta_get_expected_throughput(sta), 100);
if (rate) {
err = 0;
} else {
if (fail_avg > LINK_FAIL_THRESH)
return MAX_METRIC;
rate = ewma_mesh_tx_rate_avg_read(&sta->mesh->tx_rate_avg);
if (WARN_ON(!rate))
return MAX_METRIC;
err = (fail_avg << ARITH_SHIFT) / 100;
}
/* bitrate is in units of 100 Kbps, while we need rate in units of
* 1Mbps. This will be corrected on tx_time computation.
*/
tx_time = (device_constant + 10 * test_frame_len / rate);
estimated_retx = ((1 << (2 * ARITH_SHIFT)) / (s_unit - err));
result = ((u64)tx_time * estimated_retx) >> (2 * ARITH_SHIFT);
return (u32)result;
}
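/*
* Illustrative arithmetic (not taken from the standard text): with an
* expected throughput of 54 Mbps and no recorded failures, rate = 540
* (units of 100 kbps) and err = 0, so
*	tx_time = 256 + 10 * (8192 << 8) / 540 ~= 39092
*	estimated_retx = (1 << 16) / 256 = 256
*	metric = (39092 * 256) >> 16 ~= 152
* Higher failure averages shrink (s_unit - err) and inflate the metric.
*/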
/**
* hwmp_route_info_get - Update routing info to originator and transmitter
*
* @sdata: local mesh subif
* @mgmt: mesh management frame
* @hwmp_ie: hwmp information element (PREP or PREQ)
* @action: type of hwmp ie
*
* This function updates the path routing information to the originator and the
* transmitter of a HWMP PREQ or PREP frame.
*
* Returns: metric to frame originator or 0 if the frame should not be further
* processed
*
* Notes: this function is the only place (besides user-provided info) where
* path routing information is updated.
*/
static u32 hwmp_route_info_get(struct ieee80211_sub_if_data *sdata,
struct ieee80211_mgmt *mgmt,
const u8 *hwmp_ie, enum mpath_frame_type action)
{
struct ieee80211_local *local = sdata->local;
struct mesh_path *mpath;
struct sta_info *sta;
bool fresh_info;
const u8 *orig_addr, *ta;
u32 orig_sn, orig_metric;
unsigned long orig_lifetime, exp_time;
u32 last_hop_metric, new_metric;
bool flush_mpath = false;
bool process = true;
u8 hopcount;
rcu_read_lock();
sta = sta_info_get(sdata, mgmt->sa);
if (!sta) {
rcu_read_unlock();
return 0;
}
last_hop_metric = airtime_link_metric_get(local, sta);
/* Update and check originator routing info */
fresh_info = true;
switch (action) {
case MPATH_PREQ:
orig_addr = PREQ_IE_ORIG_ADDR(hwmp_ie);
orig_sn = PREQ_IE_ORIG_SN(hwmp_ie);
orig_lifetime = PREQ_IE_LIFETIME(hwmp_ie);
orig_metric = PREQ_IE_METRIC(hwmp_ie);
hopcount = PREQ_IE_HOPCOUNT(hwmp_ie) + 1;
break;
case MPATH_PREP:
/* Originator here refers to the MP that was the target in the
* Path Request. We diverge from the nomenclature in the draft
* so that we can easily use a single function to gather path
* information from both PREQ and PREP frames.
*/
orig_addr = PREP_IE_TARGET_ADDR(hwmp_ie);
orig_sn = PREP_IE_TARGET_SN(hwmp_ie);
orig_lifetime = PREP_IE_LIFETIME(hwmp_ie);
orig_metric = PREP_IE_METRIC(hwmp_ie);
hopcount = PREP_IE_HOPCOUNT(hwmp_ie) + 1;
break;
default:
rcu_read_unlock();
return 0;
}
new_metric = orig_metric + last_hop_metric;
if (new_metric < orig_metric)
new_metric = MAX_METRIC;
exp_time = TU_TO_EXP_TIME(orig_lifetime);
if (ether_addr_equal(orig_addr, sdata->vif.addr)) {
/* This MP is the originator, we are not interested in this
* frame, except for updating transmitter's path info.
*/
process = false;
fresh_info = false;
} else {
mpath = mesh_path_lookup(sdata, orig_addr);
if (mpath) {
spin_lock_bh(&mpath->state_lock);
if (mpath->flags & MESH_PATH_FIXED)
fresh_info = false;
else if ((mpath->flags & MESH_PATH_ACTIVE) &&
(mpath->flags & MESH_PATH_SN_VALID)) {
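/*
* Prefer the path we already have: when the candidate would change
* the next hop, its metric is scaled by 10/9, i.e. it must be at
* least ~10% better than the current metric to replace it.
*/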
if (SN_GT(mpath->sn, orig_sn) ||
(mpath->sn == orig_sn &&
(rcu_access_pointer(mpath->next_hop) !=
sta ?
mult_frac(new_metric, 10, 9) :
new_metric) >= mpath->metric)) {
process = false;
fresh_info = false;
}
} else if (!(mpath->flags & MESH_PATH_ACTIVE)) {
bool have_sn, newer_sn, bounced;
have_sn = mpath->flags & MESH_PATH_SN_VALID;
newer_sn = have_sn && SN_GT(orig_sn, mpath->sn);
bounced = have_sn &&
(SN_DELTA(orig_sn, mpath->sn) >
MAX_SANE_SN_DELTA);
if (!have_sn || newer_sn) {
/* if SN is newer than what we had
* then we can take it */;
} else if (bounced) {
/* if SN is way different than what
* we had then assume the other side
* rebooted or restarted */;
} else {
process = false;
fresh_info = false;
}
}
} else {
mpath = mesh_path_add(sdata, orig_addr);
if (IS_ERR(mpath)) {
rcu_read_unlock();
return 0;
}
spin_lock_bh(&mpath->state_lock);
}
if (fresh_info) {
if (rcu_access_pointer(mpath->next_hop) != sta) {
mpath->path_change_count++;
flush_mpath = true;
}
mesh_path_assign_nexthop(mpath, sta);
mpath->flags |= MESH_PATH_SN_VALID;
mpath->metric = new_metric;
mpath->sn = orig_sn;
mpath->exp_time = time_after(mpath->exp_time, exp_time)
? mpath->exp_time : exp_time;
mpath->hop_count = hopcount;
mesh_path_activate(mpath);
spin_unlock_bh(&mpath->state_lock);
if (flush_mpath)
mesh_fast_tx_flush_mpath(mpath);
ewma_mesh_fail_avg_init(&sta->mesh->fail_avg);
/* init it at a low value - 0 start is tricky */
ewma_mesh_fail_avg_add(&sta->mesh->fail_avg, 1);
mesh_path_tx_pending(mpath);
/* the draft says the preq_id should be saved too, but there does
* not seem to be any use for it, so skip it for now
*/
} else
spin_unlock_bh(&mpath->state_lock);
}
/* Update and check transmitter routing info */
ta = mgmt->sa;
if (ether_addr_equal(orig_addr, ta))
fresh_info = false;
else {
fresh_info = true;
mpath = mesh_path_lookup(sdata, ta);
if (mpath) {
spin_lock_bh(&mpath->state_lock);
if ((mpath->flags & MESH_PATH_FIXED) ||
((mpath->flags & MESH_PATH_ACTIVE) &&
((rcu_access_pointer(mpath->next_hop) != sta ?
mult_frac(last_hop_metric, 10, 9) :
last_hop_metric) > mpath->metric)))
fresh_info = false;
} else {
mpath = mesh_path_add(sdata, ta);
if (IS_ERR(mpath)) {
rcu_read_unlock();
return 0;
}
spin_lock_bh(&mpath->state_lock);
}
if (fresh_info) {
if (rcu_access_pointer(mpath->next_hop) != sta) {
mpath->path_change_count++;
flush_mpath = true;
}
mesh_path_assign_nexthop(mpath, sta);
mpath->metric = last_hop_metric;
mpath->exp_time = time_after(mpath->exp_time, exp_time)
? mpath->exp_time : exp_time;
mpath->hop_count = 1;
mesh_path_activate(mpath);
spin_unlock_bh(&mpath->state_lock);
if (flush_mpath)
mesh_fast_tx_flush_mpath(mpath);
ewma_mesh_fail_avg_init(&sta->mesh->fail_avg);
/* init it at a low value - 0 start is tricky */
ewma_mesh_fail_avg_add(&sta->mesh->fail_avg, 1);
mesh_path_tx_pending(mpath);
} else
spin_unlock_bh(&mpath->state_lock);
}
rcu_read_unlock();
return process ? new_metric : 0;
}
static void hwmp_preq_frame_process(struct ieee80211_sub_if_data *sdata,
struct ieee80211_mgmt *mgmt,
const u8 *preq_elem, u32 orig_metric)
{
struct ieee80211_if_mesh *ifmsh = &sdata->u.mesh;
struct mesh_path *mpath = NULL;
const u8 *target_addr, *orig_addr;
const u8 *da;
u8 target_flags, ttl, flags;
u32 orig_sn, target_sn, lifetime, target_metric = 0;
bool reply = false;
bool forward = true;
bool root_is_gate;
/* Update target SN, if present */
target_addr = PREQ_IE_TARGET_ADDR(preq_elem);
orig_addr = PREQ_IE_ORIG_ADDR(preq_elem);
target_sn = PREQ_IE_TARGET_SN(preq_elem);
orig_sn = PREQ_IE_ORIG_SN(preq_elem);
target_flags = PREQ_IE_TARGET_F(preq_elem);
/* Proactive PREQ gate announcements */
flags = PREQ_IE_FLAGS(preq_elem);
root_is_gate = !!(flags & RANN_FLAG_IS_GATE);
mhwmp_dbg(sdata, "received PREQ from %pM\n", orig_addr);
if (ether_addr_equal(target_addr, sdata->vif.addr)) {
mhwmp_dbg(sdata, "PREQ is for us\n");
forward = false;
reply = true;
target_metric = 0;
if (SN_GT(target_sn, ifmsh->sn))
ifmsh->sn = target_sn;
if (time_after(jiffies, ifmsh->last_sn_update +
net_traversal_jiffies(sdata)) ||
time_before(jiffies, ifmsh->last_sn_update)) {
++ifmsh->sn;
ifmsh->last_sn_update = jiffies;
}
target_sn = ifmsh->sn;
} else if (is_broadcast_ether_addr(target_addr) &&
(target_flags & IEEE80211_PREQ_TO_FLAG)) {
rcu_read_lock();
mpath = mesh_path_lookup(sdata, orig_addr);
if (mpath) {
if (flags & IEEE80211_PREQ_PROACTIVE_PREP_FLAG) {
reply = true;
target_addr = sdata->vif.addr;
target_sn = ++ifmsh->sn;
target_metric = 0;
ifmsh->last_sn_update = jiffies;
}
if (root_is_gate)
mesh_path_add_gate(mpath);
}
rcu_read_unlock();
} else {
rcu_read_lock();
mpath = mesh_path_lookup(sdata, target_addr);
if (mpath) {
if ((!(mpath->flags & MESH_PATH_SN_VALID)) ||
SN_LT(mpath->sn, target_sn)) {
mpath->sn = target_sn;
mpath->flags |= MESH_PATH_SN_VALID;
} else if ((!(target_flags & IEEE80211_PREQ_TO_FLAG)) &&
(mpath->flags & MESH_PATH_ACTIVE)) {
reply = true;
target_metric = mpath->metric;
target_sn = mpath->sn;
/* Case E2 of sec 13.10.9.3 IEEE 802.11-2012*/
target_flags |= IEEE80211_PREQ_TO_FLAG;
}
}
rcu_read_unlock();
}
if (reply) {
lifetime = PREQ_IE_LIFETIME(preq_elem);
ttl = ifmsh->mshcfg.element_ttl;
if (ttl != 0) {
mhwmp_dbg(sdata, "replying to the PREQ\n");
mesh_path_sel_frame_tx(MPATH_PREP, 0, orig_addr,
orig_sn, 0, target_addr,
target_sn, mgmt->sa, 0, ttl,
lifetime, target_metric, 0,
sdata);
} else {
ifmsh->mshstats.dropped_frames_ttl++;
}
}
if (forward && ifmsh->mshcfg.dot11MeshForwarding) {
u32 preq_id;
u8 hopcount;
ttl = PREQ_IE_TTL(preq_elem);
lifetime = PREQ_IE_LIFETIME(preq_elem);
if (ttl <= 1) {
ifmsh->mshstats.dropped_frames_ttl++;
return;
}
mhwmp_dbg(sdata, "forwarding the PREQ from %pM\n", orig_addr);
--ttl;
preq_id = PREQ_IE_PREQ_ID(preq_elem);
hopcount = PREQ_IE_HOPCOUNT(preq_elem) + 1;
da = (mpath && mpath->is_root) ?
mpath->rann_snd_addr : broadcast_addr;
if (flags & IEEE80211_PREQ_PROACTIVE_PREP_FLAG) {
target_addr = PREQ_IE_TARGET_ADDR(preq_elem);
target_sn = PREQ_IE_TARGET_SN(preq_elem);
}
mesh_path_sel_frame_tx(MPATH_PREQ, flags, orig_addr,
orig_sn, target_flags, target_addr,
target_sn, da, hopcount, ttl, lifetime,
orig_metric, preq_id, sdata);
if (!is_multicast_ether_addr(da))
ifmsh->mshstats.fwded_unicast++;
else
ifmsh->mshstats.fwded_mcast++;
ifmsh->mshstats.fwded_frames++;
}
}
static inline struct sta_info *
next_hop_deref_protected(struct mesh_path *mpath)
{
return rcu_dereference_protected(mpath->next_hop,
lockdep_is_held(&mpath->state_lock));
}
static void hwmp_prep_frame_process(struct ieee80211_sub_if_data *sdata,
struct ieee80211_mgmt *mgmt,
const u8 *prep_elem, u32 metric)
{
struct ieee80211_if_mesh *ifmsh = &sdata->u.mesh;
struct mesh_path *mpath;
const u8 *target_addr, *orig_addr;
u8 ttl, hopcount, flags;
u8 next_hop[ETH_ALEN];
u32 target_sn, orig_sn, lifetime;
mhwmp_dbg(sdata, "received PREP from %pM\n",
PREP_IE_TARGET_ADDR(prep_elem));
orig_addr = PREP_IE_ORIG_ADDR(prep_elem);
if (ether_addr_equal(orig_addr, sdata->vif.addr))
/* destination, no forwarding required */
return;
if (!ifmsh->mshcfg.dot11MeshForwarding)
return;
ttl = PREP_IE_TTL(prep_elem);
if (ttl <= 1) {
sdata->u.mesh.mshstats.dropped_frames_ttl++;
return;
}
rcu_read_lock();
mpath = mesh_path_lookup(sdata, orig_addr);
if (mpath)
spin_lock_bh(&mpath->state_lock);
else
goto fail;
if (!(mpath->flags & MESH_PATH_ACTIVE)) {
spin_unlock_bh(&mpath->state_lock);
goto fail;
}
memcpy(next_hop, next_hop_deref_protected(mpath)->sta.addr, ETH_ALEN);
spin_unlock_bh(&mpath->state_lock);
--ttl;
flags = PREP_IE_FLAGS(prep_elem);
lifetime = PREP_IE_LIFETIME(prep_elem);
hopcount = PREP_IE_HOPCOUNT(prep_elem) + 1;
target_addr = PREP_IE_TARGET_ADDR(prep_elem);
target_sn = PREP_IE_TARGET_SN(prep_elem);
orig_sn = PREP_IE_ORIG_SN(prep_elem);
mesh_path_sel_frame_tx(MPATH_PREP, flags, orig_addr, orig_sn, 0,
target_addr, target_sn, next_hop, hopcount,
ttl, lifetime, metric, 0, sdata);
rcu_read_unlock();
sdata->u.mesh.mshstats.fwded_unicast++;
sdata->u.mesh.mshstats.fwded_frames++;
return;
fail:
rcu_read_unlock();
sdata->u.mesh.mshstats.dropped_frames_no_route++;
}
static void hwmp_perr_frame_process(struct ieee80211_sub_if_data *sdata,
struct ieee80211_mgmt *mgmt,
const u8 *perr_elem)
{
struct ieee80211_if_mesh *ifmsh = &sdata->u.mesh;
struct mesh_path *mpath;
u8 ttl;
const u8 *ta, *target_addr;
u32 target_sn;
u16 target_rcode;
ta = mgmt->sa;
ttl = PERR_IE_TTL(perr_elem);
if (ttl <= 1) {
ifmsh->mshstats.dropped_frames_ttl++;
return;
}
ttl--;
target_addr = PERR_IE_TARGET_ADDR(perr_elem);
target_sn = PERR_IE_TARGET_SN(perr_elem);
target_rcode = PERR_IE_TARGET_RCODE(perr_elem);
rcu_read_lock();
mpath = mesh_path_lookup(sdata, target_addr);
if (mpath) {
struct sta_info *sta;
spin_lock_bh(&mpath->state_lock);
sta = next_hop_deref_protected(mpath);
if (mpath->flags & MESH_PATH_ACTIVE &&
ether_addr_equal(ta, sta->sta.addr) &&
!(mpath->flags & MESH_PATH_FIXED) &&
(!(mpath->flags & MESH_PATH_SN_VALID) ||
SN_GT(target_sn, mpath->sn) || target_sn == 0)) {
mpath->flags &= ~MESH_PATH_ACTIVE;
if (target_sn != 0)
mpath->sn = target_sn;
else
mpath->sn += 1;
spin_unlock_bh(&mpath->state_lock);
if (!ifmsh->mshcfg.dot11MeshForwarding)
goto endperr;
mesh_path_error_tx(sdata, ttl, target_addr,
target_sn, target_rcode,
broadcast_addr);
} else
spin_unlock_bh(&mpath->state_lock);
}
endperr:
rcu_read_unlock();
}
static void hwmp_rann_frame_process(struct ieee80211_sub_if_data *sdata,
struct ieee80211_mgmt *mgmt,
const struct ieee80211_rann_ie *rann)
{
struct ieee80211_if_mesh *ifmsh = &sdata->u.mesh;
struct ieee80211_local *local = sdata->local;
struct sta_info *sta;
struct mesh_path *mpath;
u8 ttl, flags, hopcount;
const u8 *orig_addr;
u32 orig_sn, new_metric, orig_metric, last_hop_metric, interval;
bool root_is_gate;
ttl = rann->rann_ttl;
flags = rann->rann_flags;
root_is_gate = !!(flags & RANN_FLAG_IS_GATE);
orig_addr = rann->rann_addr;
orig_sn = le32_to_cpu(rann->rann_seq);
interval = le32_to_cpu(rann->rann_interval);
hopcount = rann->rann_hopcount;
hopcount++;
orig_metric = le32_to_cpu(rann->rann_metric);
/* Ignore our own RANNs */
if (ether_addr_equal(orig_addr, sdata->vif.addr))
return;
mhwmp_dbg(sdata,
"received RANN from %pM via neighbour %pM (is_gate=%d)\n",
orig_addr, mgmt->sa, root_is_gate);
rcu_read_lock();
sta = sta_info_get(sdata, mgmt->sa);
if (!sta) {
rcu_read_unlock();
return;
}
last_hop_metric = airtime_link_metric_get(local, sta);
new_metric = orig_metric + last_hop_metric;
if (new_metric < orig_metric)
new_metric = MAX_METRIC;
mpath = mesh_path_lookup(sdata, orig_addr);
if (!mpath) {
mpath = mesh_path_add(sdata, orig_addr);
if (IS_ERR(mpath)) {
rcu_read_unlock();
sdata->u.mesh.mshstats.dropped_frames_no_route++;
return;
}
}
if (!(SN_LT(mpath->sn, orig_sn)) &&
!(mpath->sn == orig_sn && new_metric < mpath->rann_metric)) {
rcu_read_unlock();
return;
}
if ((!(mpath->flags & (MESH_PATH_ACTIVE | MESH_PATH_RESOLVING)) ||
(time_after(jiffies, mpath->last_preq_to_root +
root_path_confirmation_jiffies(sdata)) ||
time_before(jiffies, mpath->last_preq_to_root))) &&
!(mpath->flags & MESH_PATH_FIXED) && (ttl != 0)) {
mhwmp_dbg(sdata,
"time to refresh root mpath %pM\n",
orig_addr);
mesh_queue_preq(mpath, PREQ_Q_F_START | PREQ_Q_F_REFRESH);
mpath->last_preq_to_root = jiffies;
}
mpath->sn = orig_sn;
mpath->rann_metric = new_metric;
mpath->is_root = true;
/* Record the RANN sender's address so we can send individually
* addressed PREQs destined for the root mesh STA */
memcpy(mpath->rann_snd_addr, mgmt->sa, ETH_ALEN);
if (root_is_gate)
mesh_path_add_gate(mpath);
if (ttl <= 1) {
ifmsh->mshstats.dropped_frames_ttl++;
rcu_read_unlock();
return;
}
ttl--;
if (ifmsh->mshcfg.dot11MeshForwarding) {
mesh_path_sel_frame_tx(MPATH_RANN, flags, orig_addr,
orig_sn, 0, NULL, 0, broadcast_addr,
hopcount, ttl, interval,
new_metric, 0, sdata);
}
rcu_read_unlock();
}
void mesh_rx_path_sel_frame(struct ieee80211_sub_if_data *sdata,
struct ieee80211_mgmt *mgmt, size_t len)
{
struct ieee802_11_elems *elems;
size_t baselen;
u32 path_metric;
struct sta_info *sta;
/* need action_code */
if (len < IEEE80211_MIN_ACTION_SIZE + 1)
return;
rcu_read_lock();
sta = sta_info_get(sdata, mgmt->sa);
if (!sta || sta->mesh->plink_state != NL80211_PLINK_ESTAB) {
rcu_read_unlock();
return;
}
rcu_read_unlock();
baselen = (u8 *) mgmt->u.action.u.mesh_action.variable - (u8 *) mgmt;
elems = ieee802_11_parse_elems(mgmt->u.action.u.mesh_action.variable,
len - baselen, false, NULL);
if (!elems)
return;
if (elems->preq) {
if (elems->preq_len != 37)
/* Right now we support just 1 destination and no AE */
goto free;
path_metric = hwmp_route_info_get(sdata, mgmt, elems->preq,
MPATH_PREQ);
if (path_metric)
hwmp_preq_frame_process(sdata, mgmt, elems->preq,
path_metric);
}
if (elems->prep) {
if (elems->prep_len != 31)
/* Right now we support no AE */
goto free;
path_metric = hwmp_route_info_get(sdata, mgmt, elems->prep,
MPATH_PREP);
if (path_metric)
hwmp_prep_frame_process(sdata, mgmt, elems->prep,
path_metric);
}
if (elems->perr) {
if (elems->perr_len != 15)
/* Right now we support only one destination per PERR */
goto free;
hwmp_perr_frame_process(sdata, mgmt, elems->perr);
}
if (elems->rann)
hwmp_rann_frame_process(sdata, mgmt, elems->rann);
free:
kfree(elems);
}
/**
* mesh_queue_preq - queue a PREQ to a given destination
*
* @mpath: mesh path to discover
* @flags: special attributes of the PREQ to be sent
*
* Locking: the function must be called from within an RCU read lock block.
*
*/
static void mesh_queue_preq(struct mesh_path *mpath, u8 flags)
{
struct ieee80211_sub_if_data *sdata = mpath->sdata;
struct ieee80211_if_mesh *ifmsh = &sdata->u.mesh;
struct mesh_preq_queue *preq_node;
preq_node = kmalloc(sizeof(struct mesh_preq_queue), GFP_ATOMIC);
if (!preq_node) {
mhwmp_dbg(sdata, "could not allocate PREQ node\n");
return;
}
spin_lock_bh(&ifmsh->mesh_preq_queue_lock);
if (ifmsh->preq_queue_len == MAX_PREQ_QUEUE_LEN) {
spin_unlock_bh(&ifmsh->mesh_preq_queue_lock);
kfree(preq_node);
if (printk_ratelimit())
mhwmp_dbg(sdata, "PREQ node queue full\n");
return;
}
spin_lock(&mpath->state_lock);
if (mpath->flags & MESH_PATH_REQ_QUEUED) {
spin_unlock(&mpath->state_lock);
spin_unlock_bh(&ifmsh->mesh_preq_queue_lock);
kfree(preq_node);
return;
}
memcpy(preq_node->dst, mpath->dst, ETH_ALEN);
preq_node->flags = flags;
mpath->flags |= MESH_PATH_REQ_QUEUED;
spin_unlock(&mpath->state_lock);
list_add_tail(&preq_node->list, &ifmsh->preq_queue.list);
++ifmsh->preq_queue_len;
spin_unlock_bh(&ifmsh->mesh_preq_queue_lock);
if (time_after(jiffies, ifmsh->last_preq + min_preq_int_jiff(sdata)))
wiphy_work_queue(sdata->local->hw.wiphy, &sdata->work);
else if (time_before(jiffies, ifmsh->last_preq)) {
/* avoid a long wait if we did not send PREQs for a long time
* and jiffies wrapped around
*/
ifmsh->last_preq = jiffies - min_preq_int_jiff(sdata) - 1;
wiphy_work_queue(sdata->local->hw.wiphy, &sdata->work);
} else
mod_timer(&ifmsh->mesh_path_timer, ifmsh->last_preq +
min_preq_int_jiff(sdata));
}
/**
* mesh_path_start_discovery - launch a path discovery from the PREQ queue
*
* @sdata: local mesh subif
*/
void mesh_path_start_discovery(struct ieee80211_sub_if_data *sdata)
{
struct ieee80211_if_mesh *ifmsh = &sdata->u.mesh;
struct mesh_preq_queue *preq_node;
struct mesh_path *mpath;
u8 ttl, target_flags = 0;
const u8 *da;
u32 lifetime;
spin_lock_bh(&ifmsh->mesh_preq_queue_lock);
if (!ifmsh->preq_queue_len ||
time_before(jiffies, ifmsh->last_preq +
min_preq_int_jiff(sdata))) {
spin_unlock_bh(&ifmsh->mesh_preq_queue_lock);
return;
}
preq_node = list_first_entry(&ifmsh->preq_queue.list,
struct mesh_preq_queue, list);
list_del(&preq_node->list);
--ifmsh->preq_queue_len;
spin_unlock_bh(&ifmsh->mesh_preq_queue_lock);
rcu_read_lock();
mpath = mesh_path_lookup(sdata, preq_node->dst);
if (!mpath)
goto enddiscovery;
spin_lock_bh(&mpath->state_lock);
if (mpath->flags & (MESH_PATH_DELETED | MESH_PATH_FIXED)) {
spin_unlock_bh(&mpath->state_lock);
goto enddiscovery;
}
mpath->flags &= ~MESH_PATH_REQ_QUEUED;
if (preq_node->flags & PREQ_Q_F_START) {
if (mpath->flags & MESH_PATH_RESOLVING) {
spin_unlock_bh(&mpath->state_lock);
goto enddiscovery;
} else {
mpath->flags &= ~MESH_PATH_RESOLVED;
mpath->flags |= MESH_PATH_RESOLVING;
mpath->discovery_retries = 0;
mpath->discovery_timeout = disc_timeout_jiff(sdata);
}
} else if (!(mpath->flags & MESH_PATH_RESOLVING) ||
mpath->flags & MESH_PATH_RESOLVED) {
mpath->flags &= ~MESH_PATH_RESOLVING;
spin_unlock_bh(&mpath->state_lock);
goto enddiscovery;
}
ifmsh->last_preq = jiffies;
if (time_after(jiffies, ifmsh->last_sn_update +
net_traversal_jiffies(sdata)) ||
time_before(jiffies, ifmsh->last_sn_update)) {
++ifmsh->sn;
sdata->u.mesh.last_sn_update = jiffies;
}
lifetime = default_lifetime(sdata);
ttl = sdata->u.mesh.mshcfg.element_ttl;
if (ttl == 0) {
sdata->u.mesh.mshstats.dropped_frames_ttl++;
spin_unlock_bh(&mpath->state_lock);
goto enddiscovery;
}
if (preq_node->flags & PREQ_Q_F_REFRESH)
target_flags |= IEEE80211_PREQ_TO_FLAG;
else
target_flags &= ~IEEE80211_PREQ_TO_FLAG;
spin_unlock_bh(&mpath->state_lock);
da = (mpath->is_root) ? mpath->rann_snd_addr : broadcast_addr;
mesh_path_sel_frame_tx(MPATH_PREQ, 0, sdata->vif.addr, ifmsh->sn,
target_flags, mpath->dst, mpath->sn, da, 0,
ttl, lifetime, 0, ifmsh->preq_id++, sdata);
spin_lock_bh(&mpath->state_lock);
if (!(mpath->flags & MESH_PATH_DELETED))
mod_timer(&mpath->timer, jiffies + mpath->discovery_timeout);
spin_unlock_bh(&mpath->state_lock);
enddiscovery:
rcu_read_unlock();
kfree(preq_node);
}
/**
* mesh_nexthop_resolve - lookup next hop; conditionally start path discovery
*
* @skb: 802.11 frame to be sent
* @sdata: network subif the frame will be sent through
*
* Lookup next hop for given skb and start path discovery if no
* forwarding information is found.
*
* Returns: 0 if the next hop was found and -ENOENT if the frame was queued.
* skb is freed here if no mpath could be allocated.
*/
int mesh_nexthop_resolve(struct ieee80211_sub_if_data *sdata,
struct sk_buff *skb)
{
struct ieee80211_hdr *hdr = (struct ieee80211_hdr *) skb->data;
struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb);
struct mesh_path *mpath;
struct sk_buff *skb_to_free = NULL;
u8 *target_addr = hdr->addr3;
/* Nulls are only sent to peers for PS and should be pre-addressed */
if (ieee80211_is_qos_nullfunc(hdr->frame_control))
return 0;
/* Allow injected packets to bypass mesh routing */
if (info->control.flags & IEEE80211_TX_CTRL_SKIP_MPATH_LOOKUP)
return 0;
if (!mesh_nexthop_lookup(sdata, skb))
return 0;
/* no nexthop found, start resolving */
mpath = mesh_path_lookup(sdata, target_addr);
if (!mpath) {
mpath = mesh_path_add(sdata, target_addr);
if (IS_ERR(mpath)) {
mesh_path_discard_frame(sdata, skb);
return PTR_ERR(mpath);
}
}
if (!(mpath->flags & MESH_PATH_RESOLVING) &&
mesh_path_sel_is_hwmp(sdata))
mesh_queue_preq(mpath, PREQ_Q_F_START);
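/*
* Park the frame on the mpath queue while discovery runs; if the queue
* is full, drop the oldest entry.  mesh_path_tx_pending() flushes the
* queue once routing information for the target becomes available.
*/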
if (skb_queue_len(&mpath->frame_queue) >= MESH_FRAME_QUEUE_LEN)
skb_to_free = skb_dequeue(&mpath->frame_queue);
info->control.flags |= IEEE80211_TX_INTCFL_NEED_TXPROCESSING;
ieee80211_set_qos_hdr(sdata, skb);
skb_queue_tail(&mpath->frame_queue, skb);
if (skb_to_free)
mesh_path_discard_frame(sdata, skb_to_free);
return -ENOENT;
}
/**
* mesh_nexthop_lookup_nolearn - try to set next hop without path discovery
* @skb: 802.11 frame to be sent
* @sdata: network subif the frame will be sent through
*
* Check if the meshDA (addr3) of a unicast frame is a direct neighbor.
* And if so, set the RA (addr1) to it to transmit to this node directly,
* avoiding PREQ/PREP path discovery.
*
* Returns: 0 if the next hop was found and -ENOENT otherwise.
*/
static int mesh_nexthop_lookup_nolearn(struct ieee80211_sub_if_data *sdata,
struct sk_buff *skb)
{
struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)skb->data;
struct sta_info *sta;
if (is_multicast_ether_addr(hdr->addr1))
return -ENOENT;
rcu_read_lock();
sta = sta_info_get(sdata, hdr->addr3);
if (!sta || sta->mesh->plink_state != NL80211_PLINK_ESTAB) {
rcu_read_unlock();
return -ENOENT;
}
rcu_read_unlock();
memcpy(hdr->addr1, hdr->addr3, ETH_ALEN);
memcpy(hdr->addr2, sdata->vif.addr, ETH_ALEN);
return 0;
}
void mesh_path_refresh(struct ieee80211_sub_if_data *sdata,
struct mesh_path *mpath, const u8 *addr)
{
if (mpath->flags & (MESH_PATH_REQ_QUEUED | MESH_PATH_FIXED |
MESH_PATH_RESOLVING))
return;
if (time_after(jiffies,
mpath->exp_time -
msecs_to_jiffies(sdata->u.mesh.mshcfg.path_refresh_time)) &&
(!addr || ether_addr_equal(sdata->vif.addr, addr)))
mesh_queue_preq(mpath, PREQ_Q_F_START | PREQ_Q_F_REFRESH);
}
/**
* mesh_nexthop_lookup - put the appropriate next hop on a mesh frame. Calling
* this function is considered "using" the associated mpath, so preempt a path
* refresh if this mpath expires soon.
*
* @skb: 802.11 frame to be sent
* @sdata: network subif the frame will be sent through
*
* Returns: 0 if the next hop was found. Nonzero otherwise.
*/
int mesh_nexthop_lookup(struct ieee80211_sub_if_data *sdata,
struct sk_buff *skb)
{
struct ieee80211_if_mesh *ifmsh = &sdata->u.mesh;
struct mesh_path *mpath;
struct sta_info *next_hop;
struct ieee80211_hdr *hdr = (struct ieee80211_hdr *) skb->data;
u8 *target_addr = hdr->addr3;
if (ifmsh->mshcfg.dot11MeshNolearn &&
!mesh_nexthop_lookup_nolearn(sdata, skb))
return 0;
mpath = mesh_path_lookup(sdata, target_addr);
if (!mpath || !(mpath->flags & MESH_PATH_ACTIVE))
return -ENOENT;
mesh_path_refresh(sdata, mpath, hdr->addr4);
next_hop = rcu_dereference(mpath->next_hop);
if (next_hop) {
memcpy(hdr->addr1, next_hop->sta.addr, ETH_ALEN);
memcpy(hdr->addr2, sdata->vif.addr, ETH_ALEN);
ieee80211_mps_set_frame_flags(sdata, next_hop, hdr);
if (ieee80211_hw_check(&sdata->local->hw, SUPPORT_FAST_XMIT))
mesh_fast_tx_cache(sdata, skb, mpath);
return 0;
}
return -ENOENT;
}
void mesh_path_timer(struct timer_list *t)
{
struct mesh_path *mpath = from_timer(mpath, t, timer);
struct ieee80211_sub_if_data *sdata = mpath->sdata;
int ret;
if (sdata->local->quiescing)
return;
spin_lock_bh(&mpath->state_lock);
if (mpath->flags & MESH_PATH_RESOLVED ||
(!(mpath->flags & MESH_PATH_RESOLVING))) {
mpath->flags &= ~(MESH_PATH_RESOLVING | MESH_PATH_RESOLVED);
spin_unlock_bh(&mpath->state_lock);
} else if (mpath->discovery_retries < max_preq_retries(sdata)) {
++mpath->discovery_retries;
mpath->discovery_timeout *= 2;
mpath->flags &= ~MESH_PATH_REQ_QUEUED;
spin_unlock_bh(&mpath->state_lock);
mesh_queue_preq(mpath, 0);
} else {
mpath->flags &= ~(MESH_PATH_RESOLVING |
MESH_PATH_RESOLVED |
MESH_PATH_REQ_QUEUED);
mpath->exp_time = jiffies;
spin_unlock_bh(&mpath->state_lock);
if (!mpath->is_gate && mesh_gate_num(sdata) > 0) {
ret = mesh_path_send_to_gates(mpath);
if (ret)
mhwmp_dbg(sdata, "no gate was reachable\n");
} else
mesh_path_flush_pending(mpath);
}
}
void mesh_path_tx_root_frame(struct ieee80211_sub_if_data *sdata)
{
struct ieee80211_if_mesh *ifmsh = &sdata->u.mesh;
u32 interval = ifmsh->mshcfg.dot11MeshHWMPRannInterval;
u8 flags, target_flags = 0;
flags = (ifmsh->mshcfg.dot11MeshGateAnnouncementProtocol)
? RANN_FLAG_IS_GATE : 0;
switch (ifmsh->mshcfg.dot11MeshHWMPRootMode) {
case IEEE80211_PROACTIVE_RANN:
mesh_path_sel_frame_tx(MPATH_RANN, flags, sdata->vif.addr,
++ifmsh->sn, 0, NULL, 0, broadcast_addr,
0, ifmsh->mshcfg.element_ttl,
interval, 0, 0, sdata);
break;
case IEEE80211_PROACTIVE_PREQ_WITH_PREP:
flags |= IEEE80211_PREQ_PROACTIVE_PREP_FLAG;
fallthrough;
case IEEE80211_PROACTIVE_PREQ_NO_PREP:
interval = ifmsh->mshcfg.dot11MeshHWMPactivePathToRootTimeout;
target_flags |= IEEE80211_PREQ_TO_FLAG |
IEEE80211_PREQ_USN_FLAG;
mesh_path_sel_frame_tx(MPATH_PREQ, flags, sdata->vif.addr,
++ifmsh->sn, target_flags,
(u8 *) broadcast_addr, 0, broadcast_addr,
0, ifmsh->mshcfg.element_ttl, interval,
0, ifmsh->preq_id++, sdata);
break;
default:
mhwmp_dbg(sdata, "Proactive mechanism not supported\n");
return;
}
}
| linux-master | net/mac80211/mesh_hwmp.c |
// SPDX-License-Identifier: GPL-2.0-only
/*
* VHT handling
*
* Portions of this file
* Copyright(c) 2015 - 2016 Intel Deutschland GmbH
* Copyright (C) 2018 - 2022 Intel Corporation
*/
#include <linux/ieee80211.h>
#include <linux/export.h>
#include <net/mac80211.h>
#include "ieee80211_i.h"
#include "rate.h"
static void __check_vhtcap_disable(struct ieee80211_sub_if_data *sdata,
struct ieee80211_sta_vht_cap *vht_cap,
u32 flag)
{
__le32 le_flag = cpu_to_le32(flag);
if (sdata->u.mgd.vht_capa_mask.vht_cap_info & le_flag &&
!(sdata->u.mgd.vht_capa.vht_cap_info & le_flag))
vht_cap->cap &= ~flag;
}
void ieee80211_apply_vhtcap_overrides(struct ieee80211_sub_if_data *sdata,
struct ieee80211_sta_vht_cap *vht_cap)
{
int i;
u16 rxmcs_mask, rxmcs_cap, rxmcs_n, txmcs_mask, txmcs_cap, txmcs_n;
if (!vht_cap->vht_supported)
return;
if (sdata->vif.type != NL80211_IFTYPE_STATION)
return;
__check_vhtcap_disable(sdata, vht_cap,
IEEE80211_VHT_CAP_RXLDPC);
__check_vhtcap_disable(sdata, vht_cap,
IEEE80211_VHT_CAP_SHORT_GI_80);
__check_vhtcap_disable(sdata, vht_cap,
IEEE80211_VHT_CAP_SHORT_GI_160);
__check_vhtcap_disable(sdata, vht_cap,
IEEE80211_VHT_CAP_TXSTBC);
__check_vhtcap_disable(sdata, vht_cap,
IEEE80211_VHT_CAP_SU_BEAMFORMER_CAPABLE);
__check_vhtcap_disable(sdata, vht_cap,
IEEE80211_VHT_CAP_SU_BEAMFORMEE_CAPABLE);
__check_vhtcap_disable(sdata, vht_cap,
IEEE80211_VHT_CAP_RX_ANTENNA_PATTERN);
__check_vhtcap_disable(sdata, vht_cap,
IEEE80211_VHT_CAP_TX_ANTENNA_PATTERN);
/* Allow user to decrease AMPDU length exponent */
if (sdata->u.mgd.vht_capa_mask.vht_cap_info &
cpu_to_le32(IEEE80211_VHT_CAP_MAX_A_MPDU_LENGTH_EXPONENT_MASK)) {
u32 cap, n;
n = le32_to_cpu(sdata->u.mgd.vht_capa.vht_cap_info) &
IEEE80211_VHT_CAP_MAX_A_MPDU_LENGTH_EXPONENT_MASK;
n >>= IEEE80211_VHT_CAP_MAX_A_MPDU_LENGTH_EXPONENT_SHIFT;
cap = vht_cap->cap & IEEE80211_VHT_CAP_MAX_A_MPDU_LENGTH_EXPONENT_MASK;
cap >>= IEEE80211_VHT_CAP_MAX_A_MPDU_LENGTH_EXPONENT_SHIFT;
if (n < cap) {
vht_cap->cap &=
~IEEE80211_VHT_CAP_MAX_A_MPDU_LENGTH_EXPONENT_MASK;
vht_cap->cap |=
n << IEEE80211_VHT_CAP_MAX_A_MPDU_LENGTH_EXPONENT_SHIFT;
}
}
/* Allow the user to decrease MCSes */
rxmcs_mask =
le16_to_cpu(sdata->u.mgd.vht_capa_mask.supp_mcs.rx_mcs_map);
rxmcs_n = le16_to_cpu(sdata->u.mgd.vht_capa.supp_mcs.rx_mcs_map);
rxmcs_n &= rxmcs_mask;
rxmcs_cap = le16_to_cpu(vht_cap->vht_mcs.rx_mcs_map);
txmcs_mask =
le16_to_cpu(sdata->u.mgd.vht_capa_mask.supp_mcs.tx_mcs_map);
txmcs_n = le16_to_cpu(sdata->u.mgd.vht_capa.supp_mcs.tx_mcs_map);
txmcs_n &= txmcs_mask;
txmcs_cap = le16_to_cpu(vht_cap->vht_mcs.tx_mcs_map);
for (i = 0; i < 8; i++) {
u8 m, n, c;
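/*
* Each MCS map packs eight 2-bit entries, one per spatial stream:
* m = the user-supplied mask for this stream, n = the value the
* user requested, c = what the hardware reports.  Entries are only
* ever clamped downwards (fewer MCSes or not supported).
*/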
m = (rxmcs_mask >> 2*i) & IEEE80211_VHT_MCS_NOT_SUPPORTED;
n = (rxmcs_n >> 2*i) & IEEE80211_VHT_MCS_NOT_SUPPORTED;
c = (rxmcs_cap >> 2*i) & IEEE80211_VHT_MCS_NOT_SUPPORTED;
if (m && ((c != IEEE80211_VHT_MCS_NOT_SUPPORTED && n < c) ||
n == IEEE80211_VHT_MCS_NOT_SUPPORTED)) {
rxmcs_cap &= ~(3 << 2*i);
rxmcs_cap |= (rxmcs_n & (3 << 2*i));
}
m = (txmcs_mask >> 2*i) & IEEE80211_VHT_MCS_NOT_SUPPORTED;
n = (txmcs_n >> 2*i) & IEEE80211_VHT_MCS_NOT_SUPPORTED;
c = (txmcs_cap >> 2*i) & IEEE80211_VHT_MCS_NOT_SUPPORTED;
if (m && ((c != IEEE80211_VHT_MCS_NOT_SUPPORTED && n < c) ||
n == IEEE80211_VHT_MCS_NOT_SUPPORTED)) {
txmcs_cap &= ~(3 << 2*i);
txmcs_cap |= (txmcs_n & (3 << 2*i));
}
}
vht_cap->vht_mcs.rx_mcs_map = cpu_to_le16(rxmcs_cap);
vht_cap->vht_mcs.tx_mcs_map = cpu_to_le16(txmcs_cap);
}
void
ieee80211_vht_cap_ie_to_sta_vht_cap(struct ieee80211_sub_if_data *sdata,
struct ieee80211_supported_band *sband,
const struct ieee80211_vht_cap *vht_cap_ie,
struct link_sta_info *link_sta)
{
struct ieee80211_sta_vht_cap *vht_cap = &link_sta->pub->vht_cap;
struct ieee80211_sta_vht_cap own_cap;
u32 cap_info, i;
bool have_80mhz;
memset(vht_cap, 0, sizeof(*vht_cap));
if (!link_sta->pub->ht_cap.ht_supported)
return;
if (!vht_cap_ie || !sband->vht_cap.vht_supported)
return;
/* Allow VHT if at least one channel on the sband supports 80 MHz */
have_80mhz = false;
for (i = 0; i < sband->n_channels; i++) {
if (sband->channels[i].flags & (IEEE80211_CHAN_DISABLED |
IEEE80211_CHAN_NO_80MHZ))
continue;
have_80mhz = true;
break;
}
if (!have_80mhz)
return;
/*
* A VHT STA must support 40 MHz, but if we verify that here
* then we break a few things - some APs (e.g. Netgear R6300v2
* and others based on the BCM4360 chipset) will unset this
* capability bit when operating in 20 MHz.
*/
vht_cap->vht_supported = true;
own_cap = sband->vht_cap;
/*
* If user has specified capability overrides, take care
* of that if the station we're setting up is the AP that
* we advertised a restricted capability set to. Override
* our own capabilities and then use those below.
*/
if (sdata->vif.type == NL80211_IFTYPE_STATION &&
!test_sta_flag(link_sta->sta, WLAN_STA_TDLS_PEER))
ieee80211_apply_vhtcap_overrides(sdata, &own_cap);
/* take some capabilities as-is */
cap_info = le32_to_cpu(vht_cap_ie->vht_cap_info);
vht_cap->cap = cap_info;
vht_cap->cap &= IEEE80211_VHT_CAP_RXLDPC |
IEEE80211_VHT_CAP_VHT_TXOP_PS |
IEEE80211_VHT_CAP_HTC_VHT |
IEEE80211_VHT_CAP_MAX_A_MPDU_LENGTH_EXPONENT_MASK |
IEEE80211_VHT_CAP_VHT_LINK_ADAPTATION_VHT_UNSOL_MFB |
IEEE80211_VHT_CAP_VHT_LINK_ADAPTATION_VHT_MRQ_MFB |
IEEE80211_VHT_CAP_RX_ANTENNA_PATTERN |
IEEE80211_VHT_CAP_TX_ANTENNA_PATTERN;
vht_cap->cap |= min_t(u32, cap_info & IEEE80211_VHT_CAP_MAX_MPDU_MASK,
own_cap.cap & IEEE80211_VHT_CAP_MAX_MPDU_MASK);
/* and some based on our own capabilities */
switch (own_cap.cap & IEEE80211_VHT_CAP_SUPP_CHAN_WIDTH_MASK) {
case IEEE80211_VHT_CAP_SUPP_CHAN_WIDTH_160MHZ:
vht_cap->cap |= cap_info &
IEEE80211_VHT_CAP_SUPP_CHAN_WIDTH_160MHZ;
break;
case IEEE80211_VHT_CAP_SUPP_CHAN_WIDTH_160_80PLUS80MHZ:
vht_cap->cap |= cap_info &
IEEE80211_VHT_CAP_SUPP_CHAN_WIDTH_MASK;
break;
default:
/* nothing */
break;
}
/* symmetric capabilities */
vht_cap->cap |= cap_info & own_cap.cap &
(IEEE80211_VHT_CAP_SHORT_GI_80 |
IEEE80211_VHT_CAP_SHORT_GI_160);
/* remaining ones */
if (own_cap.cap & IEEE80211_VHT_CAP_SU_BEAMFORMEE_CAPABLE)
vht_cap->cap |= cap_info &
(IEEE80211_VHT_CAP_SU_BEAMFORMER_CAPABLE |
IEEE80211_VHT_CAP_SOUNDING_DIMENSIONS_MASK);
if (own_cap.cap & IEEE80211_VHT_CAP_SU_BEAMFORMER_CAPABLE)
vht_cap->cap |= cap_info &
(IEEE80211_VHT_CAP_SU_BEAMFORMEE_CAPABLE |
IEEE80211_VHT_CAP_BEAMFORMEE_STS_MASK);
if (own_cap.cap & IEEE80211_VHT_CAP_MU_BEAMFORMER_CAPABLE)
vht_cap->cap |= cap_info &
IEEE80211_VHT_CAP_MU_BEAMFORMEE_CAPABLE;
if (own_cap.cap & IEEE80211_VHT_CAP_MU_BEAMFORMEE_CAPABLE)
vht_cap->cap |= cap_info &
IEEE80211_VHT_CAP_MU_BEAMFORMER_CAPABLE;
if (own_cap.cap & IEEE80211_VHT_CAP_TXSTBC)
vht_cap->cap |= cap_info & IEEE80211_VHT_CAP_RXSTBC_MASK;
if (own_cap.cap & IEEE80211_VHT_CAP_RXSTBC_MASK)
vht_cap->cap |= cap_info & IEEE80211_VHT_CAP_TXSTBC;
/* Copy peer MCS info, the driver might need them. */
memcpy(&vht_cap->vht_mcs, &vht_cap_ie->supp_mcs,
sizeof(struct ieee80211_vht_mcs_info));
/* copy EXT_NSS_BW Support value or remove the capability */
if (ieee80211_hw_check(&sdata->local->hw, SUPPORTS_VHT_EXT_NSS_BW))
vht_cap->cap |= (cap_info & IEEE80211_VHT_CAP_EXT_NSS_BW_MASK);
else
vht_cap->vht_mcs.tx_highest &=
~cpu_to_le16(IEEE80211_VHT_EXT_NSS_BW_CAPABLE);
/* but also restrict MCSes */
for (i = 0; i < 8; i++) {
u16 own_rx, own_tx, peer_rx, peer_tx;
own_rx = le16_to_cpu(own_cap.vht_mcs.rx_mcs_map);
own_rx = (own_rx >> i * 2) & IEEE80211_VHT_MCS_NOT_SUPPORTED;
own_tx = le16_to_cpu(own_cap.vht_mcs.tx_mcs_map);
own_tx = (own_tx >> i * 2) & IEEE80211_VHT_MCS_NOT_SUPPORTED;
peer_rx = le16_to_cpu(vht_cap->vht_mcs.rx_mcs_map);
peer_rx = (peer_rx >> i * 2) & IEEE80211_VHT_MCS_NOT_SUPPORTED;
peer_tx = le16_to_cpu(vht_cap->vht_mcs.tx_mcs_map);
peer_tx = (peer_tx >> i * 2) & IEEE80211_VHT_MCS_NOT_SUPPORTED;
if (peer_tx != IEEE80211_VHT_MCS_NOT_SUPPORTED) {
if (own_rx == IEEE80211_VHT_MCS_NOT_SUPPORTED)
peer_tx = IEEE80211_VHT_MCS_NOT_SUPPORTED;
else if (own_rx < peer_tx)
peer_tx = own_rx;
}
if (peer_rx != IEEE80211_VHT_MCS_NOT_SUPPORTED) {
if (own_tx == IEEE80211_VHT_MCS_NOT_SUPPORTED)
peer_rx = IEEE80211_VHT_MCS_NOT_SUPPORTED;
else if (own_tx < peer_rx)
peer_rx = own_tx;
}
vht_cap->vht_mcs.rx_mcs_map &=
~cpu_to_le16(IEEE80211_VHT_MCS_NOT_SUPPORTED << i * 2);
vht_cap->vht_mcs.rx_mcs_map |= cpu_to_le16(peer_rx << i * 2);
vht_cap->vht_mcs.tx_mcs_map &=
~cpu_to_le16(IEEE80211_VHT_MCS_NOT_SUPPORTED << i * 2);
vht_cap->vht_mcs.tx_mcs_map |= cpu_to_le16(peer_tx << i * 2);
}
/*
* This is a workaround for VHT-enabled STAs which break the spec
* and have the VHT-MCS Rx map filled in with value 3 for all eight
* spatial streams; one example is the AR9462.
*
* As per spec, in section 22.1.1 Introduction to the VHT PHY
* A VHT STA shall support at least single spatial stream VHT-MCSs
* 0 to 7 (transmit and receive) in all supported channel widths.
*/
if (vht_cap->vht_mcs.rx_mcs_map == cpu_to_le16(0xFFFF)) {
vht_cap->vht_supported = false;
sdata_info(sdata,
"Ignoring VHT IE from %pM (link:%pM) due to invalid rx_mcs_map\n",
link_sta->sta->addr, link_sta->addr);
return;
}
/* finally set up the bandwidth */
switch (vht_cap->cap & IEEE80211_VHT_CAP_SUPP_CHAN_WIDTH_MASK) {
case IEEE80211_VHT_CAP_SUPP_CHAN_WIDTH_160MHZ:
case IEEE80211_VHT_CAP_SUPP_CHAN_WIDTH_160_80PLUS80MHZ:
link_sta->cur_max_bandwidth = IEEE80211_STA_RX_BW_160;
break;
default:
link_sta->cur_max_bandwidth = IEEE80211_STA_RX_BW_80;
if (!(vht_cap->vht_mcs.tx_highest &
cpu_to_le16(IEEE80211_VHT_EXT_NSS_BW_CAPABLE)))
break;
/*
* If this is non-zero, then it does support 160 MHz after all,
* in one form or the other. We don't distinguish here (or even
* above) between 160 and 80+80 yet.
*/
if (cap_info & IEEE80211_VHT_CAP_EXT_NSS_BW_MASK)
link_sta->cur_max_bandwidth =
IEEE80211_STA_RX_BW_160;
}
link_sta->pub->bandwidth = ieee80211_sta_cur_vht_bw(link_sta);
/*
* FIXME - should the amsdu len be per link? store per link
* and maintain a minimum?
*/
switch (vht_cap->cap & IEEE80211_VHT_CAP_MAX_MPDU_MASK) {
case IEEE80211_VHT_CAP_MAX_MPDU_LENGTH_11454:
link_sta->pub->agg.max_amsdu_len = IEEE80211_MAX_MPDU_LEN_VHT_11454;
break;
case IEEE80211_VHT_CAP_MAX_MPDU_LENGTH_7991:
link_sta->pub->agg.max_amsdu_len = IEEE80211_MAX_MPDU_LEN_VHT_7991;
break;
case IEEE80211_VHT_CAP_MAX_MPDU_LENGTH_3895:
default:
link_sta->pub->agg.max_amsdu_len = IEEE80211_MAX_MPDU_LEN_VHT_3895;
break;
}
ieee80211_sta_recalc_aggregates(&link_sta->sta->sta);
}
/* FIXME: move this to some better location - parses HE/EHT now */
enum ieee80211_sta_rx_bandwidth
ieee80211_sta_cap_rx_bw(struct link_sta_info *link_sta)
{
unsigned int link_id = link_sta->link_id;
struct ieee80211_sub_if_data *sdata = link_sta->sta->sdata;
struct ieee80211_sta_vht_cap *vht_cap = &link_sta->pub->vht_cap;
struct ieee80211_sta_he_cap *he_cap = &link_sta->pub->he_cap;
struct ieee80211_sta_eht_cap *eht_cap = &link_sta->pub->eht_cap;
u32 cap_width;
if (he_cap->has_he) {
struct ieee80211_bss_conf *link_conf;
enum ieee80211_sta_rx_bandwidth ret;
u8 info;
rcu_read_lock();
link_conf = rcu_dereference(sdata->vif.link_conf[link_id]);
if (eht_cap->has_eht &&
link_conf->chandef.chan->band == NL80211_BAND_6GHZ) {
info = eht_cap->eht_cap_elem.phy_cap_info[0];
if (info & IEEE80211_EHT_PHY_CAP0_320MHZ_IN_6GHZ) {
ret = IEEE80211_STA_RX_BW_320;
goto out;
}
}
info = he_cap->he_cap_elem.phy_cap_info[0];
if (link_conf->chandef.chan->band == NL80211_BAND_2GHZ) {
if (info & IEEE80211_HE_PHY_CAP0_CHANNEL_WIDTH_SET_40MHZ_IN_2G)
ret = IEEE80211_STA_RX_BW_40;
else
ret = IEEE80211_STA_RX_BW_20;
goto out;
}
if (info & IEEE80211_HE_PHY_CAP0_CHANNEL_WIDTH_SET_160MHZ_IN_5G ||
info & IEEE80211_HE_PHY_CAP0_CHANNEL_WIDTH_SET_80PLUS80_MHZ_IN_5G)
ret = IEEE80211_STA_RX_BW_160;
else if (info & IEEE80211_HE_PHY_CAP0_CHANNEL_WIDTH_SET_40MHZ_80MHZ_IN_5G)
ret = IEEE80211_STA_RX_BW_80;
else
ret = IEEE80211_STA_RX_BW_20;
out:
rcu_read_unlock();
return ret;
}
if (!vht_cap->vht_supported)
return link_sta->pub->ht_cap.cap & IEEE80211_HT_CAP_SUP_WIDTH_20_40 ?
IEEE80211_STA_RX_BW_40 :
IEEE80211_STA_RX_BW_20;
cap_width = vht_cap->cap & IEEE80211_VHT_CAP_SUPP_CHAN_WIDTH_MASK;
if (cap_width == IEEE80211_VHT_CAP_SUPP_CHAN_WIDTH_160MHZ ||
cap_width == IEEE80211_VHT_CAP_SUPP_CHAN_WIDTH_160_80PLUS80MHZ)
return IEEE80211_STA_RX_BW_160;
/*
* If this is non-zero, then it does support 160 MHz after all,
* in one form or the other. We don't distinguish here (or even
* above) between 160 and 80+80 yet.
*/
if (vht_cap->cap & IEEE80211_VHT_CAP_EXT_NSS_BW_MASK)
return IEEE80211_STA_RX_BW_160;
return IEEE80211_STA_RX_BW_80;
}
enum nl80211_chan_width
ieee80211_sta_cap_chan_bw(struct link_sta_info *link_sta)
{
struct ieee80211_sta_vht_cap *vht_cap = &link_sta->pub->vht_cap;
u32 cap_width;
if (!vht_cap->vht_supported) {
if (!link_sta->pub->ht_cap.ht_supported)
return NL80211_CHAN_WIDTH_20_NOHT;
return link_sta->pub->ht_cap.cap & IEEE80211_HT_CAP_SUP_WIDTH_20_40 ?
NL80211_CHAN_WIDTH_40 : NL80211_CHAN_WIDTH_20;
}
cap_width = vht_cap->cap & IEEE80211_VHT_CAP_SUPP_CHAN_WIDTH_MASK;
if (cap_width == IEEE80211_VHT_CAP_SUPP_CHAN_WIDTH_160MHZ)
return NL80211_CHAN_WIDTH_160;
else if (cap_width == IEEE80211_VHT_CAP_SUPP_CHAN_WIDTH_160_80PLUS80MHZ)
return NL80211_CHAN_WIDTH_80P80;
return NL80211_CHAN_WIDTH_80;
}
enum nl80211_chan_width
ieee80211_sta_rx_bw_to_chan_width(struct link_sta_info *link_sta)
{
enum ieee80211_sta_rx_bandwidth cur_bw =
link_sta->pub->bandwidth;
struct ieee80211_sta_vht_cap *vht_cap =
&link_sta->pub->vht_cap;
u32 cap_width;
switch (cur_bw) {
case IEEE80211_STA_RX_BW_20:
if (!link_sta->pub->ht_cap.ht_supported)
return NL80211_CHAN_WIDTH_20_NOHT;
else
return NL80211_CHAN_WIDTH_20;
case IEEE80211_STA_RX_BW_40:
return NL80211_CHAN_WIDTH_40;
case IEEE80211_STA_RX_BW_80:
return NL80211_CHAN_WIDTH_80;
case IEEE80211_STA_RX_BW_160:
cap_width =
vht_cap->cap & IEEE80211_VHT_CAP_SUPP_CHAN_WIDTH_MASK;
if (cap_width == IEEE80211_VHT_CAP_SUPP_CHAN_WIDTH_160MHZ)
return NL80211_CHAN_WIDTH_160;
return NL80211_CHAN_WIDTH_80P80;
default:
return NL80211_CHAN_WIDTH_20;
}
}
enum ieee80211_sta_rx_bandwidth
ieee80211_chan_width_to_rx_bw(enum nl80211_chan_width width)
{
switch (width) {
case NL80211_CHAN_WIDTH_20_NOHT:
case NL80211_CHAN_WIDTH_20:
return IEEE80211_STA_RX_BW_20;
case NL80211_CHAN_WIDTH_40:
return IEEE80211_STA_RX_BW_40;
case NL80211_CHAN_WIDTH_80:
return IEEE80211_STA_RX_BW_80;
case NL80211_CHAN_WIDTH_160:
case NL80211_CHAN_WIDTH_80P80:
return IEEE80211_STA_RX_BW_160;
case NL80211_CHAN_WIDTH_320:
return IEEE80211_STA_RX_BW_320;
default:
WARN_ON_ONCE(1);
return IEEE80211_STA_RX_BW_20;
}
}
/* FIXME: rename/move - this deals with everything not just VHT */
enum ieee80211_sta_rx_bandwidth
ieee80211_sta_cur_vht_bw(struct link_sta_info *link_sta)
{
struct sta_info *sta = link_sta->sta;
struct ieee80211_bss_conf *link_conf;
enum nl80211_chan_width bss_width;
enum ieee80211_sta_rx_bandwidth bw;
rcu_read_lock();
link_conf = rcu_dereference(sta->sdata->vif.link_conf[link_sta->link_id]);
if (WARN_ON(!link_conf))
bss_width = NL80211_CHAN_WIDTH_20_NOHT;
else
bss_width = link_conf->chandef.width;
rcu_read_unlock();
bw = ieee80211_sta_cap_rx_bw(link_sta);
bw = min(bw, link_sta->cur_max_bandwidth);
	/* Don't consider the AP's bandwidth for TDLS peers; section 11.23.1
	 * of the IEEE 802.11-2016 specification allows higher-bandwidth
	 * operation on the TDLS link if the peers have wider bandwidth
	 * capability.
*
* However, in this case, and only if the TDLS peer is authorized,
* limit to the tdls_chandef so that the configuration here isn't
* wider than what's actually requested on the channel context.
*/
if (test_sta_flag(sta, WLAN_STA_TDLS_PEER) &&
test_sta_flag(sta, WLAN_STA_TDLS_WIDER_BW) &&
test_sta_flag(sta, WLAN_STA_AUTHORIZED) &&
sta->tdls_chandef.chan)
bw = min(bw, ieee80211_chan_width_to_rx_bw(sta->tdls_chandef.width));
else
bw = min(bw, ieee80211_chan_width_to_rx_bw(bss_width));
return bw;
}
void ieee80211_sta_set_rx_nss(struct link_sta_info *link_sta)
{
u8 ht_rx_nss = 0, vht_rx_nss = 0, he_rx_nss = 0, eht_rx_nss = 0, rx_nss;
bool support_160;
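	/*
	 * Compute the maximum RX NSS advertised by each of the EHT, HE, HT
	 * and VHT capabilities below, then use the largest of them (at
	 * least 1) as the station's rx_nss.
	 */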
	/* if we already received a notification, don't overwrite it */
if (link_sta->pub->rx_nss)
return;
if (link_sta->pub->eht_cap.has_eht) {
int i;
const u8 *rx_nss_mcs = (void *)&link_sta->pub->eht_cap.eht_mcs_nss_supp;
/* get the max nss for EHT over all possible bandwidths and mcs */
for (i = 0; i < sizeof(struct ieee80211_eht_mcs_nss_supp); i++)
eht_rx_nss = max_t(u8, eht_rx_nss,
u8_get_bits(rx_nss_mcs[i],
IEEE80211_EHT_MCS_NSS_RX));
}
if (link_sta->pub->he_cap.has_he) {
int i;
u8 rx_mcs_80 = 0, rx_mcs_160 = 0;
const struct ieee80211_sta_he_cap *he_cap = &link_sta->pub->he_cap;
u16 mcs_160_map =
le16_to_cpu(he_cap->he_mcs_nss_supp.rx_mcs_160);
u16 mcs_80_map = le16_to_cpu(he_cap->he_mcs_nss_supp.rx_mcs_80);
for (i = 7; i >= 0; i--) {
u8 mcs_160 = (mcs_160_map >> (2 * i)) & 3;
if (mcs_160 != IEEE80211_HE_MCS_NOT_SUPPORTED) {
rx_mcs_160 = i + 1;
break;
}
}
for (i = 7; i >= 0; i--) {
u8 mcs_80 = (mcs_80_map >> (2 * i)) & 3;
if (mcs_80 != IEEE80211_HE_MCS_NOT_SUPPORTED) {
rx_mcs_80 = i + 1;
break;
}
}
support_160 = he_cap->he_cap_elem.phy_cap_info[0] &
IEEE80211_HE_PHY_CAP0_CHANNEL_WIDTH_SET_160MHZ_IN_5G;
if (support_160)
he_rx_nss = min(rx_mcs_80, rx_mcs_160);
else
he_rx_nss = rx_mcs_80;
}
if (link_sta->pub->ht_cap.ht_supported) {
if (link_sta->pub->ht_cap.mcs.rx_mask[0])
ht_rx_nss++;
if (link_sta->pub->ht_cap.mcs.rx_mask[1])
ht_rx_nss++;
if (link_sta->pub->ht_cap.mcs.rx_mask[2])
ht_rx_nss++;
if (link_sta->pub->ht_cap.mcs.rx_mask[3])
ht_rx_nss++;
/* FIXME: consider rx_highest? */
}
if (link_sta->pub->vht_cap.vht_supported) {
int i;
u16 rx_mcs_map;
rx_mcs_map = le16_to_cpu(link_sta->pub->vht_cap.vht_mcs.rx_mcs_map);
for (i = 7; i >= 0; i--) {
u8 mcs = (rx_mcs_map >> (2 * i)) & 3;
if (mcs != IEEE80211_VHT_MCS_NOT_SUPPORTED) {
vht_rx_nss = i + 1;
break;
}
}
/* FIXME: consider rx_highest? */
}
rx_nss = max(vht_rx_nss, ht_rx_nss);
rx_nss = max(he_rx_nss, rx_nss);
rx_nss = max(eht_rx_nss, rx_nss);
link_sta->pub->rx_nss = max_t(u8, 1, rx_nss);
}
u32 __ieee80211_vht_handle_opmode(struct ieee80211_sub_if_data *sdata,
struct link_sta_info *link_sta,
u8 opmode, enum nl80211_band band)
{
enum ieee80211_sta_rx_bandwidth new_bw;
struct sta_opmode_info sta_opmode = {};
u32 changed = 0;
u8 nss, cur_nss;
/* ignore - no support for BF yet */
if (opmode & IEEE80211_OPMODE_NOTIF_RX_NSS_TYPE_BF)
return 0;
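	/* the Rx NSS subfield encodes the number of spatial streams minus one */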
nss = opmode & IEEE80211_OPMODE_NOTIF_RX_NSS_MASK;
nss >>= IEEE80211_OPMODE_NOTIF_RX_NSS_SHIFT;
nss += 1;
if (link_sta->pub->rx_nss != nss) {
cur_nss = link_sta->pub->rx_nss;
		/* Reset rx_nss and call ieee80211_sta_set_rx_nss(), which
		 * recomputes it as the maximum NSS supported by the
		 * station's capabilities.
		 */
link_sta->pub->rx_nss = 0;
ieee80211_sta_set_rx_nss(link_sta);
		/* Do not allow an NSS change to an rx_nss greater than the
		 * maximum NSS negotiated (and capped to the AP's capability)
		 * during association.
		 */
if (nss <= link_sta->pub->rx_nss) {
link_sta->pub->rx_nss = nss;
sta_opmode.rx_nss = nss;
changed |= IEEE80211_RC_NSS_CHANGED;
sta_opmode.changed |= STA_OPMODE_N_SS_CHANGED;
} else {
link_sta->pub->rx_nss = cur_nss;
			pr_warn_ratelimited("Ignoring NSS change in VHT Operating Mode Notification from %pM with invalid nss %d\n",
link_sta->pub->addr, nss);
}
}
switch (opmode & IEEE80211_OPMODE_NOTIF_CHANWIDTH_MASK) {
case IEEE80211_OPMODE_NOTIF_CHANWIDTH_20MHZ:
		/* IEEE80211_OPMODE_NOTIF_BW_160_80P80 must not be set here, ignore it */
link_sta->cur_max_bandwidth = IEEE80211_STA_RX_BW_20;
break;
case IEEE80211_OPMODE_NOTIF_CHANWIDTH_40MHZ:
		/* IEEE80211_OPMODE_NOTIF_BW_160_80P80 must not be set here, ignore it */
link_sta->cur_max_bandwidth = IEEE80211_STA_RX_BW_40;
break;
case IEEE80211_OPMODE_NOTIF_CHANWIDTH_80MHZ:
if (opmode & IEEE80211_OPMODE_NOTIF_BW_160_80P80)
link_sta->cur_max_bandwidth = IEEE80211_STA_RX_BW_160;
else
link_sta->cur_max_bandwidth = IEEE80211_STA_RX_BW_80;
break;
case IEEE80211_OPMODE_NOTIF_CHANWIDTH_160MHZ:
/* legacy only, no longer used by newer spec */
link_sta->cur_max_bandwidth = IEEE80211_STA_RX_BW_160;
break;
}
new_bw = ieee80211_sta_cur_vht_bw(link_sta);
if (new_bw != link_sta->pub->bandwidth) {
link_sta->pub->bandwidth = new_bw;
sta_opmode.bw = ieee80211_sta_rx_bw_to_chan_width(link_sta);
changed |= IEEE80211_RC_BW_CHANGED;
sta_opmode.changed |= STA_OPMODE_MAX_BW_CHANGED;
}
if (sta_opmode.changed)
cfg80211_sta_opmode_change_notify(sdata->dev, link_sta->addr,
&sta_opmode, GFP_KERNEL);
return changed;
}
void ieee80211_process_mu_groups(struct ieee80211_sub_if_data *sdata,
struct ieee80211_link_data *link,
struct ieee80211_mgmt *mgmt)
{
struct ieee80211_bss_conf *link_conf = link->conf;
if (!link_conf->mu_mimo_owner)
return;
if (!memcmp(mgmt->u.action.u.vht_group_notif.position,
link_conf->mu_group.position, WLAN_USER_POSITION_LEN) &&
!memcmp(mgmt->u.action.u.vht_group_notif.membership,
link_conf->mu_group.membership, WLAN_MEMBERSHIP_LEN))
return;
memcpy(link_conf->mu_group.membership,
mgmt->u.action.u.vht_group_notif.membership,
WLAN_MEMBERSHIP_LEN);
memcpy(link_conf->mu_group.position,
mgmt->u.action.u.vht_group_notif.position,
WLAN_USER_POSITION_LEN);
ieee80211_link_info_change_notify(sdata, link,
BSS_CHANGED_MU_GROUPS);
}
void ieee80211_update_mu_groups(struct ieee80211_vif *vif, unsigned int link_id,
const u8 *membership, const u8 *position)
{
struct ieee80211_bss_conf *link_conf;
rcu_read_lock();
link_conf = rcu_dereference(vif->link_conf[link_id]);
if (!WARN_ON_ONCE(!link_conf || !link_conf->mu_mimo_owner)) {
memcpy(link_conf->mu_group.membership, membership,
WLAN_MEMBERSHIP_LEN);
memcpy(link_conf->mu_group.position, position,
WLAN_USER_POSITION_LEN);
}
rcu_read_unlock();
}
EXPORT_SYMBOL_GPL(ieee80211_update_mu_groups);
void ieee80211_vht_handle_opmode(struct ieee80211_sub_if_data *sdata,
struct link_sta_info *link_sta,
u8 opmode, enum nl80211_band band)
{
struct ieee80211_local *local = sdata->local;
struct ieee80211_supported_band *sband = local->hw.wiphy->bands[band];
u32 changed = __ieee80211_vht_handle_opmode(sdata, link_sta,
opmode, band);
if (changed > 0) {
ieee80211_recalc_min_chandef(sdata, link_sta->link_id);
rate_control_rate_update(local, sband, link_sta->sta,
link_sta->link_id, changed);
}
}
void ieee80211_get_vht_mask_from_cap(__le16 vht_cap,
u16 vht_mask[NL80211_VHT_NSS_MAX])
{
int i;
u16 mask, cap = le16_to_cpu(vht_cap);
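	/*
	 * The VHT MCS map packs two bits per spatial stream; expand each
	 * 2-bit support value into a bitmask of the usable MCS indices
	 * (MCS 0-7, 0-8 or 0-9) for that stream.
	 */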
for (i = 0; i < NL80211_VHT_NSS_MAX; i++) {
mask = (cap >> i * 2) & IEEE80211_VHT_MCS_NOT_SUPPORTED;
switch (mask) {
case IEEE80211_VHT_MCS_SUPPORT_0_7:
vht_mask[i] = 0x00FF;
break;
case IEEE80211_VHT_MCS_SUPPORT_0_8:
vht_mask[i] = 0x01FF;
break;
case IEEE80211_VHT_MCS_SUPPORT_0_9:
vht_mask[i] = 0x03FF;
break;
case IEEE80211_VHT_MCS_NOT_SUPPORTED:
default:
vht_mask[i] = 0;
break;
}
}
}
| linux-master | net/mac80211/vht.c |
// SPDX-License-Identifier: GPL-2.0-only
/*
* Copyright 2002-2005, Instant802 Networks, Inc.
* Copyright 2005-2006, Devicescape Software, Inc.
* Copyright 2006-2007 Jiri Benc <[email protected]>
* Copyright 2007 Johannes Berg <[email protected]>
* Copyright 2013-2014 Intel Mobile Communications GmbH
* Copyright (C) 2018-2022 Intel Corporation
*
* Transmit and frame generation functions.
*/
#include <linux/kernel.h>
#include <linux/slab.h>
#include <linux/skbuff.h>
#include <linux/if_vlan.h>
#include <linux/etherdevice.h>
#include <linux/bitmap.h>
#include <linux/rcupdate.h>
#include <linux/export.h>
#include <net/net_namespace.h>
#include <net/ieee80211_radiotap.h>
#include <net/cfg80211.h>
#include <net/mac80211.h>
#include <net/codel.h>
#include <net/codel_impl.h>
#include <asm/unaligned.h>
#include <net/fq_impl.h>
#include <net/gso.h>
#include "ieee80211_i.h"
#include "driver-ops.h"
#include "led.h"
#include "mesh.h"
#include "wep.h"
#include "wpa.h"
#include "wme.h"
#include "rate.h"
/* misc utils */
static __le16 ieee80211_duration(struct ieee80211_tx_data *tx,
struct sk_buff *skb, int group_addr,
int next_frag_len)
{
int rate, mrate, erp, dur, i, shift = 0;
struct ieee80211_rate *txrate;
struct ieee80211_local *local = tx->local;
struct ieee80211_supported_band *sband;
struct ieee80211_hdr *hdr;
struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb);
struct ieee80211_chanctx_conf *chanctx_conf;
u32 rate_flags = 0;
/* assume HW handles this */
if (tx->rate.flags & (IEEE80211_TX_RC_MCS | IEEE80211_TX_RC_VHT_MCS))
return 0;
rcu_read_lock();
chanctx_conf = rcu_dereference(tx->sdata->vif.bss_conf.chanctx_conf);
if (chanctx_conf) {
shift = ieee80211_chandef_get_shift(&chanctx_conf->def);
rate_flags = ieee80211_chandef_rate_flags(&chanctx_conf->def);
}
rcu_read_unlock();
/* uh huh? */
if (WARN_ON_ONCE(tx->rate.idx < 0))
return 0;
sband = local->hw.wiphy->bands[info->band];
txrate = &sband->bitrates[tx->rate.idx];
erp = txrate->flags & IEEE80211_RATE_ERP_G;
/* device is expected to do this */
if (sband->band == NL80211_BAND_S1GHZ)
return 0;
/*
* data and mgmt (except PS Poll):
* - during CFP: 32768
* - during contention period:
* if addr1 is group address: 0
* if more fragments = 0 and addr1 is individual address: time to
* transmit one ACK plus SIFS
* if more fragments = 1 and addr1 is individual address: time to
* transmit next fragment plus 2 x ACK plus 3 x SIFS
*
* IEEE 802.11, 9.6:
* - control response frame (CTS or ACK) shall be transmitted using the
* same rate as the immediately previous frame in the frame exchange
* sequence, if this rate belongs to the PHY mandatory rates, or else
* at the highest possible rate belonging to the PHY rates in the
* BSSBasicRateSet
*/
hdr = (struct ieee80211_hdr *)skb->data;
if (ieee80211_is_ctl(hdr->frame_control)) {
/* TODO: These control frames are not currently sent by
* mac80211, but should they be implemented, this function
* needs to be updated to support duration field calculation.
*
* RTS: time needed to transmit pending data/mgmt frame plus
* one CTS frame plus one ACK frame plus 3 x SIFS
* CTS: duration of immediately previous RTS minus time
* required to transmit CTS and its SIFS
* ACK: 0 if immediately previous directed data/mgmt had
* more=0, with more=1 duration in ACK frame is duration
* from previous frame minus time needed to transmit ACK
* and its SIFS
* PS Poll: BIT(15) | BIT(14) | aid
*/
return 0;
}
/* data/mgmt */
if (0 /* FIX: data/mgmt during CFP */)
return cpu_to_le16(32768);
if (group_addr) /* Group address as the destination - no ACK */
return 0;
/* Individual destination address:
* IEEE 802.11, Ch. 9.6 (after IEEE 802.11g changes)
* CTS and ACK frames shall be transmitted using the highest rate in
* basic rate set that is less than or equal to the rate of the
* immediately previous frame and that is using the same modulation
* (CCK or OFDM). If no basic rate set matches with these requirements,
* the highest mandatory rate of the PHY that is less than or equal to
* the rate of the previous frame is used.
* Mandatory rates for IEEE 802.11g PHY: 1, 2, 5.5, 11, 6, 12, 24 Mbps
*/
rate = -1;
/* use lowest available if everything fails */
mrate = sband->bitrates[0].bitrate;
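	/*
	 * Walk the bitrates that don't exceed the TX rate: remember the
	 * highest matching basic rate for the response-frame duration, and
	 * track the highest mandatory rate of the current PHY as a fallback.
	 */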
for (i = 0; i < sband->n_bitrates; i++) {
struct ieee80211_rate *r = &sband->bitrates[i];
if (r->bitrate > txrate->bitrate)
break;
if ((rate_flags & r->flags) != rate_flags)
continue;
if (tx->sdata->vif.bss_conf.basic_rates & BIT(i))
rate = DIV_ROUND_UP(r->bitrate, 1 << shift);
switch (sband->band) {
case NL80211_BAND_2GHZ:
case NL80211_BAND_LC: {
u32 flag;
if (tx->sdata->deflink.operating_11g_mode)
flag = IEEE80211_RATE_MANDATORY_G;
else
flag = IEEE80211_RATE_MANDATORY_B;
if (r->flags & flag)
mrate = r->bitrate;
break;
}
case NL80211_BAND_5GHZ:
case NL80211_BAND_6GHZ:
if (r->flags & IEEE80211_RATE_MANDATORY_A)
mrate = r->bitrate;
break;
case NL80211_BAND_S1GHZ:
case NL80211_BAND_60GHZ:
/* TODO, for now fall through */
case NUM_NL80211_BANDS:
WARN_ON(1);
break;
}
}
if (rate == -1) {
/* No matching basic rate found; use highest suitable mandatory
* PHY rate */
rate = DIV_ROUND_UP(mrate, 1 << shift);
}
/* Don't calculate ACKs for QoS Frames with NoAck Policy set */
if (ieee80211_is_data_qos(hdr->frame_control) &&
*(ieee80211_get_qos_ctl(hdr)) & IEEE80211_QOS_CTL_ACK_POLICY_NOACK)
dur = 0;
else
/* Time needed to transmit ACK
* (10 bytes + 4-byte FCS = 112 bits) plus SIFS; rounded up
* to closest integer */
dur = ieee80211_frame_duration(sband->band, 10, rate, erp,
tx->sdata->vif.bss_conf.use_short_preamble,
shift);
if (next_frag_len) {
/* Frame is fragmented: duration increases with time needed to
* transmit next fragment plus ACK and 2 x SIFS. */
dur *= 2; /* ACK + SIFS */
/* next fragment */
dur += ieee80211_frame_duration(sband->band, next_frag_len,
txrate->bitrate, erp,
tx->sdata->vif.bss_conf.use_short_preamble,
shift);
}
return cpu_to_le16(dur);
}
/* tx handlers */
static ieee80211_tx_result debug_noinline
ieee80211_tx_h_dynamic_ps(struct ieee80211_tx_data *tx)
{
struct ieee80211_local *local = tx->local;
struct ieee80211_if_managed *ifmgd;
struct ieee80211_tx_info *info = IEEE80211_SKB_CB(tx->skb);
/* driver doesn't support power save */
if (!ieee80211_hw_check(&local->hw, SUPPORTS_PS))
return TX_CONTINUE;
/* hardware does dynamic power save */
if (ieee80211_hw_check(&local->hw, SUPPORTS_DYNAMIC_PS))
return TX_CONTINUE;
/* dynamic power save disabled */
if (local->hw.conf.dynamic_ps_timeout <= 0)
return TX_CONTINUE;
/* we are scanning, don't enable power save */
if (local->scanning)
return TX_CONTINUE;
if (!local->ps_sdata)
return TX_CONTINUE;
/* No point if we're going to suspend */
if (local->quiescing)
return TX_CONTINUE;
/* dynamic ps is supported only in managed mode */
if (tx->sdata->vif.type != NL80211_IFTYPE_STATION)
return TX_CONTINUE;
if (unlikely(info->flags & IEEE80211_TX_INTFL_OFFCHAN_TX_OK))
return TX_CONTINUE;
ifmgd = &tx->sdata->u.mgd;
/*
	 * Don't wake up from power save if u-apsd is enabled, the voip ac
	 * has u-apsd enabled and the frame is in the voip class. This
	 * effectively means that even if all access categories have u-apsd
	 * enabled, in practice u-apsd is only used with the voip ac. This
	 * is a workaround for the case when received voip class packets do
	 * not have a correct qos tag for some reason, due to the network or
	 * the peer application.
*
* Note: ifmgd->uapsd_queues access is racy here. If the value is
* changed via debugfs, user needs to reassociate manually to have
* everything in sync.
*/
if ((ifmgd->flags & IEEE80211_STA_UAPSD_ENABLED) &&
(ifmgd->uapsd_queues & IEEE80211_WMM_IE_STA_QOSINFO_AC_VO) &&
skb_get_queue_mapping(tx->skb) == IEEE80211_AC_VO)
return TX_CONTINUE;
if (local->hw.conf.flags & IEEE80211_CONF_PS) {
ieee80211_stop_queues_by_reason(&local->hw,
IEEE80211_MAX_QUEUE_MAP,
IEEE80211_QUEUE_STOP_REASON_PS,
false);
ifmgd->flags &= ~IEEE80211_STA_NULLFUNC_ACKED;
ieee80211_queue_work(&local->hw,
&local->dynamic_ps_disable_work);
}
	/* Don't restart the timer if we're not associated */
if (!ifmgd->associated)
return TX_CONTINUE;
mod_timer(&local->dynamic_ps_timer, jiffies +
msecs_to_jiffies(local->hw.conf.dynamic_ps_timeout));
return TX_CONTINUE;
}
static ieee80211_tx_result debug_noinline
ieee80211_tx_h_check_assoc(struct ieee80211_tx_data *tx)
{
struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)tx->skb->data;
struct ieee80211_tx_info *info = IEEE80211_SKB_CB(tx->skb);
bool assoc = false;
if (unlikely(info->flags & IEEE80211_TX_CTL_INJECTED))
return TX_CONTINUE;
if (unlikely(test_bit(SCAN_SW_SCANNING, &tx->local->scanning)) &&
test_bit(SDATA_STATE_OFFCHANNEL, &tx->sdata->state) &&
!ieee80211_is_probe_req(hdr->frame_control) &&
!ieee80211_is_any_nullfunc(hdr->frame_control))
/*
* When software scanning only nullfunc frames (to notify
* the sleep state to the AP) and probe requests (for the
* active scan) are allowed, all other frames should not be
* sent and we should not get here, but if we do
* nonetheless, drop them to avoid sending them
* off-channel. See the link below and
* ieee80211_start_scan() for more.
*
* http://article.gmane.org/gmane.linux.kernel.wireless.general/30089
*/
return TX_DROP;
if (tx->sdata->vif.type == NL80211_IFTYPE_OCB)
return TX_CONTINUE;
if (tx->flags & IEEE80211_TX_PS_BUFFERED)
return TX_CONTINUE;
if (tx->sta)
assoc = test_sta_flag(tx->sta, WLAN_STA_ASSOC);
if (likely(tx->flags & IEEE80211_TX_UNICAST)) {
if (unlikely(!assoc &&
ieee80211_is_data(hdr->frame_control))) {
#ifdef CONFIG_MAC80211_VERBOSE_DEBUG
sdata_info(tx->sdata,
"dropped data frame to not associated station %pM\n",
hdr->addr1);
#endif
I802_DEBUG_INC(tx->local->tx_handlers_drop_not_assoc);
return TX_DROP;
}
} else if (unlikely(ieee80211_is_data(hdr->frame_control) &&
ieee80211_vif_get_num_mcast_if(tx->sdata) == 0)) {
/*
* No associated STAs - no need to send multicast
* frames.
*/
return TX_DROP;
}
return TX_CONTINUE;
}
/* This function is called whenever the AP is about to exceed the maximum limit
* of buffered frames for power saving STAs. This situation should not really
* happen often during normal operation, so dropping the oldest buffered packet
* from each queue should be OK to make some room for new frames. */
static void purge_old_ps_buffers(struct ieee80211_local *local)
{
int total = 0, purged = 0;
struct sk_buff *skb;
struct ieee80211_sub_if_data *sdata;
struct sta_info *sta;
list_for_each_entry_rcu(sdata, &local->interfaces, list) {
struct ps_data *ps;
if (sdata->vif.type == NL80211_IFTYPE_AP)
ps = &sdata->u.ap.ps;
else if (ieee80211_vif_is_mesh(&sdata->vif))
ps = &sdata->u.mesh.ps;
else
continue;
skb = skb_dequeue(&ps->bc_buf);
if (skb) {
purged++;
ieee80211_free_txskb(&local->hw, skb);
}
total += skb_queue_len(&ps->bc_buf);
}
/*
* Drop one frame from each station from the lowest-priority
* AC that has frames at all.
*/
list_for_each_entry_rcu(sta, &local->sta_list, list) {
int ac;
for (ac = IEEE80211_AC_BK; ac >= IEEE80211_AC_VO; ac--) {
skb = skb_dequeue(&sta->ps_tx_buf[ac]);
total += skb_queue_len(&sta->ps_tx_buf[ac]);
if (skb) {
purged++;
ieee80211_free_txskb(&local->hw, skb);
break;
}
}
}
local->total_ps_buffered = total;
ps_dbg_hw(&local->hw, "PS buffers full - purged %d frames\n", purged);
}
static ieee80211_tx_result
ieee80211_tx_h_multicast_ps_buf(struct ieee80211_tx_data *tx)
{
struct ieee80211_tx_info *info = IEEE80211_SKB_CB(tx->skb);
struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)tx->skb->data;
struct ps_data *ps;
/*
* broadcast/multicast frame
*
* If any of the associated/peer stations is in power save mode,
* the frame is buffered to be sent after DTIM beacon frame.
* This is done either by the hardware or us.
*/
/* powersaving STAs currently only in AP/VLAN/mesh mode */
if (tx->sdata->vif.type == NL80211_IFTYPE_AP ||
tx->sdata->vif.type == NL80211_IFTYPE_AP_VLAN) {
if (!tx->sdata->bss)
return TX_CONTINUE;
ps = &tx->sdata->bss->ps;
} else if (ieee80211_vif_is_mesh(&tx->sdata->vif)) {
ps = &tx->sdata->u.mesh.ps;
} else {
return TX_CONTINUE;
}
/* no buffering for ordered frames */
if (ieee80211_has_order(hdr->frame_control))
return TX_CONTINUE;
if (ieee80211_is_probe_req(hdr->frame_control))
return TX_CONTINUE;
if (ieee80211_hw_check(&tx->local->hw, QUEUE_CONTROL))
info->hw_queue = tx->sdata->vif.cab_queue;
/* no stations in PS mode and no buffered packets */
if (!atomic_read(&ps->num_sta_ps) && skb_queue_empty(&ps->bc_buf))
return TX_CONTINUE;
info->flags |= IEEE80211_TX_CTL_SEND_AFTER_DTIM;
/* device releases frame after DTIM beacon */
if (!ieee80211_hw_check(&tx->local->hw, HOST_BROADCAST_PS_BUFFERING))
return TX_CONTINUE;
/* buffered in mac80211 */
if (tx->local->total_ps_buffered >= TOTAL_MAX_TX_BUFFER)
purge_old_ps_buffers(tx->local);
if (skb_queue_len(&ps->bc_buf) >= AP_MAX_BC_BUFFER) {
ps_dbg(tx->sdata,
"BC TX buffer full - dropping the oldest frame\n");
ieee80211_free_txskb(&tx->local->hw, skb_dequeue(&ps->bc_buf));
} else
tx->local->total_ps_buffered++;
skb_queue_tail(&ps->bc_buf, tx->skb);
return TX_QUEUED;
}
static int ieee80211_use_mfp(__le16 fc, struct sta_info *sta,
struct sk_buff *skb)
{
if (!ieee80211_is_mgmt(fc))
return 0;
if (sta == NULL || !test_sta_flag(sta, WLAN_STA_MFP))
return 0;
if (!ieee80211_is_robust_mgmt_frame(skb))
return 0;
return 1;
}
static ieee80211_tx_result
ieee80211_tx_h_unicast_ps_buf(struct ieee80211_tx_data *tx)
{
struct sta_info *sta = tx->sta;
struct ieee80211_tx_info *info = IEEE80211_SKB_CB(tx->skb);
struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)tx->skb->data;
struct ieee80211_local *local = tx->local;
if (unlikely(!sta))
return TX_CONTINUE;
if (unlikely((test_sta_flag(sta, WLAN_STA_PS_STA) ||
test_sta_flag(sta, WLAN_STA_PS_DRIVER) ||
test_sta_flag(sta, WLAN_STA_PS_DELIVER)) &&
!(info->flags & IEEE80211_TX_CTL_NO_PS_BUFFER))) {
int ac = skb_get_queue_mapping(tx->skb);
if (ieee80211_is_mgmt(hdr->frame_control) &&
!ieee80211_is_bufferable_mmpdu(tx->skb)) {
info->flags |= IEEE80211_TX_CTL_NO_PS_BUFFER;
return TX_CONTINUE;
}
ps_dbg(sta->sdata, "STA %pM aid %d: PS buffer for AC %d\n",
sta->sta.addr, sta->sta.aid, ac);
if (tx->local->total_ps_buffered >= TOTAL_MAX_TX_BUFFER)
purge_old_ps_buffers(tx->local);
/* sync with ieee80211_sta_ps_deliver_wakeup */
spin_lock(&sta->ps_lock);
/*
		 * The STA woke up in the meantime and all the frames on
		 * ps_tx_buf have been queued to the pending queue. No
		 * reordering can happen, go ahead and Tx the packet.
*/
if (!test_sta_flag(sta, WLAN_STA_PS_STA) &&
!test_sta_flag(sta, WLAN_STA_PS_DRIVER) &&
!test_sta_flag(sta, WLAN_STA_PS_DELIVER)) {
spin_unlock(&sta->ps_lock);
return TX_CONTINUE;
}
if (skb_queue_len(&sta->ps_tx_buf[ac]) >= STA_MAX_TX_BUFFER) {
struct sk_buff *old = skb_dequeue(&sta->ps_tx_buf[ac]);
ps_dbg(tx->sdata,
"STA %pM TX buffer for AC %d full - dropping oldest frame\n",
sta->sta.addr, ac);
ieee80211_free_txskb(&local->hw, old);
} else
tx->local->total_ps_buffered++;
info->control.jiffies = jiffies;
info->control.vif = &tx->sdata->vif;
info->control.flags |= IEEE80211_TX_INTCFL_NEED_TXPROCESSING;
info->flags &= ~IEEE80211_TX_TEMPORARY_FLAGS;
skb_queue_tail(&sta->ps_tx_buf[ac], tx->skb);
spin_unlock(&sta->ps_lock);
if (!timer_pending(&local->sta_cleanup))
mod_timer(&local->sta_cleanup,
round_jiffies(jiffies +
STA_INFO_CLEANUP_INTERVAL));
/*
* We queued up some frames, so the TIM bit might
* need to be set, recalculate it.
*/
sta_info_recalc_tim(sta);
return TX_QUEUED;
} else if (unlikely(test_sta_flag(sta, WLAN_STA_PS_STA))) {
ps_dbg(tx->sdata,
"STA %pM in PS mode, but polling/in SP -> send frame\n",
sta->sta.addr);
}
return TX_CONTINUE;
}
static ieee80211_tx_result debug_noinline
ieee80211_tx_h_ps_buf(struct ieee80211_tx_data *tx)
{
if (unlikely(tx->flags & IEEE80211_TX_PS_BUFFERED))
return TX_CONTINUE;
if (tx->flags & IEEE80211_TX_UNICAST)
return ieee80211_tx_h_unicast_ps_buf(tx);
else
return ieee80211_tx_h_multicast_ps_buf(tx);
}
static ieee80211_tx_result debug_noinline
ieee80211_tx_h_check_control_port_protocol(struct ieee80211_tx_data *tx)
{
struct ieee80211_tx_info *info = IEEE80211_SKB_CB(tx->skb);
if (unlikely(tx->sdata->control_port_protocol == tx->skb->protocol)) {
if (tx->sdata->control_port_no_encrypt)
info->flags |= IEEE80211_TX_INTFL_DONT_ENCRYPT;
info->control.flags |= IEEE80211_TX_CTRL_PORT_CTRL_PROTO;
info->flags |= IEEE80211_TX_CTL_USE_MINRATE;
}
return TX_CONTINUE;
}
static struct ieee80211_key *
ieee80211_select_link_key(struct ieee80211_tx_data *tx)
{
struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)tx->skb->data;
struct ieee80211_tx_info *info = IEEE80211_SKB_CB(tx->skb);
struct ieee80211_link_data *link;
unsigned int link_id;
link_id = u32_get_bits(info->control.flags, IEEE80211_TX_CTRL_MLO_LINK);
if (link_id == IEEE80211_LINK_UNSPECIFIED) {
link = &tx->sdata->deflink;
} else {
link = rcu_dereference(tx->sdata->link[link_id]);
if (!link)
return NULL;
}
if (ieee80211_is_group_privacy_action(tx->skb))
return rcu_dereference(link->default_multicast_key);
else if (ieee80211_is_mgmt(hdr->frame_control) &&
is_multicast_ether_addr(hdr->addr1) &&
ieee80211_is_robust_mgmt_frame(tx->skb))
return rcu_dereference(link->default_mgmt_key);
else if (is_multicast_ether_addr(hdr->addr1))
return rcu_dereference(link->default_multicast_key);
return NULL;
}
static ieee80211_tx_result debug_noinline
ieee80211_tx_h_select_key(struct ieee80211_tx_data *tx)
{
struct ieee80211_key *key;
struct ieee80211_tx_info *info = IEEE80211_SKB_CB(tx->skb);
struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)tx->skb->data;
if (unlikely(info->flags & IEEE80211_TX_INTFL_DONT_ENCRYPT)) {
tx->key = NULL;
return TX_CONTINUE;
}
if (tx->sta &&
(key = rcu_dereference(tx->sta->ptk[tx->sta->ptk_idx])))
tx->key = key;
else if ((key = ieee80211_select_link_key(tx)))
tx->key = key;
else if (!is_multicast_ether_addr(hdr->addr1) &&
(key = rcu_dereference(tx->sdata->default_unicast_key)))
tx->key = key;
else
tx->key = NULL;
if (tx->key) {
bool skip_hw = false;
/* TODO: add threshold stuff again */
switch (tx->key->conf.cipher) {
case WLAN_CIPHER_SUITE_WEP40:
case WLAN_CIPHER_SUITE_WEP104:
case WLAN_CIPHER_SUITE_TKIP:
if (!ieee80211_is_data_present(hdr->frame_control))
tx->key = NULL;
break;
case WLAN_CIPHER_SUITE_CCMP:
case WLAN_CIPHER_SUITE_CCMP_256:
case WLAN_CIPHER_SUITE_GCMP:
case WLAN_CIPHER_SUITE_GCMP_256:
if (!ieee80211_is_data_present(hdr->frame_control) &&
!ieee80211_use_mfp(hdr->frame_control, tx->sta,
tx->skb) &&
!ieee80211_is_group_privacy_action(tx->skb))
tx->key = NULL;
else
skip_hw = (tx->key->conf.flags &
IEEE80211_KEY_FLAG_SW_MGMT_TX) &&
ieee80211_is_mgmt(hdr->frame_control);
break;
case WLAN_CIPHER_SUITE_AES_CMAC:
case WLAN_CIPHER_SUITE_BIP_CMAC_256:
case WLAN_CIPHER_SUITE_BIP_GMAC_128:
case WLAN_CIPHER_SUITE_BIP_GMAC_256:
if (!ieee80211_is_mgmt(hdr->frame_control))
tx->key = NULL;
break;
}
if (unlikely(tx->key && tx->key->flags & KEY_FLAG_TAINTED &&
!ieee80211_is_deauth(hdr->frame_control)))
return TX_DROP;
if (!skip_hw && tx->key &&
tx->key->flags & KEY_FLAG_UPLOADED_TO_HARDWARE)
info->control.hw_key = &tx->key->conf;
} else if (ieee80211_is_data_present(hdr->frame_control) && tx->sta &&
test_sta_flag(tx->sta, WLAN_STA_USES_ENCRYPTION)) {
return TX_DROP;
}
return TX_CONTINUE;
}
static ieee80211_tx_result debug_noinline
ieee80211_tx_h_rate_ctrl(struct ieee80211_tx_data *tx)
{
struct ieee80211_tx_info *info = IEEE80211_SKB_CB(tx->skb);
struct ieee80211_hdr *hdr = (void *)tx->skb->data;
struct ieee80211_supported_band *sband;
u32 len;
struct ieee80211_tx_rate_control txrc;
struct ieee80211_sta_rates *ratetbl = NULL;
bool encap = info->flags & IEEE80211_TX_CTL_HW_80211_ENCAP;
bool assoc = false;
memset(&txrc, 0, sizeof(txrc));
sband = tx->local->hw.wiphy->bands[info->band];
len = min_t(u32, tx->skb->len + FCS_LEN,
tx->local->hw.wiphy->frag_threshold);
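	/* cap at the fragmentation threshold: the RTS decision below is per MPDU */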
/* set up the tx rate control struct we give the RC algo */
txrc.hw = &tx->local->hw;
txrc.sband = sband;
txrc.bss_conf = &tx->sdata->vif.bss_conf;
txrc.skb = tx->skb;
txrc.reported_rate.idx = -1;
txrc.rate_idx_mask = tx->sdata->rc_rateidx_mask[info->band];
if (tx->sdata->rc_has_mcs_mask[info->band])
txrc.rate_idx_mcs_mask =
tx->sdata->rc_rateidx_mcs_mask[info->band];
txrc.bss = (tx->sdata->vif.type == NL80211_IFTYPE_AP ||
tx->sdata->vif.type == NL80211_IFTYPE_MESH_POINT ||
tx->sdata->vif.type == NL80211_IFTYPE_ADHOC ||
tx->sdata->vif.type == NL80211_IFTYPE_OCB);
/* set up RTS protection if desired */
if (len > tx->local->hw.wiphy->rts_threshold) {
txrc.rts = true;
}
info->control.use_rts = txrc.rts;
info->control.use_cts_prot = tx->sdata->vif.bss_conf.use_cts_prot;
/*
* Use short preamble if the BSS can handle it, but not for
* management frames unless we know the receiver can handle
* that -- the management frame might be to a station that
* just wants a probe response.
*/
if (tx->sdata->vif.bss_conf.use_short_preamble &&
(ieee80211_is_tx_data(tx->skb) ||
(tx->sta && test_sta_flag(tx->sta, WLAN_STA_SHORT_PREAMBLE))))
txrc.short_preamble = true;
info->control.short_preamble = txrc.short_preamble;
/* don't ask rate control when rate already injected via radiotap */
if (info->control.flags & IEEE80211_TX_CTRL_RATE_INJECT)
return TX_CONTINUE;
if (tx->sta)
assoc = test_sta_flag(tx->sta, WLAN_STA_ASSOC);
/*
	 * Let's not bother rate control if we're associated and cannot
* talk to the sta. This should not happen.
*/
if (WARN(test_bit(SCAN_SW_SCANNING, &tx->local->scanning) && assoc &&
!rate_usable_index_exists(sband, &tx->sta->sta),
"%s: Dropped data frame as no usable bitrate found while "
"scanning and associated. Target station: "
"%pM on %d GHz band\n",
tx->sdata->name,
encap ? ((struct ethhdr *)hdr)->h_dest : hdr->addr1,
info->band ? 5 : 2))
return TX_DROP;
/*
* If we're associated with the sta at this point we know we can at
* least send the frame at the lowest bit rate.
*/
rate_control_get_rate(tx->sdata, tx->sta, &txrc);
if (tx->sta && !info->control.skip_table)
ratetbl = rcu_dereference(tx->sta->sta.rates);
if (unlikely(info->control.rates[0].idx < 0)) {
if (ratetbl) {
struct ieee80211_tx_rate rate = {
.idx = ratetbl->rate[0].idx,
.flags = ratetbl->rate[0].flags,
.count = ratetbl->rate[0].count
};
if (ratetbl->rate[0].idx < 0)
return TX_DROP;
tx->rate = rate;
} else {
return TX_DROP;
}
} else {
tx->rate = info->control.rates[0];
}
if (txrc.reported_rate.idx < 0) {
txrc.reported_rate = tx->rate;
if (tx->sta && ieee80211_is_tx_data(tx->skb))
tx->sta->deflink.tx_stats.last_rate = txrc.reported_rate;
} else if (tx->sta)
tx->sta->deflink.tx_stats.last_rate = txrc.reported_rate;
if (ratetbl)
return TX_CONTINUE;
if (unlikely(!info->control.rates[0].count))
info->control.rates[0].count = 1;
if (WARN_ON_ONCE((info->control.rates[0].count > 1) &&
(info->flags & IEEE80211_TX_CTL_NO_ACK)))
info->control.rates[0].count = 1;
return TX_CONTINUE;
}
static __le16 ieee80211_tx_next_seq(struct sta_info *sta, int tid)
{
u16 *seq = &sta->tid_seq[tid];
__le16 ret = cpu_to_le16(*seq);
	/* Increase the sequence number; it occupies bits 4-15 of seq_ctrl. */
*seq = (*seq + 0x10) & IEEE80211_SCTL_SEQ;
return ret;
}
static ieee80211_tx_result debug_noinline
ieee80211_tx_h_sequence(struct ieee80211_tx_data *tx)
{
struct ieee80211_tx_info *info = IEEE80211_SKB_CB(tx->skb);
struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)tx->skb->data;
int tid;
/*
* Packet injection may want to control the sequence
* number, if we have no matching interface then we
* neither assign one ourselves nor ask the driver to.
*/
if (unlikely(info->control.vif->type == NL80211_IFTYPE_MONITOR))
return TX_CONTINUE;
if (unlikely(ieee80211_is_ctl(hdr->frame_control)))
return TX_CONTINUE;
if (ieee80211_hdrlen(hdr->frame_control) < 24)
return TX_CONTINUE;
if (ieee80211_is_qos_nullfunc(hdr->frame_control))
return TX_CONTINUE;
if (info->control.flags & IEEE80211_TX_CTRL_NO_SEQNO)
return TX_CONTINUE;
/* SNS11 from 802.11be 10.3.2.14 */
if (unlikely(is_multicast_ether_addr(hdr->addr1) &&
ieee80211_vif_is_mld(info->control.vif) &&
info->control.vif->type == NL80211_IFTYPE_AP)) {
if (info->control.flags & IEEE80211_TX_CTRL_MCAST_MLO_FIRST_TX)
tx->sdata->mld_mcast_seq += 0x10;
hdr->seq_ctrl = cpu_to_le16(tx->sdata->mld_mcast_seq);
return TX_CONTINUE;
}
/*
* Anything but QoS data that has a sequence number field
* (is long enough) gets a sequence number from the global
* counter. QoS data frames with a multicast destination
* also use the global counter (802.11-2012 9.3.2.10).
*/
if (!ieee80211_is_data_qos(hdr->frame_control) ||
is_multicast_ether_addr(hdr->addr1)) {
/* driver should assign sequence number */
info->flags |= IEEE80211_TX_CTL_ASSIGN_SEQ;
/* for pure STA mode without beacons, we can do it */
hdr->seq_ctrl = cpu_to_le16(tx->sdata->sequence_number);
tx->sdata->sequence_number += 0x10;
if (tx->sta)
tx->sta->deflink.tx_stats.msdu[IEEE80211_NUM_TIDS]++;
return TX_CONTINUE;
}
/*
* This should be true for injected/management frames only, for
* management frames we have set the IEEE80211_TX_CTL_ASSIGN_SEQ
* above since they are not QoS-data frames.
*/
if (!tx->sta)
return TX_CONTINUE;
/* include per-STA, per-TID sequence counter */
tid = ieee80211_get_tid(hdr);
tx->sta->deflink.tx_stats.msdu[tid]++;
hdr->seq_ctrl = ieee80211_tx_next_seq(tx->sta, tid);
return TX_CONTINUE;
}
static int ieee80211_fragment(struct ieee80211_tx_data *tx,
struct sk_buff *skb, int hdrlen,
int frag_threshold)
{
struct ieee80211_local *local = tx->local;
struct ieee80211_tx_info *info;
struct sk_buff *tmp;
int per_fragm = frag_threshold - hdrlen - FCS_LEN;
int pos = hdrlen + per_fragm;
int rem = skb->len - hdrlen - per_fragm;
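	/*
	 * Every fragment repeats the full 802.11 header, so each one can
	 * carry at most frag_threshold - hdrlen - FCS_LEN bytes of payload.
	 */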
if (WARN_ON(rem < 0))
return -EINVAL;
/* first fragment was already added to queue by caller */
while (rem) {
int fraglen = per_fragm;
if (fraglen > rem)
fraglen = rem;
rem -= fraglen;
tmp = dev_alloc_skb(local->tx_headroom +
frag_threshold +
IEEE80211_ENCRYPT_HEADROOM +
IEEE80211_ENCRYPT_TAILROOM);
if (!tmp)
return -ENOMEM;
__skb_queue_tail(&tx->skbs, tmp);
skb_reserve(tmp,
local->tx_headroom + IEEE80211_ENCRYPT_HEADROOM);
/* copy control information */
memcpy(tmp->cb, skb->cb, sizeof(tmp->cb));
info = IEEE80211_SKB_CB(tmp);
info->flags &= ~(IEEE80211_TX_CTL_CLEAR_PS_FILT |
IEEE80211_TX_CTL_FIRST_FRAGMENT);
if (rem)
info->flags |= IEEE80211_TX_CTL_MORE_FRAMES;
skb_copy_queue_mapping(tmp, skb);
tmp->priority = skb->priority;
tmp->dev = skb->dev;
/* copy header and data */
skb_put_data(tmp, skb->data, hdrlen);
skb_put_data(tmp, skb->data + pos, fraglen);
pos += fraglen;
}
/* adjust first fragment's length */
skb_trim(skb, hdrlen + per_fragm);
return 0;
}
static ieee80211_tx_result debug_noinline
ieee80211_tx_h_fragment(struct ieee80211_tx_data *tx)
{
struct sk_buff *skb = tx->skb;
struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb);
struct ieee80211_hdr *hdr = (void *)skb->data;
int frag_threshold = tx->local->hw.wiphy->frag_threshold;
int hdrlen;
int fragnum;
/* no matter what happens, tx->skb moves to tx->skbs */
__skb_queue_tail(&tx->skbs, skb);
tx->skb = NULL;
if (info->flags & IEEE80211_TX_CTL_DONTFRAG)
return TX_CONTINUE;
if (ieee80211_hw_check(&tx->local->hw, SUPPORTS_TX_FRAG))
return TX_CONTINUE;
/*
* Warn when submitting a fragmented A-MPDU frame and drop it.
	 * This scenario is handled in ieee80211_tx_prepare, but extra
	 * caution is taken here since a fragmented A-MPDU may stall Tx.
*/
if (WARN_ON(info->flags & IEEE80211_TX_CTL_AMPDU))
return TX_DROP;
hdrlen = ieee80211_hdrlen(hdr->frame_control);
/* internal error, why isn't DONTFRAG set? */
if (WARN_ON(skb->len + FCS_LEN <= frag_threshold))
return TX_DROP;
/*
* Now fragment the frame. This will allocate all the fragments and
* chain them (using skb as the first fragment) to skb->next.
* During transmission, we will remove the successfully transmitted
* fragments from this list. When the low-level driver rejects one
* of the fragments then we will simply pretend to accept the skb
* but store it away as pending.
*/
if (ieee80211_fragment(tx, skb, hdrlen, frag_threshold))
return TX_DROP;
/* update duration/seq/flags of fragments */
fragnum = 0;
skb_queue_walk(&tx->skbs, skb) {
const __le16 morefrags = cpu_to_le16(IEEE80211_FCTL_MOREFRAGS);
hdr = (void *)skb->data;
info = IEEE80211_SKB_CB(skb);
if (!skb_queue_is_last(&tx->skbs, skb)) {
hdr->frame_control |= morefrags;
/*
* No multi-rate retries for fragmented frames, that
* would completely throw off the NAV at other STAs.
*/
info->control.rates[1].idx = -1;
info->control.rates[2].idx = -1;
info->control.rates[3].idx = -1;
BUILD_BUG_ON(IEEE80211_TX_MAX_RATES != 4);
info->flags &= ~IEEE80211_TX_CTL_RATE_CTRL_PROBE;
} else {
hdr->frame_control &= ~morefrags;
}
hdr->seq_ctrl |= cpu_to_le16(fragnum & IEEE80211_SCTL_FRAG);
fragnum++;
}
return TX_CONTINUE;
}
static ieee80211_tx_result debug_noinline
ieee80211_tx_h_stats(struct ieee80211_tx_data *tx)
{
struct sk_buff *skb;
int ac = -1;
if (!tx->sta)
return TX_CONTINUE;
skb_queue_walk(&tx->skbs, skb) {
ac = skb_get_queue_mapping(skb);
tx->sta->deflink.tx_stats.bytes[ac] += skb->len;
}
if (ac >= 0)
tx->sta->deflink.tx_stats.packets[ac]++;
return TX_CONTINUE;
}
static ieee80211_tx_result debug_noinline
ieee80211_tx_h_encrypt(struct ieee80211_tx_data *tx)
{
if (!tx->key)
return TX_CONTINUE;
switch (tx->key->conf.cipher) {
case WLAN_CIPHER_SUITE_WEP40:
case WLAN_CIPHER_SUITE_WEP104:
return ieee80211_crypto_wep_encrypt(tx);
case WLAN_CIPHER_SUITE_TKIP:
return ieee80211_crypto_tkip_encrypt(tx);
case WLAN_CIPHER_SUITE_CCMP:
return ieee80211_crypto_ccmp_encrypt(
tx, IEEE80211_CCMP_MIC_LEN);
case WLAN_CIPHER_SUITE_CCMP_256:
return ieee80211_crypto_ccmp_encrypt(
tx, IEEE80211_CCMP_256_MIC_LEN);
case WLAN_CIPHER_SUITE_AES_CMAC:
return ieee80211_crypto_aes_cmac_encrypt(tx);
case WLAN_CIPHER_SUITE_BIP_CMAC_256:
return ieee80211_crypto_aes_cmac_256_encrypt(tx);
case WLAN_CIPHER_SUITE_BIP_GMAC_128:
case WLAN_CIPHER_SUITE_BIP_GMAC_256:
return ieee80211_crypto_aes_gmac_encrypt(tx);
case WLAN_CIPHER_SUITE_GCMP:
case WLAN_CIPHER_SUITE_GCMP_256:
return ieee80211_crypto_gcmp_encrypt(tx);
}
return TX_DROP;
}
static ieee80211_tx_result debug_noinline
ieee80211_tx_h_calculate_duration(struct ieee80211_tx_data *tx)
{
struct sk_buff *skb;
struct ieee80211_hdr *hdr;
int next_len;
bool group_addr;
skb_queue_walk(&tx->skbs, skb) {
hdr = (void *) skb->data;
if (unlikely(ieee80211_is_pspoll(hdr->frame_control)))
break; /* must not overwrite AID */
if (!skb_queue_is_last(&tx->skbs, skb)) {
struct sk_buff *next = skb_queue_next(&tx->skbs, skb);
next_len = next->len;
} else
next_len = 0;
group_addr = is_multicast_ether_addr(hdr->addr1);
hdr->duration_id =
ieee80211_duration(tx, skb, group_addr, next_len);
}
return TX_CONTINUE;
}
/* actual transmit path */
static bool ieee80211_tx_prep_agg(struct ieee80211_tx_data *tx,
struct sk_buff *skb,
struct ieee80211_tx_info *info,
struct tid_ampdu_tx *tid_tx,
int tid)
{
bool queued = false;
bool reset_agg_timer = false;
struct sk_buff *purge_skb = NULL;
if (test_bit(HT_AGG_STATE_OPERATIONAL, &tid_tx->state)) {
reset_agg_timer = true;
} else if (test_bit(HT_AGG_STATE_WANT_START, &tid_tx->state)) {
/*
* nothing -- this aggregation session is being started
* but that might still fail with the driver
*/
} else if (!tx->sta->sta.txq[tid]) {
spin_lock(&tx->sta->lock);
/*
* Need to re-check now, because we may get here
*
* 1) in the window during which the setup is actually
* already done, but not marked yet because not all
* packets are spliced over to the driver pending
* queue yet -- if this happened we acquire the lock
* either before or after the splice happens, but
* need to recheck which of these cases happened.
*
* 2) during session teardown, if the OPERATIONAL bit
* was cleared due to the teardown but the pointer
* hasn't been assigned NULL yet (or we loaded it
* before it was assigned) -- in this case it may
* now be NULL which means we should just let the
* packet pass through because splicing the frames
* back is already done.
*/
tid_tx = rcu_dereference_protected_tid_tx(tx->sta, tid);
if (!tid_tx) {
/* do nothing, let packet pass through */
} else if (test_bit(HT_AGG_STATE_OPERATIONAL, &tid_tx->state)) {
reset_agg_timer = true;
} else {
queued = true;
if (info->flags & IEEE80211_TX_CTL_NO_PS_BUFFER) {
clear_sta_flag(tx->sta, WLAN_STA_SP);
ps_dbg(tx->sta->sdata,
"STA %pM aid %d: SP frame queued, close the SP w/o telling the peer\n",
tx->sta->sta.addr, tx->sta->sta.aid);
}
info->control.vif = &tx->sdata->vif;
info->control.flags |= IEEE80211_TX_INTCFL_NEED_TXPROCESSING;
info->flags &= ~IEEE80211_TX_TEMPORARY_FLAGS;
__skb_queue_tail(&tid_tx->pending, skb);
if (skb_queue_len(&tid_tx->pending) > STA_MAX_TX_BUFFER)
purge_skb = __skb_dequeue(&tid_tx->pending);
}
spin_unlock(&tx->sta->lock);
if (purge_skb)
ieee80211_free_txskb(&tx->local->hw, purge_skb);
}
/* reset session timer */
if (reset_agg_timer)
tid_tx->last_tx = jiffies;
return queued;
}
void ieee80211_aggr_check(struct ieee80211_sub_if_data *sdata,
struct sta_info *sta, struct sk_buff *skb)
{
struct rate_control_ref *ref = sdata->local->rate_ctrl;
u16 tid;
if (!ref || !(ref->ops->capa & RATE_CTRL_CAPA_AMPDU_TRIGGER))
return;
if (!sta || !sta->sta.deflink.ht_cap.ht_supported ||
!sta->sta.wme || skb_get_queue_mapping(skb) == IEEE80211_AC_VO ||
skb->protocol == sdata->control_port_protocol)
return;
tid = skb->priority & IEEE80211_QOS_CTL_TID_MASK;
if (likely(sta->ampdu_mlme.tid_tx[tid]))
return;
ieee80211_start_tx_ba_session(&sta->sta, tid, 0);
}
/*
* initialises @tx
* pass %NULL for the station if unknown, a valid pointer if known
* or an ERR_PTR() if the station is known not to exist
*/
static ieee80211_tx_result
ieee80211_tx_prepare(struct ieee80211_sub_if_data *sdata,
struct ieee80211_tx_data *tx,
struct sta_info *sta, struct sk_buff *skb)
{
struct ieee80211_local *local = sdata->local;
struct ieee80211_hdr *hdr;
struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb);
bool aggr_check = false;
int tid;
memset(tx, 0, sizeof(*tx));
tx->skb = skb;
tx->local = local;
tx->sdata = sdata;
__skb_queue_head_init(&tx->skbs);
/*
* If this flag is set to true anywhere, and we get here,
* we are doing the needed processing, so remove the flag
* now.
*/
info->control.flags &= ~IEEE80211_TX_INTCFL_NEED_TXPROCESSING;
hdr = (struct ieee80211_hdr *) skb->data;
if (likely(sta)) {
if (!IS_ERR(sta))
tx->sta = sta;
} else {
if (sdata->vif.type == NL80211_IFTYPE_AP_VLAN) {
tx->sta = rcu_dereference(sdata->u.vlan.sta);
if (!tx->sta && sdata->wdev.use_4addr)
return TX_DROP;
} else if (tx->sdata->control_port_protocol == tx->skb->protocol) {
tx->sta = sta_info_get_bss(sdata, hdr->addr1);
}
if (!tx->sta && !is_multicast_ether_addr(hdr->addr1)) {
tx->sta = sta_info_get(sdata, hdr->addr1);
aggr_check = true;
}
}
if (tx->sta && ieee80211_is_data_qos(hdr->frame_control) &&
!ieee80211_is_qos_nullfunc(hdr->frame_control) &&
ieee80211_hw_check(&local->hw, AMPDU_AGGREGATION) &&
!ieee80211_hw_check(&local->hw, TX_AMPDU_SETUP_IN_HW)) {
struct tid_ampdu_tx *tid_tx;
tid = ieee80211_get_tid(hdr);
tid_tx = rcu_dereference(tx->sta->ampdu_mlme.tid_tx[tid]);
if (!tid_tx && aggr_check) {
ieee80211_aggr_check(sdata, tx->sta, skb);
tid_tx = rcu_dereference(tx->sta->ampdu_mlme.tid_tx[tid]);
}
if (tid_tx) {
bool queued;
queued = ieee80211_tx_prep_agg(tx, skb, info,
tid_tx, tid);
if (unlikely(queued))
return TX_QUEUED;
}
}
if (is_multicast_ether_addr(hdr->addr1)) {
tx->flags &= ~IEEE80211_TX_UNICAST;
info->flags |= IEEE80211_TX_CTL_NO_ACK;
} else
tx->flags |= IEEE80211_TX_UNICAST;
if (!(info->flags & IEEE80211_TX_CTL_DONTFRAG)) {
if (!(tx->flags & IEEE80211_TX_UNICAST) ||
skb->len + FCS_LEN <= local->hw.wiphy->frag_threshold ||
info->flags & IEEE80211_TX_CTL_AMPDU)
info->flags |= IEEE80211_TX_CTL_DONTFRAG;
}
if (!tx->sta)
info->flags |= IEEE80211_TX_CTL_CLEAR_PS_FILT;
else if (test_and_clear_sta_flag(tx->sta, WLAN_STA_CLEAR_PS_FILT)) {
info->flags |= IEEE80211_TX_CTL_CLEAR_PS_FILT;
ieee80211_check_fast_xmit(tx->sta);
}
info->flags |= IEEE80211_TX_CTL_FIRST_FRAGMENT;
return TX_CONTINUE;
}
static struct txq_info *ieee80211_get_txq(struct ieee80211_local *local,
struct ieee80211_vif *vif,
struct sta_info *sta,
struct sk_buff *skb)
{
struct ieee80211_hdr *hdr = (struct ieee80211_hdr *) skb->data;
struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb);
struct ieee80211_txq *txq = NULL;
if ((info->flags & IEEE80211_TX_CTL_SEND_AFTER_DTIM) ||
(info->control.flags & IEEE80211_TX_CTRL_PS_RESPONSE))
return NULL;
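	/*
	 * Frames released after DTIM or in response to PS-Poll/U-APSD
	 * bypass the TXQs and are sent through the regular TX path instead.
	 */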
if (!(info->flags & IEEE80211_TX_CTL_HW_80211_ENCAP) &&
unlikely(!ieee80211_is_data_present(hdr->frame_control))) {
if ((!ieee80211_is_mgmt(hdr->frame_control) ||
ieee80211_is_bufferable_mmpdu(skb) ||
vif->type == NL80211_IFTYPE_STATION) &&
sta && sta->uploaded) {
/*
* This will be NULL if the driver didn't set the
* opt-in hardware flag.
*/
txq = sta->sta.txq[IEEE80211_NUM_TIDS];
}
} else if (sta) {
u8 tid = skb->priority & IEEE80211_QOS_CTL_TID_MASK;
if (!sta->uploaded)
return NULL;
txq = sta->sta.txq[tid];
} else {
txq = vif->txq;
}
if (!txq)
return NULL;
return to_txq_info(txq);
}
static void ieee80211_set_skb_enqueue_time(struct sk_buff *skb)
{
struct sk_buff *next;
codel_time_t now = codel_get_time();
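	/* stamp every frame in the chain so CoDel can measure its sojourn time */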
skb_list_walk_safe(skb, skb, next)
IEEE80211_SKB_CB(skb)->control.enqueue_time = now;
}
static u32 codel_skb_len_func(const struct sk_buff *skb)
{
return skb->len;
}
static codel_time_t codel_skb_time_func(const struct sk_buff *skb)
{
const struct ieee80211_tx_info *info;
info = (const struct ieee80211_tx_info *)skb->cb;
return info->control.enqueue_time;
}
static struct sk_buff *codel_dequeue_func(struct codel_vars *cvars,
void *ctx)
{
struct ieee80211_local *local;
struct txq_info *txqi;
struct fq *fq;
struct fq_flow *flow;
txqi = ctx;
local = vif_to_sdata(txqi->txq.vif)->local;
fq = &local->fq;
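	/*
	 * Map the codel vars back to the flow they belong to: the TXQ's
	 * default flow has its own vars, while per-flow vars are indexed
	 * by the flow's position in the global cvars array.
	 */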
if (cvars == &txqi->def_cvars)
flow = &txqi->tin.default_flow;
else
flow = &fq->flows[cvars - local->cvars];
return fq_flow_dequeue(fq, flow);
}
static void codel_drop_func(struct sk_buff *skb,
void *ctx)
{
struct ieee80211_local *local;
struct ieee80211_hw *hw;
struct txq_info *txqi;
txqi = ctx;
local = vif_to_sdata(txqi->txq.vif)->local;
hw = &local->hw;
ieee80211_free_txskb(hw, skb);
}
static struct sk_buff *fq_tin_dequeue_func(struct fq *fq,
struct fq_tin *tin,
struct fq_flow *flow)
{
struct ieee80211_local *local;
struct txq_info *txqi;
struct codel_vars *cvars;
struct codel_params *cparams;
struct codel_stats *cstats;
local = container_of(fq, struct ieee80211_local, fq);
txqi = container_of(tin, struct txq_info, tin);
cstats = &txqi->cstats;
if (txqi->txq.sta) {
struct sta_info *sta = container_of(txqi->txq.sta,
struct sta_info, sta);
cparams = &sta->cparams;
} else {
cparams = &local->cparams;
}
if (flow == &tin->default_flow)
cvars = &txqi->def_cvars;
else
cvars = &local->cvars[flow - fq->flows];
return codel_dequeue(txqi,
&flow->backlog,
cparams,
cvars,
cstats,
codel_skb_len_func,
codel_skb_time_func,
codel_drop_func,
codel_dequeue_func);
}
static void fq_skb_free_func(struct fq *fq,
struct fq_tin *tin,
struct fq_flow *flow,
struct sk_buff *skb)
{
struct ieee80211_local *local;
local = container_of(fq, struct ieee80211_local, fq);
ieee80211_free_txskb(&local->hw, skb);
}
static void ieee80211_txq_enqueue(struct ieee80211_local *local,
struct txq_info *txqi,
struct sk_buff *skb)
{
struct fq *fq = &local->fq;
struct fq_tin *tin = &txqi->tin;
u32 flow_idx = fq_flow_idx(fq, skb);
ieee80211_set_skb_enqueue_time(skb);
spin_lock_bh(&fq->lock);
/*
	 * For management frames, don't really apply codel etc.;
	 * we don't want to apply any shaping or anything, we just
* want to simplify the driver API by having them on the
* txqi.
*/
if (unlikely(txqi->txq.tid == IEEE80211_NUM_TIDS)) {
IEEE80211_SKB_CB(skb)->control.flags |=
IEEE80211_TX_INTCFL_NEED_TXPROCESSING;
__skb_queue_tail(&txqi->frags, skb);
} else {
fq_tin_enqueue(fq, tin, flow_idx, skb,
fq_skb_free_func);
}
spin_unlock_bh(&fq->lock);
}
static bool fq_vlan_filter_func(struct fq *fq, struct fq_tin *tin,
struct fq_flow *flow, struct sk_buff *skb,
void *data)
{
struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb);
return info->control.vif == data;
}
void ieee80211_txq_remove_vlan(struct ieee80211_local *local,
struct ieee80211_sub_if_data *sdata)
{
struct fq *fq = &local->fq;
struct txq_info *txqi;
struct fq_tin *tin;
struct ieee80211_sub_if_data *ap;
if (WARN_ON(sdata->vif.type != NL80211_IFTYPE_AP_VLAN))
return;
ap = container_of(sdata->bss, struct ieee80211_sub_if_data, u.ap);
if (!ap->vif.txq)
return;
txqi = to_txq_info(ap->vif.txq);
tin = &txqi->tin;
spin_lock_bh(&fq->lock);
fq_tin_filter(fq, tin, fq_vlan_filter_func, &sdata->vif,
fq_skb_free_func);
spin_unlock_bh(&fq->lock);
}
void ieee80211_txq_init(struct ieee80211_sub_if_data *sdata,
struct sta_info *sta,
struct txq_info *txqi, int tid)
{
fq_tin_init(&txqi->tin);
codel_vars_init(&txqi->def_cvars);
codel_stats_init(&txqi->cstats);
__skb_queue_head_init(&txqi->frags);
INIT_LIST_HEAD(&txqi->schedule_order);
txqi->txq.vif = &sdata->vif;
if (!sta) {
sdata->vif.txq = &txqi->txq;
txqi->txq.tid = 0;
txqi->txq.ac = IEEE80211_AC_BE;
return;
}
if (tid == IEEE80211_NUM_TIDS) {
if (sdata->vif.type == NL80211_IFTYPE_STATION) {
/* Drivers need to opt in to the management MPDU TXQ */
if (!ieee80211_hw_check(&sdata->local->hw,
STA_MMPDU_TXQ))
return;
} else if (!ieee80211_hw_check(&sdata->local->hw,
BUFF_MMPDU_TXQ)) {
/* Drivers need to opt in to the bufferable MMPDU TXQ */
return;
}
txqi->txq.ac = IEEE80211_AC_VO;
} else {
txqi->txq.ac = ieee80211_ac_from_tid(tid);
}
txqi->txq.sta = &sta->sta;
txqi->txq.tid = tid;
sta->sta.txq[tid] = &txqi->txq;
}
void ieee80211_txq_purge(struct ieee80211_local *local,
struct txq_info *txqi)
{
struct fq *fq = &local->fq;
struct fq_tin *tin = &txqi->tin;
spin_lock_bh(&fq->lock);
fq_tin_reset(fq, tin, fq_skb_free_func);
ieee80211_purge_tx_queue(&local->hw, &txqi->frags);
spin_unlock_bh(&fq->lock);
spin_lock_bh(&local->active_txq_lock[txqi->txq.ac]);
list_del_init(&txqi->schedule_order);
spin_unlock_bh(&local->active_txq_lock[txqi->txq.ac]);
}
void ieee80211_txq_set_params(struct ieee80211_local *local)
{
if (local->hw.wiphy->txq_limit)
local->fq.limit = local->hw.wiphy->txq_limit;
else
local->hw.wiphy->txq_limit = local->fq.limit;
if (local->hw.wiphy->txq_memory_limit)
local->fq.memory_limit = local->hw.wiphy->txq_memory_limit;
else
local->hw.wiphy->txq_memory_limit = local->fq.memory_limit;
if (local->hw.wiphy->txq_quantum)
local->fq.quantum = local->hw.wiphy->txq_quantum;
else
local->hw.wiphy->txq_quantum = local->fq.quantum;
}
int ieee80211_txq_setup_flows(struct ieee80211_local *local)
{
struct fq *fq = &local->fq;
int ret;
int i;
bool supp_vht = false;
enum nl80211_band band;
ret = fq_init(fq, 4096);
if (ret)
return ret;
/*
* If the hardware doesn't support VHT, it is safe to limit the maximum
* queue size. 4 Mbytes is 64 max-size aggregates in 802.11n.
*/
for (band = 0; band < NUM_NL80211_BANDS; band++) {
struct ieee80211_supported_band *sband;
sband = local->hw.wiphy->bands[band];
if (!sband)
continue;
supp_vht = supp_vht || sband->vht_cap.vht_supported;
}
if (!supp_vht)
fq->memory_limit = 4 << 20; /* 4 Mbytes */
codel_params_init(&local->cparams);
local->cparams.interval = MS2TIME(100);
local->cparams.target = MS2TIME(20);
local->cparams.ecn = true;
local->cvars = kcalloc(fq->flows_cnt, sizeof(local->cvars[0]),
GFP_KERNEL);
if (!local->cvars) {
spin_lock_bh(&fq->lock);
fq_reset(fq, fq_skb_free_func);
spin_unlock_bh(&fq->lock);
return -ENOMEM;
}
for (i = 0; i < fq->flows_cnt; i++)
codel_vars_init(&local->cvars[i]);
ieee80211_txq_set_params(local);
return 0;
}
void ieee80211_txq_teardown_flows(struct ieee80211_local *local)
{
struct fq *fq = &local->fq;
kfree(local->cvars);
local->cvars = NULL;
spin_lock_bh(&fq->lock);
fq_reset(fq, fq_skb_free_func);
spin_unlock_bh(&fq->lock);
}
static bool ieee80211_queue_skb(struct ieee80211_local *local,
struct ieee80211_sub_if_data *sdata,
struct sta_info *sta,
struct sk_buff *skb)
{
struct ieee80211_vif *vif;
struct txq_info *txqi;
if (sdata->vif.type == NL80211_IFTYPE_MONITOR)
return false;
if (sdata->vif.type == NL80211_IFTYPE_AP_VLAN)
sdata = container_of(sdata->bss,
struct ieee80211_sub_if_data, u.ap);
vif = &sdata->vif;
txqi = ieee80211_get_txq(local, vif, sta, skb);
if (!txqi)
return false;
ieee80211_txq_enqueue(local, txqi, skb);
schedule_and_wake_txq(local, txqi);
return true;
}
static bool ieee80211_tx_frags(struct ieee80211_local *local,
struct ieee80211_vif *vif,
struct sta_info *sta,
struct sk_buff_head *skbs,
bool txpending)
{
struct ieee80211_tx_control control = {};
struct sk_buff *skb, *tmp;
unsigned long flags;
skb_queue_walk_safe(skbs, skb, tmp) {
struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb);
int q = info->hw_queue;
#ifdef CONFIG_MAC80211_VERBOSE_DEBUG
if (WARN_ON_ONCE(q >= local->hw.queues)) {
__skb_unlink(skb, skbs);
ieee80211_free_txskb(&local->hw, skb);
continue;
}
#endif
spin_lock_irqsave(&local->queue_stop_reason_lock, flags);
if (local->queue_stop_reasons[q] ||
(!txpending && !skb_queue_empty(&local->pending[q]))) {
if (unlikely(info->flags &
IEEE80211_TX_INTFL_OFFCHAN_TX_OK)) {
if (local->queue_stop_reasons[q] &
~BIT(IEEE80211_QUEUE_STOP_REASON_OFFCHANNEL)) {
/*
* Drop off-channel frames if queues
* are stopped for any reason other
* than off-channel operation. Never
* queue them.
*/
spin_unlock_irqrestore(
&local->queue_stop_reason_lock,
flags);
ieee80211_purge_tx_queue(&local->hw,
skbs);
return true;
}
} else {
/*
* Since queue is stopped, queue up frames for
* later transmission from the tx-pending
* tasklet when the queue is woken again.
*/
if (txpending)
skb_queue_splice_init(skbs,
&local->pending[q]);
else
skb_queue_splice_tail_init(skbs,
&local->pending[q]);
spin_unlock_irqrestore(&local->queue_stop_reason_lock,
flags);
return false;
}
}
spin_unlock_irqrestore(&local->queue_stop_reason_lock, flags);
info->control.vif = vif;
control.sta = sta ? &sta->sta : NULL;
__skb_unlink(skb, skbs);
drv_tx(local, &control, skb);
}
return true;
}
/*
* Returns false if the frame couldn't be transmitted but was queued instead.
*/
static bool __ieee80211_tx(struct ieee80211_local *local,
struct sk_buff_head *skbs, struct sta_info *sta,
bool txpending)
{
struct ieee80211_tx_info *info;
struct ieee80211_sub_if_data *sdata;
struct ieee80211_vif *vif;
struct sk_buff *skb;
bool result;
if (WARN_ON(skb_queue_empty(skbs)))
return true;
skb = skb_peek(skbs);
info = IEEE80211_SKB_CB(skb);
sdata = vif_to_sdata(info->control.vif);
if (sta && !sta->uploaded)
sta = NULL;
switch (sdata->vif.type) {
case NL80211_IFTYPE_MONITOR:
if (sdata->u.mntr.flags & MONITOR_FLAG_ACTIVE) {
vif = &sdata->vif;
break;
}
sdata = rcu_dereference(local->monitor_sdata);
if (sdata) {
vif = &sdata->vif;
info->hw_queue =
vif->hw_queue[skb_get_queue_mapping(skb)];
} else if (ieee80211_hw_check(&local->hw, QUEUE_CONTROL)) {
ieee80211_purge_tx_queue(&local->hw, skbs);
return true;
} else
vif = NULL;
break;
case NL80211_IFTYPE_AP_VLAN:
sdata = container_of(sdata->bss,
struct ieee80211_sub_if_data, u.ap);
fallthrough;
default:
vif = &sdata->vif;
break;
}
result = ieee80211_tx_frags(local, vif, sta, skbs, txpending);
WARN_ON_ONCE(!skb_queue_empty(skbs));
return result;
}
/*
* Invoke TX handlers, return 0 on success and non-zero if the
* frame was dropped or queued.
*
* The handlers are split into an early and late part. The latter is everything
* that can be sensitive to reordering, and will be deferred to after packets
* are dequeued from the intermediate queues (when they are enabled).
*/
static int invoke_tx_handlers_early(struct ieee80211_tx_data *tx)
{
ieee80211_tx_result res = TX_DROP;
#define CALL_TXH(txh) \
do { \
res = txh(tx); \
if (res != TX_CONTINUE) \
goto txh_done; \
} while (0)
CALL_TXH(ieee80211_tx_h_dynamic_ps);
CALL_TXH(ieee80211_tx_h_check_assoc);
CALL_TXH(ieee80211_tx_h_ps_buf);
CALL_TXH(ieee80211_tx_h_check_control_port_protocol);
CALL_TXH(ieee80211_tx_h_select_key);
txh_done:
if (unlikely(res == TX_DROP)) {
I802_DEBUG_INC(tx->local->tx_handlers_drop);
if (tx->skb)
ieee80211_free_txskb(&tx->local->hw, tx->skb);
else
ieee80211_purge_tx_queue(&tx->local->hw, &tx->skbs);
return -1;
} else if (unlikely(res == TX_QUEUED)) {
I802_DEBUG_INC(tx->local->tx_handlers_queued);
return -1;
}
return 0;
}
/*
* Late handlers can be called while the sta lock is held. Handlers that can
* cause packets to be generated will cause deadlock!
*/
static int invoke_tx_handlers_late(struct ieee80211_tx_data *tx)
{
struct ieee80211_tx_info *info = IEEE80211_SKB_CB(tx->skb);
ieee80211_tx_result res = TX_CONTINUE;
if (!ieee80211_hw_check(&tx->local->hw, HAS_RATE_CONTROL))
CALL_TXH(ieee80211_tx_h_rate_ctrl);
if (unlikely(info->flags & IEEE80211_TX_INTFL_RETRANSMISSION)) {
__skb_queue_tail(&tx->skbs, tx->skb);
tx->skb = NULL;
goto txh_done;
}
CALL_TXH(ieee80211_tx_h_michael_mic_add);
CALL_TXH(ieee80211_tx_h_sequence);
CALL_TXH(ieee80211_tx_h_fragment);
/* handlers after fragment must be aware of tx info fragmentation! */
CALL_TXH(ieee80211_tx_h_stats);
CALL_TXH(ieee80211_tx_h_encrypt);
if (!ieee80211_hw_check(&tx->local->hw, HAS_RATE_CONTROL))
CALL_TXH(ieee80211_tx_h_calculate_duration);
#undef CALL_TXH
txh_done:
if (unlikely(res == TX_DROP)) {
I802_DEBUG_INC(tx->local->tx_handlers_drop);
if (tx->skb)
ieee80211_free_txskb(&tx->local->hw, tx->skb);
else
ieee80211_purge_tx_queue(&tx->local->hw, &tx->skbs);
return -1;
} else if (unlikely(res == TX_QUEUED)) {
I802_DEBUG_INC(tx->local->tx_handlers_queued);
return -1;
}
return 0;
}
static int invoke_tx_handlers(struct ieee80211_tx_data *tx)
{
int r = invoke_tx_handlers_early(tx);
if (r)
return r;
return invoke_tx_handlers_late(tx);
}
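/*
 * Run TX preparation and the TX handlers on a single, unfragmented frame
 * (e.g. one built by a driver) and, if requested, report the station the
 * frame is destined to. Returns false if the frame was dropped.
 */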
bool ieee80211_tx_prepare_skb(struct ieee80211_hw *hw,
struct ieee80211_vif *vif, struct sk_buff *skb,
int band, struct ieee80211_sta **sta)
{
struct ieee80211_sub_if_data *sdata = vif_to_sdata(vif);
struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb);
struct ieee80211_tx_data tx;
struct sk_buff *skb2;
if (ieee80211_tx_prepare(sdata, &tx, NULL, skb) == TX_DROP)
return false;
info->band = band;
info->control.vif = vif;
info->hw_queue = vif->hw_queue[skb_get_queue_mapping(skb)];
if (invoke_tx_handlers(&tx))
return false;
if (sta) {
if (tx.sta)
*sta = &tx.sta->sta;
else
*sta = NULL;
}
/* this function isn't suitable for fragmented data frames */
skb2 = __skb_dequeue(&tx.skbs);
if (WARN_ON(skb2 != skb || !skb_queue_empty(&tx.skbs))) {
ieee80211_free_txskb(hw, skb2);
ieee80211_purge_tx_queue(hw, &tx.skbs);
return false;
}
return true;
}
EXPORT_SYMBOL(ieee80211_tx_prepare_skb);
/*
* Returns false if the frame couldn't be transmitted but was queued instead.
*/
static bool ieee80211_tx(struct ieee80211_sub_if_data *sdata,
struct sta_info *sta, struct sk_buff *skb,
bool txpending)
{
struct ieee80211_local *local = sdata->local;
struct ieee80211_tx_data tx;
ieee80211_tx_result res_prepare;
struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb);
bool result = true;
if (unlikely(skb->len < 10)) {
dev_kfree_skb(skb);
return true;
}
/* initialises tx */
res_prepare = ieee80211_tx_prepare(sdata, &tx, sta, skb);
if (unlikely(res_prepare == TX_DROP)) {
ieee80211_free_txskb(&local->hw, skb);
return true;
} else if (unlikely(res_prepare == TX_QUEUED)) {
return true;
}
/* set up hw_queue value early */
if (!(info->flags & IEEE80211_TX_CTL_TX_OFFCHAN) ||
!ieee80211_hw_check(&local->hw, QUEUE_CONTROL))
info->hw_queue =
sdata->vif.hw_queue[skb_get_queue_mapping(skb)];
if (invoke_tx_handlers_early(&tx))
return true;
if (ieee80211_queue_skb(local, sdata, tx.sta, tx.skb))
return true;
if (!invoke_tx_handlers_late(&tx))
result = __ieee80211_tx(local, &tx.skbs, tx.sta, txpending);
return result;
}
/* device xmit handlers */
enum ieee80211_encrypt {
ENCRYPT_NO,
ENCRYPT_MGMT,
ENCRYPT_DATA,
};
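/*
 * Ensure the skb has enough head- and tailroom before 802.11 and crypto
 * headers/trailers are pushed. The headroom requirement is computed by
 * the callers (e.g. ieee80211_xmit() below); this helper additionally
 * reserves IEEE80211_ENCRYPT_TAILROOM when software crypto may append a
 * trailer, and un-clones the skb when the hardware can't handle cloned
 * skbs for this frame.
 */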
static int ieee80211_skb_resize(struct ieee80211_sub_if_data *sdata,
struct sk_buff *skb,
int head_need,
enum ieee80211_encrypt encrypt)
{
struct ieee80211_local *local = sdata->local;
bool enc_tailroom;
int tail_need = 0;
enc_tailroom = encrypt == ENCRYPT_MGMT ||
(encrypt == ENCRYPT_DATA &&
sdata->crypto_tx_tailroom_needed_cnt);
if (enc_tailroom) {
tail_need = IEEE80211_ENCRYPT_TAILROOM;
tail_need -= skb_tailroom(skb);
tail_need = max_t(int, tail_need, 0);
}
if (skb_cloned(skb) &&
(!ieee80211_hw_check(&local->hw, SUPPORTS_CLONED_SKBS) ||
!skb_clone_writable(skb, ETH_HLEN) || enc_tailroom))
I802_DEBUG_INC(local->tx_expand_skb_head_cloned);
else if (head_need || tail_need)
I802_DEBUG_INC(local->tx_expand_skb_head);
else
return 0;
if (pskb_expand_head(skb, head_need, tail_need, GFP_ATOMIC)) {
wiphy_debug(local->hw.wiphy,
"failed to reallocate TX buffer\n");
return -ENOMEM;
}
return 0;
}
void ieee80211_xmit(struct ieee80211_sub_if_data *sdata,
struct sta_info *sta, struct sk_buff *skb)
{
struct ieee80211_local *local = sdata->local;
struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb);
struct ieee80211_hdr *hdr = (struct ieee80211_hdr *) skb->data;
int headroom;
enum ieee80211_encrypt encrypt;
if (info->flags & IEEE80211_TX_INTFL_DONT_ENCRYPT)
encrypt = ENCRYPT_NO;
else if (ieee80211_is_mgmt(hdr->frame_control))
encrypt = ENCRYPT_MGMT;
else
encrypt = ENCRYPT_DATA;
headroom = local->tx_headroom;
if (encrypt != ENCRYPT_NO)
headroom += IEEE80211_ENCRYPT_HEADROOM;
headroom -= skb_headroom(skb);
headroom = max_t(int, 0, headroom);
if (ieee80211_skb_resize(sdata, skb, headroom, encrypt)) {
ieee80211_free_txskb(&local->hw, skb);
return;
}
/* reload after potential resize */
hdr = (struct ieee80211_hdr *) skb->data;
info->control.vif = &sdata->vif;
if (ieee80211_vif_is_mesh(&sdata->vif)) {
if (ieee80211_is_data(hdr->frame_control) &&
is_unicast_ether_addr(hdr->addr1)) {
if (mesh_nexthop_resolve(sdata, skb))
return; /* skb queued: don't free */
} else {
ieee80211_mps_set_frame_flags(sdata, NULL, hdr);
}
}
ieee80211_set_qos_hdr(sdata, skb);
ieee80211_tx(sdata, sta, skb, false);
}
static bool ieee80211_validate_radiotap_len(struct sk_buff *skb)
{
struct ieee80211_radiotap_header *rthdr =
(struct ieee80211_radiotap_header *)skb->data;
/* check for not even having the fixed radiotap header part */
if (unlikely(skb->len < sizeof(struct ieee80211_radiotap_header)))
return false; /* too short to be possibly valid */
/* is it a header version we can trust to find length from? */
if (unlikely(rthdr->it_version))
return false; /* only version 0 is supported */
/* does the skb contain enough to deliver on the alleged length? */
if (unlikely(skb->len < ieee80211_get_radiotap_len(skb->data)))
return false; /* skb too short for claimed rt header extent */
return true;
}
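/*
 * Parse the radiotap header prepended to an injected frame and translate
 * the recognised fields into tx_info flags and rate settings. As a rough
 * illustration (a sketch, not code from this file), the smallest valid
 * injection header is the 8-byte fixed part with an empty "present"
 * bitmap:
 *
 *	static const u8 min_rtap_hdr[] = {
 *		0x00, 0x00,			// version 0, pad
 *		0x08, 0x00,			// header length (8), LE
 *		0x00, 0x00, 0x00, 0x00,		// it_present: no fields
 *	};
 *
 * followed directly by the 802.11 frame to be injected.
 */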
bool ieee80211_parse_tx_radiotap(struct sk_buff *skb,
struct net_device *dev)
{
struct ieee80211_local *local = wdev_priv(dev->ieee80211_ptr);
struct ieee80211_radiotap_iterator iterator;
struct ieee80211_radiotap_header *rthdr =
(struct ieee80211_radiotap_header *) skb->data;
struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb);
int ret = ieee80211_radiotap_iterator_init(&iterator, rthdr, skb->len,
NULL);
u16 txflags;
u16 rate = 0;
bool rate_found = false;
u8 rate_retries = 0;
u16 rate_flags = 0;
u8 mcs_known, mcs_flags, mcs_bw;
u16 vht_known;
u8 vht_mcs = 0, vht_nss = 0;
int i;
if (!ieee80211_validate_radiotap_len(skb))
return false;
info->flags |= IEEE80211_TX_INTFL_DONT_ENCRYPT |
IEEE80211_TX_CTL_DONTFRAG;
/*
* for every radiotap entry that is present
* (ieee80211_radiotap_iterator_next returns -ENOENT when no more
* entries present, or -EINVAL on error)
*/
while (!ret) {
ret = ieee80211_radiotap_iterator_next(&iterator);
if (ret)
continue;
/* see if this argument is something we can use */
switch (iterator.this_arg_index) {
/*
* You must take care when dereferencing iterator.this_arg
* for multibyte types... the pointer is not aligned. Use
* get_unaligned((type *)iterator.this_arg) to dereference
* iterator.this_arg for type "type" safely on all arches.
*/
case IEEE80211_RADIOTAP_FLAGS:
if (*iterator.this_arg & IEEE80211_RADIOTAP_F_FCS) {
/*
* this indicates that the skb we have been
* handed has the 32-bit FCS CRC at the end...
* we should react to that by snipping it off
* because it will be recomputed and added
* on transmission
*/
if (skb->len < (iterator._max_length + FCS_LEN))
return false;
skb_trim(skb, skb->len - FCS_LEN);
}
if (*iterator.this_arg & IEEE80211_RADIOTAP_F_WEP)
info->flags &= ~IEEE80211_TX_INTFL_DONT_ENCRYPT;
if (*iterator.this_arg & IEEE80211_RADIOTAP_F_FRAG)
info->flags &= ~IEEE80211_TX_CTL_DONTFRAG;
break;
case IEEE80211_RADIOTAP_TX_FLAGS:
txflags = get_unaligned_le16(iterator.this_arg);
if (txflags & IEEE80211_RADIOTAP_F_TX_NOACK)
info->flags |= IEEE80211_TX_CTL_NO_ACK;
if (txflags & IEEE80211_RADIOTAP_F_TX_NOSEQNO)
info->control.flags |= IEEE80211_TX_CTRL_NO_SEQNO;
if (txflags & IEEE80211_RADIOTAP_F_TX_ORDER)
info->control.flags |=
IEEE80211_TX_CTRL_DONT_REORDER;
break;
case IEEE80211_RADIOTAP_RATE:
rate = *iterator.this_arg;
rate_flags = 0;
rate_found = true;
break;
case IEEE80211_RADIOTAP_DATA_RETRIES:
rate_retries = *iterator.this_arg;
break;
case IEEE80211_RADIOTAP_MCS:
mcs_known = iterator.this_arg[0];
mcs_flags = iterator.this_arg[1];
if (!(mcs_known & IEEE80211_RADIOTAP_MCS_HAVE_MCS))
break;
rate_found = true;
rate = iterator.this_arg[2];
rate_flags = IEEE80211_TX_RC_MCS;
if (mcs_known & IEEE80211_RADIOTAP_MCS_HAVE_GI &&
mcs_flags & IEEE80211_RADIOTAP_MCS_SGI)
rate_flags |= IEEE80211_TX_RC_SHORT_GI;
mcs_bw = mcs_flags & IEEE80211_RADIOTAP_MCS_BW_MASK;
if (mcs_known & IEEE80211_RADIOTAP_MCS_HAVE_BW &&
mcs_bw == IEEE80211_RADIOTAP_MCS_BW_40)
rate_flags |= IEEE80211_TX_RC_40_MHZ_WIDTH;
if (mcs_known & IEEE80211_RADIOTAP_MCS_HAVE_FEC &&
mcs_flags & IEEE80211_RADIOTAP_MCS_FEC_LDPC)
info->flags |= IEEE80211_TX_CTL_LDPC;
if (mcs_known & IEEE80211_RADIOTAP_MCS_HAVE_STBC) {
u8 stbc = u8_get_bits(mcs_flags,
IEEE80211_RADIOTAP_MCS_STBC_MASK);
info->flags |=
u32_encode_bits(stbc,
IEEE80211_TX_CTL_STBC);
}
break;
case IEEE80211_RADIOTAP_VHT:
vht_known = get_unaligned_le16(iterator.this_arg);
rate_found = true;
rate_flags = IEEE80211_TX_RC_VHT_MCS;
if ((vht_known & IEEE80211_RADIOTAP_VHT_KNOWN_GI) &&
(iterator.this_arg[2] &
IEEE80211_RADIOTAP_VHT_FLAG_SGI))
rate_flags |= IEEE80211_TX_RC_SHORT_GI;
if (vht_known &
IEEE80211_RADIOTAP_VHT_KNOWN_BANDWIDTH) {
if (iterator.this_arg[3] == 1)
rate_flags |=
IEEE80211_TX_RC_40_MHZ_WIDTH;
else if (iterator.this_arg[3] == 4)
rate_flags |=
IEEE80211_TX_RC_80_MHZ_WIDTH;
else if (iterator.this_arg[3] == 11)
rate_flags |=
IEEE80211_TX_RC_160_MHZ_WIDTH;
}
vht_mcs = iterator.this_arg[4] >> 4;
if (vht_mcs > 11)
vht_mcs = 0;
vht_nss = iterator.this_arg[4] & 0xF;
if (!vht_nss || vht_nss > 8)
vht_nss = 1;
break;
/*
* Please update the file
* Documentation/networking/mac80211-injection.rst
* when parsing new fields here.
*/
default:
break;
}
}
if (ret != -ENOENT) /* ie, if we didn't simply run out of fields */
return false;
if (rate_found) {
struct ieee80211_supported_band *sband =
local->hw.wiphy->bands[info->band];
info->control.flags |= IEEE80211_TX_CTRL_RATE_INJECT;
for (i = 0; i < IEEE80211_TX_MAX_RATES; i++) {
info->control.rates[i].idx = -1;
info->control.rates[i].flags = 0;
info->control.rates[i].count = 0;
}
if (rate_flags & IEEE80211_TX_RC_MCS) {
info->control.rates[0].idx = rate;
} else if (rate_flags & IEEE80211_TX_RC_VHT_MCS) {
ieee80211_rate_set_vht(info->control.rates, vht_mcs,
vht_nss);
} else if (sband) {
for (i = 0; i < sband->n_bitrates; i++) {
if (rate * 5 != sband->bitrates[i].bitrate)
continue;
info->control.rates[0].idx = i;
break;
}
}
if (info->control.rates[0].idx < 0)
info->control.flags &= ~IEEE80211_TX_CTRL_RATE_INJECT;
info->control.rates[0].flags = rate_flags;
info->control.rates[0].count = min_t(u8, rate_retries + 1,
local->hw.max_rate_tries);
}
return true;
}
netdev_tx_t ieee80211_monitor_start_xmit(struct sk_buff *skb,
struct net_device *dev)
{
struct ieee80211_local *local = wdev_priv(dev->ieee80211_ptr);
struct ieee80211_chanctx_conf *chanctx_conf;
struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb);
struct ieee80211_hdr *hdr;
struct ieee80211_sub_if_data *tmp_sdata, *sdata;
struct cfg80211_chan_def *chandef;
u16 len_rthdr;
int hdrlen;
sdata = IEEE80211_DEV_TO_SUB_IF(dev);
if (unlikely(!ieee80211_sdata_running(sdata)))
goto fail;
memset(info, 0, sizeof(*info));
info->flags = IEEE80211_TX_CTL_REQ_TX_STATUS |
IEEE80211_TX_CTL_INJECTED;
/* Sanity-check the length of the radiotap header */
if (!ieee80211_validate_radiotap_len(skb))
goto fail;
/* we now know there is a radiotap header with a length we can use */
len_rthdr = ieee80211_get_radiotap_len(skb->data);
/*
* fix up the pointers accounting for the radiotap
* header still being in there. We are being given
* a precooked IEEE80211 header so no need for
* normal processing
*/
skb_set_mac_header(skb, len_rthdr);
/*
* these are just fixed to the end of the rt area since we
* don't have any better information and at this point, nobody cares
*/
skb_set_network_header(skb, len_rthdr);
skb_set_transport_header(skb, len_rthdr);
if (skb->len < len_rthdr + 2)
goto fail;
hdr = (struct ieee80211_hdr *)(skb->data + len_rthdr);
hdrlen = ieee80211_hdrlen(hdr->frame_control);
if (skb->len < len_rthdr + hdrlen)
goto fail;
/*
* Initialize skb->protocol if the injected frame is a data frame
 * carrying an RFC 1042 header
*/
if (ieee80211_is_data(hdr->frame_control) &&
skb->len >= len_rthdr + hdrlen + sizeof(rfc1042_header) + 2) {
u8 *payload = (u8 *)hdr + hdrlen;
if (ether_addr_equal(payload, rfc1042_header))
skb->protocol = cpu_to_be16((payload[6] << 8) |
payload[7]);
}
rcu_read_lock();
/*
 * Outgoing injected frames that carry a local source address are
 * handled as though they were non-injected frames.
* This code here isn't entirely correct, the local MAC address
* isn't always enough to find the interface to use; for proper
* VLAN support we have an nl80211-based mechanism.
*
* This is necessary, for example, for old hostapd versions that
* don't use nl80211-based management TX/RX.
*/
list_for_each_entry_rcu(tmp_sdata, &local->interfaces, list) {
if (!ieee80211_sdata_running(tmp_sdata))
continue;
if (tmp_sdata->vif.type == NL80211_IFTYPE_MONITOR ||
tmp_sdata->vif.type == NL80211_IFTYPE_AP_VLAN)
continue;
if (ether_addr_equal(tmp_sdata->vif.addr, hdr->addr2)) {
sdata = tmp_sdata;
break;
}
}
chanctx_conf = rcu_dereference(sdata->vif.bss_conf.chanctx_conf);
if (!chanctx_conf) {
tmp_sdata = rcu_dereference(local->monitor_sdata);
if (tmp_sdata)
chanctx_conf =
rcu_dereference(tmp_sdata->vif.bss_conf.chanctx_conf);
}
if (chanctx_conf)
chandef = &chanctx_conf->def;
else if (!local->use_chanctx)
chandef = &local->_oper_chandef;
else
goto fail_rcu;
/*
* Frame injection is not allowed if beaconing is not allowed
* or if we need radar detection. Beaconing is usually not allowed when
* the mode or operation (Adhoc, AP, Mesh) does not support DFS.
 * Passive scan is also used in world regulatory domains where the
 * country is not known; such channels should be treated as NO TX
 * unless explicitly allowed, in which case the current regulatory
 * domain would not carry the passive scan flag.
*
* Since AP mode uses monitor interfaces to inject/TX management
* frames we can make AP mode the exception to this rule once it
* supports radar detection as its implementation can deal with
 * radar detection by itself. We can do that later by adding a
 * monitor flag to interfaces used for AP support.
*/
if (!cfg80211_reg_can_beacon(local->hw.wiphy, chandef,
sdata->vif.type))
goto fail_rcu;
info->band = chandef->chan->band;
/* Initialize skb->priority according to frame type and TID class,
* with respect to the sub interface that the frame will actually
* be transmitted on. If the DONT_REORDER flag is set, the original
* skb-priority is preserved to assure frames injected with this
* flag are not reordered relative to each other.
*/
ieee80211_select_queue_80211(sdata, skb, hdr);
skb_set_queue_mapping(skb, ieee80211_ac_from_tid(skb->priority));
/*
* Process the radiotap header. This will now take into account the
* selected chandef above to accurately set injection rates and
* retransmissions.
*/
if (!ieee80211_parse_tx_radiotap(skb, dev))
goto fail_rcu;
/* remove the injection radiotap header */
skb_pull(skb, len_rthdr);
ieee80211_xmit(sdata, NULL, skb);
rcu_read_unlock();
return NETDEV_TX_OK;
fail_rcu:
rcu_read_unlock();
fail:
dev_kfree_skb(skb);
return NETDEV_TX_OK; /* meaning, we dealt with the skb */
}
static inline bool ieee80211_is_tdls_setup(struct sk_buff *skb)
{
u16 ethertype = (skb->data[12] << 8) | skb->data[13];
return ethertype == ETH_P_TDLS &&
skb->len > 14 &&
skb->data[14] == WLAN_TDLS_SNAP_RFTYPE;
}
int ieee80211_lookup_ra_sta(struct ieee80211_sub_if_data *sdata,
struct sk_buff *skb,
struct sta_info **sta_out)
{
struct sta_info *sta;
switch (sdata->vif.type) {
case NL80211_IFTYPE_AP_VLAN:
sta = rcu_dereference(sdata->u.vlan.sta);
if (sta) {
*sta_out = sta;
return 0;
} else if (sdata->wdev.use_4addr) {
return -ENOLINK;
}
fallthrough;
case NL80211_IFTYPE_AP:
case NL80211_IFTYPE_OCB:
case NL80211_IFTYPE_ADHOC:
if (is_multicast_ether_addr(skb->data)) {
*sta_out = ERR_PTR(-ENOENT);
return 0;
}
sta = sta_info_get_bss(sdata, skb->data);
break;
#ifdef CONFIG_MAC80211_MESH
case NL80211_IFTYPE_MESH_POINT:
/* determined much later */
*sta_out = NULL;
return 0;
#endif
case NL80211_IFTYPE_STATION:
if (sdata->wdev.wiphy->flags & WIPHY_FLAG_SUPPORTS_TDLS) {
sta = sta_info_get(sdata, skb->data);
if (sta && test_sta_flag(sta, WLAN_STA_TDLS_PEER)) {
if (test_sta_flag(sta,
WLAN_STA_TDLS_PEER_AUTH)) {
*sta_out = sta;
return 0;
}
/*
* TDLS link during setup - throw out frames to
* peer. Allow TDLS-setup frames to unauthorized
* peers for the special case of a link teardown
* after a TDLS sta is removed due to being
* unreachable.
*/
if (!ieee80211_is_tdls_setup(skb))
return -EINVAL;
}
}
sta = sta_info_get(sdata, sdata->vif.cfg.ap_addr);
if (!sta)
return -ENOLINK;
break;
default:
return -EINVAL;
}
*sta_out = sta ?: ERR_PTR(-ENOENT);
return 0;
}
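/*
 * Clone the skb and stash the clone in the ack_status_frames IDR so that
 * TX status can later be reported to userspace; the returned id is stored
 * in info->ack_frame_id by the caller. Returns 0 if no status tracking
 * could be set up.
 */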
static u16 ieee80211_store_ack_skb(struct ieee80211_local *local,
struct sk_buff *skb,
u32 *info_flags,
u64 *cookie)
{
struct sk_buff *ack_skb;
u16 info_id = 0;
if (skb->sk)
ack_skb = skb_clone_sk(skb);
else
ack_skb = skb_clone(skb, GFP_ATOMIC);
if (ack_skb) {
unsigned long flags;
int id;
spin_lock_irqsave(&local->ack_status_lock, flags);
id = idr_alloc(&local->ack_status_frames, ack_skb,
1, 0x2000, GFP_ATOMIC);
spin_unlock_irqrestore(&local->ack_status_lock, flags);
if (id >= 0) {
info_id = id;
*info_flags |= IEEE80211_TX_CTL_REQ_TX_STATUS;
if (cookie) {
*cookie = ieee80211_mgmt_tx_cookie(local);
IEEE80211_SKB_CB(ack_skb)->ack.cookie = *cookie;
}
} else {
kfree_skb(ack_skb);
}
}
return info_id;
}
/**
* ieee80211_build_hdr - build 802.11 header in the given frame
* @sdata: virtual interface to build the header for
* @skb: the skb to build the header in
* @info_flags: skb flags to set
* @sta: the station pointer
* @ctrl_flags: info control flags to set
* @cookie: cookie pointer to fill (if not %NULL)
*
* This function takes the skb with 802.3 header and reformats the header to
* the appropriate IEEE 802.11 header based on which interface the packet is
* being transmitted on.
*
* Note that this function also takes care of the TX status request and
* potential unsharing of the SKB - this needs to be interleaved with the
* header building.
*
 * The function must be called with the RCU read lock held.
*
* Returns: the (possibly reallocated) skb or an ERR_PTR() code
*/
static struct sk_buff *ieee80211_build_hdr(struct ieee80211_sub_if_data *sdata,
struct sk_buff *skb, u32 info_flags,
struct sta_info *sta, u32 ctrl_flags,
u64 *cookie)
{
struct ieee80211_local *local = sdata->local;
struct ieee80211_tx_info *info;
int head_need;
u16 ethertype, hdrlen, meshhdrlen = 0;
__le16 fc;
struct ieee80211_hdr hdr;
struct ieee80211s_hdr mesh_hdr __maybe_unused;
struct mesh_path __maybe_unused *mppath = NULL, *mpath = NULL;
const u8 *encaps_data;
int encaps_len, skip_header_bytes;
bool wme_sta = false, authorized = false;
bool tdls_peer;
bool multicast;
u16 info_id = 0;
struct ieee80211_chanctx_conf *chanctx_conf = NULL;
enum nl80211_band band;
int ret;
u8 link_id = u32_get_bits(ctrl_flags, IEEE80211_TX_CTRL_MLO_LINK);
if (IS_ERR(sta))
sta = NULL;
#ifdef CONFIG_MAC80211_DEBUGFS
if (local->force_tx_status)
info_flags |= IEEE80211_TX_CTL_REQ_TX_STATUS;
#endif
/* convert Ethernet header to proper 802.11 header (based on
* operation mode) */
ethertype = (skb->data[12] << 8) | skb->data[13];
fc = cpu_to_le16(IEEE80211_FTYPE_DATA | IEEE80211_STYPE_DATA);
if (!ieee80211_vif_is_mld(&sdata->vif))
chanctx_conf =
rcu_dereference(sdata->vif.bss_conf.chanctx_conf);
switch (sdata->vif.type) {
case NL80211_IFTYPE_AP_VLAN:
if (sdata->wdev.use_4addr) {
fc |= cpu_to_le16(IEEE80211_FCTL_FROMDS | IEEE80211_FCTL_TODS);
/* RA TA DA SA */
memcpy(hdr.addr1, sta->sta.addr, ETH_ALEN);
memcpy(hdr.addr2, sdata->vif.addr, ETH_ALEN);
memcpy(hdr.addr3, skb->data, ETH_ALEN);
memcpy(hdr.addr4, skb->data + ETH_ALEN, ETH_ALEN);
hdrlen = 30;
authorized = test_sta_flag(sta, WLAN_STA_AUTHORIZED);
wme_sta = sta->sta.wme;
}
if (!ieee80211_vif_is_mld(&sdata->vif)) {
struct ieee80211_sub_if_data *ap_sdata;
/* override chanctx_conf from AP (we don't have one) */
ap_sdata = container_of(sdata->bss,
struct ieee80211_sub_if_data,
u.ap);
chanctx_conf =
rcu_dereference(ap_sdata->vif.bss_conf.chanctx_conf);
}
if (sdata->wdev.use_4addr)
break;
fallthrough;
case NL80211_IFTYPE_AP:
fc |= cpu_to_le16(IEEE80211_FCTL_FROMDS);
/* DA BSSID SA */
memcpy(hdr.addr1, skb->data, ETH_ALEN);
if (ieee80211_vif_is_mld(&sdata->vif) && sta && !sta->sta.mlo) {
struct ieee80211_link_data *link;
link_id = sta->deflink.link_id;
link = rcu_dereference(sdata->link[link_id]);
if (WARN_ON(!link)) {
ret = -ENOLINK;
goto free;
}
memcpy(hdr.addr2, link->conf->addr, ETH_ALEN);
} else if (link_id == IEEE80211_LINK_UNSPECIFIED ||
(sta && sta->sta.mlo)) {
memcpy(hdr.addr2, sdata->vif.addr, ETH_ALEN);
} else {
struct ieee80211_bss_conf *conf;
conf = rcu_dereference(sdata->vif.link_conf[link_id]);
if (unlikely(!conf)) {
ret = -ENOLINK;
goto free;
}
memcpy(hdr.addr2, conf->addr, ETH_ALEN);
}
memcpy(hdr.addr3, skb->data + ETH_ALEN, ETH_ALEN);
hdrlen = 24;
break;
#ifdef CONFIG_MAC80211_MESH
case NL80211_IFTYPE_MESH_POINT:
if (!is_multicast_ether_addr(skb->data)) {
struct sta_info *next_hop;
bool mpp_lookup = true;
mpath = mesh_path_lookup(sdata, skb->data);
if (mpath) {
mpp_lookup = false;
next_hop = rcu_dereference(mpath->next_hop);
if (!next_hop ||
!(mpath->flags & (MESH_PATH_ACTIVE |
MESH_PATH_RESOLVING)))
mpp_lookup = true;
}
if (mpp_lookup) {
mppath = mpp_path_lookup(sdata, skb->data);
if (mppath)
mppath->exp_time = jiffies;
}
if (mppath && mpath)
mesh_path_del(sdata, mpath->dst);
}
/*
* Use address extension if it is a packet from
* another interface or if we know the destination
* is being proxied by a portal (i.e. portal address
* differs from proxied address)
*/
if (ether_addr_equal(sdata->vif.addr, skb->data + ETH_ALEN) &&
!(mppath && !ether_addr_equal(mppath->mpp, skb->data))) {
hdrlen = ieee80211_fill_mesh_addresses(&hdr, &fc,
skb->data, skb->data + ETH_ALEN);
meshhdrlen = ieee80211_new_mesh_header(sdata, &mesh_hdr,
NULL, NULL);
} else {
/* DS -> MBSS (802.11-2012 13.11.3.3).
* For unicast with unknown forwarding information,
* destination might be in the MBSS or if that fails
* forwarded to another mesh gate. In either case
* resolution will be handled in ieee80211_xmit(), so
* leave the original DA. This also works for mcast */
const u8 *mesh_da = skb->data;
if (mppath)
mesh_da = mppath->mpp;
else if (mpath)
mesh_da = mpath->dst;
hdrlen = ieee80211_fill_mesh_addresses(&hdr, &fc,
mesh_da, sdata->vif.addr);
if (is_multicast_ether_addr(mesh_da))
/* DA TA mSA AE:SA */
meshhdrlen = ieee80211_new_mesh_header(
sdata, &mesh_hdr,
skb->data + ETH_ALEN, NULL);
else
/* RA TA mDA mSA AE:DA SA */
meshhdrlen = ieee80211_new_mesh_header(
sdata, &mesh_hdr, skb->data,
skb->data + ETH_ALEN);
}
/* For injected frames, fill RA right away as nexthop lookup
* will be skipped.
*/
if ((ctrl_flags & IEEE80211_TX_CTRL_SKIP_MPATH_LOOKUP) &&
is_zero_ether_addr(hdr.addr1))
memcpy(hdr.addr1, skb->data, ETH_ALEN);
break;
#endif
case NL80211_IFTYPE_STATION:
/* we already did checks when looking up the RA STA */
tdls_peer = test_sta_flag(sta, WLAN_STA_TDLS_PEER);
if (tdls_peer) {
/* For TDLS only one link can be valid with peer STA */
int tdls_link_id = sta->sta.valid_links ?
__ffs(sta->sta.valid_links) : 0;
struct ieee80211_link_data *link;
/* DA SA BSSID */
memcpy(hdr.addr1, skb->data, ETH_ALEN);
memcpy(hdr.addr2, skb->data + ETH_ALEN, ETH_ALEN);
link = rcu_dereference(sdata->link[tdls_link_id]);
if (WARN_ON_ONCE(!link)) {
ret = -EINVAL;
goto free;
}
memcpy(hdr.addr3, link->u.mgd.bssid, ETH_ALEN);
hdrlen = 24;
} else if (sdata->u.mgd.use_4addr &&
cpu_to_be16(ethertype) != sdata->control_port_protocol) {
fc |= cpu_to_le16(IEEE80211_FCTL_FROMDS |
IEEE80211_FCTL_TODS);
/* RA TA DA SA */
memcpy(hdr.addr1, sdata->deflink.u.mgd.bssid, ETH_ALEN);
memcpy(hdr.addr2, sdata->vif.addr, ETH_ALEN);
memcpy(hdr.addr3, skb->data, ETH_ALEN);
memcpy(hdr.addr4, skb->data + ETH_ALEN, ETH_ALEN);
hdrlen = 30;
} else {
fc |= cpu_to_le16(IEEE80211_FCTL_TODS);
/* BSSID SA DA */
memcpy(hdr.addr1, sdata->vif.cfg.ap_addr, ETH_ALEN);
memcpy(hdr.addr2, skb->data + ETH_ALEN, ETH_ALEN);
memcpy(hdr.addr3, skb->data, ETH_ALEN);
hdrlen = 24;
}
break;
case NL80211_IFTYPE_OCB:
/* DA SA BSSID */
memcpy(hdr.addr1, skb->data, ETH_ALEN);
memcpy(hdr.addr2, skb->data + ETH_ALEN, ETH_ALEN);
eth_broadcast_addr(hdr.addr3);
hdrlen = 24;
break;
case NL80211_IFTYPE_ADHOC:
/* DA SA BSSID */
memcpy(hdr.addr1, skb->data, ETH_ALEN);
memcpy(hdr.addr2, skb->data + ETH_ALEN, ETH_ALEN);
memcpy(hdr.addr3, sdata->u.ibss.bssid, ETH_ALEN);
hdrlen = 24;
break;
default:
ret = -EINVAL;
goto free;
}
if (!chanctx_conf) {
if (!ieee80211_vif_is_mld(&sdata->vif)) {
ret = -ENOTCONN;
goto free;
}
/* MLD transmissions must not rely on the band */
band = 0;
} else {
band = chanctx_conf->def.chan->band;
}
multicast = is_multicast_ether_addr(hdr.addr1);
/* sta is always NULL for mesh */
if (sta) {
authorized = test_sta_flag(sta, WLAN_STA_AUTHORIZED);
wme_sta = sta->sta.wme;
} else if (ieee80211_vif_is_mesh(&sdata->vif)) {
/* For mesh, the use of the QoS header is mandatory */
wme_sta = true;
}
/* if the receiver does QoS (which also means we do), use it */
if (wme_sta) {
fc |= cpu_to_le16(IEEE80211_STYPE_QOS_DATA);
hdrlen += 2;
}
/*
* Drop unicast frames to unauthorised stations unless they are
* EAPOL frames from the local station.
*/
if (unlikely(!ieee80211_vif_is_mesh(&sdata->vif) &&
(sdata->vif.type != NL80211_IFTYPE_OCB) &&
!multicast && !authorized &&
(cpu_to_be16(ethertype) != sdata->control_port_protocol ||
!ieee80211_is_our_addr(sdata, skb->data + ETH_ALEN, NULL)))) {
#ifdef CONFIG_MAC80211_VERBOSE_DEBUG
net_info_ratelimited("%s: dropped frame to %pM (unauthorized port)\n",
sdata->name, hdr.addr1);
#endif
I802_DEBUG_INC(local->tx_handlers_drop_unauth_port);
ret = -EPERM;
goto free;
}
if (unlikely(!multicast && ((skb->sk &&
skb_shinfo(skb)->tx_flags & SKBTX_WIFI_STATUS) ||
ctrl_flags & IEEE80211_TX_CTL_REQ_TX_STATUS)))
info_id = ieee80211_store_ack_skb(local, skb, &info_flags,
cookie);
/*
* If the skb is shared we need to obtain our own copy.
*/
skb = skb_share_check(skb, GFP_ATOMIC);
if (unlikely(!skb)) {
ret = -ENOMEM;
goto free;
}
hdr.frame_control = fc;
hdr.duration_id = 0;
hdr.seq_ctrl = 0;
skip_header_bytes = ETH_HLEN;
if (ethertype == ETH_P_AARP || ethertype == ETH_P_IPX) {
encaps_data = bridge_tunnel_header;
encaps_len = sizeof(bridge_tunnel_header);
skip_header_bytes -= 2;
} else if (ethertype >= ETH_P_802_3_MIN) {
encaps_data = rfc1042_header;
encaps_len = sizeof(rfc1042_header);
skip_header_bytes -= 2;
} else {
encaps_data = NULL;
encaps_len = 0;
}
skb_pull(skb, skip_header_bytes);
head_need = hdrlen + encaps_len + meshhdrlen - skb_headroom(skb);
/*
 * If we have to modify the skb header (and hence may need a writable
 * copy of it), reallocate right away and make the new head big enough
 * for everything that can still be pushed in front of the frame later
 * on: the crypto headroom and the driver's tx_headroom. That way the
 * skb is only ever reallocated once on this path.
*/
if (head_need > 0 || skb_cloned(skb)) {
head_need += IEEE80211_ENCRYPT_HEADROOM;
head_need += local->tx_headroom;
head_need = max_t(int, 0, head_need);
if (ieee80211_skb_resize(sdata, skb, head_need, ENCRYPT_DATA)) {
ieee80211_free_txskb(&local->hw, skb);
skb = NULL;
return ERR_PTR(-ENOMEM);
}
}
if (encaps_data)
memcpy(skb_push(skb, encaps_len), encaps_data, encaps_len);
#ifdef CONFIG_MAC80211_MESH
if (meshhdrlen > 0)
memcpy(skb_push(skb, meshhdrlen), &mesh_hdr, meshhdrlen);
#endif
if (ieee80211_is_data_qos(fc)) {
__le16 *qos_control;
qos_control = skb_push(skb, 2);
memcpy(skb_push(skb, hdrlen - 2), &hdr, hdrlen - 2);
/*
* Maybe we could actually set some fields here, for now just
* initialise to zero to indicate no special operation.
*/
*qos_control = 0;
} else
memcpy(skb_push(skb, hdrlen), &hdr, hdrlen);
skb_reset_mac_header(skb);
info = IEEE80211_SKB_CB(skb);
memset(info, 0, sizeof(*info));
info->flags = info_flags;
info->ack_frame_id = info_id;
info->band = band;
if (likely(!cookie)) {
ctrl_flags |= u32_encode_bits(link_id,
IEEE80211_TX_CTRL_MLO_LINK);
} else {
unsigned int pre_conf_link_id;
/*
 * ctrl_flags have already been set by
* ieee80211_tx_control_port(), here
* we just sanity check that
*/
pre_conf_link_id = u32_get_bits(ctrl_flags,
IEEE80211_TX_CTRL_MLO_LINK);
if (pre_conf_link_id != link_id &&
link_id != IEEE80211_LINK_UNSPECIFIED) {
#ifdef CONFIG_MAC80211_VERBOSE_DEBUG
net_info_ratelimited("%s: dropped frame to %pM with bad link ID request (%d vs. %d)\n",
sdata->name, hdr.addr1,
pre_conf_link_id, link_id);
#endif
ret = -EINVAL;
goto free;
}
}
info->control.flags = ctrl_flags;
return skb;
free:
kfree_skb(skb);
return ERR_PTR(ret);
}
/*
* fast-xmit overview
*
* The core idea of this fast-xmit is to remove per-packet checks by checking
* them out of band. ieee80211_check_fast_xmit() implements the out-of-band
* checks that are needed to get the sta->fast_tx pointer assigned, after which
* much less work can be done per packet. For example, fragmentation must be
* disabled or the fast_tx pointer will not be set. All the conditions are seen
* in the code here.
*
* Once assigned, the fast_tx data structure also caches the per-packet 802.11
* header and other data to aid packet processing in ieee80211_xmit_fast().
*
* The most difficult part of this is that when any of these assumptions
* change, an external trigger (i.e. a call to ieee80211_clear_fast_xmit(),
* ieee80211_check_fast_xmit() or friends) is required to reset the data,
* since the per-packet code no longer checks the conditions. This is reflected
* by the calls to these functions throughout the rest of the code, and must be
* maintained if any of the TX path checks change.
*/
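/*
 * As a rough illustration of what ends up cached: for a simple (non-MLO,
 * non-4-addr) station interface with a CCMP key and a QoS peer, build.hdr
 * holds a 26-byte QoS data header (To-DS, addr1 = AP, DA at addr3, SA at
 * addr2), 8 bytes of reserved CCMP IV space and the 6-byte RFC 1042 SNAP
 * header, with da_offs/sa_offs pointing into it so that only the addresses
 * and the payload need to be filled in per packet.
 */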
void ieee80211_check_fast_xmit(struct sta_info *sta)
{
struct ieee80211_fast_tx build = {}, *fast_tx = NULL, *old;
struct ieee80211_local *local = sta->local;
struct ieee80211_sub_if_data *sdata = sta->sdata;
struct ieee80211_hdr *hdr = (void *)build.hdr;
struct ieee80211_chanctx_conf *chanctx_conf;
__le16 fc;
if (!ieee80211_hw_check(&local->hw, SUPPORT_FAST_XMIT))
return;
if (ieee80211_vif_is_mesh(&sdata->vif))
mesh_fast_tx_flush_sta(sdata, sta);
/* Locking here protects both the pointer itself, and against concurrent
* invocations winning data access races to, e.g., the key pointer that
* is used.
* Without it, the invocation of this function right after the key
* pointer changes wouldn't be sufficient, as another CPU could access
* the pointer, then stall, and then do the cache update after the CPU
* that invalidated the key.
* With the locking, such scenarios cannot happen as the check for the
* key and the fast-tx assignment are done atomically, so the CPU that
 * modifies the key will either wait or the other one will see the key
* cleared/changed already.
*/
spin_lock_bh(&sta->lock);
if (ieee80211_hw_check(&local->hw, SUPPORTS_PS) &&
!ieee80211_hw_check(&local->hw, SUPPORTS_DYNAMIC_PS) &&
sdata->vif.type == NL80211_IFTYPE_STATION)
goto out;
if (!test_sta_flag(sta, WLAN_STA_AUTHORIZED))
goto out;
if (test_sta_flag(sta, WLAN_STA_PS_STA) ||
test_sta_flag(sta, WLAN_STA_PS_DRIVER) ||
test_sta_flag(sta, WLAN_STA_PS_DELIVER) ||
test_sta_flag(sta, WLAN_STA_CLEAR_PS_FILT))
goto out;
if (sdata->noack_map)
goto out;
/* fast-xmit doesn't handle fragmentation at all */
if (local->hw.wiphy->frag_threshold != (u32)-1 &&
!ieee80211_hw_check(&local->hw, SUPPORTS_TX_FRAG))
goto out;
if (!ieee80211_vif_is_mld(&sdata->vif)) {
rcu_read_lock();
chanctx_conf =
rcu_dereference(sdata->vif.bss_conf.chanctx_conf);
if (!chanctx_conf) {
rcu_read_unlock();
goto out;
}
build.band = chanctx_conf->def.chan->band;
rcu_read_unlock();
} else {
/* MLD transmissions must not rely on the band */
build.band = 0;
}
fc = cpu_to_le16(IEEE80211_FTYPE_DATA | IEEE80211_STYPE_DATA);
switch (sdata->vif.type) {
case NL80211_IFTYPE_ADHOC:
/* DA SA BSSID */
build.da_offs = offsetof(struct ieee80211_hdr, addr1);
build.sa_offs = offsetof(struct ieee80211_hdr, addr2);
memcpy(hdr->addr3, sdata->u.ibss.bssid, ETH_ALEN);
build.hdr_len = 24;
break;
case NL80211_IFTYPE_STATION:
if (test_sta_flag(sta, WLAN_STA_TDLS_PEER)) {
/* For TDLS only one link can be valid with peer STA */
int tdls_link_id = sta->sta.valid_links ?
__ffs(sta->sta.valid_links) : 0;
struct ieee80211_link_data *link;
/* DA SA BSSID */
build.da_offs = offsetof(struct ieee80211_hdr, addr1);
build.sa_offs = offsetof(struct ieee80211_hdr, addr2);
link = rcu_dereference(sdata->link[tdls_link_id]);
if (WARN_ON_ONCE(!link))
break;
memcpy(hdr->addr3, link->u.mgd.bssid, ETH_ALEN);
build.hdr_len = 24;
break;
}
if (sdata->u.mgd.use_4addr) {
/* non-regular ethertype cannot use the fastpath */
fc |= cpu_to_le16(IEEE80211_FCTL_FROMDS |
IEEE80211_FCTL_TODS);
/* RA TA DA SA */
memcpy(hdr->addr1, sdata->deflink.u.mgd.bssid, ETH_ALEN);
memcpy(hdr->addr2, sdata->vif.addr, ETH_ALEN);
build.da_offs = offsetof(struct ieee80211_hdr, addr3);
build.sa_offs = offsetof(struct ieee80211_hdr, addr4);
build.hdr_len = 30;
break;
}
fc |= cpu_to_le16(IEEE80211_FCTL_TODS);
/* BSSID SA DA */
memcpy(hdr->addr1, sdata->vif.cfg.ap_addr, ETH_ALEN);
build.da_offs = offsetof(struct ieee80211_hdr, addr3);
build.sa_offs = offsetof(struct ieee80211_hdr, addr2);
build.hdr_len = 24;
break;
case NL80211_IFTYPE_AP_VLAN:
if (sdata->wdev.use_4addr) {
fc |= cpu_to_le16(IEEE80211_FCTL_FROMDS |
IEEE80211_FCTL_TODS);
/* RA TA DA SA */
memcpy(hdr->addr1, sta->sta.addr, ETH_ALEN);
memcpy(hdr->addr2, sdata->vif.addr, ETH_ALEN);
build.da_offs = offsetof(struct ieee80211_hdr, addr3);
build.sa_offs = offsetof(struct ieee80211_hdr, addr4);
build.hdr_len = 30;
break;
}
fallthrough;
case NL80211_IFTYPE_AP:
fc |= cpu_to_le16(IEEE80211_FCTL_FROMDS);
/* DA BSSID SA */
build.da_offs = offsetof(struct ieee80211_hdr, addr1);
if (sta->sta.mlo || !ieee80211_vif_is_mld(&sdata->vif)) {
memcpy(hdr->addr2, sdata->vif.addr, ETH_ALEN);
} else {
unsigned int link_id = sta->deflink.link_id;
struct ieee80211_link_data *link;
rcu_read_lock();
link = rcu_dereference(sdata->link[link_id]);
if (WARN_ON(!link)) {
rcu_read_unlock();
goto out;
}
memcpy(hdr->addr2, link->conf->addr, ETH_ALEN);
rcu_read_unlock();
}
build.sa_offs = offsetof(struct ieee80211_hdr, addr3);
build.hdr_len = 24;
break;
default:
/* not handled on fast-xmit */
goto out;
}
if (sta->sta.wme) {
build.hdr_len += 2;
fc |= cpu_to_le16(IEEE80211_STYPE_QOS_DATA);
}
/* We store the key here so there's no point in using rcu_dereference()
* but that's fine because the code that changes the pointers will call
* this function after doing so. For a single CPU that would be enough,
* for multiple see the comment above.
*/
build.key = rcu_access_pointer(sta->ptk[sta->ptk_idx]);
if (!build.key)
build.key = rcu_access_pointer(sdata->default_unicast_key);
if (build.key) {
bool gen_iv, iv_spc, mmic;
gen_iv = build.key->conf.flags & IEEE80211_KEY_FLAG_GENERATE_IV;
iv_spc = build.key->conf.flags & IEEE80211_KEY_FLAG_PUT_IV_SPACE;
mmic = build.key->conf.flags &
(IEEE80211_KEY_FLAG_GENERATE_MMIC |
IEEE80211_KEY_FLAG_PUT_MIC_SPACE);
/* don't handle software crypto */
if (!(build.key->flags & KEY_FLAG_UPLOADED_TO_HARDWARE))
goto out;
/* Key is being removed */
if (build.key->flags & KEY_FLAG_TAINTED)
goto out;
switch (build.key->conf.cipher) {
case WLAN_CIPHER_SUITE_CCMP:
case WLAN_CIPHER_SUITE_CCMP_256:
if (gen_iv)
build.pn_offs = build.hdr_len;
if (gen_iv || iv_spc)
build.hdr_len += IEEE80211_CCMP_HDR_LEN;
break;
case WLAN_CIPHER_SUITE_GCMP:
case WLAN_CIPHER_SUITE_GCMP_256:
if (gen_iv)
build.pn_offs = build.hdr_len;
if (gen_iv || iv_spc)
build.hdr_len += IEEE80211_GCMP_HDR_LEN;
break;
case WLAN_CIPHER_SUITE_TKIP:
/* cannot handle MMIC or IV generation in xmit-fast */
if (mmic || gen_iv)
goto out;
if (iv_spc)
build.hdr_len += IEEE80211_TKIP_IV_LEN;
break;
case WLAN_CIPHER_SUITE_WEP40:
case WLAN_CIPHER_SUITE_WEP104:
/* cannot handle IV generation in fast-xmit */
if (gen_iv)
goto out;
if (iv_spc)
build.hdr_len += IEEE80211_WEP_IV_LEN;
break;
case WLAN_CIPHER_SUITE_AES_CMAC:
case WLAN_CIPHER_SUITE_BIP_CMAC_256:
case WLAN_CIPHER_SUITE_BIP_GMAC_128:
case WLAN_CIPHER_SUITE_BIP_GMAC_256:
WARN(1,
"management cipher suite 0x%x enabled for data\n",
build.key->conf.cipher);
goto out;
default:
/* we don't know how to generate IVs for this at all */
if (WARN_ON(gen_iv))
goto out;
}
fc |= cpu_to_le16(IEEE80211_FCTL_PROTECTED);
}
hdr->frame_control = fc;
memcpy(build.hdr + build.hdr_len,
rfc1042_header, sizeof(rfc1042_header));
build.hdr_len += sizeof(rfc1042_header);
fast_tx = kmemdup(&build, sizeof(build), GFP_ATOMIC);
/* if the kmemdup fails, continue w/o fast_tx */
out:
/* we might have raced against another call to this function */
old = rcu_dereference_protected(sta->fast_tx,
lockdep_is_held(&sta->lock));
rcu_assign_pointer(sta->fast_tx, fast_tx);
if (old)
kfree_rcu(old, rcu_head);
spin_unlock_bh(&sta->lock);
}
void ieee80211_check_fast_xmit_all(struct ieee80211_local *local)
{
struct sta_info *sta;
rcu_read_lock();
list_for_each_entry_rcu(sta, &local->sta_list, list)
ieee80211_check_fast_xmit(sta);
rcu_read_unlock();
}
void ieee80211_check_fast_xmit_iface(struct ieee80211_sub_if_data *sdata)
{
struct ieee80211_local *local = sdata->local;
struct sta_info *sta;
rcu_read_lock();
list_for_each_entry_rcu(sta, &local->sta_list, list) {
if (sdata != sta->sdata &&
(!sta->sdata->bss || sta->sdata->bss != sdata->bss))
continue;
ieee80211_check_fast_xmit(sta);
}
rcu_read_unlock();
}
void ieee80211_clear_fast_xmit(struct sta_info *sta)
{
struct ieee80211_fast_tx *fast_tx;
spin_lock_bh(&sta->lock);
fast_tx = rcu_dereference_protected(sta->fast_tx,
lockdep_is_held(&sta->lock));
RCU_INIT_POINTER(sta->fast_tx, NULL);
spin_unlock_bh(&sta->lock);
if (fast_tx)
kfree_rcu(fast_tx, rcu_head);
}
static bool ieee80211_amsdu_realloc_pad(struct ieee80211_local *local,
struct sk_buff *skb, int headroom)
{
if (skb_headroom(skb) < headroom) {
I802_DEBUG_INC(local->tx_expand_skb_head);
if (pskb_expand_head(skb, headroom, 0, GFP_ATOMIC)) {
wiphy_debug(local->hw.wiphy,
"failed to reallocate TX buffer\n");
return false;
}
}
return true;
}
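/*
 * Turn the first queued frame into the head of an A-MSDU: push an inner
 * ethernet-style subframe header right after the cached 802.11 header,
 * rewrite the outer addresses to the BSSID as required for A-MSDUs
 * (depending on the From/To-DS bits) and set the A-MSDU-present bit in
 * the QoS control field.
 */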
static bool ieee80211_amsdu_prepare_head(struct ieee80211_sub_if_data *sdata,
struct ieee80211_fast_tx *fast_tx,
struct sk_buff *skb)
{
struct ieee80211_local *local = sdata->local;
struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb);
struct ieee80211_hdr *hdr;
struct ethhdr *amsdu_hdr;
int hdr_len = fast_tx->hdr_len - sizeof(rfc1042_header);
int subframe_len = skb->len - hdr_len;
void *data;
u8 *qc, *h_80211_src, *h_80211_dst;
const u8 *bssid;
if (info->flags & IEEE80211_TX_CTL_RATE_CTRL_PROBE)
return false;
if (info->control.flags & IEEE80211_TX_CTRL_AMSDU)
return true;
if (!ieee80211_amsdu_realloc_pad(local, skb,
sizeof(*amsdu_hdr) +
local->hw.extra_tx_headroom))
return false;
data = skb_push(skb, sizeof(*amsdu_hdr));
memmove(data, data + sizeof(*amsdu_hdr), hdr_len);
hdr = data;
amsdu_hdr = data + hdr_len;
/* h_80211_src/dst is addr* field within hdr */
h_80211_src = data + fast_tx->sa_offs;
h_80211_dst = data + fast_tx->da_offs;
amsdu_hdr->h_proto = cpu_to_be16(subframe_len);
ether_addr_copy(amsdu_hdr->h_source, h_80211_src);
ether_addr_copy(amsdu_hdr->h_dest, h_80211_dst);
/* according to IEEE 802.11-2012 8.3.2 table 8-19, the outer SA/DA
 * fields need to be changed to BSSID for A-MSDU frames depending
* on FromDS/ToDS values.
*/
switch (sdata->vif.type) {
case NL80211_IFTYPE_STATION:
bssid = sdata->vif.cfg.ap_addr;
break;
case NL80211_IFTYPE_AP:
case NL80211_IFTYPE_AP_VLAN:
bssid = sdata->vif.addr;
break;
default:
bssid = NULL;
}
if (bssid && ieee80211_has_fromds(hdr->frame_control))
ether_addr_copy(h_80211_src, bssid);
if (bssid && ieee80211_has_tods(hdr->frame_control))
ether_addr_copy(h_80211_dst, bssid);
qc = ieee80211_get_qos_ctl(hdr);
*qc |= IEEE80211_QOS_CTL_A_MSDU_PRESENT;
info->control.flags |= IEEE80211_TX_CTRL_AMSDU;
return true;
}
static bool ieee80211_amsdu_aggregate(struct ieee80211_sub_if_data *sdata,
struct sta_info *sta,
struct ieee80211_fast_tx *fast_tx,
struct sk_buff *skb,
const u8 *da, const u8 *sa)
{
struct ieee80211_local *local = sdata->local;
struct fq *fq = &local->fq;
struct fq_tin *tin;
struct fq_flow *flow;
u8 tid = skb->priority & IEEE80211_QOS_CTL_TAG1D_MASK;
struct ieee80211_txq *txq = sta->sta.txq[tid];
struct txq_info *txqi;
struct sk_buff **frag_tail, *head;
int subframe_len = skb->len - ETH_ALEN;
u8 max_subframes = sta->sta.max_amsdu_subframes;
int max_frags = local->hw.max_tx_fragments;
int max_amsdu_len = sta->sta.cur->max_amsdu_len;
int orig_truesize;
u32 flow_idx;
__be16 len;
void *data;
bool ret = false;
unsigned int orig_len;
int n = 2, nfrags, pad = 0;
u16 hdrlen;
if (!ieee80211_hw_check(&local->hw, TX_AMSDU))
return false;
if (sdata->vif.offload_flags & IEEE80211_OFFLOAD_ENCAP_ENABLED)
return false;
if (ieee80211_vif_is_mesh(&sdata->vif))
return false;
if (skb_is_gso(skb))
return false;
if (!txq)
return false;
txqi = to_txq_info(txq);
if (test_bit(IEEE80211_TXQ_NO_AMSDU, &txqi->flags))
return false;
if (sta->sta.cur->max_rc_amsdu_len)
max_amsdu_len = min_t(int, max_amsdu_len,
sta->sta.cur->max_rc_amsdu_len);
if (sta->sta.cur->max_tid_amsdu_len[tid])
max_amsdu_len = min_t(int, max_amsdu_len,
sta->sta.cur->max_tid_amsdu_len[tid]);
flow_idx = fq_flow_idx(fq, skb);
spin_lock_bh(&fq->lock);
/* TODO: Ideally aggregation should be done on dequeue to remain
* responsive to environment changes.
*/
tin = &txqi->tin;
flow = fq_flow_classify(fq, tin, flow_idx, skb);
head = skb_peek_tail(&flow->queue);
if (!head || skb_is_gso(head))
goto out;
orig_truesize = head->truesize;
orig_len = head->len;
if (skb->len + head->len > max_amsdu_len)
goto out;
nfrags = 1 + skb_shinfo(skb)->nr_frags;
nfrags += 1 + skb_shinfo(head)->nr_frags;
frag_tail = &skb_shinfo(head)->frag_list;
while (*frag_tail) {
nfrags += 1 + skb_shinfo(*frag_tail)->nr_frags;
frag_tail = &(*frag_tail)->next;
n++;
}
if (max_subframes && n > max_subframes)
goto out;
if (max_frags && nfrags > max_frags)
goto out;
if (!drv_can_aggregate_in_amsdu(local, head, skb))
goto out;
if (!ieee80211_amsdu_prepare_head(sdata, fast_tx, head))
goto out;
/* If n == 2, the "while (*frag_tail)" loop above didn't execute
* and frag_tail should be &skb_shinfo(head)->frag_list.
* However, ieee80211_amsdu_prepare_head() can reallocate it.
* Reload frag_tail to have it pointing to the correct place.
*/
if (n == 2)
frag_tail = &skb_shinfo(head)->frag_list;
/*
* Pad out the previous subframe to a multiple of 4 by adding the
* padding to the next one, that's being added. Note that head->len
* is the length of the full A-MSDU, but that works since each time
* we add a new subframe we pad out the previous one to a multiple
* of 4 and thus it no longer matters in the next round.
*/
hdrlen = fast_tx->hdr_len - sizeof(rfc1042_header);
if ((head->len - hdrlen) & 3)
pad = 4 - ((head->len - hdrlen) & 3);
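/*
 * Worked example (illustration only): if the previous subframe ends
 * 38 bytes past the 802.11 header, 38 & 3 == 2, so pad == 2 and the
 * next subframe starts on a 4-byte boundary once those two zero bytes
 * are pushed in front of it below.
 */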
if (!ieee80211_amsdu_realloc_pad(local, skb, sizeof(rfc1042_header) +
2 + pad))
goto out_recalc;
ret = true;
data = skb_push(skb, ETH_ALEN + 2);
ether_addr_copy(data, da);
ether_addr_copy(data + ETH_ALEN, sa);
data += 2 * ETH_ALEN;
len = cpu_to_be16(subframe_len);
memcpy(data, &len, 2);
memcpy(data + 2, rfc1042_header, sizeof(rfc1042_header));
memset(skb_push(skb, pad), 0, pad);
head->len += skb->len;
head->data_len += skb->len;
*frag_tail = skb;
out_recalc:
fq->memory_usage += head->truesize - orig_truesize;
if (head->len != orig_len) {
flow->backlog += head->len - orig_len;
tin->backlog_bytes += head->len - orig_len;
}
out:
spin_unlock_bh(&fq->lock);
return ret;
}
/*
* Can be called while the sta lock is held. Anything that can cause packets to
* be generated will cause deadlock!
*/
static ieee80211_tx_result
ieee80211_xmit_fast_finish(struct ieee80211_sub_if_data *sdata,
struct sta_info *sta, u8 pn_offs,
struct ieee80211_key *key,
struct ieee80211_tx_data *tx)
{
struct sk_buff *skb = tx->skb;
struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb);
struct ieee80211_hdr *hdr = (void *)skb->data;
u8 tid = IEEE80211_NUM_TIDS;
if (!ieee80211_hw_check(&tx->local->hw, HAS_RATE_CONTROL) &&
ieee80211_tx_h_rate_ctrl(tx) != TX_CONTINUE)
return TX_DROP;
if (key)
info->control.hw_key = &key->conf;
dev_sw_netstats_tx_add(skb->dev, 1, skb->len);
if (hdr->frame_control & cpu_to_le16(IEEE80211_STYPE_QOS_DATA)) {
tid = skb->priority & IEEE80211_QOS_CTL_TAG1D_MASK;
hdr->seq_ctrl = ieee80211_tx_next_seq(sta, tid);
} else {
info->flags |= IEEE80211_TX_CTL_ASSIGN_SEQ;
hdr->seq_ctrl = cpu_to_le16(sdata->sequence_number);
sdata->sequence_number += 0x10;
}
if (skb_shinfo(skb)->gso_size)
sta->deflink.tx_stats.msdu[tid] +=
DIV_ROUND_UP(skb->len, skb_shinfo(skb)->gso_size);
else
sta->deflink.tx_stats.msdu[tid]++;
info->hw_queue = sdata->vif.hw_queue[skb_get_queue_mapping(skb)];
/* statistics normally done by ieee80211_tx_h_stats (but that
* has to consider fragmentation, so is more complex)
*/
sta->deflink.tx_stats.bytes[skb_get_queue_mapping(skb)] += skb->len;
sta->deflink.tx_stats.packets[skb_get_queue_mapping(skb)]++;
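/*
 * For ciphers where mac80211 generates the PN (CCMP/GCMP), fill the
 * 8-byte IV/PN header that was reserved at pn_offs when the fast-tx
 * cache was built: PN0, PN1, a reserved byte (not written here), the
 * ExtIV/key-id byte, then PN2..PN5.
 */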
if (pn_offs) {
u64 pn;
u8 *crypto_hdr = skb->data + pn_offs;
switch (key->conf.cipher) {
case WLAN_CIPHER_SUITE_CCMP:
case WLAN_CIPHER_SUITE_CCMP_256:
case WLAN_CIPHER_SUITE_GCMP:
case WLAN_CIPHER_SUITE_GCMP_256:
pn = atomic64_inc_return(&key->conf.tx_pn);
crypto_hdr[0] = pn;
crypto_hdr[1] = pn >> 8;
crypto_hdr[3] = 0x20 | (key->conf.keyidx << 6);
crypto_hdr[4] = pn >> 16;
crypto_hdr[5] = pn >> 24;
crypto_hdr[6] = pn >> 32;
crypto_hdr[7] = pn >> 40;
break;
}
}
return TX_CONTINUE;
}
static netdev_features_t
ieee80211_sdata_netdev_features(struct ieee80211_sub_if_data *sdata)
{
if (sdata->vif.type != NL80211_IFTYPE_AP_VLAN)
return sdata->vif.netdev_features;
if (!sdata->bss)
return 0;
sdata = container_of(sdata->bss, struct ieee80211_sub_if_data, u.ap);
return sdata->vif.netdev_features;
}
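/*
 * Resolve offloads the hardware cannot do for this frame: segment GSO
 * skbs, linearize where needed and resolve partial checksums. Returns
 * the (possibly replaced) skb or a list of segments, or NULL on failure,
 * in which case the original skb has been freed.
 */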
static struct sk_buff *
ieee80211_tx_skb_fixup(struct sk_buff *skb, netdev_features_t features)
{
if (skb_is_gso(skb)) {
struct sk_buff *segs;
segs = skb_gso_segment(skb, features);
if (!segs)
return skb;
if (IS_ERR(segs))
goto free;
consume_skb(skb);
return segs;
}
if (skb_needs_linearize(skb, features) && __skb_linearize(skb))
goto free;
if (skb->ip_summed == CHECKSUM_PARTIAL) {
int ofs = skb_checksum_start_offset(skb);
if (skb->encapsulation)
skb_set_inner_transport_header(skb, ofs);
else
skb_set_transport_header(skb, ofs);
if (skb_csum_hwoffload_help(skb, features))
goto free;
}
skb_mark_not_on_list(skb);
return skb;
free:
kfree_skb(skb);
return NULL;
}
void __ieee80211_xmit_fast(struct ieee80211_sub_if_data *sdata,
struct sta_info *sta,
struct ieee80211_fast_tx *fast_tx,
struct sk_buff *skb, bool ampdu,
const u8 *da, const u8 *sa)
{
struct ieee80211_local *local = sdata->local;
struct ieee80211_hdr *hdr = (void *)fast_tx->hdr;
struct ieee80211_tx_info *info;
struct ieee80211_tx_data tx;
ieee80211_tx_result r;
int hw_headroom = sdata->local->hw.extra_tx_headroom;
int extra_head = fast_tx->hdr_len - (ETH_HLEN - 2);
skb = skb_share_check(skb, GFP_ATOMIC);
if (unlikely(!skb))
return;
if ((hdr->frame_control & cpu_to_le16(IEEE80211_STYPE_QOS_DATA)) &&
ieee80211_amsdu_aggregate(sdata, sta, fast_tx, skb, da, sa))
return;
/* will not be crypto-handled beyond what we do here, so pass
 * ENCRYPT_NO to the resize so that it doesn't account for more
 * room than we already have in 'extra_head'
 */
if (unlikely(ieee80211_skb_resize(sdata, skb,
max_t(int, extra_head + hw_headroom -
skb_headroom(skb), 0),
ENCRYPT_NO)))
goto free;
hdr = skb_push(skb, extra_head);
memcpy(skb->data, fast_tx->hdr, fast_tx->hdr_len);
memcpy(skb->data + fast_tx->da_offs, da, ETH_ALEN);
memcpy(skb->data + fast_tx->sa_offs, sa, ETH_ALEN);
info = IEEE80211_SKB_CB(skb);
memset(info, 0, sizeof(*info));
info->band = fast_tx->band;
info->control.vif = &sdata->vif;
info->flags = IEEE80211_TX_CTL_FIRST_FRAGMENT |
IEEE80211_TX_CTL_DONTFRAG |
(ampdu ? IEEE80211_TX_CTL_AMPDU : 0);
info->control.flags = IEEE80211_TX_CTRL_FAST_XMIT |
u32_encode_bits(IEEE80211_LINK_UNSPECIFIED,
IEEE80211_TX_CTRL_MLO_LINK);
#ifdef CONFIG_MAC80211_DEBUGFS
if (local->force_tx_status)
info->flags |= IEEE80211_TX_CTL_REQ_TX_STATUS;
#endif
if (hdr->frame_control & cpu_to_le16(IEEE80211_STYPE_QOS_DATA)) {
u8 tid = skb->priority & IEEE80211_QOS_CTL_TAG1D_MASK;
*ieee80211_get_qos_ctl(hdr) = tid;
}
__skb_queue_head_init(&tx.skbs);
tx.flags = IEEE80211_TX_UNICAST;
tx.local = local;
tx.sdata = sdata;
tx.sta = sta;
tx.key = fast_tx->key;
if (ieee80211_queue_skb(local, sdata, sta, skb))
return;
tx.skb = skb;
r = ieee80211_xmit_fast_finish(sdata, sta, fast_tx->pn_offs,
fast_tx->key, &tx);
tx.skb = NULL;
if (r == TX_DROP)
goto free;
if (sdata->vif.type == NL80211_IFTYPE_AP_VLAN)
sdata = container_of(sdata->bss,
struct ieee80211_sub_if_data, u.ap);
__skb_queue_tail(&tx.skbs, skb);
ieee80211_tx_frags(local, &sdata->vif, sta, &tx.skbs, false);
return;
free:
kfree_skb(skb);
}
static bool ieee80211_xmit_fast(struct ieee80211_sub_if_data *sdata,
struct sta_info *sta,
struct ieee80211_fast_tx *fast_tx,
struct sk_buff *skb)
{
u16 ethertype = (skb->data[12] << 8) | skb->data[13];
struct ieee80211_hdr *hdr = (void *)fast_tx->hdr;
struct tid_ampdu_tx *tid_tx = NULL;
struct sk_buff *next;
struct ethhdr eth;
u8 tid = IEEE80211_NUM_TIDS;
/* control port protocol needs a lot of special handling */
if (cpu_to_be16(ethertype) == sdata->control_port_protocol)
return false;
/* only RFC 1042 SNAP */
if (ethertype < ETH_P_802_3_MIN)
return false;
/* don't handle TX status request here either */
if (skb->sk && skb_shinfo(skb)->tx_flags & SKBTX_WIFI_STATUS)
return false;
if (hdr->frame_control & cpu_to_le16(IEEE80211_STYPE_QOS_DATA)) {
tid = skb->priority & IEEE80211_QOS_CTL_TAG1D_MASK;
tid_tx = rcu_dereference(sta->ampdu_mlme.tid_tx[tid]);
if (tid_tx) {
if (!test_bit(HT_AGG_STATE_OPERATIONAL, &tid_tx->state))
return false;
if (tid_tx->timeout)
tid_tx->last_tx = jiffies;
}
}
memcpy(ð, skb->data, ETH_HLEN - 2);
/* after this point (skb is modified) we cannot return false */
skb = ieee80211_tx_skb_fixup(skb, ieee80211_sdata_netdev_features(sdata));
if (!skb)
return true;
skb_list_walk_safe(skb, skb, next) {
skb_mark_not_on_list(skb);
__ieee80211_xmit_fast(sdata, sta, fast_tx, skb, tid_tx,
eth.h_dest, eth.h_source);
}
return true;
}
struct sk_buff *ieee80211_tx_dequeue(struct ieee80211_hw *hw,
struct ieee80211_txq *txq)
{
struct ieee80211_local *local = hw_to_local(hw);
struct txq_info *txqi = container_of(txq, struct txq_info, txq);
struct ieee80211_hdr *hdr;
struct sk_buff *skb = NULL;
struct fq *fq = &local->fq;
struct fq_tin *tin = &txqi->tin;
struct ieee80211_tx_info *info;
struct ieee80211_tx_data tx;
ieee80211_tx_result r;
struct ieee80211_vif *vif = txq->vif;
int q = vif->hw_queue[txq->ac];
unsigned long flags;
bool q_stopped;
WARN_ON_ONCE(softirq_count() == 0);
if (!ieee80211_txq_airtime_check(hw, txq))
return NULL;
begin:
spin_lock_irqsave(&local->queue_stop_reason_lock, flags);
q_stopped = local->queue_stop_reasons[q];
spin_unlock_irqrestore(&local->queue_stop_reason_lock, flags);
if (unlikely(q_stopped)) {
/* mark for waking later */
set_bit(IEEE80211_TXQ_DIRTY, &txqi->flags);
return NULL;
}
spin_lock_bh(&fq->lock);
/* Make sure fragments stay together. */
skb = __skb_dequeue(&txqi->frags);
if (unlikely(skb)) {
if (!(IEEE80211_SKB_CB(skb)->control.flags &
IEEE80211_TX_INTCFL_NEED_TXPROCESSING))
goto out;
IEEE80211_SKB_CB(skb)->control.flags &=
~IEEE80211_TX_INTCFL_NEED_TXPROCESSING;
} else {
if (unlikely(test_bit(IEEE80211_TXQ_STOP, &txqi->flags)))
goto out;
skb = fq_tin_dequeue(fq, tin, fq_tin_dequeue_func);
}
if (!skb)
goto out;
spin_unlock_bh(&fq->lock);
hdr = (struct ieee80211_hdr *)skb->data;
info = IEEE80211_SKB_CB(skb);
memset(&tx, 0, sizeof(tx));
__skb_queue_head_init(&tx.skbs);
tx.local = local;
tx.skb = skb;
tx.sdata = vif_to_sdata(info->control.vif);
if (txq->sta) {
tx.sta = container_of(txq->sta, struct sta_info, sta);
/*
* Drop unicast frames to unauthorised stations unless they are
* injected frames or EAPOL frames from the local station.
*/
if (unlikely(!(info->flags & IEEE80211_TX_CTL_INJECTED) &&
ieee80211_is_data(hdr->frame_control) &&
!ieee80211_vif_is_mesh(&tx.sdata->vif) &&
tx.sdata->vif.type != NL80211_IFTYPE_OCB &&
!is_multicast_ether_addr(hdr->addr1) &&
!test_sta_flag(tx.sta, WLAN_STA_AUTHORIZED) &&
(!(info->control.flags &
IEEE80211_TX_CTRL_PORT_CTRL_PROTO) ||
!ieee80211_is_our_addr(tx.sdata, hdr->addr2,
NULL)))) {
I802_DEBUG_INC(local->tx_handlers_drop_unauth_port);
ieee80211_free_txskb(&local->hw, skb);
goto begin;
}
}
/*
 * The key can be removed while the packet is queued, so we need to
 * call this here to get the current key.
*/
r = ieee80211_tx_h_select_key(&tx);
if (r != TX_CONTINUE) {
ieee80211_free_txskb(&local->hw, skb);
goto begin;
}
if (test_bit(IEEE80211_TXQ_AMPDU, &txqi->flags))
info->flags |= (IEEE80211_TX_CTL_AMPDU |
IEEE80211_TX_CTL_DONTFRAG);
if (info->flags & IEEE80211_TX_CTL_HW_80211_ENCAP) {
if (!ieee80211_hw_check(&local->hw, HAS_RATE_CONTROL)) {
r = ieee80211_tx_h_rate_ctrl(&tx);
if (r != TX_CONTINUE) {
ieee80211_free_txskb(&local->hw, skb);
goto begin;
}
}
goto encap_out;
}
if (info->control.flags & IEEE80211_TX_CTRL_FAST_XMIT) {
struct sta_info *sta = container_of(txq->sta, struct sta_info,
sta);
u8 pn_offs = 0;
if (tx.key &&
(tx.key->conf.flags & IEEE80211_KEY_FLAG_GENERATE_IV))
pn_offs = ieee80211_hdrlen(hdr->frame_control);
r = ieee80211_xmit_fast_finish(sta->sdata, sta, pn_offs,
tx.key, &tx);
if (r != TX_CONTINUE) {
ieee80211_free_txskb(&local->hw, skb);
goto begin;
}
} else {
if (invoke_tx_handlers_late(&tx))
goto begin;
skb = __skb_dequeue(&tx.skbs);
if (!skb_queue_empty(&tx.skbs)) {
spin_lock_bh(&fq->lock);
skb_queue_splice_tail(&tx.skbs, &txqi->frags);
spin_unlock_bh(&fq->lock);
}
}
if (skb_has_frag_list(skb) &&
!ieee80211_hw_check(&local->hw, TX_FRAG_LIST)) {
if (skb_linearize(skb)) {
ieee80211_free_txskb(&local->hw, skb);
goto begin;
}
}
switch (tx.sdata->vif.type) {
case NL80211_IFTYPE_MONITOR:
if (tx.sdata->u.mntr.flags & MONITOR_FLAG_ACTIVE) {
vif = &tx.sdata->vif;
break;
}
tx.sdata = rcu_dereference(local->monitor_sdata);
if (tx.sdata) {
vif = &tx.sdata->vif;
info->hw_queue =
vif->hw_queue[skb_get_queue_mapping(skb)];
} else if (ieee80211_hw_check(&local->hw, QUEUE_CONTROL)) {
ieee80211_free_txskb(&local->hw, skb);
goto begin;
} else {
vif = NULL;
}
break;
case NL80211_IFTYPE_AP_VLAN:
tx.sdata = container_of(tx.sdata->bss,
struct ieee80211_sub_if_data, u.ap);
fallthrough;
default:
vif = &tx.sdata->vif;
break;
}
encap_out:
IEEE80211_SKB_CB(skb)->control.vif = vif;
if (tx.sta &&
wiphy_ext_feature_isset(local->hw.wiphy, NL80211_EXT_FEATURE_AQL)) {
bool ampdu = txq->ac != IEEE80211_AC_VO;
u32 airtime;
airtime = ieee80211_calc_expected_tx_airtime(hw, vif, txq->sta,
skb->len, ampdu);
if (airtime) {
airtime = ieee80211_info_set_tx_time_est(info, airtime);
ieee80211_sta_update_pending_airtime(local, tx.sta,
txq->ac,
airtime,
false);
}
}
return skb;
out:
spin_unlock_bh(&fq->lock);
return skb;
}
EXPORT_SYMBOL(ieee80211_tx_dequeue);
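/*
 * In short, ieee80211_tx_dequeue() first re-emits any fragments left over
 * from a previous call, then pulls the next frame from the per-TXQ fq/codel
 * queue, re-runs key selection (the key may have been removed while the
 * frame was queued), finishes the frame via either the fast-xmit path or
 * the late TX handlers, and finally, when AQL is in use, estimates the
 * frame's airtime and charges it to the station before returning the skb
 * to the driver.
 */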
static inline s32 ieee80211_sta_deficit(struct sta_info *sta, u8 ac)
{
struct airtime_info *air_info = &sta->airtime[ac];
return air_info->deficit - atomic_read(&air_info->aql_tx_pending);
}
static void
ieee80211_txq_set_active(struct txq_info *txqi)
{
struct sta_info *sta;
if (!txqi->txq.sta)
return;
sta = container_of(txqi->txq.sta, struct sta_info, sta);
sta->airtime[txqi->txq.ac].last_active = (u32)jiffies;
}
static bool
ieee80211_txq_keep_active(struct txq_info *txqi)
{
struct sta_info *sta;
u32 diff;
if (!txqi->txq.sta)
return false;
sta = container_of(txqi->txq.sta, struct sta_info, sta);
if (ieee80211_sta_deficit(sta, txqi->txq.ac) >= 0)
return false;
diff = (u32)jiffies - sta->airtime[txqi->txq.ac].last_active;
return diff <= AIRTIME_ACTIVE_DURATION;
}
struct ieee80211_txq *ieee80211_next_txq(struct ieee80211_hw *hw, u8 ac)
{
struct ieee80211_local *local = hw_to_local(hw);
struct ieee80211_txq *ret = NULL;
struct txq_info *txqi = NULL, *head = NULL;
bool found_eligible_txq = false;
spin_lock_bh(&local->active_txq_lock[ac]);
if (!local->schedule_round[ac])
goto out;
begin:
txqi = list_first_entry_or_null(&local->active_txqs[ac],
struct txq_info,
schedule_order);
if (!txqi)
goto out;
if (txqi == head) {
if (!found_eligible_txq)
goto out;
else
found_eligible_txq = false;
}
if (!head)
head = txqi;
if (txqi->txq.sta) {
struct sta_info *sta = container_of(txqi->txq.sta,
struct sta_info, sta);
bool aql_check = ieee80211_txq_airtime_check(hw, &txqi->txq);
s32 deficit = ieee80211_sta_deficit(sta, txqi->txq.ac);
if (aql_check)
found_eligible_txq = true;
if (deficit < 0)
sta->airtime[txqi->txq.ac].deficit +=
sta->airtime_weight;
if (deficit < 0 || !aql_check) {
list_move_tail(&txqi->schedule_order,
&local->active_txqs[txqi->txq.ac]);
goto begin;
}
}
if (txqi->schedule_round == local->schedule_round[ac])
goto out;
list_del_init(&txqi->schedule_order);
txqi->schedule_round = local->schedule_round[ac];
ret = &txqi->txq;
out:
spin_unlock_bh(&local->active_txq_lock[ac]);
return ret;
}
EXPORT_SYMBOL(ieee80211_next_txq);
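/*
 * Worked example of the deficit round-robin above (numbers purely for
 * illustration, assuming an airtime_weight of 256): a station whose
 * effective deficit has dropped to -600 is not handed out; each time the
 * scheduler reaches it, the deficit is topped up by the weight and the TXQ
 * is rotated to the tail, so it takes three top-ups (-600 -> -344 -> -88 ->
 * +168) before the TXQ becomes eligible again. Note that the effective
 * deficit from ieee80211_sta_deficit() also subtracts airtime still pending
 * in AQL, which is ignored here for simplicity.
 */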
void __ieee80211_schedule_txq(struct ieee80211_hw *hw,
struct ieee80211_txq *txq,
bool force)
{
struct ieee80211_local *local = hw_to_local(hw);
struct txq_info *txqi = to_txq_info(txq);
bool has_queue;
spin_lock_bh(&local->active_txq_lock[txq->ac]);
has_queue = force || txq_has_queue(txq);
if (list_empty(&txqi->schedule_order) &&
(has_queue || ieee80211_txq_keep_active(txqi))) {
/* If airtime accounting is active, always enqueue STAs at the
* head of the list to ensure that they only get moved to the
* back by the airtime DRR scheduler once they have a negative
* deficit. A station that already has a negative deficit will
* get immediately moved to the back of the list on the next
* call to ieee80211_next_txq().
*/
if (txqi->txq.sta && local->airtime_flags && has_queue &&
wiphy_ext_feature_isset(local->hw.wiphy,
NL80211_EXT_FEATURE_AIRTIME_FAIRNESS))
list_add(&txqi->schedule_order,
&local->active_txqs[txq->ac]);
else
list_add_tail(&txqi->schedule_order,
&local->active_txqs[txq->ac]);
if (has_queue)
ieee80211_txq_set_active(txqi);
}
spin_unlock_bh(&local->active_txq_lock[txq->ac]);
}
EXPORT_SYMBOL(__ieee80211_schedule_txq);
DEFINE_STATIC_KEY_FALSE(aql_disable);
bool ieee80211_txq_airtime_check(struct ieee80211_hw *hw,
struct ieee80211_txq *txq)
{
struct sta_info *sta;
struct ieee80211_local *local = hw_to_local(hw);
if (!wiphy_ext_feature_isset(local->hw.wiphy, NL80211_EXT_FEATURE_AQL))
return true;
if (static_branch_unlikely(&aql_disable))
return true;
if (!txq->sta)
return true;
if (unlikely(txq->tid == IEEE80211_NUM_TIDS))
return true;
sta = container_of(txq->sta, struct sta_info, sta);
if (atomic_read(&sta->airtime[txq->ac].aql_tx_pending) <
sta->airtime[txq->ac].aql_limit_low)
return true;
if (atomic_read(&local->aql_total_pending_airtime) <
local->aql_threshold &&
atomic_read(&sta->airtime[txq->ac].aql_tx_pending) <
sta->airtime[txq->ac].aql_limit_high)
return true;
return false;
}
EXPORT_SYMBOL(ieee80211_txq_airtime_check);
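/*
 * Illustration of the two-level AQL check above (the numbers are
 * hypothetical; the real limits come from aql_txq_limit_low/high and
 * aql_threshold): with a per-station low limit of 5000 us, a high limit of
 * 12000 us and a device-wide threshold of 24000 us, a station with 4000 us
 * of airtime already pending may always queue more; one with 9000 us
 * pending may only queue more while the total pending airtime is still
 * below 24000 us; and one with 13000 us pending is throttled until some of
 * that airtime completes.
 */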
static bool
ieee80211_txq_schedule_airtime_check(struct ieee80211_local *local, u8 ac)
{
unsigned int num_txq = 0;
struct txq_info *txq;
u32 aql_limit;
if (!wiphy_ext_feature_isset(local->hw.wiphy, NL80211_EXT_FEATURE_AQL))
return true;
list_for_each_entry(txq, &local->active_txqs[ac], schedule_order)
num_txq++;
aql_limit = (num_txq - 1) * local->aql_txq_limit_low[ac] / 2 +
local->aql_txq_limit_high[ac];
return atomic_read(&local->aql_ac_pending_airtime[ac]) < aql_limit;
}
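/*
 * The per-AC limit above scales with the number of active TXQs: with
 * hypothetical per-TXQ limits of low = 5000 us and high = 12000 us and four
 * TXQs on the active list, the AC may accumulate
 * (4 - 1) * 5000 / 2 + 12000 = 19500 us of pending airtime before
 * ieee80211_txq_schedule_start() pauses scheduling for the round.
 */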
bool ieee80211_txq_may_transmit(struct ieee80211_hw *hw,
struct ieee80211_txq *txq)
{
struct ieee80211_local *local = hw_to_local(hw);
struct txq_info *iter, *tmp, *txqi = to_txq_info(txq);
struct sta_info *sta;
u8 ac = txq->ac;
spin_lock_bh(&local->active_txq_lock[ac]);
if (!txqi->txq.sta)
goto out;
if (list_empty(&txqi->schedule_order))
goto out;
if (!ieee80211_txq_schedule_airtime_check(local, ac))
goto out;
list_for_each_entry_safe(iter, tmp, &local->active_txqs[ac],
schedule_order) {
if (iter == txqi)
break;
if (!iter->txq.sta) {
list_move_tail(&iter->schedule_order,
&local->active_txqs[ac]);
continue;
}
sta = container_of(iter->txq.sta, struct sta_info, sta);
if (ieee80211_sta_deficit(sta, ac) < 0)
sta->airtime[ac].deficit += sta->airtime_weight;
list_move_tail(&iter->schedule_order, &local->active_txqs[ac]);
}
sta = container_of(txqi->txq.sta, struct sta_info, sta);
if (sta->airtime[ac].deficit >= 0)
goto out;
sta->airtime[ac].deficit += sta->airtime_weight;
list_move_tail(&txqi->schedule_order, &local->active_txqs[ac]);
spin_unlock_bh(&local->active_txq_lock[ac]);
return false;
out:
if (!list_empty(&txqi->schedule_order))
list_del_init(&txqi->schedule_order);
spin_unlock_bh(&local->active_txq_lock[ac]);
return true;
}
EXPORT_SYMBOL(ieee80211_txq_may_transmit);
void ieee80211_txq_schedule_start(struct ieee80211_hw *hw, u8 ac)
{
struct ieee80211_local *local = hw_to_local(hw);
spin_lock_bh(&local->active_txq_lock[ac]);
if (ieee80211_txq_schedule_airtime_check(local, ac)) {
local->schedule_round[ac]++;
if (!local->schedule_round[ac])
local->schedule_round[ac]++;
} else {
local->schedule_round[ac] = 0;
}
spin_unlock_bh(&local->active_txq_lock[ac]);
}
EXPORT_SYMBOL(ieee80211_txq_schedule_start);
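/*
 * Rough sketch of how a driver is expected to drive the scheduling API
 * above (ieee80211_return_txq() and ieee80211_txq_schedule_end() are the
 * usual mac80211 helpers; hw_has_room() and queue_to_hardware() are
 * placeholders for driver-specific code, details vary per driver):
 *
 *	ieee80211_txq_schedule_start(hw, ac);
 *	while ((txq = ieee80211_next_txq(hw, ac))) {
 *		while (hw_has_room() &&
 *		       (skb = ieee80211_tx_dequeue(hw, txq)))
 *			queue_to_hardware(skb);
 *		ieee80211_return_txq(hw, txq, false);
 *	}
 *	ieee80211_txq_schedule_end(hw, ac);
 */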
void __ieee80211_subif_start_xmit(struct sk_buff *skb,
struct net_device *dev,
u32 info_flags,
u32 ctrl_flags,
u64 *cookie)
{
struct ieee80211_sub_if_data *sdata = IEEE80211_DEV_TO_SUB_IF(dev);
struct ieee80211_local *local = sdata->local;
struct sta_info *sta;
struct sk_buff *next;
int len = skb->len;
if (unlikely(!ieee80211_sdata_running(sdata) || skb->len < ETH_HLEN)) {
kfree_skb(skb);
return;
}
sk_pacing_shift_update(skb->sk, sdata->local->hw.tx_sk_pacing_shift);
rcu_read_lock();
if (ieee80211_vif_is_mesh(&sdata->vif) &&
ieee80211_hw_check(&local->hw, SUPPORT_FAST_XMIT) &&
ieee80211_mesh_xmit_fast(sdata, skb, ctrl_flags))
goto out;
if (ieee80211_lookup_ra_sta(sdata, skb, &sta))
goto out_free;
if (IS_ERR(sta))
sta = NULL;
skb_set_queue_mapping(skb, ieee80211_select_queue(sdata, sta, skb));
ieee80211_aggr_check(sdata, sta, skb);
if (sta) {
struct ieee80211_fast_tx *fast_tx;
fast_tx = rcu_dereference(sta->fast_tx);
if (fast_tx &&
ieee80211_xmit_fast(sdata, sta, fast_tx, skb))
goto out;
}
	/* The frame could be fragmented, software-encrypted, etc., so we
	 * cannot really handle checksum or GSO offload; fix it up in
	 * software before we handle anything else.
	 */
skb = ieee80211_tx_skb_fixup(skb, 0);
if (!skb) {
len = 0;
goto out;
}
skb_list_walk_safe(skb, skb, next) {
skb_mark_not_on_list(skb);
if (skb->protocol == sdata->control_port_protocol)
ctrl_flags |= IEEE80211_TX_CTRL_SKIP_MPATH_LOOKUP;
skb = ieee80211_build_hdr(sdata, skb, info_flags,
sta, ctrl_flags, cookie);
if (IS_ERR(skb)) {
kfree_skb_list(next);
goto out;
}
dev_sw_netstats_tx_add(dev, 1, skb->len);
ieee80211_xmit(sdata, sta, skb);
}
goto out;
out_free:
kfree_skb(skb);
len = 0;
out:
if (len)
ieee80211_tpt_led_trig_tx(local, len);
rcu_read_unlock();
}
static int ieee80211_change_da(struct sk_buff *skb, struct sta_info *sta)
{
struct ethhdr *eth;
int err;
err = skb_ensure_writable(skb, ETH_HLEN);
if (unlikely(err))
return err;
eth = (void *)skb->data;
ether_addr_copy(eth->h_dest, sta->sta.addr);
return 0;
}
static bool ieee80211_multicast_to_unicast(struct sk_buff *skb,
struct net_device *dev)
{
struct ieee80211_sub_if_data *sdata = IEEE80211_DEV_TO_SUB_IF(dev);
const struct ethhdr *eth = (void *)skb->data;
const struct vlan_ethhdr *ethvlan = (void *)skb->data;
__be16 ethertype;
switch (sdata->vif.type) {
case NL80211_IFTYPE_AP_VLAN:
if (sdata->u.vlan.sta)
return false;
if (sdata->wdev.use_4addr)
return false;
fallthrough;
case NL80211_IFTYPE_AP:
/* check runtime toggle for this bss */
if (!sdata->bss->multicast_to_unicast)
return false;
break;
default:
return false;
}
/* multicast to unicast conversion only for some payload */
ethertype = eth->h_proto;
if (ethertype == htons(ETH_P_8021Q) && skb->len >= VLAN_ETH_HLEN)
ethertype = ethvlan->h_vlan_encapsulated_proto;
switch (ethertype) {
case htons(ETH_P_ARP):
case htons(ETH_P_IP):
case htons(ETH_P_IPV6):
break;
default:
return false;
}
return true;
}
static void
ieee80211_convert_to_unicast(struct sk_buff *skb, struct net_device *dev,
struct sk_buff_head *queue)
{
struct ieee80211_sub_if_data *sdata = IEEE80211_DEV_TO_SUB_IF(dev);
struct ieee80211_local *local = sdata->local;
const struct ethhdr *eth = (struct ethhdr *)skb->data;
struct sta_info *sta, *first = NULL;
struct sk_buff *cloned_skb;
rcu_read_lock();
list_for_each_entry_rcu(sta, &local->sta_list, list) {
if (sdata != sta->sdata)
/* AP-VLAN mismatch */
continue;
if (unlikely(ether_addr_equal(eth->h_source, sta->sta.addr)))
/* do not send back to source */
continue;
if (!first) {
first = sta;
continue;
}
cloned_skb = skb_clone(skb, GFP_ATOMIC);
if (!cloned_skb)
goto multicast;
if (unlikely(ieee80211_change_da(cloned_skb, sta))) {
dev_kfree_skb(cloned_skb);
goto multicast;
}
__skb_queue_tail(queue, cloned_skb);
}
if (likely(first)) {
if (unlikely(ieee80211_change_da(skb, first)))
goto multicast;
__skb_queue_tail(queue, skb);
} else {
/* no STA connected, drop */
kfree_skb(skb);
skb = NULL;
}
goto out;
multicast:
__skb_queue_purge(queue);
__skb_queue_tail(queue, skb);
out:
rcu_read_unlock();
}
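/*
 * Example: with three stations associated to the BSS (none of them the
 * source), one incoming multicast ARP reply yields two clones plus the
 * retargeted original, each with its destination rewritten to one station's
 * address. If any clone allocation or header rewrite fails, the queue is
 * purged and the single original frame is sent as plain multicast instead.
 */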
static void ieee80211_mlo_multicast_tx_one(struct ieee80211_sub_if_data *sdata,
struct sk_buff *skb, u32 ctrl_flags,
unsigned int link_id)
{
struct sk_buff *out;
out = skb_copy(skb, GFP_ATOMIC);
if (!out)
return;
ctrl_flags |= u32_encode_bits(link_id, IEEE80211_TX_CTRL_MLO_LINK);
__ieee80211_subif_start_xmit(out, sdata->dev, 0, ctrl_flags, NULL);
}
static void ieee80211_mlo_multicast_tx(struct net_device *dev,
struct sk_buff *skb)
{
struct ieee80211_sub_if_data *sdata = IEEE80211_DEV_TO_SUB_IF(dev);
unsigned long links = sdata->vif.active_links;
unsigned int link;
u32 ctrl_flags = IEEE80211_TX_CTRL_MCAST_MLO_FIRST_TX;
if (hweight16(links) == 1) {
ctrl_flags |= u32_encode_bits(__ffs(links),
IEEE80211_TX_CTRL_MLO_LINK);
__ieee80211_subif_start_xmit(skb, sdata->dev, 0, ctrl_flags,
NULL);
return;
}
for_each_set_bit(link, &links, IEEE80211_MLD_MAX_NUM_LINKS) {
ieee80211_mlo_multicast_tx_one(sdata, skb, ctrl_flags, link);
ctrl_flags = 0;
}
kfree_skb(skb);
}
/**
* ieee80211_subif_start_xmit - netif start_xmit function for 802.3 vifs
* @skb: packet to be sent
* @dev: incoming interface
*
* On failure skb will be freed.
*/
netdev_tx_t ieee80211_subif_start_xmit(struct sk_buff *skb,
struct net_device *dev)
{
struct ieee80211_sub_if_data *sdata = IEEE80211_DEV_TO_SUB_IF(dev);
const struct ethhdr *eth = (void *)skb->data;
if (likely(!is_multicast_ether_addr(eth->h_dest)))
goto normal;
if (unlikely(!ieee80211_sdata_running(sdata))) {
kfree_skb(skb);
return NETDEV_TX_OK;
}
if (unlikely(ieee80211_multicast_to_unicast(skb, dev))) {
struct sk_buff_head queue;
__skb_queue_head_init(&queue);
ieee80211_convert_to_unicast(skb, dev, &queue);
while ((skb = __skb_dequeue(&queue)))
__ieee80211_subif_start_xmit(skb, dev, 0,
IEEE80211_TX_CTRL_MLO_LINK_UNSPEC,
NULL);
} else if (ieee80211_vif_is_mld(&sdata->vif) &&
sdata->vif.type == NL80211_IFTYPE_AP &&
!ieee80211_hw_check(&sdata->local->hw, MLO_MCAST_MULTI_LINK_TX)) {
ieee80211_mlo_multicast_tx(dev, skb);
} else {
normal:
__ieee80211_subif_start_xmit(skb, dev, 0,
IEEE80211_TX_CTRL_MLO_LINK_UNSPEC,
NULL);
}
return NETDEV_TX_OK;
}
static bool __ieee80211_tx_8023(struct ieee80211_sub_if_data *sdata,
struct sk_buff *skb, struct sta_info *sta,
bool txpending)
{
struct ieee80211_local *local = sdata->local;
struct ieee80211_tx_control control = {};
struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb);
struct ieee80211_sta *pubsta = NULL;
unsigned long flags;
int q = info->hw_queue;
spin_lock_irqsave(&local->queue_stop_reason_lock, flags);
if (local->queue_stop_reasons[q] ||
(!txpending && !skb_queue_empty(&local->pending[q]))) {
if (txpending)
skb_queue_head(&local->pending[q], skb);
else
skb_queue_tail(&local->pending[q], skb);
spin_unlock_irqrestore(&local->queue_stop_reason_lock, flags);
return false;
}
spin_unlock_irqrestore(&local->queue_stop_reason_lock, flags);
if (sta && sta->uploaded)
pubsta = &sta->sta;
control.sta = pubsta;
drv_tx(local, &control, skb);
return true;
}
static bool ieee80211_tx_8023(struct ieee80211_sub_if_data *sdata,
struct sk_buff *skb, struct sta_info *sta,
bool txpending)
{
struct ieee80211_local *local = sdata->local;
struct sk_buff *next;
bool ret = true;
if (ieee80211_queue_skb(local, sdata, sta, skb))
return true;
skb_list_walk_safe(skb, skb, next) {
skb_mark_not_on_list(skb);
if (!__ieee80211_tx_8023(sdata, skb, sta, txpending))
ret = false;
}
return ret;
}
static void ieee80211_8023_xmit(struct ieee80211_sub_if_data *sdata,
struct net_device *dev, struct sta_info *sta,
struct ieee80211_key *key, struct sk_buff *skb)
{
struct ieee80211_tx_info *info;
struct ieee80211_local *local = sdata->local;
struct tid_ampdu_tx *tid_tx;
struct sk_buff *seg, *next;
unsigned int skbs = 0, len = 0;
u16 queue;
u8 tid;
queue = ieee80211_select_queue(sdata, sta, skb);
skb_set_queue_mapping(skb, queue);
if (unlikely(test_bit(SCAN_SW_SCANNING, &local->scanning)) &&
test_bit(SDATA_STATE_OFFCHANNEL, &sdata->state))
goto out_free;
skb = skb_share_check(skb, GFP_ATOMIC);
if (unlikely(!skb))
return;
ieee80211_aggr_check(sdata, sta, skb);
tid = skb->priority & IEEE80211_QOS_CTL_TAG1D_MASK;
tid_tx = rcu_dereference(sta->ampdu_mlme.tid_tx[tid]);
if (tid_tx) {
if (!test_bit(HT_AGG_STATE_OPERATIONAL, &tid_tx->state)) {
/* fall back to non-offload slow path */
__ieee80211_subif_start_xmit(skb, dev, 0,
IEEE80211_TX_CTRL_MLO_LINK_UNSPEC,
NULL);
return;
}
if (tid_tx->timeout)
tid_tx->last_tx = jiffies;
}
skb = ieee80211_tx_skb_fixup(skb, ieee80211_sdata_netdev_features(sdata));
if (!skb)
return;
info = IEEE80211_SKB_CB(skb);
memset(info, 0, sizeof(*info));
info->hw_queue = sdata->vif.hw_queue[queue];
if (sdata->vif.type == NL80211_IFTYPE_AP_VLAN)
sdata = container_of(sdata->bss,
struct ieee80211_sub_if_data, u.ap);
info->flags |= IEEE80211_TX_CTL_HW_80211_ENCAP;
info->control.vif = &sdata->vif;
if (key)
info->control.hw_key = &key->conf;
skb_list_walk_safe(skb, seg, next) {
skbs++;
len += seg->len;
if (seg != skb)
memcpy(IEEE80211_SKB_CB(seg), info, sizeof(*info));
}
if (unlikely(skb->sk &&
skb_shinfo(skb)->tx_flags & SKBTX_WIFI_STATUS))
info->ack_frame_id = ieee80211_store_ack_skb(local, skb,
&info->flags, NULL);
dev_sw_netstats_tx_add(dev, skbs, len);
sta->deflink.tx_stats.packets[queue] += skbs;
sta->deflink.tx_stats.bytes[queue] += len;
ieee80211_tpt_led_trig_tx(local, len);
ieee80211_tx_8023(sdata, skb, sta, false);
return;
out_free:
kfree_skb(skb);
}
netdev_tx_t ieee80211_subif_start_xmit_8023(struct sk_buff *skb,
struct net_device *dev)
{
struct ieee80211_sub_if_data *sdata = IEEE80211_DEV_TO_SUB_IF(dev);
struct ethhdr *ehdr = (struct ethhdr *)skb->data;
struct ieee80211_key *key;
struct sta_info *sta;
if (unlikely(!ieee80211_sdata_running(sdata) || skb->len < ETH_HLEN)) {
kfree_skb(skb);
return NETDEV_TX_OK;
}
rcu_read_lock();
if (ieee80211_lookup_ra_sta(sdata, skb, &sta)) {
kfree_skb(skb);
goto out;
}
if (unlikely(IS_ERR_OR_NULL(sta) || !sta->uploaded ||
!test_sta_flag(sta, WLAN_STA_AUTHORIZED) ||
sdata->control_port_protocol == ehdr->h_proto))
goto skip_offload;
key = rcu_dereference(sta->ptk[sta->ptk_idx]);
if (!key)
key = rcu_dereference(sdata->default_unicast_key);
if (key && (!(key->flags & KEY_FLAG_UPLOADED_TO_HARDWARE) ||
key->conf.cipher == WLAN_CIPHER_SUITE_TKIP))
goto skip_offload;
sk_pacing_shift_update(skb->sk, sdata->local->hw.tx_sk_pacing_shift);
ieee80211_8023_xmit(sdata, dev, sta, key, skb);
goto out;
skip_offload:
ieee80211_subif_start_xmit(skb, dev);
out:
rcu_read_unlock();
return NETDEV_TX_OK;
}
struct sk_buff *
ieee80211_build_data_template(struct ieee80211_sub_if_data *sdata,
struct sk_buff *skb, u32 info_flags)
{
struct ieee80211_hdr *hdr;
struct ieee80211_tx_data tx = {
.local = sdata->local,
.sdata = sdata,
};
struct sta_info *sta;
rcu_read_lock();
if (ieee80211_lookup_ra_sta(sdata, skb, &sta)) {
kfree_skb(skb);
skb = ERR_PTR(-EINVAL);
goto out;
}
skb = ieee80211_build_hdr(sdata, skb, info_flags, sta,
IEEE80211_TX_CTRL_MLO_LINK_UNSPEC, NULL);
if (IS_ERR(skb))
goto out;
hdr = (void *)skb->data;
tx.sta = sta_info_get(sdata, hdr->addr1);
tx.skb = skb;
if (ieee80211_tx_h_select_key(&tx) != TX_CONTINUE) {
rcu_read_unlock();
kfree_skb(skb);
return ERR_PTR(-EINVAL);
}
out:
rcu_read_unlock();
return skb;
}
/*
 * ieee80211_clear_tx_pending must not be called in a context where
 * it is possible that packets could come in again.
 */
void ieee80211_clear_tx_pending(struct ieee80211_local *local)
{
struct sk_buff *skb;
int i;
for (i = 0; i < local->hw.queues; i++) {
while ((skb = skb_dequeue(&local->pending[i])) != NULL)
ieee80211_free_txskb(&local->hw, skb);
}
}
/*
* Returns false if the frame couldn't be transmitted but was queued instead,
* which in this case means re-queued -- take as an indication to stop sending
* more pending frames.
*/
static bool ieee80211_tx_pending_skb(struct ieee80211_local *local,
struct sk_buff *skb)
{
struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb);
struct ieee80211_sub_if_data *sdata;
struct sta_info *sta;
struct ieee80211_hdr *hdr;
bool result;
struct ieee80211_chanctx_conf *chanctx_conf;
sdata = vif_to_sdata(info->control.vif);
if (info->control.flags & IEEE80211_TX_INTCFL_NEED_TXPROCESSING) {
/* update band only for non-MLD */
if (!ieee80211_vif_is_mld(&sdata->vif)) {
chanctx_conf =
rcu_dereference(sdata->vif.bss_conf.chanctx_conf);
if (unlikely(!chanctx_conf)) {
dev_kfree_skb(skb);
return true;
}
info->band = chanctx_conf->def.chan->band;
}
result = ieee80211_tx(sdata, NULL, skb, true);
} else if (info->flags & IEEE80211_TX_CTL_HW_80211_ENCAP) {
if (ieee80211_lookup_ra_sta(sdata, skb, &sta)) {
dev_kfree_skb(skb);
return true;
}
if (IS_ERR(sta) || (sta && !sta->uploaded))
sta = NULL;
result = ieee80211_tx_8023(sdata, skb, sta, true);
} else {
struct sk_buff_head skbs;
__skb_queue_head_init(&skbs);
__skb_queue_tail(&skbs, skb);
hdr = (struct ieee80211_hdr *)skb->data;
sta = sta_info_get(sdata, hdr->addr1);
result = __ieee80211_tx(local, &skbs, sta, true);
}
return result;
}
/*
* Transmit all pending packets. Called from tasklet.
*/
void ieee80211_tx_pending(struct tasklet_struct *t)
{
struct ieee80211_local *local = from_tasklet(local, t,
tx_pending_tasklet);
unsigned long flags;
int i;
bool txok;
rcu_read_lock();
spin_lock_irqsave(&local->queue_stop_reason_lock, flags);
for (i = 0; i < local->hw.queues; i++) {
		/*
		 * If the queue is stopped for some reason other than pending
		 * frames, or we have no pending frames, proceed to the next
		 * queue.
		 */
if (local->queue_stop_reasons[i] ||
skb_queue_empty(&local->pending[i]))
continue;
while (!skb_queue_empty(&local->pending[i])) {
struct sk_buff *skb = __skb_dequeue(&local->pending[i]);
struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb);
if (WARN_ON(!info->control.vif)) {
ieee80211_free_txskb(&local->hw, skb);
continue;
}
spin_unlock_irqrestore(&local->queue_stop_reason_lock,
flags);
txok = ieee80211_tx_pending_skb(local, skb);
spin_lock_irqsave(&local->queue_stop_reason_lock,
flags);
if (!txok)
break;
}
}
spin_unlock_irqrestore(&local->queue_stop_reason_lock, flags);
rcu_read_unlock();
}
/* functions for drivers to get certain frames */
static void __ieee80211_beacon_add_tim(struct ieee80211_sub_if_data *sdata,
struct ieee80211_link_data *link,
struct ps_data *ps, struct sk_buff *skb,
bool is_template)
{
u8 *pos, *tim;
int aid0 = 0;
int i, have_bits = 0, n1, n2;
struct ieee80211_bss_conf *link_conf = link->conf;
/* Generate bitmap for TIM only if there are any STAs in power save
* mode. */
if (atomic_read(&ps->num_sta_ps) > 0)
/* in the hope that this is faster than
* checking byte-for-byte */
have_bits = !bitmap_empty((unsigned long *)ps->tim,
IEEE80211_MAX_AID+1);
if (!is_template) {
if (ps->dtim_count == 0)
ps->dtim_count = link_conf->dtim_period - 1;
else
ps->dtim_count--;
}
tim = pos = skb_put(skb, 5);
*pos++ = WLAN_EID_TIM;
*pos++ = 3;
*pos++ = ps->dtim_count;
*pos++ = link_conf->dtim_period;
if (ps->dtim_count == 0 && !skb_queue_empty(&ps->bc_buf))
aid0 = 1;
ps->dtim_bc_mc = aid0 == 1;
if (have_bits) {
/* Find largest even number N1 so that bits numbered 1 through
* (N1 x 8) - 1 in the bitmap are 0 and number N2 so that bits
* (N2 + 1) x 8 through 2007 are 0. */
n1 = 0;
for (i = 0; i < IEEE80211_MAX_TIM_LEN; i++) {
if (ps->tim[i]) {
n1 = i & 0xfe;
break;
}
}
n2 = n1;
for (i = IEEE80211_MAX_TIM_LEN - 1; i >= n1; i--) {
if (ps->tim[i]) {
n2 = i;
break;
}
}
/* Bitmap control */
*pos++ = n1 | aid0;
/* Part Virt Bitmap */
skb_put_data(skb, ps->tim + n1, n2 - n1 + 1);
tim[1] = n2 - n1 + 4;
} else {
*pos++ = aid0; /* Bitmap control */
if (ieee80211_get_link_sband(link)->band != NL80211_BAND_S1GHZ) {
tim[1] = 4;
/* Part Virt Bitmap */
skb_put_u8(skb, 0);
}
}
}
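/*
 * Worked example for the partial virtual bitmap above: if the only AID in
 * power save with buffered traffic is 49, its bit lives in octet 49 / 8 = 6
 * of ps->tim, so n1 = 6 & 0xfe = 6 and n2 = 6. The element then carries the
 * DTIM count and period, a Bitmap Control octet of 6 | aid0 (bitmap offset
 * 3, plus the multicast/broadcast indication in bit 0) and a single partial
 * bitmap octet, giving tim[1] = 6 - 6 + 4 = 4.
 */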
static int ieee80211_beacon_add_tim(struct ieee80211_sub_if_data *sdata,
struct ieee80211_link_data *link,
struct ps_data *ps, struct sk_buff *skb,
bool is_template)
{
struct ieee80211_local *local = sdata->local;
/*
* Not very nice, but we want to allow the driver to call
* ieee80211_beacon_get() as a response to the set_tim()
* callback. That, however, is already invoked under the
* sta_lock to guarantee consistent and race-free update
* of the tim bitmap in mac80211 and the driver.
*/
if (local->tim_in_locked_section) {
__ieee80211_beacon_add_tim(sdata, link, ps, skb, is_template);
} else {
spin_lock_bh(&local->tim_lock);
__ieee80211_beacon_add_tim(sdata, link, ps, skb, is_template);
spin_unlock_bh(&local->tim_lock);
}
return 0;
}
static void ieee80211_set_beacon_cntdwn(struct ieee80211_sub_if_data *sdata,
struct beacon_data *beacon,
struct ieee80211_link_data *link)
{
u8 *beacon_data, count, max_count = 1;
struct probe_resp *resp;
size_t beacon_data_len;
u16 *bcn_offsets;
int i;
switch (sdata->vif.type) {
case NL80211_IFTYPE_AP:
beacon_data = beacon->tail;
beacon_data_len = beacon->tail_len;
break;
case NL80211_IFTYPE_ADHOC:
beacon_data = beacon->head;
beacon_data_len = beacon->head_len;
break;
case NL80211_IFTYPE_MESH_POINT:
beacon_data = beacon->head;
beacon_data_len = beacon->head_len;
break;
default:
return;
}
resp = rcu_dereference(link->u.ap.probe_resp);
bcn_offsets = beacon->cntdwn_counter_offsets;
count = beacon->cntdwn_current_counter;
if (link->conf->csa_active)
max_count = IEEE80211_MAX_CNTDWN_COUNTERS_NUM;
for (i = 0; i < max_count; ++i) {
if (bcn_offsets[i]) {
if (WARN_ON_ONCE(bcn_offsets[i] >= beacon_data_len))
return;
beacon_data[bcn_offsets[i]] = count;
}
if (sdata->vif.type == NL80211_IFTYPE_AP && resp) {
u16 *resp_offsets = resp->cntdwn_counter_offsets;
resp->data[resp_offsets[i]] = count;
}
}
}
static u8 __ieee80211_beacon_update_cntdwn(struct beacon_data *beacon)
{
beacon->cntdwn_current_counter--;
/* the counter should never reach 0 */
WARN_ON_ONCE(!beacon->cntdwn_current_counter);
return beacon->cntdwn_current_counter;
}
u8 ieee80211_beacon_update_cntdwn(struct ieee80211_vif *vif)
{
struct ieee80211_sub_if_data *sdata = vif_to_sdata(vif);
struct beacon_data *beacon = NULL;
u8 count = 0;
rcu_read_lock();
if (sdata->vif.type == NL80211_IFTYPE_AP)
beacon = rcu_dereference(sdata->deflink.u.ap.beacon);
else if (sdata->vif.type == NL80211_IFTYPE_ADHOC)
beacon = rcu_dereference(sdata->u.ibss.presp);
else if (ieee80211_vif_is_mesh(&sdata->vif))
beacon = rcu_dereference(sdata->u.mesh.beacon);
if (!beacon)
goto unlock;
count = __ieee80211_beacon_update_cntdwn(beacon);
unlock:
rcu_read_unlock();
return count;
}
EXPORT_SYMBOL(ieee80211_beacon_update_cntdwn);
void ieee80211_beacon_set_cntdwn(struct ieee80211_vif *vif, u8 counter)
{
struct ieee80211_sub_if_data *sdata = vif_to_sdata(vif);
struct beacon_data *beacon = NULL;
rcu_read_lock();
if (sdata->vif.type == NL80211_IFTYPE_AP)
beacon = rcu_dereference(sdata->deflink.u.ap.beacon);
else if (sdata->vif.type == NL80211_IFTYPE_ADHOC)
beacon = rcu_dereference(sdata->u.ibss.presp);
else if (ieee80211_vif_is_mesh(&sdata->vif))
beacon = rcu_dereference(sdata->u.mesh.beacon);
if (!beacon)
goto unlock;
if (counter < beacon->cntdwn_current_counter)
beacon->cntdwn_current_counter = counter;
unlock:
rcu_read_unlock();
}
EXPORT_SYMBOL(ieee80211_beacon_set_cntdwn);
bool ieee80211_beacon_cntdwn_is_complete(struct ieee80211_vif *vif)
{
struct ieee80211_sub_if_data *sdata = vif_to_sdata(vif);
struct beacon_data *beacon = NULL;
u8 *beacon_data;
size_t beacon_data_len;
int ret = false;
if (!ieee80211_sdata_running(sdata))
return false;
rcu_read_lock();
if (vif->type == NL80211_IFTYPE_AP) {
beacon = rcu_dereference(sdata->deflink.u.ap.beacon);
if (WARN_ON(!beacon || !beacon->tail))
goto out;
beacon_data = beacon->tail;
beacon_data_len = beacon->tail_len;
} else if (vif->type == NL80211_IFTYPE_ADHOC) {
struct ieee80211_if_ibss *ifibss = &sdata->u.ibss;
beacon = rcu_dereference(ifibss->presp);
if (!beacon)
goto out;
beacon_data = beacon->head;
beacon_data_len = beacon->head_len;
} else if (vif->type == NL80211_IFTYPE_MESH_POINT) {
struct ieee80211_if_mesh *ifmsh = &sdata->u.mesh;
beacon = rcu_dereference(ifmsh->beacon);
if (!beacon)
goto out;
beacon_data = beacon->head;
beacon_data_len = beacon->head_len;
} else {
WARN_ON(1);
goto out;
}
if (!beacon->cntdwn_counter_offsets[0])
goto out;
if (WARN_ON_ONCE(beacon->cntdwn_counter_offsets[0] > beacon_data_len))
goto out;
if (beacon_data[beacon->cntdwn_counter_offsets[0]] == 1)
ret = true;
out:
rcu_read_unlock();
return ret;
}
EXPORT_SYMBOL(ieee80211_beacon_cntdwn_is_complete);
static int ieee80211_beacon_protect(struct sk_buff *skb,
struct ieee80211_local *local,
struct ieee80211_sub_if_data *sdata,
struct ieee80211_link_data *link)
{
ieee80211_tx_result res;
struct ieee80211_tx_data tx;
struct sk_buff *check_skb;
memset(&tx, 0, sizeof(tx));
tx.key = rcu_dereference(link->default_beacon_key);
if (!tx.key)
return 0;
if (unlikely(tx.key->flags & KEY_FLAG_TAINTED)) {
tx.key = NULL;
return -EINVAL;
}
if (!(tx.key->conf.flags & IEEE80211_KEY_FLAG_SW_MGMT_TX) &&
tx.key->flags & KEY_FLAG_UPLOADED_TO_HARDWARE)
IEEE80211_SKB_CB(skb)->control.hw_key = &tx.key->conf;
tx.local = local;
tx.sdata = sdata;
__skb_queue_head_init(&tx.skbs);
__skb_queue_tail(&tx.skbs, skb);
res = ieee80211_tx_h_encrypt(&tx);
check_skb = __skb_dequeue(&tx.skbs);
/* we may crash after this, but it'd be a bug in crypto */
WARN_ON(check_skb != skb);
if (WARN_ON_ONCE(res != TX_CONTINUE))
return -EINVAL;
return 0;
}
static void
ieee80211_beacon_get_finish(struct ieee80211_hw *hw,
struct ieee80211_vif *vif,
struct ieee80211_link_data *link,
struct ieee80211_mutable_offsets *offs,
struct beacon_data *beacon,
struct sk_buff *skb,
struct ieee80211_chanctx_conf *chanctx_conf,
u16 csa_off_base)
{
struct ieee80211_local *local = hw_to_local(hw);
struct ieee80211_sub_if_data *sdata = vif_to_sdata(vif);
struct ieee80211_tx_info *info;
enum nl80211_band band;
struct ieee80211_tx_rate_control txrc;
/* CSA offsets */
if (offs && beacon) {
u16 i;
for (i = 0; i < IEEE80211_MAX_CNTDWN_COUNTERS_NUM; i++) {
u16 csa_off = beacon->cntdwn_counter_offsets[i];
if (!csa_off)
continue;
offs->cntdwn_counter_offs[i] = csa_off_base + csa_off;
}
}
band = chanctx_conf->def.chan->band;
info = IEEE80211_SKB_CB(skb);
info->flags |= IEEE80211_TX_INTFL_DONT_ENCRYPT;
info->flags |= IEEE80211_TX_CTL_NO_ACK;
info->band = band;
memset(&txrc, 0, sizeof(txrc));
txrc.hw = hw;
txrc.sband = local->hw.wiphy->bands[band];
txrc.bss_conf = link->conf;
txrc.skb = skb;
txrc.reported_rate.idx = -1;
if (sdata->beacon_rate_set && sdata->beacon_rateidx_mask[band])
txrc.rate_idx_mask = sdata->beacon_rateidx_mask[band];
else
txrc.rate_idx_mask = sdata->rc_rateidx_mask[band];
txrc.bss = true;
rate_control_get_rate(sdata, NULL, &txrc);
info->control.vif = vif;
info->control.flags |= u32_encode_bits(link->link_id,
IEEE80211_TX_CTRL_MLO_LINK);
info->flags |= IEEE80211_TX_CTL_CLEAR_PS_FILT |
IEEE80211_TX_CTL_ASSIGN_SEQ |
IEEE80211_TX_CTL_FIRST_FRAGMENT;
}
static void
ieee80211_beacon_add_mbssid(struct sk_buff *skb, struct beacon_data *beacon,
u8 i)
{
if (!beacon->mbssid_ies || !beacon->mbssid_ies->cnt ||
i > beacon->mbssid_ies->cnt)
return;
if (i < beacon->mbssid_ies->cnt) {
skb_put_data(skb, beacon->mbssid_ies->elem[i].data,
beacon->mbssid_ies->elem[i].len);
if (beacon->rnr_ies && beacon->rnr_ies->cnt) {
skb_put_data(skb, beacon->rnr_ies->elem[i].data,
beacon->rnr_ies->elem[i].len);
for (i = beacon->mbssid_ies->cnt; i < beacon->rnr_ies->cnt; i++)
skb_put_data(skb, beacon->rnr_ies->elem[i].data,
beacon->rnr_ies->elem[i].len);
}
return;
}
/* i == beacon->mbssid_ies->cnt, include all MBSSID elements */
for (i = 0; i < beacon->mbssid_ies->cnt; i++)
skb_put_data(skb, beacon->mbssid_ies->elem[i].data,
beacon->mbssid_ies->elem[i].len);
}
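/*
 * The ema_index argument selects how much of the MBSSID data goes into one
 * beacon: an index below mbssid_ies->cnt picks a single MBSSID element
 * (plus its matching RNR element and any RNR elements beyond the MBSSID
 * count) for an EMA beacon, while an index equal to cnt - as used for the
 * non-EMA template - copies every MBSSID element into the same frame.
 */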
static struct sk_buff *
ieee80211_beacon_get_ap(struct ieee80211_hw *hw,
struct ieee80211_vif *vif,
struct ieee80211_link_data *link,
struct ieee80211_mutable_offsets *offs,
bool is_template,
struct beacon_data *beacon,
struct ieee80211_chanctx_conf *chanctx_conf,
u8 ema_index)
{
struct ieee80211_local *local = hw_to_local(hw);
struct ieee80211_sub_if_data *sdata = vif_to_sdata(vif);
struct ieee80211_if_ap *ap = &sdata->u.ap;
struct sk_buff *skb = NULL;
u16 csa_off_base = 0;
int mbssid_len;
if (beacon->cntdwn_counter_offsets[0]) {
if (!is_template)
ieee80211_beacon_update_cntdwn(vif);
ieee80211_set_beacon_cntdwn(sdata, beacon, link);
}
/* headroom, head length,
* tail length, maximum TIM length and multiple BSSID length
*/
mbssid_len = ieee80211_get_mbssid_beacon_len(beacon->mbssid_ies,
beacon->rnr_ies,
ema_index);
skb = dev_alloc_skb(local->tx_headroom + beacon->head_len +
beacon->tail_len + 256 +
local->hw.extra_beacon_tailroom + mbssid_len);
if (!skb)
return NULL;
skb_reserve(skb, local->tx_headroom);
skb_put_data(skb, beacon->head, beacon->head_len);
ieee80211_beacon_add_tim(sdata, link, &ap->ps, skb, is_template);
if (offs) {
offs->tim_offset = beacon->head_len;
offs->tim_length = skb->len - beacon->head_len;
offs->cntdwn_counter_offs[0] = beacon->cntdwn_counter_offsets[0];
if (mbssid_len) {
ieee80211_beacon_add_mbssid(skb, beacon, ema_index);
offs->mbssid_off = skb->len - mbssid_len;
}
/* for AP the csa offsets are from tail */
csa_off_base = skb->len;
}
if (beacon->tail)
skb_put_data(skb, beacon->tail, beacon->tail_len);
if (ieee80211_beacon_protect(skb, local, sdata, link) < 0)
return NULL;
ieee80211_beacon_get_finish(hw, vif, link, offs, beacon, skb,
chanctx_conf, csa_off_base);
return skb;
}
static struct ieee80211_ema_beacons *
ieee80211_beacon_get_ap_ema_list(struct ieee80211_hw *hw,
struct ieee80211_vif *vif,
struct ieee80211_link_data *link,
struct ieee80211_mutable_offsets *offs,
bool is_template, struct beacon_data *beacon,
struct ieee80211_chanctx_conf *chanctx_conf)
{
struct ieee80211_ema_beacons *ema = NULL;
if (!beacon->mbssid_ies || !beacon->mbssid_ies->cnt)
return NULL;
ema = kzalloc(struct_size(ema, bcn, beacon->mbssid_ies->cnt),
GFP_ATOMIC);
if (!ema)
return NULL;
for (ema->cnt = 0; ema->cnt < beacon->mbssid_ies->cnt; ema->cnt++) {
ema->bcn[ema->cnt].skb =
ieee80211_beacon_get_ap(hw, vif, link,
&ema->bcn[ema->cnt].offs,
is_template, beacon,
chanctx_conf, ema->cnt);
if (!ema->bcn[ema->cnt].skb)
break;
}
if (ema->cnt == beacon->mbssid_ies->cnt)
return ema;
ieee80211_beacon_free_ema_list(ema);
return NULL;
}
#define IEEE80211_INCLUDE_ALL_MBSSID_ELEMS -1
static struct sk_buff *
__ieee80211_beacon_get(struct ieee80211_hw *hw,
struct ieee80211_vif *vif,
struct ieee80211_mutable_offsets *offs,
bool is_template,
unsigned int link_id,
int ema_index,
struct ieee80211_ema_beacons **ema_beacons)
{
struct ieee80211_local *local = hw_to_local(hw);
struct beacon_data *beacon = NULL;
struct sk_buff *skb = NULL;
struct ieee80211_sub_if_data *sdata = NULL;
struct ieee80211_chanctx_conf *chanctx_conf;
struct ieee80211_link_data *link;
rcu_read_lock();
sdata = vif_to_sdata(vif);
link = rcu_dereference(sdata->link[link_id]);
if (!link)
goto out;
chanctx_conf =
rcu_dereference(link->conf->chanctx_conf);
if (!ieee80211_sdata_running(sdata) || !chanctx_conf)
goto out;
if (offs)
memset(offs, 0, sizeof(*offs));
if (sdata->vif.type == NL80211_IFTYPE_AP) {
beacon = rcu_dereference(link->u.ap.beacon);
if (!beacon)
goto out;
if (ema_beacons) {
*ema_beacons =
ieee80211_beacon_get_ap_ema_list(hw, vif, link,
offs,
is_template,
beacon,
chanctx_conf);
} else {
if (beacon->mbssid_ies && beacon->mbssid_ies->cnt) {
if (ema_index >= beacon->mbssid_ies->cnt)
goto out; /* End of MBSSID elements */
if (ema_index <= IEEE80211_INCLUDE_ALL_MBSSID_ELEMS)
ema_index = beacon->mbssid_ies->cnt;
} else {
ema_index = 0;
}
skb = ieee80211_beacon_get_ap(hw, vif, link, offs,
is_template, beacon,
chanctx_conf,
ema_index);
}
} else if (sdata->vif.type == NL80211_IFTYPE_ADHOC) {
struct ieee80211_if_ibss *ifibss = &sdata->u.ibss;
struct ieee80211_hdr *hdr;
beacon = rcu_dereference(ifibss->presp);
if (!beacon)
goto out;
if (beacon->cntdwn_counter_offsets[0]) {
if (!is_template)
__ieee80211_beacon_update_cntdwn(beacon);
ieee80211_set_beacon_cntdwn(sdata, beacon, link);
}
skb = dev_alloc_skb(local->tx_headroom + beacon->head_len +
local->hw.extra_beacon_tailroom);
if (!skb)
goto out;
skb_reserve(skb, local->tx_headroom);
skb_put_data(skb, beacon->head, beacon->head_len);
hdr = (struct ieee80211_hdr *) skb->data;
hdr->frame_control = cpu_to_le16(IEEE80211_FTYPE_MGMT |
IEEE80211_STYPE_BEACON);
ieee80211_beacon_get_finish(hw, vif, link, offs, beacon, skb,
chanctx_conf, 0);
} else if (ieee80211_vif_is_mesh(&sdata->vif)) {
struct ieee80211_if_mesh *ifmsh = &sdata->u.mesh;
beacon = rcu_dereference(ifmsh->beacon);
if (!beacon)
goto out;
if (beacon->cntdwn_counter_offsets[0]) {
if (!is_template)
			/* TODO: For mesh, csa_counter is in TUs, so
			 * decrementing it by one isn't correct, but for
			 * now we leave it consistent with the overall
			 * mac80211 behavior.
			 */
__ieee80211_beacon_update_cntdwn(beacon);
ieee80211_set_beacon_cntdwn(sdata, beacon, link);
}
if (ifmsh->sync_ops)
ifmsh->sync_ops->adjust_tsf(sdata, beacon);
skb = dev_alloc_skb(local->tx_headroom +
beacon->head_len +
256 + /* TIM IE */
beacon->tail_len +
local->hw.extra_beacon_tailroom);
if (!skb)
goto out;
skb_reserve(skb, local->tx_headroom);
skb_put_data(skb, beacon->head, beacon->head_len);
ieee80211_beacon_add_tim(sdata, link, &ifmsh->ps, skb,
is_template);
if (offs) {
offs->tim_offset = beacon->head_len;
offs->tim_length = skb->len - beacon->head_len;
}
skb_put_data(skb, beacon->tail, beacon->tail_len);
ieee80211_beacon_get_finish(hw, vif, link, offs, beacon, skb,
chanctx_conf, 0);
} else {
WARN_ON(1);
goto out;
}
out:
rcu_read_unlock();
return skb;
}
struct sk_buff *
ieee80211_beacon_get_template(struct ieee80211_hw *hw,
struct ieee80211_vif *vif,
struct ieee80211_mutable_offsets *offs,
unsigned int link_id)
{
return __ieee80211_beacon_get(hw, vif, offs, true, link_id,
IEEE80211_INCLUDE_ALL_MBSSID_ELEMS, NULL);
}
EXPORT_SYMBOL(ieee80211_beacon_get_template);
struct sk_buff *
ieee80211_beacon_get_template_ema_index(struct ieee80211_hw *hw,
struct ieee80211_vif *vif,
struct ieee80211_mutable_offsets *offs,
unsigned int link_id, u8 ema_index)
{
return __ieee80211_beacon_get(hw, vif, offs, true, link_id, ema_index,
NULL);
}
EXPORT_SYMBOL(ieee80211_beacon_get_template_ema_index);
void ieee80211_beacon_free_ema_list(struct ieee80211_ema_beacons *ema_beacons)
{
u8 i;
if (!ema_beacons)
return;
for (i = 0; i < ema_beacons->cnt; i++)
kfree_skb(ema_beacons->bcn[i].skb);
kfree(ema_beacons);
}
EXPORT_SYMBOL(ieee80211_beacon_free_ema_list);
struct ieee80211_ema_beacons *
ieee80211_beacon_get_template_ema_list(struct ieee80211_hw *hw,
struct ieee80211_vif *vif,
unsigned int link_id)
{
struct ieee80211_ema_beacons *ema_beacons = NULL;
WARN_ON(__ieee80211_beacon_get(hw, vif, NULL, true, link_id, 0,
&ema_beacons));
return ema_beacons;
}
EXPORT_SYMBOL(ieee80211_beacon_get_template_ema_list);
struct sk_buff *ieee80211_beacon_get_tim(struct ieee80211_hw *hw,
struct ieee80211_vif *vif,
u16 *tim_offset, u16 *tim_length,
unsigned int link_id)
{
struct ieee80211_mutable_offsets offs = {};
struct sk_buff *bcn = __ieee80211_beacon_get(hw, vif, &offs, false,
link_id,
IEEE80211_INCLUDE_ALL_MBSSID_ELEMS,
NULL);
struct sk_buff *copy;
int shift;
if (!bcn)
return bcn;
if (tim_offset)
*tim_offset = offs.tim_offset;
if (tim_length)
*tim_length = offs.tim_length;
if (ieee80211_hw_check(hw, BEACON_TX_STATUS) ||
!hw_to_local(hw)->monitors)
return bcn;
/* send a copy to monitor interfaces */
copy = skb_copy(bcn, GFP_ATOMIC);
if (!copy)
return bcn;
shift = ieee80211_vif_get_shift(vif);
ieee80211_tx_monitor(hw_to_local(hw), copy, 1, shift, false, NULL);
return bcn;
}
EXPORT_SYMBOL(ieee80211_beacon_get_tim);
struct sk_buff *ieee80211_proberesp_get(struct ieee80211_hw *hw,
struct ieee80211_vif *vif)
{
struct sk_buff *skb = NULL;
struct probe_resp *presp = NULL;
struct ieee80211_hdr *hdr;
struct ieee80211_sub_if_data *sdata = vif_to_sdata(vif);
if (sdata->vif.type != NL80211_IFTYPE_AP)
return NULL;
rcu_read_lock();
presp = rcu_dereference(sdata->deflink.u.ap.probe_resp);
if (!presp)
goto out;
skb = dev_alloc_skb(presp->len);
if (!skb)
goto out;
skb_put_data(skb, presp->data, presp->len);
hdr = (struct ieee80211_hdr *) skb->data;
memset(hdr->addr1, 0, sizeof(hdr->addr1));
out:
rcu_read_unlock();
return skb;
}
EXPORT_SYMBOL(ieee80211_proberesp_get);
struct sk_buff *ieee80211_get_fils_discovery_tmpl(struct ieee80211_hw *hw,
struct ieee80211_vif *vif)
{
struct sk_buff *skb = NULL;
struct fils_discovery_data *tmpl = NULL;
struct ieee80211_sub_if_data *sdata = vif_to_sdata(vif);
if (sdata->vif.type != NL80211_IFTYPE_AP)
return NULL;
rcu_read_lock();
tmpl = rcu_dereference(sdata->deflink.u.ap.fils_discovery);
if (!tmpl) {
rcu_read_unlock();
return NULL;
}
skb = dev_alloc_skb(sdata->local->hw.extra_tx_headroom + tmpl->len);
if (skb) {
skb_reserve(skb, sdata->local->hw.extra_tx_headroom);
skb_put_data(skb, tmpl->data, tmpl->len);
}
rcu_read_unlock();
return skb;
}
EXPORT_SYMBOL(ieee80211_get_fils_discovery_tmpl);
struct sk_buff *
ieee80211_get_unsol_bcast_probe_resp_tmpl(struct ieee80211_hw *hw,
struct ieee80211_vif *vif)
{
struct sk_buff *skb = NULL;
struct unsol_bcast_probe_resp_data *tmpl = NULL;
struct ieee80211_sub_if_data *sdata = vif_to_sdata(vif);
if (sdata->vif.type != NL80211_IFTYPE_AP)
return NULL;
rcu_read_lock();
tmpl = rcu_dereference(sdata->deflink.u.ap.unsol_bcast_probe_resp);
if (!tmpl) {
rcu_read_unlock();
return NULL;
}
skb = dev_alloc_skb(sdata->local->hw.extra_tx_headroom + tmpl->len);
if (skb) {
skb_reserve(skb, sdata->local->hw.extra_tx_headroom);
skb_put_data(skb, tmpl->data, tmpl->len);
}
rcu_read_unlock();
return skb;
}
EXPORT_SYMBOL(ieee80211_get_unsol_bcast_probe_resp_tmpl);
struct sk_buff *ieee80211_pspoll_get(struct ieee80211_hw *hw,
struct ieee80211_vif *vif)
{
struct ieee80211_sub_if_data *sdata;
struct ieee80211_pspoll *pspoll;
struct ieee80211_local *local;
struct sk_buff *skb;
if (WARN_ON(vif->type != NL80211_IFTYPE_STATION))
return NULL;
sdata = vif_to_sdata(vif);
local = sdata->local;
skb = dev_alloc_skb(local->hw.extra_tx_headroom + sizeof(*pspoll));
if (!skb)
return NULL;
skb_reserve(skb, local->hw.extra_tx_headroom);
pspoll = skb_put_zero(skb, sizeof(*pspoll));
pspoll->frame_control = cpu_to_le16(IEEE80211_FTYPE_CTL |
IEEE80211_STYPE_PSPOLL);
pspoll->aid = cpu_to_le16(sdata->vif.cfg.aid);
/* aid in PS-Poll has its two MSBs each set to 1 */
pspoll->aid |= cpu_to_le16(1 << 15 | 1 << 14);
memcpy(pspoll->bssid, sdata->deflink.u.mgd.bssid, ETH_ALEN);
memcpy(pspoll->ta, vif->addr, ETH_ALEN);
return skb;
}
EXPORT_SYMBOL(ieee80211_pspoll_get);
struct sk_buff *ieee80211_nullfunc_get(struct ieee80211_hw *hw,
struct ieee80211_vif *vif,
int link_id, bool qos_ok)
{
struct ieee80211_sub_if_data *sdata = vif_to_sdata(vif);
struct ieee80211_local *local = sdata->local;
struct ieee80211_link_data *link = NULL;
struct ieee80211_hdr_3addr *nullfunc;
struct sk_buff *skb;
bool qos = false;
if (WARN_ON(vif->type != NL80211_IFTYPE_STATION))
return NULL;
skb = dev_alloc_skb(local->hw.extra_tx_headroom +
sizeof(*nullfunc) + 2);
if (!skb)
return NULL;
rcu_read_lock();
if (qos_ok) {
struct sta_info *sta;
sta = sta_info_get(sdata, vif->cfg.ap_addr);
qos = sta && sta->sta.wme;
}
if (link_id >= 0) {
link = rcu_dereference(sdata->link[link_id]);
if (WARN_ON_ONCE(!link)) {
rcu_read_unlock();
kfree_skb(skb);
return NULL;
}
}
skb_reserve(skb, local->hw.extra_tx_headroom);
nullfunc = skb_put_zero(skb, sizeof(*nullfunc));
nullfunc->frame_control = cpu_to_le16(IEEE80211_FTYPE_DATA |
IEEE80211_STYPE_NULLFUNC |
IEEE80211_FCTL_TODS);
if (qos) {
__le16 qoshdr = cpu_to_le16(7);
BUILD_BUG_ON((IEEE80211_STYPE_QOS_NULLFUNC |
IEEE80211_STYPE_NULLFUNC) !=
IEEE80211_STYPE_QOS_NULLFUNC);
nullfunc->frame_control |=
cpu_to_le16(IEEE80211_STYPE_QOS_NULLFUNC);
skb->priority = 7;
skb_set_queue_mapping(skb, IEEE80211_AC_VO);
skb_put_data(skb, &qoshdr, sizeof(qoshdr));
}
if (link) {
memcpy(nullfunc->addr1, link->conf->bssid, ETH_ALEN);
memcpy(nullfunc->addr2, link->conf->addr, ETH_ALEN);
memcpy(nullfunc->addr3, link->conf->bssid, ETH_ALEN);
} else {
memcpy(nullfunc->addr1, vif->cfg.ap_addr, ETH_ALEN);
memcpy(nullfunc->addr2, vif->addr, ETH_ALEN);
memcpy(nullfunc->addr3, vif->cfg.ap_addr, ETH_ALEN);
}
rcu_read_unlock();
return skb;
}
EXPORT_SYMBOL(ieee80211_nullfunc_get);
struct sk_buff *ieee80211_probereq_get(struct ieee80211_hw *hw,
const u8 *src_addr,
const u8 *ssid, size_t ssid_len,
size_t tailroom)
{
struct ieee80211_local *local = hw_to_local(hw);
struct ieee80211_hdr_3addr *hdr;
struct sk_buff *skb;
size_t ie_ssid_len;
u8 *pos;
ie_ssid_len = 2 + ssid_len;
skb = dev_alloc_skb(local->hw.extra_tx_headroom + sizeof(*hdr) +
ie_ssid_len + tailroom);
if (!skb)
return NULL;
skb_reserve(skb, local->hw.extra_tx_headroom);
hdr = skb_put_zero(skb, sizeof(*hdr));
hdr->frame_control = cpu_to_le16(IEEE80211_FTYPE_MGMT |
IEEE80211_STYPE_PROBE_REQ);
eth_broadcast_addr(hdr->addr1);
memcpy(hdr->addr2, src_addr, ETH_ALEN);
eth_broadcast_addr(hdr->addr3);
pos = skb_put(skb, ie_ssid_len);
*pos++ = WLAN_EID_SSID;
*pos++ = ssid_len;
if (ssid_len)
memcpy(pos, ssid, ssid_len);
pos += ssid_len;
return skb;
}
EXPORT_SYMBOL(ieee80211_probereq_get);
void ieee80211_rts_get(struct ieee80211_hw *hw, struct ieee80211_vif *vif,
const void *frame, size_t frame_len,
const struct ieee80211_tx_info *frame_txctl,
struct ieee80211_rts *rts)
{
const struct ieee80211_hdr *hdr = frame;
rts->frame_control =
cpu_to_le16(IEEE80211_FTYPE_CTL | IEEE80211_STYPE_RTS);
rts->duration = ieee80211_rts_duration(hw, vif, frame_len,
frame_txctl);
memcpy(rts->ra, hdr->addr1, sizeof(rts->ra));
memcpy(rts->ta, hdr->addr2, sizeof(rts->ta));
}
EXPORT_SYMBOL(ieee80211_rts_get);
void ieee80211_ctstoself_get(struct ieee80211_hw *hw, struct ieee80211_vif *vif,
const void *frame, size_t frame_len,
const struct ieee80211_tx_info *frame_txctl,
struct ieee80211_cts *cts)
{
const struct ieee80211_hdr *hdr = frame;
cts->frame_control =
cpu_to_le16(IEEE80211_FTYPE_CTL | IEEE80211_STYPE_CTS);
cts->duration = ieee80211_ctstoself_duration(hw, vif,
frame_len, frame_txctl);
memcpy(cts->ra, hdr->addr1, sizeof(cts->ra));
}
EXPORT_SYMBOL(ieee80211_ctstoself_get);
struct sk_buff *
ieee80211_get_buffered_bc(struct ieee80211_hw *hw,
struct ieee80211_vif *vif)
{
struct ieee80211_local *local = hw_to_local(hw);
struct sk_buff *skb = NULL;
struct ieee80211_tx_data tx;
struct ieee80211_sub_if_data *sdata;
struct ps_data *ps;
struct ieee80211_tx_info *info;
struct ieee80211_chanctx_conf *chanctx_conf;
sdata = vif_to_sdata(vif);
rcu_read_lock();
chanctx_conf = rcu_dereference(sdata->vif.bss_conf.chanctx_conf);
if (!chanctx_conf)
goto out;
if (sdata->vif.type == NL80211_IFTYPE_AP) {
struct beacon_data *beacon =
rcu_dereference(sdata->deflink.u.ap.beacon);
if (!beacon || !beacon->head)
goto out;
ps = &sdata->u.ap.ps;
} else if (ieee80211_vif_is_mesh(&sdata->vif)) {
ps = &sdata->u.mesh.ps;
} else {
goto out;
}
if (ps->dtim_count != 0 || !ps->dtim_bc_mc)
goto out; /* send buffered bc/mc only after DTIM beacon */
while (1) {
skb = skb_dequeue(&ps->bc_buf);
if (!skb)
goto out;
local->total_ps_buffered--;
if (!skb_queue_empty(&ps->bc_buf) && skb->len >= 2) {
struct ieee80211_hdr *hdr =
(struct ieee80211_hdr *) skb->data;
/* more buffered multicast/broadcast frames ==> set
* MoreData flag in IEEE 802.11 header to inform PS
* STAs */
hdr->frame_control |=
cpu_to_le16(IEEE80211_FCTL_MOREDATA);
}
if (sdata->vif.type == NL80211_IFTYPE_AP)
sdata = IEEE80211_DEV_TO_SUB_IF(skb->dev);
if (!ieee80211_tx_prepare(sdata, &tx, NULL, skb))
break;
ieee80211_free_txskb(hw, skb);
}
info = IEEE80211_SKB_CB(skb);
tx.flags |= IEEE80211_TX_PS_BUFFERED;
info->band = chanctx_conf->def.chan->band;
if (invoke_tx_handlers(&tx))
skb = NULL;
out:
rcu_read_unlock();
return skb;
}
EXPORT_SYMBOL(ieee80211_get_buffered_bc);
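/*
 * Typical driver usage (a sketch; queue_to_hardware() stands in for the
 * driver's own submission path): right after transmitting a DTIM beacon,
 * drain the buffered broadcast/multicast queue so the MoreData handling
 * above tells powersaving stations when to stay awake:
 *
 *	while ((skb = ieee80211_get_buffered_bc(hw, vif)))
 *		queue_to_hardware(skb);
 */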
int ieee80211_reserve_tid(struct ieee80211_sta *pubsta, u8 tid)
{
struct sta_info *sta = container_of(pubsta, struct sta_info, sta);
struct ieee80211_sub_if_data *sdata = sta->sdata;
struct ieee80211_local *local = sdata->local;
int ret;
u32 queues;
lockdep_assert_held(&local->sta_mtx);
/* only some cases are supported right now */
switch (sdata->vif.type) {
case NL80211_IFTYPE_STATION:
case NL80211_IFTYPE_AP:
case NL80211_IFTYPE_AP_VLAN:
break;
default:
WARN_ON(1);
return -EINVAL;
}
if (WARN_ON(tid >= IEEE80211_NUM_UPS))
return -EINVAL;
if (sta->reserved_tid == tid) {
ret = 0;
goto out;
}
if (sta->reserved_tid != IEEE80211_TID_UNRESERVED) {
sdata_err(sdata, "TID reservation already active\n");
ret = -EALREADY;
goto out;
}
ieee80211_stop_vif_queues(sdata->local, sdata,
IEEE80211_QUEUE_STOP_REASON_RESERVE_TID);
synchronize_net();
/* Tear down BA sessions so we stop aggregating on this TID */
if (ieee80211_hw_check(&local->hw, AMPDU_AGGREGATION)) {
set_sta_flag(sta, WLAN_STA_BLOCK_BA);
__ieee80211_stop_tx_ba_session(sta, tid,
AGG_STOP_LOCAL_REQUEST);
}
queues = BIT(sdata->vif.hw_queue[ieee802_1d_to_ac[tid]]);
__ieee80211_flush_queues(local, sdata, queues, false);
sta->reserved_tid = tid;
ieee80211_wake_vif_queues(local, sdata,
IEEE80211_QUEUE_STOP_REASON_RESERVE_TID);
if (ieee80211_hw_check(&local->hw, AMPDU_AGGREGATION))
clear_sta_flag(sta, WLAN_STA_BLOCK_BA);
ret = 0;
out:
return ret;
}
EXPORT_SYMBOL(ieee80211_reserve_tid);
void ieee80211_unreserve_tid(struct ieee80211_sta *pubsta, u8 tid)
{
struct sta_info *sta = container_of(pubsta, struct sta_info, sta);
struct ieee80211_sub_if_data *sdata = sta->sdata;
lockdep_assert_held(&sdata->local->sta_mtx);
/* only some cases are supported right now */
switch (sdata->vif.type) {
case NL80211_IFTYPE_STATION:
case NL80211_IFTYPE_AP:
case NL80211_IFTYPE_AP_VLAN:
break;
default:
WARN_ON(1);
return;
}
if (tid != sta->reserved_tid) {
sdata_err(sdata, "TID to unreserve (%d) isn't reserved\n", tid);
return;
}
sta->reserved_tid = IEEE80211_TID_UNRESERVED;
}
EXPORT_SYMBOL(ieee80211_unreserve_tid);
void __ieee80211_tx_skb_tid_band(struct ieee80211_sub_if_data *sdata,
struct sk_buff *skb, int tid, int link_id,
enum nl80211_band band)
{
const struct ieee80211_hdr *hdr = (void *)skb->data;
int ac = ieee80211_ac_from_tid(tid);
unsigned int link;
skb_reset_mac_header(skb);
skb_set_queue_mapping(skb, ac);
skb->priority = tid;
skb->dev = sdata->dev;
BUILD_BUG_ON(IEEE80211_LINK_UNSPECIFIED < IEEE80211_MLD_MAX_NUM_LINKS);
BUILD_BUG_ON(!FIELD_FIT(IEEE80211_TX_CTRL_MLO_LINK,
IEEE80211_LINK_UNSPECIFIED));
if (!ieee80211_vif_is_mld(&sdata->vif)) {
link = 0;
} else if (link_id >= 0) {
link = link_id;
} else if (memcmp(sdata->vif.addr, hdr->addr2, ETH_ALEN) == 0) {
/* address from the MLD */
link = IEEE80211_LINK_UNSPECIFIED;
} else {
/* otherwise must be addressed from a link */
rcu_read_lock();
for (link = 0; link < ARRAY_SIZE(sdata->vif.link_conf); link++) {
struct ieee80211_bss_conf *link_conf;
link_conf = rcu_dereference(sdata->vif.link_conf[link]);
if (!link_conf)
continue;
if (memcmp(link_conf->addr, hdr->addr2, ETH_ALEN) == 0)
break;
}
rcu_read_unlock();
if (WARN_ON_ONCE(link == ARRAY_SIZE(sdata->vif.link_conf)))
link = ffs(sdata->vif.active_links) - 1;
}
IEEE80211_SKB_CB(skb)->control.flags |=
u32_encode_bits(link, IEEE80211_TX_CTRL_MLO_LINK);
	/*
	 * The other path calling ieee80211_xmit is from the tasklet, and
	 * while we can handle concurrent transmissions, the locking
	 * requirements are that we do not come into tx with bottom halves
	 * enabled.
	 */
local_bh_disable();
IEEE80211_SKB_CB(skb)->band = band;
ieee80211_xmit(sdata, NULL, skb);
local_bh_enable();
}
void ieee80211_tx_skb_tid(struct ieee80211_sub_if_data *sdata,
struct sk_buff *skb, int tid, int link_id)
{
struct ieee80211_chanctx_conf *chanctx_conf;
enum nl80211_band band;
rcu_read_lock();
if (!ieee80211_vif_is_mld(&sdata->vif)) {
WARN_ON(link_id >= 0);
chanctx_conf =
rcu_dereference(sdata->vif.bss_conf.chanctx_conf);
if (WARN_ON(!chanctx_conf)) {
rcu_read_unlock();
kfree_skb(skb);
return;
}
band = chanctx_conf->def.chan->band;
} else {
WARN_ON(link_id >= 0 &&
!(sdata->vif.active_links & BIT(link_id)));
/* MLD transmissions must not rely on the band */
band = 0;
}
__ieee80211_tx_skb_tid_band(sdata, skb, tid, link_id, band);
rcu_read_unlock();
}
int ieee80211_tx_control_port(struct wiphy *wiphy, struct net_device *dev,
const u8 *buf, size_t len,
const u8 *dest, __be16 proto, bool unencrypted,
int link_id, u64 *cookie)
{
struct ieee80211_sub_if_data *sdata = IEEE80211_DEV_TO_SUB_IF(dev);
struct ieee80211_local *local = sdata->local;
struct sta_info *sta;
struct sk_buff *skb;
struct ethhdr *ehdr;
u32 ctrl_flags = 0;
u32 flags = 0;
int err;
/* Only accept CONTROL_PORT_PROTOCOL configured in CONNECT/ASSOCIATE
* or Pre-Authentication
*/
if (proto != sdata->control_port_protocol &&
proto != cpu_to_be16(ETH_P_PREAUTH))
return -EINVAL;
if (proto == sdata->control_port_protocol)
ctrl_flags |= IEEE80211_TX_CTRL_PORT_CTRL_PROTO |
IEEE80211_TX_CTRL_SKIP_MPATH_LOOKUP;
if (unencrypted)
flags |= IEEE80211_TX_INTFL_DONT_ENCRYPT;
if (cookie)
ctrl_flags |= IEEE80211_TX_CTL_REQ_TX_STATUS;
flags |= IEEE80211_TX_INTFL_NL80211_FRAME_TX;
skb = dev_alloc_skb(local->hw.extra_tx_headroom +
sizeof(struct ethhdr) + len);
if (!skb)
return -ENOMEM;
skb_reserve(skb, local->hw.extra_tx_headroom + sizeof(struct ethhdr));
skb_put_data(skb, buf, len);
ehdr = skb_push(skb, sizeof(struct ethhdr));
memcpy(ehdr->h_dest, dest, ETH_ALEN);
/* we may override the SA for MLO STA later */
if (link_id < 0) {
ctrl_flags |= u32_encode_bits(IEEE80211_LINK_UNSPECIFIED,
IEEE80211_TX_CTRL_MLO_LINK);
memcpy(ehdr->h_source, sdata->vif.addr, ETH_ALEN);
} else {
struct ieee80211_bss_conf *link_conf;
ctrl_flags |= u32_encode_bits(link_id,
IEEE80211_TX_CTRL_MLO_LINK);
rcu_read_lock();
link_conf = rcu_dereference(sdata->vif.link_conf[link_id]);
if (!link_conf) {
dev_kfree_skb(skb);
rcu_read_unlock();
return -ENOLINK;
}
memcpy(ehdr->h_source, link_conf->addr, ETH_ALEN);
rcu_read_unlock();
}
ehdr->h_proto = proto;
skb->dev = dev;
skb->protocol = proto;
skb_reset_network_header(skb);
skb_reset_mac_header(skb);
if (local->hw.queues < IEEE80211_NUM_ACS)
goto start_xmit;
	/* update QoS header to prioritize control port frames if possible;
	 * prioritization also happens for control port frames sent over
	 * AF_PACKET
	 */
rcu_read_lock();
err = ieee80211_lookup_ra_sta(sdata, skb, &sta);
if (err) {
dev_kfree_skb(skb);
rcu_read_unlock();
return err;
}
if (!IS_ERR(sta)) {
u16 queue = ieee80211_select_queue(sdata, sta, skb);
skb_set_queue_mapping(skb, queue);
/*
* for MLO STA, the SA should be the AP MLD address, but
* the link ID has been selected already
*/
if (sta && sta->sta.mlo)
memcpy(ehdr->h_source, sdata->vif.addr, ETH_ALEN);
}
rcu_read_unlock();
start_xmit:
/* mutex lock is only needed for incrementing the cookie counter */
mutex_lock(&local->mtx);
local_bh_disable();
__ieee80211_subif_start_xmit(skb, skb->dev, flags, ctrl_flags, cookie);
local_bh_enable();
mutex_unlock(&local->mtx);
return 0;
}
int ieee80211_probe_mesh_link(struct wiphy *wiphy, struct net_device *dev,
const u8 *buf, size_t len)
{
struct ieee80211_sub_if_data *sdata = IEEE80211_DEV_TO_SUB_IF(dev);
struct ieee80211_local *local = sdata->local;
struct sk_buff *skb;
skb = dev_alloc_skb(local->hw.extra_tx_headroom + len +
30 + /* header size */
18); /* 11s header size */
if (!skb)
return -ENOMEM;
skb_reserve(skb, local->hw.extra_tx_headroom);
skb_put_data(skb, buf, len);
skb->dev = dev;
skb->protocol = htons(ETH_P_802_3);
skb_reset_network_header(skb);
skb_reset_mac_header(skb);
local_bh_disable();
__ieee80211_subif_start_xmit(skb, skb->dev, 0,
IEEE80211_TX_CTRL_SKIP_MPATH_LOOKUP,
NULL);
local_bh_enable();
return 0;
}
| linux-master | net/mac80211/tx.c |
// SPDX-License-Identifier: GPL-2.0-only
/*
* Copyright (c) 2008, 2009 open80211s Ltd.
* Copyright (C) 2018 - 2023 Intel Corporation
* Authors: Luis Carlos Cobo <[email protected]>
* Javier Cardona <[email protected]>
*/
#include <linux/slab.h>
#include <asm/unaligned.h>
#include "ieee80211_i.h"
#include "mesh.h"
#include "wme.h"
#include "driver-ops.h"
static int mesh_allocated;
static struct kmem_cache *rm_cache;
bool mesh_action_is_path_sel(struct ieee80211_mgmt *mgmt)
{
return (mgmt->u.action.u.mesh_action.action_code ==
WLAN_MESH_ACTION_HWMP_PATH_SELECTION);
}
void ieee80211s_init(void)
{
mesh_allocated = 1;
rm_cache = kmem_cache_create("mesh_rmc", sizeof(struct rmc_entry),
0, 0, NULL);
}
void ieee80211s_stop(void)
{
if (!mesh_allocated)
return;
kmem_cache_destroy(rm_cache);
}
static void ieee80211_mesh_housekeeping_timer(struct timer_list *t)
{
struct ieee80211_sub_if_data *sdata =
from_timer(sdata, t, u.mesh.housekeeping_timer);
struct ieee80211_local *local = sdata->local;
struct ieee80211_if_mesh *ifmsh = &sdata->u.mesh;
set_bit(MESH_WORK_HOUSEKEEPING, &ifmsh->wrkq_flags);
wiphy_work_queue(local->hw.wiphy, &sdata->work);
}
/**
* mesh_matches_local - check if the config of a mesh point matches ours
*
* @sdata: local mesh subif
* @ie: information elements of a management frame from the mesh peer
*
* This function checks if the mesh configuration of a mesh point matches the
* local mesh configuration, i.e. if both nodes belong to the same mesh network.
*/
bool mesh_matches_local(struct ieee80211_sub_if_data *sdata,
struct ieee802_11_elems *ie)
{
struct ieee80211_if_mesh *ifmsh = &sdata->u.mesh;
u32 basic_rates = 0;
struct cfg80211_chan_def sta_chan_def;
struct ieee80211_supported_band *sband;
u32 vht_cap_info = 0;
/*
* As support for each feature is added, check for matching
* - On mesh config capabilities
* - Power Save Support En
* - Sync support enabled
* - Sync support active
* - Sync support required from peer
* - MDA enabled
* - Power management control on fc
*/
if (!(ifmsh->mesh_id_len == ie->mesh_id_len &&
memcmp(ifmsh->mesh_id, ie->mesh_id, ie->mesh_id_len) == 0 &&
(ifmsh->mesh_pp_id == ie->mesh_config->meshconf_psel) &&
(ifmsh->mesh_pm_id == ie->mesh_config->meshconf_pmetric) &&
(ifmsh->mesh_cc_id == ie->mesh_config->meshconf_congest) &&
(ifmsh->mesh_sp_id == ie->mesh_config->meshconf_synch) &&
(ifmsh->mesh_auth_id == ie->mesh_config->meshconf_auth)))
return false;
sband = ieee80211_get_sband(sdata);
if (!sband)
return false;
ieee80211_sta_get_rates(sdata, ie, sband->band,
&basic_rates);
if (sdata->vif.bss_conf.basic_rates != basic_rates)
return false;
cfg80211_chandef_create(&sta_chan_def, sdata->vif.bss_conf.chandef.chan,
NL80211_CHAN_NO_HT);
ieee80211_chandef_ht_oper(ie->ht_operation, &sta_chan_def);
if (ie->vht_cap_elem)
vht_cap_info = le32_to_cpu(ie->vht_cap_elem->vht_cap_info);
ieee80211_chandef_vht_oper(&sdata->local->hw, vht_cap_info,
ie->vht_operation, ie->ht_operation,
&sta_chan_def);
ieee80211_chandef_he_6ghz_oper(sdata, ie->he_operation, ie->eht_operation,
&sta_chan_def);
if (!cfg80211_chandef_compatible(&sdata->vif.bss_conf.chandef,
&sta_chan_def))
return false;
return true;
}
/**
* mesh_peer_accepts_plinks - check if an mp is willing to establish peer links
*
* @ie: information elements of a management frame from the mesh peer
*/
bool mesh_peer_accepts_plinks(struct ieee802_11_elems *ie)
{
return (ie->mesh_config->meshconf_cap &
IEEE80211_MESHCONF_CAPAB_ACCEPT_PLINKS) != 0;
}
/**
* mesh_accept_plinks_update - update accepting_plink in local mesh beacons
*
* @sdata: mesh interface in which mesh beacons are going to be updated
*
* Returns: beacon changed flag if the beacon content changed.
*/
u64 mesh_accept_plinks_update(struct ieee80211_sub_if_data *sdata)
{
bool free_plinks;
u64 changed = 0;
/* In case mesh_plink_free_count > 0 and mesh_plinktbl_capacity == 0,
* the mesh interface might be able to establish plinks with peers that
* are already on the table but are not in PLINK_ESTAB state. However,
* in general the mesh interface is not accepting peer link requests
* from new peers, and that must be reflected in the beacon
*/
free_plinks = mesh_plink_availables(sdata);
if (free_plinks != sdata->u.mesh.accepting_plinks) {
sdata->u.mesh.accepting_plinks = free_plinks;
changed = BSS_CHANGED_BEACON;
}
return changed;
}
/*
* mesh_sta_cleanup - clean up any mesh sta state
*
* @sta: mesh sta to clean up.
*/
void mesh_sta_cleanup(struct sta_info *sta)
{
struct ieee80211_sub_if_data *sdata = sta->sdata;
u64 changed = mesh_plink_deactivate(sta);
if (changed)
ieee80211_mbss_info_change_notify(sdata, changed);
}
int mesh_rmc_init(struct ieee80211_sub_if_data *sdata)
{
int i;
sdata->u.mesh.rmc = kmalloc(sizeof(struct mesh_rmc), GFP_KERNEL);
if (!sdata->u.mesh.rmc)
return -ENOMEM;
sdata->u.mesh.rmc->idx_mask = RMC_BUCKETS - 1;
for (i = 0; i < RMC_BUCKETS; i++)
INIT_HLIST_HEAD(&sdata->u.mesh.rmc->bucket[i]);
return 0;
}
void mesh_rmc_free(struct ieee80211_sub_if_data *sdata)
{
struct mesh_rmc *rmc = sdata->u.mesh.rmc;
struct rmc_entry *p;
struct hlist_node *n;
int i;
if (!sdata->u.mesh.rmc)
return;
for (i = 0; i < RMC_BUCKETS; i++) {
hlist_for_each_entry_safe(p, n, &rmc->bucket[i], list) {
hlist_del(&p->list);
kmem_cache_free(rm_cache, p);
}
}
kfree(rmc);
sdata->u.mesh.rmc = NULL;
}
/**
* mesh_rmc_check - Check frame in recent multicast cache and add if absent.
*
* @sdata: interface
* @sa: source address
* @mesh_hdr: mesh_header
*
* Returns: 0 if the frame is not in the cache, nonzero otherwise.
*
* Checks using the source address and the mesh sequence number if we have
* received this frame lately. If the frame is not in the cache, it is added to
* it.
*/
int mesh_rmc_check(struct ieee80211_sub_if_data *sdata,
const u8 *sa, struct ieee80211s_hdr *mesh_hdr)
{
struct mesh_rmc *rmc = sdata->u.mesh.rmc;
u32 seqnum = 0;
int entries = 0;
u8 idx;
struct rmc_entry *p;
struct hlist_node *n;
if (!rmc)
return -1;
/* Don't care about endianness since only match matters */
memcpy(&seqnum, &mesh_hdr->seqnum, sizeof(mesh_hdr->seqnum));
idx = le32_to_cpu(mesh_hdr->seqnum) & rmc->idx_mask;
hlist_for_each_entry_safe(p, n, &rmc->bucket[idx], list) {
++entries;
if (time_after(jiffies, p->exp_time) ||
entries == RMC_QUEUE_MAX_LEN) {
hlist_del(&p->list);
kmem_cache_free(rm_cache, p);
--entries;
} else if ((seqnum == p->seqnum) && ether_addr_equal(sa, p->sa))
return -1;
}
p = kmem_cache_alloc(rm_cache, GFP_ATOMIC);
if (!p)
return 0;
p->seqnum = seqnum;
p->exp_time = jiffies + RMC_TIMEOUT;
memcpy(p->sa, sa, ETH_ALEN);
hlist_add_head(&p->list, &rmc->bucket[idx]);
return 0;
}
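/**
 * mesh_add_meshconf_ie - append the Mesh Configuration element to a frame
 *
 * @sdata: local mesh subif
 * @skb: frame to append the element to
 *
 * Fills in the active path selection/metric, congestion control, sync and
 * authentication protocol identifiers, the mesh formation info (peering
 * count, gate/AS connectivity) and the capability bits from the current
 * mesh configuration. Returns 0 on success or -ENOMEM if @skb lacks
 * tailroom.
 */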
int mesh_add_meshconf_ie(struct ieee80211_sub_if_data *sdata,
struct sk_buff *skb)
{
struct ieee80211_if_mesh *ifmsh = &sdata->u.mesh;
u8 *pos, neighbors;
u8 meshconf_len = sizeof(struct ieee80211_meshconf_ie);
bool is_connected_to_gate = ifmsh->num_gates > 0 ||
ifmsh->mshcfg.dot11MeshGateAnnouncementProtocol ||
ifmsh->mshcfg.dot11MeshConnectedToMeshGate;
bool is_connected_to_as = ifmsh->mshcfg.dot11MeshConnectedToAuthServer;
if (skb_tailroom(skb) < 2 + meshconf_len)
return -ENOMEM;
pos = skb_put(skb, 2 + meshconf_len);
*pos++ = WLAN_EID_MESH_CONFIG;
*pos++ = meshconf_len;
/* save a pointer for quick updates in pre-tbtt */
ifmsh->meshconf_offset = pos - skb->data;
/* Active path selection protocol ID */
*pos++ = ifmsh->mesh_pp_id;
/* Active path selection metric ID */
*pos++ = ifmsh->mesh_pm_id;
/* Congestion control mode identifier */
*pos++ = ifmsh->mesh_cc_id;
/* Synchronization protocol identifier */
*pos++ = ifmsh->mesh_sp_id;
/* Authentication Protocol identifier */
*pos++ = ifmsh->mesh_auth_id;
/* Mesh Formation Info - number of neighbors */
neighbors = atomic_read(&ifmsh->estab_plinks);
neighbors = min_t(int, neighbors, IEEE80211_MAX_MESH_PEERINGS);
*pos++ = (is_connected_to_as << 7) |
(neighbors << 1) |
is_connected_to_gate;
/* Mesh capability */
*pos = 0x00;
*pos |= ifmsh->mshcfg.dot11MeshForwarding ?
IEEE80211_MESHCONF_CAPAB_FORWARDING : 0x00;
*pos |= ifmsh->accepting_plinks ?
IEEE80211_MESHCONF_CAPAB_ACCEPT_PLINKS : 0x00;
/* Mesh PS mode. See IEEE802.11-2012 8.4.2.100.8 */
*pos |= ifmsh->ps_peers_deep_sleep ?
IEEE80211_MESHCONF_CAPAB_POWER_SAVE_LEVEL : 0x00;
return 0;
}
int mesh_add_meshid_ie(struct ieee80211_sub_if_data *sdata, struct sk_buff *skb)
{
struct ieee80211_if_mesh *ifmsh = &sdata->u.mesh;
u8 *pos;
if (skb_tailroom(skb) < 2 + ifmsh->mesh_id_len)
return -ENOMEM;
pos = skb_put(skb, 2 + ifmsh->mesh_id_len);
*pos++ = WLAN_EID_MESH_ID;
*pos++ = ifmsh->mesh_id_len;
if (ifmsh->mesh_id_len)
memcpy(pos, ifmsh->mesh_id, ifmsh->mesh_id_len);
return 0;
}
static int mesh_add_awake_window_ie(struct ieee80211_sub_if_data *sdata,
struct sk_buff *skb)
{
struct ieee80211_if_mesh *ifmsh = &sdata->u.mesh;
u8 *pos;
/* see IEEE802.11-2012 13.14.6 */
if (ifmsh->ps_peers_light_sleep == 0 &&
ifmsh->ps_peers_deep_sleep == 0 &&
ifmsh->nonpeer_pm == NL80211_MESH_POWER_ACTIVE)
return 0;
if (skb_tailroom(skb) < 4)
return -ENOMEM;
pos = skb_put(skb, 2 + 2);
*pos++ = WLAN_EID_MESH_AWAKE_WINDOW;
*pos++ = 2;
put_unaligned_le16(ifmsh->mshcfg.dot11MeshAwakeWindowDuration, pos);
return 0;
}
int mesh_add_vendor_ies(struct ieee80211_sub_if_data *sdata,
struct sk_buff *skb)
{
struct ieee80211_if_mesh *ifmsh = &sdata->u.mesh;
u8 offset, len;
const u8 *data;
if (!ifmsh->ie || !ifmsh->ie_len)
return 0;
/* fast-forward to vendor IEs */
offset = ieee80211_ie_split_vendor(ifmsh->ie, ifmsh->ie_len, 0);
if (offset < ifmsh->ie_len) {
len = ifmsh->ie_len - offset;
data = ifmsh->ie + offset;
if (skb_tailroom(skb) < len)
return -ENOMEM;
skb_put_data(skb, data, len);
}
return 0;
}
int mesh_add_rsn_ie(struct ieee80211_sub_if_data *sdata, struct sk_buff *skb)
{
struct ieee80211_if_mesh *ifmsh = &sdata->u.mesh;
u8 len = 0;
const u8 *data;
if (!ifmsh->ie || !ifmsh->ie_len)
return 0;
/* find RSN IE */
data = cfg80211_find_ie(WLAN_EID_RSN, ifmsh->ie, ifmsh->ie_len);
if (!data)
return 0;
len = data[1] + 2;
if (skb_tailroom(skb) < len)
return -ENOMEM;
skb_put_data(skb, data, len);
return 0;
}
static int mesh_add_ds_params_ie(struct ieee80211_sub_if_data *sdata,
struct sk_buff *skb)
{
struct ieee80211_chanctx_conf *chanctx_conf;
struct ieee80211_channel *chan;
u8 *pos;
if (skb_tailroom(skb) < 3)
return -ENOMEM;
rcu_read_lock();
chanctx_conf = rcu_dereference(sdata->vif.bss_conf.chanctx_conf);
if (WARN_ON(!chanctx_conf)) {
rcu_read_unlock();
return -EINVAL;
}
chan = chanctx_conf->def.chan;
rcu_read_unlock();
pos = skb_put(skb, 2 + 1);
*pos++ = WLAN_EID_DS_PARAMS;
*pos++ = 1;
*pos++ = ieee80211_frequency_to_channel(chan->center_freq);
return 0;
}
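/*
 * The mesh_add_*_cap_ie() / mesh_add_*_oper_ie() helpers below largely
 * follow one pattern: return 0 (skip the element) when the band or the
 * configured channel width rules the mode out, return -ENOMEM when the skb
 * has no tailroom left, and otherwise append the element built by the
 * matching ieee80211_ie_build_*() helper.
 */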
int mesh_add_ht_cap_ie(struct ieee80211_sub_if_data *sdata,
struct sk_buff *skb)
{
struct ieee80211_supported_band *sband;
u8 *pos;
sband = ieee80211_get_sband(sdata);
if (!sband)
return -EINVAL;
/* HT not allowed in 6 GHz */
if (sband->band == NL80211_BAND_6GHZ)
return 0;
if (!sband->ht_cap.ht_supported ||
sdata->vif.bss_conf.chandef.width == NL80211_CHAN_WIDTH_20_NOHT ||
sdata->vif.bss_conf.chandef.width == NL80211_CHAN_WIDTH_5 ||
sdata->vif.bss_conf.chandef.width == NL80211_CHAN_WIDTH_10)
return 0;
if (skb_tailroom(skb) < 2 + sizeof(struct ieee80211_ht_cap))
return -ENOMEM;
pos = skb_put(skb, 2 + sizeof(struct ieee80211_ht_cap));
ieee80211_ie_build_ht_cap(pos, &sband->ht_cap, sband->ht_cap.cap);
return 0;
}
int mesh_add_ht_oper_ie(struct ieee80211_sub_if_data *sdata,
struct sk_buff *skb)
{
struct ieee80211_local *local = sdata->local;
struct ieee80211_chanctx_conf *chanctx_conf;
struct ieee80211_channel *channel;
struct ieee80211_supported_band *sband;
struct ieee80211_sta_ht_cap *ht_cap;
u8 *pos;
rcu_read_lock();
chanctx_conf = rcu_dereference(sdata->vif.bss_conf.chanctx_conf);
if (WARN_ON(!chanctx_conf)) {
rcu_read_unlock();
return -EINVAL;
}
channel = chanctx_conf->def.chan;
rcu_read_unlock();
sband = local->hw.wiphy->bands[channel->band];
ht_cap = &sband->ht_cap;
/* HT not allowed in 6 GHz */
if (sband->band == NL80211_BAND_6GHZ)
return 0;
if (!ht_cap->ht_supported ||
sdata->vif.bss_conf.chandef.width == NL80211_CHAN_WIDTH_20_NOHT ||
sdata->vif.bss_conf.chandef.width == NL80211_CHAN_WIDTH_5 ||
sdata->vif.bss_conf.chandef.width == NL80211_CHAN_WIDTH_10)
return 0;
if (skb_tailroom(skb) < 2 + sizeof(struct ieee80211_ht_operation))
return -ENOMEM;
pos = skb_put(skb, 2 + sizeof(struct ieee80211_ht_operation));
ieee80211_ie_build_ht_oper(pos, ht_cap, &sdata->vif.bss_conf.chandef,
sdata->vif.bss_conf.ht_operation_mode,
false);
return 0;
}
int mesh_add_vht_cap_ie(struct ieee80211_sub_if_data *sdata,
struct sk_buff *skb)
{
struct ieee80211_supported_band *sband;
u8 *pos;
sband = ieee80211_get_sband(sdata);
if (!sband)
return -EINVAL;
/* VHT not allowed in 6 GHz */
if (sband->band == NL80211_BAND_6GHZ)
return 0;
if (!sband->vht_cap.vht_supported ||
sdata->vif.bss_conf.chandef.width == NL80211_CHAN_WIDTH_20_NOHT ||
sdata->vif.bss_conf.chandef.width == NL80211_CHAN_WIDTH_5 ||
sdata->vif.bss_conf.chandef.width == NL80211_CHAN_WIDTH_10)
return 0;
if (skb_tailroom(skb) < 2 + sizeof(struct ieee80211_vht_cap))
return -ENOMEM;
pos = skb_put(skb, 2 + sizeof(struct ieee80211_vht_cap));
ieee80211_ie_build_vht_cap(pos, &sband->vht_cap, sband->vht_cap.cap);
return 0;
}
int mesh_add_vht_oper_ie(struct ieee80211_sub_if_data *sdata,
struct sk_buff *skb)
{
struct ieee80211_local *local = sdata->local;
struct ieee80211_chanctx_conf *chanctx_conf;
struct ieee80211_channel *channel;
struct ieee80211_supported_band *sband;
struct ieee80211_sta_vht_cap *vht_cap;
u8 *pos;
rcu_read_lock();
chanctx_conf = rcu_dereference(sdata->vif.bss_conf.chanctx_conf);
if (WARN_ON(!chanctx_conf)) {
rcu_read_unlock();
return -EINVAL;
}
channel = chanctx_conf->def.chan;
rcu_read_unlock();
sband = local->hw.wiphy->bands[channel->band];
vht_cap = &sband->vht_cap;
/* VHT not allowed in 6 GHz */
if (sband->band == NL80211_BAND_6GHZ)
return 0;
if (!vht_cap->vht_supported ||
sdata->vif.bss_conf.chandef.width == NL80211_CHAN_WIDTH_20_NOHT ||
sdata->vif.bss_conf.chandef.width == NL80211_CHAN_WIDTH_5 ||
sdata->vif.bss_conf.chandef.width == NL80211_CHAN_WIDTH_10)
return 0;
if (skb_tailroom(skb) < 2 + sizeof(struct ieee80211_vht_operation))
return -ENOMEM;
pos = skb_put(skb, 2 + sizeof(struct ieee80211_vht_operation));
ieee80211_ie_build_vht_oper(pos, vht_cap,
&sdata->vif.bss_conf.chandef);
return 0;
}
int mesh_add_he_cap_ie(struct ieee80211_sub_if_data *sdata,
struct sk_buff *skb, u8 ie_len)
{
const struct ieee80211_sta_he_cap *he_cap;
struct ieee80211_supported_band *sband;
u8 *pos;
sband = ieee80211_get_sband(sdata);
if (!sband)
return -EINVAL;
he_cap = ieee80211_get_he_iftype_cap(sband, NL80211_IFTYPE_MESH_POINT);
if (!he_cap ||
sdata->vif.bss_conf.chandef.width == NL80211_CHAN_WIDTH_20_NOHT ||
sdata->vif.bss_conf.chandef.width == NL80211_CHAN_WIDTH_5 ||
sdata->vif.bss_conf.chandef.width == NL80211_CHAN_WIDTH_10)
return 0;
if (skb_tailroom(skb) < ie_len)
return -ENOMEM;
pos = skb_put(skb, ie_len);
ieee80211_ie_build_he_cap(0, pos, he_cap, pos + ie_len);
return 0;
}
int mesh_add_he_oper_ie(struct ieee80211_sub_if_data *sdata,
struct sk_buff *skb)
{
const struct ieee80211_sta_he_cap *he_cap;
struct ieee80211_supported_band *sband;
u32 len;
u8 *pos;
sband = ieee80211_get_sband(sdata);
if (!sband)
return -EINVAL;
he_cap = ieee80211_get_he_iftype_cap(sband, NL80211_IFTYPE_MESH_POINT);
if (!he_cap ||
sdata->vif.bss_conf.chandef.width == NL80211_CHAN_WIDTH_20_NOHT ||
sdata->vif.bss_conf.chandef.width == NL80211_CHAN_WIDTH_5 ||
sdata->vif.bss_conf.chandef.width == NL80211_CHAN_WIDTH_10)
return 0;
len = 2 + 1 + sizeof(struct ieee80211_he_operation);
if (sdata->vif.bss_conf.chandef.chan->band == NL80211_BAND_6GHZ)
len += sizeof(struct ieee80211_he_6ghz_oper);
if (skb_tailroom(skb) < len)
return -ENOMEM;
pos = skb_put(skb, len);
ieee80211_ie_build_he_oper(pos, &sdata->vif.bss_conf.chandef);
return 0;
}
int mesh_add_he_6ghz_cap_ie(struct ieee80211_sub_if_data *sdata,
struct sk_buff *skb)
{
struct ieee80211_supported_band *sband;
const struct ieee80211_sband_iftype_data *iftd;
sband = ieee80211_get_sband(sdata);
if (!sband)
return -EINVAL;
iftd = ieee80211_get_sband_iftype_data(sband,
NL80211_IFTYPE_MESH_POINT);
/* The device doesn't support HE in mesh mode or at all */
if (!iftd)
return 0;
ieee80211_ie_build_he_6ghz_cap(sdata, sdata->deflink.smps_mode, skb);
return 0;
}
int mesh_add_eht_cap_ie(struct ieee80211_sub_if_data *sdata,
struct sk_buff *skb, u8 ie_len)
{
const struct ieee80211_sta_he_cap *he_cap;
const struct ieee80211_sta_eht_cap *eht_cap;
struct ieee80211_supported_band *sband;
u8 *pos;
sband = ieee80211_get_sband(sdata);
if (!sband)
return -EINVAL;
he_cap = ieee80211_get_he_iftype_cap(sband, NL80211_IFTYPE_MESH_POINT);
eht_cap = ieee80211_get_eht_iftype_cap(sband, NL80211_IFTYPE_MESH_POINT);
if (!he_cap || !eht_cap ||
sdata->vif.bss_conf.chandef.width == NL80211_CHAN_WIDTH_20_NOHT ||
sdata->vif.bss_conf.chandef.width == NL80211_CHAN_WIDTH_5 ||
sdata->vif.bss_conf.chandef.width == NL80211_CHAN_WIDTH_10)
return 0;
if (skb_tailroom(skb) < ie_len)
return -ENOMEM;
pos = skb_put(skb, ie_len);
ieee80211_ie_build_eht_cap(pos, he_cap, eht_cap, pos + ie_len, false);
return 0;
}
int mesh_add_eht_oper_ie(struct ieee80211_sub_if_data *sdata, struct sk_buff *skb)
{
const struct ieee80211_sta_eht_cap *eht_cap;
struct ieee80211_supported_band *sband;
u32 len;
u8 *pos;
sband = ieee80211_get_sband(sdata);
if (!sband)
return -EINVAL;
eht_cap = ieee80211_get_eht_iftype_cap(sband, NL80211_IFTYPE_MESH_POINT);
if (!eht_cap ||
sdata->vif.bss_conf.chandef.width == NL80211_CHAN_WIDTH_20_NOHT ||
sdata->vif.bss_conf.chandef.width == NL80211_CHAN_WIDTH_5 ||
sdata->vif.bss_conf.chandef.width == NL80211_CHAN_WIDTH_10)
return 0;
len = 2 + 1 + offsetof(struct ieee80211_eht_operation, optional) +
offsetof(struct ieee80211_eht_operation_info, optional);
if (skb_tailroom(skb) < len)
return -ENOMEM;
pos = skb_put(skb, len);
ieee80211_ie_build_eht_oper(pos, &sdata->vif.bss_conf.chandef, eht_cap);
return 0;
}
static void ieee80211_mesh_path_timer(struct timer_list *t)
{
struct ieee80211_sub_if_data *sdata =
from_timer(sdata, t, u.mesh.mesh_path_timer);
wiphy_work_queue(sdata->local->hw.wiphy, &sdata->work);
}
static void ieee80211_mesh_path_root_timer(struct timer_list *t)
{
struct ieee80211_sub_if_data *sdata =
from_timer(sdata, t, u.mesh.mesh_path_root_timer);
struct ieee80211_if_mesh *ifmsh = &sdata->u.mesh;
set_bit(MESH_WORK_ROOT, &ifmsh->wrkq_flags);
wiphy_work_queue(sdata->local->hw.wiphy, &sdata->work);
}
void ieee80211_mesh_root_setup(struct ieee80211_if_mesh *ifmsh)
{
if (ifmsh->mshcfg.dot11MeshHWMPRootMode > IEEE80211_ROOTMODE_ROOT)
set_bit(MESH_WORK_ROOT, &ifmsh->wrkq_flags);
else {
clear_bit(MESH_WORK_ROOT, &ifmsh->wrkq_flags);
/* stop running timer */
del_timer_sync(&ifmsh->mesh_path_root_timer);
}
}
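/*
 * ieee80211_mesh_update_bss_params - latch HE/EHT state from the beacon tail
 *
 * Called with the freshly built beacon tail in @ie/@ie_len; marks HE (and,
 * if the hardware advertises it for mesh, EHT) as supported in bss_conf and
 * copies the HE operation parameters out of the HE Operation element that
 * was just added to the beacon.
 */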
static void
ieee80211_mesh_update_bss_params(struct ieee80211_sub_if_data *sdata,
u8 *ie, u8 ie_len)
{
struct ieee80211_supported_band *sband;
const struct element *cap;
const struct ieee80211_he_operation *he_oper = NULL;
sband = ieee80211_get_sband(sdata);
if (!sband)
return;
if (!ieee80211_get_he_iftype_cap(sband, NL80211_IFTYPE_MESH_POINT) ||
sdata->vif.bss_conf.chandef.width == NL80211_CHAN_WIDTH_20_NOHT ||
sdata->vif.bss_conf.chandef.width == NL80211_CHAN_WIDTH_5 ||
sdata->vif.bss_conf.chandef.width == NL80211_CHAN_WIDTH_10)
return;
sdata->vif.bss_conf.he_support = true;
cap = cfg80211_find_ext_elem(WLAN_EID_EXT_HE_OPERATION, ie, ie_len);
if (cap && cap->datalen >= 1 + sizeof(*he_oper) &&
cap->datalen >= 1 + ieee80211_he_oper_size(cap->data + 1))
he_oper = (void *)(cap->data + 1);
if (he_oper)
sdata->vif.bss_conf.he_oper.params =
__le32_to_cpu(he_oper->he_oper_params);
sdata->vif.bss_conf.eht_support =
!!ieee80211_get_eht_iftype_cap(sband, NL80211_IFTYPE_MESH_POINT);
}
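/*
 * ieee80211_mesh_xmit_fast - attempt the cached fast-xmit path for a frame
 *
 * Bails out (returning false, so the caller falls back to the regular xmit
 * path) for multicast frames, frames needing TX status, powersave peers,
 * missing fast-tx/path entries or insufficient headroom. Otherwise it
 * prepends the cached mesh + 802.11 headers, stamps a fresh mesh sequence
 * number and TTL and hands the frame to __ieee80211_xmit_fast(); a true
 * return means the skb has been consumed.
 */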
bool ieee80211_mesh_xmit_fast(struct ieee80211_sub_if_data *sdata,
struct sk_buff *skb, u32 ctrl_flags)
{
struct ieee80211_if_mesh *ifmsh = &sdata->u.mesh;
struct ieee80211_mesh_fast_tx *entry;
struct ieee80211s_hdr *meshhdr;
u8 sa[ETH_ALEN] __aligned(2);
struct tid_ampdu_tx *tid_tx;
struct sta_info *sta;
bool copy_sa = false;
u16 ethertype;
u8 tid;
if (ctrl_flags & IEEE80211_TX_CTRL_SKIP_MPATH_LOOKUP)
return false;
if (ifmsh->mshcfg.dot11MeshNolearn)
return false;
/* Add support for these cases later */
if (ifmsh->ps_peers_light_sleep || ifmsh->ps_peers_deep_sleep)
return false;
if (is_multicast_ether_addr(skb->data))
return false;
ethertype = (skb->data[12] << 8) | skb->data[13];
if (ethertype < ETH_P_802_3_MIN)
return false;
if (skb->sk && skb_shinfo(skb)->tx_flags & SKBTX_WIFI_STATUS)
return false;
if (skb->ip_summed == CHECKSUM_PARTIAL) {
skb_set_transport_header(skb, skb_checksum_start_offset(skb));
if (skb_checksum_help(skb))
return false;
}
entry = mesh_fast_tx_get(sdata, skb->data);
if (!entry)
return false;
if (skb_headroom(skb) < entry->hdrlen + entry->fast_tx.hdr_len)
return false;
sta = rcu_dereference(entry->mpath->next_hop);
if (!sta)
return false;
tid = skb->priority & IEEE80211_QOS_CTL_TAG1D_MASK;
tid_tx = rcu_dereference(sta->ampdu_mlme.tid_tx[tid]);
if (tid_tx) {
if (!test_bit(HT_AGG_STATE_OPERATIONAL, &tid_tx->state))
return false;
if (tid_tx->timeout)
tid_tx->last_tx = jiffies;
}
skb = skb_share_check(skb, GFP_ATOMIC);
if (!skb)
return true;
skb_set_queue_mapping(skb, ieee80211_select_queue(sdata, sta, skb));
meshhdr = (struct ieee80211s_hdr *)entry->hdr;
if ((meshhdr->flags & MESH_FLAGS_AE) == MESH_FLAGS_AE_A5_A6) {
/* preserve SA from eth header for 6-addr frames */
ether_addr_copy(sa, skb->data + ETH_ALEN);
copy_sa = true;
}
memcpy(skb_push(skb, entry->hdrlen - 2 * ETH_ALEN), entry->hdr,
entry->hdrlen);
meshhdr = (struct ieee80211s_hdr *)skb->data;
put_unaligned_le32(atomic_inc_return(&sdata->u.mesh.mesh_seqnum),
&meshhdr->seqnum);
meshhdr->ttl = sdata->u.mesh.mshcfg.dot11MeshTTL;
if (copy_sa)
ether_addr_copy(meshhdr->eaddr2, sa);
skb_push(skb, 2 * ETH_ALEN);
__ieee80211_xmit_fast(sdata, sta, &entry->fast_tx, skb, tid_tx,
entry->mpath->dst, sdata->vif.addr);
return true;
}
/**
* ieee80211_fill_mesh_addresses - fill addresses of a locally originated mesh frame
* @hdr: 802.11 frame header
* @fc: frame control field
* @meshda: destination address in the mesh
* @meshsa: source address in the mesh. Same as TA, as frame is
* locally originated.
*
 * Return the length of the 802.11 header (not including the mesh control header)
*/
int ieee80211_fill_mesh_addresses(struct ieee80211_hdr *hdr, __le16 *fc,
const u8 *meshda, const u8 *meshsa)
{
if (is_multicast_ether_addr(meshda)) {
*fc |= cpu_to_le16(IEEE80211_FCTL_FROMDS);
/* DA TA SA */
memcpy(hdr->addr1, meshda, ETH_ALEN);
memcpy(hdr->addr2, meshsa, ETH_ALEN);
memcpy(hdr->addr3, meshsa, ETH_ALEN);
return 24;
} else {
*fc |= cpu_to_le16(IEEE80211_FCTL_FROMDS | IEEE80211_FCTL_TODS);
/* RA TA DA SA */
eth_zero_addr(hdr->addr1); /* RA is resolved later */
memcpy(hdr->addr2, meshsa, ETH_ALEN);
memcpy(hdr->addr3, meshda, ETH_ALEN);
memcpy(hdr->addr4, meshsa, ETH_ALEN);
return 30;
}
}
/**
* ieee80211_new_mesh_header - create a new mesh header
* @sdata: mesh interface to be used
* @meshhdr: uninitialized mesh header
* @addr4or5: 1st address in the ae header, which may correspond to address 4
* (if addr6 is NULL) or address 5 (if addr6 is present). It may
* be NULL.
* @addr6: 2nd address in the ae header, which corresponds to addr6 of the
* mesh frame
*
* Return the header length.
*/
unsigned int ieee80211_new_mesh_header(struct ieee80211_sub_if_data *sdata,
struct ieee80211s_hdr *meshhdr,
const char *addr4or5, const char *addr6)
{
if (WARN_ON(!addr4or5 && addr6))
return 0;
memset(meshhdr, 0, sizeof(*meshhdr));
meshhdr->ttl = sdata->u.mesh.mshcfg.dot11MeshTTL;
put_unaligned_le32(atomic_inc_return(&sdata->u.mesh.mesh_seqnum),
&meshhdr->seqnum);
if (addr4or5 && !addr6) {
meshhdr->flags |= MESH_FLAGS_AE_A4;
memcpy(meshhdr->eaddr1, addr4or5, ETH_ALEN);
return 2 * ETH_ALEN;
} else if (addr4or5 && addr6) {
meshhdr->flags |= MESH_FLAGS_AE_A5_A6;
memcpy(meshhdr->eaddr1, addr4or5, ETH_ALEN);
memcpy(meshhdr->eaddr2, addr6, ETH_ALEN);
return 3 * ETH_ALEN;
}
return ETH_ALEN;
}
static void ieee80211_mesh_housekeeping(struct ieee80211_sub_if_data *sdata)
{
struct ieee80211_if_mesh *ifmsh = &sdata->u.mesh;
u64 changed;
if (ifmsh->mshcfg.plink_timeout > 0)
ieee80211_sta_expire(sdata, ifmsh->mshcfg.plink_timeout * HZ);
mesh_path_expire(sdata);
changed = mesh_accept_plinks_update(sdata);
ieee80211_mbss_info_change_notify(sdata, changed);
mesh_fast_tx_gc(sdata);
mod_timer(&ifmsh->housekeeping_timer,
round_jiffies(jiffies +
IEEE80211_MESH_HOUSEKEEPING_INTERVAL));
}
static void ieee80211_mesh_rootpath(struct ieee80211_sub_if_data *sdata)
{
struct ieee80211_if_mesh *ifmsh = &sdata->u.mesh;
u32 interval;
mesh_path_tx_root_frame(sdata);
if (ifmsh->mshcfg.dot11MeshHWMPRootMode == IEEE80211_PROACTIVE_RANN)
interval = ifmsh->mshcfg.dot11MeshHWMPRannInterval;
else
interval = ifmsh->mshcfg.dot11MeshHWMProotInterval;
mod_timer(&ifmsh->mesh_path_root_timer,
round_jiffies(TU_TO_EXP_TIME(interval)));
}
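/*
 * ieee80211_mesh_build_beacon - allocate and populate the mesh beacon
 *
 * Builds the beacon head (management header, empty SSID, optional CSA
 * elements, supported rates, DS params) and tail (extended rates, RSN,
 * HT/VHT/HE/EHT elements, Mesh ID, Mesh Config, awake window and vendor
 * elements) into a single beacon_data allocation and publishes it via RCU
 * in ifmsh->beacon. Returns 0 on success or -ENOMEM on allocation failure.
 */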
static int
ieee80211_mesh_build_beacon(struct ieee80211_if_mesh *ifmsh)
{
struct beacon_data *bcn;
int head_len, tail_len;
struct sk_buff *skb;
struct ieee80211_mgmt *mgmt;
struct ieee80211_chanctx_conf *chanctx_conf;
struct mesh_csa_settings *csa;
enum nl80211_band band;
u8 ie_len_he_cap, ie_len_eht_cap;
u8 *pos;
struct ieee80211_sub_if_data *sdata;
int hdr_len = offsetofend(struct ieee80211_mgmt, u.beacon);
sdata = container_of(ifmsh, struct ieee80211_sub_if_data, u.mesh);
rcu_read_lock();
chanctx_conf = rcu_dereference(sdata->vif.bss_conf.chanctx_conf);
band = chanctx_conf->def.chan->band;
rcu_read_unlock();
ie_len_he_cap = ieee80211_ie_len_he_cap(sdata,
NL80211_IFTYPE_MESH_POINT);
ie_len_eht_cap = ieee80211_ie_len_eht_cap(sdata,
NL80211_IFTYPE_MESH_POINT);
head_len = hdr_len +
2 + /* NULL SSID */
/* Channel Switch Announcement */
2 + sizeof(struct ieee80211_channel_sw_ie) +
/* Mesh Channel Switch Parameters */
2 + sizeof(struct ieee80211_mesh_chansw_params_ie) +
/* Channel Switch Wrapper + Wide Bandwidth CSA IE */
2 + 2 + sizeof(struct ieee80211_wide_bw_chansw_ie) +
2 + sizeof(struct ieee80211_sec_chan_offs_ie) +
2 + 8 + /* supported rates */
2 + 3; /* DS params */
tail_len = 2 + (IEEE80211_MAX_SUPP_RATES - 8) +
2 + sizeof(struct ieee80211_ht_cap) +
2 + sizeof(struct ieee80211_ht_operation) +
2 + ifmsh->mesh_id_len +
2 + sizeof(struct ieee80211_meshconf_ie) +
2 + sizeof(__le16) + /* awake window */
2 + sizeof(struct ieee80211_vht_cap) +
2 + sizeof(struct ieee80211_vht_operation) +
ie_len_he_cap +
2 + 1 + sizeof(struct ieee80211_he_operation) +
sizeof(struct ieee80211_he_6ghz_oper) +
2 + 1 + sizeof(struct ieee80211_he_6ghz_capa) +
ie_len_eht_cap +
2 + 1 + offsetof(struct ieee80211_eht_operation, optional) +
offsetof(struct ieee80211_eht_operation_info, optional) +
ifmsh->ie_len;
bcn = kzalloc(sizeof(*bcn) + head_len + tail_len, GFP_KERNEL);
/* need an skb for IE builders to operate on */
skb = __dev_alloc_skb(max(head_len, tail_len), GFP_KERNEL);
if (!bcn || !skb)
goto out_free;
/*
* pointers go into the block we allocated,
* memory is | beacon_data | head | tail |
*/
bcn->head = ((u8 *) bcn) + sizeof(*bcn);
/* fill in the head */
mgmt = skb_put_zero(skb, hdr_len);
mgmt->frame_control = cpu_to_le16(IEEE80211_FTYPE_MGMT |
IEEE80211_STYPE_BEACON);
eth_broadcast_addr(mgmt->da);
memcpy(mgmt->sa, sdata->vif.addr, ETH_ALEN);
memcpy(mgmt->bssid, sdata->vif.addr, ETH_ALEN);
ieee80211_mps_set_frame_flags(sdata, NULL, (void *) mgmt);
mgmt->u.beacon.beacon_int =
cpu_to_le16(sdata->vif.bss_conf.beacon_int);
mgmt->u.beacon.capab_info |= cpu_to_le16(
sdata->u.mesh.security ? WLAN_CAPABILITY_PRIVACY : 0);
pos = skb_put(skb, 2);
*pos++ = WLAN_EID_SSID;
*pos++ = 0x0;
rcu_read_lock();
csa = rcu_dereference(ifmsh->csa);
if (csa) {
enum nl80211_channel_type ct;
struct cfg80211_chan_def *chandef;
int ie_len = 2 + sizeof(struct ieee80211_channel_sw_ie) +
2 + sizeof(struct ieee80211_mesh_chansw_params_ie);
pos = skb_put_zero(skb, ie_len);
*pos++ = WLAN_EID_CHANNEL_SWITCH;
*pos++ = 3;
*pos++ = 0x0;
*pos++ = ieee80211_frequency_to_channel(
csa->settings.chandef.chan->center_freq);
bcn->cntdwn_current_counter = csa->settings.count;
bcn->cntdwn_counter_offsets[0] = hdr_len + 6;
*pos++ = csa->settings.count;
*pos++ = WLAN_EID_CHAN_SWITCH_PARAM;
*pos++ = 6;
if (ifmsh->csa_role == IEEE80211_MESH_CSA_ROLE_INIT) {
*pos++ = ifmsh->mshcfg.dot11MeshTTL;
*pos |= WLAN_EID_CHAN_SWITCH_PARAM_INITIATOR;
} else {
*pos++ = ifmsh->chsw_ttl;
}
*pos++ |= csa->settings.block_tx ?
WLAN_EID_CHAN_SWITCH_PARAM_TX_RESTRICT : 0x00;
put_unaligned_le16(WLAN_REASON_MESH_CHAN, pos);
pos += 2;
put_unaligned_le16(ifmsh->pre_value, pos);
pos += 2;
switch (csa->settings.chandef.width) {
case NL80211_CHAN_WIDTH_40:
ie_len = 2 + sizeof(struct ieee80211_sec_chan_offs_ie);
pos = skb_put_zero(skb, ie_len);
*pos++ = WLAN_EID_SECONDARY_CHANNEL_OFFSET; /* EID */
*pos++ = 1; /* len */
ct = cfg80211_get_chandef_type(&csa->settings.chandef);
if (ct == NL80211_CHAN_HT40PLUS)
*pos++ = IEEE80211_HT_PARAM_CHA_SEC_ABOVE;
else
*pos++ = IEEE80211_HT_PARAM_CHA_SEC_BELOW;
break;
case NL80211_CHAN_WIDTH_80:
case NL80211_CHAN_WIDTH_80P80:
case NL80211_CHAN_WIDTH_160:
/* Channel Switch Wrapper + Wide Bandwidth CSA IE */
ie_len = 2 + 2 +
sizeof(struct ieee80211_wide_bw_chansw_ie);
pos = skb_put_zero(skb, ie_len);
*pos++ = WLAN_EID_CHANNEL_SWITCH_WRAPPER; /* EID */
*pos++ = 5; /* len */
/* put sub IE */
chandef = &csa->settings.chandef;
ieee80211_ie_build_wide_bw_cs(pos, chandef);
break;
default:
break;
}
}
rcu_read_unlock();
if (ieee80211_add_srates_ie(sdata, skb, true, band) ||
mesh_add_ds_params_ie(sdata, skb))
goto out_free;
bcn->head_len = skb->len;
memcpy(bcn->head, skb->data, bcn->head_len);
/* now the tail */
skb_trim(skb, 0);
bcn->tail = bcn->head + bcn->head_len;
if (ieee80211_add_ext_srates_ie(sdata, skb, true, band) ||
mesh_add_rsn_ie(sdata, skb) ||
mesh_add_ht_cap_ie(sdata, skb) ||
mesh_add_ht_oper_ie(sdata, skb) ||
mesh_add_meshid_ie(sdata, skb) ||
mesh_add_meshconf_ie(sdata, skb) ||
mesh_add_awake_window_ie(sdata, skb) ||
mesh_add_vht_cap_ie(sdata, skb) ||
mesh_add_vht_oper_ie(sdata, skb) ||
mesh_add_he_cap_ie(sdata, skb, ie_len_he_cap) ||
mesh_add_he_oper_ie(sdata, skb) ||
mesh_add_he_6ghz_cap_ie(sdata, skb) ||
mesh_add_eht_cap_ie(sdata, skb, ie_len_eht_cap) ||
mesh_add_eht_oper_ie(sdata, skb) ||
mesh_add_vendor_ies(sdata, skb))
goto out_free;
bcn->tail_len = skb->len;
memcpy(bcn->tail, skb->data, bcn->tail_len);
ieee80211_mesh_update_bss_params(sdata, bcn->tail, bcn->tail_len);
bcn->meshconf = (struct ieee80211_meshconf_ie *)
(bcn->tail + ifmsh->meshconf_offset);
dev_kfree_skb(skb);
rcu_assign_pointer(ifmsh->beacon, bcn);
return 0;
out_free:
kfree(bcn);
dev_kfree_skb(skb);
return -ENOMEM;
}
static int
ieee80211_mesh_rebuild_beacon(struct ieee80211_sub_if_data *sdata)
{
struct beacon_data *old_bcn;
int ret;
old_bcn = sdata_dereference(sdata->u.mesh.beacon, sdata);
ret = ieee80211_mesh_build_beacon(&sdata->u.mesh);
if (ret)
/* just reuse old beacon */
return ret;
if (old_bcn)
kfree_rcu(old_bcn, rcu_head);
return 0;
}
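/*
 * ieee80211_mbss_info_change_notify - queue BSS info changes for mesh work
 *
 * Records the changed flags in ifmsh->mbss_changed and schedules the mesh
 * work, so the actual (possibly sleeping) beacon rebuild and driver
 * notification happen later in mesh_bss_info_changed() rather than in the
 * caller's context.
 */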
void ieee80211_mbss_info_change_notify(struct ieee80211_sub_if_data *sdata,
u64 changed)
{
struct ieee80211_if_mesh *ifmsh = &sdata->u.mesh;
unsigned long bits = changed;
u32 bit;
if (!bits)
return;
/* if we race with running work, worst case this work becomes a noop */
for_each_set_bit(bit, &bits, sizeof(changed) * BITS_PER_BYTE)
set_bit(bit, &ifmsh->mbss_changed);
set_bit(MESH_WORK_MBSS_CHANGED, &ifmsh->wrkq_flags);
wiphy_work_queue(sdata->local->hw.wiphy, &sdata->work);
}
int ieee80211_start_mesh(struct ieee80211_sub_if_data *sdata)
{
struct ieee80211_if_mesh *ifmsh = &sdata->u.mesh;
struct ieee80211_local *local = sdata->local;
u64 changed = BSS_CHANGED_BEACON |
BSS_CHANGED_BEACON_ENABLED |
BSS_CHANGED_HT |
BSS_CHANGED_BASIC_RATES |
BSS_CHANGED_BEACON_INT |
BSS_CHANGED_MCAST_RATE;
local->fif_other_bss++;
/* mesh ifaces must set allmulti to forward mcast traffic */
atomic_inc(&local->iff_allmultis);
ieee80211_configure_filter(local);
ifmsh->mesh_cc_id = 0; /* Disabled */
/* register sync ops from extensible synchronization framework */
ifmsh->sync_ops = ieee80211_mesh_sync_ops_get(ifmsh->mesh_sp_id);
ifmsh->sync_offset_clockdrift_max = 0;
set_bit(MESH_WORK_HOUSEKEEPING, &ifmsh->wrkq_flags);
ieee80211_mesh_root_setup(ifmsh);
wiphy_work_queue(local->hw.wiphy, &sdata->work);
sdata->vif.bss_conf.ht_operation_mode =
ifmsh->mshcfg.ht_opmode;
sdata->vif.bss_conf.enable_beacon = true;
changed |= ieee80211_mps_local_status_update(sdata);
if (ieee80211_mesh_build_beacon(ifmsh)) {
ieee80211_stop_mesh(sdata);
return -ENOMEM;
}
ieee80211_recalc_dtim(local, sdata);
ieee80211_link_info_change_notify(sdata, &sdata->deflink, changed);
netif_carrier_on(sdata->dev);
return 0;
}
void ieee80211_stop_mesh(struct ieee80211_sub_if_data *sdata)
{
struct ieee80211_local *local = sdata->local;
struct ieee80211_if_mesh *ifmsh = &sdata->u.mesh;
struct beacon_data *bcn;
netif_carrier_off(sdata->dev);
/* flush STAs and mpaths on this iface */
sta_info_flush(sdata);
ieee80211_free_keys(sdata, true);
mesh_path_flush_by_iface(sdata);
/* stop the beacon */
ifmsh->mesh_id_len = 0;
sdata->vif.bss_conf.enable_beacon = false;
sdata->beacon_rate_set = false;
clear_bit(SDATA_STATE_OFFCHANNEL_BEACON_STOPPED, &sdata->state);
ieee80211_link_info_change_notify(sdata, &sdata->deflink,
BSS_CHANGED_BEACON_ENABLED);
/* remove beacon */
bcn = sdata_dereference(ifmsh->beacon, sdata);
RCU_INIT_POINTER(ifmsh->beacon, NULL);
kfree_rcu(bcn, rcu_head);
/* free all potentially still buffered group-addressed frames */
local->total_ps_buffered -= skb_queue_len(&ifmsh->ps.bc_buf);
skb_queue_purge(&ifmsh->ps.bc_buf);
del_timer_sync(&sdata->u.mesh.housekeeping_timer);
del_timer_sync(&sdata->u.mesh.mesh_path_root_timer);
del_timer_sync(&sdata->u.mesh.mesh_path_timer);
/* clear any mesh work (for next join) we may have accrued */
ifmsh->wrkq_flags = 0;
ifmsh->mbss_changed = 0;
local->fif_other_bss--;
atomic_dec(&local->iff_allmultis);
ieee80211_configure_filter(local);
}
static void ieee80211_mesh_csa_mark_radar(struct ieee80211_sub_if_data *sdata)
{
int err;
/* if the current channel is a DFS channel, mark the channel as
* unavailable.
*/
err = cfg80211_chandef_dfs_required(sdata->local->hw.wiphy,
&sdata->vif.bss_conf.chandef,
NL80211_IFTYPE_MESH_POINT);
if (err > 0)
cfg80211_radar_event(sdata->local->hw.wiphy,
&sdata->vif.bss_conf.chandef, GFP_ATOMIC);
}
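/*
 * ieee80211_mesh_process_chnswitch - validate and act on a received CSA
 *
 * Parses the channel switch elements, refuses switches to channels we
 * cannot beacon on (or that require DFS without userspace support), and
 * otherwise triggers the local channel switch as a CSA repeater. Returns
 * true if the CSA was accepted (or matches the current chandef), false if
 * it was rejected or the switch could not be started.
 */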
static bool
ieee80211_mesh_process_chnswitch(struct ieee80211_sub_if_data *sdata,
struct ieee802_11_elems *elems, bool beacon)
{
struct cfg80211_csa_settings params;
struct ieee80211_csa_ie csa_ie;
struct ieee80211_if_mesh *ifmsh = &sdata->u.mesh;
struct ieee80211_supported_band *sband;
int err;
ieee80211_conn_flags_t conn_flags = 0;
u32 vht_cap_info = 0;
sdata_assert_lock(sdata);
sband = ieee80211_get_sband(sdata);
if (!sband)
return false;
switch (sdata->vif.bss_conf.chandef.width) {
case NL80211_CHAN_WIDTH_20_NOHT:
conn_flags |= IEEE80211_CONN_DISABLE_HT;
fallthrough;
case NL80211_CHAN_WIDTH_20:
conn_flags |= IEEE80211_CONN_DISABLE_40MHZ;
fallthrough;
case NL80211_CHAN_WIDTH_40:
conn_flags |= IEEE80211_CONN_DISABLE_VHT;
break;
default:
break;
}
if (elems->vht_cap_elem)
vht_cap_info =
le32_to_cpu(elems->vht_cap_elem->vht_cap_info);
memset(¶ms, 0, sizeof(params));
err = ieee80211_parse_ch_switch_ie(sdata, elems, sband->band,
vht_cap_info,
conn_flags, sdata->vif.addr,
&csa_ie);
if (err < 0)
return false;
if (err)
return false;
/* Mark the channel unavailable if the reason for the switch is
* regulatory.
*/
if (csa_ie.reason_code == WLAN_REASON_MESH_CHAN_REGULATORY)
ieee80211_mesh_csa_mark_radar(sdata);
params.chandef = csa_ie.chandef;
params.count = csa_ie.count;
if (!cfg80211_chandef_usable(sdata->local->hw.wiphy, ¶ms.chandef,
IEEE80211_CHAN_DISABLED) ||
!cfg80211_reg_can_beacon(sdata->local->hw.wiphy, ¶ms.chandef,
NL80211_IFTYPE_MESH_POINT)) {
sdata_info(sdata,
"mesh STA %pM switches to unsupported channel (%d MHz, width:%d, CF1/2: %d/%d MHz), aborting\n",
sdata->vif.addr,
params.chandef.chan->center_freq,
params.chandef.width,
params.chandef.center_freq1,
params.chandef.center_freq2);
return false;
}
err = cfg80211_chandef_dfs_required(sdata->local->hw.wiphy,
¶ms.chandef,
NL80211_IFTYPE_MESH_POINT);
if (err < 0)
return false;
if (err > 0 && !ifmsh->userspace_handles_dfs) {
sdata_info(sdata,
"mesh STA %pM switches to channel requiring DFS (%d MHz, width:%d, CF1/2: %d/%d MHz), aborting\n",
sdata->vif.addr,
params.chandef.chan->center_freq,
params.chandef.width,
params.chandef.center_freq1,
params.chandef.center_freq2);
return false;
}
params.radar_required = err;
if (cfg80211_chandef_identical(¶ms.chandef,
&sdata->vif.bss_conf.chandef)) {
mcsa_dbg(sdata,
"received csa with an identical chandef, ignoring\n");
return true;
}
mcsa_dbg(sdata,
"received channel switch announcement to go to channel %d MHz\n",
params.chandef.chan->center_freq);
params.block_tx = csa_ie.mode & WLAN_EID_CHAN_SWITCH_PARAM_TX_RESTRICT;
if (beacon) {
ifmsh->chsw_ttl = csa_ie.ttl - 1;
if (ifmsh->pre_value >= csa_ie.pre_value)
return false;
ifmsh->pre_value = csa_ie.pre_value;
}
if (ifmsh->chsw_ttl >= ifmsh->mshcfg.dot11MeshTTL)
return false;
ifmsh->csa_role = IEEE80211_MESH_CSA_ROLE_REPEATER;
if (ieee80211_channel_switch(sdata->local->hw.wiphy, sdata->dev,
¶ms) < 0)
return false;
return true;
}
static void
ieee80211_mesh_rx_probe_req(struct ieee80211_sub_if_data *sdata,
struct ieee80211_mgmt *mgmt, size_t len)
{
struct ieee80211_local *local = sdata->local;
struct ieee80211_if_mesh *ifmsh = &sdata->u.mesh;
struct sk_buff *presp;
struct beacon_data *bcn;
struct ieee80211_mgmt *hdr;
struct ieee802_11_elems *elems;
size_t baselen;
u8 *pos;
pos = mgmt->u.probe_req.variable;
baselen = (u8 *) pos - (u8 *) mgmt;
if (baselen > len)
return;
elems = ieee802_11_parse_elems(pos, len - baselen, false, NULL);
if (!elems)
return;
if (!elems->mesh_id)
goto free;
/* 802.11-2012 10.1.4.3.2 */
if ((!ether_addr_equal(mgmt->da, sdata->vif.addr) &&
!is_broadcast_ether_addr(mgmt->da)) ||
elems->ssid_len != 0)
goto free;
if (elems->mesh_id_len != 0 &&
(elems->mesh_id_len != ifmsh->mesh_id_len ||
memcmp(elems->mesh_id, ifmsh->mesh_id, ifmsh->mesh_id_len)))
goto free;
rcu_read_lock();
bcn = rcu_dereference(ifmsh->beacon);
if (!bcn)
goto out;
presp = dev_alloc_skb(local->tx_headroom +
bcn->head_len + bcn->tail_len);
if (!presp)
goto out;
skb_reserve(presp, local->tx_headroom);
skb_put_data(presp, bcn->head, bcn->head_len);
skb_put_data(presp, bcn->tail, bcn->tail_len);
hdr = (struct ieee80211_mgmt *) presp->data;
hdr->frame_control = cpu_to_le16(IEEE80211_FTYPE_MGMT |
IEEE80211_STYPE_PROBE_RESP);
memcpy(hdr->da, mgmt->sa, ETH_ALEN);
IEEE80211_SKB_CB(presp)->flags |= IEEE80211_TX_INTFL_DONT_ENCRYPT;
ieee80211_tx_skb(sdata, presp);
out:
rcu_read_unlock();
free:
kfree(elems);
}
static void ieee80211_mesh_rx_bcn_presp(struct ieee80211_sub_if_data *sdata,
u16 stype,
struct ieee80211_mgmt *mgmt,
size_t len,
struct ieee80211_rx_status *rx_status)
{
struct ieee80211_local *local = sdata->local;
struct ieee80211_if_mesh *ifmsh = &sdata->u.mesh;
struct ieee802_11_elems *elems;
struct ieee80211_channel *channel;
size_t baselen;
int freq;
enum nl80211_band band = rx_status->band;
/* ignore ProbeResp to foreign address */
if (stype == IEEE80211_STYPE_PROBE_RESP &&
!ether_addr_equal(mgmt->da, sdata->vif.addr))
return;
baselen = (u8 *) mgmt->u.probe_resp.variable - (u8 *) mgmt;
if (baselen > len)
return;
elems = ieee802_11_parse_elems(mgmt->u.probe_resp.variable,
len - baselen,
false, NULL);
if (!elems)
return;
/* ignore non-mesh or secure/insecure mismatch */
if ((!elems->mesh_id || !elems->mesh_config) ||
(elems->rsn && sdata->u.mesh.security == IEEE80211_MESH_SEC_NONE) ||
(!elems->rsn && sdata->u.mesh.security != IEEE80211_MESH_SEC_NONE))
goto free;
if (elems->ds_params)
freq = ieee80211_channel_to_frequency(elems->ds_params[0], band);
else
freq = rx_status->freq;
channel = ieee80211_get_channel(local->hw.wiphy, freq);
if (!channel || channel->flags & IEEE80211_CHAN_DISABLED)
goto free;
if (mesh_matches_local(sdata, elems)) {
mpl_dbg(sdata, "rssi_threshold=%d,rx_status->signal=%d\n",
sdata->u.mesh.mshcfg.rssi_threshold, rx_status->signal);
if (!sdata->u.mesh.user_mpm ||
sdata->u.mesh.mshcfg.rssi_threshold == 0 ||
sdata->u.mesh.mshcfg.rssi_threshold < rx_status->signal)
mesh_neighbour_update(sdata, mgmt->sa, elems,
rx_status);
if (ifmsh->csa_role != IEEE80211_MESH_CSA_ROLE_INIT &&
!sdata->vif.bss_conf.csa_active)
ieee80211_mesh_process_chnswitch(sdata, elems, true);
}
if (ifmsh->sync_ops)
ifmsh->sync_ops->rx_bcn_presp(sdata, stype, mgmt, len,
elems->mesh_config, rx_status);
free:
kfree(elems);
}
int ieee80211_mesh_finish_csa(struct ieee80211_sub_if_data *sdata, u64 *changed)
{
struct ieee80211_if_mesh *ifmsh = &sdata->u.mesh;
struct mesh_csa_settings *tmp_csa_settings;
int ret = 0;
/* Reset the TTL value and Initiator flag */
ifmsh->csa_role = IEEE80211_MESH_CSA_ROLE_NONE;
ifmsh->chsw_ttl = 0;
/* Remove the CSA and MCSP elements from the beacon */
tmp_csa_settings = sdata_dereference(ifmsh->csa, sdata);
RCU_INIT_POINTER(ifmsh->csa, NULL);
if (tmp_csa_settings)
kfree_rcu(tmp_csa_settings, rcu_head);
ret = ieee80211_mesh_rebuild_beacon(sdata);
if (ret)
return -EINVAL;
*changed |= BSS_CHANGED_BEACON;
mcsa_dbg(sdata, "complete switching to center freq %d MHz",
sdata->vif.bss_conf.chandef.chan->center_freq);
return 0;
}
int ieee80211_mesh_csa_beacon(struct ieee80211_sub_if_data *sdata,
struct cfg80211_csa_settings *csa_settings,
u64 *changed)
{
struct ieee80211_if_mesh *ifmsh = &sdata->u.mesh;
struct mesh_csa_settings *tmp_csa_settings;
int ret = 0;
lockdep_assert_held(&sdata->wdev.mtx);
tmp_csa_settings = kmalloc(sizeof(*tmp_csa_settings),
GFP_ATOMIC);
if (!tmp_csa_settings)
return -ENOMEM;
memcpy(&tmp_csa_settings->settings, csa_settings,
sizeof(struct cfg80211_csa_settings));
rcu_assign_pointer(ifmsh->csa, tmp_csa_settings);
ret = ieee80211_mesh_rebuild_beacon(sdata);
if (ret) {
tmp_csa_settings = rcu_dereference(ifmsh->csa);
RCU_INIT_POINTER(ifmsh->csa, NULL);
kfree_rcu(tmp_csa_settings, rcu_head);
return ret;
}
*changed |= BSS_CHANGED_BEACON;
return 0;
}
static int mesh_fwd_csa_frame(struct ieee80211_sub_if_data *sdata,
struct ieee80211_mgmt *mgmt, size_t len,
struct ieee802_11_elems *elems)
{
struct ieee80211_mgmt *mgmt_fwd;
struct sk_buff *skb;
struct ieee80211_local *local = sdata->local;
skb = dev_alloc_skb(local->tx_headroom + len);
if (!skb)
return -ENOMEM;
skb_reserve(skb, local->tx_headroom);
mgmt_fwd = skb_put(skb, len);
elems->mesh_chansw_params_ie->mesh_ttl--;
elems->mesh_chansw_params_ie->mesh_flags &=
~WLAN_EID_CHAN_SWITCH_PARAM_INITIATOR;
memcpy(mgmt_fwd, mgmt, len);
eth_broadcast_addr(mgmt_fwd->da);
memcpy(mgmt_fwd->sa, sdata->vif.addr, ETH_ALEN);
memcpy(mgmt_fwd->bssid, sdata->vif.addr, ETH_ALEN);
ieee80211_tx_skb(sdata, skb);
return 0;
}
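/*
 * mesh_rx_csa_frame - handle a spectrum management CSA action frame
 *
 * Drops frames from foreign meshes or with a stale precedence value,
 * processes the channel switch locally and, as long as the mesh TTL has
 * not run out, re-broadcasts the (TTL-decremented) frame to the rest of
 * the mesh.
 */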
static void mesh_rx_csa_frame(struct ieee80211_sub_if_data *sdata,
struct ieee80211_mgmt *mgmt, size_t len)
{
struct ieee80211_if_mesh *ifmsh = &sdata->u.mesh;
struct ieee802_11_elems *elems;
u16 pre_value;
bool fwd_csa = true;
size_t baselen;
u8 *pos;
if (mgmt->u.action.u.measurement.action_code !=
WLAN_ACTION_SPCT_CHL_SWITCH)
return;
pos = mgmt->u.action.u.chan_switch.variable;
baselen = offsetof(struct ieee80211_mgmt,
u.action.u.chan_switch.variable);
elems = ieee802_11_parse_elems(pos, len - baselen, true, NULL);
if (!elems)
return;
if (!mesh_matches_local(sdata, elems))
goto free;
ifmsh->chsw_ttl = elems->mesh_chansw_params_ie->mesh_ttl;
if (!--ifmsh->chsw_ttl)
fwd_csa = false;
pre_value = le16_to_cpu(elems->mesh_chansw_params_ie->mesh_pre_value);
if (ifmsh->pre_value >= pre_value)
goto free;
ifmsh->pre_value = pre_value;
if (!sdata->vif.bss_conf.csa_active &&
!ieee80211_mesh_process_chnswitch(sdata, elems, false)) {
mcsa_dbg(sdata, "Failed to process CSA action frame");
goto free;
}
/* forward or re-broadcast the CSA frame */
if (fwd_csa) {
if (mesh_fwd_csa_frame(sdata, mgmt, len, elems) < 0)
mcsa_dbg(sdata, "Failed to forward the CSA frame");
}
free:
kfree(elems);
}
static void ieee80211_mesh_rx_mgmt_action(struct ieee80211_sub_if_data *sdata,
struct ieee80211_mgmt *mgmt,
size_t len,
struct ieee80211_rx_status *rx_status)
{
switch (mgmt->u.action.category) {
case WLAN_CATEGORY_SELF_PROTECTED:
switch (mgmt->u.action.u.self_prot.action_code) {
case WLAN_SP_MESH_PEERING_OPEN:
case WLAN_SP_MESH_PEERING_CLOSE:
case WLAN_SP_MESH_PEERING_CONFIRM:
mesh_rx_plink_frame(sdata, mgmt, len, rx_status);
break;
}
break;
case WLAN_CATEGORY_MESH_ACTION:
if (mesh_action_is_path_sel(mgmt))
mesh_rx_path_sel_frame(sdata, mgmt, len);
break;
case WLAN_CATEGORY_SPECTRUM_MGMT:
mesh_rx_csa_frame(sdata, mgmt, len);
break;
}
}
void ieee80211_mesh_rx_queued_mgmt(struct ieee80211_sub_if_data *sdata,
struct sk_buff *skb)
{
struct ieee80211_rx_status *rx_status;
struct ieee80211_mgmt *mgmt;
u16 stype;
sdata_lock(sdata);
/* mesh already went down */
if (!sdata->u.mesh.mesh_id_len)
goto out;
rx_status = IEEE80211_SKB_RXCB(skb);
mgmt = (struct ieee80211_mgmt *) skb->data;
stype = le16_to_cpu(mgmt->frame_control) & IEEE80211_FCTL_STYPE;
switch (stype) {
case IEEE80211_STYPE_PROBE_RESP:
case IEEE80211_STYPE_BEACON:
ieee80211_mesh_rx_bcn_presp(sdata, stype, mgmt, skb->len,
rx_status);
break;
case IEEE80211_STYPE_PROBE_REQ:
ieee80211_mesh_rx_probe_req(sdata, mgmt, skb->len);
break;
case IEEE80211_STYPE_ACTION:
ieee80211_mesh_rx_mgmt_action(sdata, mgmt, skb->len, rx_status);
break;
}
out:
sdata_unlock(sdata);
}
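/*
 * mesh_bss_info_changed - flush accumulated mbss_changed bits to the driver
 *
 * Collects the bits queued by ieee80211_mbss_info_change_notify(), rebuilds
 * the beacon first if any beacon-affecting bit is set while beaconing is
 * enabled, and then notifies the driver of the combined change set.
 */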
static void mesh_bss_info_changed(struct ieee80211_sub_if_data *sdata)
{
struct ieee80211_if_mesh *ifmsh = &sdata->u.mesh;
u32 bit;
u64 changed = 0;
for_each_set_bit(bit, &ifmsh->mbss_changed,
sizeof(changed) * BITS_PER_BYTE) {
clear_bit(bit, &ifmsh->mbss_changed);
changed |= BIT(bit);
}
if (sdata->vif.bss_conf.enable_beacon &&
(changed & (BSS_CHANGED_BEACON |
BSS_CHANGED_HT |
BSS_CHANGED_BASIC_RATES |
BSS_CHANGED_BEACON_INT)))
if (ieee80211_mesh_rebuild_beacon(sdata))
return;
ieee80211_link_info_change_notify(sdata, &sdata->deflink, changed);
}
void ieee80211_mesh_work(struct ieee80211_sub_if_data *sdata)
{
struct ieee80211_if_mesh *ifmsh = &sdata->u.mesh;
sdata_lock(sdata);
/* mesh already went down */
if (!sdata->u.mesh.mesh_id_len)
goto out;
if (ifmsh->preq_queue_len &&
time_after(jiffies,
ifmsh->last_preq + msecs_to_jiffies(ifmsh->mshcfg.dot11MeshHWMPpreqMinInterval)))
mesh_path_start_discovery(sdata);
if (test_and_clear_bit(MESH_WORK_HOUSEKEEPING, &ifmsh->wrkq_flags))
ieee80211_mesh_housekeeping(sdata);
if (test_and_clear_bit(MESH_WORK_ROOT, &ifmsh->wrkq_flags))
ieee80211_mesh_rootpath(sdata);
if (test_and_clear_bit(MESH_WORK_DRIFT_ADJUST, &ifmsh->wrkq_flags))
mesh_sync_adjust_tsf(sdata);
if (test_and_clear_bit(MESH_WORK_MBSS_CHANGED, &ifmsh->wrkq_flags))
mesh_bss_info_changed(sdata);
out:
sdata_unlock(sdata);
}
void ieee80211_mesh_init_sdata(struct ieee80211_sub_if_data *sdata)
{
struct ieee80211_if_mesh *ifmsh = &sdata->u.mesh;
static u8 zero_addr[ETH_ALEN] = {};
timer_setup(&ifmsh->housekeeping_timer,
ieee80211_mesh_housekeeping_timer, 0);
ifmsh->accepting_plinks = true;
atomic_set(&ifmsh->mpaths, 0);
mesh_rmc_init(sdata);
ifmsh->last_preq = jiffies;
ifmsh->next_perr = jiffies;
ifmsh->csa_role = IEEE80211_MESH_CSA_ROLE_NONE;
/* Allocate all mesh structures when creating the first mesh interface. */
if (!mesh_allocated)
ieee80211s_init();
mesh_pathtbl_init(sdata);
timer_setup(&ifmsh->mesh_path_timer, ieee80211_mesh_path_timer, 0);
timer_setup(&ifmsh->mesh_path_root_timer,
ieee80211_mesh_path_root_timer, 0);
INIT_LIST_HEAD(&ifmsh->preq_queue.list);
skb_queue_head_init(&ifmsh->ps.bc_buf);
spin_lock_init(&ifmsh->mesh_preq_queue_lock);
spin_lock_init(&ifmsh->sync_offset_lock);
RCU_INIT_POINTER(ifmsh->beacon, NULL);
sdata->vif.bss_conf.bssid = zero_addr;
}
void ieee80211_mesh_teardown_sdata(struct ieee80211_sub_if_data *sdata)
{
mesh_rmc_free(sdata);
mesh_pathtbl_unregister(sdata);
}
| linux-master | net/mac80211/mesh.c |
// SPDX-License-Identifier: GPL-2.0-only
/*
* Copyright 2002-2004, Instant802 Networks, Inc.
* Copyright 2005, Devicescape Software, Inc.
* Copyright (C) 2016 Intel Deutschland GmbH
*/
#include <linux/kernel.h>
#include <linux/bitops.h>
#include <linux/types.h>
#include <linux/netdevice.h>
#include <linux/export.h>
#include <asm/unaligned.h>
#include <net/mac80211.h>
#include "driver-ops.h"
#include "key.h"
#include "tkip.h"
#include "wep.h"
#define PHASE1_LOOP_COUNT 8
/*
* 2-byte by 2-byte subset of the full AES S-box table; second part of this
* table is identical to first part but byte-swapped
*/
static const u16 tkip_sbox[256] =
{
0xC6A5, 0xF884, 0xEE99, 0xF68D, 0xFF0D, 0xD6BD, 0xDEB1, 0x9154,
0x6050, 0x0203, 0xCEA9, 0x567D, 0xE719, 0xB562, 0x4DE6, 0xEC9A,
0x8F45, 0x1F9D, 0x8940, 0xFA87, 0xEF15, 0xB2EB, 0x8EC9, 0xFB0B,
0x41EC, 0xB367, 0x5FFD, 0x45EA, 0x23BF, 0x53F7, 0xE496, 0x9B5B,
0x75C2, 0xE11C, 0x3DAE, 0x4C6A, 0x6C5A, 0x7E41, 0xF502, 0x834F,
0x685C, 0x51F4, 0xD134, 0xF908, 0xE293, 0xAB73, 0x6253, 0x2A3F,
0x080C, 0x9552, 0x4665, 0x9D5E, 0x3028, 0x37A1, 0x0A0F, 0x2FB5,
0x0E09, 0x2436, 0x1B9B, 0xDF3D, 0xCD26, 0x4E69, 0x7FCD, 0xEA9F,
0x121B, 0x1D9E, 0x5874, 0x342E, 0x362D, 0xDCB2, 0xB4EE, 0x5BFB,
0xA4F6, 0x764D, 0xB761, 0x7DCE, 0x527B, 0xDD3E, 0x5E71, 0x1397,
0xA6F5, 0xB968, 0x0000, 0xC12C, 0x4060, 0xE31F, 0x79C8, 0xB6ED,
0xD4BE, 0x8D46, 0x67D9, 0x724B, 0x94DE, 0x98D4, 0xB0E8, 0x854A,
0xBB6B, 0xC52A, 0x4FE5, 0xED16, 0x86C5, 0x9AD7, 0x6655, 0x1194,
0x8ACF, 0xE910, 0x0406, 0xFE81, 0xA0F0, 0x7844, 0x25BA, 0x4BE3,
0xA2F3, 0x5DFE, 0x80C0, 0x058A, 0x3FAD, 0x21BC, 0x7048, 0xF104,
0x63DF, 0x77C1, 0xAF75, 0x4263, 0x2030, 0xE51A, 0xFD0E, 0xBF6D,
0x814C, 0x1814, 0x2635, 0xC32F, 0xBEE1, 0x35A2, 0x88CC, 0x2E39,
0x9357, 0x55F2, 0xFC82, 0x7A47, 0xC8AC, 0xBAE7, 0x322B, 0xE695,
0xC0A0, 0x1998, 0x9ED1, 0xA37F, 0x4466, 0x547E, 0x3BAB, 0x0B83,
0x8CCA, 0xC729, 0x6BD3, 0x283C, 0xA779, 0xBCE2, 0x161D, 0xAD76,
0xDB3B, 0x6456, 0x744E, 0x141E, 0x92DB, 0x0C0A, 0x486C, 0xB8E4,
0x9F5D, 0xBD6E, 0x43EF, 0xC4A6, 0x39A8, 0x31A4, 0xD337, 0xF28B,
0xD532, 0x8B43, 0x6E59, 0xDAB7, 0x018C, 0xB164, 0x9CD2, 0x49E0,
0xD8B4, 0xACFA, 0xF307, 0xCF25, 0xCAAF, 0xF48E, 0x47E9, 0x1018,
0x6FD5, 0xF088, 0x4A6F, 0x5C72, 0x3824, 0x57F1, 0x73C7, 0x9751,
0xCB23, 0xA17C, 0xE89C, 0x3E21, 0x96DD, 0x61DC, 0x0D86, 0x0F85,
0xE090, 0x7C42, 0x71C4, 0xCCAA, 0x90D8, 0x0605, 0xF701, 0x1C12,
0xC2A3, 0x6A5F, 0xAEF9, 0x69D0, 0x1791, 0x9958, 0x3A27, 0x27B9,
0xD938, 0xEB13, 0x2BB3, 0x2233, 0xD2BB, 0xA970, 0x0789, 0x33A7,
0x2DB6, 0x3C22, 0x1592, 0xC920, 0x8749, 0xAAFF, 0x5078, 0xA57A,
0x038F, 0x59F8, 0x0980, 0x1A17, 0x65DA, 0xD731, 0x84C6, 0xD0B8,
0x82C3, 0x29B0, 0x5A77, 0x1E11, 0x7BCB, 0xA8FC, 0x6DD6, 0x2C3A,
};
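/*
 * Reconstruct the full 16-bit TKIP S-box output from the half-size table
 * above: the low byte of @val indexes the table directly, the high byte
 * indexes its byte-swapped counterpart, and the two halves are XORed.
 */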
static u16 tkipS(u16 val)
{
return tkip_sbox[val & 0xff] ^ swab16(tkip_sbox[val >> 8]);
}
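/*
 * Write the first three on-air IV octets (the "WEP seed") derived from the
 * low 16 bits of the TSC: TSC1, then (TSC1 | 0x20) & 0x7f as the dummy byte
 * that avoids the known weak RC4 key classes, then TSC0.
 */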
static u8 *write_tkip_iv(u8 *pos, u16 iv16)
{
*pos++ = iv16 >> 8;
*pos++ = ((iv16 >> 8) | 0x20) & 0x7f;
*pos++ = iv16 & 0xFF;
return pos;
}
/*
* P1K := Phase1(TA, TK, TSC)
* TA = transmitter address (48 bits)
* TK = dot11DefaultKeyValue or dot11KeyMappingValue (128 bits)
* TSC = TKIP sequence counter (48 bits, only 32 msb bits used)
* P1K: 80 bits
*/
static void tkip_mixing_phase1(const u8 *tk, struct tkip_ctx *ctx,
const u8 *ta, u32 tsc_IV32)
{
int i, j;
u16 *p1k = ctx->p1k;
p1k[0] = tsc_IV32 & 0xFFFF;
p1k[1] = tsc_IV32 >> 16;
p1k[2] = get_unaligned_le16(ta + 0);
p1k[3] = get_unaligned_le16(ta + 2);
p1k[4] = get_unaligned_le16(ta + 4);
for (i = 0; i < PHASE1_LOOP_COUNT; i++) {
j = 2 * (i & 1);
p1k[0] += tkipS(p1k[4] ^ get_unaligned_le16(tk + 0 + j));
p1k[1] += tkipS(p1k[0] ^ get_unaligned_le16(tk + 4 + j));
p1k[2] += tkipS(p1k[1] ^ get_unaligned_le16(tk + 8 + j));
p1k[3] += tkipS(p1k[2] ^ get_unaligned_le16(tk + 12 + j));
p1k[4] += tkipS(p1k[3] ^ get_unaligned_le16(tk + 0 + j)) + i;
}
ctx->state = TKIP_STATE_PHASE1_DONE;
ctx->p1k_iv32 = tsc_IV32;
}
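/*
 * RC4KEY := Phase2(TK, P1K, TSC)
 * TK = temporal key (128 bits)
 * P1K = Phase 1 output key (80 bits)
 * TSC = TKIP sequence counter (only the lower 16 bits are used here)
 * RC4KEY: 128 bits, the per-packet key; its first three octets are the same
 * WEP-seed octets that go on the air as the start of the TKIP IV.
 */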
static void tkip_mixing_phase2(const u8 *tk, struct tkip_ctx *ctx,
u16 tsc_IV16, u8 *rc4key)
{
u16 ppk[6];
const u16 *p1k = ctx->p1k;
int i;
ppk[0] = p1k[0];
ppk[1] = p1k[1];
ppk[2] = p1k[2];
ppk[3] = p1k[3];
ppk[4] = p1k[4];
ppk[5] = p1k[4] + tsc_IV16;
ppk[0] += tkipS(ppk[5] ^ get_unaligned_le16(tk + 0));
ppk[1] += tkipS(ppk[0] ^ get_unaligned_le16(tk + 2));
ppk[2] += tkipS(ppk[1] ^ get_unaligned_le16(tk + 4));
ppk[3] += tkipS(ppk[2] ^ get_unaligned_le16(tk + 6));
ppk[4] += tkipS(ppk[3] ^ get_unaligned_le16(tk + 8));
ppk[5] += tkipS(ppk[4] ^ get_unaligned_le16(tk + 10));
ppk[0] += ror16(ppk[5] ^ get_unaligned_le16(tk + 12), 1);
ppk[1] += ror16(ppk[0] ^ get_unaligned_le16(tk + 14), 1);
ppk[2] += ror16(ppk[1], 1);
ppk[3] += ror16(ppk[2], 1);
ppk[4] += ror16(ppk[3], 1);
ppk[5] += ror16(ppk[4], 1);
rc4key = write_tkip_iv(rc4key, tsc_IV16);
*rc4key++ = ((ppk[5] ^ get_unaligned_le16(tk)) >> 1) & 0xFF;
for (i = 0; i < 6; i++)
put_unaligned_le16(ppk[i], rc4key + 2 * i);
}
/* Add TKIP IV and Ext. IV at @pos for the key @keyconf, encoding the packet
 * number @pn. Returns pointer to the octet following the IVs (i.e., the
 * beginning of the packet payload). */
u8 *ieee80211_tkip_add_iv(u8 *pos, struct ieee80211_key_conf *keyconf, u64 pn)
{
pos = write_tkip_iv(pos, TKIP_PN_TO_IV16(pn));
*pos++ = (keyconf->keyidx << 6) | (1 << 5) /* Ext IV */;
put_unaligned_le32(TKIP_PN_TO_IV32(pn), pos);
return pos + 4;
}
EXPORT_SYMBOL_GPL(ieee80211_tkip_add_iv);
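/*
 * Worked example for ieee80211_tkip_add_iv() above, assuming the usual
 * TKIP_PN_TO_IV16()/TKIP_PN_TO_IV32() split of the PN into its low 16 and
 * upper 32 bits: for pn = 0x000000011234 and keyidx 1, write_tkip_iv()
 * emits 0x12 0x32 0x34 (0x32 = (0x12 | 0x20) & 0x7f), the key ID octet is
 * 0x60 (keyidx << 6 with the Ext IV bit set), and the Ext IV 0x00000001 is
 * stored little-endian, so the eight IV octets are 12 32 34 60 01 00 00 00.
 */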
static void ieee80211_compute_tkip_p1k(struct ieee80211_key *key, u32 iv32)
{
struct ieee80211_sub_if_data *sdata = key->sdata;
struct tkip_ctx *ctx = &key->u.tkip.tx;
const u8 *tk = &key->conf.key[NL80211_TKIP_DATA_OFFSET_ENCR_KEY];
lockdep_assert_held(&key->u.tkip.txlock);
/*
* Update the P1K when the IV32 is different from the value it
* had when we last computed it (or when not initialised yet).
* This might flip-flop back and forth if packets are processed
* out-of-order due to the different ACs, but then we have to
* just compute the P1K more often.
*/
if (ctx->p1k_iv32 != iv32 || ctx->state == TKIP_STATE_NOT_INIT)
tkip_mixing_phase1(tk, ctx, sdata->vif.addr, iv32);
}
void ieee80211_get_tkip_p1k_iv(struct ieee80211_key_conf *keyconf,
u32 iv32, u16 *p1k)
{
struct ieee80211_key *key = (struct ieee80211_key *)
container_of(keyconf, struct ieee80211_key, conf);
struct tkip_ctx *ctx = &key->u.tkip.tx;
spin_lock_bh(&key->u.tkip.txlock);
ieee80211_compute_tkip_p1k(key, iv32);
memcpy(p1k, ctx->p1k, sizeof(ctx->p1k));
spin_unlock_bh(&key->u.tkip.txlock);
}
EXPORT_SYMBOL(ieee80211_get_tkip_p1k_iv);
void ieee80211_get_tkip_rx_p1k(struct ieee80211_key_conf *keyconf,
const u8 *ta, u32 iv32, u16 *p1k)
{
const u8 *tk = &keyconf->key[NL80211_TKIP_DATA_OFFSET_ENCR_KEY];
struct tkip_ctx ctx;
tkip_mixing_phase1(tk, &ctx, ta, iv32);
memcpy(p1k, ctx.p1k, sizeof(ctx.p1k));
}
EXPORT_SYMBOL(ieee80211_get_tkip_rx_p1k);
void ieee80211_get_tkip_p2k(struct ieee80211_key_conf *keyconf,
struct sk_buff *skb, u8 *p2k)
{
struct ieee80211_key *key = (struct ieee80211_key *)
container_of(keyconf, struct ieee80211_key, conf);
const u8 *tk = &key->conf.key[NL80211_TKIP_DATA_OFFSET_ENCR_KEY];
struct tkip_ctx *ctx = &key->u.tkip.tx;
struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)skb->data;
const u8 *data = (u8 *)hdr + ieee80211_hdrlen(hdr->frame_control);
u32 iv32 = get_unaligned_le32(&data[4]);
u16 iv16 = data[2] | (data[0] << 8);
spin_lock(&key->u.tkip.txlock);
ieee80211_compute_tkip_p1k(key, iv32);
tkip_mixing_phase2(tk, ctx, iv16, p2k);
spin_unlock(&key->u.tkip.txlock);
}
EXPORT_SYMBOL(ieee80211_get_tkip_p2k);
/*
 * Encrypt packet payload with TKIP using @key. @payload points to the
 * beginning of the buffer containing the payload. This payload must include
 * the IV/Ext. IV and space (tailroom) for the four ICV octets.
 * @payload_len is the length of the payload (_not_ including IV/ICV length).
*/
int ieee80211_tkip_encrypt_data(struct arc4_ctx *ctx,
struct ieee80211_key *key,
struct sk_buff *skb,
u8 *payload, size_t payload_len)
{
u8 rc4key[16];
ieee80211_get_tkip_p2k(&key->conf, skb, rc4key);
return ieee80211_wep_encrypt_data(ctx, rc4key, 16,
payload, payload_len);
}
/* Decrypt packet payload with TKIP using @key. @payload points to the
 * beginning of the buffer containing the IEEE 802.11 frame payload, i.e.,
 * including IV, Ext. IV, real data, Michael MIC and ICV. @payload_len is the
 * length of the payload, including IV, Ext. IV, MIC and ICV. */
int ieee80211_tkip_decrypt_data(struct arc4_ctx *ctx,
struct ieee80211_key *key,
u8 *payload, size_t payload_len, u8 *ta,
u8 *ra, int only_iv, int queue,
u32 *out_iv32, u16 *out_iv16)
{
u32 iv32;
u32 iv16;
u8 rc4key[16], keyid, *pos = payload;
int res;
const u8 *tk = &key->conf.key[NL80211_TKIP_DATA_OFFSET_ENCR_KEY];
struct tkip_ctx_rx *rx_ctx = &key->u.tkip.rx[queue];
if (payload_len < 12)
return -1;
iv16 = (pos[0] << 8) | pos[2];
keyid = pos[3];
iv32 = get_unaligned_le32(pos + 4);
pos += 8;
if (!(keyid & (1 << 5)))
return TKIP_DECRYPT_NO_EXT_IV;
if ((keyid >> 6) != key->conf.keyidx)
return TKIP_DECRYPT_INVALID_KEYIDX;
/* Reject replays if the received TSC is smaller than or equal to the
* last received value in a valid message, but with an exception for
* the case where a new key has been set and no valid frame using that
 * key has yet been received and the local RSC was initialized to 0. This
 * exception allows the very first frame sent by the transmitter to be
 * accepted even if that transmitter were to use TSC 0 (IEEE 802.11
 * describes the TSC as being initialized to 1 whenever a new key is taken into
* use).
*/
if (iv32 < rx_ctx->iv32 ||
(iv32 == rx_ctx->iv32 &&
(iv16 < rx_ctx->iv16 ||
(iv16 == rx_ctx->iv16 &&
(rx_ctx->iv32 || rx_ctx->iv16 ||
rx_ctx->ctx.state != TKIP_STATE_NOT_INIT)))))
return TKIP_DECRYPT_REPLAY;
if (only_iv) {
res = TKIP_DECRYPT_OK;
rx_ctx->ctx.state = TKIP_STATE_PHASE1_HW_UPLOADED;
goto done;
}
if (rx_ctx->ctx.state == TKIP_STATE_NOT_INIT ||
rx_ctx->iv32 != iv32) {
/* IV16 wrapped around - perform TKIP phase 1 */
tkip_mixing_phase1(tk, &rx_ctx->ctx, ta, iv32);
}
if (key->local->ops->update_tkip_key &&
key->flags & KEY_FLAG_UPLOADED_TO_HARDWARE &&
rx_ctx->ctx.state != TKIP_STATE_PHASE1_HW_UPLOADED) {
struct ieee80211_sub_if_data *sdata = key->sdata;
if (sdata->vif.type == NL80211_IFTYPE_AP_VLAN)
sdata = container_of(key->sdata->bss,
struct ieee80211_sub_if_data, u.ap);
drv_update_tkip_key(key->local, sdata, &key->conf, key->sta,
iv32, rx_ctx->ctx.p1k);
rx_ctx->ctx.state = TKIP_STATE_PHASE1_HW_UPLOADED;
}
tkip_mixing_phase2(tk, &rx_ctx->ctx, iv16, rc4key);
res = ieee80211_wep_decrypt_data(ctx, rc4key, 16, pos, payload_len - 12);
done:
if (res == TKIP_DECRYPT_OK) {
/*
 * Record the previously received IV; it will be copied into the
 * key information after MIC verification. It is possible
 * that we don't catch replays of fragments but that's OK
 * because the Michael MIC verification will then fail.
*/
*out_iv32 = iv32;
*out_iv16 = iv16;
}
return res;
}
| linux-master | net/mac80211/tkip.c |
// SPDX-License-Identifier: GPL-2.0-only
/*
* Copyright 2012-2013, Marco Porsch <[email protected]>
* Copyright 2012-2013, cozybit Inc.
* Copyright (C) 2021 Intel Corporation
* Copyright (C) 2023 Intel Corporation
*/
#include "mesh.h"
#include "wme.h"
/* mesh PS management */
/**
* mps_qos_null_get - create pre-addressed QoS Null frame for mesh powersave
* @sta: the station to get the frame for
*/
static struct sk_buff *mps_qos_null_get(struct sta_info *sta)
{
struct ieee80211_sub_if_data *sdata = sta->sdata;
struct ieee80211_local *local = sdata->local;
struct ieee80211_hdr *nullfunc; /* use 4addr header */
struct sk_buff *skb;
int size = sizeof(*nullfunc);
__le16 fc;
skb = dev_alloc_skb(local->hw.extra_tx_headroom + size + 2);
if (!skb)
return NULL;
skb_reserve(skb, local->hw.extra_tx_headroom);
nullfunc = skb_put(skb, size);
fc = cpu_to_le16(IEEE80211_FTYPE_DATA | IEEE80211_STYPE_QOS_NULLFUNC);
ieee80211_fill_mesh_addresses(nullfunc, &fc, sta->sta.addr,
sdata->vif.addr);
nullfunc->frame_control = fc;
nullfunc->duration_id = 0;
nullfunc->seq_ctrl = 0;
/* no address resolution for this frame -> set addr 1 immediately */
memcpy(nullfunc->addr1, sta->sta.addr, ETH_ALEN);
skb_put_zero(skb, 2); /* append QoS control field */
ieee80211_mps_set_frame_flags(sdata, sta, nullfunc);
return skb;
}
/**
* mps_qos_null_tx - send a QoS Null to indicate link-specific power mode
* @sta: the station to send to
*/
static void mps_qos_null_tx(struct sta_info *sta)
{
struct sk_buff *skb;
skb = mps_qos_null_get(sta);
if (!skb)
return;
mps_dbg(sta->sdata, "announcing peer-specific power mode to %pM\n",
sta->sta.addr);
/* don't unintentionally start a MPSP */
if (!test_sta_flag(sta, WLAN_STA_PS_STA)) {
u8 *qc = ieee80211_get_qos_ctl((void *) skb->data);
qc[0] |= IEEE80211_QOS_CTL_EOSP;
}
ieee80211_tx_skb(sta->sdata, skb);
}
/**
* ieee80211_mps_local_status_update - track status of local link-specific PMs
*
* @sdata: local mesh subif
*
* sets the non-peer power mode and triggers the driver PS (re-)configuration
* Return BSS_CHANGED_BEACON if a beacon update is necessary.
*/
u64 ieee80211_mps_local_status_update(struct ieee80211_sub_if_data *sdata)
{
struct ieee80211_if_mesh *ifmsh = &sdata->u.mesh;
struct sta_info *sta;
bool peering = false;
int light_sleep_cnt = 0;
int deep_sleep_cnt = 0;
u64 changed = 0;
enum nl80211_mesh_power_mode nonpeer_pm;
rcu_read_lock();
list_for_each_entry_rcu(sta, &sdata->local->sta_list, list) {
if (sdata != sta->sdata)
continue;
switch (sta->mesh->plink_state) {
case NL80211_PLINK_OPN_SNT:
case NL80211_PLINK_OPN_RCVD:
case NL80211_PLINK_CNF_RCVD:
peering = true;
break;
case NL80211_PLINK_ESTAB:
if (sta->mesh->local_pm == NL80211_MESH_POWER_LIGHT_SLEEP)
light_sleep_cnt++;
else if (sta->mesh->local_pm == NL80211_MESH_POWER_DEEP_SLEEP)
deep_sleep_cnt++;
break;
default:
break;
}
}
rcu_read_unlock();
/*
* Set non-peer mode to active during peering/scanning/authentication
* (see IEEE802.11-2012 13.14.8.3). The non-peer mesh power mode is
* deep sleep if the local STA is in light or deep sleep towards at
* least one mesh peer (see 13.14.3.1). Otherwise, set it to the
* user-configured default value.
*/
if (peering) {
mps_dbg(sdata, "setting non-peer PM to active for peering\n");
nonpeer_pm = NL80211_MESH_POWER_ACTIVE;
} else if (light_sleep_cnt || deep_sleep_cnt) {
mps_dbg(sdata, "setting non-peer PM to deep sleep\n");
nonpeer_pm = NL80211_MESH_POWER_DEEP_SLEEP;
} else {
mps_dbg(sdata, "setting non-peer PM to user value\n");
nonpeer_pm = ifmsh->mshcfg.power_mode;
}
/* need update if sleep counts move between 0 and non-zero */
if (ifmsh->nonpeer_pm != nonpeer_pm ||
!ifmsh->ps_peers_light_sleep != !light_sleep_cnt ||
!ifmsh->ps_peers_deep_sleep != !deep_sleep_cnt)
changed = BSS_CHANGED_BEACON;
ifmsh->nonpeer_pm = nonpeer_pm;
ifmsh->ps_peers_light_sleep = light_sleep_cnt;
ifmsh->ps_peers_deep_sleep = deep_sleep_cnt;
return changed;
}
/**
* ieee80211_mps_set_sta_local_pm - set local PM towards a mesh STA
*
* @sta: mesh STA
* @pm: the power mode to set
* Return BSS_CHANGED_BEACON if a beacon update is in order.
*/
u64 ieee80211_mps_set_sta_local_pm(struct sta_info *sta,
enum nl80211_mesh_power_mode pm)
{
struct ieee80211_sub_if_data *sdata = sta->sdata;
if (sta->mesh->local_pm == pm)
return 0;
mps_dbg(sdata, "local STA operates in mode %d with %pM\n",
pm, sta->sta.addr);
sta->mesh->local_pm = pm;
/*
* announce peer-specific power mode transition
* (see IEEE802.11-2012 13.14.3.2 and 13.14.3.3)
*/
if (sta->mesh->plink_state == NL80211_PLINK_ESTAB)
mps_qos_null_tx(sta);
return ieee80211_mps_local_status_update(sdata);
}
/**
* ieee80211_mps_set_frame_flags - set mesh PS flags in FC (and QoS Control)
*
* @sdata: local mesh subif
* @sta: mesh STA
* @hdr: 802.11 frame header
*
* see IEEE802.11-2012 8.2.4.1.7 and 8.2.4.5.11
*
* NOTE: sta must be given when an individually-addressed QoS frame header
* is handled, for group-addressed and management frames it is not used
*/
void ieee80211_mps_set_frame_flags(struct ieee80211_sub_if_data *sdata,
struct sta_info *sta,
struct ieee80211_hdr *hdr)
{
enum nl80211_mesh_power_mode pm;
u8 *qc;
if (WARN_ON(is_unicast_ether_addr(hdr->addr1) &&
ieee80211_is_data_qos(hdr->frame_control) &&
!sta))
return;
if (is_unicast_ether_addr(hdr->addr1) &&
ieee80211_is_data_qos(hdr->frame_control) &&
sta->mesh->plink_state == NL80211_PLINK_ESTAB)
pm = sta->mesh->local_pm;
else
pm = sdata->u.mesh.nonpeer_pm;
if (pm == NL80211_MESH_POWER_ACTIVE)
hdr->frame_control &= cpu_to_le16(~IEEE80211_FCTL_PM);
else
hdr->frame_control |= cpu_to_le16(IEEE80211_FCTL_PM);
if (!ieee80211_is_data_qos(hdr->frame_control))
return;
qc = ieee80211_get_qos_ctl(hdr);
if ((is_unicast_ether_addr(hdr->addr1) &&
pm == NL80211_MESH_POWER_DEEP_SLEEP) ||
(is_multicast_ether_addr(hdr->addr1) &&
sdata->u.mesh.ps_peers_deep_sleep > 0))
qc[1] |= (IEEE80211_QOS_CTL_MESH_PS_LEVEL >> 8);
else
qc[1] &= ~(IEEE80211_QOS_CTL_MESH_PS_LEVEL >> 8);
}
/**
* ieee80211_mps_sta_status_update - update buffering status of neighbor STA
*
* @sta: mesh STA
*
* called after change of peering status or non-peer/peer-specific power mode
*/
void ieee80211_mps_sta_status_update(struct sta_info *sta)
{
enum nl80211_mesh_power_mode pm;
bool do_buffer;
/* For non-assoc STA, prevent buffering or frame transmission */
if (sta->sta_state < IEEE80211_STA_ASSOC)
return;
/*
* use peer-specific power mode if peering is established and the
* peer's power mode is known
*/
if (sta->mesh->plink_state == NL80211_PLINK_ESTAB &&
sta->mesh->peer_pm != NL80211_MESH_POWER_UNKNOWN)
pm = sta->mesh->peer_pm;
else
pm = sta->mesh->nonpeer_pm;
do_buffer = (pm != NL80211_MESH_POWER_ACTIVE);
/* clear the MPSP flags for non-peers or active STA */
if (sta->mesh->plink_state != NL80211_PLINK_ESTAB) {
clear_sta_flag(sta, WLAN_STA_MPSP_OWNER);
clear_sta_flag(sta, WLAN_STA_MPSP_RECIPIENT);
} else if (!do_buffer) {
clear_sta_flag(sta, WLAN_STA_MPSP_OWNER);
}
/* Don't let the same PS state be set twice */
if (test_sta_flag(sta, WLAN_STA_PS_STA) == do_buffer)
return;
if (do_buffer) {
set_sta_flag(sta, WLAN_STA_PS_STA);
atomic_inc(&sta->sdata->u.mesh.ps.num_sta_ps);
mps_dbg(sta->sdata, "start PS buffering frames towards %pM\n",
sta->sta.addr);
} else {
ieee80211_sta_ps_deliver_wakeup(sta);
}
}
static void mps_set_sta_peer_pm(struct sta_info *sta,
struct ieee80211_hdr *hdr)
{
enum nl80211_mesh_power_mode pm;
u8 *qc = ieee80211_get_qos_ctl(hdr);
/*
* Test Power Management field of frame control (PW) and
* mesh power save level subfield of QoS control field (PSL)
*
* | PM | PSL| Mesh PM |
* +----+----+---------+
* | 0 |Rsrv| Active |
* | 1 | 0 | Light |
* | 1 | 1 | Deep |
*/
if (ieee80211_has_pm(hdr->frame_control)) {
if (qc[1] & (IEEE80211_QOS_CTL_MESH_PS_LEVEL >> 8))
pm = NL80211_MESH_POWER_DEEP_SLEEP;
else
pm = NL80211_MESH_POWER_LIGHT_SLEEP;
} else {
pm = NL80211_MESH_POWER_ACTIVE;
}
if (sta->mesh->peer_pm == pm)
return;
mps_dbg(sta->sdata, "STA %pM enters mode %d\n",
sta->sta.addr, pm);
sta->mesh->peer_pm = pm;
ieee80211_mps_sta_status_update(sta);
}
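/*
 * Illustrative sketch (not part of the original file): the PM/PSL table above
 * maps two received bits onto the three mesh power modes. The same decode as
 * a self-contained userspace helper, with a made-up enum standing in for
 * enum nl80211_mesh_power_mode:
 */
#include <stdbool.h>
enum mesh_pm_model { PM_ACTIVE, PM_LIGHT_SLEEP, PM_DEEP_SLEEP };
static enum mesh_pm_model mesh_pm_decode(bool pm_bit, bool psl_bit)
{
	if (!pm_bit)
		return PM_ACTIVE;	/* PSL is reserved when PM == 0 */
	return psl_bit ? PM_DEEP_SLEEP : PM_LIGHT_SLEEP;
}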
static void mps_set_sta_nonpeer_pm(struct sta_info *sta,
struct ieee80211_hdr *hdr)
{
enum nl80211_mesh_power_mode pm;
if (ieee80211_has_pm(hdr->frame_control))
pm = NL80211_MESH_POWER_DEEP_SLEEP;
else
pm = NL80211_MESH_POWER_ACTIVE;
if (sta->mesh->nonpeer_pm == pm)
return;
mps_dbg(sta->sdata, "STA %pM sets non-peer mode to %d\n",
sta->sta.addr, pm);
sta->mesh->nonpeer_pm = pm;
ieee80211_mps_sta_status_update(sta);
}
/**
* ieee80211_mps_rx_h_sta_process - frame receive handler for mesh powersave
*
* @sta: STA info that transmitted the frame
* @hdr: IEEE 802.11 (QoS) Header
*/
void ieee80211_mps_rx_h_sta_process(struct sta_info *sta,
struct ieee80211_hdr *hdr)
{
if (is_unicast_ether_addr(hdr->addr1) &&
ieee80211_is_data_qos(hdr->frame_control)) {
/*
* individually addressed QoS Data/Null frames contain
* peer link-specific PS mode towards the local STA
*/
mps_set_sta_peer_pm(sta, hdr);
/* check for mesh Peer Service Period trigger frames */
ieee80211_mpsp_trigger_process(ieee80211_get_qos_ctl(hdr),
sta, false, false);
} else {
/*
* can only determine non-peer PS mode
* (see IEEE802.11-2012 8.2.4.1.7)
*/
mps_set_sta_nonpeer_pm(sta, hdr);
}
}
/* mesh PS frame release */
static void mpsp_trigger_send(struct sta_info *sta, bool rspi, bool eosp)
{
struct ieee80211_sub_if_data *sdata = sta->sdata;
struct sk_buff *skb;
struct ieee80211_hdr *nullfunc;
struct ieee80211_tx_info *info;
u8 *qc;
skb = mps_qos_null_get(sta);
if (!skb)
return;
nullfunc = (struct ieee80211_hdr *) skb->data;
if (!eosp)
nullfunc->frame_control |=
cpu_to_le16(IEEE80211_FCTL_MOREDATA);
/*
* | RSPI | EOSP | MPSP triggering |
* +------+------+--------------------+
* | 0 | 0 | local STA is owner |
* | 0 | 1 | no MPSP (MPSP end) |
* | 1 | 0 | both STA are owner |
* | 1 | 1 | peer STA is owner | see IEEE802.11-2012 13.14.9.2
*/
qc = ieee80211_get_qos_ctl(nullfunc);
if (rspi)
qc[1] |= (IEEE80211_QOS_CTL_RSPI >> 8);
if (eosp)
qc[0] |= IEEE80211_QOS_CTL_EOSP;
info = IEEE80211_SKB_CB(skb);
info->flags |= IEEE80211_TX_CTL_NO_PS_BUFFER |
IEEE80211_TX_CTL_REQ_TX_STATUS;
mps_dbg(sdata, "sending MPSP trigger%s%s to %pM\n",
rspi ? " RSPI" : "", eosp ? " EOSP" : "", sta->sta.addr);
ieee80211_tx_skb(sdata, skb);
}
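/*
 * Illustrative sketch (not part of the original file): the RSPI/EOSP table
 * above decides who owns the mesh peer service period that a trigger frame
 * starts (or ends). The same mapping as a small self-contained userspace
 * helper; the enum is made up for the example:
 */
#include <stdbool.h>
enum mpsp_owner_model { MPSP_NONE, MPSP_LOCAL_OWNER, MPSP_PEER_OWNER, MPSP_BOTH_OWNERS };
static enum mpsp_owner_model mpsp_owner_decode(bool rspi, bool eosp)
{
	if (!rspi)
		return eosp ? MPSP_NONE : MPSP_LOCAL_OWNER;
	return eosp ? MPSP_PEER_OWNER : MPSP_BOTH_OWNERS;
}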
/**
* mpsp_qos_null_append - append QoS Null frame to MPSP skb queue if needed
* @sta: the station to handle
* @frames: the frame list to append to
*
* To properly end a mesh MPSP the last transmitted frame has to set the EOSP
* flag in the QoS Control field. In case the frame currently at the tail of
* the queue is not a QoS Data frame, append a QoS Null to carry the flag.
*/
static void mpsp_qos_null_append(struct sta_info *sta,
struct sk_buff_head *frames)
{
struct ieee80211_sub_if_data *sdata = sta->sdata;
struct sk_buff *new_skb, *skb = skb_peek_tail(frames);
struct ieee80211_hdr *hdr = (struct ieee80211_hdr *) skb->data;
struct ieee80211_tx_info *info;
if (ieee80211_is_data_qos(hdr->frame_control))
return;
new_skb = mps_qos_null_get(sta);
if (!new_skb)
return;
mps_dbg(sdata, "appending QoS Null in MPSP towards %pM\n",
sta->sta.addr);
/*
* This frame has to be transmitted last. Assign lowest priority to
* make sure it cannot pass other frames when releasing multiple ACs.
*/
new_skb->priority = 1;
skb_set_queue_mapping(new_skb, IEEE80211_AC_BK);
ieee80211_set_qos_hdr(sdata, new_skb);
info = IEEE80211_SKB_CB(new_skb);
info->control.vif = &sdata->vif;
info->control.flags |= IEEE80211_TX_INTCFL_NEED_TXPROCESSING;
__skb_queue_tail(frames, new_skb);
}
/**
* mps_frame_deliver - transmit frames during mesh powersave
*
* @sta: STA info to transmit to
* @n_frames: number of frames to transmit. -1 for all
*/
static void mps_frame_deliver(struct sta_info *sta, int n_frames)
{
struct ieee80211_local *local = sta->sdata->local;
int ac;
struct sk_buff_head frames;
struct sk_buff *skb;
bool more_data = false;
skb_queue_head_init(&frames);
/* collect frame(s) from buffers */
for (ac = 0; ac < IEEE80211_NUM_ACS; ac++) {
while (n_frames != 0) {
skb = skb_dequeue(&sta->tx_filtered[ac]);
if (!skb) {
skb = skb_dequeue(
&sta->ps_tx_buf[ac]);
if (skb)
local->total_ps_buffered--;
}
if (!skb)
break;
n_frames--;
__skb_queue_tail(&frames, skb);
}
if (!skb_queue_empty(&sta->tx_filtered[ac]) ||
!skb_queue_empty(&sta->ps_tx_buf[ac]))
more_data = true;
}
/* nothing to send? -> EOSP */
if (skb_queue_empty(&frames)) {
mpsp_trigger_send(sta, false, true);
return;
}
/* in a MPSP make sure the last skb is a QoS Data frame */
if (test_sta_flag(sta, WLAN_STA_MPSP_OWNER))
mpsp_qos_null_append(sta, &frames);
mps_dbg(sta->sdata, "sending %d frames to PS STA %pM\n",
skb_queue_len(&frames), sta->sta.addr);
/* prepare collected frames for transmission */
skb_queue_walk(&frames, skb) {
struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb);
struct ieee80211_hdr *hdr = (void *) skb->data;
/*
* Tell TX path to send this frame even though the
* STA may still remain in PS mode after this frame
* exchange.
*/
info->flags |= IEEE80211_TX_CTL_NO_PS_BUFFER;
if (more_data || !skb_queue_is_last(&frames, skb))
hdr->frame_control |=
cpu_to_le16(IEEE80211_FCTL_MOREDATA);
else
hdr->frame_control &=
cpu_to_le16(~IEEE80211_FCTL_MOREDATA);
if (skb_queue_is_last(&frames, skb) &&
ieee80211_is_data_qos(hdr->frame_control)) {
u8 *qoshdr = ieee80211_get_qos_ctl(hdr);
/* MPSP trigger frame ends service period */
*qoshdr |= IEEE80211_QOS_CTL_EOSP;
info->flags |= IEEE80211_TX_CTL_REQ_TX_STATUS;
}
}
ieee80211_add_pending_skbs(local, &frames);
sta_info_recalc_tim(sta);
}
/**
* ieee80211_mpsp_trigger_process - track status of mesh Peer Service Periods
*
* @qc: QoS Control field
* @sta: peer to start a MPSP with
* @tx: frame was transmitted by the local STA
* @acked: frame has been transmitted successfully
*
* NOTE: active mode STA may only serve as MPSP owner
*/
void ieee80211_mpsp_trigger_process(u8 *qc, struct sta_info *sta,
bool tx, bool acked)
{
u8 rspi = qc[1] & (IEEE80211_QOS_CTL_RSPI >> 8);
u8 eosp = qc[0] & IEEE80211_QOS_CTL_EOSP;
if (tx) {
if (rspi && acked)
set_sta_flag(sta, WLAN_STA_MPSP_RECIPIENT);
if (eosp)
clear_sta_flag(sta, WLAN_STA_MPSP_OWNER);
else if (acked &&
test_sta_flag(sta, WLAN_STA_PS_STA) &&
!test_and_set_sta_flag(sta, WLAN_STA_MPSP_OWNER))
mps_frame_deliver(sta, -1);
} else {
if (eosp)
clear_sta_flag(sta, WLAN_STA_MPSP_RECIPIENT);
else if (sta->mesh->local_pm != NL80211_MESH_POWER_ACTIVE)
set_sta_flag(sta, WLAN_STA_MPSP_RECIPIENT);
if (rspi && !test_and_set_sta_flag(sta, WLAN_STA_MPSP_OWNER))
mps_frame_deliver(sta, -1);
}
}
/**
* ieee80211_mps_frame_release - release frames buffered due to mesh power save
*
* @sta: mesh STA
* @elems: IEs of beacon or probe response
*
* For peers, if we have individually-addressed frames buffered or the peer
* indicates buffered frames, send a corresponding MPSP trigger frame. Since
* we do not evaluate the awake window duration, QoS Nulls are used as MPSP
* trigger frames. If the neighbour STA is not a peer, only send single frames.
*/
void ieee80211_mps_frame_release(struct sta_info *sta,
struct ieee802_11_elems *elems)
{
int ac, buffer_local = 0;
bool has_buffered = false;
if (sta->mesh->plink_state == NL80211_PLINK_ESTAB)
has_buffered = ieee80211_check_tim(elems->tim, elems->tim_len,
sta->mesh->aid);
if (has_buffered)
mps_dbg(sta->sdata, "%pM indicates buffered frames\n",
sta->sta.addr);
/* only transmit to PS STA with announced, non-zero awake window */
if (test_sta_flag(sta, WLAN_STA_PS_STA) &&
(!elems->awake_window || !get_unaligned_le16(elems->awake_window)))
return;
if (!test_sta_flag(sta, WLAN_STA_MPSP_OWNER))
for (ac = 0; ac < IEEE80211_NUM_ACS; ac++)
buffer_local += skb_queue_len(&sta->ps_tx_buf[ac]) +
skb_queue_len(&sta->tx_filtered[ac]);
if (!has_buffered && !buffer_local)
return;
if (sta->mesh->plink_state == NL80211_PLINK_ESTAB)
mpsp_trigger_send(sta, has_buffered, !buffer_local);
else
mps_frame_deliver(sta, 1);
}
| linux-master | net/mac80211/mesh_ps.c |
// SPDX-License-Identifier: GPL-2.0-only
/*
* FILS AEAD for (Re)Association Request/Response frames
* Copyright 2016, Qualcomm Atheros, Inc.
*/
#include <crypto/aes.h>
#include <crypto/hash.h>
#include <crypto/skcipher.h>
#include <crypto/utils.h>
#include "ieee80211_i.h"
#include "aes_cmac.h"
#include "fils_aead.h"
static void gf_mulx(u8 *pad)
{
u64 a = get_unaligned_be64(pad);
u64 b = get_unaligned_be64(pad + 8);
put_unaligned_be64((a << 1) | (b >> 63), pad);
put_unaligned_be64((b << 1) ^ ((a >> 63) ? 0x87 : 0), pad + 8);
}
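/*
 * Illustrative sketch (not part of the original file): gf_mulx() above is the
 * "dbl" operation used by AES-SIV (RFC 5297), i.e. doubling in GF(2^128)
 * modulo x^128 + x^7 + x^2 + x + 1. An equivalent byte-wise userspace version
 * for a 16-byte big-endian block:
 */
#include <stddef.h>
#include <stdint.h>
static void siv_dbl_bytewise(uint8_t pad[16])
{
	uint8_t carry = pad[0] & 0x80;	/* MSB of the 128-bit value */
	size_t i;
	for (i = 0; i < 15; i++)
		pad[i] = (uint8_t)((pad[i] << 1) | (pad[i + 1] >> 7));
	pad[15] = (uint8_t)(pad[15] << 1);
	if (carry)
		pad[15] ^= 0x87;	/* fold the shifted-out bit back in */
}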
static int aes_s2v(struct crypto_shash *tfm,
size_t num_elem, const u8 *addr[], size_t len[], u8 *v)
{
u8 d[AES_BLOCK_SIZE], tmp[AES_BLOCK_SIZE] = {};
SHASH_DESC_ON_STACK(desc, tfm);
size_t i;
desc->tfm = tfm;
/* D = AES-CMAC(K, <zero>) */
crypto_shash_digest(desc, tmp, AES_BLOCK_SIZE, d);
for (i = 0; i < num_elem - 1; i++) {
/* D = dbl(D) xor AES_CMAC(K, Si) */
gf_mulx(d); /* dbl */
crypto_shash_digest(desc, addr[i], len[i], tmp);
crypto_xor(d, tmp, AES_BLOCK_SIZE);
}
crypto_shash_init(desc);
if (len[i] >= AES_BLOCK_SIZE) {
/* len(Sn) >= 128 */
/* T = Sn xorend D */
crypto_shash_update(desc, addr[i], len[i] - AES_BLOCK_SIZE);
crypto_xor(d, addr[i] + len[i] - AES_BLOCK_SIZE,
AES_BLOCK_SIZE);
} else {
/* len(Sn) < 128 */
/* T = dbl(D) xor pad(Sn) */
gf_mulx(d); /* dbl */
crypto_xor(d, addr[i], len[i]);
d[len[i]] ^= 0x80;
}
/* V = AES-CMAC(K, T) */
crypto_shash_finup(desc, d, AES_BLOCK_SIZE, v);
return 0;
}
/* Note: addr[] and len[] need to have one extra slot at the end. */
static int aes_siv_encrypt(const u8 *key, size_t key_len,
const u8 *plain, size_t plain_len,
size_t num_elem, const u8 *addr[],
size_t len[], u8 *out)
{
u8 v[AES_BLOCK_SIZE];
struct crypto_shash *tfm;
struct crypto_skcipher *tfm2;
struct skcipher_request *req;
int res;
struct scatterlist src[1], dst[1];
u8 *tmp;
key_len /= 2; /* S2V key || CTR key */
addr[num_elem] = plain;
len[num_elem] = plain_len;
num_elem++;
/* S2V */
tfm = crypto_alloc_shash("cmac(aes)", 0, 0);
if (IS_ERR(tfm))
return PTR_ERR(tfm);
/* K1 for S2V */
res = crypto_shash_setkey(tfm, key, key_len);
if (!res)
res = aes_s2v(tfm, num_elem, addr, len, v);
crypto_free_shash(tfm);
if (res)
return res;
/* Use a temporary copy of the plaintext, since the output (SIV plus
* ciphertext) is written in place over the original plaintext.
*/
tmp = kmemdup(plain, plain_len, GFP_KERNEL);
if (!tmp)
return -ENOMEM;
/* IV for CTR before encrypted data */
memcpy(out, v, AES_BLOCK_SIZE);
/* Synthetic IV to be used as the initial counter in CTR:
* Q = V bitand (1^64 || 0^1 || 1^31 || 0^1 || 1^31)
*/
v[8] &= 0x7f;
v[12] &= 0x7f;
/* CTR */
tfm2 = crypto_alloc_skcipher("ctr(aes)", 0, CRYPTO_ALG_ASYNC);
if (IS_ERR(tfm2)) {
kfree(tmp);
return PTR_ERR(tfm2);
}
/* K2 for CTR */
res = crypto_skcipher_setkey(tfm2, key + key_len, key_len);
if (res)
goto fail;
req = skcipher_request_alloc(tfm2, GFP_KERNEL);
if (!req) {
res = -ENOMEM;
goto fail;
}
sg_init_one(src, tmp, plain_len);
sg_init_one(dst, out + AES_BLOCK_SIZE, plain_len);
skcipher_request_set_crypt(req, src, dst, plain_len, v);
res = crypto_skcipher_encrypt(req);
skcipher_request_free(req);
fail:
kfree(tmp);
crypto_free_skcipher(tfm2);
return res;
}
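/*
 * Illustrative note (not part of the original file): the two "&= 0x7f" masks
 * above (and in aes_siv_decrypt() below) implement the RFC 5297 step
 * Q = V bitand (1^64 || 0^1 || 1^31 || 0^1 || 1^31): the most significant bit
 * of the third and of the fourth 32-bit word of the SIV is cleared before it
 * is used as the initial AES-CTR counter, so that counter increments cannot
 * carry across those word boundaries. As a tiny self-contained helper:
 */
#include <stdint.h>
static void siv_to_ctr_iv(uint8_t q[16])
{
	q[8] &= 0x7f;	/* MSB of the third 32-bit word (byte 8) */
	q[12] &= 0x7f;	/* MSB of the fourth 32-bit word (byte 12) */
}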
/* Note: addr[] and len[] need to have one extra slot at the end. */
static int aes_siv_decrypt(const u8 *key, size_t key_len,
const u8 *iv_crypt, size_t iv_c_len,
size_t num_elem, const u8 *addr[], size_t len[],
u8 *out)
{
struct crypto_shash *tfm;
struct crypto_skcipher *tfm2;
struct skcipher_request *req;
struct scatterlist src[1], dst[1];
size_t crypt_len;
int res;
u8 frame_iv[AES_BLOCK_SIZE], iv[AES_BLOCK_SIZE];
u8 check[AES_BLOCK_SIZE];
crypt_len = iv_c_len - AES_BLOCK_SIZE;
key_len /= 2; /* S2V key || CTR key */
addr[num_elem] = out;
len[num_elem] = crypt_len;
num_elem++;
memcpy(iv, iv_crypt, AES_BLOCK_SIZE);
memcpy(frame_iv, iv_crypt, AES_BLOCK_SIZE);
/* Synthetic IV to be used as the initial counter in CTR:
* Q = V bitand (1^64 || 0^1 || 1^31 || 0^1 || 1^31)
*/
iv[8] &= 0x7f;
iv[12] &= 0x7f;
/* CTR */
tfm2 = crypto_alloc_skcipher("ctr(aes)", 0, CRYPTO_ALG_ASYNC);
if (IS_ERR(tfm2))
return PTR_ERR(tfm2);
/* K2 for CTR */
res = crypto_skcipher_setkey(tfm2, key + key_len, key_len);
if (res) {
crypto_free_skcipher(tfm2);
return res;
}
req = skcipher_request_alloc(tfm2, GFP_KERNEL);
if (!req) {
crypto_free_skcipher(tfm2);
return -ENOMEM;
}
sg_init_one(src, iv_crypt + AES_BLOCK_SIZE, crypt_len);
sg_init_one(dst, out, crypt_len);
skcipher_request_set_crypt(req, src, dst, crypt_len, iv);
res = crypto_skcipher_decrypt(req);
skcipher_request_free(req);
crypto_free_skcipher(tfm2);
if (res)
return res;
/* S2V */
tfm = crypto_alloc_shash("cmac(aes)", 0, 0);
if (IS_ERR(tfm))
return PTR_ERR(tfm);
/* K1 for S2V */
res = crypto_shash_setkey(tfm, key, key_len);
if (!res)
res = aes_s2v(tfm, num_elem, addr, len, check);
crypto_free_shash(tfm);
if (res)
return res;
if (memcmp(check, frame_iv, AES_BLOCK_SIZE) != 0)
return -EINVAL;
return 0;
}
int fils_encrypt_assoc_req(struct sk_buff *skb,
struct ieee80211_mgd_assoc_data *assoc_data)
{
struct ieee80211_mgmt *mgmt = (void *)skb->data;
u8 *capab, *ies, *encr;
const u8 *addr[5 + 1];
const struct element *session;
size_t len[5 + 1];
size_t crypt_len;
if (ieee80211_is_reassoc_req(mgmt->frame_control)) {
capab = (u8 *)&mgmt->u.reassoc_req.capab_info;
ies = mgmt->u.reassoc_req.variable;
} else {
capab = (u8 *)&mgmt->u.assoc_req.capab_info;
ies = mgmt->u.assoc_req.variable;
}
session = cfg80211_find_ext_elem(WLAN_EID_EXT_FILS_SESSION,
ies, skb->data + skb->len - ies);
if (!session || session->datalen != 1 + 8)
return -EINVAL;
/* encrypt after FILS Session element */
encr = (u8 *)session->data + 1 + 8;
/* AES-SIV AAD vectors */
/* The STA's MAC address */
addr[0] = mgmt->sa;
len[0] = ETH_ALEN;
/* The AP's BSSID */
addr[1] = mgmt->da;
len[1] = ETH_ALEN;
/* The STA's nonce */
addr[2] = assoc_data->fils_nonces;
len[2] = FILS_NONCE_LEN;
/* The AP's nonce */
addr[3] = &assoc_data->fils_nonces[FILS_NONCE_LEN];
len[3] = FILS_NONCE_LEN;
/* The (Re)Association Request frame from the Capability Information
* field to the FILS Session element (both inclusive).
*/
addr[4] = capab;
len[4] = encr - capab;
crypt_len = skb->data + skb->len - encr;
skb_put(skb, AES_BLOCK_SIZE);
return aes_siv_encrypt(assoc_data->fils_kek, assoc_data->fils_kek_len,
encr, crypt_len, 5, addr, len, encr);
}
int fils_decrypt_assoc_resp(struct ieee80211_sub_if_data *sdata,
u8 *frame, size_t *frame_len,
struct ieee80211_mgd_assoc_data *assoc_data)
{
struct ieee80211_mgmt *mgmt = (void *)frame;
u8 *capab, *ies, *encr;
const u8 *addr[5 + 1];
const struct element *session;
size_t len[5 + 1];
int res;
size_t crypt_len;
if (*frame_len < 24 + 6)
return -EINVAL;
capab = (u8 *)&mgmt->u.assoc_resp.capab_info;
ies = mgmt->u.assoc_resp.variable;
session = cfg80211_find_ext_elem(WLAN_EID_EXT_FILS_SESSION,
ies, frame + *frame_len - ies);
if (!session || session->datalen != 1 + 8) {
mlme_dbg(sdata,
"No (valid) FILS Session element in (Re)Association Response frame from %pM",
mgmt->sa);
return -EINVAL;
}
/* decrypt after FILS Session element */
encr = (u8 *)session->data + 1 + 8;
/* AES-SIV AAD vectors */
/* The AP's BSSID */
addr[0] = mgmt->sa;
len[0] = ETH_ALEN;
/* The STA's MAC address */
addr[1] = mgmt->da;
len[1] = ETH_ALEN;
/* The AP's nonce */
addr[2] = &assoc_data->fils_nonces[FILS_NONCE_LEN];
len[2] = FILS_NONCE_LEN;
/* The STA's nonce */
addr[3] = assoc_data->fils_nonces;
len[3] = FILS_NONCE_LEN;
/* The (Re)Association Response frame from the Capability Information
* field to the FILS Session element (both inclusive).
*/
addr[4] = capab;
len[4] = encr - capab;
crypt_len = frame + *frame_len - encr;
if (crypt_len < AES_BLOCK_SIZE) {
mlme_dbg(sdata,
"Not enough room for AES-SIV data after FILS Session element in (Re)Association Response frame from %pM",
mgmt->sa);
return -EINVAL;
}
res = aes_siv_decrypt(assoc_data->fils_kek, assoc_data->fils_kek_len,
encr, crypt_len, 5, addr, len, encr);
if (res != 0) {
mlme_dbg(sdata,
"AES-SIV decryption of (Re)Association Response frame from %pM failed",
mgmt->sa);
return res;
}
*frame_len -= AES_BLOCK_SIZE;
return 0;
}
| linux-master | net/mac80211/fils_aead.c |
/*
* net/ife/ife.c - Inter-FE protocol based on ForCES WG InterFE LFB
* Copyright (c) 2015 Jamal Hadi Salim <[email protected]>
* Copyright (c) 2017 Yotam Gigi <[email protected]>
*
* Refer to: draft-ietf-forces-interfelfb-03 and netdev01 paper:
* "Distributing Linux Traffic Control Classifier-Action Subsystem"
* Authors: Jamal Hadi Salim and Damascene M. Joachimpillai
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation.
*/
#include <linux/types.h>
#include <linux/kernel.h>
#include <linux/string.h>
#include <linux/errno.h>
#include <linux/skbuff.h>
#include <linux/rtnetlink.h>
#include <linux/module.h>
#include <linux/init.h>
#include <net/net_namespace.h>
#include <net/netlink.h>
#include <net/pkt_sched.h>
#include <linux/etherdevice.h>
#include <net/ife.h>
struct ifeheadr {
__be16 metalen;
u8 tlv_data[];
};
void *ife_encode(struct sk_buff *skb, u16 metalen)
{
/* OUTERHDR:TOTMETALEN:{TLVHDR:Metadatum:TLVHDR..}:ORIGDATA
* where ORIGDATA = original ethernet header ...
*/
int hdrm = metalen + IFE_METAHDRLEN;
int total_push = hdrm + skb->dev->hard_header_len;
struct ifeheadr *ifehdr;
struct ethhdr *iethh; /* inner ether header */
int skboff = 0;
int err;
err = skb_cow_head(skb, total_push);
if (unlikely(err))
return NULL;
iethh = (struct ethhdr *) skb->data;
__skb_push(skb, total_push);
memcpy(skb->data, iethh, skb->dev->hard_header_len);
skb_reset_mac_header(skb);
skboff += skb->dev->hard_header_len;
/* total metadata length */
ifehdr = (struct ifeheadr *) (skb->data + skboff);
metalen += IFE_METAHDRLEN;
ifehdr->metalen = htons(metalen);
return ifehdr->tlv_data;
}
EXPORT_SYMBOL_GPL(ife_encode);
void *ife_decode(struct sk_buff *skb, u16 *metalen)
{
struct ifeheadr *ifehdr;
int total_pull;
u16 ifehdrln;
if (!pskb_may_pull(skb, skb->dev->hard_header_len + IFE_METAHDRLEN))
return NULL;
ifehdr = (struct ifeheadr *) (skb->data + skb->dev->hard_header_len);
ifehdrln = ntohs(ifehdr->metalen);
total_pull = skb->dev->hard_header_len + ifehdrln;
if (unlikely(ifehdrln < 2))
return NULL;
if (unlikely(!pskb_may_pull(skb, total_pull)))
return NULL;
skb_set_mac_header(skb, total_pull);
__skb_pull(skb, total_pull);
*metalen = ifehdrln - IFE_METAHDRLEN;
return &ifehdr->tlv_data;
}
EXPORT_SYMBOL_GPL(ife_decode);
struct meta_tlvhdr {
__be16 type;
__be16 len;
};
static bool __ife_tlv_meta_valid(const unsigned char *skbdata,
const unsigned char *ifehdr_end)
{
const struct meta_tlvhdr *tlv;
u16 tlvlen;
if (unlikely(skbdata + sizeof(*tlv) > ifehdr_end))
return false;
tlv = (const struct meta_tlvhdr *)skbdata;
tlvlen = ntohs(tlv->len);
/* the TLV length field includes the header, so check against the minimum */
if (tlvlen < NLA_HDRLEN)
return false;
/* reject lengths where NLA_ALIGN() would wrap the u16 */
if (NLA_ALIGN(tlvlen) < tlvlen)
return false;
if (unlikely(skbdata + NLA_ALIGN(tlvlen) > ifehdr_end))
return false;
return true;
}
/* Caller takes care of presenting data in network order
*/
void *ife_tlv_meta_decode(void *skbdata, const void *ifehdr_end, u16 *attrtype,
u16 *dlen, u16 *totlen)
{
struct meta_tlvhdr *tlv;
if (!__ife_tlv_meta_valid(skbdata, ifehdr_end))
return NULL;
tlv = (struct meta_tlvhdr *)skbdata;
*dlen = ntohs(tlv->len) - NLA_HDRLEN;
*attrtype = ntohs(tlv->type);
if (totlen)
*totlen = nla_total_size(*dlen);
return skbdata + sizeof(struct meta_tlvhdr);
}
EXPORT_SYMBOL_GPL(ife_tlv_meta_decode);
void *ife_tlv_meta_next(void *skbdata)
{
struct meta_tlvhdr *tlv = (struct meta_tlvhdr *) skbdata;
u16 tlvlen = ntohs(tlv->len);
tlvlen = NLA_ALIGN(tlvlen);
return skbdata + tlvlen;
}
EXPORT_SYMBOL_GPL(ife_tlv_meta_next);
/* Caller takes care of presenting data in network order
*/
int ife_tlv_meta_encode(void *skbdata, u16 attrtype, u16 dlen, const void *dval)
{
__be32 *tlv = (__be32 *) (skbdata);
u16 totlen = nla_total_size(dlen); /*alignment + hdr */
char *dptr = (char *) tlv + NLA_HDRLEN;
u32 htlv = attrtype << 16 | (dlen + NLA_HDRLEN);
*tlv = htonl(htlv);
memset(dptr, 0, totlen - NLA_HDRLEN);
memcpy(dptr, dval, dlen);
return totlen;
}
EXPORT_SYMBOL_GPL(ife_tlv_meta_encode);
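/*
 * Illustrative worked example (not part of the original file), using made-up
 * values: for attrtype 0x0001 and a 6-byte payload (e.g. a MAC address),
 * with NLA_HDRLEN == 4,
 *
 *	htlv   = 0x0001 << 16 | (6 + 4) = 0x0001000a	(stored big-endian)
 *	totlen = NLA_ALIGN(4 + 6)       = 12
 *
 * so ife_tlv_meta_encode() writes a 4-byte TLV header, 6 payload bytes and
 * 2 bytes of zero padding, and returns 12.
 */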
MODULE_AUTHOR("Jamal Hadi Salim <[email protected]>");
MODULE_AUTHOR("Yotam Gigi <[email protected]>");
MODULE_DESCRIPTION("Inter-FE LFB action");
MODULE_LICENSE("GPL");
| linux-master | net/ife/ife.c |
/*
* Copyright (c) 2006 Oracle. All rights reserved.
*
* This software is available to you under a choice of one of two
* licenses. You may choose to be licensed under the terms of the GNU
* General Public License (GPL) Version 2, available from the file
* COPYING in the main directory of this source tree, or the
* OpenIB.org BSD license below:
*
* Redistribution and use in source and binary forms, with or
* without modification, are permitted provided that the following
* conditions are met:
*
* - Redistributions of source code must retain the above
* copyright notice, this list of conditions and the following
* disclaimer.
*
* - Redistributions in binary form must reproduce the above
* copyright notice, this list of conditions and the following
* disclaimer in the documentation and/or other materials
* provided with the distribution.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
* EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
* MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
* NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
* BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
* ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
* CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
* SOFTWARE.
*
*/
#include <linux/percpu.h>
#include <linux/seq_file.h>
#include <linux/proc_fs.h>
#include "rds.h"
#include "ib.h"
DEFINE_PER_CPU_SHARED_ALIGNED(struct rds_ib_statistics, rds_ib_stats);
static const char *const rds_ib_stat_names[] = {
"ib_connect_raced",
"ib_listen_closed_stale",
"ib_evt_handler_call",
"ib_tasklet_call",
"ib_tx_cq_event",
"ib_tx_ring_full",
"ib_tx_throttle",
"ib_tx_sg_mapping_failure",
"ib_tx_stalled",
"ib_tx_credit_updates",
"ib_rx_cq_event",
"ib_rx_ring_empty",
"ib_rx_refill_from_cq",
"ib_rx_refill_from_thread",
"ib_rx_alloc_limit",
"ib_rx_total_frags",
"ib_rx_total_incs",
"ib_rx_credit_updates",
"ib_ack_sent",
"ib_ack_send_failure",
"ib_ack_send_delayed",
"ib_ack_send_piggybacked",
"ib_ack_received",
"ib_rdma_mr_8k_alloc",
"ib_rdma_mr_8k_free",
"ib_rdma_mr_8k_used",
"ib_rdma_mr_8k_pool_flush",
"ib_rdma_mr_8k_pool_wait",
"ib_rdma_mr_8k_pool_depleted",
"ib_rdma_mr_1m_alloc",
"ib_rdma_mr_1m_free",
"ib_rdma_mr_1m_used",
"ib_rdma_mr_1m_pool_flush",
"ib_rdma_mr_1m_pool_wait",
"ib_rdma_mr_1m_pool_depleted",
"ib_rdma_mr_8k_reused",
"ib_rdma_mr_1m_reused",
"ib_atomic_cswp",
"ib_atomic_fadd",
};
unsigned int rds_ib_stats_info_copy(struct rds_info_iterator *iter,
unsigned int avail)
{
struct rds_ib_statistics stats = {0, };
uint64_t *src;
uint64_t *sum;
size_t i;
int cpu;
if (avail < ARRAY_SIZE(rds_ib_stat_names))
goto out;
for_each_online_cpu(cpu) {
src = (uint64_t *)&(per_cpu(rds_ib_stats, cpu));
sum = (uint64_t *)&stats;
for (i = 0; i < sizeof(stats) / sizeof(uint64_t); i++)
*(sum++) += *(src++);
}
rds_stats_info_copy(iter, (uint64_t *)&stats, rds_ib_stat_names,
ARRAY_SIZE(rds_ib_stat_names));
out:
return ARRAY_SIZE(rds_ib_stat_names);
}
| linux-master | net/rds/ib_stats.c |
/*
* Copyright (c) 2007, 2017 Oracle and/or its affiliates. All rights reserved.
*
* This software is available to you under a choice of one of two
* licenses. You may choose to be licensed under the terms of the GNU
* General Public License (GPL) Version 2, available from the file
* COPYING in the main directory of this source tree, or the
* OpenIB.org BSD license below:
*
* Redistribution and use in source and binary forms, with or
* without modification, are permitted provided that the following
* conditions are met:
*
* - Redistributions of source code must retain the above
* copyright notice, this list of conditions and the following
* disclaimer.
*
* - Redistributions in binary form must reproduce the above
* copyright notice, this list of conditions and the following
* disclaimer in the documentation and/or other materials
* provided with the distribution.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
* EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
* MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
* NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
* BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
* ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
* CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
* SOFTWARE.
*
*/
#include <linux/slab.h>
#include <linux/types.h>
#include <linux/rbtree.h>
#include <linux/bitops.h>
#include <linux/export.h>
#include "rds.h"
/*
* This file implements the receive side of the unconventional congestion
* management in RDS.
*
* Messages waiting in the receive queue on the receiving socket are accounted
* against the sockets SO_RCVBUF option value. Only the payload bytes in the
* message are accounted for. If the number of bytes queued equals or exceeds
* rcvbuf then the socket is congested. All sends attempted to this socket's
* address should then block, or fail with -EWOULDBLOCK for non-blocking sends.
*
* Applications are expected to be reasonably tuned such that this situation
* very rarely occurs. An application encountering this "back-pressure" is
* considered a bug.
*
* This is implemented by having each node maintain bitmaps which indicate
* which ports on bound addresses are congested. As the bitmap changes it is
* sent through all the connections which terminate in the local address of the
* bitmap which changed.
*
* The bitmaps are allocated as connections are brought up. This avoids
* allocation in the interrupt handling path which queues messages on sockets.
* The dense bitmaps let transports send the entire bitmap on any bitmap change
* reasonably efficiently. This is much easier to implement than some
* finer-grained communication of per-port congestion. The sender does a very
* inexpensive bit test to test if the port it's about to send to is congested
* or not.
*/
/*
* Interaction with poll is a tad tricky. We want all processes stuck in
* poll to wake up and check whether a congested destination became uncongested.
* The really sad thing is we have no idea which destinations the application
* wants to send to - we don't even know which rds_connections are involved.
* So until we implement a more flexible rds poll interface, we have to make
* do with this:
* We maintain a global counter that is incremented each time a congestion map
* update is received. Each rds socket tracks this value, and if rds_poll
* finds that the saved generation number is smaller than the global generation
* number, it wakes up the process.
*/
static atomic_t rds_cong_generation = ATOMIC_INIT(0);
/*
* Congestion monitoring
*/
static LIST_HEAD(rds_cong_monitor);
static DEFINE_RWLOCK(rds_cong_monitor_lock);
/*
* Yes, a global lock. It's used so infrequently that it's worth keeping it
* global to simplify the locking. It's only used in the following
* circumstances:
*
* - on connection buildup to associate a conn with its maps
* - on map changes to inform conns of a new map to send
*
* It's sadly ordered under the socket callback lock and the connection lock.
* Receive paths can mark ports congested from interrupt context so the
* lock masks interrupts.
*/
static DEFINE_SPINLOCK(rds_cong_lock);
static struct rb_root rds_cong_tree = RB_ROOT;
static struct rds_cong_map *rds_cong_tree_walk(const struct in6_addr *addr,
struct rds_cong_map *insert)
{
struct rb_node **p = &rds_cong_tree.rb_node;
struct rb_node *parent = NULL;
struct rds_cong_map *map;
while (*p) {
int diff;
parent = *p;
map = rb_entry(parent, struct rds_cong_map, m_rb_node);
diff = rds_addr_cmp(addr, &map->m_addr);
if (diff < 0)
p = &(*p)->rb_left;
else if (diff > 0)
p = &(*p)->rb_right;
else
return map;
}
if (insert) {
rb_link_node(&insert->m_rb_node, parent, p);
rb_insert_color(&insert->m_rb_node, &rds_cong_tree);
}
return NULL;
}
/*
* There is only ever one bitmap for any address. Connections try and allocate
* these bitmaps in the process getting pointers to them. The bitmaps are only
* ever freed as the module is removed after all connections have been freed.
*/
static struct rds_cong_map *rds_cong_from_addr(const struct in6_addr *addr)
{
struct rds_cong_map *map;
struct rds_cong_map *ret = NULL;
unsigned long zp;
unsigned long i;
unsigned long flags;
map = kzalloc(sizeof(struct rds_cong_map), GFP_KERNEL);
if (!map)
return NULL;
map->m_addr = *addr;
init_waitqueue_head(&map->m_waitq);
INIT_LIST_HEAD(&map->m_conn_list);
for (i = 0; i < RDS_CONG_MAP_PAGES; i++) {
zp = get_zeroed_page(GFP_KERNEL);
if (zp == 0)
goto out;
map->m_page_addrs[i] = zp;
}
spin_lock_irqsave(&rds_cong_lock, flags);
ret = rds_cong_tree_walk(addr, map);
spin_unlock_irqrestore(&rds_cong_lock, flags);
if (!ret) {
ret = map;
map = NULL;
}
out:
if (map) {
for (i = 0; i < RDS_CONG_MAP_PAGES && map->m_page_addrs[i]; i++)
free_page(map->m_page_addrs[i]);
kfree(map);
}
rdsdebug("map %p for addr %pI6c\n", ret, addr);
return ret;
}
/*
* Put the conn on its local map's list. This is called when the conn is
* really added to the hash. It's nested under the rds_conn_lock, sadly.
*/
void rds_cong_add_conn(struct rds_connection *conn)
{
unsigned long flags;
rdsdebug("conn %p now on map %p\n", conn, conn->c_lcong);
spin_lock_irqsave(&rds_cong_lock, flags);
list_add_tail(&conn->c_map_item, &conn->c_lcong->m_conn_list);
spin_unlock_irqrestore(&rds_cong_lock, flags);
}
void rds_cong_remove_conn(struct rds_connection *conn)
{
unsigned long flags;
rdsdebug("removing conn %p from map %p\n", conn, conn->c_lcong);
spin_lock_irqsave(&rds_cong_lock, flags);
list_del_init(&conn->c_map_item);
spin_unlock_irqrestore(&rds_cong_lock, flags);
}
int rds_cong_get_maps(struct rds_connection *conn)
{
conn->c_lcong = rds_cong_from_addr(&conn->c_laddr);
conn->c_fcong = rds_cong_from_addr(&conn->c_faddr);
if (!(conn->c_lcong && conn->c_fcong))
return -ENOMEM;
return 0;
}
void rds_cong_queue_updates(struct rds_cong_map *map)
{
struct rds_connection *conn;
unsigned long flags;
spin_lock_irqsave(&rds_cong_lock, flags);
list_for_each_entry(conn, &map->m_conn_list, c_map_item) {
struct rds_conn_path *cp = &conn->c_path[0];
rcu_read_lock();
if (!test_and_set_bit(0, &conn->c_map_queued) &&
!rds_destroy_pending(cp->cp_conn)) {
rds_stats_inc(s_cong_update_queued);
/* We cannot inline the call to rds_send_xmit() here
* for two reasons (both pertaining to a TCP transport):
* 1. When we get here from the receive path, we
* are already holding the sock_lock (held by
* tcp_v4_rcv()). So inlining calls to
* tcp_setsockopt and/or tcp_sendmsg will deadlock
* when it tries to get the sock_lock().
* 2. Interrupts are masked so that we can mark the
* port congested from both send and recv paths.
* (See comment around declaration of rds_cong_lock).
* An attempt to get the sock_lock() here will
* therefore trigger warnings.
* Defer the xmit to rds_send_worker() instead.
*/
queue_delayed_work(rds_wq, &cp->cp_send_w, 0);
}
rcu_read_unlock();
}
spin_unlock_irqrestore(&rds_cong_lock, flags);
}
void rds_cong_map_updated(struct rds_cong_map *map, uint64_t portmask)
{
rdsdebug("waking map %p for %pI4\n",
map, &map->m_addr);
rds_stats_inc(s_cong_update_received);
atomic_inc(&rds_cong_generation);
if (waitqueue_active(&map->m_waitq))
wake_up(&map->m_waitq);
if (waitqueue_active(&rds_poll_waitq))
wake_up_all(&rds_poll_waitq);
if (portmask && !list_empty(&rds_cong_monitor)) {
unsigned long flags;
struct rds_sock *rs;
read_lock_irqsave(&rds_cong_monitor_lock, flags);
list_for_each_entry(rs, &rds_cong_monitor, rs_cong_list) {
spin_lock(&rs->rs_lock);
rs->rs_cong_notify |= (rs->rs_cong_mask & portmask);
rs->rs_cong_mask &= ~portmask;
spin_unlock(&rs->rs_lock);
if (rs->rs_cong_notify)
rds_wake_sk_sleep(rs);
}
read_unlock_irqrestore(&rds_cong_monitor_lock, flags);
}
}
EXPORT_SYMBOL_GPL(rds_cong_map_updated);
int rds_cong_updated_since(unsigned long *recent)
{
unsigned long gen = atomic_read(&rds_cong_generation);
if (likely(*recent == gen))
return 0;
*recent = gen;
return 1;
}
/*
* We're called under the locking that protects the sockets receive buffer
* consumption. This makes it a lot easier for the caller to only call us
* when it knows that an existing set bit needs to be cleared, and vice versa.
* We can't block and we need to deal with concurrent sockets working against
* the same per-address map.
*/
void rds_cong_set_bit(struct rds_cong_map *map, __be16 port)
{
unsigned long i;
unsigned long off;
rdsdebug("setting congestion for %pI4:%u in map %p\n",
&map->m_addr, ntohs(port), map);
i = be16_to_cpu(port) / RDS_CONG_MAP_PAGE_BITS;
off = be16_to_cpu(port) % RDS_CONG_MAP_PAGE_BITS;
set_bit_le(off, (void *)map->m_page_addrs[i]);
}
void rds_cong_clear_bit(struct rds_cong_map *map, __be16 port)
{
unsigned long i;
unsigned long off;
rdsdebug("clearing congestion for %pI4:%u in map %p\n",
&map->m_addr, ntohs(port), map);
i = be16_to_cpu(port) / RDS_CONG_MAP_PAGE_BITS;
off = be16_to_cpu(port) % RDS_CONG_MAP_PAGE_BITS;
clear_bit_le(off, (void *)map->m_page_addrs[i]);
}
static int rds_cong_test_bit(struct rds_cong_map *map, __be16 port)
{
unsigned long i;
unsigned long off;
i = be16_to_cpu(port) / RDS_CONG_MAP_PAGE_BITS;
off = be16_to_cpu(port) % RDS_CONG_MAP_PAGE_BITS;
return test_bit_le(off, (void *)map->m_page_addrs[i]);
}
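/*
 * Illustrative worked example (not part of the original file), assuming
 * 4 KiB pages so that RDS_CONG_MAP_PAGE_BITS == 32768 and the 64K port space
 * spans two bitmap pages: for port 40000,
 *
 *	i   = 40000 / 32768 = 1		(second page of the map)
 *	off = 40000 % 32768 = 7232	(bit within that page)
 *
 * so rds_cong_set_bit() sets bit 7232 of m_page_addrs[1], and
 * rds_cong_test_bit() reads the same bit back.
 */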
void rds_cong_add_socket(struct rds_sock *rs)
{
unsigned long flags;
write_lock_irqsave(&rds_cong_monitor_lock, flags);
if (list_empty(&rs->rs_cong_list))
list_add(&rs->rs_cong_list, &rds_cong_monitor);
write_unlock_irqrestore(&rds_cong_monitor_lock, flags);
}
void rds_cong_remove_socket(struct rds_sock *rs)
{
unsigned long flags;
struct rds_cong_map *map;
write_lock_irqsave(&rds_cong_monitor_lock, flags);
list_del_init(&rs->rs_cong_list);
write_unlock_irqrestore(&rds_cong_monitor_lock, flags);
/* update congestion map for now-closed port */
spin_lock_irqsave(&rds_cong_lock, flags);
map = rds_cong_tree_walk(&rs->rs_bound_addr, NULL);
spin_unlock_irqrestore(&rds_cong_lock, flags);
if (map && rds_cong_test_bit(map, rs->rs_bound_port)) {
rds_cong_clear_bit(map, rs->rs_bound_port);
rds_cong_queue_updates(map);
}
}
int rds_cong_wait(struct rds_cong_map *map, __be16 port, int nonblock,
struct rds_sock *rs)
{
if (!rds_cong_test_bit(map, port))
return 0;
if (nonblock) {
if (rs && rs->rs_cong_monitor) {
unsigned long flags;
/* It would have been nice to have an atomic set_bit on
* a uint64_t. */
spin_lock_irqsave(&rs->rs_lock, flags);
rs->rs_cong_mask |= RDS_CONG_MONITOR_MASK(ntohs(port));
spin_unlock_irqrestore(&rs->rs_lock, flags);
/* Test again - a congestion update may have arrived in
* the meantime. */
if (!rds_cong_test_bit(map, port))
return 0;
}
rds_stats_inc(s_cong_send_error);
return -ENOBUFS;
}
rds_stats_inc(s_cong_send_blocked);
rdsdebug("waiting on map %p for port %u\n", map, be16_to_cpu(port));
return wait_event_interruptible(map->m_waitq,
!rds_cong_test_bit(map, port));
}
void rds_cong_exit(void)
{
struct rb_node *node;
struct rds_cong_map *map;
unsigned long i;
while ((node = rb_first(&rds_cong_tree))) {
map = rb_entry(node, struct rds_cong_map, m_rb_node);
rdsdebug("freeing map %p\n", map);
rb_erase(&map->m_rb_node, &rds_cong_tree);
for (i = 0; i < RDS_CONG_MAP_PAGES && map->m_page_addrs[i]; i++)
free_page(map->m_page_addrs[i]);
kfree(map);
}
}
/*
* Allocate a RDS message containing a congestion update.
*/
struct rds_message *rds_cong_update_alloc(struct rds_connection *conn)
{
struct rds_cong_map *map = conn->c_lcong;
struct rds_message *rm;
rm = rds_message_map_pages(map->m_page_addrs, RDS_CONG_MAP_BYTES);
if (!IS_ERR(rm))
rm->m_inc.i_hdr.h_flags = RDS_FLAG_CONG_BITMAP;
return rm;
}
| linux-master | net/rds/cong.c |
/*
* Copyright (c) 2006, 2017 Oracle and/or its affiliates. All rights reserved.
*
* This software is available to you under a choice of one of two
* licenses. You may choose to be licensed under the terms of the GNU
* General Public License (GPL) Version 2, available from the file
* COPYING in the main directory of this source tree, or the
* OpenIB.org BSD license below:
*
* Redistribution and use in source and binary forms, with or
* without modification, are permitted provided that the following
* conditions are met:
*
* - Redistributions of source code must retain the above
* copyright notice, this list of conditions and the following
* disclaimer.
*
* - Redistributions in binary form must reproduce the above
* copyright notice, this list of conditions and the following
* disclaimer in the documentation and/or other materials
* provided with the distribution.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
* EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
* MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
* NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
* BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
* ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
* CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
* SOFTWARE.
*
*/
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/in.h>
#include <linux/ipv6.h>
#include "rds.h"
#include "loop.h"
static char * const rds_trans_modules[] = {
[RDS_TRANS_IB] = "rds_rdma",
[RDS_TRANS_GAP] = NULL,
[RDS_TRANS_TCP] = "rds_tcp",
};
static struct rds_transport *transports[RDS_TRANS_COUNT];
static DECLARE_RWSEM(rds_trans_sem);
void rds_trans_register(struct rds_transport *trans)
{
BUG_ON(strlen(trans->t_name) + 1 > TRANSNAMSIZ);
down_write(&rds_trans_sem);
if (transports[trans->t_type])
printk(KERN_ERR "RDS Transport type %d already registered\n",
trans->t_type);
else {
transports[trans->t_type] = trans;
printk(KERN_INFO "Registered RDS/%s transport\n", trans->t_name);
}
up_write(&rds_trans_sem);
}
EXPORT_SYMBOL_GPL(rds_trans_register);
void rds_trans_unregister(struct rds_transport *trans)
{
down_write(&rds_trans_sem);
transports[trans->t_type] = NULL;
printk(KERN_INFO "Unregistered RDS/%s transport\n", trans->t_name);
up_write(&rds_trans_sem);
}
EXPORT_SYMBOL_GPL(rds_trans_unregister);
void rds_trans_put(struct rds_transport *trans)
{
if (trans)
module_put(trans->t_owner);
}
struct rds_transport *rds_trans_get_preferred(struct net *net,
const struct in6_addr *addr,
__u32 scope_id)
{
struct rds_transport *ret = NULL;
struct rds_transport *trans;
unsigned int i;
if (ipv6_addr_v4mapped(addr)) {
if (*(u_int8_t *)&addr->s6_addr32[3] == IN_LOOPBACKNET)
return &rds_loop_transport;
} else if (ipv6_addr_loopback(addr)) {
return &rds_loop_transport;
}
down_read(&rds_trans_sem);
for (i = 0; i < RDS_TRANS_COUNT; i++) {
trans = transports[i];
if (trans && (trans->laddr_check(net, addr, scope_id) == 0) &&
(!trans->t_owner || try_module_get(trans->t_owner))) {
ret = trans;
break;
}
}
up_read(&rds_trans_sem);
return ret;
}
struct rds_transport *rds_trans_get(int t_type)
{
struct rds_transport *ret = NULL;
struct rds_transport *trans;
down_read(&rds_trans_sem);
trans = transports[t_type];
if (!trans) {
up_read(&rds_trans_sem);
if (rds_trans_modules[t_type])
request_module(rds_trans_modules[t_type]);
down_read(&rds_trans_sem);
trans = transports[t_type];
}
if (trans && trans->t_type == t_type &&
(!trans->t_owner || try_module_get(trans->t_owner)))
ret = trans;
up_read(&rds_trans_sem);
return ret;
}
/*
* This returns the number of stats entries in the snapshot and only
* copies them using the iter if there is enough space for them. The
* caller passes in the global stats so that we can size and copy while
* holding the lock.
*/
unsigned int rds_trans_stats_info_copy(struct rds_info_iterator *iter,
unsigned int avail)
{
struct rds_transport *trans;
unsigned int total = 0;
unsigned int part;
int i;
rds_info_iter_unmap(iter);
down_read(&rds_trans_sem);
for (i = 0; i < RDS_TRANS_COUNT; i++) {
trans = transports[i];
if (!trans || !trans->stats_info_copy)
continue;
part = trans->stats_info_copy(iter, avail);
avail -= min(avail, part);
total += part;
}
up_read(&rds_trans_sem);
return total;
}
| linux-master | net/rds/transport.c |
/*
* Copyright (c) 2006, 2018 Oracle and/or its affiliates. All rights reserved.
*
* This software is available to you under a choice of one of two
* licenses. You may choose to be licensed under the terms of the GNU
* General Public License (GPL) Version 2, available from the file
* COPYING in the main directory of this source tree, or the
* OpenIB.org BSD license below:
*
* Redistribution and use in source and binary forms, with or
* without modification, are permitted provided that the following
* conditions are met:
*
* - Redistributions of source code must retain the above
* copyright notice, this list of conditions and the following
* disclaimer.
*
* - Redistributions in binary form must reproduce the above
* copyright notice, this list of conditions and the following
* disclaimer in the documentation and/or other materials
* provided with the distribution.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
* EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
* MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
* NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
* BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
* ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
* CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
* SOFTWARE.
*
*/
#include <linux/kernel.h>
#include <linux/moduleparam.h>
#include <linux/gfp.h>
#include <net/sock.h>
#include <linux/in.h>
#include <linux/list.h>
#include <linux/ratelimit.h>
#include <linux/export.h>
#include <linux/sizes.h>
#include "rds.h"
/* When transmitting messages in rds_send_xmit, we need to emerge from
* time to time and briefly release the CPU. Otherwise the soft lockup
* watchdog will kick our shin.
* Also, it seems fairer to not let one busy connection stall all the
* others.
*
* send_batch_count is the number of times we'll loop in send_xmit. Setting
* it to 0 will restore the old behavior (where we looped until we had
* drained the queue).
*/
static int send_batch_count = SZ_1K;
module_param(send_batch_count, int, 0444);
MODULE_PARM_DESC(send_batch_count, " batch factor when working the send queue");
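/*
 * Illustrative usage note (not part of the original file): because the
 * parameter is read-only (0444), the batch factor can only be chosen when the
 * module is loaded, e.g. (assuming the core RDS module is built as rds.ko)
 *
 *	modprobe rds send_batch_count=256
 *
 * or with rds.send_batch_count=256 on the kernel command line if RDS is
 * built in.
 */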
static void rds_send_remove_from_sock(struct list_head *messages, int status);
/*
* Reset the send state. Callers must ensure that this doesn't race with
* rds_send_xmit().
*/
void rds_send_path_reset(struct rds_conn_path *cp)
{
struct rds_message *rm, *tmp;
unsigned long flags;
if (cp->cp_xmit_rm) {
rm = cp->cp_xmit_rm;
cp->cp_xmit_rm = NULL;
/* Tell the user the RDMA op is no longer mapped by the
* transport. This isn't entirely true (it's flushed out
* independently) but as the connection is down, there's
* no ongoing RDMA to/from that memory */
rds_message_unmapped(rm);
rds_message_put(rm);
}
cp->cp_xmit_sg = 0;
cp->cp_xmit_hdr_off = 0;
cp->cp_xmit_data_off = 0;
cp->cp_xmit_atomic_sent = 0;
cp->cp_xmit_rdma_sent = 0;
cp->cp_xmit_data_sent = 0;
cp->cp_conn->c_map_queued = 0;
cp->cp_unacked_packets = rds_sysctl_max_unacked_packets;
cp->cp_unacked_bytes = rds_sysctl_max_unacked_bytes;
/* Mark messages as retransmissions, and move them to the send q */
spin_lock_irqsave(&cp->cp_lock, flags);
list_for_each_entry_safe(rm, tmp, &cp->cp_retrans, m_conn_item) {
set_bit(RDS_MSG_ACK_REQUIRED, &rm->m_flags);
set_bit(RDS_MSG_RETRANSMITTED, &rm->m_flags);
}
list_splice_init(&cp->cp_retrans, &cp->cp_send_queue);
spin_unlock_irqrestore(&cp->cp_lock, flags);
}
EXPORT_SYMBOL_GPL(rds_send_path_reset);
static int acquire_in_xmit(struct rds_conn_path *cp)
{
return test_and_set_bit(RDS_IN_XMIT, &cp->cp_flags) == 0;
}
static void release_in_xmit(struct rds_conn_path *cp)
{
clear_bit(RDS_IN_XMIT, &cp->cp_flags);
smp_mb__after_atomic();
/*
* We don't use wait_on_bit()/wake_up_bit() because our waking is in a
* hot path and finding waiters is very rare. We don't want to walk
* the system-wide hashed waitqueue buckets in the fast path only to
* almost never find waiters.
*/
if (waitqueue_active(&cp->cp_waitq))
wake_up_all(&cp->cp_waitq);
}
/*
* We're making the conscious trade-off here to only send one message
* down the connection at a time.
* Pro:
* - tx queueing is a simple fifo list
* - reassembly is optional and easily done by transports per conn
* - no per flow rx lookup at all, straight to the socket
* - less per-frag memory and wire overhead
* Con:
* - queued acks can be delayed behind large messages
* Depends:
* - small message latency is higher behind queued large messages
* - large message latency isn't starved by intervening small sends
*/
int rds_send_xmit(struct rds_conn_path *cp)
{
struct rds_connection *conn = cp->cp_conn;
struct rds_message *rm;
unsigned long flags;
unsigned int tmp;
struct scatterlist *sg;
int ret = 0;
LIST_HEAD(to_be_dropped);
int batch_count;
unsigned long send_gen = 0;
int same_rm = 0;
restart:
batch_count = 0;
/*
* sendmsg calls here after having queued its message on the send
* queue. We only have one task feeding the connection at a time. If
* another thread is already feeding the queue then we back off. This
* avoids blocking the caller and bouncing per-connection data between
* CPU caches on every message.
*/
if (!acquire_in_xmit(cp)) {
rds_stats_inc(s_send_lock_contention);
ret = -ENOMEM;
goto out;
}
if (rds_destroy_pending(cp->cp_conn)) {
release_in_xmit(cp);
ret = -ENETUNREACH; /* don't requeue send work */
goto out;
}
/*
* we record the send generation after doing the xmit acquire.
* if someone else manages to jump in and do some work, we'll use
* this to avoid a goto restart farther down.
*
* The acquire_in_xmit() check above ensures that only one
* caller can increment c_send_gen at any time.
*/
send_gen = READ_ONCE(cp->cp_send_gen) + 1;
WRITE_ONCE(cp->cp_send_gen, send_gen);
/*
* rds_conn_shutdown() sets the conn state and then tests RDS_IN_XMIT,
* we do the opposite to avoid races.
*/
if (!rds_conn_path_up(cp)) {
release_in_xmit(cp);
ret = 0;
goto out;
}
if (conn->c_trans->xmit_path_prepare)
conn->c_trans->xmit_path_prepare(cp);
/*
* spin trying to push headers and data down the connection until
* the connection doesn't make forward progress.
*/
while (1) {
rm = cp->cp_xmit_rm;
if (!rm) {
same_rm = 0;
} else {
same_rm++;
if (same_rm >= 4096) {
rds_stats_inc(s_send_stuck_rm);
ret = -EAGAIN;
break;
}
}
/*
* If between sending messages, we can send a pending congestion
* map update.
*/
if (!rm && test_and_clear_bit(0, &conn->c_map_queued)) {
rm = rds_cong_update_alloc(conn);
if (IS_ERR(rm)) {
ret = PTR_ERR(rm);
break;
}
rm->data.op_active = 1;
rm->m_inc.i_conn_path = cp;
rm->m_inc.i_conn = cp->cp_conn;
cp->cp_xmit_rm = rm;
}
/*
* If not already working on one, grab the next message.
*
* cp_xmit_rm holds a ref while we're sending this message down
* the connection. We can use this ref while holding the
* send_sem; rds_send_reset() is serialized with it.
*/
if (!rm) {
unsigned int len;
batch_count++;
/* we want to process as big a batch as we can, but
* we also want to avoid softlockups. If we've been
* through a lot of messages, let's back off and see
* if anyone else jumps in
*/
if (batch_count >= send_batch_count)
goto over_batch;
spin_lock_irqsave(&cp->cp_lock, flags);
if (!list_empty(&cp->cp_send_queue)) {
rm = list_entry(cp->cp_send_queue.next,
struct rds_message,
m_conn_item);
rds_message_addref(rm);
/*
* Move the message from the send queue to the retransmit
* list right away.
*/
list_move_tail(&rm->m_conn_item,
&cp->cp_retrans);
}
spin_unlock_irqrestore(&cp->cp_lock, flags);
if (!rm)
break;
/* Unfortunately, the way Infiniband deals with
* RDMA to a bad MR key is by moving the entire
* queue pair to error state. We could possibly
* recover from that, but right now we drop the
* connection.
* Therefore, we never retransmit messages with RDMA ops.
*/
if (test_bit(RDS_MSG_FLUSH, &rm->m_flags) ||
(rm->rdma.op_active &&
test_bit(RDS_MSG_RETRANSMITTED, &rm->m_flags))) {
spin_lock_irqsave(&cp->cp_lock, flags);
if (test_and_clear_bit(RDS_MSG_ON_CONN, &rm->m_flags))
list_move(&rm->m_conn_item, &to_be_dropped);
spin_unlock_irqrestore(&cp->cp_lock, flags);
continue;
}
/* Require an ACK every once in a while */
len = ntohl(rm->m_inc.i_hdr.h_len);
if (cp->cp_unacked_packets == 0 ||
cp->cp_unacked_bytes < len) {
set_bit(RDS_MSG_ACK_REQUIRED, &rm->m_flags);
cp->cp_unacked_packets =
rds_sysctl_max_unacked_packets;
cp->cp_unacked_bytes =
rds_sysctl_max_unacked_bytes;
rds_stats_inc(s_send_ack_required);
} else {
cp->cp_unacked_bytes -= len;
cp->cp_unacked_packets--;
}
cp->cp_xmit_rm = rm;
}
/* The transport either sends the whole rdma or none of it */
if (rm->rdma.op_active && !cp->cp_xmit_rdma_sent) {
rm->m_final_op = &rm->rdma;
/* The transport owns the mapped memory for now.
* You can't unmap it while it's on the send queue
*/
set_bit(RDS_MSG_MAPPED, &rm->m_flags);
ret = conn->c_trans->xmit_rdma(conn, &rm->rdma);
if (ret) {
clear_bit(RDS_MSG_MAPPED, &rm->m_flags);
wake_up_interruptible(&rm->m_flush_wait);
break;
}
cp->cp_xmit_rdma_sent = 1;
}
if (rm->atomic.op_active && !cp->cp_xmit_atomic_sent) {
rm->m_final_op = &rm->atomic;
/* The transport owns the mapped memory for now.
* You can't unmap it while it's on the send queue
*/
set_bit(RDS_MSG_MAPPED, &rm->m_flags);
ret = conn->c_trans->xmit_atomic(conn, &rm->atomic);
if (ret) {
clear_bit(RDS_MSG_MAPPED, &rm->m_flags);
wake_up_interruptible(&rm->m_flush_wait);
break;
}
cp->cp_xmit_atomic_sent = 1;
}
/*
* A number of cases require an RDS header to be sent
* even if there is no data.
* We permit 0-byte sends; rds-ping depends on this.
* However, if there are exclusively attached silent ops,
* we skip the hdr/data send, to enable silent operation.
*/
if (rm->data.op_nents == 0) {
int ops_present;
int all_ops_are_silent = 1;
ops_present = (rm->atomic.op_active || rm->rdma.op_active);
if (rm->atomic.op_active && !rm->atomic.op_silent)
all_ops_are_silent = 0;
if (rm->rdma.op_active && !rm->rdma.op_silent)
all_ops_are_silent = 0;
if (ops_present && all_ops_are_silent
&& !rm->m_rdma_cookie)
rm->data.op_active = 0;
}
if (rm->data.op_active && !cp->cp_xmit_data_sent) {
rm->m_final_op = &rm->data;
ret = conn->c_trans->xmit(conn, rm,
cp->cp_xmit_hdr_off,
cp->cp_xmit_sg,
cp->cp_xmit_data_off);
if (ret <= 0)
break;
if (cp->cp_xmit_hdr_off < sizeof(struct rds_header)) {
tmp = min_t(int, ret,
sizeof(struct rds_header) -
cp->cp_xmit_hdr_off);
cp->cp_xmit_hdr_off += tmp;
ret -= tmp;
}
sg = &rm->data.op_sg[cp->cp_xmit_sg];
while (ret) {
tmp = min_t(int, ret, sg->length -
cp->cp_xmit_data_off);
cp->cp_xmit_data_off += tmp;
ret -= tmp;
if (cp->cp_xmit_data_off == sg->length) {
cp->cp_xmit_data_off = 0;
sg++;
cp->cp_xmit_sg++;
BUG_ON(ret != 0 && cp->cp_xmit_sg ==
rm->data.op_nents);
}
}
if (cp->cp_xmit_hdr_off == sizeof(struct rds_header) &&
(cp->cp_xmit_sg == rm->data.op_nents))
cp->cp_xmit_data_sent = 1;
}
/*
* A rm will only take multiple times through this loop
* if there is a data op. Thus, if the data is sent (or there was
* none), then we're done with the rm.
*/
if (!rm->data.op_active || cp->cp_xmit_data_sent) {
cp->cp_xmit_rm = NULL;
cp->cp_xmit_sg = 0;
cp->cp_xmit_hdr_off = 0;
cp->cp_xmit_data_off = 0;
cp->cp_xmit_rdma_sent = 0;
cp->cp_xmit_atomic_sent = 0;
cp->cp_xmit_data_sent = 0;
rds_message_put(rm);
}
}
over_batch:
if (conn->c_trans->xmit_path_complete)
conn->c_trans->xmit_path_complete(cp);
release_in_xmit(cp);
/* Nuke any messages we decided not to retransmit. */
if (!list_empty(&to_be_dropped)) {
/* irqs on here, so we can put(), unlike above */
list_for_each_entry(rm, &to_be_dropped, m_conn_item)
rds_message_put(rm);
rds_send_remove_from_sock(&to_be_dropped, RDS_RDMA_DROPPED);
}
/*
* Other senders can queue a message after we last test the send queue
* but before we clear RDS_IN_XMIT. In that case they'd back off and
* not try to send their newly queued message. We need to check the
* send queue after having cleared RDS_IN_XMIT so that their message
* doesn't get stuck on the send queue.
*
* If the transport cannot continue (i.e., ret != 0), then it must
* call us when more room is available, such as from the tx
* completion handler.
*
* We have an extra generation check here so that if someone manages
* to jump in after our release_in_xmit, we'll see that they have done
* some work and we will skip our goto
*/
if (ret == 0) {
bool raced;
smp_mb();
raced = send_gen != READ_ONCE(cp->cp_send_gen);
if ((test_bit(0, &conn->c_map_queued) ||
!list_empty(&cp->cp_send_queue)) && !raced) {
if (batch_count < send_batch_count)
goto restart;
rcu_read_lock();
if (rds_destroy_pending(cp->cp_conn))
ret = -ENETUNREACH;
else
queue_delayed_work(rds_wq, &cp->cp_send_w, 1);
rcu_read_unlock();
} else if (raced) {
rds_stats_inc(s_send_lock_queue_raced);
}
}
out:
return ret;
}
EXPORT_SYMBOL_GPL(rds_send_xmit);
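/*
* Remove a message's payload length from the socket's send buffer
* accounting. Caller must hold rs->rs_lock.
*/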
static void rds_send_sndbuf_remove(struct rds_sock *rs, struct rds_message *rm)
{
u32 len = be32_to_cpu(rm->m_inc.i_hdr.h_len);
assert_spin_locked(&rs->rs_lock);
BUG_ON(rs->rs_snd_bytes < len);
rs->rs_snd_bytes -= len;
if (rs->rs_snd_bytes == 0)
rds_stats_inc(s_send_queue_empty);
}
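/*
* A message counts as acked either by the transport-specific is_acked
* callback, if one was supplied, or by comparing its header sequence
* number against the cumulative ack.
*/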
static inline int rds_send_is_acked(struct rds_message *rm, u64 ack,
is_acked_func is_acked)
{
if (is_acked)
return is_acked(rm, ack);
return be64_to_cpu(rm->m_inc.i_hdr.h_sequence) <= ack;
}
/*
* This is pretty similar to what happens below in the ACK
* handling code - except that we call here as soon as we get
* the IB send completion on the RDMA op and the accompanying
* message.
*/
void rds_rdma_send_complete(struct rds_message *rm, int status)
{
struct rds_sock *rs = NULL;
struct rm_rdma_op *ro;
struct rds_notifier *notifier;
unsigned long flags;
spin_lock_irqsave(&rm->m_rs_lock, flags);
ro = &rm->rdma;
if (test_bit(RDS_MSG_ON_SOCK, &rm->m_flags) &&
ro->op_active && ro->op_notify && ro->op_notifier) {
notifier = ro->op_notifier;
rs = rm->m_rs;
sock_hold(rds_rs_to_sk(rs));
notifier->n_status = status;
spin_lock(&rs->rs_lock);
list_add_tail(¬ifier->n_list, &rs->rs_notify_queue);
spin_unlock(&rs->rs_lock);
ro->op_notifier = NULL;
}
spin_unlock_irqrestore(&rm->m_rs_lock, flags);
if (rs) {
rds_wake_sk_sleep(rs);
sock_put(rds_rs_to_sk(rs));
}
}
EXPORT_SYMBOL_GPL(rds_rdma_send_complete);
/*
* Just like above, except looks at atomic op
*/
void rds_atomic_send_complete(struct rds_message *rm, int status)
{
struct rds_sock *rs = NULL;
struct rm_atomic_op *ao;
struct rds_notifier *notifier;
unsigned long flags;
spin_lock_irqsave(&rm->m_rs_lock, flags);
ao = &rm->atomic;
if (test_bit(RDS_MSG_ON_SOCK, &rm->m_flags)
&& ao->op_active && ao->op_notify && ao->op_notifier) {
notifier = ao->op_notifier;
rs = rm->m_rs;
sock_hold(rds_rs_to_sk(rs));
notifier->n_status = status;
spin_lock(&rs->rs_lock);
list_add_tail(¬ifier->n_list, &rs->rs_notify_queue);
spin_unlock(&rs->rs_lock);
ao->op_notifier = NULL;
}
spin_unlock_irqrestore(&rm->m_rs_lock, flags);
if (rs) {
rds_wake_sk_sleep(rs);
sock_put(rds_rs_to_sk(rs));
}
}
EXPORT_SYMBOL_GPL(rds_atomic_send_complete);
/*
* This is the same as rds_rdma_send_complete except we
* don't do any locking - we have all the ingredients (message,
* socket, socket lock) and can just move the notifier.
*/
static inline void
__rds_send_complete(struct rds_sock *rs, struct rds_message *rm, int status)
{
struct rm_rdma_op *ro;
struct rm_atomic_op *ao;
ro = &rm->rdma;
if (ro->op_active && ro->op_notify && ro->op_notifier) {
ro->op_notifier->n_status = status;
list_add_tail(&ro->op_notifier->n_list, &rs->rs_notify_queue);
ro->op_notifier = NULL;
}
ao = &rm->atomic;
if (ao->op_active && ao->op_notify && ao->op_notifier) {
ao->op_notifier->n_status = status;
list_add_tail(&ao->op_notifier->n_list, &rs->rs_notify_queue);
ao->op_notifier = NULL;
}
/* No need to wake the app - caller does this */
}
/*
* This removes messages from the socket's list if they're on it. The list
* argument must be private to the caller; we must be able to modify it
* without locks. The messages must have a reference held for their
* position on the list. This function will drop that reference after
* removing the messages from the 'messages' list, regardless of whether
* it found the messages on the socket list or not.
*/
static void rds_send_remove_from_sock(struct list_head *messages, int status)
{
unsigned long flags;
struct rds_sock *rs = NULL;
struct rds_message *rm;
while (!list_empty(messages)) {
int was_on_sock = 0;
rm = list_entry(messages->next, struct rds_message,
m_conn_item);
list_del_init(&rm->m_conn_item);
/*
* If we see this flag cleared then we're *sure* that someone
* else beat us to removing it from the sock. If we race
* with their flag update we'll get the lock and then really
* see that the flag has been cleared.
*
* The message spinlock makes sure nobody clears rm->m_rs
* while we're messing with it. It does not prevent the
* message from being removed from the socket, though.
*/
spin_lock_irqsave(&rm->m_rs_lock, flags);
if (!test_bit(RDS_MSG_ON_SOCK, &rm->m_flags))
goto unlock_and_drop;
if (rs != rm->m_rs) {
if (rs) {
rds_wake_sk_sleep(rs);
sock_put(rds_rs_to_sk(rs));
}
rs = rm->m_rs;
if (rs)
sock_hold(rds_rs_to_sk(rs));
}
if (!rs)
goto unlock_and_drop;
spin_lock(&rs->rs_lock);
if (test_and_clear_bit(RDS_MSG_ON_SOCK, &rm->m_flags)) {
struct rm_rdma_op *ro = &rm->rdma;
struct rds_notifier *notifier;
list_del_init(&rm->m_sock_item);
rds_send_sndbuf_remove(rs, rm);
if (ro->op_active && ro->op_notifier &&
(ro->op_notify || (ro->op_recverr && status))) {
notifier = ro->op_notifier;
list_add_tail(¬ifier->n_list,
&rs->rs_notify_queue);
if (!notifier->n_status)
notifier->n_status = status;
rm->rdma.op_notifier = NULL;
}
was_on_sock = 1;
}
spin_unlock(&rs->rs_lock);
unlock_and_drop:
spin_unlock_irqrestore(&rm->m_rs_lock, flags);
rds_message_put(rm);
if (was_on_sock)
rds_message_put(rm);
}
if (rs) {
rds_wake_sk_sleep(rs);
sock_put(rds_rs_to_sk(rs));
}
}
/*
* Transports call here when they've determined that the receiver queued
* messages up to, and including, the given sequence number. Messages are
* moved to the retrans queue when rds_send_xmit picks them off the send
* queue. This means that in the TCP case, the message may not have been
* assigned the m_ack_seq yet - but that's fine as long as tcp_is_acked
* checks the RDS_MSG_HAS_ACK_SEQ bit.
*/
void rds_send_path_drop_acked(struct rds_conn_path *cp, u64 ack,
is_acked_func is_acked)
{
struct rds_message *rm, *tmp;
unsigned long flags;
LIST_HEAD(list);
spin_lock_irqsave(&cp->cp_lock, flags);
list_for_each_entry_safe(rm, tmp, &cp->cp_retrans, m_conn_item) {
if (!rds_send_is_acked(rm, ack, is_acked))
break;
list_move(&rm->m_conn_item, &list);
clear_bit(RDS_MSG_ON_CONN, &rm->m_flags);
}
/* order flag updates with spin locks */
if (!list_empty(&list))
smp_mb__after_atomic();
spin_unlock_irqrestore(&cp->cp_lock, flags);
/* now remove the messages from the sock list as needed */
rds_send_remove_from_sock(&list, RDS_RDMA_SUCCESS);
}
EXPORT_SYMBOL_GPL(rds_send_path_drop_acked);
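/* Single-path wrapper; only valid for transports that aren't MP-capable. */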
void rds_send_drop_acked(struct rds_connection *conn, u64 ack,
is_acked_func is_acked)
{
WARN_ON(conn->c_trans->t_mp_capable);
rds_send_path_drop_acked(&conn->c_path[0], ack, is_acked);
}
EXPORT_SYMBOL_GPL(rds_send_drop_acked);
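/*
* Drop queued messages for this socket. A NULL dest matches every
* message; otherwise only messages to the given address and port are
* dropped. Pending notifiers are completed with RDS_RDMA_CANCELED.
*/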
void rds_send_drop_to(struct rds_sock *rs, struct sockaddr_in6 *dest)
{
struct rds_message *rm, *tmp;
struct rds_connection *conn;
struct rds_conn_path *cp;
unsigned long flags;
LIST_HEAD(list);
/* get all the messages we're dropping under the rs lock */
spin_lock_irqsave(&rs->rs_lock, flags);
list_for_each_entry_safe(rm, tmp, &rs->rs_send_queue, m_sock_item) {
if (dest &&
(!ipv6_addr_equal(&dest->sin6_addr, &rm->m_daddr) ||
dest->sin6_port != rm->m_inc.i_hdr.h_dport))
continue;
list_move(&rm->m_sock_item, &list);
rds_send_sndbuf_remove(rs, rm);
clear_bit(RDS_MSG_ON_SOCK, &rm->m_flags);
}
/* order flag updates with the rs lock */
smp_mb__after_atomic();
spin_unlock_irqrestore(&rs->rs_lock, flags);
if (list_empty(&list))
return;
/* Remove the messages from the conn */
list_for_each_entry(rm, &list, m_sock_item) {
conn = rm->m_inc.i_conn;
if (conn->c_trans->t_mp_capable)
cp = rm->m_inc.i_conn_path;
else
cp = &conn->c_path[0];
spin_lock_irqsave(&cp->cp_lock, flags);
/*
* Maybe someone else beat us to removing rm from the conn.
* If we race with their flag update we'll get the lock and
* then really see that the flag has been cleared.
*/
if (!test_and_clear_bit(RDS_MSG_ON_CONN, &rm->m_flags)) {
spin_unlock_irqrestore(&cp->cp_lock, flags);
continue;
}
list_del_init(&rm->m_conn_item);
spin_unlock_irqrestore(&cp->cp_lock, flags);
/*
* Couldn't grab m_rs_lock in top loop (lock ordering),
* but we can now.
*/
spin_lock_irqsave(&rm->m_rs_lock, flags);
spin_lock(&rs->rs_lock);
__rds_send_complete(rs, rm, RDS_RDMA_CANCELED);
spin_unlock(&rs->rs_lock);
spin_unlock_irqrestore(&rm->m_rs_lock, flags);
rds_message_put(rm);
}
rds_wake_sk_sleep(rs);
while (!list_empty(&list)) {
rm = list_entry(list.next, struct rds_message, m_sock_item);
list_del_init(&rm->m_sock_item);
rds_message_wait(rm);
/* just in case the code above skipped this message
* because RDS_MSG_ON_CONN wasn't set, run it again here
* taking m_rs_lock is the only thing that keeps us
* from racing with ack processing.
*/
spin_lock_irqsave(&rm->m_rs_lock, flags);
spin_lock(&rs->rs_lock);
__rds_send_complete(rs, rm, RDS_RDMA_CANCELED);
spin_unlock(&rs->rs_lock);
spin_unlock_irqrestore(&rm->m_rs_lock, flags);
rds_message_put(rm);
}
}
/*
* we only want this to fire once so we use the caller's 'queued'. It's
* possible that another thread can race with us and remove the
* message from the flow with RDS_CANCEL_SENT_TO.
*/
static int rds_send_queue_rm(struct rds_sock *rs, struct rds_connection *conn,
struct rds_conn_path *cp,
struct rds_message *rm, __be16 sport,
__be16 dport, int *queued)
{
unsigned long flags;
u32 len;
if (*queued)
goto out;
len = be32_to_cpu(rm->m_inc.i_hdr.h_len);
/* this is the only place which holds both the socket's rs_lock
* and the connection's c_lock */
spin_lock_irqsave(&rs->rs_lock, flags);
/*
* If there is only a little space left in sndbuf and we refuse to queue
* anything, userspace gets -EAGAIN even though poll() still indicates
* send room. That can lead to bad behavior (spinning) if snd_bytes isn't
* freed up by incoming acks. So we check the *old* value of
* rs_snd_bytes here to allow the last msg to exceed the buffer,
* and poll() now knows no more data can be sent.
*/
if (rs->rs_snd_bytes < rds_sk_sndbuf(rs)) {
rs->rs_snd_bytes += len;
/* let recv side know we are close to send space exhaustion.
* This is probably not the optimal way to do it, as this
* means we set the flag on *all* messages as soon as our
* throughput hits a certain threshold.
*/
if (rs->rs_snd_bytes >= rds_sk_sndbuf(rs) / 2)
set_bit(RDS_MSG_ACK_REQUIRED, &rm->m_flags);
list_add_tail(&rm->m_sock_item, &rs->rs_send_queue);
set_bit(RDS_MSG_ON_SOCK, &rm->m_flags);
rds_message_addref(rm);
sock_hold(rds_rs_to_sk(rs));
rm->m_rs = rs;
/* The code ordering is a little weird, but we're
trying to minimize the time we hold c_lock */
rds_message_populate_header(&rm->m_inc.i_hdr, sport, dport, 0);
rm->m_inc.i_conn = conn;
rm->m_inc.i_conn_path = cp;
rds_message_addref(rm);
spin_lock(&cp->cp_lock);
rm->m_inc.i_hdr.h_sequence = cpu_to_be64(cp->cp_next_tx_seq++);
list_add_tail(&rm->m_conn_item, &cp->cp_send_queue);
set_bit(RDS_MSG_ON_CONN, &rm->m_flags);
spin_unlock(&cp->cp_lock);
rdsdebug("queued msg %p len %d, rs %p bytes %d seq %llu\n",
rm, len, rs, rs->rs_snd_bytes,
(unsigned long long)be64_to_cpu(rm->m_inc.i_hdr.h_sequence));
*queued = 1;
}
spin_unlock_irqrestore(&rs->rs_lock, flags);
out:
return *queued;
}
/*
* rds_message is getting to be quite complicated, and we'd like to allocate
* it all in one go. This figures out how big it needs to be up front.
*/
static int rds_rm_size(struct msghdr *msg, int num_sgs,
struct rds_iov_vector_arr *vct)
{
struct cmsghdr *cmsg;
int size = 0;
int cmsg_groups = 0;
int retval;
bool zcopy_cookie = false;
struct rds_iov_vector *iov, *tmp_iov;
if (num_sgs < 0)
return -EINVAL;
for_each_cmsghdr(cmsg, msg) {
if (!CMSG_OK(msg, cmsg))
return -EINVAL;
if (cmsg->cmsg_level != SOL_RDS)
continue;
switch (cmsg->cmsg_type) {
case RDS_CMSG_RDMA_ARGS:
if (vct->indx >= vct->len) {
vct->len += vct->incr;
tmp_iov =
krealloc(vct->vec,
vct->len *
sizeof(struct rds_iov_vector),
GFP_KERNEL);
if (!tmp_iov) {
vct->len -= vct->incr;
return -ENOMEM;
}
vct->vec = tmp_iov;
}
iov = &vct->vec[vct->indx];
memset(iov, 0, sizeof(struct rds_iov_vector));
vct->indx++;
cmsg_groups |= 1;
retval = rds_rdma_extra_size(CMSG_DATA(cmsg), iov);
if (retval < 0)
return retval;
size += retval;
break;
case RDS_CMSG_ZCOPY_COOKIE:
zcopy_cookie = true;
fallthrough;
case RDS_CMSG_RDMA_DEST:
case RDS_CMSG_RDMA_MAP:
cmsg_groups |= 2;
/* these are valid but do not add any size */
break;
case RDS_CMSG_ATOMIC_CSWP:
case RDS_CMSG_ATOMIC_FADD:
case RDS_CMSG_MASKED_ATOMIC_CSWP:
case RDS_CMSG_MASKED_ATOMIC_FADD:
cmsg_groups |= 1;
size += sizeof(struct scatterlist);
break;
default:
return -EINVAL;
}
}
if ((msg->msg_flags & MSG_ZEROCOPY) && !zcopy_cookie)
return -EINVAL;
size += num_sgs * sizeof(struct scatterlist);
/* Ensure (DEST, MAP) are never used with (ARGS, ATOMIC) */
if (cmsg_groups == 3)
return -EINVAL;
return size;
}
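/*
* Record the user-supplied zerocopy cookie in the message's znotifier
* so the completion notification can be matched back to this send.
*/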
static int rds_cmsg_zcopy(struct rds_sock *rs, struct rds_message *rm,
struct cmsghdr *cmsg)
{
u32 *cookie;
if (cmsg->cmsg_len < CMSG_LEN(sizeof(*cookie)) ||
!rm->data.op_mmp_znotifier)
return -EINVAL;
cookie = CMSG_DATA(cmsg);
rm->data.op_mmp_znotifier->z_cookie = *cookie;
return 0;
}
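/*
* Walk the control messages a second time, this time actually building
* the rdma/atomic/zcopy ops that rds_rm_size() sized the message for.
*/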
static int rds_cmsg_send(struct rds_sock *rs, struct rds_message *rm,
struct msghdr *msg, int *allocated_mr,
struct rds_iov_vector_arr *vct)
{
struct cmsghdr *cmsg;
int ret = 0, ind = 0;
for_each_cmsghdr(cmsg, msg) {
if (!CMSG_OK(msg, cmsg))
return -EINVAL;
if (cmsg->cmsg_level != SOL_RDS)
continue;
/* As a side effect, RDMA_DEST and RDMA_MAP will set
* rm->rdma.m_rdma_cookie and rm->rdma.m_rdma_mr.
*/
switch (cmsg->cmsg_type) {
case RDS_CMSG_RDMA_ARGS:
if (ind >= vct->indx)
return -ENOMEM;
ret = rds_cmsg_rdma_args(rs, rm, cmsg, &vct->vec[ind]);
ind++;
break;
case RDS_CMSG_RDMA_DEST:
ret = rds_cmsg_rdma_dest(rs, rm, cmsg);
break;
case RDS_CMSG_RDMA_MAP:
ret = rds_cmsg_rdma_map(rs, rm, cmsg);
if (!ret)
*allocated_mr = 1;
else if (ret == -ENODEV)
/* Accommodate the get_mr() case which can fail
* if connection isn't established yet.
*/
ret = -EAGAIN;
break;
case RDS_CMSG_ATOMIC_CSWP:
case RDS_CMSG_ATOMIC_FADD:
case RDS_CMSG_MASKED_ATOMIC_CSWP:
case RDS_CMSG_MASKED_ATOMIC_FADD:
ret = rds_cmsg_atomic(rs, rm, cmsg);
break;
case RDS_CMSG_ZCOPY_COOKIE:
ret = rds_cmsg_zcopy(rs, rm, cmsg);
break;
default:
return -EINVAL;
}
if (ret)
break;
}
return ret;
}
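/*
* Pick a connection path for an MP-capable transport. Until the
* handshake tells us how many paths the peer supports (c_npaths), hash
* over RDS_MPATH_WORKERS and fall back to path 0 if we cannot wait.
*/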
static int rds_send_mprds_hash(struct rds_sock *rs,
struct rds_connection *conn, int nonblock)
{
int hash;
if (conn->c_npaths == 0)
hash = RDS_MPATH_HASH(rs, RDS_MPATH_WORKERS);
else
hash = RDS_MPATH_HASH(rs, conn->c_npaths);
if (conn->c_npaths == 0 && hash != 0) {
rds_send_ping(conn, 0);
/* The underlying connection is not up yet. Need to wait
* until it is up to be sure that the non-zero c_path can be
* used. But if we are interrupted, we have to use the zero
* c_path in case the connection ends up being non-MP capable.
*/
if (conn->c_npaths == 0) {
/* Cannot wait for the connection to be made, so just use
* the base c_path.
*/
if (nonblock)
return 0;
if (wait_event_interruptible(conn->c_hs_waitq,
conn->c_npaths != 0))
hash = 0;
}
if (conn->c_npaths == 1)
hash = 0;
}
return hash;
}
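/*
* Sum the remote byte counts of any RDS_CMSG_RDMA_ARGS cmsgs so the
* caller can check the total transfer size against RDS_MAX_MSG_SIZE.
*/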
static int rds_rdma_bytes(struct msghdr *msg, size_t *rdma_bytes)
{
struct rds_rdma_args *args;
struct cmsghdr *cmsg;
for_each_cmsghdr(cmsg, msg) {
if (!CMSG_OK(msg, cmsg))
return -EINVAL;
if (cmsg->cmsg_level != SOL_RDS)
continue;
if (cmsg->cmsg_type == RDS_CMSG_RDMA_ARGS) {
if (cmsg->cmsg_len <
CMSG_LEN(sizeof(struct rds_rdma_args)))
return -EINVAL;
args = CMSG_DATA(cmsg);
*rdma_bytes += args->remote_vec.bytes;
}
}
return 0;
}
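/*
* sendmsg() entry point: validate the destination, size and allocate
* the rds_message, parse the control messages, queue the message on a
* connection path and kick the transmit path.
*/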
int rds_sendmsg(struct socket *sock, struct msghdr *msg, size_t payload_len)
{
struct sock *sk = sock->sk;
struct rds_sock *rs = rds_sk_to_rs(sk);
DECLARE_SOCKADDR(struct sockaddr_in6 *, sin6, msg->msg_name);
DECLARE_SOCKADDR(struct sockaddr_in *, usin, msg->msg_name);
__be16 dport;
struct rds_message *rm = NULL;
struct rds_connection *conn;
int ret = 0;
int queued = 0, allocated_mr = 0;
int nonblock = msg->msg_flags & MSG_DONTWAIT;
long timeo = sock_sndtimeo(sk, nonblock);
struct rds_conn_path *cpath;
struct in6_addr daddr;
__u32 scope_id = 0;
size_t rdma_payload_len = 0;
bool zcopy = ((msg->msg_flags & MSG_ZEROCOPY) &&
sock_flag(rds_rs_to_sk(rs), SOCK_ZEROCOPY));
int num_sgs = DIV_ROUND_UP(payload_len, PAGE_SIZE);
int namelen;
struct rds_iov_vector_arr vct;
int ind;
memset(&vct, 0, sizeof(vct));
/* expect 1 RDMA CMSG per rds_sendmsg. can still grow if more needed. */
vct.incr = 1;
/* Mirror what Linux UDP does for BSD error message compatibility */
/* XXX: Perhaps MSG_MORE someday */
if (msg->msg_flags & ~(MSG_DONTWAIT | MSG_CMSG_COMPAT | MSG_ZEROCOPY)) {
ret = -EOPNOTSUPP;
goto out;
}
namelen = msg->msg_namelen;
if (namelen != 0) {
if (namelen < sizeof(*usin)) {
ret = -EINVAL;
goto out;
}
switch (usin->sin_family) {
case AF_INET:
if (usin->sin_addr.s_addr == htonl(INADDR_ANY) ||
usin->sin_addr.s_addr == htonl(INADDR_BROADCAST) ||
ipv4_is_multicast(usin->sin_addr.s_addr)) {
ret = -EINVAL;
goto out;
}
ipv6_addr_set_v4mapped(usin->sin_addr.s_addr, &daddr);
dport = usin->sin_port;
break;
#if IS_ENABLED(CONFIG_IPV6)
case AF_INET6: {
int addr_type;
if (namelen < sizeof(*sin6)) {
ret = -EINVAL;
goto out;
}
addr_type = ipv6_addr_type(&sin6->sin6_addr);
if (!(addr_type & IPV6_ADDR_UNICAST)) {
__be32 addr4;
if (!(addr_type & IPV6_ADDR_MAPPED)) {
ret = -EINVAL;
goto out;
}
/* It is a mapped address. Need to do some
* sanity checks.
*/
addr4 = sin6->sin6_addr.s6_addr32[3];
if (addr4 == htonl(INADDR_ANY) ||
addr4 == htonl(INADDR_BROADCAST) ||
ipv4_is_multicast(addr4)) {
ret = -EINVAL;
goto out;
}
}
if (addr_type & IPV6_ADDR_LINKLOCAL) {
if (sin6->sin6_scope_id == 0) {
ret = -EINVAL;
goto out;
}
scope_id = sin6->sin6_scope_id;
}
daddr = sin6->sin6_addr;
dport = sin6->sin6_port;
break;
}
#endif
default:
ret = -EINVAL;
goto out;
}
} else {
/* We only care about consistency with ->connect() */
lock_sock(sk);
daddr = rs->rs_conn_addr;
dport = rs->rs_conn_port;
scope_id = rs->rs_bound_scope_id;
release_sock(sk);
}
lock_sock(sk);
if (ipv6_addr_any(&rs->rs_bound_addr) || ipv6_addr_any(&daddr)) {
release_sock(sk);
ret = -ENOTCONN;
goto out;
} else if (namelen != 0) {
/* Cannot send to an IPv4 address using an IPv6 source
* address and cannot send to an IPv6 address using an
* IPv4 source address.
*/
if (ipv6_addr_v4mapped(&daddr) ^
ipv6_addr_v4mapped(&rs->rs_bound_addr)) {
release_sock(sk);
ret = -EOPNOTSUPP;
goto out;
}
/* If the socket is already bound to a link local address,
* it can only send to peers on the same link. But allow
* communicating between link local and non-link local addresses.
*/
if (scope_id != rs->rs_bound_scope_id) {
if (!scope_id) {
scope_id = rs->rs_bound_scope_id;
} else if (rs->rs_bound_scope_id) {
release_sock(sk);
ret = -EINVAL;
goto out;
}
}
}
release_sock(sk);
ret = rds_rdma_bytes(msg, &rdma_payload_len);
if (ret)
goto out;
if (max_t(size_t, payload_len, rdma_payload_len) > RDS_MAX_MSG_SIZE) {
ret = -EMSGSIZE;
goto out;
}
if (payload_len > rds_sk_sndbuf(rs)) {
ret = -EMSGSIZE;
goto out;
}
if (zcopy) {
if (rs->rs_transport->t_type != RDS_TRANS_TCP) {
ret = -EOPNOTSUPP;
goto out;
}
num_sgs = iov_iter_npages(&msg->msg_iter, INT_MAX);
}
/* size of rm including all sgs */
ret = rds_rm_size(msg, num_sgs, &vct);
if (ret < 0)
goto out;
rm = rds_message_alloc(ret, GFP_KERNEL);
if (!rm) {
ret = -ENOMEM;
goto out;
}
/* Attach data to the rm */
if (payload_len) {
rm->data.op_sg = rds_message_alloc_sgs(rm, num_sgs);
if (IS_ERR(rm->data.op_sg)) {
ret = PTR_ERR(rm->data.op_sg);
goto out;
}
ret = rds_message_copy_from_user(rm, &msg->msg_iter, zcopy);
if (ret)
goto out;
}
rm->data.op_active = 1;
rm->m_daddr = daddr;
/* rds_conn_create has a spinlock that runs with IRQ off.
* Caching the conn in the socket helps a lot. */
if (rs->rs_conn && ipv6_addr_equal(&rs->rs_conn->c_faddr, &daddr) &&
rs->rs_tos == rs->rs_conn->c_tos) {
conn = rs->rs_conn;
} else {
conn = rds_conn_create_outgoing(sock_net(sock->sk),
&rs->rs_bound_addr, &daddr,
rs->rs_transport, rs->rs_tos,
sock->sk->sk_allocation,
scope_id);
if (IS_ERR(conn)) {
ret = PTR_ERR(conn);
goto out;
}
rs->rs_conn = conn;
}
if (conn->c_trans->t_mp_capable)
cpath = &conn->c_path[rds_send_mprds_hash(rs, conn, nonblock)];
else
cpath = &conn->c_path[0];
rm->m_conn_path = cpath;
/* Parse any control messages the user may have included. */
ret = rds_cmsg_send(rs, rm, msg, &allocated_mr, &vct);
if (ret) {
/* Trigger connection so that it's ready for the next retry */
if (ret == -EAGAIN)
rds_conn_connect_if_down(conn);
goto out;
}
if (rm->rdma.op_active && !conn->c_trans->xmit_rdma) {
printk_ratelimited(KERN_NOTICE "rdma_op %p conn xmit_rdma %p\n",
&rm->rdma, conn->c_trans->xmit_rdma);
ret = -EOPNOTSUPP;
goto out;
}
if (rm->atomic.op_active && !conn->c_trans->xmit_atomic) {
printk_ratelimited(KERN_NOTICE "atomic_op %p conn xmit_atomic %p\n",
&rm->atomic, conn->c_trans->xmit_atomic);
ret = -EOPNOTSUPP;
goto out;
}
if (rds_destroy_pending(conn)) {
ret = -EAGAIN;
goto out;
}
if (rds_conn_path_down(cpath))
rds_check_all_paths(conn);
ret = rds_cong_wait(conn->c_fcong, dport, nonblock, rs);
if (ret) {
rs->rs_seen_congestion = 1;
goto out;
}
while (!rds_send_queue_rm(rs, conn, cpath, rm, rs->rs_bound_port,
dport, &queued)) {
rds_stats_inc(s_send_queue_full);
if (nonblock) {
ret = -EAGAIN;
goto out;
}
timeo = wait_event_interruptible_timeout(*sk_sleep(sk),
rds_send_queue_rm(rs, conn, cpath, rm,
rs->rs_bound_port,
dport,
&queued),
timeo);
rdsdebug("sendmsg woke queued %d timeo %ld\n", queued, timeo);
if (timeo > 0 || timeo == MAX_SCHEDULE_TIMEOUT)
continue;
ret = timeo;
if (ret == 0)
ret = -ETIMEDOUT;
goto out;
}
/*
* By now we've committed to the send. We reuse rds_send_worker()
* to retry sends in the rds thread if the transport asks us to.
*/
rds_stats_inc(s_send_queued);
ret = rds_send_xmit(cpath);
if (ret == -ENOMEM || ret == -EAGAIN) {
ret = 0;
rcu_read_lock();
if (rds_destroy_pending(cpath->cp_conn))
ret = -ENETUNREACH;
else
queue_delayed_work(rds_wq, &cpath->cp_send_w, 1);
rcu_read_unlock();
}
if (ret)
goto out;
rds_message_put(rm);
for (ind = 0; ind < vct.indx; ind++)
kfree(vct.vec[ind].iov);
kfree(vct.vec);
return payload_len;
out:
for (ind = 0; ind < vct.indx; ind++)
kfree(vct.vec[ind].iov);
kfree(vct.vec);
/* If the user included an RDMA_MAP cmsg, we allocated an MR on the fly.
* If the sendmsg goes through, we keep the MR. If it fails with EAGAIN
* or in any other way, we need to destroy the MR again. */
if (allocated_mr)
rds_rdma_unuse(rs, rds_rdma_cookie_key(rm->m_rdma_cookie), 1);
if (rm)
rds_message_put(rm);
return ret;
}
/*
* send out a probe. Can be shared by rds_send_ping,
* rds_send_pong, rds_send_hb.
* rds_send_hb should use h_flags
* RDS_FLAG_HB_PING|RDS_FLAG_ACK_REQUIRED
* or
* RDS_FLAG_HB_PONG|RDS_FLAG_ACK_REQUIRED
*/
static int
rds_send_probe(struct rds_conn_path *cp, __be16 sport,
__be16 dport, u8 h_flags)
{
struct rds_message *rm;
unsigned long flags;
int ret = 0;
rm = rds_message_alloc(0, GFP_ATOMIC);
if (!rm) {
ret = -ENOMEM;
goto out;
}
rm->m_daddr = cp->cp_conn->c_faddr;
rm->data.op_active = 1;
rds_conn_path_connect_if_down(cp);
ret = rds_cong_wait(cp->cp_conn->c_fcong, dport, 1, NULL);
if (ret)
goto out;
spin_lock_irqsave(&cp->cp_lock, flags);
list_add_tail(&rm->m_conn_item, &cp->cp_send_queue);
set_bit(RDS_MSG_ON_CONN, &rm->m_flags);
rds_message_addref(rm);
rm->m_inc.i_conn = cp->cp_conn;
rm->m_inc.i_conn_path = cp;
rds_message_populate_header(&rm->m_inc.i_hdr, sport, dport,
cp->cp_next_tx_seq);
rm->m_inc.i_hdr.h_flags |= h_flags;
cp->cp_next_tx_seq++;
if (RDS_HS_PROBE(be16_to_cpu(sport), be16_to_cpu(dport)) &&
cp->cp_conn->c_trans->t_mp_capable) {
u16 npaths = cpu_to_be16(RDS_MPATH_WORKERS);
u32 my_gen_num = cpu_to_be32(cp->cp_conn->c_my_gen_num);
rds_message_add_extension(&rm->m_inc.i_hdr,
RDS_EXTHDR_NPATHS, &npaths,
sizeof(npaths));
rds_message_add_extension(&rm->m_inc.i_hdr,
RDS_EXTHDR_GEN_NUM,
&my_gen_num,
sizeof(u32));
}
spin_unlock_irqrestore(&cp->cp_lock, flags);
rds_stats_inc(s_send_queued);
rds_stats_inc(s_send_pong);
/* schedule the send work on rds_wq */
rcu_read_lock();
if (!rds_destroy_pending(cp->cp_conn))
queue_delayed_work(rds_wq, &cp->cp_send_w, 1);
rcu_read_unlock();
rds_message_put(rm);
return 0;
out:
if (rm)
rds_message_put(rm);
return ret;
}
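/* A pong is just a probe with source port 0 sent to the given destination port. */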
int
rds_send_pong(struct rds_conn_path *cp, __be16 dport)
{
return rds_send_probe(cp, 0, dport, 0);
}
void
rds_send_ping(struct rds_connection *conn, int cp_index)
{
unsigned long flags;
struct rds_conn_path *cp = &conn->c_path[cp_index];
spin_lock_irqsave(&cp->cp_lock, flags);
if (conn->c_ping_triggered) {
spin_unlock_irqrestore(&cp->cp_lock, flags);
return;
}
conn->c_ping_triggered = 1;
spin_unlock_irqrestore(&cp->cp_lock, flags);
rds_send_probe(cp, cpu_to_be16(RDS_FLAG_PROBE_PORT), 0, 0);
}
EXPORT_SYMBOL_GPL(rds_send_ping);
| linux-master | net/rds/send.c |
/*
* Copyright (c) 2007, 2020 Oracle and/or its affiliates.
*
* This software is available to you under a choice of one of two
* licenses. You may choose to be licensed under the terms of the GNU
* General Public License (GPL) Version 2, available from the file
* COPYING in the main directory of this source tree, or the
* OpenIB.org BSD license below:
*
* Redistribution and use in source and binary forms, with or
* without modification, are permitted provided that the following
* conditions are met:
*
* - Redistributions of source code must retain the above
* copyright notice, this list of conditions and the following
* disclaimer.
*
* - Redistributions in binary form must reproduce the above
* copyright notice, this list of conditions and the following
* disclaimer in the documentation and/or other materials
* provided with the distribution.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
* EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
* MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
* NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
* BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
* ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
* CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
* SOFTWARE.
*
*/
#include <linux/pagemap.h>
#include <linux/slab.h>
#include <linux/rbtree.h>
#include <linux/dma-mapping.h> /* for DMA_*_DEVICE */
#include "rds.h"
/*
* XXX
* - build with sparse
* - should we detect duplicate keys on a socket? hmm.
* - an rdma is an mlock, apply rlimit?
*/
/*
* get the number of pages by looking at the page indices that the start and
* end addresses fall in.
*
* Returns 0 if the vec is invalid. It is invalid if the number of bytes
* causes the address to wrap or overflows an unsigned int. This comes
* from being stored in the 'length' member of 'struct scatterlist'.
*/
static unsigned int rds_pages_in_vec(struct rds_iovec *vec)
{
if ((vec->addr + vec->bytes <= vec->addr) ||
(vec->bytes > (u64)UINT_MAX))
return 0;
return ((vec->addr + vec->bytes + PAGE_SIZE - 1) >> PAGE_SHIFT) -
(vec->addr >> PAGE_SHIFT);
}
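/*
* Look up an MR by R_Key in the socket's rbtree. If 'insert' is given
* and the key is not already present, link it in and take a reference.
*/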
static struct rds_mr *rds_mr_tree_walk(struct rb_root *root, u64 key,
struct rds_mr *insert)
{
struct rb_node **p = &root->rb_node;
struct rb_node *parent = NULL;
struct rds_mr *mr;
while (*p) {
parent = *p;
mr = rb_entry(parent, struct rds_mr, r_rb_node);
if (key < mr->r_key)
p = &(*p)->rb_left;
else if (key > mr->r_key)
p = &(*p)->rb_right;
else
return mr;
}
if (insert) {
rb_link_node(&insert->r_rb_node, parent, p);
rb_insert_color(&insert->r_rb_node, root);
kref_get(&insert->r_kref);
}
return NULL;
}
/*
* Destroy the transport-specific part of a MR.
*/
static void rds_destroy_mr(struct rds_mr *mr)
{
struct rds_sock *rs = mr->r_sock;
void *trans_private = NULL;
unsigned long flags;
rdsdebug("RDS: destroy mr key is %x refcnt %u\n",
mr->r_key, kref_read(&mr->r_kref));
spin_lock_irqsave(&rs->rs_rdma_lock, flags);
if (!RB_EMPTY_NODE(&mr->r_rb_node))
rb_erase(&mr->r_rb_node, &rs->rs_rdma_keys);
trans_private = mr->r_trans_private;
mr->r_trans_private = NULL;
spin_unlock_irqrestore(&rs->rs_rdma_lock, flags);
if (trans_private)
mr->r_trans->free_mr(trans_private, mr->r_invalidate);
}
void __rds_put_mr_final(struct kref *kref)
{
struct rds_mr *mr = container_of(kref, struct rds_mr, r_kref);
rds_destroy_mr(mr);
kfree(mr);
}
/*
* By the time this is called we can't have any more ioctls called on
* the socket so we don't need to worry about racing with others.
*/
void rds_rdma_drop_keys(struct rds_sock *rs)
{
struct rds_mr *mr;
struct rb_node *node;
unsigned long flags;
/* Release any MRs associated with this socket */
spin_lock_irqsave(&rs->rs_rdma_lock, flags);
while ((node = rb_first(&rs->rs_rdma_keys))) {
mr = rb_entry(node, struct rds_mr, r_rb_node);
if (mr->r_trans == rs->rs_transport)
mr->r_invalidate = 0;
rb_erase(&mr->r_rb_node, &rs->rs_rdma_keys);
RB_CLEAR_NODE(&mr->r_rb_node);
spin_unlock_irqrestore(&rs->rs_rdma_lock, flags);
kref_put(&mr->r_kref, __rds_put_mr_final);
spin_lock_irqsave(&rs->rs_rdma_lock, flags);
}
spin_unlock_irqrestore(&rs->rs_rdma_lock, flags);
if (rs->rs_transport && rs->rs_transport->flush_mrs)
rs->rs_transport->flush_mrs();
}
/*
* Helper function to pin user pages.
*/
static int rds_pin_pages(unsigned long user_addr, unsigned int nr_pages,
struct page **pages, int write)
{
unsigned int gup_flags = FOLL_LONGTERM;
int ret;
if (write)
gup_flags |= FOLL_WRITE;
ret = pin_user_pages_fast(user_addr, nr_pages, gup_flags, pages);
if (ret >= 0 && ret < nr_pages) {
unpin_user_pages(pages, ret);
ret = -EFAULT;
}
return ret;
}
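/*
* Common implementation behind rds_get_mr(), rds_get_mr_for_dest() and
* rds_cmsg_rdma_map(): pin the user buffer (falling back to ODP when
* pinning isn't supported), hand the scatterlist to the transport's
* get_mr(), and publish the resulting <R_Key, offset> cookie.
*/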
static int __rds_rdma_map(struct rds_sock *rs, struct rds_get_mr_args *args,
u64 *cookie_ret, struct rds_mr **mr_ret,
struct rds_conn_path *cp)
{
struct rds_mr *mr = NULL, *found;
struct scatterlist *sg = NULL;
unsigned int nr_pages;
struct page **pages = NULL;
void *trans_private;
unsigned long flags;
rds_rdma_cookie_t cookie;
unsigned int nents = 0;
int need_odp = 0;
long i;
int ret;
if (ipv6_addr_any(&rs->rs_bound_addr) || !rs->rs_transport) {
ret = -ENOTCONN; /* XXX not a great errno */
goto out;
}
if (!rs->rs_transport->get_mr) {
ret = -EOPNOTSUPP;
goto out;
}
/* If the combination of the addr and size requested for this memory
* region causes an integer overflow, return error.
*/
if (((args->vec.addr + args->vec.bytes) < args->vec.addr) ||
PAGE_ALIGN(args->vec.addr + args->vec.bytes) <
(args->vec.addr + args->vec.bytes)) {
ret = -EINVAL;
goto out;
}
if (!can_do_mlock()) {
ret = -EPERM;
goto out;
}
nr_pages = rds_pages_in_vec(&args->vec);
if (nr_pages == 0) {
ret = -EINVAL;
goto out;
}
/* Restrict the size of mr irrespective of underlying transport
* To account for unaligned mr regions, subtract one from nr_pages
*/
if ((nr_pages - 1) > (RDS_MAX_MSG_SIZE >> PAGE_SHIFT)) {
ret = -EMSGSIZE;
goto out;
}
rdsdebug("RDS: get_mr addr %llx len %llu nr_pages %u\n",
args->vec.addr, args->vec.bytes, nr_pages);
/* XXX clamp nr_pages to limit the size of this alloc? */
pages = kcalloc(nr_pages, sizeof(struct page *), GFP_KERNEL);
if (!pages) {
ret = -ENOMEM;
goto out;
}
mr = kzalloc(sizeof(struct rds_mr), GFP_KERNEL);
if (!mr) {
ret = -ENOMEM;
goto out;
}
kref_init(&mr->r_kref);
RB_CLEAR_NODE(&mr->r_rb_node);
mr->r_trans = rs->rs_transport;
mr->r_sock = rs;
if (args->flags & RDS_RDMA_USE_ONCE)
mr->r_use_once = 1;
if (args->flags & RDS_RDMA_INVALIDATE)
mr->r_invalidate = 1;
if (args->flags & RDS_RDMA_READWRITE)
mr->r_write = 1;
/*
* Pin the pages that make up the user buffer and transfer the page
* pointers to the mr's sg array. We check to see if we've mapped
* the whole region after transferring the partial page references
* to the sg array so that we can have one page ref cleanup path.
*
* For now we have no flag that tells us whether the mapping is
* r/o or r/w. We need to assume r/w, or we'll do a lot of RDMA to
* the zero page.
*/
ret = rds_pin_pages(args->vec.addr, nr_pages, pages, 1);
if (ret == -EOPNOTSUPP) {
need_odp = 1;
} else if (ret <= 0) {
goto out;
} else {
nents = ret;
sg = kmalloc_array(nents, sizeof(*sg), GFP_KERNEL);
if (!sg) {
ret = -ENOMEM;
goto out;
}
WARN_ON(!nents);
sg_init_table(sg, nents);
/* Stick all pages into the scatterlist */
for (i = 0 ; i < nents; i++)
sg_set_page(&sg[i], pages[i], PAGE_SIZE, 0);
rdsdebug("RDS: trans_private nents is %u\n", nents);
}
/* Obtain a transport specific MR. If this succeeds, the
* s/g list is now owned by the MR.
* Note that dma_map() implies that pending writes are
* flushed to RAM, so no dma_sync is needed here. */
trans_private = rs->rs_transport->get_mr(
sg, nents, rs, &mr->r_key, cp ? cp->cp_conn : NULL,
args->vec.addr, args->vec.bytes,
need_odp ? ODP_ZEROBASED : ODP_NOT_NEEDED);
if (IS_ERR(trans_private)) {
/* In ODP case, we don't GUP pages, so don't need
* to release anything.
*/
if (!need_odp) {
unpin_user_pages(pages, nr_pages);
kfree(sg);
}
ret = PTR_ERR(trans_private);
goto out;
}
mr->r_trans_private = trans_private;
rdsdebug("RDS: get_mr put_user key is %x cookie_addr %p\n",
mr->r_key, (void *)(unsigned long) args->cookie_addr);
/* The user may pass us an unaligned address, but we can only
* map page aligned regions. So we keep the offset, and build
* a 64bit cookie containing <R_Key, offset> and pass that
* around. */
if (need_odp)
cookie = rds_rdma_make_cookie(mr->r_key, 0);
else
cookie = rds_rdma_make_cookie(mr->r_key,
args->vec.addr & ~PAGE_MASK);
if (cookie_ret)
*cookie_ret = cookie;
if (args->cookie_addr &&
put_user(cookie, (u64 __user *)(unsigned long)args->cookie_addr)) {
if (!need_odp) {
unpin_user_pages(pages, nr_pages);
kfree(sg);
}
ret = -EFAULT;
goto out;
}
/* Inserting the new MR into the rbtree bumps its
* reference count. */
spin_lock_irqsave(&rs->rs_rdma_lock, flags);
found = rds_mr_tree_walk(&rs->rs_rdma_keys, mr->r_key, mr);
spin_unlock_irqrestore(&rs->rs_rdma_lock, flags);
BUG_ON(found && found != mr);
rdsdebug("RDS: get_mr key is %x\n", mr->r_key);
if (mr_ret) {
kref_get(&mr->r_kref);
*mr_ret = mr;
}
ret = 0;
out:
kfree(pages);
if (mr)
kref_put(&mr->r_kref, __rds_put_mr_final);
return ret;
}
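/*
* Hedged, untested userspace sketch of the registration handled below.
* The RDS_GET_MR option name is assumed from the handler name; the field
* names come from the struct rds_get_mr_args usage in __rds_rdma_map():
*
*	struct rds_get_mr_args marg = {
*		.vec	     = { .addr  = (uint64_t)(uintptr_t)buf,
*				 .bytes = len },
*		.cookie_addr = (uint64_t)(uintptr_t)&cookie,
*		.flags	     = RDS_RDMA_USE_ONCE,
*	};
*	setsockopt(fd, SOL_RDS, RDS_GET_MR, &marg, sizeof(marg));
*/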
int rds_get_mr(struct rds_sock *rs, sockptr_t optval, int optlen)
{
struct rds_get_mr_args args;
if (optlen != sizeof(struct rds_get_mr_args))
return -EINVAL;
if (copy_from_sockptr(&args, optval, sizeof(struct rds_get_mr_args)))
return -EFAULT;
return __rds_rdma_map(rs, &args, NULL, NULL, NULL);
}
int rds_get_mr_for_dest(struct rds_sock *rs, sockptr_t optval, int optlen)
{
struct rds_get_mr_for_dest_args args;
struct rds_get_mr_args new_args;
if (optlen != sizeof(struct rds_get_mr_for_dest_args))
return -EINVAL;
if (copy_from_sockptr(&args, optval,
sizeof(struct rds_get_mr_for_dest_args)))
return -EFAULT;
/*
* Initially, just behave like get_mr().
* TODO: Implement get_mr as wrapper around this
* and deprecate it.
*/
new_args.vec = args.vec;
new_args.cookie_addr = args.cookie_addr;
new_args.flags = args.flags;
return __rds_rdma_map(rs, &new_args, NULL, NULL, NULL);
}
/*
* Free the MR indicated by the given R_Key
*/
int rds_free_mr(struct rds_sock *rs, sockptr_t optval, int optlen)
{
struct rds_free_mr_args args;
struct rds_mr *mr;
unsigned long flags;
if (optlen != sizeof(struct rds_free_mr_args))
return -EINVAL;
if (copy_from_sockptr(&args, optval, sizeof(struct rds_free_mr_args)))
return -EFAULT;
/* Special case - a null cookie means flush all unused MRs */
if (args.cookie == 0) {
if (!rs->rs_transport || !rs->rs_transport->flush_mrs)
return -EINVAL;
rs->rs_transport->flush_mrs();
return 0;
}
/* Look up the MR given its R_key and remove it from the rbtree
* so nobody else finds it.
* This should also prevent races with rds_rdma_unuse.
*/
spin_lock_irqsave(&rs->rs_rdma_lock, flags);
mr = rds_mr_tree_walk(&rs->rs_rdma_keys, rds_rdma_cookie_key(args.cookie), NULL);
if (mr) {
rb_erase(&mr->r_rb_node, &rs->rs_rdma_keys);
RB_CLEAR_NODE(&mr->r_rb_node);
if (args.flags & RDS_RDMA_INVALIDATE)
mr->r_invalidate = 1;
}
spin_unlock_irqrestore(&rs->rs_rdma_lock, flags);
if (!mr)
return -EINVAL;
kref_put(&mr->r_kref, __rds_put_mr_final);
return 0;
}
/*
* This is called when we receive an extension header that
* tells us this MR was used. It allows us to implement
* use_once semantics
*/
void rds_rdma_unuse(struct rds_sock *rs, u32 r_key, int force)
{
struct rds_mr *mr;
unsigned long flags;
int zot_me = 0;
spin_lock_irqsave(&rs->rs_rdma_lock, flags);
mr = rds_mr_tree_walk(&rs->rs_rdma_keys, r_key, NULL);
if (!mr) {
pr_debug("rds: trying to unuse MR with unknown r_key %u!\n",
r_key);
spin_unlock_irqrestore(&rs->rs_rdma_lock, flags);
return;
}
/* Get a reference so that the MR won't go away before calling
* sync_mr() below.
*/
kref_get(&mr->r_kref);
/* If it is going to be freed, remove it from the tree now so
* that no other thread can find it and free it.
*/
if (mr->r_use_once || force) {
rb_erase(&mr->r_rb_node, &rs->rs_rdma_keys);
RB_CLEAR_NODE(&mr->r_rb_node);
zot_me = 1;
}
spin_unlock_irqrestore(&rs->rs_rdma_lock, flags);
/* May have to issue a dma_sync on this memory region.
* Note we could avoid this if the operation was a RDMA READ,
* but at this point we can't tell. */
if (mr->r_trans->sync_mr)
mr->r_trans->sync_mr(mr->r_trans_private, DMA_FROM_DEVICE);
/* Release the reference held above. */
kref_put(&mr->r_kref, __rds_put_mr_final);
/* If the MR was marked as invalidate, this will
* trigger an async flush. */
if (zot_me)
kref_put(&mr->r_kref, __rds_put_mr_final);
}
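/*
* Release the resources attached to an rdma op: drop the ODP MR
* reference if one was used, otherwise unpin the pages (dirtying them,
* since an RDMA READ may have written to them), then free the notifier.
*/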
void rds_rdma_free_op(struct rm_rdma_op *ro)
{
unsigned int i;
if (ro->op_odp_mr) {
kref_put(&ro->op_odp_mr->r_kref, __rds_put_mr_final);
} else {
for (i = 0; i < ro->op_nents; i++) {
struct page *page = sg_page(&ro->op_sg[i]);
/* Mark page dirty if it was possibly modified, which
* is the case for an RDMA_READ, which copies from remote
* to local memory
*/
unpin_user_pages_dirty_lock(&page, 1, !ro->op_write);
}
}
kfree(ro->op_notifier);
ro->op_notifier = NULL;
ro->op_active = 0;
ro->op_odp_mr = NULL;
}
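/* Atomic ops pin exactly one page and always dirty it on release. */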
void rds_atomic_free_op(struct rm_atomic_op *ao)
{
struct page *page = sg_page(ao->op_sg);
/* Mark page dirty if it was possibly modified, which
* is the case for an RDMA_READ, which copies from remote
* to local memory */
unpin_user_pages_dirty_lock(&page, 1, true);
kfree(ao->op_notifier);
ao->op_notifier = NULL;
ao->op_active = 0;
}
/*
* Count the number of pages needed to describe an incoming iovec array.
*/
static int rds_rdma_pages(struct rds_iovec iov[], int nr_iovecs)
{
int tot_pages = 0;
unsigned int nr_pages;
unsigned int i;
/* figure out the number of pages in the vector */
for (i = 0; i < nr_iovecs; i++) {
nr_pages = rds_pages_in_vec(&iov[i]);
if (nr_pages == 0)
return -EINVAL;
tot_pages += nr_pages;
/*
* nr_pages for one entry is limited to (UINT_MAX>>PAGE_SHIFT)+1,
* so tot_pages cannot overflow without first going negative.
*/
if (tot_pages < 0)
return -EINVAL;
}
return tot_pages;
}
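/*
* Copy the application's iovec array in from userspace and report how
* much extra scatterlist space the rds_message will need for it.
*/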
int rds_rdma_extra_size(struct rds_rdma_args *args,
struct rds_iov_vector *iov)
{
struct rds_iovec *vec;
struct rds_iovec __user *local_vec;
int tot_pages = 0;
unsigned int nr_pages;
unsigned int i;
local_vec = (struct rds_iovec __user *)(unsigned long) args->local_vec_addr;
if (args->nr_local == 0)
return -EINVAL;
if (args->nr_local > UIO_MAXIOV)
return -EMSGSIZE;
iov->iov = kcalloc(args->nr_local,
sizeof(struct rds_iovec),
GFP_KERNEL);
if (!iov->iov)
return -ENOMEM;
vec = &iov->iov[0];
if (copy_from_user(vec, local_vec, args->nr_local *
sizeof(struct rds_iovec)))
return -EFAULT;
iov->len = args->nr_local;
/* figure out the number of pages in the vector */
for (i = 0; i < args->nr_local; i++, vec++) {
nr_pages = rds_pages_in_vec(vec);
if (nr_pages == 0)
return -EINVAL;
tot_pages += nr_pages;
/*
* nr_pages for one entry is limited to (UINT_MAX>>PAGE_SHIFT)+1,
* so tot_pages cannot overflow without first going negative.
*/
if (tot_pages < 0)
return -EINVAL;
}
return tot_pages * sizeof(struct scatterlist);
}
/*
* The application asks for a RDMA transfer.
* Extract all arguments and set up the rdma_op
*/
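/*
* Hedged, untested sketch of the userspace side of this cmsg (field
* names taken from the struct rds_rdma_args accesses below; 'cookie'
* would come from a prior MR registration):
*
*	struct rds_rdma_args args = {
*		.cookie		= cookie,
*		.remote_vec	= { .addr = raddr, .bytes = len },
*		.local_vec_addr	= (uint64_t)(uintptr_t)iovs,
*		.nr_local	= niov,
*		.flags		= RDS_RDMA_READWRITE | RDS_RDMA_NOTIFY_ME,
*		.user_token	= token,
*	};
*
* attached to sendmsg() as a cmsg with cmsg_level SOL_RDS and
* cmsg_type RDS_CMSG_RDMA_ARGS.
*/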
int rds_cmsg_rdma_args(struct rds_sock *rs, struct rds_message *rm,
struct cmsghdr *cmsg,
struct rds_iov_vector *vec)
{
struct rds_rdma_args *args;
struct rm_rdma_op *op = &rm->rdma;
int nr_pages;
unsigned int nr_bytes;
struct page **pages = NULL;
struct rds_iovec *iovs;
unsigned int i, j;
int ret = 0;
bool odp_supported = true;
if (cmsg->cmsg_len < CMSG_LEN(sizeof(struct rds_rdma_args))
|| rm->rdma.op_active)
return -EINVAL;
args = CMSG_DATA(cmsg);
if (ipv6_addr_any(&rs->rs_bound_addr)) {
ret = -ENOTCONN; /* XXX not a great errno */
goto out_ret;
}
if (args->nr_local > UIO_MAXIOV) {
ret = -EMSGSIZE;
goto out_ret;
}
if (vec->len != args->nr_local) {
ret = -EINVAL;
goto out_ret;
}
/* odp-mr is not supported for multiple requests within one message */
if (args->nr_local != 1)
odp_supported = false;
iovs = vec->iov;
nr_pages = rds_rdma_pages(iovs, args->nr_local);
if (nr_pages < 0) {
ret = -EINVAL;
goto out_ret;
}
pages = kcalloc(nr_pages, sizeof(struct page *), GFP_KERNEL);
if (!pages) {
ret = -ENOMEM;
goto out_ret;
}
op->op_write = !!(args->flags & RDS_RDMA_READWRITE);
op->op_fence = !!(args->flags & RDS_RDMA_FENCE);
op->op_notify = !!(args->flags & RDS_RDMA_NOTIFY_ME);
op->op_silent = !!(args->flags & RDS_RDMA_SILENT);
op->op_active = 1;
op->op_recverr = rs->rs_recverr;
op->op_odp_mr = NULL;
WARN_ON(!nr_pages);
op->op_sg = rds_message_alloc_sgs(rm, nr_pages);
if (IS_ERR(op->op_sg)) {
ret = PTR_ERR(op->op_sg);
goto out_pages;
}
if (op->op_notify || op->op_recverr) {
/* We allocate an uninitialized notifier here, because
* we don't want to do that in the completion handler. We
* would have to use GFP_ATOMIC there, and don't want to deal
* with failed allocations.
*/
op->op_notifier = kmalloc(sizeof(struct rds_notifier), GFP_KERNEL);
if (!op->op_notifier) {
ret = -ENOMEM;
goto out_pages;
}
op->op_notifier->n_user_token = args->user_token;
op->op_notifier->n_status = RDS_RDMA_SUCCESS;
}
/* The cookie contains the R_Key of the remote memory region, and
* optionally an offset into it. This is how we implement RDMA into
* unaligned memory.
* When setting up the RDMA, we need to add that offset to the
* destination address (which is really an offset into the MR)
* FIXME: We may want to move this into ib_rdma.c
*/
op->op_rkey = rds_rdma_cookie_key(args->cookie);
op->op_remote_addr = args->remote_vec.addr + rds_rdma_cookie_offset(args->cookie);
nr_bytes = 0;
rdsdebug("RDS: rdma prepare nr_local %llu rva %llx rkey %x\n",
(unsigned long long)args->nr_local,
(unsigned long long)args->remote_vec.addr,
op->op_rkey);
for (i = 0; i < args->nr_local; i++) {
struct rds_iovec *iov = &iovs[i];
/* no need to check: rds_rdma_pages() already verified nr will be nonzero */
unsigned int nr = rds_pages_in_vec(iov);
rs->rs_user_addr = iov->addr;
rs->rs_user_bytes = iov->bytes;
/* If it's a WRITE operation, we want to pin the pages for reading.
* If it's a READ operation, we need to pin the pages for writing.
*/
ret = rds_pin_pages(iov->addr, nr, pages, !op->op_write);
if ((!odp_supported && ret <= 0) ||
(odp_supported && ret <= 0 && ret != -EOPNOTSUPP))
goto out_pages;
if (ret == -EOPNOTSUPP) {
struct rds_mr *local_odp_mr;
if (!rs->rs_transport->get_mr) {
ret = -EOPNOTSUPP;
goto out_pages;
}
local_odp_mr =
kzalloc(sizeof(*local_odp_mr), GFP_KERNEL);
if (!local_odp_mr) {
ret = -ENOMEM;
goto out_pages;
}
RB_CLEAR_NODE(&local_odp_mr->r_rb_node);
kref_init(&local_odp_mr->r_kref);
local_odp_mr->r_trans = rs->rs_transport;
local_odp_mr->r_sock = rs;
local_odp_mr->r_trans_private =
rs->rs_transport->get_mr(
NULL, 0, rs, &local_odp_mr->r_key, NULL,
iov->addr, iov->bytes, ODP_VIRTUAL);
if (IS_ERR(local_odp_mr->r_trans_private)) {
ret = PTR_ERR(local_odp_mr->r_trans_private);
rdsdebug("get_mr ret %d %p\"", ret,
local_odp_mr->r_trans_private);
kfree(local_odp_mr);
ret = -EOPNOTSUPP;
goto out_pages;
}
rdsdebug("Need odp; local_odp_mr %p trans_private %p\n",
local_odp_mr, local_odp_mr->r_trans_private);
op->op_odp_mr = local_odp_mr;
op->op_odp_addr = iov->addr;
}
rdsdebug("RDS: nr_bytes %u nr %u iov->bytes %llu iov->addr %llx\n",
nr_bytes, nr, iov->bytes, iov->addr);
nr_bytes += iov->bytes;
for (j = 0; j < nr; j++) {
unsigned int offset = iov->addr & ~PAGE_MASK;
struct scatterlist *sg;
sg = &op->op_sg[op->op_nents + j];
sg_set_page(sg, pages[j],
min_t(unsigned int, iov->bytes, PAGE_SIZE - offset),
offset);
sg_dma_len(sg) = sg->length;
rdsdebug("RDS: sg->offset %x sg->len %x iov->addr %llx iov->bytes %llu\n",
sg->offset, sg->length, iov->addr, iov->bytes);
iov->addr += sg->length;
iov->bytes -= sg->length;
}
op->op_nents += nr;
}
if (nr_bytes > args->remote_vec.bytes) {
rdsdebug("RDS nr_bytes %u remote_bytes %u do not match\n",
nr_bytes,
(unsigned int) args->remote_vec.bytes);
ret = -EINVAL;
goto out_pages;
}
op->op_bytes = nr_bytes;
ret = 0;
out_pages:
kfree(pages);
out_ret:
if (ret)
rds_rdma_free_op(op);
else
rds_stats_inc(s_send_rdma);
return ret;
}
/*
* The application wants us to pass an RDMA destination (aka MR)
* to the remote
*/
int rds_cmsg_rdma_dest(struct rds_sock *rs, struct rds_message *rm,
struct cmsghdr *cmsg)
{
unsigned long flags;
struct rds_mr *mr;
u32 r_key;
int err = 0;
if (cmsg->cmsg_len < CMSG_LEN(sizeof(rds_rdma_cookie_t)) ||
rm->m_rdma_cookie != 0)
return -EINVAL;
memcpy(&rm->m_rdma_cookie, CMSG_DATA(cmsg), sizeof(rm->m_rdma_cookie));
/* We are reusing a previously mapped MR here. Most likely, the
* application has written to the buffer, so we need to explicitly
* flush those writes to RAM. Otherwise the HCA may not see them
* when doing a DMA from that buffer.
*/
r_key = rds_rdma_cookie_key(rm->m_rdma_cookie);
spin_lock_irqsave(&rs->rs_rdma_lock, flags);
mr = rds_mr_tree_walk(&rs->rs_rdma_keys, r_key, NULL);
if (!mr)
err = -EINVAL; /* invalid r_key */
else
kref_get(&mr->r_kref);
spin_unlock_irqrestore(&rs->rs_rdma_lock, flags);
if (mr) {
mr->r_trans->sync_mr(mr->r_trans_private,
DMA_TO_DEVICE);
rm->rdma.op_rdma_mr = mr;
}
return err;
}
/*
* The application passes us an address range it wants to enable RDMA
* to/from. We map the area, and save the <R_Key,offset> pair
* in rm->m_rdma_cookie. This causes it to be sent along to the peer
* in an extension header.
*/
int rds_cmsg_rdma_map(struct rds_sock *rs, struct rds_message *rm,
struct cmsghdr *cmsg)
{
if (cmsg->cmsg_len < CMSG_LEN(sizeof(struct rds_get_mr_args)) ||
rm->m_rdma_cookie != 0)
return -EINVAL;
return __rds_rdma_map(rs, CMSG_DATA(cmsg), &rm->m_rdma_cookie,
&rm->rdma.op_rdma_mr, rm->m_conn_path);
}
/*
* Fill in rds_message for an atomic request.
*/
int rds_cmsg_atomic(struct rds_sock *rs, struct rds_message *rm,
struct cmsghdr *cmsg)
{
struct page *page = NULL;
struct rds_atomic_args *args;
int ret = 0;
if (cmsg->cmsg_len < CMSG_LEN(sizeof(struct rds_atomic_args))
|| rm->atomic.op_active)
return -EINVAL;
args = CMSG_DATA(cmsg);
/* Nonmasked & masked cmsg ops converted to masked hw ops */
switch (cmsg->cmsg_type) {
case RDS_CMSG_ATOMIC_FADD:
rm->atomic.op_type = RDS_ATOMIC_TYPE_FADD;
rm->atomic.op_m_fadd.add = args->fadd.add;
rm->atomic.op_m_fadd.nocarry_mask = 0;
break;
case RDS_CMSG_MASKED_ATOMIC_FADD:
rm->atomic.op_type = RDS_ATOMIC_TYPE_FADD;
rm->atomic.op_m_fadd.add = args->m_fadd.add;
rm->atomic.op_m_fadd.nocarry_mask = args->m_fadd.nocarry_mask;
break;
case RDS_CMSG_ATOMIC_CSWP:
rm->atomic.op_type = RDS_ATOMIC_TYPE_CSWP;
rm->atomic.op_m_cswp.compare = args->cswp.compare;
rm->atomic.op_m_cswp.swap = args->cswp.swap;
rm->atomic.op_m_cswp.compare_mask = ~0;
rm->atomic.op_m_cswp.swap_mask = ~0;
break;
case RDS_CMSG_MASKED_ATOMIC_CSWP:
rm->atomic.op_type = RDS_ATOMIC_TYPE_CSWP;
rm->atomic.op_m_cswp.compare = args->m_cswp.compare;
rm->atomic.op_m_cswp.swap = args->m_cswp.swap;
rm->atomic.op_m_cswp.compare_mask = args->m_cswp.compare_mask;
rm->atomic.op_m_cswp.swap_mask = args->m_cswp.swap_mask;
break;
default:
BUG(); /* should never happen */
}
rm->atomic.op_notify = !!(args->flags & RDS_RDMA_NOTIFY_ME);
rm->atomic.op_silent = !!(args->flags & RDS_RDMA_SILENT);
rm->atomic.op_active = 1;
rm->atomic.op_recverr = rs->rs_recverr;
rm->atomic.op_sg = rds_message_alloc_sgs(rm, 1);
if (IS_ERR(rm->atomic.op_sg)) {
ret = PTR_ERR(rm->atomic.op_sg);
goto err;
}
/* verify 8 byte-aligned */
if (args->local_addr & 0x7) {
ret = -EFAULT;
goto err;
}
ret = rds_pin_pages(args->local_addr, 1, &page, 1);
if (ret != 1)
goto err;
ret = 0;
sg_set_page(rm->atomic.op_sg, page, 8, offset_in_page(args->local_addr));
if (rm->atomic.op_notify || rm->atomic.op_recverr) {
/* We allocate an uninitialized notifier here, because
* we don't want to do that in the completion handler. We
* would have to use GFP_ATOMIC there, and don't want to deal
* with failed allocations.
*/
rm->atomic.op_notifier = kmalloc(sizeof(*rm->atomic.op_notifier), GFP_KERNEL);
if (!rm->atomic.op_notifier) {
ret = -ENOMEM;
goto err;
}
rm->atomic.op_notifier->n_user_token = args->user_token;
rm->atomic.op_notifier->n_status = RDS_RDMA_SUCCESS;
}
rm->atomic.op_rkey = rds_rdma_cookie_key(args->cookie);
rm->atomic.op_remote_addr = args->remote_addr + rds_rdma_cookie_offset(args->cookie);
return ret;
err:
if (page)
unpin_user_page(page);
rm->atomic.op_active = 0;
kfree(rm->atomic.op_notifier);
return ret;
}
| linux-master | net/rds/rdma.c |
/*
* Copyright (c) 2006, 2017 Oracle and/or its affiliates. All rights reserved.
*
* This software is available to you under a choice of one of two
* licenses. You may choose to be licensed under the terms of the GNU
* General Public License (GPL) Version 2, available from the file
* COPYING in the main directory of this source tree, or the
* OpenIB.org BSD license below:
*
* Redistribution and use in source and binary forms, with or
* without modification, are permitted provided that the following
* conditions are met:
*
* - Redistributions of source code must retain the above
* copyright notice, this list of conditions and the following
* disclaimer.
*
* - Redistributions in binary form must reproduce the above
* copyright notice, this list of conditions and the following
* disclaimer in the documentation and/or other materials
* provided with the distribution.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
* EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
* MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
* NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
* BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
* ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
* CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
* SOFTWARE.
*
*/
#include <linux/kernel.h>
#include <linux/slab.h>
#include <linux/in.h>
#include <net/net_namespace.h>
#include <net/netns/generic.h>
#include <linux/ipv6.h>
#include "rds_single_path.h"
#include "rds.h"
#include "loop.h"
static DEFINE_SPINLOCK(loop_conns_lock);
static LIST_HEAD(loop_conns);
static atomic_t rds_loop_unloading = ATOMIC_INIT(0);
static void rds_loop_set_unloading(void)
{
atomic_set(&rds_loop_unloading, 1);
}
static bool rds_loop_is_unloading(struct rds_connection *conn)
{
return atomic_read(&rds_loop_unloading) != 0;
}
/*
* This 'loopback' transport is a special case for flows that originate
* and terminate on the same machine.
*
* Connection build-up notices if the destination address is thought of
* as a local address by a transport. At that time it decides to use the
* loopback transport instead of the bound transport of the sending socket.
*
* The loopback transport's sending path just hands the sent rds_message
* straight to the receiving path via an embedded rds_incoming.
*/
/*
* Usually a message transits both the sender and receiver's conns as it
* flows to the receiver. In the loopback case, though, the receive path
* is handed the sending conn so the sense of the addresses is reversed.
*/
static int rds_loop_xmit(struct rds_connection *conn, struct rds_message *rm,
unsigned int hdr_off, unsigned int sg,
unsigned int off)
{
struct scatterlist *sgp = &rm->data.op_sg[sg];
int ret = sizeof(struct rds_header) +
be32_to_cpu(rm->m_inc.i_hdr.h_len);
/* Do not send cong updates to loopback */
if (rm->m_inc.i_hdr.h_flags & RDS_FLAG_CONG_BITMAP) {
rds_cong_map_updated(conn->c_fcong, ~(u64) 0);
ret = min_t(int, ret, sgp->length - conn->c_xmit_data_off);
goto out;
}
BUG_ON(hdr_off || sg || off);
rds_inc_init(&rm->m_inc, conn, &conn->c_laddr);
/* For the embedded inc. Matching put is in loop_inc_free() */
rds_message_addref(rm);
rds_recv_incoming(conn, &conn->c_laddr, &conn->c_faddr, &rm->m_inc,
GFP_KERNEL);
rds_send_drop_acked(conn, be64_to_cpu(rm->m_inc.i_hdr.h_sequence),
NULL);
rds_inc_put(&rm->m_inc);
out:
return ret;
}
/*
* See rds_loop_xmit(). Since our inc is embedded in the rm, we
* make sure the rm lives at least until the inc is done.
*/
static void rds_loop_inc_free(struct rds_incoming *inc)
{
struct rds_message *rm = container_of(inc, struct rds_message, m_inc);
rds_message_put(rm);
}
/* we need to at least give the thread something to succeed */
static int rds_loop_recv_path(struct rds_conn_path *cp)
{
return 0;
}
struct rds_loop_connection {
struct list_head loop_node;
struct rds_connection *conn;
};
/*
* Even the loopback transport needs to keep track of its connections,
* so it can call rds_conn_destroy() on them on exit. N.B. there are
* 1+ loopback addresses (127.*.*.*) so it's not a bug to have
* multiple loopback conns allocated, although rather useless.
*/
static int rds_loop_conn_alloc(struct rds_connection *conn, gfp_t gfp)
{
struct rds_loop_connection *lc;
unsigned long flags;
lc = kzalloc(sizeof(struct rds_loop_connection), gfp);
if (!lc)
return -ENOMEM;
INIT_LIST_HEAD(&lc->loop_node);
lc->conn = conn;
conn->c_transport_data = lc;
spin_lock_irqsave(&loop_conns_lock, flags);
list_add_tail(&lc->loop_node, &loop_conns);
spin_unlock_irqrestore(&loop_conns_lock, flags);
return 0;
}
static void rds_loop_conn_free(void *arg)
{
struct rds_loop_connection *lc = arg;
unsigned long flags;
rdsdebug("lc %p\n", lc);
spin_lock_irqsave(&loop_conns_lock, flags);
list_del(&lc->loop_node);
spin_unlock_irqrestore(&loop_conns_lock, flags);
kfree(lc);
}
static int rds_loop_conn_path_connect(struct rds_conn_path *cp)
{
rds_connect_complete(cp->cp_conn);
return 0;
}
static void rds_loop_conn_path_shutdown(struct rds_conn_path *cp)
{
}
void rds_loop_exit(void)
{
struct rds_loop_connection *lc, *_lc;
LIST_HEAD(tmp_list);
rds_loop_set_unloading();
synchronize_rcu();
/* avoid calling conn_destroy with irqs off */
spin_lock_irq(&loop_conns_lock);
list_splice(&loop_conns, &tmp_list);
INIT_LIST_HEAD(&loop_conns);
spin_unlock_irq(&loop_conns_lock);
list_for_each_entry_safe(lc, _lc, &tmp_list, loop_node) {
WARN_ON(lc->conn->c_passive);
rds_conn_destroy(lc->conn);
}
}
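/* Tear down every loopback connection that belongs to the exiting netns. */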
static void rds_loop_kill_conns(struct net *net)
{
struct rds_loop_connection *lc, *_lc;
LIST_HEAD(tmp_list);
spin_lock_irq(&loop_conns_lock);
list_for_each_entry_safe(lc, _lc, &loop_conns, loop_node) {
struct net *c_net = read_pnet(&lc->conn->c_net);
if (net != c_net)
continue;
list_move_tail(&lc->loop_node, &tmp_list);
}
spin_unlock_irq(&loop_conns_lock);
list_for_each_entry_safe(lc, _lc, &tmp_list, loop_node) {
WARN_ON(lc->conn->c_passive);
rds_conn_destroy(lc->conn);
}
}
static void __net_exit rds_loop_exit_net(struct net *net)
{
rds_loop_kill_conns(net);
}
static struct pernet_operations rds_loop_net_ops = {
.exit = rds_loop_exit_net,
};
int rds_loop_net_init(void)
{
return register_pernet_device(&rds_loop_net_ops);
}
void rds_loop_net_exit(void)
{
unregister_pernet_device(&rds_loop_net_ops);
}
/*
* This is missing .xmit_* because loop doesn't go through generic
* rds_send_xmit() and doesn't call rds_recv_incoming(). .listen_stop and
* .laddr_check are missing because transport.c doesn't iterate over
* rds_loop_transport.
*/
struct rds_transport rds_loop_transport = {
.xmit = rds_loop_xmit,
.recv_path = rds_loop_recv_path,
.conn_alloc = rds_loop_conn_alloc,
.conn_free = rds_loop_conn_free,
.conn_path_connect = rds_loop_conn_path_connect,
.conn_path_shutdown = rds_loop_conn_path_shutdown,
.inc_copy_to_user = rds_message_inc_copy_to_user,
.inc_free = rds_loop_inc_free,
.t_name = "loopback",
.t_type = RDS_TRANS_LOOP,
.t_unloading = rds_loop_is_unloading,
};
| linux-master | net/rds/loop.c |
/*
* Copyright (c) 2006 Oracle. All rights reserved.
*
* This software is available to you under a choice of one of two
* licenses. You may choose to be licensed under the terms of the GNU
* General Public License (GPL) Version 2, available from the file
* COPYING in the main directory of this source tree, or the
* OpenIB.org BSD license below:
*
* Redistribution and use in source and binary forms, with or
* without modification, are permitted provided that the following
* conditions are met:
*
* - Redistributions of source code must retain the above
* copyright notice, this list of conditions and the following
* disclaimer.
*
* - Redistributions in binary form must reproduce the above
* copyright notice, this list of conditions and the following
* disclaimer in the documentation and/or other materials
* provided with the distribution.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
* EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
* MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
* NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
* BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
* ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
* CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
* SOFTWARE.
*
*/
#include <linux/kernel.h>
#include <linux/sysctl.h>
#include <linux/proc_fs.h>
#include "rds.h"
static struct ctl_table_header *rds_sysctl_reg_table;
static unsigned long rds_sysctl_reconnect_min = 1;
static unsigned long rds_sysctl_reconnect_max = ~0UL;
unsigned long rds_sysctl_reconnect_min_jiffies;
unsigned long rds_sysctl_reconnect_max_jiffies = HZ;
unsigned int rds_sysctl_max_unacked_packets = 8;
unsigned int rds_sysctl_max_unacked_bytes = (16 << 20);
unsigned int rds_sysctl_ping_enable = 1;
static struct ctl_table rds_sysctl_rds_table[] = {
{
.procname = "reconnect_min_delay_ms",
.data = &rds_sysctl_reconnect_min_jiffies,
.maxlen = sizeof(unsigned long),
.mode = 0644,
.proc_handler = proc_doulongvec_ms_jiffies_minmax,
.extra1 = &rds_sysctl_reconnect_min,
.extra2 = &rds_sysctl_reconnect_max_jiffies,
},
{
.procname = "reconnect_max_delay_ms",
.data = &rds_sysctl_reconnect_max_jiffies,
.maxlen = sizeof(unsigned long),
.mode = 0644,
.proc_handler = proc_doulongvec_ms_jiffies_minmax,
.extra1 = &rds_sysctl_reconnect_min_jiffies,
.extra2 = &rds_sysctl_reconnect_max,
},
{
.procname = "max_unacked_packets",
.data = &rds_sysctl_max_unacked_packets,
.maxlen = sizeof(int),
.mode = 0644,
.proc_handler = proc_dointvec,
},
{
.procname = "max_unacked_bytes",
.data = &rds_sysctl_max_unacked_bytes,
.maxlen = sizeof(int),
.mode = 0644,
.proc_handler = proc_dointvec,
},
{
.procname = "ping_enable",
.data = &rds_sysctl_ping_enable,
.maxlen = sizeof(int),
.mode = 0644,
.proc_handler = proc_dointvec,
},
{ }
};
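/*
 * These entries surface under /proc/sys/net/rds/ once rds_sysctl_init()
 * below registers the table against init_net, e.g.
 * /proc/sys/net/rds/reconnect_min_delay_ms and
 * /proc/sys/net/rds/max_unacked_packets. The *_jiffies variables above
 * hold the converted values that the rest of RDS actually reads.
 */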
void rds_sysctl_exit(void)
{
unregister_net_sysctl_table(rds_sysctl_reg_table);
}
int rds_sysctl_init(void)
{
rds_sysctl_reconnect_min = msecs_to_jiffies(1);
rds_sysctl_reconnect_min_jiffies = rds_sysctl_reconnect_min;
rds_sysctl_reg_table =
register_net_sysctl(&init_net, "net/rds", rds_sysctl_rds_table);
if (!rds_sysctl_reg_table)
return -ENOMEM;
return 0;
}
| linux-master | net/rds/sysctl.c |
/*
* Copyright (c) 2006, 2019 Oracle and/or its affiliates. All rights reserved.
*
* This software is available to you under a choice of one of two
* licenses. You may choose to be licensed under the terms of the GNU
* General Public License (GPL) Version 2, available from the file
* COPYING in the main directory of this source tree, or the
* OpenIB.org BSD license below:
*
* Redistribution and use in source and binary forms, with or
* without modification, are permitted provided that the following
* conditions are met:
*
* - Redistributions of source code must retain the above
* copyright notice, this list of conditions and the following
* disclaimer.
*
* - Redistributions in binary form must reproduce the above
* copyright notice, this list of conditions and the following
* disclaimer in the documentation and/or other materials
* provided with the distribution.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
* EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
* MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
* NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
* BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
* ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
* CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
* SOFTWARE.
*
*/
#include <linux/kernel.h>
#include <net/sock.h>
#include <linux/in.h>
#include <linux/ipv6.h>
#include <linux/if_arp.h>
#include <linux/jhash.h>
#include <linux/ratelimit.h>
#include "rds.h"
static struct rhashtable bind_hash_table;
static const struct rhashtable_params ht_parms = {
.nelem_hint = 768,
.key_len = RDS_BOUND_KEY_LEN,
.key_offset = offsetof(struct rds_sock, rs_bound_key),
.head_offset = offsetof(struct rds_sock, rs_bound_node),
.max_size = 16384,
.min_size = 1024,
};
/* Create a key for the bind hash table manipulation. Port is in network byte
* order.
*/
static inline void __rds_create_bind_key(u8 *key, const struct in6_addr *addr,
__be16 port, __u32 scope_id)
{
memcpy(key, addr, sizeof(*addr));
key += sizeof(*addr);
memcpy(key, &port, sizeof(port));
key += sizeof(port);
memcpy(key, &scope_id, sizeof(scope_id));
}
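/*
 * Sketch of the resulting key layout, assuming RDS_BOUND_KEY_LEN in rds.h
 * covers exactly these three fields:
 *
 *	byte 0               16        18           22
 *	     | in6_addr (16) | port(2) | scope_id(4) |
 *
 * IPv4 binds store a v4-mapped address in the in6_addr slot, so both
 * address families share the one bind hash table.
 */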
/*
* Return the rds_sock bound at the given local address.
*
* The rx path can race with rds_release. We notice if rds_release() has
* marked this socket and don't return a rs ref to the rx path.
*/
struct rds_sock *rds_find_bound(const struct in6_addr *addr, __be16 port,
__u32 scope_id)
{
u8 key[RDS_BOUND_KEY_LEN];
struct rds_sock *rs;
__rds_create_bind_key(key, addr, port, scope_id);
rcu_read_lock();
rs = rhashtable_lookup(&bind_hash_table, key, ht_parms);
if (rs && (sock_flag(rds_rs_to_sk(rs), SOCK_DEAD) ||
!refcount_inc_not_zero(&rds_rs_to_sk(rs)->sk_refcnt)))
rs = NULL;
rcu_read_unlock();
rdsdebug("returning rs %p for %pI6c:%u\n", rs, addr,
ntohs(port));
return rs;
}
/* Returns 0 on success, with *port set to the bound port, or a -ve errno. */
static int rds_add_bound(struct rds_sock *rs, const struct in6_addr *addr,
__be16 *port, __u32 scope_id)
{
int ret = -EADDRINUSE;
u16 rover, last;
u8 key[RDS_BOUND_KEY_LEN];
if (*port != 0) {
rover = be16_to_cpu(*port);
if (rover == RDS_FLAG_PROBE_PORT)
return -EINVAL;
last = rover;
} else {
rover = max_t(u16, get_random_u16(), 2);
last = rover - 1;
}
do {
if (rover == 0)
rover++;
if (rover == RDS_FLAG_PROBE_PORT)
continue;
__rds_create_bind_key(key, addr, cpu_to_be16(rover),
scope_id);
if (rhashtable_lookup_fast(&bind_hash_table, key, ht_parms))
continue;
memcpy(rs->rs_bound_key, key, sizeof(rs->rs_bound_key));
rs->rs_bound_addr = *addr;
net_get_random_once(&rs->rs_hash_initval,
sizeof(rs->rs_hash_initval));
rs->rs_bound_port = cpu_to_be16(rover);
rs->rs_bound_node.next = NULL;
rds_sock_addref(rs);
if (!rhashtable_insert_fast(&bind_hash_table,
&rs->rs_bound_node, ht_parms)) {
*port = rs->rs_bound_port;
rs->rs_bound_scope_id = scope_id;
ret = 0;
rdsdebug("rs %p binding to %pI6c:%d\n",
rs, addr, (int)ntohs(*port));
break;
} else {
rs->rs_bound_addr = in6addr_any;
rds_sock_put(rs);
ret = -ENOMEM;
break;
}
} while (rover++ != last);
return ret;
}
void rds_remove_bound(struct rds_sock *rs)
{
if (ipv6_addr_any(&rs->rs_bound_addr))
return;
rdsdebug("rs %p unbinding from %pI6c:%d\n",
rs, &rs->rs_bound_addr,
ntohs(rs->rs_bound_port));
rhashtable_remove_fast(&bind_hash_table, &rs->rs_bound_node, ht_parms);
rds_sock_put(rs);
rs->rs_bound_addr = in6addr_any;
}
int rds_bind(struct socket *sock, struct sockaddr *uaddr, int addr_len)
{
struct sock *sk = sock->sk;
struct rds_sock *rs = rds_sk_to_rs(sk);
struct in6_addr v6addr, *binding_addr;
struct rds_transport *trans;
__u32 scope_id = 0;
int ret = 0;
__be16 port;
/* We allow an RDS socket to be bound to either IPv4 or IPv6
* address.
*/
if (addr_len < offsetofend(struct sockaddr, sa_family))
return -EINVAL;
if (uaddr->sa_family == AF_INET) {
struct sockaddr_in *sin = (struct sockaddr_in *)uaddr;
if (addr_len < sizeof(struct sockaddr_in) ||
sin->sin_addr.s_addr == htonl(INADDR_ANY) ||
sin->sin_addr.s_addr == htonl(INADDR_BROADCAST) ||
ipv4_is_multicast(sin->sin_addr.s_addr))
return -EINVAL;
ipv6_addr_set_v4mapped(sin->sin_addr.s_addr, &v6addr);
binding_addr = &v6addr;
port = sin->sin_port;
#if IS_ENABLED(CONFIG_IPV6)
} else if (uaddr->sa_family == AF_INET6) {
struct sockaddr_in6 *sin6 = (struct sockaddr_in6 *)uaddr;
int addr_type;
if (addr_len < sizeof(struct sockaddr_in6))
return -EINVAL;
addr_type = ipv6_addr_type(&sin6->sin6_addr);
if (!(addr_type & IPV6_ADDR_UNICAST)) {
__be32 addr4;
if (!(addr_type & IPV6_ADDR_MAPPED))
return -EINVAL;
/* It is a mapped address. Need to do some sanity
* checks.
*/
addr4 = sin6->sin6_addr.s6_addr32[3];
if (addr4 == htonl(INADDR_ANY) ||
addr4 == htonl(INADDR_BROADCAST) ||
ipv4_is_multicast(addr4))
return -EINVAL;
}
/* The scope ID must be specified for link local address. */
if (addr_type & IPV6_ADDR_LINKLOCAL) {
if (sin6->sin6_scope_id == 0)
return -EINVAL;
scope_id = sin6->sin6_scope_id;
}
binding_addr = &sin6->sin6_addr;
port = sin6->sin6_port;
#endif
} else {
return -EINVAL;
}
lock_sock(sk);
/* RDS socket does not allow re-binding. */
if (!ipv6_addr_any(&rs->rs_bound_addr)) {
ret = -EINVAL;
goto out;
}
/* Socket is connected. The binding address should have the same
	 * scope ID as the connected address, except when one of them is a
	 * non-link-local address (scope_id is 0).
*/
if (!ipv6_addr_any(&rs->rs_conn_addr) && scope_id &&
rs->rs_bound_scope_id &&
scope_id != rs->rs_bound_scope_id) {
ret = -EINVAL;
goto out;
}
/* The transport can be set using SO_RDS_TRANSPORT option before the
* socket is bound.
*/
if (rs->rs_transport) {
trans = rs->rs_transport;
if (!trans->laddr_check ||
trans->laddr_check(sock_net(sock->sk),
binding_addr, scope_id) != 0) {
ret = -ENOPROTOOPT;
goto out;
}
} else {
trans = rds_trans_get_preferred(sock_net(sock->sk),
binding_addr, scope_id);
if (!trans) {
ret = -EADDRNOTAVAIL;
pr_info_ratelimited("RDS: %s could not find a transport for %pI6c, load rds_tcp or rds_rdma?\n",
__func__, binding_addr);
goto out;
}
rs->rs_transport = trans;
}
sock_set_flag(sk, SOCK_RCU_FREE);
ret = rds_add_bound(rs, binding_addr, &port, scope_id);
if (ret)
rs->rs_transport = NULL;
out:
release_sock(sk);
return ret;
}
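/*
 * Minimal userspace sketch of binding an RDS socket (error handling
 * omitted; assumes an RDS transport such as rds_tcp or rds_rdma is loaded
 * and the address is local to this host):
 *
 *	int fd = socket(PF_RDS, SOCK_SEQPACKET, 0);
 *	struct sockaddr_in sin = {
 *		.sin_family = AF_INET,
 *		.sin_port = htons(4000),
 *	};
 *	inet_pton(AF_INET, "192.168.1.10", &sin.sin_addr);
 *	bind(fd, (struct sockaddr *)&sin, sizeof(sin));
 *
 * Passing sin_port == 0 lets rds_add_bound() pick a random free port,
 * mirroring normal socket semantics.
 */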
void rds_bind_lock_destroy(void)
{
rhashtable_destroy(&bind_hash_table);
}
int rds_bind_lock_init(void)
{
return rhashtable_init(&bind_hash_table, &ht_parms);
}
| linux-master | net/rds/bind.c |
/*
* Copyright (c) 2006, 2018 Oracle and/or its affiliates. All rights reserved.
*
* This software is available to you under a choice of one of two
* licenses. You may choose to be licensed under the terms of the GNU
* General Public License (GPL) Version 2, available from the file
* COPYING in the main directory of this source tree, or the
* OpenIB.org BSD license below:
*
* Redistribution and use in source and binary forms, with or
* without modification, are permitted provided that the following
* conditions are met:
*
* - Redistributions of source code must retain the above
* copyright notice, this list of conditions and the following
* disclaimer.
*
* - Redistributions in binary form must reproduce the above
* copyright notice, this list of conditions and the following
* disclaimer in the documentation and/or other materials
* provided with the distribution.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
* EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
* MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
* NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
* BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
* ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
* CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
* SOFTWARE.
*
*/
#include <linux/kernel.h>
#include <linux/slab.h>
#include <linux/rculist.h>
#include <linux/llist.h>
#include "rds_single_path.h"
#include "ib_mr.h"
#include "rds.h"
struct workqueue_struct *rds_ib_mr_wq;
struct rds_ib_dereg_odp_mr {
struct work_struct work;
struct ib_mr *mr;
};
static void rds_ib_odp_mr_worker(struct work_struct *work);
static struct rds_ib_device *rds_ib_get_device(__be32 ipaddr)
{
struct rds_ib_device *rds_ibdev;
struct rds_ib_ipaddr *i_ipaddr;
rcu_read_lock();
list_for_each_entry_rcu(rds_ibdev, &rds_ib_devices, list) {
list_for_each_entry_rcu(i_ipaddr, &rds_ibdev->ipaddr_list, list) {
if (i_ipaddr->ipaddr == ipaddr) {
refcount_inc(&rds_ibdev->refcount);
rcu_read_unlock();
return rds_ibdev;
}
}
}
rcu_read_unlock();
return NULL;
}
static int rds_ib_add_ipaddr(struct rds_ib_device *rds_ibdev, __be32 ipaddr)
{
struct rds_ib_ipaddr *i_ipaddr;
i_ipaddr = kmalloc(sizeof *i_ipaddr, GFP_KERNEL);
if (!i_ipaddr)
return -ENOMEM;
i_ipaddr->ipaddr = ipaddr;
spin_lock_irq(&rds_ibdev->spinlock);
list_add_tail_rcu(&i_ipaddr->list, &rds_ibdev->ipaddr_list);
spin_unlock_irq(&rds_ibdev->spinlock);
return 0;
}
static void rds_ib_remove_ipaddr(struct rds_ib_device *rds_ibdev, __be32 ipaddr)
{
struct rds_ib_ipaddr *i_ipaddr;
struct rds_ib_ipaddr *to_free = NULL;
spin_lock_irq(&rds_ibdev->spinlock);
list_for_each_entry_rcu(i_ipaddr, &rds_ibdev->ipaddr_list, list) {
if (i_ipaddr->ipaddr == ipaddr) {
list_del_rcu(&i_ipaddr->list);
to_free = i_ipaddr;
break;
}
}
spin_unlock_irq(&rds_ibdev->spinlock);
if (to_free)
kfree_rcu(to_free, rcu);
}
int rds_ib_update_ipaddr(struct rds_ib_device *rds_ibdev,
struct in6_addr *ipaddr)
{
struct rds_ib_device *rds_ibdev_old;
rds_ibdev_old = rds_ib_get_device(ipaddr->s6_addr32[3]);
if (!rds_ibdev_old)
return rds_ib_add_ipaddr(rds_ibdev, ipaddr->s6_addr32[3]);
if (rds_ibdev_old != rds_ibdev) {
rds_ib_remove_ipaddr(rds_ibdev_old, ipaddr->s6_addr32[3]);
rds_ib_dev_put(rds_ibdev_old);
return rds_ib_add_ipaddr(rds_ibdev, ipaddr->s6_addr32[3]);
}
rds_ib_dev_put(rds_ibdev_old);
return 0;
}
void rds_ib_add_conn(struct rds_ib_device *rds_ibdev, struct rds_connection *conn)
{
struct rds_ib_connection *ic = conn->c_transport_data;
/* conn was previously on the nodev_conns_list */
spin_lock_irq(&ib_nodev_conns_lock);
BUG_ON(list_empty(&ib_nodev_conns));
BUG_ON(list_empty(&ic->ib_node));
list_del(&ic->ib_node);
spin_lock(&rds_ibdev->spinlock);
list_add_tail(&ic->ib_node, &rds_ibdev->conn_list);
spin_unlock(&rds_ibdev->spinlock);
spin_unlock_irq(&ib_nodev_conns_lock);
ic->rds_ibdev = rds_ibdev;
refcount_inc(&rds_ibdev->refcount);
}
void rds_ib_remove_conn(struct rds_ib_device *rds_ibdev, struct rds_connection *conn)
{
struct rds_ib_connection *ic = conn->c_transport_data;
/* place conn on nodev_conns_list */
spin_lock(&ib_nodev_conns_lock);
spin_lock_irq(&rds_ibdev->spinlock);
BUG_ON(list_empty(&ic->ib_node));
list_del(&ic->ib_node);
spin_unlock_irq(&rds_ibdev->spinlock);
list_add_tail(&ic->ib_node, &ib_nodev_conns);
spin_unlock(&ib_nodev_conns_lock);
ic->rds_ibdev = NULL;
rds_ib_dev_put(rds_ibdev);
}
void rds_ib_destroy_nodev_conns(void)
{
struct rds_ib_connection *ic, *_ic;
LIST_HEAD(tmp_list);
/* avoid calling conn_destroy with irqs off */
spin_lock_irq(&ib_nodev_conns_lock);
list_splice(&ib_nodev_conns, &tmp_list);
spin_unlock_irq(&ib_nodev_conns_lock);
list_for_each_entry_safe(ic, _ic, &tmp_list, ib_node)
rds_conn_destroy(ic->conn);
}
void rds_ib_get_mr_info(struct rds_ib_device *rds_ibdev, struct rds_info_rdma_connection *iinfo)
{
struct rds_ib_mr_pool *pool_1m = rds_ibdev->mr_1m_pool;
iinfo->rdma_mr_max = pool_1m->max_items;
iinfo->rdma_mr_size = pool_1m->max_pages;
}
#if IS_ENABLED(CONFIG_IPV6)
void rds6_ib_get_mr_info(struct rds_ib_device *rds_ibdev,
struct rds6_info_rdma_connection *iinfo6)
{
struct rds_ib_mr_pool *pool_1m = rds_ibdev->mr_1m_pool;
iinfo6->rdma_mr_max = pool_1m->max_items;
iinfo6->rdma_mr_size = pool_1m->max_pages;
}
#endif
struct rds_ib_mr *rds_ib_reuse_mr(struct rds_ib_mr_pool *pool)
{
struct rds_ib_mr *ibmr = NULL;
struct llist_node *ret;
unsigned long flags;
spin_lock_irqsave(&pool->clean_lock, flags);
ret = llist_del_first(&pool->clean_list);
spin_unlock_irqrestore(&pool->clean_lock, flags);
if (ret) {
ibmr = llist_entry(ret, struct rds_ib_mr, llnode);
if (pool->pool_type == RDS_IB_MR_8K_POOL)
rds_ib_stats_inc(s_ib_rdma_mr_8k_reused);
else
rds_ib_stats_inc(s_ib_rdma_mr_1m_reused);
}
return ibmr;
}
void rds_ib_sync_mr(void *trans_private, int direction)
{
struct rds_ib_mr *ibmr = trans_private;
struct rds_ib_device *rds_ibdev = ibmr->device;
if (ibmr->odp)
return;
switch (direction) {
case DMA_FROM_DEVICE:
ib_dma_sync_sg_for_cpu(rds_ibdev->dev, ibmr->sg,
ibmr->sg_dma_len, DMA_BIDIRECTIONAL);
break;
case DMA_TO_DEVICE:
ib_dma_sync_sg_for_device(rds_ibdev->dev, ibmr->sg,
ibmr->sg_dma_len, DMA_BIDIRECTIONAL);
break;
}
}
void __rds_ib_teardown_mr(struct rds_ib_mr *ibmr)
{
struct rds_ib_device *rds_ibdev = ibmr->device;
if (ibmr->sg_dma_len) {
ib_dma_unmap_sg(rds_ibdev->dev,
ibmr->sg, ibmr->sg_len,
DMA_BIDIRECTIONAL);
ibmr->sg_dma_len = 0;
}
/* Release the s/g list */
if (ibmr->sg_len) {
unsigned int i;
for (i = 0; i < ibmr->sg_len; ++i) {
struct page *page = sg_page(&ibmr->sg[i]);
/* FIXME we need a way to tell a r/w MR
* from a r/o MR */
WARN_ON(!page->mapping && irqs_disabled());
set_page_dirty(page);
put_page(page);
}
kfree(ibmr->sg);
ibmr->sg = NULL;
ibmr->sg_len = 0;
}
}
void rds_ib_teardown_mr(struct rds_ib_mr *ibmr)
{
unsigned int pinned = ibmr->sg_len;
__rds_ib_teardown_mr(ibmr);
if (pinned) {
struct rds_ib_mr_pool *pool = ibmr->pool;
atomic_sub(pinned, &pool->free_pinned);
}
}
static inline unsigned int rds_ib_flush_goal(struct rds_ib_mr_pool *pool, int free_all)
{
unsigned int item_count;
item_count = atomic_read(&pool->item_count);
if (free_all)
return item_count;
return 0;
}
/*
* given an llist of mrs, put them all into the list_head for more processing
*/
static unsigned int llist_append_to_list(struct llist_head *llist,
struct list_head *list)
{
struct rds_ib_mr *ibmr;
struct llist_node *node;
struct llist_node *next;
unsigned int count = 0;
node = llist_del_all(llist);
while (node) {
next = node->next;
ibmr = llist_entry(node, struct rds_ib_mr, llnode);
list_add_tail(&ibmr->unmap_list, list);
node = next;
count++;
}
return count;
}
/*
 * this takes a list head of mrs and links them into a single chain of
 * llist nodes (returning the head and tail of the chain) so they can be
 * batch-added to the clean_list and reused.
*/
static void list_to_llist_nodes(struct list_head *list,
struct llist_node **nodes_head,
struct llist_node **nodes_tail)
{
struct rds_ib_mr *ibmr;
struct llist_node *cur = NULL;
struct llist_node **next = nodes_head;
list_for_each_entry(ibmr, list, unmap_list) {
cur = &ibmr->llnode;
*next = cur;
next = &cur->next;
}
*next = NULL;
*nodes_tail = cur;
}
/*
* Flush our pool of MRs.
* At a minimum, all currently unused MRs are unmapped.
* If the number of MRs allocated exceeds the limit, we also try
* to free as many MRs as needed to get back to this limit.
*/
int rds_ib_flush_mr_pool(struct rds_ib_mr_pool *pool,
int free_all, struct rds_ib_mr **ibmr_ret)
{
struct rds_ib_mr *ibmr;
struct llist_node *clean_nodes;
struct llist_node *clean_tail;
LIST_HEAD(unmap_list);
unsigned long unpinned = 0;
unsigned int nfreed = 0, dirty_to_clean = 0, free_goal;
if (pool->pool_type == RDS_IB_MR_8K_POOL)
rds_ib_stats_inc(s_ib_rdma_mr_8k_pool_flush);
else
rds_ib_stats_inc(s_ib_rdma_mr_1m_pool_flush);
if (ibmr_ret) {
DEFINE_WAIT(wait);
while (!mutex_trylock(&pool->flush_lock)) {
ibmr = rds_ib_reuse_mr(pool);
if (ibmr) {
*ibmr_ret = ibmr;
finish_wait(&pool->flush_wait, &wait);
goto out_nolock;
}
prepare_to_wait(&pool->flush_wait, &wait,
TASK_UNINTERRUPTIBLE);
if (llist_empty(&pool->clean_list))
schedule();
ibmr = rds_ib_reuse_mr(pool);
if (ibmr) {
*ibmr_ret = ibmr;
finish_wait(&pool->flush_wait, &wait);
goto out_nolock;
}
}
finish_wait(&pool->flush_wait, &wait);
} else
mutex_lock(&pool->flush_lock);
if (ibmr_ret) {
ibmr = rds_ib_reuse_mr(pool);
if (ibmr) {
*ibmr_ret = ibmr;
goto out;
}
}
/* Get the list of all MRs to be dropped. Ordering matters -
* we want to put drop_list ahead of free_list.
*/
dirty_to_clean = llist_append_to_list(&pool->drop_list, &unmap_list);
dirty_to_clean += llist_append_to_list(&pool->free_list, &unmap_list);
if (free_all) {
unsigned long flags;
spin_lock_irqsave(&pool->clean_lock, flags);
llist_append_to_list(&pool->clean_list, &unmap_list);
spin_unlock_irqrestore(&pool->clean_lock, flags);
}
free_goal = rds_ib_flush_goal(pool, free_all);
if (list_empty(&unmap_list))
goto out;
rds_ib_unreg_frmr(&unmap_list, &nfreed, &unpinned, free_goal);
if (!list_empty(&unmap_list)) {
unsigned long flags;
list_to_llist_nodes(&unmap_list, &clean_nodes, &clean_tail);
if (ibmr_ret) {
*ibmr_ret = llist_entry(clean_nodes, struct rds_ib_mr, llnode);
clean_nodes = clean_nodes->next;
}
/* more than one entry in llist nodes */
if (clean_nodes) {
spin_lock_irqsave(&pool->clean_lock, flags);
llist_add_batch(clean_nodes, clean_tail,
&pool->clean_list);
spin_unlock_irqrestore(&pool->clean_lock, flags);
}
}
atomic_sub(unpinned, &pool->free_pinned);
atomic_sub(dirty_to_clean, &pool->dirty_count);
atomic_sub(nfreed, &pool->item_count);
out:
mutex_unlock(&pool->flush_lock);
if (waitqueue_active(&pool->flush_wait))
wake_up(&pool->flush_wait);
out_nolock:
return 0;
}
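/*
 * Try to satisfy an MR request from the pool's clean list. If the pool is
 * at its limit, flush it and retry a couple of times, bumping the
 * "depleted" statistic before giving up. A NULL return means no reusable
 * MR could be found.
 */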
struct rds_ib_mr *rds_ib_try_reuse_ibmr(struct rds_ib_mr_pool *pool)
{
struct rds_ib_mr *ibmr = NULL;
int iter = 0;
while (1) {
ibmr = rds_ib_reuse_mr(pool);
if (ibmr)
return ibmr;
if (atomic_inc_return(&pool->item_count) <= pool->max_items)
break;
atomic_dec(&pool->item_count);
if (++iter > 2) {
if (pool->pool_type == RDS_IB_MR_8K_POOL)
rds_ib_stats_inc(s_ib_rdma_mr_8k_pool_depleted);
else
rds_ib_stats_inc(s_ib_rdma_mr_1m_pool_depleted);
break;
}
/* We do have some empty MRs. Flush them out. */
if (pool->pool_type == RDS_IB_MR_8K_POOL)
rds_ib_stats_inc(s_ib_rdma_mr_8k_pool_wait);
else
rds_ib_stats_inc(s_ib_rdma_mr_1m_pool_wait);
rds_ib_flush_mr_pool(pool, 0, &ibmr);
if (ibmr)
return ibmr;
}
return NULL;
}
static void rds_ib_mr_pool_flush_worker(struct work_struct *work)
{
struct rds_ib_mr_pool *pool = container_of(work, struct rds_ib_mr_pool, flush_worker.work);
rds_ib_flush_mr_pool(pool, 0, NULL);
}
void rds_ib_free_mr(void *trans_private, int invalidate)
{
struct rds_ib_mr *ibmr = trans_private;
struct rds_ib_mr_pool *pool = ibmr->pool;
struct rds_ib_device *rds_ibdev = ibmr->device;
rdsdebug("RDS/IB: free_mr nents %u\n", ibmr->sg_len);
if (ibmr->odp) {
		/* An MR created and marked as use_once. We use delayed work,
		 * because there is a chance that we are in interrupt context
		 * and can't call ib_dereg_mr() directly.
		 */
INIT_DELAYED_WORK(&ibmr->work, rds_ib_odp_mr_worker);
queue_delayed_work(rds_ib_mr_wq, &ibmr->work, 0);
return;
}
/* Return it to the pool's free list */
rds_ib_free_frmr_list(ibmr);
atomic_add(ibmr->sg_len, &pool->free_pinned);
atomic_inc(&pool->dirty_count);
/* If we've pinned too many pages, request a flush */
if (atomic_read(&pool->free_pinned) >= pool->max_free_pinned ||
atomic_read(&pool->dirty_count) >= pool->max_items / 5)
queue_delayed_work(rds_ib_mr_wq, &pool->flush_worker, 10);
if (invalidate) {
if (likely(!in_interrupt())) {
rds_ib_flush_mr_pool(pool, 0, NULL);
} else {
			/* We get here if the user created an MR marked
			 * as use_once and requested invalidate at the
			 * same time.
			 */
queue_delayed_work(rds_ib_mr_wq,
&pool->flush_worker, 10);
}
}
rds_ib_dev_put(rds_ibdev);
}
void rds_ib_flush_mrs(void)
{
struct rds_ib_device *rds_ibdev;
down_read(&rds_ib_devices_lock);
list_for_each_entry(rds_ibdev, &rds_ib_devices, list) {
if (rds_ibdev->mr_8k_pool)
rds_ib_flush_mr_pool(rds_ibdev->mr_8k_pool, 0, NULL);
if (rds_ibdev->mr_1m_pool)
rds_ib_flush_mr_pool(rds_ibdev->mr_1m_pool, 0, NULL);
}
up_read(&rds_ib_devices_lock);
}
u32 rds_ib_get_lkey(void *trans_private)
{
struct rds_ib_mr *ibmr = trans_private;
return ibmr->u.mr->lkey;
}
void *rds_ib_get_mr(struct scatterlist *sg, unsigned long nents,
struct rds_sock *rs, u32 *key_ret,
struct rds_connection *conn,
u64 start, u64 length, int need_odp)
{
struct rds_ib_device *rds_ibdev;
struct rds_ib_mr *ibmr = NULL;
struct rds_ib_connection *ic = NULL;
int ret;
rds_ibdev = rds_ib_get_device(rs->rs_bound_addr.s6_addr32[3]);
if (!rds_ibdev) {
ret = -ENODEV;
goto out;
}
if (need_odp == ODP_ZEROBASED || need_odp == ODP_VIRTUAL) {
u64 virt_addr = need_odp == ODP_ZEROBASED ? 0 : start;
int access_flags =
(IB_ACCESS_LOCAL_WRITE | IB_ACCESS_REMOTE_READ |
IB_ACCESS_REMOTE_WRITE | IB_ACCESS_REMOTE_ATOMIC |
IB_ACCESS_ON_DEMAND);
struct ib_sge sge = {};
struct ib_mr *ib_mr;
if (!rds_ibdev->odp_capable) {
ret = -EOPNOTSUPP;
goto out;
}
ib_mr = ib_reg_user_mr(rds_ibdev->pd, start, length, virt_addr,
access_flags);
if (IS_ERR(ib_mr)) {
rdsdebug("rds_ib_get_user_mr returned %d\n",
IS_ERR(ib_mr));
ret = PTR_ERR(ib_mr);
goto out;
}
if (key_ret)
*key_ret = ib_mr->rkey;
ibmr = kzalloc(sizeof(*ibmr), GFP_KERNEL);
if (!ibmr) {
ib_dereg_mr(ib_mr);
ret = -ENOMEM;
goto out;
}
ibmr->u.mr = ib_mr;
ibmr->odp = 1;
sge.addr = virt_addr;
sge.length = length;
sge.lkey = ib_mr->lkey;
ib_advise_mr(rds_ibdev->pd,
IB_UVERBS_ADVISE_MR_ADVICE_PREFETCH_WRITE,
IB_UVERBS_ADVISE_MR_FLAG_FLUSH, &sge, 1);
return ibmr;
}
if (conn)
ic = conn->c_transport_data;
if (!rds_ibdev->mr_8k_pool || !rds_ibdev->mr_1m_pool) {
ret = -ENODEV;
goto out;
}
ibmr = rds_ib_reg_frmr(rds_ibdev, ic, sg, nents, key_ret);
if (IS_ERR(ibmr)) {
ret = PTR_ERR(ibmr);
pr_warn("RDS/IB: rds_ib_get_mr failed (errno=%d)\n", ret);
} else {
return ibmr;
}
out:
if (rds_ibdev)
rds_ib_dev_put(rds_ibdev);
return ERR_PTR(ret);
}
void rds_ib_destroy_mr_pool(struct rds_ib_mr_pool *pool)
{
cancel_delayed_work_sync(&pool->flush_worker);
rds_ib_flush_mr_pool(pool, 1, NULL);
WARN_ON(atomic_read(&pool->item_count));
WARN_ON(atomic_read(&pool->free_pinned));
kfree(pool);
}
struct rds_ib_mr_pool *rds_ib_create_mr_pool(struct rds_ib_device *rds_ibdev,
int pool_type)
{
struct rds_ib_mr_pool *pool;
pool = kzalloc(sizeof(*pool), GFP_KERNEL);
if (!pool)
return ERR_PTR(-ENOMEM);
pool->pool_type = pool_type;
init_llist_head(&pool->free_list);
init_llist_head(&pool->drop_list);
init_llist_head(&pool->clean_list);
spin_lock_init(&pool->clean_lock);
mutex_init(&pool->flush_lock);
init_waitqueue_head(&pool->flush_wait);
INIT_DELAYED_WORK(&pool->flush_worker, rds_ib_mr_pool_flush_worker);
if (pool_type == RDS_IB_MR_1M_POOL) {
/* +1 allows for unaligned MRs */
pool->max_pages = RDS_MR_1M_MSG_SIZE + 1;
pool->max_items = rds_ibdev->max_1m_mrs;
} else {
/* pool_type == RDS_IB_MR_8K_POOL */
pool->max_pages = RDS_MR_8K_MSG_SIZE + 1;
pool->max_items = rds_ibdev->max_8k_mrs;
}
pool->max_free_pinned = pool->max_items * pool->max_pages / 4;
pool->max_items_soft = rds_ibdev->max_mrs * 3 / 4;
return pool;
}
int rds_ib_mr_init(void)
{
rds_ib_mr_wq = alloc_workqueue("rds_mr_flushd", WQ_MEM_RECLAIM, 0);
if (!rds_ib_mr_wq)
return -ENOMEM;
return 0;
}
/* By the time this is called all the IB devices should have been torn down and
* had their pools freed. As each pool is freed its work struct is waited on,
* so the pool flushing work queue should be idle by the time we get here.
*/
void rds_ib_mr_exit(void)
{
destroy_workqueue(rds_ib_mr_wq);
}
static void rds_ib_odp_mr_worker(struct work_struct *work)
{
struct rds_ib_mr *ibmr;
ibmr = container_of(work, struct rds_ib_mr, work.work);
ib_dereg_mr(ibmr->u.mr);
kfree(ibmr);
}
| linux-master | net/rds/ib_rdma.c |
/*
* Copyright (c) 2006 Oracle. All rights reserved.
*
* This software is available to you under a choice of one of two
* licenses. You may choose to be licensed under the terms of the GNU
* General Public License (GPL) Version 2, available from the file
* COPYING in the main directory of this source tree, or the
* OpenIB.org BSD license below:
*
* Redistribution and use in source and binary forms, with or
* without modification, are permitted provided that the following
* conditions are met:
*
* - Redistributions of source code must retain the above
* copyright notice, this list of conditions and the following
* disclaimer.
*
* - Redistributions in binary form must reproduce the above
* copyright notice, this list of conditions and the following
* disclaimer in the documentation and/or other materials
* provided with the distribution.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
* EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
* MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
* NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
* BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
* ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
* CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
* SOFTWARE.
*
*/
#include <linux/percpu.h>
#include <linux/seq_file.h>
#include <linux/slab.h>
#include <linux/proc_fs.h>
#include <linux/export.h>
#include "rds.h"
/*
* This file implements a getsockopt() call which copies a set of fixed
* sized structs into a user-specified buffer as a means of providing
* read-only information about RDS.
*
* For a given information source there are a given number of fixed sized
* structs at a given time. The structs are only copied if the user-specified
* buffer is big enough. The destination pages that make up the buffer
* are pinned for the duration of the copy.
*
* This gives us the following benefits:
*
* - simple implementation, no copy "position" across multiple calls
* - consistent snapshot of an info source
* - atomic copy works well with whatever locking info source has
* - one portable tool to get rds info across implementations
* - long-lived tool can get info without allocating
*
* at the following costs:
*
* - info source copy must be pinned, may be "large"
*/
struct rds_info_iterator {
struct page **pages;
void *addr;
unsigned long offset;
};
static DEFINE_SPINLOCK(rds_info_lock);
static rds_info_func rds_info_funcs[RDS_INFO_LAST - RDS_INFO_FIRST + 1];
void rds_info_register_func(int optname, rds_info_func func)
{
int offset = optname - RDS_INFO_FIRST;
BUG_ON(optname < RDS_INFO_FIRST || optname > RDS_INFO_LAST);
spin_lock(&rds_info_lock);
BUG_ON(rds_info_funcs[offset]);
rds_info_funcs[offset] = func;
spin_unlock(&rds_info_lock);
}
EXPORT_SYMBOL_GPL(rds_info_register_func);
void rds_info_deregister_func(int optname, rds_info_func func)
{
int offset = optname - RDS_INFO_FIRST;
BUG_ON(optname < RDS_INFO_FIRST || optname > RDS_INFO_LAST);
spin_lock(&rds_info_lock);
BUG_ON(rds_info_funcs[offset] != func);
rds_info_funcs[offset] = NULL;
spin_unlock(&rds_info_lock);
}
EXPORT_SYMBOL_GPL(rds_info_deregister_func);
/*
* Typically we hold an atomic kmap across multiple rds_info_copy() calls
* because the kmap is so expensive. This must be called before using blocking
* operations while holding the mapping and as the iterator is torn down.
*/
void rds_info_iter_unmap(struct rds_info_iterator *iter)
{
if (iter->addr) {
kunmap_atomic(iter->addr);
iter->addr = NULL;
}
}
/*
* get_user_pages() called flush_dcache_page() on the pages for us.
*/
void rds_info_copy(struct rds_info_iterator *iter, void *data,
unsigned long bytes)
{
unsigned long this;
while (bytes) {
if (!iter->addr)
iter->addr = kmap_atomic(*iter->pages);
this = min(bytes, PAGE_SIZE - iter->offset);
rdsdebug("page %p addr %p offset %lu this %lu data %p "
"bytes %lu\n", *iter->pages, iter->addr,
iter->offset, this, data, bytes);
memcpy(iter->addr + iter->offset, data, this);
data += this;
bytes -= this;
iter->offset += this;
if (iter->offset == PAGE_SIZE) {
kunmap_atomic(iter->addr);
iter->addr = NULL;
iter->offset = 0;
iter->pages++;
}
}
}
EXPORT_SYMBOL_GPL(rds_info_copy);
/*
* @optval points to the userspace buffer that the information snapshot
* will be copied into.
*
* @optlen on input is the size of the buffer in userspace. @optlen
* on output is the size of the requested snapshot in bytes.
*
* This function returns -errno if there is a failure, particularly -ENOSPC
* if the given userspace buffer was not large enough to fit the snapshot.
* On success it returns the positive number of bytes of each array element
* in the snapshot.
*/
int rds_info_getsockopt(struct socket *sock, int optname, char __user *optval,
int __user *optlen)
{
struct rds_info_iterator iter;
struct rds_info_lengths lens;
unsigned long nr_pages = 0;
unsigned long start;
rds_info_func func;
struct page **pages = NULL;
int ret;
int len;
int total;
if (get_user(len, optlen)) {
ret = -EFAULT;
goto out;
}
/* check for all kinds of wrapping and the like */
start = (unsigned long)optval;
if (len < 0 || len > INT_MAX - PAGE_SIZE + 1 || start + len < start) {
ret = -EINVAL;
goto out;
}
/* a 0 len call is just trying to probe its length */
if (len == 0)
goto call_func;
nr_pages = (PAGE_ALIGN(start + len) - (start & PAGE_MASK))
>> PAGE_SHIFT;
pages = kmalloc_array(nr_pages, sizeof(struct page *), GFP_KERNEL);
if (!pages) {
ret = -ENOMEM;
goto out;
}
ret = pin_user_pages_fast(start, nr_pages, FOLL_WRITE, pages);
if (ret != nr_pages) {
if (ret > 0)
nr_pages = ret;
else
nr_pages = 0;
ret = -EAGAIN; /* XXX ? */
goto out;
}
rdsdebug("len %d nr_pages %lu\n", len, nr_pages);
call_func:
func = rds_info_funcs[optname - RDS_INFO_FIRST];
if (!func) {
ret = -ENOPROTOOPT;
goto out;
}
iter.pages = pages;
iter.addr = NULL;
iter.offset = start & (PAGE_SIZE - 1);
func(sock, len, &iter, &lens);
BUG_ON(lens.each == 0);
total = lens.nr * lens.each;
rds_info_iter_unmap(&iter);
if (total > len) {
len = total;
ret = -ENOSPC;
} else {
len = total;
ret = lens.each;
}
if (put_user(len, optlen))
ret = -EFAULT;
out:
if (pages)
unpin_user_pages(pages, nr_pages);
kfree(pages);
return ret;
}
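/*
 * Typical userspace pattern for this interface, sketched with the
 * RDS_INFO_COUNTERS optname from <linux/rds.h> (error handling omitted):
 *
 *	socklen_t len = 0;
 *	getsockopt(fd, SOL_RDS, RDS_INFO_COUNTERS, NULL, &len);
 *		- fails with ENOSPC and sets len to the snapshot size
 *	void *buf = malloc(len);
 *	int each = getsockopt(fd, SOL_RDS, RDS_INFO_COUNTERS, buf, &len);
 *	int nr = len / each;
 *
 * i.e. probe with a zero-length buffer to learn the snapshot size, then
 * fetch it; the positive return value is the size of one array element.
 */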
| linux-master | net/rds/info.c |
/*
* Copyright (c) 2006, 2019 Oracle and/or its affiliates. All rights reserved.
*
* This software is available to you under a choice of one of two
* licenses. You may choose to be licensed under the terms of the GNU
* General Public License (GPL) Version 2, available from the file
* COPYING in the main directory of this source tree, or the
* OpenIB.org BSD license below:
*
* Redistribution and use in source and binary forms, with or
* without modification, are permitted provided that the following
* conditions are met:
*
* - Redistributions of source code must retain the above
* copyright notice, this list of conditions and the following
* disclaimer.
*
* - Redistributions in binary form must reproduce the above
* copyright notice, this list of conditions and the following
* disclaimer in the documentation and/or other materials
* provided with the distribution.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
* EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
* MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
* NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
* BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
* ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
* CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
* SOFTWARE.
*
*/
#include <linux/kernel.h>
#include <linux/in.h>
#include <linux/device.h>
#include <linux/dmapool.h>
#include <linux/ratelimit.h>
#include "rds_single_path.h"
#include "rds.h"
#include "ib.h"
#include "ib_mr.h"
/*
* Convert IB-specific error message to RDS error message and call core
* completion handler.
*/
static void rds_ib_send_complete(struct rds_message *rm,
int wc_status,
void (*complete)(struct rds_message *rm, int status))
{
int notify_status;
switch (wc_status) {
case IB_WC_WR_FLUSH_ERR:
return;
case IB_WC_SUCCESS:
notify_status = RDS_RDMA_SUCCESS;
break;
case IB_WC_REM_ACCESS_ERR:
notify_status = RDS_RDMA_REMOTE_ERROR;
break;
default:
notify_status = RDS_RDMA_OTHER_ERROR;
break;
}
complete(rm, notify_status);
}
static void rds_ib_send_unmap_data(struct rds_ib_connection *ic,
struct rm_data_op *op,
int wc_status)
{
if (op->op_nents)
ib_dma_unmap_sg(ic->i_cm_id->device,
op->op_sg, op->op_nents,
DMA_TO_DEVICE);
}
static void rds_ib_send_unmap_rdma(struct rds_ib_connection *ic,
struct rm_rdma_op *op,
int wc_status)
{
if (op->op_mapped) {
ib_dma_unmap_sg(ic->i_cm_id->device,
op->op_sg, op->op_nents,
op->op_write ? DMA_TO_DEVICE : DMA_FROM_DEVICE);
op->op_mapped = 0;
}
/* If the user asked for a completion notification on this
* message, we can implement three different semantics:
* 1. Notify when we received the ACK on the RDS message
* that was queued with the RDMA. This provides reliable
* notification of RDMA status at the expense of a one-way
* packet delay.
* 2. Notify when the IB stack gives us the completion event for
* the RDMA operation.
* 3. Notify when the IB stack gives us the completion event for
* the accompanying RDS messages.
* Here, we implement approach #3. To implement approach #2,
* we would need to take an event for the rdma WR. To implement #1,
* don't call rds_rdma_send_complete at all, and fall back to the notify
* handling in the ACK processing code.
*
* Note: There's no need to explicitly sync any RDMA buffers using
* ib_dma_sync_sg_for_cpu - the completion for the RDMA
* operation itself unmapped the RDMA buffers, which takes care
* of synching.
*/
rds_ib_send_complete(container_of(op, struct rds_message, rdma),
wc_status, rds_rdma_send_complete);
if (op->op_write)
rds_stats_add(s_send_rdma_bytes, op->op_bytes);
else
rds_stats_add(s_recv_rdma_bytes, op->op_bytes);
}
static void rds_ib_send_unmap_atomic(struct rds_ib_connection *ic,
struct rm_atomic_op *op,
int wc_status)
{
/* unmap atomic recvbuf */
if (op->op_mapped) {
ib_dma_unmap_sg(ic->i_cm_id->device, op->op_sg, 1,
DMA_FROM_DEVICE);
op->op_mapped = 0;
}
rds_ib_send_complete(container_of(op, struct rds_message, atomic),
wc_status, rds_atomic_send_complete);
if (op->op_type == RDS_ATOMIC_TYPE_CSWP)
rds_ib_stats_inc(s_ib_atomic_cswp);
else
rds_ib_stats_inc(s_ib_atomic_fadd);
}
/*
* Unmap the resources associated with a struct send_work.
*
 * Returns the rm because the caller (the completion handler) needs it,
 * and currently the only way to recover it is by switching on
 * wr.opcode.
*/
static struct rds_message *rds_ib_send_unmap_op(struct rds_ib_connection *ic,
struct rds_ib_send_work *send,
int wc_status)
{
struct rds_message *rm = NULL;
/* In the error case, wc.opcode sometimes contains garbage */
switch (send->s_wr.opcode) {
case IB_WR_SEND:
if (send->s_op) {
rm = container_of(send->s_op, struct rds_message, data);
rds_ib_send_unmap_data(ic, send->s_op, wc_status);
}
break;
case IB_WR_RDMA_WRITE:
case IB_WR_RDMA_READ:
if (send->s_op) {
rm = container_of(send->s_op, struct rds_message, rdma);
rds_ib_send_unmap_rdma(ic, send->s_op, wc_status);
}
break;
case IB_WR_ATOMIC_FETCH_AND_ADD:
case IB_WR_ATOMIC_CMP_AND_SWP:
if (send->s_op) {
rm = container_of(send->s_op, struct rds_message, atomic);
rds_ib_send_unmap_atomic(ic, send->s_op, wc_status);
}
break;
default:
printk_ratelimited(KERN_NOTICE
"RDS/IB: %s: unexpected opcode 0x%x in WR!\n",
__func__, send->s_wr.opcode);
break;
}
send->s_wr.opcode = 0xdead;
return rm;
}
void rds_ib_send_init_ring(struct rds_ib_connection *ic)
{
struct rds_ib_send_work *send;
u32 i;
for (i = 0, send = ic->i_sends; i < ic->i_send_ring.w_nr; i++, send++) {
struct ib_sge *sge;
send->s_op = NULL;
send->s_wr.wr_id = i;
send->s_wr.sg_list = send->s_sge;
send->s_wr.ex.imm_data = 0;
sge = &send->s_sge[0];
sge->addr = ic->i_send_hdrs_dma[i];
sge->length = sizeof(struct rds_header);
sge->lkey = ic->i_pd->local_dma_lkey;
send->s_sge[1].lkey = ic->i_pd->local_dma_lkey;
}
}
void rds_ib_send_clear_ring(struct rds_ib_connection *ic)
{
struct rds_ib_send_work *send;
u32 i;
for (i = 0, send = ic->i_sends; i < ic->i_send_ring.w_nr; i++, send++) {
if (send->s_op && send->s_wr.opcode != 0xdead)
rds_ib_send_unmap_op(ic, send, IB_WC_WR_FLUSH_ERR);
}
}
/*
* The only fast path caller always has a non-zero nr, so we don't
* bother testing nr before performing the atomic sub.
*/
static void rds_ib_sub_signaled(struct rds_ib_connection *ic, int nr)
{
if ((atomic_sub_return(nr, &ic->i_signaled_sends) == 0) &&
waitqueue_active(&rds_ib_ring_empty_wait))
wake_up(&rds_ib_ring_empty_wait);
BUG_ON(atomic_read(&ic->i_signaled_sends) < 0);
}
/*
* The _oldest/_free ring operations here race cleanly with the alloc/unalloc
* operations performed in the send path. As the sender allocs and potentially
* unallocs the next free entry in the ring it doesn't alter which is
* the next to be freed, which is what this is concerned with.
*/
void rds_ib_send_cqe_handler(struct rds_ib_connection *ic, struct ib_wc *wc)
{
struct rds_message *rm = NULL;
struct rds_connection *conn = ic->conn;
struct rds_ib_send_work *send;
u32 completed;
u32 oldest;
u32 i = 0;
int nr_sig = 0;
rdsdebug("wc wr_id 0x%llx status %u (%s) byte_len %u imm_data %u\n",
(unsigned long long)wc->wr_id, wc->status,
ib_wc_status_msg(wc->status), wc->byte_len,
be32_to_cpu(wc->ex.imm_data));
rds_ib_stats_inc(s_ib_tx_cq_event);
if (wc->wr_id == RDS_IB_ACK_WR_ID) {
if (time_after(jiffies, ic->i_ack_queued + HZ / 2))
rds_ib_stats_inc(s_ib_tx_stalled);
rds_ib_ack_send_complete(ic);
return;
}
oldest = rds_ib_ring_oldest(&ic->i_send_ring);
completed = rds_ib_ring_completed(&ic->i_send_ring, wc->wr_id, oldest);
for (i = 0; i < completed; i++) {
send = &ic->i_sends[oldest];
if (send->s_wr.send_flags & IB_SEND_SIGNALED)
nr_sig++;
rm = rds_ib_send_unmap_op(ic, send, wc->status);
if (time_after(jiffies, send->s_queued + HZ / 2))
rds_ib_stats_inc(s_ib_tx_stalled);
if (send->s_op) {
if (send->s_op == rm->m_final_op) {
/* If anyone waited for this message to get
* flushed out, wake them up now
*/
rds_message_unmapped(rm);
}
rds_message_put(rm);
send->s_op = NULL;
}
oldest = (oldest + 1) % ic->i_send_ring.w_nr;
}
rds_ib_ring_free(&ic->i_send_ring, completed);
rds_ib_sub_signaled(ic, nr_sig);
if (test_and_clear_bit(RDS_LL_SEND_FULL, &conn->c_flags) ||
test_bit(0, &conn->c_map_queued))
queue_delayed_work(rds_wq, &conn->c_send_w, 0);
/* We expect errors as the qp is drained during shutdown */
if (wc->status != IB_WC_SUCCESS && rds_conn_up(conn)) {
rds_ib_conn_error(conn, "send completion on <%pI6c,%pI6c,%d> had status %u (%s), vendor err 0x%x, disconnecting and reconnecting\n",
&conn->c_laddr, &conn->c_faddr,
conn->c_tos, wc->status,
ib_wc_status_msg(wc->status), wc->vendor_err);
}
}
/*
* This is the main function for allocating credits when sending
* messages.
*
* Conceptually, we have two counters:
* - send credits: this tells us how many WRs we're allowed
 *    to submit without overrunning the receiver's queue. For
* each SEND WR we post, we decrement this by one.
*
* - posted credits: this tells us how many WRs we recently
* posted to the receive queue. This value is transferred
* to the peer as a "credit update" in a RDS header field.
* Every time we transmit credits to the peer, we subtract
* the amount of transferred credits from this counter.
*
* It is essential that we avoid situations where both sides have
* exhausted their send credits, and are unable to send new credits
* to the peer. We achieve this by requiring that we send at least
* one credit update to the peer before exhausting our credits.
* When new credits arrive, we subtract one credit that is withheld
* until we've posted new buffers and are ready to transmit these
* credits (see rds_ib_send_add_credits below).
*
* The RDS send code is essentially single-threaded; rds_send_xmit
* sets RDS_IN_XMIT to ensure exclusive access to the send ring.
* However, the ACK sending code is independent and can race with
* message SENDs.
*
* In the send path, we need to update the counters for send credits
* and the counter of posted buffers atomically - when we use the
* last available credit, we cannot allow another thread to race us
* and grab the posted credits counter. Hence, we have to use a
* spinlock to protect the credit counter, or use atomics.
*
* Spinlocks shared between the send and the receive path are bad,
* because they create unnecessary delays. An early implementation
* using a spinlock showed a 5% degradation in throughput at some
* loads.
*
* This implementation avoids spinlocks completely, putting both
* counters into a single atomic, and updating that atomic using
* atomic_add (in the receive path, when receiving fresh credits),
* and using atomic_cmpxchg when updating the two counters.
*/
int rds_ib_send_grab_credits(struct rds_ib_connection *ic,
u32 wanted, u32 *adv_credits, int need_posted, int max_posted)
{
unsigned int avail, posted, got = 0, advertise;
long oldval, newval;
*adv_credits = 0;
if (!ic->i_flowctl)
return wanted;
try_again:
advertise = 0;
oldval = newval = atomic_read(&ic->i_credits);
posted = IB_GET_POST_CREDITS(oldval);
avail = IB_GET_SEND_CREDITS(oldval);
rdsdebug("wanted=%u credits=%u posted=%u\n",
wanted, avail, posted);
/* The last credit must be used to send a credit update. */
if (avail && !posted)
avail--;
if (avail < wanted) {
struct rds_connection *conn = ic->i_cm_id->context;
/* Oops, there aren't that many credits left! */
set_bit(RDS_LL_SEND_FULL, &conn->c_flags);
got = avail;
} else {
/* Sometimes you get what you want, lalala. */
got = wanted;
}
newval -= IB_SET_SEND_CREDITS(got);
/*
	 * If need_posted is non-zero, then the caller wants the posted
	 * credits advertised regardless of whether any send credits are
	 * available.
*/
if (posted && (got || need_posted)) {
advertise = min_t(unsigned int, posted, max_posted);
newval -= IB_SET_POST_CREDITS(advertise);
}
/* Finally bill everything */
if (atomic_cmpxchg(&ic->i_credits, oldval, newval) != oldval)
goto try_again;
*adv_credits = advertise;
return got;
}
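/*
 * Sketch of the credit encoding used above, assuming the helpers in ib.h
 * pack the two counters as: low 16 bits = send credits, high 16 bits =
 * posted-buffer credits:
 *
 *	oldval = atomic_read(&ic->i_credits);
 *	avail  = IB_GET_SEND_CREDITS(oldval);	low half
 *	posted = IB_GET_POST_CREDITS(oldval);	high half
 *
 * Keeping both counters in one atomic_t lets the cmpxchg loop above claim
 * send credits and advertised posted credits in a single step, without a
 * spinlock shared between the send and receive paths.
 */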
void rds_ib_send_add_credits(struct rds_connection *conn, unsigned int credits)
{
struct rds_ib_connection *ic = conn->c_transport_data;
if (credits == 0)
return;
rdsdebug("credits=%u current=%u%s\n",
credits,
IB_GET_SEND_CREDITS(atomic_read(&ic->i_credits)),
test_bit(RDS_LL_SEND_FULL, &conn->c_flags) ? ", ll_send_full" : "");
atomic_add(IB_SET_SEND_CREDITS(credits), &ic->i_credits);
if (test_and_clear_bit(RDS_LL_SEND_FULL, &conn->c_flags))
queue_delayed_work(rds_wq, &conn->c_send_w, 0);
WARN_ON(IB_GET_SEND_CREDITS(credits) >= 16384);
rds_ib_stats_inc(s_ib_rx_credit_updates);
}
void rds_ib_advertise_credits(struct rds_connection *conn, unsigned int posted)
{
struct rds_ib_connection *ic = conn->c_transport_data;
if (posted == 0)
return;
atomic_add(IB_SET_POST_CREDITS(posted), &ic->i_credits);
/* Decide whether to send an update to the peer now.
* If we would send a credit update for every single buffer we
* post, we would end up with an ACK storm (ACK arrives,
* consumes buffer, we refill the ring, send ACK to remote
* advertising the newly posted buffer... ad inf)
*
* Performance pretty much depends on how often we send
* credit updates - too frequent updates mean lots of ACKs.
* Too infrequent updates, and the peer will run out of
* credits and has to throttle.
* For the time being, 16 seems to be a good compromise.
*/
if (IB_GET_POST_CREDITS(atomic_read(&ic->i_credits)) >= 16)
set_bit(IB_ACK_REQUESTED, &ic->i_ack_flags);
}
static inline int rds_ib_set_wr_signal_state(struct rds_ib_connection *ic,
struct rds_ib_send_work *send,
bool notify)
{
/*
* We want to delay signaling completions just enough to get
* the batching benefits but not so much that we create dead time
* on the wire.
*/
if (ic->i_unsignaled_wrs-- == 0 || notify) {
ic->i_unsignaled_wrs = rds_ib_sysctl_max_unsig_wrs;
send->s_wr.send_flags |= IB_SEND_SIGNALED;
return 1;
}
return 0;
}
/*
* This can be called multiple times for a given message. The first time
* we see a message we map its scatterlist into the IB device so that
* we can provide that mapped address to the IB scatter gather entries
* in the IB work requests. We translate the scatterlist into a series
* of work requests that fragment the message. These work requests complete
* in order so we pass ownership of the message to the completion handler
* once we send the final fragment.
*
* The RDS core uses the c_send_lock to only enter this function once
* per connection. This makes sure that the tx ring alloc/unalloc pairs
* don't get out of sync and confuse the ring.
*/
int rds_ib_xmit(struct rds_connection *conn, struct rds_message *rm,
unsigned int hdr_off, unsigned int sg, unsigned int off)
{
struct rds_ib_connection *ic = conn->c_transport_data;
struct ib_device *dev = ic->i_cm_id->device;
struct rds_ib_send_work *send = NULL;
struct rds_ib_send_work *first;
struct rds_ib_send_work *prev;
const struct ib_send_wr *failed_wr;
struct scatterlist *scat;
u32 pos;
u32 i;
u32 work_alloc;
u32 credit_alloc = 0;
u32 posted;
u32 adv_credits = 0;
int send_flags = 0;
int bytes_sent = 0;
int ret;
int flow_controlled = 0;
int nr_sig = 0;
BUG_ON(off % RDS_FRAG_SIZE);
BUG_ON(hdr_off != 0 && hdr_off != sizeof(struct rds_header));
/* Do not send cong updates to IB loopback */
if (conn->c_loopback
&& rm->m_inc.i_hdr.h_flags & RDS_FLAG_CONG_BITMAP) {
rds_cong_map_updated(conn->c_fcong, ~(u64) 0);
scat = &rm->data.op_sg[sg];
ret = max_t(int, RDS_CONG_MAP_BYTES, scat->length);
return sizeof(struct rds_header) + ret;
}
/* FIXME we may overallocate here */
if (be32_to_cpu(rm->m_inc.i_hdr.h_len) == 0)
i = 1;
else
i = DIV_ROUND_UP(be32_to_cpu(rm->m_inc.i_hdr.h_len), RDS_FRAG_SIZE);
work_alloc = rds_ib_ring_alloc(&ic->i_send_ring, i, &pos);
if (work_alloc == 0) {
set_bit(RDS_LL_SEND_FULL, &conn->c_flags);
rds_ib_stats_inc(s_ib_tx_ring_full);
ret = -ENOMEM;
goto out;
}
if (ic->i_flowctl) {
credit_alloc = rds_ib_send_grab_credits(ic, work_alloc, &posted, 0, RDS_MAX_ADV_CREDIT);
adv_credits += posted;
if (credit_alloc < work_alloc) {
rds_ib_ring_unalloc(&ic->i_send_ring, work_alloc - credit_alloc);
work_alloc = credit_alloc;
flow_controlled = 1;
}
if (work_alloc == 0) {
set_bit(RDS_LL_SEND_FULL, &conn->c_flags);
rds_ib_stats_inc(s_ib_tx_throttle);
ret = -ENOMEM;
goto out;
}
}
/* map the message the first time we see it */
if (!ic->i_data_op) {
if (rm->data.op_nents) {
rm->data.op_count = ib_dma_map_sg(dev,
rm->data.op_sg,
rm->data.op_nents,
DMA_TO_DEVICE);
rdsdebug("ic %p mapping rm %p: %d\n", ic, rm, rm->data.op_count);
if (rm->data.op_count == 0) {
rds_ib_stats_inc(s_ib_tx_sg_mapping_failure);
rds_ib_ring_unalloc(&ic->i_send_ring, work_alloc);
ret = -ENOMEM; /* XXX ? */
goto out;
}
} else {
rm->data.op_count = 0;
}
rds_message_addref(rm);
rm->data.op_dmasg = 0;
rm->data.op_dmaoff = 0;
ic->i_data_op = &rm->data;
/* Finalize the header */
if (test_bit(RDS_MSG_ACK_REQUIRED, &rm->m_flags))
rm->m_inc.i_hdr.h_flags |= RDS_FLAG_ACK_REQUIRED;
if (test_bit(RDS_MSG_RETRANSMITTED, &rm->m_flags))
rm->m_inc.i_hdr.h_flags |= RDS_FLAG_RETRANSMITTED;
/* If it has a RDMA op, tell the peer we did it. This is
* used by the peer to release use-once RDMA MRs. */
if (rm->rdma.op_active) {
struct rds_ext_header_rdma ext_hdr;
ext_hdr.h_rdma_rkey = cpu_to_be32(rm->rdma.op_rkey);
rds_message_add_extension(&rm->m_inc.i_hdr,
RDS_EXTHDR_RDMA, &ext_hdr, sizeof(ext_hdr));
}
if (rm->m_rdma_cookie) {
rds_message_add_rdma_dest_extension(&rm->m_inc.i_hdr,
rds_rdma_cookie_key(rm->m_rdma_cookie),
rds_rdma_cookie_offset(rm->m_rdma_cookie));
}
/* Note - rds_ib_piggyb_ack clears the ACK_REQUIRED bit, so
* we should not do this unless we have a chance of at least
* sticking the header into the send ring. Which is why we
* should call rds_ib_ring_alloc first. */
rm->m_inc.i_hdr.h_ack = cpu_to_be64(rds_ib_piggyb_ack(ic));
rds_message_make_checksum(&rm->m_inc.i_hdr);
/*
* Update adv_credits since we reset the ACK_REQUIRED bit.
*/
if (ic->i_flowctl) {
rds_ib_send_grab_credits(ic, 0, &posted, 1, RDS_MAX_ADV_CREDIT - adv_credits);
adv_credits += posted;
BUG_ON(adv_credits > 255);
}
}
/* Sometimes you want to put a fence between an RDMA
* READ and the following SEND.
* We could either do this all the time
* or when requested by the user. Right now, we let
* the application choose.
*/
if (rm->rdma.op_active && rm->rdma.op_fence)
send_flags = IB_SEND_FENCE;
/* Each frag gets a header. Msgs may be 0 bytes */
send = &ic->i_sends[pos];
first = send;
prev = NULL;
scat = &ic->i_data_op->op_sg[rm->data.op_dmasg];
i = 0;
do {
unsigned int len = 0;
/* Set up the header */
send->s_wr.send_flags = send_flags;
send->s_wr.opcode = IB_WR_SEND;
send->s_wr.num_sge = 1;
send->s_wr.next = NULL;
send->s_queued = jiffies;
send->s_op = NULL;
send->s_sge[0].addr = ic->i_send_hdrs_dma[pos];
send->s_sge[0].length = sizeof(struct rds_header);
send->s_sge[0].lkey = ic->i_pd->local_dma_lkey;
ib_dma_sync_single_for_cpu(ic->rds_ibdev->dev,
ic->i_send_hdrs_dma[pos],
sizeof(struct rds_header),
DMA_TO_DEVICE);
memcpy(ic->i_send_hdrs[pos], &rm->m_inc.i_hdr,
sizeof(struct rds_header));
/* Set up the data, if present */
if (i < work_alloc
&& scat != &rm->data.op_sg[rm->data.op_count]) {
len = min(RDS_FRAG_SIZE,
sg_dma_len(scat) - rm->data.op_dmaoff);
send->s_wr.num_sge = 2;
send->s_sge[1].addr = sg_dma_address(scat);
send->s_sge[1].addr += rm->data.op_dmaoff;
send->s_sge[1].length = len;
send->s_sge[1].lkey = ic->i_pd->local_dma_lkey;
bytes_sent += len;
rm->data.op_dmaoff += len;
if (rm->data.op_dmaoff == sg_dma_len(scat)) {
scat++;
rm->data.op_dmasg++;
rm->data.op_dmaoff = 0;
}
}
rds_ib_set_wr_signal_state(ic, send, false);
/*
* Always signal the last one if we're stopping due to flow control.
*/
if (ic->i_flowctl && flow_controlled && i == (work_alloc - 1)) {
rds_ib_set_wr_signal_state(ic, send, true);
send->s_wr.send_flags |= IB_SEND_SOLICITED;
}
if (send->s_wr.send_flags & IB_SEND_SIGNALED)
nr_sig++;
rdsdebug("send %p wr %p num_sge %u next %p\n", send,
&send->s_wr, send->s_wr.num_sge, send->s_wr.next);
if (ic->i_flowctl && adv_credits) {
struct rds_header *hdr = ic->i_send_hdrs[pos];
/* add credit and redo the header checksum */
hdr->h_credit = adv_credits;
rds_message_make_checksum(hdr);
adv_credits = 0;
rds_ib_stats_inc(s_ib_tx_credit_updates);
}
ib_dma_sync_single_for_device(ic->rds_ibdev->dev,
ic->i_send_hdrs_dma[pos],
sizeof(struct rds_header),
DMA_TO_DEVICE);
if (prev)
prev->s_wr.next = &send->s_wr;
prev = send;
pos = (pos + 1) % ic->i_send_ring.w_nr;
send = &ic->i_sends[pos];
i++;
} while (i < work_alloc
&& scat != &rm->data.op_sg[rm->data.op_count]);
/* Account the RDS header in the number of bytes we sent, but just once.
* The caller has no concept of fragmentation. */
if (hdr_off == 0)
bytes_sent += sizeof(struct rds_header);
/* if we finished the message then send completion owns it */
if (scat == &rm->data.op_sg[rm->data.op_count]) {
prev->s_op = ic->i_data_op;
prev->s_wr.send_flags |= IB_SEND_SOLICITED;
if (!(prev->s_wr.send_flags & IB_SEND_SIGNALED))
nr_sig += rds_ib_set_wr_signal_state(ic, prev, true);
ic->i_data_op = NULL;
}
/* Put back wrs & credits we didn't use */
if (i < work_alloc) {
rds_ib_ring_unalloc(&ic->i_send_ring, work_alloc - i);
work_alloc = i;
}
if (ic->i_flowctl && i < credit_alloc)
rds_ib_send_add_credits(conn, credit_alloc - i);
if (nr_sig)
atomic_add(nr_sig, &ic->i_signaled_sends);
/* XXX need to worry about failed_wr and partial sends. */
failed_wr = &first->s_wr;
ret = ib_post_send(ic->i_cm_id->qp, &first->s_wr, &failed_wr);
rdsdebug("ic %p first %p (wr %p) ret %d wr %p\n", ic,
first, &first->s_wr, ret, failed_wr);
BUG_ON(failed_wr != &first->s_wr);
if (ret) {
printk(KERN_WARNING "RDS/IB: ib_post_send to %pI6c "
"returned %d\n", &conn->c_faddr, ret);
rds_ib_ring_unalloc(&ic->i_send_ring, work_alloc);
rds_ib_sub_signaled(ic, nr_sig);
if (prev->s_op) {
ic->i_data_op = prev->s_op;
prev->s_op = NULL;
}
rds_ib_conn_error(ic->conn, "ib_post_send failed\n");
goto out;
}
ret = bytes_sent;
out:
BUG_ON(adv_credits);
return ret;
}
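/*
 * Illustrative sketch, not from the RDS sources: the loop above chains the
 * per-fragment work requests through prev->s_wr.next and then posts only the
 * head; the HCA walks the whole chain from that single ib_post_send() call.
 * A minimal stand-alone version of that chaining pattern might look like
 * this (hypothetical helper; assumes the caller supplies a QP and two
 * already-filled SGEs).
 */
static int example_post_chained_sends(struct ib_qp *qp,
				      struct ib_sge *sge0, struct ib_sge *sge1)
{
	struct ib_send_wr wr[2] = {};
	const struct ib_send_wr *bad_wr;

	/* first WR: one SGE, linked to the second */
	wr[0].opcode = IB_WR_SEND;
	wr[0].sg_list = sge0;
	wr[0].num_sge = 1;
	wr[0].next = &wr[1];

	/* last WR: terminates the chain and asks for a completion */
	wr[1].opcode = IB_WR_SEND;
	wr[1].sg_list = sge1;
	wr[1].num_sge = 1;
	wr[1].next = NULL;
	wr[1].send_flags = IB_SEND_SIGNALED;

	/* posting the head submits the whole chain */
	return ib_post_send(qp, &wr[0], &bad_wr);
}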
/*
* Issue atomic operation.
* A simplified version of the rdma case, we always map 1 SG, and
* only 8 bytes, for the return value from the atomic operation.
*/
int rds_ib_xmit_atomic(struct rds_connection *conn, struct rm_atomic_op *op)
{
struct rds_ib_connection *ic = conn->c_transport_data;
struct rds_ib_send_work *send = NULL;
const struct ib_send_wr *failed_wr;
u32 pos;
u32 work_alloc;
int ret;
int nr_sig = 0;
work_alloc = rds_ib_ring_alloc(&ic->i_send_ring, 1, &pos);
if (work_alloc != 1) {
rds_ib_stats_inc(s_ib_tx_ring_full);
ret = -ENOMEM;
goto out;
}
/* address of send request in ring */
send = &ic->i_sends[pos];
send->s_queued = jiffies;
if (op->op_type == RDS_ATOMIC_TYPE_CSWP) {
send->s_atomic_wr.wr.opcode = IB_WR_MASKED_ATOMIC_CMP_AND_SWP;
send->s_atomic_wr.compare_add = op->op_m_cswp.compare;
send->s_atomic_wr.swap = op->op_m_cswp.swap;
send->s_atomic_wr.compare_add_mask = op->op_m_cswp.compare_mask;
send->s_atomic_wr.swap_mask = op->op_m_cswp.swap_mask;
} else { /* FADD */
send->s_atomic_wr.wr.opcode = IB_WR_MASKED_ATOMIC_FETCH_AND_ADD;
send->s_atomic_wr.compare_add = op->op_m_fadd.add;
send->s_atomic_wr.swap = 0;
send->s_atomic_wr.compare_add_mask = op->op_m_fadd.nocarry_mask;
send->s_atomic_wr.swap_mask = 0;
}
send->s_wr.send_flags = 0;
nr_sig = rds_ib_set_wr_signal_state(ic, send, op->op_notify);
send->s_atomic_wr.wr.num_sge = 1;
send->s_atomic_wr.wr.next = NULL;
send->s_atomic_wr.remote_addr = op->op_remote_addr;
send->s_atomic_wr.rkey = op->op_rkey;
send->s_op = op;
rds_message_addref(container_of(send->s_op, struct rds_message, atomic));
/* map 8 byte retval buffer to the device */
ret = ib_dma_map_sg(ic->i_cm_id->device, op->op_sg, 1, DMA_FROM_DEVICE);
rdsdebug("ic %p mapping atomic op %p. mapped %d pg\n", ic, op, ret);
if (ret != 1) {
rds_ib_ring_unalloc(&ic->i_send_ring, work_alloc);
rds_ib_stats_inc(s_ib_tx_sg_mapping_failure);
ret = -ENOMEM; /* XXX ? */
goto out;
}
/* Convert our struct scatterlist to struct ib_sge */
send->s_sge[0].addr = sg_dma_address(op->op_sg);
send->s_sge[0].length = sg_dma_len(op->op_sg);
send->s_sge[0].lkey = ic->i_pd->local_dma_lkey;
rdsdebug("rva %Lx rpa %Lx len %u\n", op->op_remote_addr,
send->s_sge[0].addr, send->s_sge[0].length);
if (nr_sig)
atomic_add(nr_sig, &ic->i_signaled_sends);
failed_wr = &send->s_atomic_wr.wr;
ret = ib_post_send(ic->i_cm_id->qp, &send->s_atomic_wr.wr, &failed_wr);
rdsdebug("ic %p send %p (wr %p) ret %d wr %p\n", ic,
send, &send->s_atomic_wr, ret, failed_wr);
BUG_ON(failed_wr != &send->s_atomic_wr.wr);
if (ret) {
printk(KERN_WARNING "RDS/IB: atomic ib_post_send to %pI6c "
"returned %d\n", &conn->c_faddr, ret);
rds_ib_ring_unalloc(&ic->i_send_ring, work_alloc);
rds_ib_sub_signaled(ic, nr_sig);
goto out;
}
if (unlikely(failed_wr != &send->s_atomic_wr.wr)) {
printk(KERN_WARNING "RDS/IB: atomic ib_post_send() rc=%d, but failed_wqe updated!\n", ret);
BUG_ON(failed_wr != &send->s_atomic_wr.wr);
}
out:
return ret;
}
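/*
 * Illustrative sketch, not from the RDS sources: with all-ones masks the
 * masked opcodes used above degenerate into an ordinary 8-byte atomic, so a
 * plain compare-and-swap can be expressed through the same fields. A
 * hypothetical helper filling a struct ib_atomic_wr that way:
 */
static void example_fill_cswp_wr(struct ib_atomic_wr *awr, u64 remote_addr,
				 u32 rkey, u64 expect, u64 newval)
{
	memset(awr, 0, sizeof(*awr));
	awr->wr.opcode = IB_WR_MASKED_ATOMIC_CMP_AND_SWP;
	awr->wr.num_sge = 1;	/* caller still attaches the 8-byte result SGE */
	awr->remote_addr = remote_addr;
	awr->rkey = rkey;
	awr->compare_add = expect;
	awr->swap = newval;
	/* all-ones masks: compare and swap every bit, i.e. a normal CAS */
	awr->compare_add_mask = ~0ULL;
	awr->swap_mask = ~0ULL;
}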
int rds_ib_xmit_rdma(struct rds_connection *conn, struct rm_rdma_op *op)
{
struct rds_ib_connection *ic = conn->c_transport_data;
struct rds_ib_send_work *send = NULL;
struct rds_ib_send_work *first;
struct rds_ib_send_work *prev;
const struct ib_send_wr *failed_wr;
struct scatterlist *scat;
unsigned long len;
u64 remote_addr = op->op_remote_addr;
u32 max_sge = ic->rds_ibdev->max_sge;
u32 pos;
u32 work_alloc;
u32 i;
u32 j;
int sent;
int ret;
int num_sge;
int nr_sig = 0;
u64 odp_addr = op->op_odp_addr;
u32 odp_lkey = 0;
/* map the op the first time we see it */
if (!op->op_odp_mr) {
if (!op->op_mapped) {
op->op_count =
ib_dma_map_sg(ic->i_cm_id->device, op->op_sg,
op->op_nents,
(op->op_write) ? DMA_TO_DEVICE :
DMA_FROM_DEVICE);
rdsdebug("ic %p mapping op %p: %d\n", ic, op,
op->op_count);
if (op->op_count == 0) {
rds_ib_stats_inc(s_ib_tx_sg_mapping_failure);
ret = -ENOMEM; /* XXX ? */
goto out;
}
op->op_mapped = 1;
}
} else {
op->op_count = op->op_nents;
odp_lkey = rds_ib_get_lkey(op->op_odp_mr->r_trans_private);
}
/*
* Instead of knowing how to return a partial rdma read/write we insist that there
* be enough work requests to send the entire message.
*/
i = DIV_ROUND_UP(op->op_count, max_sge);
work_alloc = rds_ib_ring_alloc(&ic->i_send_ring, i, &pos);
if (work_alloc != i) {
rds_ib_ring_unalloc(&ic->i_send_ring, work_alloc);
rds_ib_stats_inc(s_ib_tx_ring_full);
ret = -ENOMEM;
goto out;
}
send = &ic->i_sends[pos];
first = send;
prev = NULL;
scat = &op->op_sg[0];
sent = 0;
num_sge = op->op_count;
for (i = 0; i < work_alloc && scat != &op->op_sg[op->op_count]; i++) {
send->s_wr.send_flags = 0;
send->s_queued = jiffies;
send->s_op = NULL;
if (!op->op_notify)
nr_sig += rds_ib_set_wr_signal_state(ic, send,
op->op_notify);
send->s_wr.opcode = op->op_write ? IB_WR_RDMA_WRITE : IB_WR_RDMA_READ;
send->s_rdma_wr.remote_addr = remote_addr;
send->s_rdma_wr.rkey = op->op_rkey;
if (num_sge > max_sge) {
send->s_rdma_wr.wr.num_sge = max_sge;
num_sge -= max_sge;
} else {
send->s_rdma_wr.wr.num_sge = num_sge;
}
send->s_rdma_wr.wr.next = NULL;
if (prev)
prev->s_rdma_wr.wr.next = &send->s_rdma_wr.wr;
for (j = 0; j < send->s_rdma_wr.wr.num_sge &&
scat != &op->op_sg[op->op_count]; j++) {
len = sg_dma_len(scat);
if (!op->op_odp_mr) {
send->s_sge[j].addr = sg_dma_address(scat);
send->s_sge[j].lkey = ic->i_pd->local_dma_lkey;
} else {
send->s_sge[j].addr = odp_addr;
send->s_sge[j].lkey = odp_lkey;
}
send->s_sge[j].length = len;
sent += len;
rdsdebug("ic %p sent %d remote_addr %llu\n", ic, sent, remote_addr);
remote_addr += len;
odp_addr += len;
scat++;
}
rdsdebug("send %p wr %p num_sge %u next %p\n", send,
&send->s_rdma_wr.wr,
send->s_rdma_wr.wr.num_sge,
send->s_rdma_wr.wr.next);
prev = send;
if (++send == &ic->i_sends[ic->i_send_ring.w_nr])
send = ic->i_sends;
}
/* give a reference to the last op */
if (scat == &op->op_sg[op->op_count]) {
prev->s_op = op;
rds_message_addref(container_of(op, struct rds_message, rdma));
}
if (i < work_alloc) {
rds_ib_ring_unalloc(&ic->i_send_ring, work_alloc - i);
work_alloc = i;
}
if (nr_sig)
atomic_add(nr_sig, &ic->i_signaled_sends);
failed_wr = &first->s_rdma_wr.wr;
ret = ib_post_send(ic->i_cm_id->qp, &first->s_rdma_wr.wr, &failed_wr);
rdsdebug("ic %p first %p (wr %p) ret %d wr %p\n", ic,
first, &first->s_rdma_wr.wr, ret, failed_wr);
BUG_ON(failed_wr != &first->s_rdma_wr.wr);
if (ret) {
printk(KERN_WARNING "RDS/IB: rdma ib_post_send to %pI6c "
"returned %d\n", &conn->c_faddr, ret);
rds_ib_ring_unalloc(&ic->i_send_ring, work_alloc);
rds_ib_sub_signaled(ic, nr_sig);
goto out;
}
if (unlikely(failed_wr != &first->s_rdma_wr.wr)) {
printk(KERN_WARNING "RDS/IB: ib_post_send() rc=%d, but failed_wqe updated!\n", ret);
BUG_ON(failed_wr != &first->s_rdma_wr.wr);
}
out:
return ret;
}
void rds_ib_xmit_path_complete(struct rds_conn_path *cp)
{
struct rds_connection *conn = cp->cp_conn;
struct rds_ib_connection *ic = conn->c_transport_data;
/* We may have a pending ACK or window update we were unable
* to send previously (due to flow control). Try again. */
rds_ib_attempt_ack(ic);
}
| linux-master | net/rds/ib_send.c |
/*
* Copyright (c) 2006, 2017 Oracle and/or its affiliates. All rights reserved.
*
* This software is available to you under a choice of one of two
* licenses. You may choose to be licensed under the terms of the GNU
* General Public License (GPL) Version 2, available from the file
* COPYING in the main directory of this source tree, or the
* OpenIB.org BSD license below:
*
* Redistribution and use in source and binary forms, with or
* without modification, are permitted provided that the following
* conditions are met:
*
* - Redistributions of source code must retain the above
* copyright notice, this list of conditions and the following
* disclaimer.
*
* - Redistributions in binary form must reproduce the above
* copyright notice, this list of conditions and the following
* disclaimer in the documentation and/or other materials
* provided with the distribution.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
* EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
* MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
* NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
* BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
* ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
* CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
* SOFTWARE.
*
*/
#include <linux/kernel.h>
#include <linux/slab.h>
#include <net/tcp.h>
#include <trace/events/sock.h>
#include "rds.h"
#include "tcp.h"
static struct kmem_cache *rds_tcp_incoming_slab;
static void rds_tcp_inc_purge(struct rds_incoming *inc)
{
struct rds_tcp_incoming *tinc;
tinc = container_of(inc, struct rds_tcp_incoming, ti_inc);
rdsdebug("purging tinc %p inc %p\n", tinc, inc);
skb_queue_purge(&tinc->ti_skb_list);
}
void rds_tcp_inc_free(struct rds_incoming *inc)
{
struct rds_tcp_incoming *tinc;
tinc = container_of(inc, struct rds_tcp_incoming, ti_inc);
rds_tcp_inc_purge(inc);
rdsdebug("freeing tinc %p inc %p\n", tinc, inc);
kmem_cache_free(rds_tcp_incoming_slab, tinc);
}
/*
* this is pretty lame, but, whatever.
*/
int rds_tcp_inc_copy_to_user(struct rds_incoming *inc, struct iov_iter *to)
{
struct rds_tcp_incoming *tinc;
struct sk_buff *skb;
int ret = 0;
if (!iov_iter_count(to))
goto out;
tinc = container_of(inc, struct rds_tcp_incoming, ti_inc);
skb_queue_walk(&tinc->ti_skb_list, skb) {
unsigned long to_copy, skb_off;
for (skb_off = 0; skb_off < skb->len; skb_off += to_copy) {
to_copy = iov_iter_count(to);
to_copy = min(to_copy, skb->len - skb_off);
if (skb_copy_datagram_iter(skb, skb_off, to, to_copy))
return -EFAULT;
rds_stats_add(s_copy_to_user, to_copy);
ret += to_copy;
if (!iov_iter_count(to))
goto out;
}
}
out:
return ret;
}
/*
* We have a series of skbs that have fragmented pieces of the congestion
* bitmap. They must add up to the exact size of the congestion bitmap. We
* use the skb helpers to copy those into the pages that make up the in-memory
* congestion bitmap for the remote address of this connection. We then tell
* the congestion core that the bitmap has been changed so that it can wake up
* sleepers.
*
* This is racing with sending paths which are using test_bit to see if the
* bitmap indicates that their recipient is congested.
*/
static void rds_tcp_cong_recv(struct rds_connection *conn,
struct rds_tcp_incoming *tinc)
{
struct sk_buff *skb;
unsigned int to_copy, skb_off;
unsigned int map_off;
unsigned int map_page;
struct rds_cong_map *map;
int ret;
/* catch completely corrupt packets */
if (be32_to_cpu(tinc->ti_inc.i_hdr.h_len) != RDS_CONG_MAP_BYTES)
return;
map_page = 0;
map_off = 0;
map = conn->c_fcong;
skb_queue_walk(&tinc->ti_skb_list, skb) {
skb_off = 0;
while (skb_off < skb->len) {
to_copy = min_t(unsigned int, PAGE_SIZE - map_off,
skb->len - skb_off);
BUG_ON(map_page >= RDS_CONG_MAP_PAGES);
/* only returns 0 or -error */
ret = skb_copy_bits(skb, skb_off,
(void *)map->m_page_addrs[map_page] + map_off,
to_copy);
BUG_ON(ret != 0);
skb_off += to_copy;
map_off += to_copy;
if (map_off == PAGE_SIZE) {
map_off = 0;
map_page++;
}
}
}
rds_cong_map_updated(map, ~(u64) 0);
}
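/*
 * Illustrative sketch, not from the RDS sources: the function above keeps a
 * (map_page, map_off) cursor over an array of page-sized buffers and
 * advances it as each skb fragment is consumed. The same cursor pattern,
 * reduced to memcpy() over a caller-supplied page array (hypothetical
 * helper, plain C):
 */
static void example_copy_into_pages(char **pages, size_t page_size,
				    size_t *page_idx, size_t *page_off,
				    const char *src, size_t len)
{
	while (len) {
		size_t to_copy = page_size - *page_off;

		if (to_copy > len)
			to_copy = len;
		memcpy(pages[*page_idx] + *page_off, src, to_copy);
		src += to_copy;
		len -= to_copy;
		*page_off += to_copy;
		if (*page_off == page_size) {	/* page filled, move on */
			*page_off = 0;
			(*page_idx)++;
		}
	}
}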
struct rds_tcp_desc_arg {
struct rds_conn_path *conn_path;
gfp_t gfp;
};
static int rds_tcp_data_recv(read_descriptor_t *desc, struct sk_buff *skb,
unsigned int offset, size_t len)
{
struct rds_tcp_desc_arg *arg = desc->arg.data;
struct rds_conn_path *cp = arg->conn_path;
struct rds_tcp_connection *tc = cp->cp_transport_data;
struct rds_tcp_incoming *tinc = tc->t_tinc;
struct sk_buff *clone;
size_t left = len, to_copy;
rdsdebug("tcp data tc %p skb %p offset %u len %zu\n", tc, skb, offset,
len);
/*
* tcp_read_sock() interprets partial progress as an indication to stop
* processing.
*/
while (left) {
if (!tinc) {
tinc = kmem_cache_alloc(rds_tcp_incoming_slab,
arg->gfp);
if (!tinc) {
desc->error = -ENOMEM;
goto out;
}
tc->t_tinc = tinc;
rdsdebug("allocated tinc %p\n", tinc);
rds_inc_path_init(&tinc->ti_inc, cp,
&cp->cp_conn->c_faddr);
tinc->ti_inc.i_rx_lat_trace[RDS_MSG_RX_HDR] =
local_clock();
/*
* XXX: we might be able to use the __ variants when
* we've already serialized at a higher level.
*/
skb_queue_head_init(&tinc->ti_skb_list);
}
if (left && tc->t_tinc_hdr_rem) {
to_copy = min(tc->t_tinc_hdr_rem, left);
rdsdebug("copying %zu header from skb %p\n", to_copy,
skb);
skb_copy_bits(skb, offset,
(char *)&tinc->ti_inc.i_hdr +
sizeof(struct rds_header) -
tc->t_tinc_hdr_rem,
to_copy);
tc->t_tinc_hdr_rem -= to_copy;
left -= to_copy;
offset += to_copy;
if (tc->t_tinc_hdr_rem == 0) {
/* could be 0 for a 0 len message */
tc->t_tinc_data_rem =
be32_to_cpu(tinc->ti_inc.i_hdr.h_len);
tinc->ti_inc.i_rx_lat_trace[RDS_MSG_RX_START] =
local_clock();
}
}
if (left && tc->t_tinc_data_rem) {
to_copy = min(tc->t_tinc_data_rem, left);
clone = pskb_extract(skb, offset, to_copy, arg->gfp);
if (!clone) {
desc->error = -ENOMEM;
goto out;
}
skb_queue_tail(&tinc->ti_skb_list, clone);
rdsdebug("skb %p data %p len %d off %u to_copy %zu -> "
"clone %p data %p len %d\n",
skb, skb->data, skb->len, offset, to_copy,
clone, clone->data, clone->len);
tc->t_tinc_data_rem -= to_copy;
left -= to_copy;
offset += to_copy;
}
if (tc->t_tinc_hdr_rem == 0 && tc->t_tinc_data_rem == 0) {
struct rds_connection *conn = cp->cp_conn;
if (tinc->ti_inc.i_hdr.h_flags == RDS_FLAG_CONG_BITMAP)
rds_tcp_cong_recv(conn, tinc);
else
rds_recv_incoming(conn, &conn->c_faddr,
&conn->c_laddr,
&tinc->ti_inc,
arg->gfp);
tc->t_tinc_hdr_rem = sizeof(struct rds_header);
tc->t_tinc_data_rem = 0;
tc->t_tinc = NULL;
rds_inc_put(&tinc->ti_inc);
tinc = NULL;
}
}
out:
rdsdebug("returning len %zu left %zu skb len %d rx queue depth %d\n",
len, left, skb->len,
skb_queue_len(&tc->t_sock->sk->sk_receive_queue));
return len - left;
}
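/*
 * Illustrative sketch, not from the RDS sources: the receive path above
 * tracks two counters, header bytes still missing (t_tinc_hdr_rem) and
 * payload bytes still missing (t_tinc_data_rem), and peels each chunk of
 * the TCP byte stream against them in order. A stripped-down version of
 * that state machine over a flat buffer (hypothetical types; the caller
 * starts with hdr_rem == EX_HDR_LEN, and hdr_to_len() stands in for
 * be32_to_cpu(hdr->h_len)):
 */
#define EX_HDR_LEN 48	/* stand-in for sizeof(struct rds_header) */

struct ex_reasm {
	size_t hdr_rem;			/* header bytes still needed */
	size_t data_rem;		/* payload bytes still needed */
	unsigned char hdr[EX_HDR_LEN];
};

static size_t example_reasm_feed(struct ex_reasm *r, const unsigned char *buf,
				 size_t len,
				 size_t (*hdr_to_len)(const unsigned char *))
{
	size_t left = len;

	while (left) {
		if (r->hdr_rem) {
			size_t n = min(r->hdr_rem, left);

			memcpy(r->hdr + (EX_HDR_LEN - r->hdr_rem), buf, n);
			r->hdr_rem -= n;
			buf += n;
			left -= n;
			if (!r->hdr_rem)	/* header done: learn body size */
				r->data_rem = hdr_to_len(r->hdr);
		} else if (r->data_rem) {
			size_t n = min(r->data_rem, left);

			/* a real implementation would queue this payload */
			r->data_rem -= n;
			buf += n;
			left -= n;
		}
		if (!r->hdr_rem && !r->data_rem)
			r->hdr_rem = EX_HDR_LEN; /* delivered, expect next header */
	}
	return len - left;
}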
/* the caller has to hold the sock lock */
static int rds_tcp_read_sock(struct rds_conn_path *cp, gfp_t gfp)
{
struct rds_tcp_connection *tc = cp->cp_transport_data;
struct socket *sock = tc->t_sock;
read_descriptor_t desc;
struct rds_tcp_desc_arg arg;
/* It's like glib in the kernel! */
arg.conn_path = cp;
arg.gfp = gfp;
desc.arg.data = &arg;
desc.error = 0;
desc.count = 1; /* give more than one skb per call */
tcp_read_sock(sock->sk, &desc, rds_tcp_data_recv);
rdsdebug("tcp_read_sock for tc %p gfp 0x%x returned %d\n", tc, gfp,
desc.error);
return desc.error;
}
/*
* We hold the sock lock to serialize our rds_tcp_recv->tcp_read_sock from
* data_ready.
*
* if we fail to allocate, we're in trouble; blindly wait some time before
* trying again to see if the VM can free up something for us.
*/
int rds_tcp_recv_path(struct rds_conn_path *cp)
{
struct rds_tcp_connection *tc = cp->cp_transport_data;
struct socket *sock = tc->t_sock;
int ret = 0;
rdsdebug("recv worker path [%d] tc %p sock %p\n",
cp->cp_index, tc, sock);
lock_sock(sock->sk);
ret = rds_tcp_read_sock(cp, GFP_KERNEL);
release_sock(sock->sk);
return ret;
}
void rds_tcp_data_ready(struct sock *sk)
{
void (*ready)(struct sock *sk);
struct rds_conn_path *cp;
struct rds_tcp_connection *tc;
trace_sk_data_ready(sk);
rdsdebug("data ready sk %p\n", sk);
read_lock_bh(&sk->sk_callback_lock);
cp = sk->sk_user_data;
if (!cp) { /* check for teardown race */
ready = sk->sk_data_ready;
goto out;
}
tc = cp->cp_transport_data;
ready = tc->t_orig_data_ready;
rds_tcp_stats_inc(s_tcp_data_ready_calls);
if (rds_tcp_read_sock(cp, GFP_ATOMIC) == -ENOMEM) {
rcu_read_lock();
if (!rds_destroy_pending(cp->cp_conn))
queue_delayed_work(rds_wq, &cp->cp_recv_w, 0);
rcu_read_unlock();
}
out:
read_unlock_bh(&sk->sk_callback_lock);
ready(sk);
}
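/*
 * Illustrative sketch, not from the RDS sources: t_orig_data_ready used
 * above is the socket's original ->sk_data_ready, saved when the transport
 * takes over the callback so it can still be chained afterwards. The bare
 * takeover pattern, with a hypothetical context structure:
 */
struct ex_sk_ctx {
	void (*orig_data_ready)(struct sock *sk);
};

static void example_data_ready(struct sock *sk);

static void example_hook_data_ready(struct sock *sk, struct ex_sk_ctx *ctx)
{
	write_lock_bh(&sk->sk_callback_lock);
	ctx->orig_data_ready = sk->sk_data_ready;	/* remember original */
	sk->sk_user_data = ctx;
	sk->sk_data_ready = example_data_ready;		/* install ours */
	write_unlock_bh(&sk->sk_callback_lock);
}

static void example_data_ready(struct sock *sk)
{
	void (*ready)(struct sock *sk);
	struct ex_sk_ctx *ctx;

	read_lock_bh(&sk->sk_callback_lock);
	ctx = sk->sk_user_data;
	ready = ctx ? ctx->orig_data_ready : sk->sk_data_ready;
	/* ... transport-specific processing would go here ... */
	read_unlock_bh(&sk->sk_callback_lock);
	ready(sk);	/* always chain to the saved callback */
}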
int rds_tcp_recv_init(void)
{
rds_tcp_incoming_slab = kmem_cache_create("rds_tcp_incoming",
sizeof(struct rds_tcp_incoming),
0, 0, NULL);
if (!rds_tcp_incoming_slab)
return -ENOMEM;
return 0;
}
void rds_tcp_recv_exit(void)
{
kmem_cache_destroy(rds_tcp_incoming_slab);
}
| linux-master | net/rds/tcp_recv.c |
/*
* Copyright (c) 2006 Oracle. All rights reserved.
*
* This software is available to you under a choice of one of two
* licenses. You may choose to be licensed under the terms of the GNU
* General Public License (GPL) Version 2, available from the file
* COPYING in the main directory of this source tree, or the
* OpenIB.org BSD license below:
*
* Redistribution and use in source and binary forms, with or
* without modification, are permitted provided that the following
* conditions are met:
*
* - Redistributions of source code must retain the above
* copyright notice, this list of conditions and the following
* disclaimer.
*
* - Redistributions in binary form must reproduce the above
* copyright notice, this list of conditions and the following
* disclaimer in the documentation and/or other materials
* provided with the distribution.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
* EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
* MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
* NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
* BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
* ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
* CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
* SOFTWARE.
*
*/
#include <linux/kernel.h>
#include <linux/sysctl.h>
#include <linux/proc_fs.h>
#include "ib.h"
static struct ctl_table_header *rds_ib_sysctl_hdr;
unsigned long rds_ib_sysctl_max_send_wr = RDS_IB_DEFAULT_SEND_WR;
unsigned long rds_ib_sysctl_max_recv_wr = RDS_IB_DEFAULT_RECV_WR;
unsigned long rds_ib_sysctl_max_recv_allocation = (128 * 1024 * 1024) / RDS_FRAG_SIZE;
static unsigned long rds_ib_sysctl_max_wr_min = 1;
/* hardware will fail CQ creation long before this */
static unsigned long rds_ib_sysctl_max_wr_max = (u32)~0;
unsigned long rds_ib_sysctl_max_unsig_wrs = 16;
static unsigned long rds_ib_sysctl_max_unsig_wr_min = 1;
static unsigned long rds_ib_sysctl_max_unsig_wr_max = 64;
/*
* This sysctl does nothing.
*
* Backwards compatibility with RDS 3.0 wire protocol
* disables initial FC credit exchange.
* If it's ever possible to drop 3.0 support,
* setting this to 1 and moving init/refill of send/recv
* rings from ib_cm_connect_complete() back into ib_setup_qp()
* will cause credits to be added before protocol negotiation.
*/
unsigned int rds_ib_sysctl_flow_control = 0;
static struct ctl_table rds_ib_sysctl_table[] = {
{
.procname = "max_send_wr",
.data = &rds_ib_sysctl_max_send_wr,
.maxlen = sizeof(unsigned long),
.mode = 0644,
.proc_handler = proc_doulongvec_minmax,
.extra1 = &rds_ib_sysctl_max_wr_min,
.extra2 = &rds_ib_sysctl_max_wr_max,
},
{
.procname = "max_recv_wr",
.data = &rds_ib_sysctl_max_recv_wr,
.maxlen = sizeof(unsigned long),
.mode = 0644,
.proc_handler = proc_doulongvec_minmax,
.extra1 = &rds_ib_sysctl_max_wr_min,
.extra2 = &rds_ib_sysctl_max_wr_max,
},
{
.procname = "max_unsignaled_wr",
.data = &rds_ib_sysctl_max_unsig_wrs,
.maxlen = sizeof(unsigned long),
.mode = 0644,
.proc_handler = proc_doulongvec_minmax,
.extra1 = &rds_ib_sysctl_max_unsig_wr_min,
.extra2 = &rds_ib_sysctl_max_unsig_wr_max,
},
{
.procname = "max_recv_allocation",
.data = &rds_ib_sysctl_max_recv_allocation,
.maxlen = sizeof(unsigned long),
.mode = 0644,
.proc_handler = proc_doulongvec_minmax,
},
{
.procname = "flow_control",
.data = &rds_ib_sysctl_flow_control,
.maxlen = sizeof(rds_ib_sysctl_flow_control),
.mode = 0644,
.proc_handler = proc_dointvec,
},
{ }
};
void rds_ib_sysctl_exit(void)
{
if (rds_ib_sysctl_hdr)
unregister_net_sysctl_table(rds_ib_sysctl_hdr);
}
int rds_ib_sysctl_init(void)
{
rds_ib_sysctl_hdr = register_net_sysctl(&init_net, "net/rds/ib", rds_ib_sysctl_table);
if (!rds_ib_sysctl_hdr)
return -ENOMEM;
return 0;
}
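/*
 * Illustrative sketch, not from the RDS sources: the "net/rds/ib"
 * registration above surfaces these knobs under /proc/sys/net/rds/ib/.
 * A small userspace reader (assumes the rds_rdma module is loaded so the
 * path exists):
 */
#include <stdio.h>

int main(void)
{
	/* path follows from the register_net_sysctl() call above */
	FILE *f = fopen("/proc/sys/net/rds/ib/max_send_wr", "r");
	unsigned long val;

	if (!f) {
		perror("fopen");
		return 1;
	}
	if (fscanf(f, "%lu", &val) == 1)
		printf("max_send_wr = %lu\n", val);
	fclose(f);
	return 0;
}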
| linux-master | net/rds/ib_sysctl.c |
/*
* Copyright (c) 2006, 2019 Oracle and/or its affiliates. All rights reserved.
*
* This software is available to you under a choice of one of two
* licenses. You may choose to be licensed under the terms of the GNU
* General Public License (GPL) Version 2, available from the file
* COPYING in the main directory of this source tree, or the
* OpenIB.org BSD license below:
*
* Redistribution and use in source and binary forms, with or
* without modification, are permitted provided that the following
* conditions are met:
*
* - Redistributions of source code must retain the above
* copyright notice, this list of conditions and the following
* disclaimer.
*
* - Redistributions in binary form must reproduce the above
* copyright notice, this list of conditions and the following
* disclaimer in the documentation and/or other materials
* provided with the distribution.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
* EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
* MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
* NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
* BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
* ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
* CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
* SOFTWARE.
*
*/
#include <linux/kernel.h>
#include <linux/in.h>
#include <linux/if.h>
#include <linux/netdevice.h>
#include <linux/inetdevice.h>
#include <linux/if_arp.h>
#include <linux/delay.h>
#include <linux/slab.h>
#include <linux/module.h>
#include <net/addrconf.h>
#include "rds_single_path.h"
#include "rds.h"
#include "ib.h"
#include "ib_mr.h"
static unsigned int rds_ib_mr_1m_pool_size = RDS_MR_1M_POOL_SIZE;
static unsigned int rds_ib_mr_8k_pool_size = RDS_MR_8K_POOL_SIZE;
unsigned int rds_ib_retry_count = RDS_IB_DEFAULT_RETRY_COUNT;
static atomic_t rds_ib_unloading;
module_param(rds_ib_mr_1m_pool_size, int, 0444);
MODULE_PARM_DESC(rds_ib_mr_1m_pool_size, " Max number of 1M mr per HCA");
module_param(rds_ib_mr_8k_pool_size, int, 0444);
MODULE_PARM_DESC(rds_ib_mr_8k_pool_size, " Max number of 8K mr per HCA");
module_param(rds_ib_retry_count, int, 0444);
MODULE_PARM_DESC(rds_ib_retry_count, " Number of hw retries before reporting an error");
/*
* we have a clumsy combination of RCU and a rwsem protecting this list
* because it is used both in the get_mr fast path and while blocking in
* the FMR flushing path.
*/
DECLARE_RWSEM(rds_ib_devices_lock);
struct list_head rds_ib_devices;
/* NOTE: if also grabbing ibdev lock, grab this first */
DEFINE_SPINLOCK(ib_nodev_conns_lock);
LIST_HEAD(ib_nodev_conns);
static void rds_ib_nodev_connect(void)
{
struct rds_ib_connection *ic;
spin_lock(&ib_nodev_conns_lock);
list_for_each_entry(ic, &ib_nodev_conns, ib_node)
rds_conn_connect_if_down(ic->conn);
spin_unlock(&ib_nodev_conns_lock);
}
static void rds_ib_dev_shutdown(struct rds_ib_device *rds_ibdev)
{
struct rds_ib_connection *ic;
unsigned long flags;
spin_lock_irqsave(&rds_ibdev->spinlock, flags);
list_for_each_entry(ic, &rds_ibdev->conn_list, ib_node)
rds_conn_path_drop(&ic->conn->c_path[0], true);
spin_unlock_irqrestore(&rds_ibdev->spinlock, flags);
}
/*
* rds_ib_destroy_mr_pool() blocks on a few things and mrs drop references
* from interrupt context, so we push freeing off into a work struct in krdsd.
*/
static void rds_ib_dev_free(struct work_struct *work)
{
struct rds_ib_ipaddr *i_ipaddr, *i_next;
struct rds_ib_device *rds_ibdev = container_of(work,
struct rds_ib_device, free_work);
if (rds_ibdev->mr_8k_pool)
rds_ib_destroy_mr_pool(rds_ibdev->mr_8k_pool);
if (rds_ibdev->mr_1m_pool)
rds_ib_destroy_mr_pool(rds_ibdev->mr_1m_pool);
if (rds_ibdev->pd)
ib_dealloc_pd(rds_ibdev->pd);
list_for_each_entry_safe(i_ipaddr, i_next, &rds_ibdev->ipaddr_list, list) {
list_del(&i_ipaddr->list);
kfree(i_ipaddr);
}
kfree(rds_ibdev->vector_load);
kfree(rds_ibdev);
}
void rds_ib_dev_put(struct rds_ib_device *rds_ibdev)
{
BUG_ON(refcount_read(&rds_ibdev->refcount) == 0);
if (refcount_dec_and_test(&rds_ibdev->refcount))
queue_work(rds_wq, &rds_ibdev->free_work);
}
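/*
 * Illustrative sketch, not from the RDS sources: the put path above never
 * frees in place; when the last reference drops it queues free_work so the
 * blocking teardown in rds_ib_dev_free() runs in workqueue context. The
 * bare refcount-plus-deferred-free pattern, with hypothetical names:
 */
struct ex_dev {
	refcount_t refcount;
	struct work_struct free_work;
};

static void example_dev_free(struct work_struct *work)
{
	struct ex_dev *dev = container_of(work, struct ex_dev, free_work);

	/* safe to sleep, flush and destroy pools here */
	kfree(dev);
}

static struct ex_dev *example_dev_alloc(void)
{
	struct ex_dev *dev = kzalloc(sizeof(*dev), GFP_KERNEL);

	if (!dev)
		return NULL;
	refcount_set(&dev->refcount, 1);
	INIT_WORK(&dev->free_work, example_dev_free);
	return dev;
}

static void example_dev_put(struct ex_dev *dev)
{
	/* the final put defers the actual free to process context */
	if (refcount_dec_and_test(&dev->refcount))
		schedule_work(&dev->free_work);
}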
static int rds_ib_add_one(struct ib_device *device)
{
struct rds_ib_device *rds_ibdev;
int ret;
/* Only handle IB (no iWARP) devices */
if (device->node_type != RDMA_NODE_IB_CA)
return -EOPNOTSUPP;
/* Device must support FRWR */
if (!(device->attrs.device_cap_flags & IB_DEVICE_MEM_MGT_EXTENSIONS))
return -EOPNOTSUPP;
rds_ibdev = kzalloc_node(sizeof(struct rds_ib_device), GFP_KERNEL,
ibdev_to_node(device));
if (!rds_ibdev)
return -ENOMEM;
spin_lock_init(&rds_ibdev->spinlock);
refcount_set(&rds_ibdev->refcount, 1);
INIT_WORK(&rds_ibdev->free_work, rds_ib_dev_free);
INIT_LIST_HEAD(&rds_ibdev->ipaddr_list);
INIT_LIST_HEAD(&rds_ibdev->conn_list);
rds_ibdev->max_wrs = device->attrs.max_qp_wr;
rds_ibdev->max_sge = min(device->attrs.max_send_sge, RDS_IB_MAX_SGE);
rds_ibdev->odp_capable =
!!(device->attrs.kernel_cap_flags &
IBK_ON_DEMAND_PAGING) &&
!!(device->attrs.odp_caps.per_transport_caps.rc_odp_caps &
IB_ODP_SUPPORT_WRITE) &&
!!(device->attrs.odp_caps.per_transport_caps.rc_odp_caps &
IB_ODP_SUPPORT_READ);
rds_ibdev->max_1m_mrs = device->attrs.max_mr ?
min_t(unsigned int, (device->attrs.max_mr / 2),
rds_ib_mr_1m_pool_size) : rds_ib_mr_1m_pool_size;
rds_ibdev->max_8k_mrs = device->attrs.max_mr ?
min_t(unsigned int, ((device->attrs.max_mr / 2) * RDS_MR_8K_SCALE),
rds_ib_mr_8k_pool_size) : rds_ib_mr_8k_pool_size;
rds_ibdev->max_initiator_depth = device->attrs.max_qp_init_rd_atom;
rds_ibdev->max_responder_resources = device->attrs.max_qp_rd_atom;
rds_ibdev->vector_load = kcalloc(device->num_comp_vectors,
sizeof(int),
GFP_KERNEL);
if (!rds_ibdev->vector_load) {
pr_err("RDS/IB: %s failed to allocate vector memory\n",
__func__);
ret = -ENOMEM;
goto put_dev;
}
rds_ibdev->dev = device;
rds_ibdev->pd = ib_alloc_pd(device, 0);
if (IS_ERR(rds_ibdev->pd)) {
ret = PTR_ERR(rds_ibdev->pd);
rds_ibdev->pd = NULL;
goto put_dev;
}
rds_ibdev->mr_1m_pool =
rds_ib_create_mr_pool(rds_ibdev, RDS_IB_MR_1M_POOL);
if (IS_ERR(rds_ibdev->mr_1m_pool)) {
ret = PTR_ERR(rds_ibdev->mr_1m_pool);
rds_ibdev->mr_1m_pool = NULL;
goto put_dev;
}
rds_ibdev->mr_8k_pool =
rds_ib_create_mr_pool(rds_ibdev, RDS_IB_MR_8K_POOL);
if (IS_ERR(rds_ibdev->mr_8k_pool)) {
ret = PTR_ERR(rds_ibdev->mr_8k_pool);
rds_ibdev->mr_8k_pool = NULL;
goto put_dev;
}
rdsdebug("RDS/IB: max_mr = %d, max_wrs = %d, max_sge = %d, max_1m_mrs = %d, max_8k_mrs = %d\n",
device->attrs.max_mr, rds_ibdev->max_wrs, rds_ibdev->max_sge,
rds_ibdev->max_1m_mrs, rds_ibdev->max_8k_mrs);
pr_info("RDS/IB: %s: added\n", device->name);
down_write(&rds_ib_devices_lock);
list_add_tail_rcu(&rds_ibdev->list, &rds_ib_devices);
up_write(&rds_ib_devices_lock);
refcount_inc(&rds_ibdev->refcount);
ib_set_client_data(device, &rds_ib_client, rds_ibdev);
rds_ib_nodev_connect();
return 0;
put_dev:
rds_ib_dev_put(rds_ibdev);
return ret;
}
/*
* New connections use this to find the device to associate with the
* connection. It's not in the fast path so we're not concerned about the
* performance of the IB call. (As of this writing, it uses an interrupt
* blocking spinlock to serialize walking a per-device list of all registered
* clients.)
*
* RCU is used to handle incoming connections racing with device teardown.
* Rather than use a lock to serialize removal from the client_data and
* getting a new reference, we use an RCU grace period. The destruction
* path removes the device from client_data and then waits for all RCU
* readers to finish.
*
* A new connection can get NULL from this if it's arriving on a
* device that is in the process of being removed.
*/
struct rds_ib_device *rds_ib_get_client_data(struct ib_device *device)
{
struct rds_ib_device *rds_ibdev;
rcu_read_lock();
rds_ibdev = ib_get_client_data(device, &rds_ib_client);
if (rds_ibdev)
refcount_inc(&rds_ibdev->refcount);
rcu_read_unlock();
return rds_ibdev;
}
/*
* The IB stack is letting us know that a device is going away. This can
* happen if the underlying HCA driver is removed or if PCI hotplug is removing
* the pci function, for example.
*
* This can be called at any time and can be racing with any other RDS path.
*/
static void rds_ib_remove_one(struct ib_device *device, void *client_data)
{
struct rds_ib_device *rds_ibdev = client_data;
rds_ib_dev_shutdown(rds_ibdev);
/* stop connection attempts from getting a reference to this device. */
ib_set_client_data(device, &rds_ib_client, NULL);
down_write(&rds_ib_devices_lock);
list_del_rcu(&rds_ibdev->list);
up_write(&rds_ib_devices_lock);
/*
* This synchronize_rcu() call waits for readers of both the ib
* client data and the devices list to finish before we drop
* both of those references.
*/
synchronize_rcu();
rds_ib_dev_put(rds_ibdev);
rds_ib_dev_put(rds_ibdev);
}
struct ib_client rds_ib_client = {
.name = "rds_ib",
.add = rds_ib_add_one,
.remove = rds_ib_remove_one
};
static int rds_ib_conn_info_visitor(struct rds_connection *conn,
void *buffer)
{
struct rds_info_rdma_connection *iinfo = buffer;
struct rds_ib_connection *ic = conn->c_transport_data;
/* We will only ever look at IB transports */
if (conn->c_trans != &rds_ib_transport)
return 0;
if (conn->c_isv6)
return 0;
iinfo->src_addr = conn->c_laddr.s6_addr32[3];
iinfo->dst_addr = conn->c_faddr.s6_addr32[3];
if (ic) {
iinfo->tos = conn->c_tos;
iinfo->sl = ic->i_sl;
}
memset(&iinfo->src_gid, 0, sizeof(iinfo->src_gid));
memset(&iinfo->dst_gid, 0, sizeof(iinfo->dst_gid));
if (rds_conn_state(conn) == RDS_CONN_UP) {
struct rds_ib_device *rds_ibdev;
rdma_read_gids(ic->i_cm_id, (union ib_gid *)&iinfo->src_gid,
(union ib_gid *)&iinfo->dst_gid);
rds_ibdev = ic->rds_ibdev;
iinfo->max_send_wr = ic->i_send_ring.w_nr;
iinfo->max_recv_wr = ic->i_recv_ring.w_nr;
iinfo->max_send_sge = rds_ibdev->max_sge;
rds_ib_get_mr_info(rds_ibdev, iinfo);
iinfo->cache_allocs = atomic_read(&ic->i_cache_allocs);
}
return 1;
}
#if IS_ENABLED(CONFIG_IPV6)
/* IPv6 version of rds_ib_conn_info_visitor(). */
static int rds6_ib_conn_info_visitor(struct rds_connection *conn,
void *buffer)
{
struct rds6_info_rdma_connection *iinfo6 = buffer;
struct rds_ib_connection *ic = conn->c_transport_data;
/* We will only ever look at IB transports */
if (conn->c_trans != &rds_ib_transport)
return 0;
iinfo6->src_addr = conn->c_laddr;
iinfo6->dst_addr = conn->c_faddr;
if (ic) {
iinfo6->tos = conn->c_tos;
iinfo6->sl = ic->i_sl;
}
memset(&iinfo6->src_gid, 0, sizeof(iinfo6->src_gid));
memset(&iinfo6->dst_gid, 0, sizeof(iinfo6->dst_gid));
if (rds_conn_state(conn) == RDS_CONN_UP) {
struct rds_ib_device *rds_ibdev;
rdma_read_gids(ic->i_cm_id, (union ib_gid *)&iinfo6->src_gid,
(union ib_gid *)&iinfo6->dst_gid);
rds_ibdev = ic->rds_ibdev;
iinfo6->max_send_wr = ic->i_send_ring.w_nr;
iinfo6->max_recv_wr = ic->i_recv_ring.w_nr;
iinfo6->max_send_sge = rds_ibdev->max_sge;
rds6_ib_get_mr_info(rds_ibdev, iinfo6);
iinfo6->cache_allocs = atomic_read(&ic->i_cache_allocs);
}
return 1;
}
#endif
static void rds_ib_ic_info(struct socket *sock, unsigned int len,
struct rds_info_iterator *iter,
struct rds_info_lengths *lens)
{
u64 buffer[(sizeof(struct rds_info_rdma_connection) + 7) / 8];
rds_for_each_conn_info(sock, len, iter, lens,
rds_ib_conn_info_visitor,
buffer,
sizeof(struct rds_info_rdma_connection));
}
#if IS_ENABLED(CONFIG_IPV6)
/* IPv6 version of rds_ib_ic_info(). */
static void rds6_ib_ic_info(struct socket *sock, unsigned int len,
struct rds_info_iterator *iter,
struct rds_info_lengths *lens)
{
u64 buffer[(sizeof(struct rds6_info_rdma_connection) + 7) / 8];
rds_for_each_conn_info(sock, len, iter, lens,
rds6_ib_conn_info_visitor,
buffer,
sizeof(struct rds6_info_rdma_connection));
}
#endif
/*
* Early RDS/IB was built to only bind to an address if there is an IPoIB
* device with that address set.
*
* If it were me, I'd advocate for something more flexible. Sending and
* receiving should be device-agnostic. Transports would try and maintain
* connections between peers who have messages queued. Userspace would be
* allowed to influence which paths have priority. We could call userspace
* asserting this policy "routing".
*/
static int rds_ib_laddr_check(struct net *net, const struct in6_addr *addr,
__u32 scope_id)
{
int ret;
struct rdma_cm_id *cm_id;
#if IS_ENABLED(CONFIG_IPV6)
struct sockaddr_in6 sin6;
#endif
struct sockaddr_in sin;
struct sockaddr *sa;
bool isv4;
isv4 = ipv6_addr_v4mapped(addr);
/* Create a CMA ID and try to bind it. This catches both
* IB and iWARP capable NICs.
*/
cm_id = rdma_create_id(&init_net, rds_rdma_cm_event_handler,
NULL, RDMA_PS_TCP, IB_QPT_RC);
if (IS_ERR(cm_id))
return PTR_ERR(cm_id);
if (isv4) {
memset(&sin, 0, sizeof(sin));
sin.sin_family = AF_INET;
sin.sin_addr.s_addr = addr->s6_addr32[3];
sa = (struct sockaddr *)&sin;
} else {
#if IS_ENABLED(CONFIG_IPV6)
memset(&sin6, 0, sizeof(sin6));
sin6.sin6_family = AF_INET6;
sin6.sin6_addr = *addr;
sin6.sin6_scope_id = scope_id;
sa = (struct sockaddr *)&sin6;
/* XXX Do a special IPv6 link local address check here. The
* reason is that rdma_bind_addr() always succeeds with IPv6
* link local address regardless it is indeed configured in a
* system.
*/
if (ipv6_addr_type(addr) & IPV6_ADDR_LINKLOCAL) {
struct net_device *dev;
if (scope_id == 0) {
ret = -EADDRNOTAVAIL;
goto out;
}
/* Use init_net for now as RDS is not network
* name space aware.
*/
dev = dev_get_by_index(&init_net, scope_id);
if (!dev) {
ret = -EADDRNOTAVAIL;
goto out;
}
if (!ipv6_chk_addr(&init_net, addr, dev, 1)) {
dev_put(dev);
ret = -EADDRNOTAVAIL;
goto out;
}
dev_put(dev);
}
#else
ret = -EADDRNOTAVAIL;
goto out;
#endif
}
/* rdma_bind_addr will only succeed for IB & iWARP devices */
ret = rdma_bind_addr(cm_id, sa);
/* due to this, we will claim to support iWARP devices unless we
 * check node_type.
 */
if (ret || !cm_id->device ||
cm_id->device->node_type != RDMA_NODE_IB_CA)
ret = -EADDRNOTAVAIL;
rdsdebug("addr %pI6c%%%u ret %d node type %d\n",
addr, scope_id, ret,
cm_id->device ? cm_id->device->node_type : -1);
out:
rdma_destroy_id(cm_id);
return ret;
}
static void rds_ib_unregister_client(void)
{
ib_unregister_client(&rds_ib_client);
/* wait for rds_ib_dev_free() to complete */
flush_workqueue(rds_wq);
}
static void rds_ib_set_unloading(void)
{
atomic_set(&rds_ib_unloading, 1);
}
static bool rds_ib_is_unloading(struct rds_connection *conn)
{
struct rds_conn_path *cp = &conn->c_path[0];
return (test_bit(RDS_DESTROY_PENDING, &cp->cp_flags) ||
atomic_read(&rds_ib_unloading) != 0);
}
void rds_ib_exit(void)
{
rds_ib_set_unloading();
synchronize_rcu();
rds_info_deregister_func(RDS_INFO_IB_CONNECTIONS, rds_ib_ic_info);
#if IS_ENABLED(CONFIG_IPV6)
rds_info_deregister_func(RDS6_INFO_IB_CONNECTIONS, rds6_ib_ic_info);
#endif
rds_ib_unregister_client();
rds_ib_destroy_nodev_conns();
rds_ib_sysctl_exit();
rds_ib_recv_exit();
rds_trans_unregister(&rds_ib_transport);
rds_ib_mr_exit();
}
static u8 rds_ib_get_tos_map(u8 tos)
{
/* 1:1 user to transport map for RDMA transport.
* In future, if custom map is desired, hook can export
* user configurable map.
*/
return tos;
}
struct rds_transport rds_ib_transport = {
.laddr_check = rds_ib_laddr_check,
.xmit_path_complete = rds_ib_xmit_path_complete,
.xmit = rds_ib_xmit,
.xmit_rdma = rds_ib_xmit_rdma,
.xmit_atomic = rds_ib_xmit_atomic,
.recv_path = rds_ib_recv_path,
.conn_alloc = rds_ib_conn_alloc,
.conn_free = rds_ib_conn_free,
.conn_path_connect = rds_ib_conn_path_connect,
.conn_path_shutdown = rds_ib_conn_path_shutdown,
.inc_copy_to_user = rds_ib_inc_copy_to_user,
.inc_free = rds_ib_inc_free,
.cm_initiate_connect = rds_ib_cm_initiate_connect,
.cm_handle_connect = rds_ib_cm_handle_connect,
.cm_connect_complete = rds_ib_cm_connect_complete,
.stats_info_copy = rds_ib_stats_info_copy,
.exit = rds_ib_exit,
.get_mr = rds_ib_get_mr,
.sync_mr = rds_ib_sync_mr,
.free_mr = rds_ib_free_mr,
.flush_mrs = rds_ib_flush_mrs,
.get_tos_map = rds_ib_get_tos_map,
.t_owner = THIS_MODULE,
.t_name = "infiniband",
.t_unloading = rds_ib_is_unloading,
.t_type = RDS_TRANS_IB
};
int rds_ib_init(void)
{
int ret;
INIT_LIST_HEAD(&rds_ib_devices);
ret = rds_ib_mr_init();
if (ret)
goto out;
ret = ib_register_client(&rds_ib_client);
if (ret)
goto out_mr_exit;
ret = rds_ib_sysctl_init();
if (ret)
goto out_ibreg;
ret = rds_ib_recv_init();
if (ret)
goto out_sysctl;
rds_trans_register(&rds_ib_transport);
rds_info_register_func(RDS_INFO_IB_CONNECTIONS, rds_ib_ic_info);
#if IS_ENABLED(CONFIG_IPV6)
rds_info_register_func(RDS6_INFO_IB_CONNECTIONS, rds6_ib_ic_info);
#endif
goto out;
out_sysctl:
rds_ib_sysctl_exit();
out_ibreg:
rds_ib_unregister_client();
out_mr_exit:
rds_ib_mr_exit();
out:
return ret;
}
MODULE_LICENSE("GPL");
| linux-master | net/rds/ib.c |
/*
* Copyright (c) 2006, 2019 Oracle and/or its affiliates. All rights reserved.
*
* This software is available to you under a choice of one of two
* licenses. You may choose to be licensed under the terms of the GNU
* General Public License (GPL) Version 2, available from the file
* COPYING in the main directory of this source tree, or the
* OpenIB.org BSD license below:
*
* Redistribution and use in source and binary forms, with or
* without modification, are permitted provided that the following
* conditions are met:
*
* - Redistributions of source code must retain the above
* copyright notice, this list of conditions and the following
* disclaimer.
*
* - Redistributions in binary form must reproduce the above
* copyright notice, this list of conditions and the following
* disclaimer in the documentation and/or other materials
* provided with the distribution.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
* EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
* MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
* NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
* BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
* ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
* CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
* SOFTWARE.
*
*/
#include <linux/module.h>
#include <linux/errno.h>
#include <linux/kernel.h>
#include <linux/gfp.h>
#include <linux/in.h>
#include <linux/ipv6.h>
#include <linux/poll.h>
#include <net/sock.h>
#include "rds.h"
/* this is just used for stats gathering :/ */
static DEFINE_SPINLOCK(rds_sock_lock);
static unsigned long rds_sock_count;
static LIST_HEAD(rds_sock_list);
DECLARE_WAIT_QUEUE_HEAD(rds_poll_waitq);
/*
* This is called as the final descriptor referencing this socket is closed.
* We have to unbind the socket so that another socket can be bound to the
* address it was using.
*
* We have to be careful about racing with the incoming path. sock_orphan()
* sets SOCK_DEAD and we use that as an indicator to the rx path that new
* messages shouldn't be queued.
*/
static int rds_release(struct socket *sock)
{
struct sock *sk = sock->sk;
struct rds_sock *rs;
if (!sk)
goto out;
rs = rds_sk_to_rs(sk);
sock_orphan(sk);
/* Note - rds_clear_recv_queue grabs rs_recv_lock, so
* that ensures the recv path has completed messing
* with the socket. */
rds_clear_recv_queue(rs);
rds_cong_remove_socket(rs);
rds_remove_bound(rs);
rds_send_drop_to(rs, NULL);
rds_rdma_drop_keys(rs);
rds_notify_queue_get(rs, NULL);
rds_notify_msg_zcopy_purge(&rs->rs_zcookie_queue);
spin_lock_bh(&rds_sock_lock);
list_del_init(&rs->rs_item);
rds_sock_count--;
spin_unlock_bh(&rds_sock_lock);
rds_trans_put(rs->rs_transport);
sock->sk = NULL;
sock_put(sk);
out:
return 0;
}
/*
* Careful not to race with rds_release -> sock_orphan which clears sk_sleep.
* _bh() isn't OK here, we're called from interrupt handlers. It's probably OK
* to wake the waitqueue after sk_sleep is clear as we hold a sock ref, but
* this seems more conservative.
* NB - normally, one would use sk_callback_lock for this, but we can
* get here from interrupts, whereas the network code grabs sk_callback_lock
* with _lock_bh only - so relying on sk_callback_lock introduces livelocks.
*/
void rds_wake_sk_sleep(struct rds_sock *rs)
{
unsigned long flags;
read_lock_irqsave(&rs->rs_recv_lock, flags);
__rds_wake_sk_sleep(rds_rs_to_sk(rs));
read_unlock_irqrestore(&rs->rs_recv_lock, flags);
}
static int rds_getname(struct socket *sock, struct sockaddr *uaddr,
int peer)
{
struct rds_sock *rs = rds_sk_to_rs(sock->sk);
struct sockaddr_in6 *sin6;
struct sockaddr_in *sin;
int uaddr_len;
/* racy, don't care */
if (peer) {
if (ipv6_addr_any(&rs->rs_conn_addr))
return -ENOTCONN;
if (ipv6_addr_v4mapped(&rs->rs_conn_addr)) {
sin = (struct sockaddr_in *)uaddr;
memset(sin->sin_zero, 0, sizeof(sin->sin_zero));
sin->sin_family = AF_INET;
sin->sin_port = rs->rs_conn_port;
sin->sin_addr.s_addr = rs->rs_conn_addr_v4;
uaddr_len = sizeof(*sin);
} else {
sin6 = (struct sockaddr_in6 *)uaddr;
sin6->sin6_family = AF_INET6;
sin6->sin6_port = rs->rs_conn_port;
sin6->sin6_addr = rs->rs_conn_addr;
sin6->sin6_flowinfo = 0;
/* scope_id is the same as in the bound address. */
sin6->sin6_scope_id = rs->rs_bound_scope_id;
uaddr_len = sizeof(*sin6);
}
} else {
/* If socket is not yet bound and the socket is connected,
* set the return address family to be the same as the
* connected address, but with 0 address value. If it is not
* connected, set the family to be AF_UNSPEC (value 0) and
* the address size to be that of an IPv4 address.
*/
if (ipv6_addr_any(&rs->rs_bound_addr)) {
if (ipv6_addr_any(&rs->rs_conn_addr)) {
sin = (struct sockaddr_in *)uaddr;
memset(sin, 0, sizeof(*sin));
sin->sin_family = AF_UNSPEC;
return sizeof(*sin);
}
#if IS_ENABLED(CONFIG_IPV6)
if (!(ipv6_addr_type(&rs->rs_conn_addr) &
IPV6_ADDR_MAPPED)) {
sin6 = (struct sockaddr_in6 *)uaddr;
memset(sin6, 0, sizeof(*sin6));
sin6->sin6_family = AF_INET6;
return sizeof(*sin6);
}
#endif
sin = (struct sockaddr_in *)uaddr;
memset(sin, 0, sizeof(*sin));
sin->sin_family = AF_INET;
return sizeof(*sin);
}
if (ipv6_addr_v4mapped(&rs->rs_bound_addr)) {
sin = (struct sockaddr_in *)uaddr;
memset(sin->sin_zero, 0, sizeof(sin->sin_zero));
sin->sin_family = AF_INET;
sin->sin_port = rs->rs_bound_port;
sin->sin_addr.s_addr = rs->rs_bound_addr_v4;
uaddr_len = sizeof(*sin);
} else {
sin6 = (struct sockaddr_in6 *)uaddr;
sin6->sin6_family = AF_INET6;
sin6->sin6_port = rs->rs_bound_port;
sin6->sin6_addr = rs->rs_bound_addr;
sin6->sin6_flowinfo = 0;
sin6->sin6_scope_id = rs->rs_bound_scope_id;
uaddr_len = sizeof(*sin6);
}
}
return uaddr_len;
}
/*
* RDS' poll is without a doubt the least intuitive part of the interface,
* as EPOLLIN and EPOLLOUT do not behave entirely as you would expect from
* a network protocol.
*
* EPOLLIN is asserted if
* - there is data on the receive queue.
* - to signal that a previously congested destination may have become
* uncongested
* - A notification has been queued to the socket (this can be a congestion
* update, or a RDMA completion, or a MSG_ZEROCOPY completion).
*
* EPOLLOUT is asserted if there is room on the send queue. This does not mean
* however, that the next sendmsg() call will succeed. If the application tries
* to send to a congested destination, the system call may still fail (and
* return ENOBUFS).
*/
static __poll_t rds_poll(struct file *file, struct socket *sock,
poll_table *wait)
{
struct sock *sk = sock->sk;
struct rds_sock *rs = rds_sk_to_rs(sk);
__poll_t mask = 0;
unsigned long flags;
poll_wait(file, sk_sleep(sk), wait);
if (rs->rs_seen_congestion)
poll_wait(file, &rds_poll_waitq, wait);
read_lock_irqsave(&rs->rs_recv_lock, flags);
if (!rs->rs_cong_monitor) {
/* When a congestion map was updated, we signal EPOLLIN for
* "historical" reasons. Applications can also poll for
* WRBAND instead. */
if (rds_cong_updated_since(&rs->rs_cong_track))
mask |= (EPOLLIN | EPOLLRDNORM | EPOLLWRBAND);
} else {
spin_lock(&rs->rs_lock);
if (rs->rs_cong_notify)
mask |= (EPOLLIN | EPOLLRDNORM);
spin_unlock(&rs->rs_lock);
}
if (!list_empty(&rs->rs_recv_queue) ||
!list_empty(&rs->rs_notify_queue) ||
!list_empty(&rs->rs_zcookie_queue.zcookie_head))
mask |= (EPOLLIN | EPOLLRDNORM);
if (rs->rs_snd_bytes < rds_sk_sndbuf(rs))
mask |= (EPOLLOUT | EPOLLWRNORM);
if (sk->sk_err || !skb_queue_empty(&sk->sk_error_queue))
mask |= POLLERR;
read_unlock_irqrestore(&rs->rs_recv_lock, flags);
/* clear state any time we wake a seen-congested socket */
if (mask)
rs->rs_seen_congestion = 0;
return mask;
}
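/*
 * Illustrative sketch, not from the RDS sources: from userspace the
 * semantics described above are just a normal poll() loop; EPOLLIN and
 * EPOLLOUT correspond to POLLIN and POLLOUT. Minimal example (assumes
 * AF_RDS is visible to userspace via <sys/socket.h>; address and port
 * below are placeholders):
 */
#include <poll.h>
#include <stdio.h>
#include <string.h>
#include <sys/socket.h>
#include <netinet/in.h>
#include <unistd.h>

int main(void)
{
	struct sockaddr_in sin;
	struct pollfd pfd;
	int fd = socket(AF_RDS, SOCK_SEQPACKET, 0);

	if (fd < 0) {
		perror("socket(AF_RDS)");
		return 1;
	}

	/* an RDS socket must be bound before it can send or receive */
	memset(&sin, 0, sizeof(sin));
	sin.sin_family = AF_INET;
	sin.sin_addr.s_addr = htonl(INADDR_LOOPBACK);	/* placeholder */
	sin.sin_port = htons(18634);			/* placeholder */
	if (bind(fd, (struct sockaddr *)&sin, sizeof(sin)) < 0) {
		perror("bind");
		return 1;
	}

	pfd.fd = fd;
	pfd.events = POLLIN | POLLOUT;
	if (poll(&pfd, 1, 1000) > 0) {
		if (pfd.revents & POLLIN)
			printf("data or a notification is queued\n");
		if (pfd.revents & POLLOUT)
			printf("room on the send queue\n");
	}
	close(fd);
	return 0;
}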
static int rds_ioctl(struct socket *sock, unsigned int cmd, unsigned long arg)
{
struct rds_sock *rs = rds_sk_to_rs(sock->sk);
rds_tos_t utos, tos = 0;
switch (cmd) {
case SIOCRDSSETTOS:
if (get_user(utos, (rds_tos_t __user *)arg))
return -EFAULT;
if (rs->rs_transport &&
rs->rs_transport->get_tos_map)
tos = rs->rs_transport->get_tos_map(utos);
else
return -ENOIOCTLCMD;
spin_lock_bh(&rds_sock_lock);
if (rs->rs_tos || rs->rs_conn) {
spin_unlock_bh(&rds_sock_lock);
return -EINVAL;
}
rs->rs_tos = tos;
spin_unlock_bh(&rds_sock_lock);
break;
case SIOCRDSGETTOS:
spin_lock_bh(&rds_sock_lock);
tos = rs->rs_tos;
spin_unlock_bh(&rds_sock_lock);
if (put_user(tos, (rds_tos_t __user *)arg))
return -EFAULT;
break;
default:
return -ENOIOCTLCMD;
}
return 0;
}
static int rds_cancel_sent_to(struct rds_sock *rs, sockptr_t optval, int len)
{
struct sockaddr_in6 sin6;
struct sockaddr_in sin;
int ret = 0;
/* racing with another thread binding seems ok here */
if (ipv6_addr_any(&rs->rs_bound_addr)) {
ret = -ENOTCONN; /* XXX not a great errno */
goto out;
}
if (len < sizeof(struct sockaddr_in)) {
ret = -EINVAL;
goto out;
} else if (len < sizeof(struct sockaddr_in6)) {
/* Assume IPv4 */
if (copy_from_sockptr(&sin, optval,
sizeof(struct sockaddr_in))) {
ret = -EFAULT;
goto out;
}
ipv6_addr_set_v4mapped(sin.sin_addr.s_addr, &sin6.sin6_addr);
sin6.sin6_port = sin.sin_port;
} else {
if (copy_from_sockptr(&sin6, optval,
sizeof(struct sockaddr_in6))) {
ret = -EFAULT;
goto out;
}
}
rds_send_drop_to(rs, &sin6);
out:
return ret;
}
static int rds_set_bool_option(unsigned char *optvar, sockptr_t optval,
int optlen)
{
int value;
if (optlen < sizeof(int))
return -EINVAL;
if (copy_from_sockptr(&value, optval, sizeof(int)))
return -EFAULT;
*optvar = !!value;
return 0;
}
static int rds_cong_monitor(struct rds_sock *rs, sockptr_t optval, int optlen)
{
int ret;
ret = rds_set_bool_option(&rs->rs_cong_monitor, optval, optlen);
if (ret == 0) {
if (rs->rs_cong_monitor) {
rds_cong_add_socket(rs);
} else {
rds_cong_remove_socket(rs);
rs->rs_cong_mask = 0;
rs->rs_cong_notify = 0;
}
}
return ret;
}
static int rds_set_transport(struct rds_sock *rs, sockptr_t optval, int optlen)
{
int t_type;
if (rs->rs_transport)
return -EOPNOTSUPP; /* previously attached to transport */
if (optlen != sizeof(int))
return -EINVAL;
if (copy_from_sockptr(&t_type, optval, sizeof(t_type)))
return -EFAULT;
if (t_type < 0 || t_type >= RDS_TRANS_COUNT)
return -EINVAL;
rs->rs_transport = rds_trans_get(t_type);
return rs->rs_transport ? 0 : -ENOPROTOOPT;
}
static int rds_enable_recvtstamp(struct sock *sk, sockptr_t optval,
int optlen, int optname)
{
int val, valbool;
if (optlen != sizeof(int))
return -EFAULT;
if (copy_from_sockptr(&val, optval, sizeof(int)))
return -EFAULT;
valbool = val ? 1 : 0;
if (optname == SO_TIMESTAMP_NEW)
sock_set_flag(sk, SOCK_TSTAMP_NEW);
if (valbool)
sock_set_flag(sk, SOCK_RCVTSTAMP);
else
sock_reset_flag(sk, SOCK_RCVTSTAMP);
return 0;
}
static int rds_recv_track_latency(struct rds_sock *rs, sockptr_t optval,
int optlen)
{
struct rds_rx_trace_so trace;
int i;
if (optlen != sizeof(struct rds_rx_trace_so))
return -EFAULT;
if (copy_from_sockptr(&trace, optval, sizeof(trace)))
return -EFAULT;
if (trace.rx_traces > RDS_MSG_RX_DGRAM_TRACE_MAX)
return -EFAULT;
rs->rs_rx_traces = trace.rx_traces;
for (i = 0; i < rs->rs_rx_traces; i++) {
if (trace.rx_trace_pos[i] > RDS_MSG_RX_DGRAM_TRACE_MAX) {
rs->rs_rx_traces = 0;
return -EFAULT;
}
rs->rs_rx_trace[i] = trace.rx_trace_pos[i];
}
return 0;
}
static int rds_setsockopt(struct socket *sock, int level, int optname,
sockptr_t optval, unsigned int optlen)
{
struct rds_sock *rs = rds_sk_to_rs(sock->sk);
int ret;
if (level != SOL_RDS) {
ret = -ENOPROTOOPT;
goto out;
}
switch (optname) {
case RDS_CANCEL_SENT_TO:
ret = rds_cancel_sent_to(rs, optval, optlen);
break;
case RDS_GET_MR:
ret = rds_get_mr(rs, optval, optlen);
break;
case RDS_GET_MR_FOR_DEST:
ret = rds_get_mr_for_dest(rs, optval, optlen);
break;
case RDS_FREE_MR:
ret = rds_free_mr(rs, optval, optlen);
break;
case RDS_RECVERR:
ret = rds_set_bool_option(&rs->rs_recverr, optval, optlen);
break;
case RDS_CONG_MONITOR:
ret = rds_cong_monitor(rs, optval, optlen);
break;
case SO_RDS_TRANSPORT:
lock_sock(sock->sk);
ret = rds_set_transport(rs, optval, optlen);
release_sock(sock->sk);
break;
case SO_TIMESTAMP_OLD:
case SO_TIMESTAMP_NEW:
lock_sock(sock->sk);
ret = rds_enable_recvtstamp(sock->sk, optval, optlen, optname);
release_sock(sock->sk);
break;
case SO_RDS_MSG_RXPATH_LATENCY:
ret = rds_recv_track_latency(rs, optval, optlen);
break;
default:
ret = -ENOPROTOOPT;
}
out:
return ret;
}
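/*
 * Illustrative sketch, not from the RDS sources: SO_RDS_TRANSPORT is set at
 * the SOL_RDS level and, per rds_set_transport() above, only before a
 * transport is attached (i.e. before bind()). Userspace caller pinning the
 * TCP transport (assumes <linux/rds.h> provides the constants; SOL_RDS is
 * defined as a fallback matching the kernel's value):
 */
#include <stdio.h>
#include <sys/socket.h>
#include <linux/rds.h>

#ifndef SOL_RDS
#define SOL_RDS 276	/* matches the kernel's definition */
#endif

int main(void)
{
	int trans = RDS_TRANS_TCP;
	int fd = socket(AF_RDS, SOCK_SEQPACKET, 0);

	if (fd < 0) {
		perror("socket(AF_RDS)");
		return 1;
	}
	/* rejected with EOPNOTSUPP once a transport is already attached */
	if (setsockopt(fd, SOL_RDS, SO_RDS_TRANSPORT,
		       &trans, sizeof(trans)) < 0) {
		perror("setsockopt(SO_RDS_TRANSPORT)");
		return 1;
	}
	return 0;
}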
static int rds_getsockopt(struct socket *sock, int level, int optname,
char __user *optval, int __user *optlen)
{
struct rds_sock *rs = rds_sk_to_rs(sock->sk);
int ret = -ENOPROTOOPT, len;
int trans;
if (level != SOL_RDS)
goto out;
if (get_user(len, optlen)) {
ret = -EFAULT;
goto out;
}
switch (optname) {
case RDS_INFO_FIRST ... RDS_INFO_LAST:
ret = rds_info_getsockopt(sock, optname, optval,
optlen);
break;
case RDS_RECVERR:
if (len < sizeof(int))
ret = -EINVAL;
else
if (put_user(rs->rs_recverr, (int __user *) optval) ||
put_user(sizeof(int), optlen))
ret = -EFAULT;
else
ret = 0;
break;
case SO_RDS_TRANSPORT:
if (len < sizeof(int)) {
ret = -EINVAL;
break;
}
trans = (rs->rs_transport ? rs->rs_transport->t_type :
RDS_TRANS_NONE); /* unbound */
if (put_user(trans, (int __user *)optval) ||
put_user(sizeof(int), optlen))
ret = -EFAULT;
else
ret = 0;
break;
default:
break;
}
out:
return ret;
}
static int rds_connect(struct socket *sock, struct sockaddr *uaddr,
int addr_len, int flags)
{
struct sock *sk = sock->sk;
struct sockaddr_in *sin;
struct rds_sock *rs = rds_sk_to_rs(sk);
int ret = 0;
if (addr_len < offsetofend(struct sockaddr, sa_family))
return -EINVAL;
lock_sock(sk);
switch (uaddr->sa_family) {
case AF_INET:
sin = (struct sockaddr_in *)uaddr;
if (addr_len < sizeof(struct sockaddr_in)) {
ret = -EINVAL;
break;
}
if (sin->sin_addr.s_addr == htonl(INADDR_ANY)) {
ret = -EDESTADDRREQ;
break;
}
if (ipv4_is_multicast(sin->sin_addr.s_addr) ||
sin->sin_addr.s_addr == htonl(INADDR_BROADCAST)) {
ret = -EINVAL;
break;
}
ipv6_addr_set_v4mapped(sin->sin_addr.s_addr, &rs->rs_conn_addr);
rs->rs_conn_port = sin->sin_port;
break;
#if IS_ENABLED(CONFIG_IPV6)
case AF_INET6: {
struct sockaddr_in6 *sin6;
int addr_type;
sin6 = (struct sockaddr_in6 *)uaddr;
if (addr_len < sizeof(struct sockaddr_in6)) {
ret = -EINVAL;
break;
}
addr_type = ipv6_addr_type(&sin6->sin6_addr);
if (!(addr_type & IPV6_ADDR_UNICAST)) {
__be32 addr4;
if (!(addr_type & IPV6_ADDR_MAPPED)) {
ret = -EPROTOTYPE;
break;
}
/* It is a mapped address. Need to do some sanity
* checks.
*/
addr4 = sin6->sin6_addr.s6_addr32[3];
if (addr4 == htonl(INADDR_ANY) ||
addr4 == htonl(INADDR_BROADCAST) ||
ipv4_is_multicast(addr4)) {
ret = -EPROTOTYPE;
break;
}
}
if (addr_type & IPV6_ADDR_LINKLOCAL) {
/* If the socket is already bound to a link-local address,
* the peer address must be on the same link.
*/
if (sin6->sin6_scope_id == 0 ||
(!ipv6_addr_any(&rs->rs_bound_addr) &&
rs->rs_bound_scope_id &&
sin6->sin6_scope_id != rs->rs_bound_scope_id)) {
ret = -EINVAL;
break;
}
/* Remember the connected address scope ID. It will
* be checked against the binding local address when
* the socket is bound.
*/
rs->rs_bound_scope_id = sin6->sin6_scope_id;
}
rs->rs_conn_addr = sin6->sin6_addr;
rs->rs_conn_port = sin6->sin6_port;
break;
}
#endif
default:
ret = -EAFNOSUPPORT;
break;
}
release_sock(sk);
return ret;
}
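/*
 * Illustrative sketch, not from the RDS sources: rds_connect() above only
 * records a default destination; no wire connection is set up at connect
 * time, and sendmsg() can still address other peers explicitly. Userspace
 * caller setting that default for an IPv4 peer (placeholder addresses,
 * assumes AF_RDS is visible to userspace):
 */
#include <stdio.h>
#include <string.h>
#include <sys/socket.h>
#include <netinet/in.h>
#include <arpa/inet.h>

int main(void)
{
	struct sockaddr_in laddr, faddr;
	int fd = socket(AF_RDS, SOCK_SEQPACKET, 0);

	if (fd < 0) {
		perror("socket(AF_RDS)");
		return 1;
	}

	memset(&laddr, 0, sizeof(laddr));
	laddr.sin_family = AF_INET;
	laddr.sin_addr.s_addr = inet_addr("192.0.2.1");	/* placeholder local */
	laddr.sin_port = htons(18634);
	if (bind(fd, (struct sockaddr *)&laddr, sizeof(laddr)) < 0) {
		perror("bind");
		return 1;
	}

	memset(&faddr, 0, sizeof(faddr));
	faddr.sin_family = AF_INET;
	faddr.sin_addr.s_addr = inet_addr("192.0.2.2");	/* placeholder peer */
	faddr.sin_port = htons(18634);
	if (connect(fd, (struct sockaddr *)&faddr, sizeof(faddr)) < 0) {
		perror("connect");
		return 1;
	}
	printf("default destination recorded\n");
	return 0;
}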
static struct proto rds_proto = {
.name = "RDS",
.owner = THIS_MODULE,
.obj_size = sizeof(struct rds_sock),
};
static const struct proto_ops rds_proto_ops = {
.family = AF_RDS,
.owner = THIS_MODULE,
.release = rds_release,
.bind = rds_bind,
.connect = rds_connect,
.socketpair = sock_no_socketpair,
.accept = sock_no_accept,
.getname = rds_getname,
.poll = rds_poll,
.ioctl = rds_ioctl,
.listen = sock_no_listen,
.shutdown = sock_no_shutdown,
.setsockopt = rds_setsockopt,
.getsockopt = rds_getsockopt,
.sendmsg = rds_sendmsg,
.recvmsg = rds_recvmsg,
.mmap = sock_no_mmap,
};
static void rds_sock_destruct(struct sock *sk)
{
struct rds_sock *rs = rds_sk_to_rs(sk);
WARN_ON((&rs->rs_item != rs->rs_item.next ||
&rs->rs_item != rs->rs_item.prev));
}
static int __rds_create(struct socket *sock, struct sock *sk, int protocol)
{
struct rds_sock *rs;
sock_init_data(sock, sk);
sock->ops = &rds_proto_ops;
sk->sk_protocol = protocol;
sk->sk_destruct = rds_sock_destruct;
rs = rds_sk_to_rs(sk);
spin_lock_init(&rs->rs_lock);
rwlock_init(&rs->rs_recv_lock);
INIT_LIST_HEAD(&rs->rs_send_queue);
INIT_LIST_HEAD(&rs->rs_recv_queue);
INIT_LIST_HEAD(&rs->rs_notify_queue);
INIT_LIST_HEAD(&rs->rs_cong_list);
rds_message_zcopy_queue_init(&rs->rs_zcookie_queue);
spin_lock_init(&rs->rs_rdma_lock);
rs->rs_rdma_keys = RB_ROOT;
rs->rs_rx_traces = 0;
rs->rs_tos = 0;
rs->rs_conn = NULL;
spin_lock_bh(&rds_sock_lock);
list_add_tail(&rs->rs_item, &rds_sock_list);
rds_sock_count++;
spin_unlock_bh(&rds_sock_lock);
return 0;
}
static int rds_create(struct net *net, struct socket *sock, int protocol,
int kern)
{
struct sock *sk;
if (sock->type != SOCK_SEQPACKET || protocol)
return -ESOCKTNOSUPPORT;
sk = sk_alloc(net, AF_RDS, GFP_KERNEL, &rds_proto, kern);
if (!sk)
return -ENOMEM;
return __rds_create(sock, sk, protocol);
}
void rds_sock_addref(struct rds_sock *rs)
{
sock_hold(rds_rs_to_sk(rs));
}
void rds_sock_put(struct rds_sock *rs)
{
sock_put(rds_rs_to_sk(rs));
}
static const struct net_proto_family rds_family_ops = {
.family = AF_RDS,
.create = rds_create,
.owner = THIS_MODULE,
};
static void rds_sock_inc_info(struct socket *sock, unsigned int len,
struct rds_info_iterator *iter,
struct rds_info_lengths *lens)
{
struct rds_sock *rs;
struct rds_incoming *inc;
unsigned int total = 0;
len /= sizeof(struct rds_info_message);
spin_lock_bh(&rds_sock_lock);
list_for_each_entry(rs, &rds_sock_list, rs_item) {
/* This option only supports IPv4 sockets. */
if (!ipv6_addr_v4mapped(&rs->rs_bound_addr))
continue;
read_lock(&rs->rs_recv_lock);
/* XXX too lazy to maintain counts.. */
list_for_each_entry(inc, &rs->rs_recv_queue, i_item) {
total++;
if (total <= len)
rds_inc_info_copy(inc, iter,
inc->i_saddr.s6_addr32[3],
rs->rs_bound_addr_v4,
1);
}
read_unlock(&rs->rs_recv_lock);
}
spin_unlock_bh(&rds_sock_lock);
lens->nr = total;
lens->each = sizeof(struct rds_info_message);
}
#if IS_ENABLED(CONFIG_IPV6)
static void rds6_sock_inc_info(struct socket *sock, unsigned int len,
struct rds_info_iterator *iter,
struct rds_info_lengths *lens)
{
struct rds_incoming *inc;
unsigned int total = 0;
struct rds_sock *rs;
len /= sizeof(struct rds6_info_message);
spin_lock_bh(&rds_sock_lock);
list_for_each_entry(rs, &rds_sock_list, rs_item) {
read_lock(&rs->rs_recv_lock);
list_for_each_entry(inc, &rs->rs_recv_queue, i_item) {
total++;
if (total <= len)
rds6_inc_info_copy(inc, iter, &inc->i_saddr,
&rs->rs_bound_addr, 1);
}
read_unlock(&rs->rs_recv_lock);
}
spin_unlock_bh(&rds_sock_lock);
lens->nr = total;
lens->each = sizeof(struct rds6_info_message);
}
#endif
static void rds_sock_info(struct socket *sock, unsigned int len,
struct rds_info_iterator *iter,
struct rds_info_lengths *lens)
{
struct rds_info_socket sinfo;
unsigned int cnt = 0;
struct rds_sock *rs;
len /= sizeof(struct rds_info_socket);
spin_lock_bh(&rds_sock_lock);
if (len < rds_sock_count) {
cnt = rds_sock_count;
goto out;
}
list_for_each_entry(rs, &rds_sock_list, rs_item) {
/* This option only supports IPv4 sockets. */
if (!ipv6_addr_v4mapped(&rs->rs_bound_addr))
continue;
sinfo.sndbuf = rds_sk_sndbuf(rs);
sinfo.rcvbuf = rds_sk_rcvbuf(rs);
sinfo.bound_addr = rs->rs_bound_addr_v4;
sinfo.connected_addr = rs->rs_conn_addr_v4;
sinfo.bound_port = rs->rs_bound_port;
sinfo.connected_port = rs->rs_conn_port;
sinfo.inum = sock_i_ino(rds_rs_to_sk(rs));
rds_info_copy(iter, &sinfo, sizeof(sinfo));
cnt++;
}
out:
lens->nr = cnt;
lens->each = sizeof(struct rds_info_socket);
spin_unlock_bh(&rds_sock_lock);
}
#if IS_ENABLED(CONFIG_IPV6)
static void rds6_sock_info(struct socket *sock, unsigned int len,
struct rds_info_iterator *iter,
struct rds_info_lengths *lens)
{
struct rds6_info_socket sinfo6;
struct rds_sock *rs;
len /= sizeof(struct rds6_info_socket);
spin_lock_bh(&rds_sock_lock);
if (len < rds_sock_count)
goto out;
list_for_each_entry(rs, &rds_sock_list, rs_item) {
sinfo6.sndbuf = rds_sk_sndbuf(rs);
sinfo6.rcvbuf = rds_sk_rcvbuf(rs);
sinfo6.bound_addr = rs->rs_bound_addr;
sinfo6.connected_addr = rs->rs_conn_addr;
sinfo6.bound_port = rs->rs_bound_port;
sinfo6.connected_port = rs->rs_conn_port;
sinfo6.inum = sock_i_ino(rds_rs_to_sk(rs));
rds_info_copy(iter, &sinfo6, sizeof(sinfo6));
}
out:
lens->nr = rds_sock_count;
lens->each = sizeof(struct rds6_info_socket);
spin_unlock_bh(&rds_sock_lock);
}
#endif
static void rds_exit(void)
{
sock_unregister(rds_family_ops.family);
proto_unregister(&rds_proto);
rds_conn_exit();
rds_cong_exit();
rds_sysctl_exit();
rds_threads_exit();
rds_stats_exit();
rds_page_exit();
rds_bind_lock_destroy();
rds_info_deregister_func(RDS_INFO_SOCKETS, rds_sock_info);
rds_info_deregister_func(RDS_INFO_RECV_MESSAGES, rds_sock_inc_info);
#if IS_ENABLED(CONFIG_IPV6)
rds_info_deregister_func(RDS6_INFO_SOCKETS, rds6_sock_info);
rds_info_deregister_func(RDS6_INFO_RECV_MESSAGES, rds6_sock_inc_info);
#endif
}
module_exit(rds_exit);
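/* Per-boot random generation number. It is advertised to peers in the
 * handshake extension headers; rds_conn_peer_gen_update() in recv.c flushes
 * stale retransmit state when a peer's generation number changes, i.e. when
 * the peer has restarted.
 */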
u32 rds_gen_num;
static int __init rds_init(void)
{
int ret;
net_get_random_once(&rds_gen_num, sizeof(rds_gen_num));
ret = rds_bind_lock_init();
if (ret)
goto out;
ret = rds_conn_init();
if (ret)
goto out_bind;
ret = rds_threads_init();
if (ret)
goto out_conn;
ret = rds_sysctl_init();
if (ret)
goto out_threads;
ret = rds_stats_init();
if (ret)
goto out_sysctl;
ret = proto_register(&rds_proto, 1);
if (ret)
goto out_stats;
ret = sock_register(&rds_family_ops);
if (ret)
goto out_proto;
rds_info_register_func(RDS_INFO_SOCKETS, rds_sock_info);
rds_info_register_func(RDS_INFO_RECV_MESSAGES, rds_sock_inc_info);
#if IS_ENABLED(CONFIG_IPV6)
rds_info_register_func(RDS6_INFO_SOCKETS, rds6_sock_info);
rds_info_register_func(RDS6_INFO_RECV_MESSAGES, rds6_sock_inc_info);
#endif
goto out;
out_proto:
proto_unregister(&rds_proto);
out_stats:
rds_stats_exit();
out_sysctl:
rds_sysctl_exit();
out_threads:
rds_threads_exit();
out_conn:
rds_conn_exit();
rds_cong_exit();
rds_page_exit();
out_bind:
rds_bind_lock_destroy();
out:
return ret;
}
module_init(rds_init);
#define DRV_VERSION "4.0"
#define DRV_RELDATE "Feb 12, 2009"
MODULE_AUTHOR("Oracle Corporation <[email protected]>");
MODULE_DESCRIPTION("RDS: Reliable Datagram Sockets"
" v" DRV_VERSION " (" DRV_RELDATE ")");
MODULE_VERSION(DRV_VERSION);
MODULE_LICENSE("Dual BSD/GPL");
MODULE_ALIAS_NETPROTO(PF_RDS);
| linux-master | net/rds/af_rds.c |
/*
* Copyright (c) 2006, 2017 Oracle and/or its affiliates. All rights reserved.
*
* This software is available to you under a choice of one of two
* licenses. You may choose to be licensed under the terms of the GNU
* General Public License (GPL) Version 2, available from the file
* COPYING in the main directory of this source tree, or the
* OpenIB.org BSD license below:
*
* Redistribution and use in source and binary forms, with or
* without modification, are permitted provided that the following
* conditions are met:
*
* - Redistributions of source code must retain the above
* copyright notice, this list of conditions and the following
* disclaimer.
*
* - Redistributions in binary form must reproduce the above
* copyright notice, this list of conditions and the following
* disclaimer in the documentation and/or other materials
* provided with the distribution.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
* EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
* MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
* NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
* BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
* ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
* CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
* SOFTWARE.
*
*/
#include <linux/kernel.h>
#include <linux/in.h>
#include <net/tcp.h>
#include "rds.h"
#include "tcp.h"
void rds_tcp_state_change(struct sock *sk)
{
void (*state_change)(struct sock *sk);
struct rds_conn_path *cp;
struct rds_tcp_connection *tc;
read_lock_bh(&sk->sk_callback_lock);
cp = sk->sk_user_data;
if (!cp) {
state_change = sk->sk_state_change;
goto out;
}
tc = cp->cp_transport_data;
state_change = tc->t_orig_state_change;
rdsdebug("sock %p state_change to %d\n", tc->t_sock, sk->sk_state);
switch (sk->sk_state) {
/* ignore connecting sockets as they make progress */
case TCP_SYN_SENT:
case TCP_SYN_RECV:
break;
case TCP_ESTABLISHED:
/* Force the peer to reconnect so that we have the
* TCP ports going from <smaller-ip>.<transient> to
* <larger-ip>.<RDS_TCP_PORT>. We avoid marking the
* RDS connection as RDS_CONN_UP until the reconnect,
* to avoid RDS datagram loss.
*/
if (rds_addr_cmp(&cp->cp_conn->c_laddr,
&cp->cp_conn->c_faddr) >= 0 &&
rds_conn_path_transition(cp, RDS_CONN_CONNECTING,
RDS_CONN_ERROR)) {
rds_conn_path_drop(cp, false);
} else {
rds_connect_path_complete(cp, RDS_CONN_CONNECTING);
}
break;
case TCP_CLOSE_WAIT:
case TCP_CLOSE:
rds_conn_path_drop(cp, false);
break;
default:
break;
}
out:
read_unlock_bh(&sk->sk_callback_lock);
state_change(sk);
}
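/* Active connect for one RDS connection path: create a kernel TCP socket in
 * the connection's netns, bind it to the local RDS address with an ephemeral
 * port, install the RDS callbacks and kick off a non-blocking connect() to
 * the peer's RDS_TCP_PORT. Paths other than path 0 are deferred until the
 * handshake probe has established c_npaths.
 */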
int rds_tcp_conn_path_connect(struct rds_conn_path *cp)
{
struct socket *sock = NULL;
struct sockaddr_in6 sin6;
struct sockaddr_in sin;
struct sockaddr *addr;
int addrlen;
bool isv6;
int ret;
struct rds_connection *conn = cp->cp_conn;
struct rds_tcp_connection *tc = cp->cp_transport_data;
/* for multipath RDS, we only trigger the connection after
* the handshake probe has determined the number of paths.
*/
if (cp->cp_index > 0 && cp->cp_conn->c_npaths < 2)
return -EAGAIN;
mutex_lock(&tc->t_conn_path_lock);
if (rds_conn_path_up(cp)) {
mutex_unlock(&tc->t_conn_path_lock);
return 0;
}
if (ipv6_addr_v4mapped(&conn->c_laddr)) {
ret = sock_create_kern(rds_conn_net(conn), PF_INET,
SOCK_STREAM, IPPROTO_TCP, &sock);
isv6 = false;
} else {
ret = sock_create_kern(rds_conn_net(conn), PF_INET6,
SOCK_STREAM, IPPROTO_TCP, &sock);
isv6 = true;
}
if (ret < 0)
goto out;
if (!rds_tcp_tune(sock)) {
ret = -EINVAL;
goto out;
}
if (isv6) {
sin6.sin6_family = AF_INET6;
sin6.sin6_addr = conn->c_laddr;
sin6.sin6_port = 0;
sin6.sin6_flowinfo = 0;
sin6.sin6_scope_id = conn->c_dev_if;
addr = (struct sockaddr *)&sin6;
addrlen = sizeof(sin6);
} else {
sin.sin_family = AF_INET;
sin.sin_addr.s_addr = conn->c_laddr.s6_addr32[3];
sin.sin_port = 0;
addr = (struct sockaddr *)&sin;
addrlen = sizeof(sin);
}
ret = sock->ops->bind(sock, addr, addrlen);
if (ret) {
rdsdebug("bind failed with %d at address %pI6c\n",
ret, &conn->c_laddr);
goto out;
}
if (isv6) {
sin6.sin6_family = AF_INET6;
sin6.sin6_addr = conn->c_faddr;
sin6.sin6_port = htons(RDS_TCP_PORT);
sin6.sin6_flowinfo = 0;
sin6.sin6_scope_id = conn->c_dev_if;
addr = (struct sockaddr *)&sin6;
addrlen = sizeof(sin6);
} else {
sin.sin_family = AF_INET;
sin.sin_addr.s_addr = conn->c_faddr.s6_addr32[3];
sin.sin_port = htons(RDS_TCP_PORT);
addr = (struct sockaddr *)&sin;
addrlen = sizeof(sin);
}
/*
* once we call connect() we can start getting callbacks and they
* own the socket
*/
rds_tcp_set_callbacks(sock, cp);
ret = sock->ops->connect(sock, addr, addrlen, O_NONBLOCK);
rdsdebug("connect to address %pI6c returned %d\n", &conn->c_faddr, ret);
if (ret == -EINPROGRESS)
ret = 0;
if (ret == 0) {
rds_tcp_keepalive(sock);
sock = NULL;
} else {
rds_tcp_restore_callbacks(sock, cp->cp_transport_data);
}
out:
mutex_unlock(&tc->t_conn_path_lock);
if (sock)
sock_release(sock);
return ret;
}
/*
* Before killing the tcp socket this needs to serialize with callbacks. The
* caller has already grabbed the sending sem so we're serialized with other
* senders.
*
* TCP calls the callbacks with the sock lock so we hold it while we reset the
* callbacks to those set by TCP. Our callbacks won't execute again once we
* hold the sock lock.
*/
void rds_tcp_conn_path_shutdown(struct rds_conn_path *cp)
{
struct rds_tcp_connection *tc = cp->cp_transport_data;
struct socket *sock = tc->t_sock;
rdsdebug("shutting down conn %p tc %p sock %p\n",
cp->cp_conn, tc, sock);
if (sock) {
if (rds_destroy_pending(cp->cp_conn))
sock_no_linger(sock->sk);
sock->ops->shutdown(sock, RCV_SHUTDOWN | SEND_SHUTDOWN);
lock_sock(sock->sk);
rds_tcp_restore_callbacks(sock, tc); /* tc->t_sock = NULL */
release_sock(sock->sk);
sock_release(sock);
}
if (tc->t_tinc) {
rds_inc_put(&tc->t_tinc->ti_inc);
tc->t_tinc = NULL;
}
tc->t_tinc_hdr_rem = sizeof(struct rds_header);
tc->t_tinc_data_rem = 0;
}
| linux-master | net/rds/tcp_connect.c |
/*
* Copyright (c) 2006 Oracle. All rights reserved.
*
* This software is available to you under a choice of one of two
* licenses. You may choose to be licensed under the terms of the GNU
* General Public License (GPL) Version 2, available from the file
* COPYING in the main directory of this source tree, or the
* OpenIB.org BSD license below:
*
* Redistribution and use in source and binary forms, with or
* without modification, are permitted provided that the following
* conditions are met:
*
* - Redistributions of source code must retain the above
* copyright notice, this list of conditions and the following
* disclaimer.
*
* - Redistributions in binary form must reproduce the above
* copyright notice, this list of conditions and the following
* disclaimer in the documentation and/or other materials
* provided with the distribution.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
* EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
* MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
* NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
* BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
* ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
* CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
* SOFTWARE.
*
*/
#include <linux/percpu.h>
#include <linux/seq_file.h>
#include <linux/proc_fs.h>
#include <linux/export.h>
#include "rds.h"
DEFINE_PER_CPU_SHARED_ALIGNED(struct rds_statistics, rds_stats);
EXPORT_PER_CPU_SYMBOL_GPL(rds_stats);
/* :.,$s/unsigned long\>.*\<s_\(.*\);/"\1",/g */
static const char *const rds_stat_names[] = {
"conn_reset",
"recv_drop_bad_checksum",
"recv_drop_old_seq",
"recv_drop_no_sock",
"recv_drop_dead_sock",
"recv_deliver_raced",
"recv_delivered",
"recv_queued",
"recv_immediate_retry",
"recv_delayed_retry",
"recv_ack_required",
"recv_rdma_bytes",
"recv_ping",
"send_queue_empty",
"send_queue_full",
"send_lock_contention",
"send_lock_queue_raced",
"send_immediate_retry",
"send_delayed_retry",
"send_drop_acked",
"send_ack_required",
"send_queued",
"send_rdma",
"send_rdma_bytes",
"send_pong",
"page_remainder_hit",
"page_remainder_miss",
"copy_to_user",
"copy_from_user",
"cong_update_queued",
"cong_update_received",
"cong_send_error",
"cong_send_blocked",
"recv_bytes_added_to_sock",
"recv_bytes_freed_fromsock",
"send_stuck_rm",
};
void rds_stats_info_copy(struct rds_info_iterator *iter,
uint64_t *values, const char *const *names, size_t nr)
{
struct rds_info_counter ctr;
size_t i;
for (i = 0; i < nr; i++) {
BUG_ON(strlen(names[i]) >= sizeof(ctr.name));
strncpy(ctr.name, names[i], sizeof(ctr.name) - 1);
ctr.name[sizeof(ctr.name) - 1] = '\0';
ctr.value = values[i];
rds_info_copy(iter, &ctr, sizeof(ctr));
}
}
EXPORT_SYMBOL_GPL(rds_stats_info_copy);
/*
* This gives global counters across all the transports. The strings
* are copied in so that the tool doesn't need knowledge of the specific
* stats that we're exporting. Some are pretty implementation dependent
* and may change over time. That doesn't stop them from being useful.
*
* This is the only function in the chain that knows about the byte granular
* length in userspace. It converts it to the number of stat entries that
* the rest of the functions operate on.
*/
static void rds_stats_info(struct socket *sock, unsigned int len,
struct rds_info_iterator *iter,
struct rds_info_lengths *lens)
{
struct rds_statistics stats = {0, };
uint64_t *src;
uint64_t *sum;
size_t i;
int cpu;
unsigned int avail;
avail = len / sizeof(struct rds_info_counter);
if (avail < ARRAY_SIZE(rds_stat_names)) {
avail = 0;
goto trans;
}
for_each_online_cpu(cpu) {
src = (uint64_t *)&(per_cpu(rds_stats, cpu));
sum = (uint64_t *)&stats;
for (i = 0; i < sizeof(stats) / sizeof(uint64_t); i++)
*(sum++) += *(src++);
}
rds_stats_info_copy(iter, (uint64_t *)&stats, rds_stat_names,
ARRAY_SIZE(rds_stat_names));
avail -= ARRAY_SIZE(rds_stat_names);
trans:
lens->each = sizeof(struct rds_info_counter);
lens->nr = rds_trans_stats_info_copy(iter, avail) +
ARRAY_SIZE(rds_stat_names);
}
void rds_stats_exit(void)
{
rds_info_deregister_func(RDS_INFO_COUNTERS, rds_stats_info);
}
int rds_stats_init(void)
{
rds_info_register_func(RDS_INFO_COUNTERS, rds_stats_info);
return 0;
}
| linux-master | net/rds/stats.c |
/*
* Copyright (c) 2006 Oracle. All rights reserved.
*
* This software is available to you under a choice of one of two
* licenses. You may choose to be licensed under the terms of the GNU
* General Public License (GPL) Version 2, available from the file
* COPYING in the main directory of this source tree, or the
* OpenIB.org BSD license below:
*
* Redistribution and use in source and binary forms, with or
* without modification, are permitted provided that the following
* conditions are met:
*
* - Redistributions of source code must retain the above
* copyright notice, this list of conditions and the following
* disclaimer.
*
* - Redistributions in binary form must reproduce the above
* copyright notice, this list of conditions and the following
* disclaimer in the documentation and/or other materials
* provided with the distribution.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
* EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
* MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
* NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
* BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
* ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
* CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
* SOFTWARE.
*
*/
#include <linux/percpu.h>
#include <linux/seq_file.h>
#include <linux/proc_fs.h>
#include "rds.h"
#include "tcp.h"
DEFINE_PER_CPU(struct rds_tcp_statistics, rds_tcp_stats)
____cacheline_aligned;
static const char * const rds_tcp_stat_names[] = {
"tcp_data_ready_calls",
"tcp_write_space_calls",
"tcp_sndbuf_full",
"tcp_connect_raced",
"tcp_listen_closed_stale",
};
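/* Sum the per-CPU TCP transport counters and append them after the core RDS
 * counters in the RDS_INFO_COUNTERS reply. The entry count is always
 * returned, even when the caller's buffer was too small to copy anything.
 */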
unsigned int rds_tcp_stats_info_copy(struct rds_info_iterator *iter,
unsigned int avail)
{
struct rds_tcp_statistics stats = {0, };
uint64_t *src;
uint64_t *sum;
size_t i;
int cpu;
if (avail < ARRAY_SIZE(rds_tcp_stat_names))
goto out;
for_each_online_cpu(cpu) {
src = (uint64_t *)&(per_cpu(rds_tcp_stats, cpu));
sum = (uint64_t *)&stats;
for (i = 0; i < sizeof(stats) / sizeof(uint64_t); i++)
*(sum++) += *(src++);
}
rds_stats_info_copy(iter, (uint64_t *)&stats, rds_tcp_stat_names,
ARRAY_SIZE(rds_tcp_stat_names));
out:
return ARRAY_SIZE(rds_tcp_stat_names);
}
| linux-master | net/rds/tcp_stats.c |
/*
* Copyright (c) 2006, 2019 Oracle and/or its affiliates. All rights reserved.
*
* This software is available to you under a choice of one of two
* licenses. You may choose to be licensed under the terms of the GNU
* General Public License (GPL) Version 2, available from the file
* COPYING in the main directory of this source tree, or the
* OpenIB.org BSD license below:
*
* Redistribution and use in source and binary forms, with or
* without modification, are permitted provided that the following
* conditions are met:
*
* - Redistributions of source code must retain the above
* copyright notice, this list of conditions and the following
* disclaimer.
*
* - Redistributions in binary form must reproduce the above
* copyright notice, this list of conditions and the following
* disclaimer in the documentation and/or other materials
* provided with the distribution.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
* EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
* MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
* NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
* BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
* ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
* CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
* SOFTWARE.
*
*/
#include <linux/kernel.h>
#include <linux/slab.h>
#include <net/sock.h>
#include <linux/in.h>
#include <linux/export.h>
#include <linux/sched/clock.h>
#include <linux/time.h>
#include <linux/rds.h>
#include "rds.h"
void rds_inc_init(struct rds_incoming *inc, struct rds_connection *conn,
struct in6_addr *saddr)
{
refcount_set(&inc->i_refcount, 1);
INIT_LIST_HEAD(&inc->i_item);
inc->i_conn = conn;
inc->i_saddr = *saddr;
inc->i_usercopy.rdma_cookie = 0;
inc->i_usercopy.rx_tstamp = ktime_set(0, 0);
memset(inc->i_rx_lat_trace, 0, sizeof(inc->i_rx_lat_trace));
}
EXPORT_SYMBOL_GPL(rds_inc_init);
void rds_inc_path_init(struct rds_incoming *inc, struct rds_conn_path *cp,
struct in6_addr *saddr)
{
refcount_set(&inc->i_refcount, 1);
INIT_LIST_HEAD(&inc->i_item);
inc->i_conn = cp->cp_conn;
inc->i_conn_path = cp;
inc->i_saddr = *saddr;
inc->i_usercopy.rdma_cookie = 0;
inc->i_usercopy.rx_tstamp = ktime_set(0, 0);
}
EXPORT_SYMBOL_GPL(rds_inc_path_init);
static void rds_inc_addref(struct rds_incoming *inc)
{
rdsdebug("addref inc %p ref %d\n", inc, refcount_read(&inc->i_refcount));
refcount_inc(&inc->i_refcount);
}
void rds_inc_put(struct rds_incoming *inc)
{
rdsdebug("put inc %p ref %d\n", inc, refcount_read(&inc->i_refcount));
if (refcount_dec_and_test(&inc->i_refcount)) {
BUG_ON(!list_empty(&inc->i_item));
inc->i_conn->c_trans->inc_free(inc);
}
}
EXPORT_SYMBOL_GPL(rds_inc_put);
static void rds_recv_rcvbuf_delta(struct rds_sock *rs, struct sock *sk,
struct rds_cong_map *map,
int delta, __be16 port)
{
int now_congested;
if (delta == 0)
return;
rs->rs_rcv_bytes += delta;
if (delta > 0)
rds_stats_add(s_recv_bytes_added_to_socket, delta);
else
rds_stats_add(s_recv_bytes_removed_from_socket, -delta);
/* loop transport doesn't send/recv congestion updates */
if (rs->rs_transport->t_type == RDS_TRANS_LOOP)
return;
now_congested = rs->rs_rcv_bytes > rds_sk_rcvbuf(rs);
rdsdebug("rs %p (%pI6c:%u) recv bytes %d buf %d "
"now_cong %d delta %d\n",
rs, &rs->rs_bound_addr,
ntohs(rs->rs_bound_port), rs->rs_rcv_bytes,
rds_sk_rcvbuf(rs), now_congested, delta);
/* wasn't -> am congested */
if (!rs->rs_congested && now_congested) {
rs->rs_congested = 1;
rds_cong_set_bit(map, port);
rds_cong_queue_updates(map);
}
/* was -> aren't congested */
/* Require more free space before reporting uncongested to prevent
bouncing cong/uncong state too often */
else if (rs->rs_congested && (rs->rs_rcv_bytes < (rds_sk_rcvbuf(rs)/2))) {
rs->rs_congested = 0;
rds_cong_clear_bit(map, port);
rds_cong_queue_updates(map);
}
/* do nothing if no change in cong state */
}
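/* A changed peer generation number (TCP transport only) means the peer has
 * restarted: reset each path's TX/RX sequence numbers and mark queued
 * retransmits with RDS_MSG_FLUSH so stale messages are not replayed at the
 * new instance of the peer.
 */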
static void rds_conn_peer_gen_update(struct rds_connection *conn,
u32 peer_gen_num)
{
int i;
struct rds_message *rm, *tmp;
unsigned long flags;
WARN_ON(conn->c_trans->t_type != RDS_TRANS_TCP);
if (peer_gen_num != 0) {
if (conn->c_peer_gen_num != 0 &&
peer_gen_num != conn->c_peer_gen_num) {
for (i = 0; i < RDS_MPATH_WORKERS; i++) {
struct rds_conn_path *cp;
cp = &conn->c_path[i];
spin_lock_irqsave(&cp->cp_lock, flags);
cp->cp_next_tx_seq = 1;
cp->cp_next_rx_seq = 0;
list_for_each_entry_safe(rm, tmp,
&cp->cp_retrans,
m_conn_item) {
set_bit(RDS_MSG_FLUSH, &rm->m_flags);
}
spin_unlock_irqrestore(&cp->cp_lock, flags);
}
}
conn->c_peer_gen_num = peer_gen_num;
}
}
/*
* Process all extension headers that come with this message.
*/
static void rds_recv_incoming_exthdrs(struct rds_incoming *inc, struct rds_sock *rs)
{
struct rds_header *hdr = &inc->i_hdr;
unsigned int pos = 0, type, len;
union {
struct rds_ext_header_version version;
struct rds_ext_header_rdma rdma;
struct rds_ext_header_rdma_dest rdma_dest;
} buffer;
while (1) {
len = sizeof(buffer);
type = rds_message_next_extension(hdr, &pos, &buffer, &len);
if (type == RDS_EXTHDR_NONE)
break;
/* Process extension header here */
switch (type) {
case RDS_EXTHDR_RDMA:
rds_rdma_unuse(rs, be32_to_cpu(buffer.rdma.h_rdma_rkey), 0);
break;
case RDS_EXTHDR_RDMA_DEST:
/* We ignore the size for now. We could stash it
* somewhere and use it for error checking. */
inc->i_usercopy.rdma_cookie = rds_rdma_make_cookie(
be32_to_cpu(buffer.rdma_dest.h_rdma_rkey),
be32_to_cpu(buffer.rdma_dest.h_rdma_offset));
break;
}
}
}
static void rds_recv_hs_exthdrs(struct rds_header *hdr,
struct rds_connection *conn)
{
unsigned int pos = 0, type, len;
union {
struct rds_ext_header_version version;
u16 rds_npaths;
u32 rds_gen_num;
} buffer;
u32 new_peer_gen_num = 0;
while (1) {
len = sizeof(buffer);
type = rds_message_next_extension(hdr, &pos, &buffer, &len);
if (type == RDS_EXTHDR_NONE)
break;
/* Process extension header here */
switch (type) {
case RDS_EXTHDR_NPATHS:
conn->c_npaths = min_t(int, RDS_MPATH_WORKERS,
be16_to_cpu(buffer.rds_npaths));
break;
case RDS_EXTHDR_GEN_NUM:
new_peer_gen_num = be32_to_cpu(buffer.rds_gen_num);
break;
default:
pr_warn_ratelimited("ignoring unknown exthdr type "
"0x%x\n", type);
}
}
/* if RDS_EXTHDR_NPATHS was not found, default to a single-path */
conn->c_npaths = max_t(int, conn->c_npaths, 1);
conn->c_ping_triggered = 0;
rds_conn_peer_gen_update(conn, new_peer_gen_num);
}
/* rds_start_mprds() will synchronously start multiple paths when appropriate.
* The scheme is based on the following rules:
*
* 1. rds_sendmsg on first connect attempt sends the probe ping, with the
* sender's npaths (s_npaths)
* 2. rcvr of probe-ping knows the mprds_paths = min(s_npaths, r_npaths). It
* sends back a probe-pong with r_npaths. After that, if rcvr is the
* smaller ip addr, it starts rds_conn_path_connect_if_down on all
* mprds_paths.
* 3. sender gets woken up, and can move to rds_conn_path_connect_if_down.
* If it is the smaller ipaddr, rds_conn_path_connect_if_down can be
* called after reception of the probe-pong on all mprds_paths.
* Otherwise (sender of probe-ping is not the smaller ip addr): just call
* rds_conn_path_connect_if_down on the hashed path. (see rule 4)
* 4. rds_connect_worker must only trigger a connection if laddr < faddr.
* 5. sender may end up queuing the packet on the cp; it will get sent out
*    later when the connection is completed.
*/
static void rds_start_mprds(struct rds_connection *conn)
{
int i;
struct rds_conn_path *cp;
if (conn->c_npaths > 1 &&
rds_addr_cmp(&conn->c_laddr, &conn->c_faddr) < 0) {
for (i = 0; i < conn->c_npaths; i++) {
cp = &conn->c_path[i];
rds_conn_path_connect_if_down(cp);
}
}
}
/*
* The transport must make sure that this is serialized against other
* rx and conn reset on this specific conn.
*
* We currently assert that only one fragmented message will be sent
* down a connection at a time. This lets us reassemble in the conn
* instead of per-flow which means that we don't have to go digging through
* flows to tear down partial reassembly progress on conn failure and
* we save flow lookup and locking for each frag arrival. It does mean
* that small messages will wait behind large ones. Fragmenting at all
* is only to reduce the memory consumption of pre-posted buffers.
*
* The caller passes in saddr and daddr instead of us getting it from the
* conn. This lets loopback, who only has one conn for both directions,
* tell us which roles the addrs in the conn are playing for this message.
*/
void rds_recv_incoming(struct rds_connection *conn, struct in6_addr *saddr,
struct in6_addr *daddr,
struct rds_incoming *inc, gfp_t gfp)
{
struct rds_sock *rs = NULL;
struct sock *sk;
unsigned long flags;
struct rds_conn_path *cp;
inc->i_conn = conn;
inc->i_rx_jiffies = jiffies;
if (conn->c_trans->t_mp_capable)
cp = inc->i_conn_path;
else
cp = &conn->c_path[0];
rdsdebug("conn %p next %llu inc %p seq %llu len %u sport %u dport %u "
"flags 0x%x rx_jiffies %lu\n", conn,
(unsigned long long)cp->cp_next_rx_seq,
inc,
(unsigned long long)be64_to_cpu(inc->i_hdr.h_sequence),
be32_to_cpu(inc->i_hdr.h_len),
be16_to_cpu(inc->i_hdr.h_sport),
be16_to_cpu(inc->i_hdr.h_dport),
inc->i_hdr.h_flags,
inc->i_rx_jiffies);
/*
* Sequence numbers should only increase. Messages get their
* sequence number as they're queued in a sending conn. They
* can be dropped, though, if the sending socket is closed before
* they hit the wire. So sequence numbers can skip forward
* under normal operation. They can also drop back in the conn
* failover case as previously sent messages are resent down the
* new instance of a conn. We drop those, otherwise we have
* to assume that the next valid seq does not come after a
* hole in the fragment stream.
*
* The headers don't give us a way to realize if fragments of
* a message have been dropped. We assume that frags that arrive
* to a flow are part of the current message on the flow that is
* being reassembled. This means that senders can't drop messages
* from the sending conn until all their frags are sent.
*
* XXX we could spend more on the wire to get more robust failure
* detection, arguably worth it to avoid data corruption.
*/
if (be64_to_cpu(inc->i_hdr.h_sequence) < cp->cp_next_rx_seq &&
(inc->i_hdr.h_flags & RDS_FLAG_RETRANSMITTED)) {
rds_stats_inc(s_recv_drop_old_seq);
goto out;
}
cp->cp_next_rx_seq = be64_to_cpu(inc->i_hdr.h_sequence) + 1;
if (rds_sysctl_ping_enable && inc->i_hdr.h_dport == 0) {
if (inc->i_hdr.h_sport == 0) {
rdsdebug("ignore ping with 0 sport from %pI6c\n",
saddr);
goto out;
}
rds_stats_inc(s_recv_ping);
rds_send_pong(cp, inc->i_hdr.h_sport);
/* if this is a handshake ping, start multipath if necessary */
if (RDS_HS_PROBE(be16_to_cpu(inc->i_hdr.h_sport),
be16_to_cpu(inc->i_hdr.h_dport))) {
rds_recv_hs_exthdrs(&inc->i_hdr, cp->cp_conn);
rds_start_mprds(cp->cp_conn);
}
goto out;
}
if (be16_to_cpu(inc->i_hdr.h_dport) == RDS_FLAG_PROBE_PORT &&
inc->i_hdr.h_sport == 0) {
rds_recv_hs_exthdrs(&inc->i_hdr, cp->cp_conn);
/* if this is a handshake pong, start multipath if necessary */
rds_start_mprds(cp->cp_conn);
wake_up(&cp->cp_conn->c_hs_waitq);
goto out;
}
rs = rds_find_bound(daddr, inc->i_hdr.h_dport, conn->c_bound_if);
if (!rs) {
rds_stats_inc(s_recv_drop_no_sock);
goto out;
}
/* Process extension headers */
rds_recv_incoming_exthdrs(inc, rs);
/* We can be racing with rds_release() which marks the socket dead. */
sk = rds_rs_to_sk(rs);
/* serialize with rds_release -> sock_orphan */
write_lock_irqsave(&rs->rs_recv_lock, flags);
if (!sock_flag(sk, SOCK_DEAD)) {
rdsdebug("adding inc %p to rs %p's recv queue\n", inc, rs);
rds_stats_inc(s_recv_queued);
rds_recv_rcvbuf_delta(rs, sk, inc->i_conn->c_lcong,
be32_to_cpu(inc->i_hdr.h_len),
inc->i_hdr.h_dport);
if (sock_flag(sk, SOCK_RCVTSTAMP))
inc->i_usercopy.rx_tstamp = ktime_get_real();
rds_inc_addref(inc);
inc->i_rx_lat_trace[RDS_MSG_RX_END] = local_clock();
list_add_tail(&inc->i_item, &rs->rs_recv_queue);
__rds_wake_sk_sleep(sk);
} else {
rds_stats_inc(s_recv_drop_dead_sock);
}
write_unlock_irqrestore(&rs->rs_recv_lock, flags);
out:
if (rs)
rds_sock_put(rs);
}
EXPORT_SYMBOL_GPL(rds_recv_incoming);
/*
* be very careful here. This is used as the condition in wait_event_*(),
* so it needs to cope with being called many times.
*/
static int rds_next_incoming(struct rds_sock *rs, struct rds_incoming **inc)
{
unsigned long flags;
if (!*inc) {
read_lock_irqsave(&rs->rs_recv_lock, flags);
if (!list_empty(&rs->rs_recv_queue)) {
*inc = list_entry(rs->rs_recv_queue.next,
struct rds_incoming,
i_item);
rds_inc_addref(*inc);
}
read_unlock_irqrestore(&rs->rs_recv_lock, flags);
}
return *inc != NULL;
}
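/* Check whether @inc is still on the socket's receive queue (i.e. nobody
 * raced us and delivered it first). When @drop is set and it is still
 * queued, unlink it, account the freed receive-buffer space and drop the
 * queue's reference.
 */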
static int rds_still_queued(struct rds_sock *rs, struct rds_incoming *inc,
int drop)
{
struct sock *sk = rds_rs_to_sk(rs);
int ret = 0;
unsigned long flags;
write_lock_irqsave(&rs->rs_recv_lock, flags);
if (!list_empty(&inc->i_item)) {
ret = 1;
if (drop) {
/* XXX make sure this i_conn is reliable */
rds_recv_rcvbuf_delta(rs, sk, inc->i_conn->c_lcong,
-be32_to_cpu(inc->i_hdr.h_len),
inc->i_hdr.h_dport);
list_del_init(&inc->i_item);
rds_inc_put(inc);
}
}
write_unlock_irqrestore(&rs->rs_recv_lock, flags);
rdsdebug("inc %p rs %p still %d dropped %d\n", inc, rs, ret, drop);
return ret;
}
/*
* Pull errors off the error queue.
* If msghdr is NULL, we will just purge the error queue.
*/
int rds_notify_queue_get(struct rds_sock *rs, struct msghdr *msghdr)
{
struct rds_notifier *notifier;
struct rds_rdma_notify cmsg;
unsigned int count = 0, max_messages = ~0U;
unsigned long flags;
LIST_HEAD(copy);
int err = 0;
memset(&cmsg, 0, sizeof(cmsg)); /* fill holes with zero */
/* put_cmsg copies to user space and thus may sleep. We can't do this
* with rs_lock held, so first grab as many notifications as we can stuff
* in the user provided cmsg buffer. We don't try to copy more, to avoid
* losing notifications - except when the buffer is so small that it wouldn't
* even hold a single notification. Then we give him as much of this single
* msg as we can squeeze in, and set MSG_CTRUNC.
*/
if (msghdr) {
max_messages = msghdr->msg_controllen / CMSG_SPACE(sizeof(cmsg));
if (!max_messages)
max_messages = 1;
}
spin_lock_irqsave(&rs->rs_lock, flags);
while (!list_empty(&rs->rs_notify_queue) && count < max_messages) {
notifier = list_entry(rs->rs_notify_queue.next,
struct rds_notifier, n_list);
list_move(¬ifier->n_list, ©);
count++;
}
spin_unlock_irqrestore(&rs->rs_lock, flags);
if (!count)
return 0;
while (!list_empty(©)) {
notifier = list_entry(copy.next, struct rds_notifier, n_list);
if (msghdr) {
cmsg.user_token = notifier->n_user_token;
cmsg.status = notifier->n_status;
err = put_cmsg(msghdr, SOL_RDS, RDS_CMSG_RDMA_STATUS,
sizeof(cmsg), &cmsg);
if (err)
break;
}
list_del_init(¬ifier->n_list);
kfree(notifier);
}
/* If we bailed out because of an error in put_cmsg,
* we may be left with one or more notifications that we
* didn't process. Return them to the head of the list. */
if (!list_empty(©)) {
spin_lock_irqsave(&rs->rs_lock, flags);
list_splice(©, &rs->rs_notify_queue);
spin_unlock_irqrestore(&rs->rs_lock, flags);
}
return err;
}
/*
* Queue a congestion notification
*/
static int rds_notify_cong(struct rds_sock *rs, struct msghdr *msghdr)
{
uint64_t notify = rs->rs_cong_notify;
unsigned long flags;
int err;
err = put_cmsg(msghdr, SOL_RDS, RDS_CMSG_CONG_UPDATE,
sizeof(notify), ¬ify);
if (err)
return err;
spin_lock_irqsave(&rs->rs_lock, flags);
rs->rs_cong_notify &= ~notify;
spin_unlock_irqrestore(&rs->rs_lock, flags);
return 0;
}
/*
* Receive any control messages.
*/
static int rds_cmsg_recv(struct rds_incoming *inc, struct msghdr *msg,
struct rds_sock *rs)
{
int ret = 0;
if (inc->i_usercopy.rdma_cookie) {
ret = put_cmsg(msg, SOL_RDS, RDS_CMSG_RDMA_DEST,
sizeof(inc->i_usercopy.rdma_cookie),
&inc->i_usercopy.rdma_cookie);
if (ret)
goto out;
}
if ((inc->i_usercopy.rx_tstamp != 0) &&
sock_flag(rds_rs_to_sk(rs), SOCK_RCVTSTAMP)) {
struct __kernel_old_timeval tv =
ns_to_kernel_old_timeval(inc->i_usercopy.rx_tstamp);
if (!sock_flag(rds_rs_to_sk(rs), SOCK_TSTAMP_NEW)) {
ret = put_cmsg(msg, SOL_SOCKET, SO_TIMESTAMP_OLD,
sizeof(tv), &tv);
} else {
struct __kernel_sock_timeval sk_tv;
sk_tv.tv_sec = tv.tv_sec;
sk_tv.tv_usec = tv.tv_usec;
ret = put_cmsg(msg, SOL_SOCKET, SO_TIMESTAMP_NEW,
sizeof(sk_tv), &sk_tv);
}
if (ret)
goto out;
}
if (rs->rs_rx_traces) {
struct rds_cmsg_rx_trace t;
int i, j;
memset(&t, 0, sizeof(t));
inc->i_rx_lat_trace[RDS_MSG_RX_CMSG] = local_clock();
t.rx_traces = rs->rs_rx_traces;
for (i = 0; i < rs->rs_rx_traces; i++) {
j = rs->rs_rx_trace[i];
t.rx_trace_pos[i] = j;
t.rx_trace[i] = inc->i_rx_lat_trace[j + 1] -
inc->i_rx_lat_trace[j];
}
ret = put_cmsg(msg, SOL_RDS, RDS_CMSG_RXPATH_LATENCY,
sizeof(t), &t);
if (ret)
goto out;
}
out:
return ret;
}
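/* Deliver one batch of completed zerocopy cookies as an
 * RDS_CMSG_ZCOPY_COMPLETION control message, provided the socket has
 * SOCK_ZEROCOPY set and the caller supplied enough cmsg space. If put_cmsg()
 * fails, the batch is pushed back onto the queue so it is not lost.
 */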
static bool rds_recvmsg_zcookie(struct rds_sock *rs, struct msghdr *msg)
{
struct rds_msg_zcopy_queue *q = &rs->rs_zcookie_queue;
struct rds_msg_zcopy_info *info = NULL;
struct rds_zcopy_cookies *done;
unsigned long flags;
if (!msg->msg_control)
return false;
if (!sock_flag(rds_rs_to_sk(rs), SOCK_ZEROCOPY) ||
msg->msg_controllen < CMSG_SPACE(sizeof(*done)))
return false;
spin_lock_irqsave(&q->lock, flags);
if (!list_empty(&q->zcookie_head)) {
info = list_entry(q->zcookie_head.next,
struct rds_msg_zcopy_info, rs_zcookie_next);
list_del(&info->rs_zcookie_next);
}
spin_unlock_irqrestore(&q->lock, flags);
if (!info)
return false;
done = &info->zcookies;
if (put_cmsg(msg, SOL_RDS, RDS_CMSG_ZCOPY_COMPLETION, sizeof(*done),
done)) {
spin_lock_irqsave(&q->lock, flags);
list_add(&info->rs_zcookie_next, &q->zcookie_head);
spin_unlock_irqrestore(&q->lock, flags);
return false;
}
kfree(info);
return true;
}
int rds_recvmsg(struct socket *sock, struct msghdr *msg, size_t size,
int msg_flags)
{
struct sock *sk = sock->sk;
struct rds_sock *rs = rds_sk_to_rs(sk);
long timeo;
int ret = 0, nonblock = msg_flags & MSG_DONTWAIT;
DECLARE_SOCKADDR(struct sockaddr_in6 *, sin6, msg->msg_name);
DECLARE_SOCKADDR(struct sockaddr_in *, sin, msg->msg_name);
struct rds_incoming *inc = NULL;
/* udp_recvmsg()->sock_recvtimeo() gets away without locking too.. */
timeo = sock_rcvtimeo(sk, nonblock);
rdsdebug("size %zu flags 0x%x timeo %ld\n", size, msg_flags, timeo);
if (msg_flags & MSG_OOB)
goto out;
if (msg_flags & MSG_ERRQUEUE)
return sock_recv_errqueue(sk, msg, size, SOL_IP, IP_RECVERR);
while (1) {
/* If there are pending notifications, do those - and nothing else */
if (!list_empty(&rs->rs_notify_queue)) {
ret = rds_notify_queue_get(rs, msg);
break;
}
if (rs->rs_cong_notify) {
ret = rds_notify_cong(rs, msg);
break;
}
if (!rds_next_incoming(rs, &inc)) {
if (nonblock) {
bool reaped = rds_recvmsg_zcookie(rs, msg);
ret = reaped ? 0 : -EAGAIN;
break;
}
timeo = wait_event_interruptible_timeout(*sk_sleep(sk),
(!list_empty(&rs->rs_notify_queue) ||
rs->rs_cong_notify ||
rds_next_incoming(rs, &inc)), timeo);
rdsdebug("recvmsg woke inc %p timeo %ld\n", inc,
timeo);
if (timeo > 0 || timeo == MAX_SCHEDULE_TIMEOUT)
continue;
ret = timeo;
if (ret == 0)
ret = -ETIMEDOUT;
break;
}
rdsdebug("copying inc %p from %pI6c:%u to user\n", inc,
&inc->i_conn->c_faddr,
ntohs(inc->i_hdr.h_sport));
ret = inc->i_conn->c_trans->inc_copy_to_user(inc, &msg->msg_iter);
if (ret < 0)
break;
/*
* if the message we just copied isn't at the head of the
* recv queue then someone else raced us to return it, try
* to get the next message.
*/
if (!rds_still_queued(rs, inc, !(msg_flags & MSG_PEEK))) {
rds_inc_put(inc);
inc = NULL;
rds_stats_inc(s_recv_deliver_raced);
iov_iter_revert(&msg->msg_iter, ret);
continue;
}
if (ret < be32_to_cpu(inc->i_hdr.h_len)) {
if (msg_flags & MSG_TRUNC)
ret = be32_to_cpu(inc->i_hdr.h_len);
msg->msg_flags |= MSG_TRUNC;
}
if (rds_cmsg_recv(inc, msg, rs)) {
ret = -EFAULT;
break;
}
rds_recvmsg_zcookie(rs, msg);
rds_stats_inc(s_recv_delivered);
if (msg->msg_name) {
if (ipv6_addr_v4mapped(&inc->i_saddr)) {
sin->sin_family = AF_INET;
sin->sin_port = inc->i_hdr.h_sport;
sin->sin_addr.s_addr =
inc->i_saddr.s6_addr32[3];
memset(sin->sin_zero, 0, sizeof(sin->sin_zero));
msg->msg_namelen = sizeof(*sin);
} else {
sin6->sin6_family = AF_INET6;
sin6->sin6_port = inc->i_hdr.h_sport;
sin6->sin6_addr = inc->i_saddr;
sin6->sin6_flowinfo = 0;
sin6->sin6_scope_id = rs->rs_bound_scope_id;
msg->msg_namelen = sizeof(*sin6);
}
}
break;
}
if (inc)
rds_inc_put(inc);
out:
return ret;
}
/*
* The socket is being shut down and we're asked to drop messages that were
* queued for recvmsg. The caller has unbound the socket so the receive path
* won't queue any more incoming fragments or messages on the socket.
*/
void rds_clear_recv_queue(struct rds_sock *rs)
{
struct sock *sk = rds_rs_to_sk(rs);
struct rds_incoming *inc, *tmp;
unsigned long flags;
write_lock_irqsave(&rs->rs_recv_lock, flags);
list_for_each_entry_safe(inc, tmp, &rs->rs_recv_queue, i_item) {
rds_recv_rcvbuf_delta(rs, sk, inc->i_conn->c_lcong,
-be32_to_cpu(inc->i_hdr.h_len),
inc->i_hdr.h_dport);
list_del_init(&inc->i_item);
rds_inc_put(inc);
}
write_unlock_irqrestore(&rs->rs_recv_lock, flags);
}
/*
* inc->i_saddr isn't used here because it is only set in the receive
* path.
*/
void rds_inc_info_copy(struct rds_incoming *inc,
struct rds_info_iterator *iter,
__be32 saddr, __be32 daddr, int flip)
{
struct rds_info_message minfo;
minfo.seq = be64_to_cpu(inc->i_hdr.h_sequence);
minfo.len = be32_to_cpu(inc->i_hdr.h_len);
minfo.tos = inc->i_conn->c_tos;
if (flip) {
minfo.laddr = daddr;
minfo.faddr = saddr;
minfo.lport = inc->i_hdr.h_dport;
minfo.fport = inc->i_hdr.h_sport;
} else {
minfo.laddr = saddr;
minfo.faddr = daddr;
minfo.lport = inc->i_hdr.h_sport;
minfo.fport = inc->i_hdr.h_dport;
}
minfo.flags = 0;
rds_info_copy(iter, &minfo, sizeof(minfo));
}
#if IS_ENABLED(CONFIG_IPV6)
void rds6_inc_info_copy(struct rds_incoming *inc,
struct rds_info_iterator *iter,
struct in6_addr *saddr, struct in6_addr *daddr,
int flip)
{
struct rds6_info_message minfo6;
minfo6.seq = be64_to_cpu(inc->i_hdr.h_sequence);
minfo6.len = be32_to_cpu(inc->i_hdr.h_len);
minfo6.tos = inc->i_conn->c_tos;
if (flip) {
minfo6.laddr = *daddr;
minfo6.faddr = *saddr;
minfo6.lport = inc->i_hdr.h_dport;
minfo6.fport = inc->i_hdr.h_sport;
} else {
minfo6.laddr = *saddr;
minfo6.faddr = *daddr;
minfo6.lport = inc->i_hdr.h_sport;
minfo6.fport = inc->i_hdr.h_dport;
}
minfo6.flags = 0;
rds_info_copy(iter, &minfo6, sizeof(minfo6));
}
#endif
| linux-master | net/rds/recv.c |
/*
* Copyright (c) 2006 Oracle. All rights reserved.
*
* This software is available to you under a choice of one of two
* licenses. You may choose to be licensed under the terms of the GNU
* General Public License (GPL) Version 2, available from the file
* COPYING in the main directory of this source tree, or the
* OpenIB.org BSD license below:
*
* Redistribution and use in source and binary forms, with or
* without modification, are permitted provided that the following
* conditions are met:
*
* - Redistributions of source code must retain the above
* copyright notice, this list of conditions and the following
* disclaimer.
*
* - Redistributions in binary form must reproduce the above
* copyright notice, this list of conditions and the following
* disclaimer in the documentation and/or other materials
* provided with the distribution.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
* EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
* MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
* NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
* BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
* ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
* CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
* SOFTWARE.
*
*/
#include <linux/highmem.h>
#include <linux/gfp.h>
#include <linux/cpu.h>
#include <linux/export.h>
#include "rds.h"
struct rds_page_remainder {
struct page *r_page;
unsigned long r_offset;
};
static
DEFINE_PER_CPU_SHARED_ALIGNED(struct rds_page_remainder, rds_page_remainders);
/**
* rds_page_remainder_alloc - build up regions of a message.
*
* @scat: Scatter list for message
* @bytes: the number of bytes needed.
* @gfp: the waiting behaviour of the allocation
*
* @gfp is always ored with __GFP_HIGHMEM. Callers must be prepared to
* kmap the pages, etc.
*
* If @bytes is at least a full page then this just returns a page from
* alloc_page().
*
* If @bytes is a partial page then this stores the unused region of the
* page in a per-cpu structure. Future partial-page allocations may be
* satisfied from that cached region. This lets us waste less memory on
* small allocations with minimal complexity. It works because the transmit
* path passes read-only page regions down to devices. They hold a page
* reference until they are done with the region.
*/
int rds_page_remainder_alloc(struct scatterlist *scat, unsigned long bytes,
gfp_t gfp)
{
struct rds_page_remainder *rem;
unsigned long flags;
struct page *page;
int ret;
gfp |= __GFP_HIGHMEM;
/* jump straight to allocation if we're trying for a whole page or more */
if (bytes >= PAGE_SIZE) {
page = alloc_page(gfp);
if (!page) {
ret = -ENOMEM;
} else {
sg_set_page(scat, page, PAGE_SIZE, 0);
ret = 0;
}
goto out;
}
rem = &per_cpu(rds_page_remainders, get_cpu());
local_irq_save(flags);
while (1) {
/* avoid a tiny region getting stuck by tossing it */
if (rem->r_page && bytes > (PAGE_SIZE - rem->r_offset)) {
rds_stats_inc(s_page_remainder_miss);
__free_page(rem->r_page);
rem->r_page = NULL;
}
/* hand out a fragment from the cached page */
if (rem->r_page && bytes <= (PAGE_SIZE - rem->r_offset)) {
sg_set_page(scat, rem->r_page, bytes, rem->r_offset);
get_page(sg_page(scat));
if (rem->r_offset != 0)
rds_stats_inc(s_page_remainder_hit);
rem->r_offset += ALIGN(bytes, 8);
if (rem->r_offset >= PAGE_SIZE) {
__free_page(rem->r_page);
rem->r_page = NULL;
}
ret = 0;
break;
}
/* alloc if there is nothing for us to use */
local_irq_restore(flags);
put_cpu();
page = alloc_page(gfp);
rem = &per_cpu(rds_page_remainders, get_cpu());
local_irq_save(flags);
if (!page) {
ret = -ENOMEM;
break;
}
/* did someone race to fill the remainder before us? */
if (rem->r_page) {
__free_page(page);
continue;
}
/* otherwise install our page and loop around to alloc */
rem->r_page = page;
rem->r_offset = 0;
}
local_irq_restore(flags);
put_cpu();
out:
rdsdebug("bytes %lu ret %d %p %u %u\n", bytes, ret,
ret ? NULL : sg_page(scat), ret ? 0 : scat->offset,
ret ? 0 : scat->length);
return ret;
}
EXPORT_SYMBOL_GPL(rds_page_remainder_alloc);
void rds_page_exit(void)
{
unsigned int cpu;
for_each_possible_cpu(cpu) {
struct rds_page_remainder *rem;
rem = &per_cpu(rds_page_remainders, cpu);
rdsdebug("cpu %u\n", cpu);
if (rem->r_page)
__free_page(rem->r_page);
rem->r_page = NULL;
}
}
| linux-master | net/rds/page.c |
/*
* Copyright (c) 2006, 2019 Oracle and/or its affiliates. All rights reserved.
*
* This software is available to you under a choice of one of two
* licenses. You may choose to be licensed under the terms of the GNU
* General Public License (GPL) Version 2, available from the file
* COPYING in the main directory of this source tree, or the
* OpenIB.org BSD license below:
*
* Redistribution and use in source and binary forms, with or
* without modification, are permitted provided that the following
* conditions are met:
*
* - Redistributions of source code must retain the above
* copyright notice, this list of conditions and the following
* disclaimer.
*
* - Redistributions in binary form must reproduce the above
* copyright notice, this list of conditions and the following
* disclaimer in the documentation and/or other materials
* provided with the distribution.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
* EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
* MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
* NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
* BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
* ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
* CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
* SOFTWARE.
*
*/
#include <linux/kernel.h>
#include <linux/in.h>
#include <linux/slab.h>
#include <linux/vmalloc.h>
#include <linux/ratelimit.h>
#include <net/addrconf.h>
#include <rdma/ib_cm.h>
#include "rds_single_path.h"
#include "rds.h"
#include "ib.h"
#include "ib_mr.h"
/*
* Set the selected protocol version
*/
static void rds_ib_set_protocol(struct rds_connection *conn, unsigned int version)
{
conn->c_version = version;
}
/*
* Set up flow control
*/
static void rds_ib_set_flow_control(struct rds_connection *conn, u32 credits)
{
struct rds_ib_connection *ic = conn->c_transport_data;
if (rds_ib_sysctl_flow_control && credits != 0) {
/* We're doing flow control */
ic->i_flowctl = 1;
rds_ib_send_add_credits(conn, credits);
} else {
ic->i_flowctl = 0;
}
}
/*
* Connection established.
* We get here for both outgoing and incoming connection.
*/
void rds_ib_cm_connect_complete(struct rds_connection *conn, struct rdma_cm_event *event)
{
struct rds_ib_connection *ic = conn->c_transport_data;
const union rds_ib_conn_priv *dp = NULL;
__be64 ack_seq = 0;
__be32 credit = 0;
u8 major = 0;
u8 minor = 0;
int err;
dp = event->param.conn.private_data;
if (conn->c_isv6) {
if (event->param.conn.private_data_len >=
sizeof(struct rds6_ib_connect_private)) {
major = dp->ricp_v6.dp_protocol_major;
minor = dp->ricp_v6.dp_protocol_minor;
credit = dp->ricp_v6.dp_credit;
/* dp structure start is not guaranteed to be 8 bytes
* aligned. Since dp_ack_seq is 64-bit extended load
* operations can be used so go through get_unaligned
* to avoid unaligned errors.
*/
ack_seq = get_unaligned(&dp->ricp_v6.dp_ack_seq);
}
} else if (event->param.conn.private_data_len >=
sizeof(struct rds_ib_connect_private)) {
major = dp->ricp_v4.dp_protocol_major;
minor = dp->ricp_v4.dp_protocol_minor;
credit = dp->ricp_v4.dp_credit;
ack_seq = get_unaligned(&dp->ricp_v4.dp_ack_seq);
}
/* make sure it isn't empty data */
if (major) {
rds_ib_set_protocol(conn, RDS_PROTOCOL(major, minor));
rds_ib_set_flow_control(conn, be32_to_cpu(credit));
}
if (conn->c_version < RDS_PROTOCOL_VERSION) {
if (conn->c_version != RDS_PROTOCOL_COMPAT_VERSION) {
pr_notice("RDS/IB: Connection <%pI6c,%pI6c> version %u.%u no longer supported\n",
&conn->c_laddr, &conn->c_faddr,
RDS_PROTOCOL_MAJOR(conn->c_version),
RDS_PROTOCOL_MINOR(conn->c_version));
rds_conn_destroy(conn);
return;
}
}
pr_notice("RDS/IB: %s conn connected <%pI6c,%pI6c,%d> version %u.%u%s\n",
ic->i_active_side ? "Active" : "Passive",
&conn->c_laddr, &conn->c_faddr, conn->c_tos,
RDS_PROTOCOL_MAJOR(conn->c_version),
RDS_PROTOCOL_MINOR(conn->c_version),
ic->i_flowctl ? ", flow control" : "");
/* receive sl from the peer */
ic->i_sl = ic->i_cm_id->route.path_rec->sl;
atomic_set(&ic->i_cq_quiesce, 0);
/* Init rings and fill recv. This needs to wait until protocol
* negotiation is complete, since ring layout is different
* from 3.1 to 4.1.
*/
rds_ib_send_init_ring(ic);
rds_ib_recv_init_ring(ic);
/* Post receive buffers - as a side effect, this will update
* the posted credit count. */
rds_ib_recv_refill(conn, 1, GFP_KERNEL);
/* update ib_device with this local ipaddr */
err = rds_ib_update_ipaddr(ic->rds_ibdev, &conn->c_laddr);
if (err)
printk(KERN_ERR "rds_ib_update_ipaddr failed (%d)\n",
err);
/* If the peer gave us the last packet it saw, process this as if
* we had received a regular ACK. */
if (dp) {
if (ack_seq)
rds_send_drop_acked(conn, be64_to_cpu(ack_seq),
NULL);
}
conn->c_proposed_version = conn->c_version;
rds_connect_complete(conn);
}
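/* Build the rdma_conn_param (and optional RDS private data blob) used for
 * both connect requests and accepts: clamp responder resources and initiator
 * depth to what the device supports, advertise the requested protocol
 * version, piggyback the current ACK sequence and, when flow control is
 * enabled, hand out the currently posted receive credits.
 */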
static void rds_ib_cm_fill_conn_param(struct rds_connection *conn,
struct rdma_conn_param *conn_param,
union rds_ib_conn_priv *dp,
u32 protocol_version,
u32 max_responder_resources,
u32 max_initiator_depth,
bool isv6)
{
struct rds_ib_connection *ic = conn->c_transport_data;
struct rds_ib_device *rds_ibdev = ic->rds_ibdev;
memset(conn_param, 0, sizeof(struct rdma_conn_param));
conn_param->responder_resources =
min_t(u32, rds_ibdev->max_responder_resources, max_responder_resources);
conn_param->initiator_depth =
min_t(u32, rds_ibdev->max_initiator_depth, max_initiator_depth);
conn_param->retry_count = min_t(unsigned int, rds_ib_retry_count, 7);
conn_param->rnr_retry_count = 7;
if (dp) {
memset(dp, 0, sizeof(*dp));
if (isv6) {
dp->ricp_v6.dp_saddr = conn->c_laddr;
dp->ricp_v6.dp_daddr = conn->c_faddr;
dp->ricp_v6.dp_protocol_major =
RDS_PROTOCOL_MAJOR(protocol_version);
dp->ricp_v6.dp_protocol_minor =
RDS_PROTOCOL_MINOR(protocol_version);
dp->ricp_v6.dp_protocol_minor_mask =
cpu_to_be16(RDS_IB_SUPPORTED_PROTOCOLS);
dp->ricp_v6.dp_ack_seq =
cpu_to_be64(rds_ib_piggyb_ack(ic));
dp->ricp_v6.dp_cmn.ricpc_dp_toss = conn->c_tos;
conn_param->private_data = &dp->ricp_v6;
conn_param->private_data_len = sizeof(dp->ricp_v6);
} else {
dp->ricp_v4.dp_saddr = conn->c_laddr.s6_addr32[3];
dp->ricp_v4.dp_daddr = conn->c_faddr.s6_addr32[3];
dp->ricp_v4.dp_protocol_major =
RDS_PROTOCOL_MAJOR(protocol_version);
dp->ricp_v4.dp_protocol_minor =
RDS_PROTOCOL_MINOR(protocol_version);
dp->ricp_v4.dp_protocol_minor_mask =
cpu_to_be16(RDS_IB_SUPPORTED_PROTOCOLS);
dp->ricp_v4.dp_ack_seq =
cpu_to_be64(rds_ib_piggyb_ack(ic));
dp->ricp_v4.dp_cmn.ricpc_dp_toss = conn->c_tos;
conn_param->private_data = &dp->ricp_v4;
conn_param->private_data_len = sizeof(dp->ricp_v4);
}
/* Advertise flow control */
if (ic->i_flowctl) {
unsigned int credits;
credits = IB_GET_POST_CREDITS
(atomic_read(&ic->i_credits));
if (isv6)
dp->ricp_v6.dp_credit = cpu_to_be32(credits);
else
dp->ricp_v4.dp_credit = cpu_to_be32(credits);
atomic_sub(IB_SET_POST_CREDITS(credits),
&ic->i_credits);
}
}
}
static void rds_ib_cq_event_handler(struct ib_event *event, void *data)
{
rdsdebug("event %u (%s) data %p\n",
event->event, ib_event_msg(event->event), data);
}
/* Plucking the oldest entry from the ring can be done concurrently with
* the thread refilling the ring. Each ring operation is protected by
* spinlocks and the transient state of refilling doesn't change the
* recording of which entry is oldest.
*
* This relies on IB only calling one cq comp_handler for each cq so that
* there will only be one caller of rds_recv_incoming() per RDS connection.
*/
static void rds_ib_cq_comp_handler_recv(struct ib_cq *cq, void *context)
{
struct rds_connection *conn = context;
struct rds_ib_connection *ic = conn->c_transport_data;
rdsdebug("conn %p cq %p\n", conn, cq);
rds_ib_stats_inc(s_ib_evt_handler_call);
tasklet_schedule(&ic->i_recv_tasklet);
}
static void poll_scq(struct rds_ib_connection *ic, struct ib_cq *cq,
struct ib_wc *wcs)
{
int nr, i;
struct ib_wc *wc;
while ((nr = ib_poll_cq(cq, RDS_IB_WC_MAX, wcs)) > 0) {
for (i = 0; i < nr; i++) {
wc = wcs + i;
rdsdebug("wc wr_id 0x%llx status %u byte_len %u imm_data %u\n",
(unsigned long long)wc->wr_id, wc->status,
wc->byte_len, be32_to_cpu(wc->ex.imm_data));
if (wc->wr_id <= ic->i_send_ring.w_nr ||
wc->wr_id == RDS_IB_ACK_WR_ID)
rds_ib_send_cqe_handler(ic, wc);
else
rds_ib_mr_cqe_handler(ic, wc);
}
}
}
static void rds_ib_tasklet_fn_send(unsigned long data)
{
struct rds_ib_connection *ic = (struct rds_ib_connection *)data;
struct rds_connection *conn = ic->conn;
rds_ib_stats_inc(s_ib_tasklet_call);
/* if cq has been already reaped, ignore incoming cq event */
if (atomic_read(&ic->i_cq_quiesce))
return;
poll_scq(ic, ic->i_send_cq, ic->i_send_wc);
ib_req_notify_cq(ic->i_send_cq, IB_CQ_NEXT_COMP);
poll_scq(ic, ic->i_send_cq, ic->i_send_wc);
if (rds_conn_up(conn) &&
(!test_bit(RDS_LL_SEND_FULL, &conn->c_flags) ||
test_bit(0, &conn->c_map_queued)))
rds_send_xmit(&ic->conn->c_path[0]);
}
static void poll_rcq(struct rds_ib_connection *ic, struct ib_cq *cq,
struct ib_wc *wcs,
struct rds_ib_ack_state *ack_state)
{
int nr, i;
struct ib_wc *wc;
while ((nr = ib_poll_cq(cq, RDS_IB_WC_MAX, wcs)) > 0) {
for (i = 0; i < nr; i++) {
wc = wcs + i;
rdsdebug("wc wr_id 0x%llx status %u byte_len %u imm_data %u\n",
(unsigned long long)wc->wr_id, wc->status,
wc->byte_len, be32_to_cpu(wc->ex.imm_data));
rds_ib_recv_cqe_handler(ic, wc, ack_state);
}
}
}
static void rds_ib_tasklet_fn_recv(unsigned long data)
{
struct rds_ib_connection *ic = (struct rds_ib_connection *)data;
struct rds_connection *conn = ic->conn;
struct rds_ib_device *rds_ibdev = ic->rds_ibdev;
struct rds_ib_ack_state state;
if (!rds_ibdev)
rds_conn_drop(conn);
rds_ib_stats_inc(s_ib_tasklet_call);
/* if cq has been already reaped, ignore incoming cq event */
if (atomic_read(&ic->i_cq_quiesce))
return;
memset(&state, 0, sizeof(state));
poll_rcq(ic, ic->i_recv_cq, ic->i_recv_wc, &state);
ib_req_notify_cq(ic->i_recv_cq, IB_CQ_SOLICITED);
poll_rcq(ic, ic->i_recv_cq, ic->i_recv_wc, &state);
if (state.ack_next_valid)
rds_ib_set_ack(ic, state.ack_next, state.ack_required);
if (state.ack_recv_valid && state.ack_recv > ic->i_ack_recv) {
rds_send_drop_acked(conn, state.ack_recv, NULL);
ic->i_ack_recv = state.ack_recv;
}
if (rds_conn_up(conn))
rds_ib_attempt_ack(ic);
}
static void rds_ib_qp_event_handler(struct ib_event *event, void *data)
{
struct rds_connection *conn = data;
struct rds_ib_connection *ic = conn->c_transport_data;
rdsdebug("conn %p ic %p event %u (%s)\n", conn, ic, event->event,
ib_event_msg(event->event));
switch (event->event) {
case IB_EVENT_COMM_EST:
rdma_notify(ic->i_cm_id, IB_EVENT_COMM_EST);
break;
default:
rdsdebug("Fatal QP Event %u (%s) - connection %pI6c->%pI6c, reconnecting\n",
event->event, ib_event_msg(event->event),
&conn->c_laddr, &conn->c_faddr);
rds_conn_drop(conn);
break;
}
}
static void rds_ib_cq_comp_handler_send(struct ib_cq *cq, void *context)
{
struct rds_connection *conn = context;
struct rds_ib_connection *ic = conn->c_transport_data;
rdsdebug("conn %p cq %p\n", conn, cq);
rds_ib_stats_inc(s_ib_evt_handler_call);
tasklet_schedule(&ic->i_send_tasklet);
}
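/* Pick the completion vector with the lowest current load so CQs are spread
 * across the device's interrupt vectors, and bump that vector's load count.
 */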
static inline int ibdev_get_unused_vector(struct rds_ib_device *rds_ibdev)
{
int min = rds_ibdev->vector_load[rds_ibdev->dev->num_comp_vectors - 1];
int index = rds_ibdev->dev->num_comp_vectors - 1;
int i;
for (i = rds_ibdev->dev->num_comp_vectors - 1; i >= 0; i--) {
if (rds_ibdev->vector_load[i] < min) {
index = i;
min = rds_ibdev->vector_load[i];
}
}
rds_ibdev->vector_load[index]++;
return index;
}
static inline void ibdev_put_vector(struct rds_ib_device *rds_ibdev, int index)
{
rds_ibdev->vector_load[index]--;
}
static void rds_dma_hdr_free(struct ib_device *dev, struct rds_header *hdr,
dma_addr_t dma_addr, enum dma_data_direction dir)
{
ib_dma_unmap_single(dev, dma_addr, sizeof(*hdr), dir);
kfree(hdr);
}
static struct rds_header *rds_dma_hdr_alloc(struct ib_device *dev,
dma_addr_t *dma_addr, enum dma_data_direction dir)
{
struct rds_header *hdr;
hdr = kzalloc_node(sizeof(*hdr), GFP_KERNEL, ibdev_to_node(dev));
if (!hdr)
return NULL;
*dma_addr = ib_dma_map_single(dev, hdr, sizeof(*hdr),
DMA_BIDIRECTIONAL);
if (ib_dma_mapping_error(dev, *dma_addr)) {
kfree(hdr);
return NULL;
}
return hdr;
}
/* Free the DMA memory used to store struct rds_header.
*
* @dev: the RDS IB device
* @hdrs: pointer to the array storing DMA memory pointers
* @dma_addrs: pointer to the array storing DMA addresses
* @num_hdrs: number of headers to free.
* @dir: DMA direction used when the headers were mapped
*/
static void rds_dma_hdrs_free(struct rds_ib_device *dev,
struct rds_header **hdrs, dma_addr_t *dma_addrs, u32 num_hdrs,
enum dma_data_direction dir)
{
u32 i;
for (i = 0; i < num_hdrs; i++)
rds_dma_hdr_free(dev->dev, hdrs[i], dma_addrs[i], dir);
kvfree(hdrs);
kvfree(dma_addrs);
}
/* Allocate and DMA-map the memory used to store struct rds_header for
* sending/receiving packets. The pointers to the DMA memory and the
* associated DMA addresses are stored in two arrays.
*
* @dev: the RDS IB device
* @dma_addrs: pointer to the array for storing DMA addresses
* @num_hdrs: number of headers to allocate
* @dir: DMA direction for the mapping
*
* It returns the pointer to the array storing the DMA memory pointers. On
* error, NULL pointer is returned.
*/
static struct rds_header **rds_dma_hdrs_alloc(struct rds_ib_device *dev,
dma_addr_t **dma_addrs, u32 num_hdrs,
enum dma_data_direction dir)
{
struct rds_header **hdrs;
dma_addr_t *hdr_daddrs;
u32 i;
hdrs = kvmalloc_node(sizeof(*hdrs) * num_hdrs, GFP_KERNEL,
ibdev_to_node(dev->dev));
if (!hdrs)
return NULL;
hdr_daddrs = kvmalloc_node(sizeof(*hdr_daddrs) * num_hdrs, GFP_KERNEL,
ibdev_to_node(dev->dev));
if (!hdr_daddrs) {
kvfree(hdrs);
return NULL;
}
for (i = 0; i < num_hdrs; i++) {
hdrs[i] = rds_dma_hdr_alloc(dev->dev, &hdr_daddrs[i], dir);
if (!hdrs[i]) {
rds_dma_hdrs_free(dev, hdrs, hdr_daddrs, i, dir);
return NULL;
}
}
*dma_addrs = hdr_daddrs;
return hdrs;
}
/*
* This needs to be very careful to not leave IS_ERR pointers around for
* cleanup to trip over.
*/
static int rds_ib_setup_qp(struct rds_connection *conn)
{
struct rds_ib_connection *ic = conn->c_transport_data;
struct ib_device *dev = ic->i_cm_id->device;
struct ib_qp_init_attr attr;
struct ib_cq_init_attr cq_attr = {};
struct rds_ib_device *rds_ibdev;
unsigned long max_wrs;
int ret, fr_queue_space;
/*
* It's normal to see a null device if an incoming connection races
* with device removal, so we don't print a warning.
*/
rds_ibdev = rds_ib_get_client_data(dev);
if (!rds_ibdev)
return -EOPNOTSUPP;
/* The fr_queue_space is currently set to 512, to add extra space on
* completion queue and send queue. This extra space is used for FRWR
* registration and invalidation work requests
*/
fr_queue_space = RDS_IB_DEFAULT_FR_WR;
/* add the conn now so that connection establishment has the dev */
rds_ib_add_conn(rds_ibdev, conn);
max_wrs = rds_ibdev->max_wrs < rds_ib_sysctl_max_send_wr + 1 ?
rds_ibdev->max_wrs - 1 : rds_ib_sysctl_max_send_wr;
if (ic->i_send_ring.w_nr != max_wrs)
rds_ib_ring_resize(&ic->i_send_ring, max_wrs);
max_wrs = rds_ibdev->max_wrs < rds_ib_sysctl_max_recv_wr + 1 ?
rds_ibdev->max_wrs - 1 : rds_ib_sysctl_max_recv_wr;
if (ic->i_recv_ring.w_nr != max_wrs)
rds_ib_ring_resize(&ic->i_recv_ring, max_wrs);
/* Protection domain and memory range */
ic->i_pd = rds_ibdev->pd;
ic->i_scq_vector = ibdev_get_unused_vector(rds_ibdev);
cq_attr.cqe = ic->i_send_ring.w_nr + fr_queue_space + 1;
cq_attr.comp_vector = ic->i_scq_vector;
ic->i_send_cq = ib_create_cq(dev, rds_ib_cq_comp_handler_send,
rds_ib_cq_event_handler, conn,
&cq_attr);
if (IS_ERR(ic->i_send_cq)) {
ret = PTR_ERR(ic->i_send_cq);
ic->i_send_cq = NULL;
ibdev_put_vector(rds_ibdev, ic->i_scq_vector);
rdsdebug("ib_create_cq send failed: %d\n", ret);
goto rds_ibdev_out;
}
ic->i_rcq_vector = ibdev_get_unused_vector(rds_ibdev);
cq_attr.cqe = ic->i_recv_ring.w_nr;
cq_attr.comp_vector = ic->i_rcq_vector;
ic->i_recv_cq = ib_create_cq(dev, rds_ib_cq_comp_handler_recv,
rds_ib_cq_event_handler, conn,
&cq_attr);
if (IS_ERR(ic->i_recv_cq)) {
ret = PTR_ERR(ic->i_recv_cq);
ic->i_recv_cq = NULL;
ibdev_put_vector(rds_ibdev, ic->i_rcq_vector);
rdsdebug("ib_create_cq recv failed: %d\n", ret);
goto send_cq_out;
}
ret = ib_req_notify_cq(ic->i_send_cq, IB_CQ_NEXT_COMP);
if (ret) {
rdsdebug("ib_req_notify_cq send failed: %d\n", ret);
goto recv_cq_out;
}
ret = ib_req_notify_cq(ic->i_recv_cq, IB_CQ_SOLICITED);
if (ret) {
rdsdebug("ib_req_notify_cq recv failed: %d\n", ret);
goto recv_cq_out;
}
/* XXX negotiate max send/recv with remote? */
memset(&attr, 0, sizeof(attr));
attr.event_handler = rds_ib_qp_event_handler;
attr.qp_context = conn;
/* + 1 to allow for the single ack message */
attr.cap.max_send_wr = ic->i_send_ring.w_nr + fr_queue_space + 1;
attr.cap.max_recv_wr = ic->i_recv_ring.w_nr + 1;
attr.cap.max_send_sge = rds_ibdev->max_sge;
attr.cap.max_recv_sge = RDS_IB_RECV_SGE;
attr.sq_sig_type = IB_SIGNAL_REQ_WR;
attr.qp_type = IB_QPT_RC;
attr.send_cq = ic->i_send_cq;
attr.recv_cq = ic->i_recv_cq;
/*
* XXX this can fail if max_*_wr is too large? Are we supposed
* to back off until we get a value that the hardware can support?
*/
ret = rdma_create_qp(ic->i_cm_id, ic->i_pd, &attr);
if (ret) {
rdsdebug("rdma_create_qp failed: %d\n", ret);
goto recv_cq_out;
}
ic->i_send_hdrs = rds_dma_hdrs_alloc(rds_ibdev, &ic->i_send_hdrs_dma,
ic->i_send_ring.w_nr,
DMA_TO_DEVICE);
if (!ic->i_send_hdrs) {
ret = -ENOMEM;
rdsdebug("DMA send hdrs alloc failed\n");
goto qp_out;
}
ic->i_recv_hdrs = rds_dma_hdrs_alloc(rds_ibdev, &ic->i_recv_hdrs_dma,
ic->i_recv_ring.w_nr,
DMA_FROM_DEVICE);
if (!ic->i_recv_hdrs) {
ret = -ENOMEM;
rdsdebug("DMA recv hdrs alloc failed\n");
goto send_hdrs_dma_out;
}
ic->i_ack = rds_dma_hdr_alloc(rds_ibdev->dev, &ic->i_ack_dma,
DMA_TO_DEVICE);
if (!ic->i_ack) {
ret = -ENOMEM;
rdsdebug("DMA ack header alloc failed\n");
goto recv_hdrs_dma_out;
}
ic->i_sends = vzalloc_node(array_size(sizeof(struct rds_ib_send_work),
ic->i_send_ring.w_nr),
ibdev_to_node(dev));
if (!ic->i_sends) {
ret = -ENOMEM;
rdsdebug("send allocation failed\n");
goto ack_dma_out;
}
ic->i_recvs = vzalloc_node(array_size(sizeof(struct rds_ib_recv_work),
ic->i_recv_ring.w_nr),
ibdev_to_node(dev));
if (!ic->i_recvs) {
ret = -ENOMEM;
rdsdebug("recv allocation failed\n");
goto sends_out;
}
rds_ib_recv_init_ack(ic);
rdsdebug("conn %p pd %p cq %p %p\n", conn, ic->i_pd,
ic->i_send_cq, ic->i_recv_cq);
goto out;
sends_out:
vfree(ic->i_sends);
ack_dma_out:
rds_dma_hdr_free(rds_ibdev->dev, ic->i_ack, ic->i_ack_dma,
DMA_TO_DEVICE);
ic->i_ack = NULL;
recv_hdrs_dma_out:
rds_dma_hdrs_free(rds_ibdev, ic->i_recv_hdrs, ic->i_recv_hdrs_dma,
ic->i_recv_ring.w_nr, DMA_FROM_DEVICE);
ic->i_recv_hdrs = NULL;
ic->i_recv_hdrs_dma = NULL;
send_hdrs_dma_out:
rds_dma_hdrs_free(rds_ibdev, ic->i_send_hdrs, ic->i_send_hdrs_dma,
ic->i_send_ring.w_nr, DMA_TO_DEVICE);
ic->i_send_hdrs = NULL;
ic->i_send_hdrs_dma = NULL;
qp_out:
rdma_destroy_qp(ic->i_cm_id);
recv_cq_out:
ib_destroy_cq(ic->i_recv_cq);
ic->i_recv_cq = NULL;
send_cq_out:
ib_destroy_cq(ic->i_send_cq);
ic->i_send_cq = NULL;
rds_ibdev_out:
rds_ib_remove_conn(rds_ibdev, conn);
out:
rds_ib_dev_put(rds_ibdev);
return ret;
}
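/* Work out which RDS protocol version to use for an incoming connection
 * based on the private data carried in the rdma_cm connect request.
 * Returns 0 if the peer's version is incompatible with ours.
 */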
static u32 rds_ib_protocol_compatible(struct rdma_cm_event *event, bool isv6)
{
const union rds_ib_conn_priv *dp = event->param.conn.private_data;
u8 data_len, major, minor;
u32 version = 0;
__be16 mask;
u16 common;
/*
* rdma_cm private data is odd - when there is any private data in the
* request, we will be given a pretty large buffer without telling us the
* original size. The only way to tell the difference is by looking at
* the contents, which are initialized to zero.
* If the protocol version fields aren't set, this is a connection attempt
* from an older version. This could be 3.0 or 2.0 - we can't tell.
* We really should have changed this for OFED 1.3 :-(
*/
/* Be paranoid. RDS always has privdata */
if (!event->param.conn.private_data_len) {
printk(KERN_NOTICE "RDS incoming connection has no private data, "
"rejecting\n");
return 0;
}
if (isv6) {
data_len = sizeof(struct rds6_ib_connect_private);
major = dp->ricp_v6.dp_protocol_major;
minor = dp->ricp_v6.dp_protocol_minor;
mask = dp->ricp_v6.dp_protocol_minor_mask;
} else {
data_len = sizeof(struct rds_ib_connect_private);
major = dp->ricp_v4.dp_protocol_major;
minor = dp->ricp_v4.dp_protocol_minor;
mask = dp->ricp_v4.dp_protocol_minor_mask;
}
/* Even if len is crap *now* I still want to check it. -ASG */
if (event->param.conn.private_data_len < data_len || major == 0)
return RDS_PROTOCOL_4_0;
common = be16_to_cpu(mask) & RDS_IB_SUPPORTED_PROTOCOLS;
if (major == 4 && common) {
version = RDS_PROTOCOL_4_0;
while ((common >>= 1) != 0)
version++;
} else if (RDS_PROTOCOL_COMPAT_VERSION ==
RDS_PROTOCOL(major, minor)) {
version = RDS_PROTOCOL_COMPAT_VERSION;
} else {
if (isv6)
printk_ratelimited(KERN_NOTICE "RDS: Connection from %pI6c using incompatible protocol version %u.%u\n",
&dp->ricp_v6.dp_saddr, major, minor);
else
printk_ratelimited(KERN_NOTICE "RDS: Connection from %pI4 using incompatible protocol version %u.%u\n",
&dp->ricp_v4.dp_saddr, major, minor);
}
return version;
}
#if IS_ENABLED(CONFIG_IPV6)
/* Given an IPv6 address, find the net_device which hosts that address and
* return its index. This is used by the rds_ib_cm_handle_connect() code to
* find the interface index of where an incoming request comes from when
* the request is using a link local address.
*
* Note one problem in this search. It is possible that two interfaces have
* the same link local address. Unfortunately, this cannot be solved unless
* the underlying layer gives us the interface which an incoming RDMA connect
* request comes from.
*/
static u32 __rds_find_ifindex(struct net *net, const struct in6_addr *addr)
{
struct net_device *dev;
int idx = 0;
rcu_read_lock();
for_each_netdev_rcu(net, dev) {
if (ipv6_chk_addr(net, addr, dev, 1)) {
idx = dev->ifindex;
break;
}
}
rcu_read_unlock();
return idx;
}
#endif
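/* Handle an incoming RDMA connect request: validate the protocol version,
 * create or look up the RDS connection for the peer, set up the QP and
 * accept (or reject) the request. The return value tells the rdma_cm
 * whether it should destroy the cm_id.
 */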
int rds_ib_cm_handle_connect(struct rdma_cm_id *cm_id,
struct rdma_cm_event *event, bool isv6)
{
__be64 lguid = cm_id->route.path_rec->sgid.global.interface_id;
__be64 fguid = cm_id->route.path_rec->dgid.global.interface_id;
const struct rds_ib_conn_priv_cmn *dp_cmn;
struct rds_connection *conn = NULL;
struct rds_ib_connection *ic = NULL;
struct rdma_conn_param conn_param;
const union rds_ib_conn_priv *dp;
union rds_ib_conn_priv dp_rep;
struct in6_addr s_mapped_addr;
struct in6_addr d_mapped_addr;
const struct in6_addr *saddr6;
const struct in6_addr *daddr6;
int destroy = 1;
u32 ifindex = 0;
u32 version;
int err = 1;
/* Check whether the remote protocol version matches ours. */
version = rds_ib_protocol_compatible(event, isv6);
if (!version) {
err = RDS_RDMA_REJ_INCOMPAT;
goto out;
}
dp = event->param.conn.private_data;
if (isv6) {
#if IS_ENABLED(CONFIG_IPV6)
dp_cmn = &dp->ricp_v6.dp_cmn;
saddr6 = &dp->ricp_v6.dp_saddr;
daddr6 = &dp->ricp_v6.dp_daddr;
/* If either address is link local, need to find the
* interface index in order to create a proper RDS
* connection.
*/
if (ipv6_addr_type(daddr6) & IPV6_ADDR_LINKLOCAL) {
/* Using init_net for now .. */
ifindex = __rds_find_ifindex(&init_net, daddr6);
/* No index found... Need to bail out. */
if (ifindex == 0) {
err = -EOPNOTSUPP;
goto out;
}
} else if (ipv6_addr_type(saddr6) & IPV6_ADDR_LINKLOCAL) {
/* Use our address to find the correct index. */
ifindex = __rds_find_ifindex(&init_net, daddr6);
/* No index found... Need to bail out. */
if (ifindex == 0) {
err = -EOPNOTSUPP;
goto out;
}
}
#else
err = -EOPNOTSUPP;
goto out;
#endif
} else {
dp_cmn = &dp->ricp_v4.dp_cmn;
ipv6_addr_set_v4mapped(dp->ricp_v4.dp_saddr, &s_mapped_addr);
ipv6_addr_set_v4mapped(dp->ricp_v4.dp_daddr, &d_mapped_addr);
saddr6 = &s_mapped_addr;
daddr6 = &d_mapped_addr;
}
rdsdebug("saddr %pI6c daddr %pI6c RDSv%u.%u lguid 0x%llx fguid 0x%llx, tos:%d\n",
saddr6, daddr6, RDS_PROTOCOL_MAJOR(version),
RDS_PROTOCOL_MINOR(version),
(unsigned long long)be64_to_cpu(lguid),
(unsigned long long)be64_to_cpu(fguid), dp_cmn->ricpc_dp_toss);
/* RDS/IB is not currently netns aware, thus init_net */
conn = rds_conn_create(&init_net, daddr6, saddr6,
&rds_ib_transport, dp_cmn->ricpc_dp_toss,
GFP_KERNEL, ifindex);
if (IS_ERR(conn)) {
rdsdebug("rds_conn_create failed (%ld)\n", PTR_ERR(conn));
conn = NULL;
goto out;
}
/*
* The connection request may occur while the
* previous connection exist, e.g. in case of failover.
* But as connections may be initiated simultaneously
* by both hosts, we have a random backoff mechanism -
* see the comment above rds_queue_reconnect()
*/
mutex_lock(&conn->c_cm_lock);
if (!rds_conn_transition(conn, RDS_CONN_DOWN, RDS_CONN_CONNECTING)) {
if (rds_conn_state(conn) == RDS_CONN_UP) {
rdsdebug("incoming connect while connecting\n");
rds_conn_drop(conn);
rds_ib_stats_inc(s_ib_listen_closed_stale);
		} else if (rds_conn_state(conn) == RDS_CONN_CONNECTING) {
			/* Wait and see - our connect may still be succeeding */
			rds_ib_stats_inc(s_ib_connect_raced);
		}
goto out;
}
ic = conn->c_transport_data;
rds_ib_set_protocol(conn, version);
rds_ib_set_flow_control(conn, be32_to_cpu(dp_cmn->ricpc_credit));
/* If the peer gave us the last packet it saw, process this as if
* we had received a regular ACK. */
if (dp_cmn->ricpc_ack_seq)
rds_send_drop_acked(conn, be64_to_cpu(dp_cmn->ricpc_ack_seq),
NULL);
BUG_ON(cm_id->context);
BUG_ON(ic->i_cm_id);
ic->i_cm_id = cm_id;
cm_id->context = conn;
	/* We got halfway through setting up the ib_connection; if we
* fail now, we have to take the long route out of this mess. */
destroy = 0;
err = rds_ib_setup_qp(conn);
if (err) {
rds_ib_conn_error(conn, "rds_ib_setup_qp failed (%d)\n", err);
goto out;
}
rds_ib_cm_fill_conn_param(conn, &conn_param, &dp_rep, version,
event->param.conn.responder_resources,
event->param.conn.initiator_depth, isv6);
rdma_set_min_rnr_timer(cm_id, IB_RNR_TIMER_000_32);
/* rdma_accept() calls rdma_reject() internally if it fails */
if (rdma_accept(cm_id, &conn_param))
rds_ib_conn_error(conn, "rdma_accept failed\n");
out:
if (conn)
mutex_unlock(&conn->c_cm_lock);
if (err)
rdma_reject(cm_id, &err, sizeof(int),
IB_CM_REJ_CONSUMER_DEFINED);
return destroy;
}
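/* Active side of connection establishment: set up the QP and issue the
 * rdma_cm connect request with our proposed protocol version.
 */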
int rds_ib_cm_initiate_connect(struct rdma_cm_id *cm_id, bool isv6)
{
struct rds_connection *conn = cm_id->context;
struct rds_ib_connection *ic = conn->c_transport_data;
struct rdma_conn_param conn_param;
union rds_ib_conn_priv dp;
int ret;
	/* If the peer doesn't do protocol negotiation, we must
	 * default to RDS_PROTOCOL_4_1 */
rds_ib_set_protocol(conn, RDS_PROTOCOL_4_1);
ic->i_flowctl = rds_ib_sysctl_flow_control; /* advertise flow control */
ret = rds_ib_setup_qp(conn);
if (ret) {
rds_ib_conn_error(conn, "rds_ib_setup_qp failed (%d)\n", ret);
goto out;
}
rds_ib_cm_fill_conn_param(conn, &conn_param, &dp,
conn->c_proposed_version,
UINT_MAX, UINT_MAX, isv6);
ret = rdma_connect_locked(cm_id, &conn_param);
if (ret)
rds_ib_conn_error(conn, "rdma_connect_locked failed (%d)\n",
ret);
out:
/* Beware - returning non-zero tells the rdma_cm to destroy
* the cm_id. We should certainly not do it as long as we still
* "own" the cm_id. */
if (ret) {
if (ic->i_cm_id == cm_id)
ret = 0;
}
ic->i_active_side = true;
return ret;
}
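/* Start connecting a path: create the rdma_cm id and kick off address
 * resolution; the rest of the setup continues from the cm event handler.
 */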
int rds_ib_conn_path_connect(struct rds_conn_path *cp)
{
struct rds_connection *conn = cp->cp_conn;
struct sockaddr_storage src, dest;
rdma_cm_event_handler handler;
struct rds_ib_connection *ic;
int ret;
ic = conn->c_transport_data;
	/* XXX I wonder what effect the port space has */
/* delegate cm event handler to rdma_transport */
#if IS_ENABLED(CONFIG_IPV6)
if (conn->c_isv6)
handler = rds6_rdma_cm_event_handler;
else
#endif
handler = rds_rdma_cm_event_handler;
ic->i_cm_id = rdma_create_id(&init_net, handler, conn,
RDMA_PS_TCP, IB_QPT_RC);
if (IS_ERR(ic->i_cm_id)) {
ret = PTR_ERR(ic->i_cm_id);
ic->i_cm_id = NULL;
rdsdebug("rdma_create_id() failed: %d\n", ret);
goto out;
}
rdsdebug("created cm id %p for conn %p\n", ic->i_cm_id, conn);
if (ipv6_addr_v4mapped(&conn->c_faddr)) {
struct sockaddr_in *sin;
sin = (struct sockaddr_in *)&src;
sin->sin_family = AF_INET;
sin->sin_addr.s_addr = conn->c_laddr.s6_addr32[3];
sin->sin_port = 0;
sin = (struct sockaddr_in *)&dest;
sin->sin_family = AF_INET;
sin->sin_addr.s_addr = conn->c_faddr.s6_addr32[3];
sin->sin_port = htons(RDS_PORT);
} else {
struct sockaddr_in6 *sin6;
sin6 = (struct sockaddr_in6 *)&src;
sin6->sin6_family = AF_INET6;
sin6->sin6_addr = conn->c_laddr;
sin6->sin6_port = 0;
sin6->sin6_scope_id = conn->c_dev_if;
sin6 = (struct sockaddr_in6 *)&dest;
sin6->sin6_family = AF_INET6;
sin6->sin6_addr = conn->c_faddr;
sin6->sin6_port = htons(RDS_CM_PORT);
sin6->sin6_scope_id = conn->c_dev_if;
}
ret = rdma_resolve_addr(ic->i_cm_id, (struct sockaddr *)&src,
(struct sockaddr *)&dest,
RDS_RDMA_RESOLVE_TIMEOUT_MS);
if (ret) {
rdsdebug("addr resolve failed for cm id %p: %d\n", ic->i_cm_id,
ret);
rdma_destroy_id(ic->i_cm_id);
ic->i_cm_id = NULL;
}
out:
return ret;
}
/*
* This is so careful about only cleaning up resources that were built up
* so that it can be called at any point during startup. In fact it
* can be called multiple times for a given connection.
*/
void rds_ib_conn_path_shutdown(struct rds_conn_path *cp)
{
struct rds_connection *conn = cp->cp_conn;
struct rds_ib_connection *ic = conn->c_transport_data;
int err = 0;
rdsdebug("cm %p pd %p cq %p %p qp %p\n", ic->i_cm_id,
ic->i_pd, ic->i_send_cq, ic->i_recv_cq,
ic->i_cm_id ? ic->i_cm_id->qp : NULL);
if (ic->i_cm_id) {
rdsdebug("disconnecting cm %p\n", ic->i_cm_id);
err = rdma_disconnect(ic->i_cm_id);
if (err) {
/* Actually this may happen quite frequently, when
* an outgoing connect raced with an incoming connect.
*/
rdsdebug("failed to disconnect, cm: %p err %d\n",
ic->i_cm_id, err);
}
/* kick off "flush_worker" for all pools in order to reap
* all FRMR registrations that are still marked "FRMR_IS_INUSE"
*/
rds_ib_flush_mrs();
/*
* We want to wait for tx and rx completion to finish
* before we tear down the connection, but we have to be
* careful not to get stuck waiting on a send ring that
* only has unsignaled sends in it. We've shutdown new
* sends before getting here so by waiting for signaled
* sends to complete we're ensured that there will be no
* more tx processing.
*/
wait_event(rds_ib_ring_empty_wait,
rds_ib_ring_empty(&ic->i_recv_ring) &&
(atomic_read(&ic->i_signaled_sends) == 0) &&
(atomic_read(&ic->i_fastreg_inuse_count) == 0) &&
(atomic_read(&ic->i_fastreg_wrs) == RDS_IB_DEFAULT_FR_WR));
tasklet_kill(&ic->i_send_tasklet);
tasklet_kill(&ic->i_recv_tasklet);
atomic_set(&ic->i_cq_quiesce, 1);
/* first destroy the ib state that generates callbacks */
if (ic->i_cm_id->qp)
rdma_destroy_qp(ic->i_cm_id);
if (ic->i_send_cq) {
if (ic->rds_ibdev)
ibdev_put_vector(ic->rds_ibdev, ic->i_scq_vector);
ib_destroy_cq(ic->i_send_cq);
}
if (ic->i_recv_cq) {
if (ic->rds_ibdev)
ibdev_put_vector(ic->rds_ibdev, ic->i_rcq_vector);
ib_destroy_cq(ic->i_recv_cq);
}
if (ic->rds_ibdev) {
/* then free the resources that ib callbacks use */
if (ic->i_send_hdrs) {
rds_dma_hdrs_free(ic->rds_ibdev,
ic->i_send_hdrs,
ic->i_send_hdrs_dma,
ic->i_send_ring.w_nr,
DMA_TO_DEVICE);
ic->i_send_hdrs = NULL;
ic->i_send_hdrs_dma = NULL;
}
if (ic->i_recv_hdrs) {
rds_dma_hdrs_free(ic->rds_ibdev,
ic->i_recv_hdrs,
ic->i_recv_hdrs_dma,
ic->i_recv_ring.w_nr,
DMA_FROM_DEVICE);
ic->i_recv_hdrs = NULL;
ic->i_recv_hdrs_dma = NULL;
}
if (ic->i_ack) {
rds_dma_hdr_free(ic->rds_ibdev->dev, ic->i_ack,
ic->i_ack_dma, DMA_TO_DEVICE);
ic->i_ack = NULL;
}
} else {
WARN_ON(ic->i_send_hdrs);
WARN_ON(ic->i_send_hdrs_dma);
WARN_ON(ic->i_recv_hdrs);
WARN_ON(ic->i_recv_hdrs_dma);
WARN_ON(ic->i_ack);
}
if (ic->i_sends)
rds_ib_send_clear_ring(ic);
if (ic->i_recvs)
rds_ib_recv_clear_ring(ic);
rdma_destroy_id(ic->i_cm_id);
/*
* Move connection back to the nodev list.
*/
if (ic->rds_ibdev)
rds_ib_remove_conn(ic->rds_ibdev, conn);
ic->i_cm_id = NULL;
ic->i_pd = NULL;
ic->i_send_cq = NULL;
ic->i_recv_cq = NULL;
}
BUG_ON(ic->rds_ibdev);
/* Clear pending transmit */
if (ic->i_data_op) {
struct rds_message *rm;
rm = container_of(ic->i_data_op, struct rds_message, data);
rds_message_put(rm);
ic->i_data_op = NULL;
}
/* Clear the ACK state */
clear_bit(IB_ACK_IN_FLIGHT, &ic->i_ack_flags);
#ifdef KERNEL_HAS_ATOMIC64
atomic64_set(&ic->i_ack_next, 0);
#else
ic->i_ack_next = 0;
#endif
ic->i_ack_recv = 0;
/* Clear flow control state */
ic->i_flowctl = 0;
atomic_set(&ic->i_credits, 0);
/* Re-init rings, but retain sizes. */
rds_ib_ring_init(&ic->i_send_ring, ic->i_send_ring.w_nr);
rds_ib_ring_init(&ic->i_recv_ring, ic->i_recv_ring.w_nr);
if (ic->i_ibinc) {
rds_inc_put(&ic->i_ibinc->ii_inc);
ic->i_ibinc = NULL;
}
vfree(ic->i_sends);
ic->i_sends = NULL;
vfree(ic->i_recvs);
ic->i_recvs = NULL;
ic->i_active_side = false;
}
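/* Allocate the per-connection IB transport state. Rings start out empty;
 * the real IB resources are created later in rds_ib_setup_qp().
 */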
int rds_ib_conn_alloc(struct rds_connection *conn, gfp_t gfp)
{
struct rds_ib_connection *ic;
unsigned long flags;
int ret;
/* XXX too lazy? */
ic = kzalloc(sizeof(struct rds_ib_connection), gfp);
if (!ic)
return -ENOMEM;
ret = rds_ib_recv_alloc_caches(ic, gfp);
if (ret) {
kfree(ic);
return ret;
}
INIT_LIST_HEAD(&ic->ib_node);
tasklet_init(&ic->i_send_tasklet, rds_ib_tasklet_fn_send,
(unsigned long)ic);
tasklet_init(&ic->i_recv_tasklet, rds_ib_tasklet_fn_recv,
(unsigned long)ic);
mutex_init(&ic->i_recv_mutex);
#ifndef KERNEL_HAS_ATOMIC64
spin_lock_init(&ic->i_ack_lock);
#endif
atomic_set(&ic->i_signaled_sends, 0);
atomic_set(&ic->i_fastreg_wrs, RDS_IB_DEFAULT_FR_WR);
/*
* rds_ib_conn_shutdown() waits for these to be emptied so they
* must be initialized before it can be called.
*/
rds_ib_ring_init(&ic->i_send_ring, 0);
rds_ib_ring_init(&ic->i_recv_ring, 0);
ic->conn = conn;
conn->c_transport_data = ic;
spin_lock_irqsave(&ib_nodev_conns_lock, flags);
list_add_tail(&ic->ib_node, &ib_nodev_conns);
spin_unlock_irqrestore(&ib_nodev_conns_lock, flags);
rdsdebug("conn %p conn ic %p\n", conn, conn->c_transport_data);
return 0;
}
/*
* Free a connection. Connection must be shut down and not set for reconnect.
*/
void rds_ib_conn_free(void *arg)
{
struct rds_ib_connection *ic = arg;
spinlock_t *lock_ptr;
rdsdebug("ic %p\n", ic);
/*
* Conn is either on a dev's list or on the nodev list.
* A race with shutdown() or connect() would cause problems
* (since rds_ibdev would change) but that should never happen.
*/
lock_ptr = ic->rds_ibdev ? &ic->rds_ibdev->spinlock : &ib_nodev_conns_lock;
spin_lock_irq(lock_ptr);
list_del(&ic->ib_node);
spin_unlock_irq(lock_ptr);
rds_ib_recv_free_caches(ic);
kfree(ic);
}
/*
* An error occurred on the connection
*/
void
__rds_ib_conn_error(struct rds_connection *conn, const char *fmt, ...)
{
va_list ap;
rds_conn_drop(conn);
va_start(ap, fmt);
vprintk(fmt, ap);
va_end(ap);
}
| linux-master | net/rds/ib_cm.c |
/*
* Copyright (c) 2006, 2018 Oracle and/or its affiliates. All rights reserved.
*
* This software is available to you under a choice of one of two
* licenses. You may choose to be licensed under the terms of the GNU
* General Public License (GPL) Version 2, available from the file
* COPYING in the main directory of this source tree, or the
* OpenIB.org BSD license below:
*
* Redistribution and use in source and binary forms, with or
* without modification, are permitted provided that the following
* conditions are met:
*
* - Redistributions of source code must retain the above
* copyright notice, this list of conditions and the following
* disclaimer.
*
* - Redistributions in binary form must reproduce the above
* copyright notice, this list of conditions and the following
* disclaimer in the documentation and/or other materials
* provided with the distribution.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
* EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
* MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
* NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
* BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
* ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
* CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
* SOFTWARE.
*
*/
#include <linux/kernel.h>
#include <linux/slab.h>
#include <linux/in.h>
#include <linux/module.h>
#include <net/tcp.h>
#include <net/net_namespace.h>
#include <net/netns/generic.h>
#include <net/addrconf.h>
#include "rds.h"
#include "tcp.h"
/* only for info exporting */
static DEFINE_SPINLOCK(rds_tcp_tc_list_lock);
static LIST_HEAD(rds_tcp_tc_list);
/* rds_tcp_tc_count counts only IPv4 connections.
* rds6_tcp_tc_count counts both IPv4 and IPv6 connections.
*/
static unsigned int rds_tcp_tc_count;
#if IS_ENABLED(CONFIG_IPV6)
static unsigned int rds6_tcp_tc_count;
#endif
/* Track rds_tcp_connection structs so they can be cleaned up */
static DEFINE_SPINLOCK(rds_tcp_conn_lock);
static LIST_HEAD(rds_tcp_conn_list);
static atomic_t rds_tcp_unloading = ATOMIC_INIT(0);
static struct kmem_cache *rds_tcp_conn_slab;
static int rds_tcp_skbuf_handler(struct ctl_table *ctl, int write,
void *buffer, size_t *lenp, loff_t *fpos);
static int rds_tcp_min_sndbuf = SOCK_MIN_SNDBUF;
static int rds_tcp_min_rcvbuf = SOCK_MIN_RCVBUF;
static struct ctl_table rds_tcp_sysctl_table[] = {
#define RDS_TCP_SNDBUF 0
{
.procname = "rds_tcp_sndbuf",
/* data is per-net pointer */
.maxlen = sizeof(int),
.mode = 0644,
.proc_handler = rds_tcp_skbuf_handler,
.extra1 = &rds_tcp_min_sndbuf,
},
#define RDS_TCP_RCVBUF 1
{
.procname = "rds_tcp_rcvbuf",
/* data is per-net pointer */
.maxlen = sizeof(int),
.mode = 0644,
.proc_handler = rds_tcp_skbuf_handler,
.extra1 = &rds_tcp_min_rcvbuf,
},
{ }
};
u32 rds_tcp_write_seq(struct rds_tcp_connection *tc)
{
/* seq# of the last byte of data in tcp send buffer */
return tcp_sk(tc->t_sock->sk)->write_seq;
}
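/* seq# of the first byte not yet acked by the peer (TCP SND.UNA) */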
u32 rds_tcp_snd_una(struct rds_tcp_connection *tc)
{
return tcp_sk(tc->t_sock->sk)->snd_una;
}
void rds_tcp_restore_callbacks(struct socket *sock,
struct rds_tcp_connection *tc)
{
rdsdebug("restoring sock %p callbacks from tc %p\n", sock, tc);
write_lock_bh(&sock->sk->sk_callback_lock);
/* done under the callback_lock to serialize with write_space */
spin_lock(&rds_tcp_tc_list_lock);
list_del_init(&tc->t_list_item);
#if IS_ENABLED(CONFIG_IPV6)
rds6_tcp_tc_count--;
#endif
if (!tc->t_cpath->cp_conn->c_isv6)
rds_tcp_tc_count--;
spin_unlock(&rds_tcp_tc_list_lock);
tc->t_sock = NULL;
sock->sk->sk_write_space = tc->t_orig_write_space;
sock->sk->sk_data_ready = tc->t_orig_data_ready;
sock->sk->sk_state_change = tc->t_orig_state_change;
sock->sk->sk_user_data = NULL;
write_unlock_bh(&sock->sk->sk_callback_lock);
}
/*
 * rds_tcp_reset_callbacks() switches to the new sock and
* returns the existing tc->t_sock.
*
* The only functions that set tc->t_sock are rds_tcp_set_callbacks
* and rds_tcp_reset_callbacks. Send and receive trust that
* it is set. The absence of RDS_CONN_UP bit protects those paths
* from being called while it isn't set.
*/
void rds_tcp_reset_callbacks(struct socket *sock,
struct rds_conn_path *cp)
{
struct rds_tcp_connection *tc = cp->cp_transport_data;
struct socket *osock = tc->t_sock;
if (!osock)
goto newsock;
/* Need to resolve a duelling SYN between peers.
* We have an outstanding SYN to this peer, which may
* potentially have transitioned to the RDS_CONN_UP state,
* so we must quiesce any send threads before resetting
* cp_transport_data. We quiesce these threads by setting
* cp_state to something other than RDS_CONN_UP, and then
* waiting for any existing threads in rds_send_xmit to
* complete release_in_xmit(). (Subsequent threads entering
	 * rds_send_xmit() will bail on !rds_conn_up().)
*
* However an incoming syn-ack at this point would end up
* marking the conn as RDS_CONN_UP, and would again permit
	 * rds_send_xmit() threads through, so ideally we would
* synchronize on RDS_CONN_UP after lock_sock(), but cannot
* do that: waiting on !RDS_IN_XMIT after lock_sock() may
* end up deadlocking with tcp_sendmsg(), and the RDS_IN_XMIT
* would not get set. As a result, we set c_state to
	 * RDS_CONN_RESETTING, to ensure that rds_tcp_state_change
* cannot mark rds_conn_path_up() in the window before lock_sock()
*/
atomic_set(&cp->cp_state, RDS_CONN_RESETTING);
wait_event(cp->cp_waitq, !test_bit(RDS_IN_XMIT, &cp->cp_flags));
/* reset receive side state for rds_tcp_data_recv() for osock */
cancel_delayed_work_sync(&cp->cp_send_w);
cancel_delayed_work_sync(&cp->cp_recv_w);
lock_sock(osock->sk);
if (tc->t_tinc) {
rds_inc_put(&tc->t_tinc->ti_inc);
tc->t_tinc = NULL;
}
tc->t_tinc_hdr_rem = sizeof(struct rds_header);
tc->t_tinc_data_rem = 0;
rds_tcp_restore_callbacks(osock, tc);
release_sock(osock->sk);
sock_release(osock);
newsock:
rds_send_path_reset(cp);
lock_sock(sock->sk);
rds_tcp_set_callbacks(sock, cp);
release_sock(sock->sk);
}
/* Add tc to rds_tcp_tc_list and set tc->t_sock. See comments
* above rds_tcp_reset_callbacks for notes about synchronization
* with data path
*/
void rds_tcp_set_callbacks(struct socket *sock, struct rds_conn_path *cp)
{
struct rds_tcp_connection *tc = cp->cp_transport_data;
rdsdebug("setting sock %p callbacks to tc %p\n", sock, tc);
write_lock_bh(&sock->sk->sk_callback_lock);
/* done under the callback_lock to serialize with write_space */
spin_lock(&rds_tcp_tc_list_lock);
list_add_tail(&tc->t_list_item, &rds_tcp_tc_list);
#if IS_ENABLED(CONFIG_IPV6)
rds6_tcp_tc_count++;
#endif
if (!tc->t_cpath->cp_conn->c_isv6)
rds_tcp_tc_count++;
spin_unlock(&rds_tcp_tc_list_lock);
/* accepted sockets need our listen data ready undone */
if (sock->sk->sk_data_ready == rds_tcp_listen_data_ready)
sock->sk->sk_data_ready = sock->sk->sk_user_data;
tc->t_sock = sock;
tc->t_cpath = cp;
tc->t_orig_data_ready = sock->sk->sk_data_ready;
tc->t_orig_write_space = sock->sk->sk_write_space;
tc->t_orig_state_change = sock->sk->sk_state_change;
sock->sk->sk_user_data = cp;
sock->sk->sk_data_ready = rds_tcp_data_ready;
sock->sk->sk_write_space = rds_tcp_write_space;
sock->sk->sk_state_change = rds_tcp_state_change;
write_unlock_bh(&sock->sk->sk_callback_lock);
}
/* Handle RDS_INFO_TCP_SOCKETS socket option. It only returns IPv4
* connections for backward compatibility.
*/
static void rds_tcp_tc_info(struct socket *rds_sock, unsigned int len,
struct rds_info_iterator *iter,
struct rds_info_lengths *lens)
{
struct rds_info_tcp_socket tsinfo;
struct rds_tcp_connection *tc;
unsigned long flags;
spin_lock_irqsave(&rds_tcp_tc_list_lock, flags);
if (len / sizeof(tsinfo) < rds_tcp_tc_count)
goto out;
list_for_each_entry(tc, &rds_tcp_tc_list, t_list_item) {
struct inet_sock *inet = inet_sk(tc->t_sock->sk);
if (tc->t_cpath->cp_conn->c_isv6)
continue;
tsinfo.local_addr = inet->inet_saddr;
tsinfo.local_port = inet->inet_sport;
tsinfo.peer_addr = inet->inet_daddr;
tsinfo.peer_port = inet->inet_dport;
tsinfo.hdr_rem = tc->t_tinc_hdr_rem;
tsinfo.data_rem = tc->t_tinc_data_rem;
tsinfo.last_sent_nxt = tc->t_last_sent_nxt;
tsinfo.last_expected_una = tc->t_last_expected_una;
tsinfo.last_seen_una = tc->t_last_seen_una;
tsinfo.tos = tc->t_cpath->cp_conn->c_tos;
rds_info_copy(iter, &tsinfo, sizeof(tsinfo));
}
out:
lens->nr = rds_tcp_tc_count;
lens->each = sizeof(tsinfo);
spin_unlock_irqrestore(&rds_tcp_tc_list_lock, flags);
}
#if IS_ENABLED(CONFIG_IPV6)
/* Handle RDS6_INFO_TCP_SOCKETS socket option. It returns both IPv4 and
* IPv6 connections. IPv4 connection address is returned in an IPv4 mapped
* address.
*/
static void rds6_tcp_tc_info(struct socket *sock, unsigned int len,
struct rds_info_iterator *iter,
struct rds_info_lengths *lens)
{
struct rds6_info_tcp_socket tsinfo6;
struct rds_tcp_connection *tc;
unsigned long flags;
spin_lock_irqsave(&rds_tcp_tc_list_lock, flags);
if (len / sizeof(tsinfo6) < rds6_tcp_tc_count)
goto out;
list_for_each_entry(tc, &rds_tcp_tc_list, t_list_item) {
struct sock *sk = tc->t_sock->sk;
struct inet_sock *inet = inet_sk(sk);
tsinfo6.local_addr = sk->sk_v6_rcv_saddr;
tsinfo6.local_port = inet->inet_sport;
tsinfo6.peer_addr = sk->sk_v6_daddr;
tsinfo6.peer_port = inet->inet_dport;
tsinfo6.hdr_rem = tc->t_tinc_hdr_rem;
tsinfo6.data_rem = tc->t_tinc_data_rem;
tsinfo6.last_sent_nxt = tc->t_last_sent_nxt;
tsinfo6.last_expected_una = tc->t_last_expected_una;
tsinfo6.last_seen_una = tc->t_last_seen_una;
rds_info_copy(iter, &tsinfo6, sizeof(tsinfo6));
}
out:
lens->nr = rds6_tcp_tc_count;
lens->each = sizeof(tsinfo6);
spin_unlock_irqrestore(&rds_tcp_tc_list_lock, flags);
}
#endif
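/* Check that @addr (possibly IPv4-mapped) is local to @net, optionally
 * restricted to the interface identified by @scope_id. Returns 0 if the
 * address can be used as a local RDS-TCP address.
 */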
int rds_tcp_laddr_check(struct net *net, const struct in6_addr *addr,
__u32 scope_id)
{
struct net_device *dev = NULL;
#if IS_ENABLED(CONFIG_IPV6)
int ret;
#endif
if (ipv6_addr_v4mapped(addr)) {
if (inet_addr_type(net, addr->s6_addr32[3]) == RTN_LOCAL)
return 0;
return -EADDRNOTAVAIL;
}
/* If the scope_id is specified, check only those addresses
* hosted on the specified interface.
*/
if (scope_id != 0) {
rcu_read_lock();
dev = dev_get_by_index_rcu(net, scope_id);
/* scope_id is not valid... */
if (!dev) {
rcu_read_unlock();
return -EADDRNOTAVAIL;
}
rcu_read_unlock();
}
#if IS_ENABLED(CONFIG_IPV6)
ret = ipv6_chk_addr(net, addr, dev, 0);
if (ret)
return 0;
#endif
return -EADDRNOTAVAIL;
}
static void rds_tcp_conn_free(void *arg)
{
struct rds_tcp_connection *tc = arg;
unsigned long flags;
rdsdebug("freeing tc %p\n", tc);
spin_lock_irqsave(&rds_tcp_conn_lock, flags);
if (!tc->t_tcp_node_detached)
list_del(&tc->t_tcp_node);
spin_unlock_irqrestore(&rds_tcp_conn_lock, flags);
kmem_cache_free(rds_tcp_conn_slab, tc);
}
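/* Allocate one rds_tcp_connection per multipath worker for @conn and link
 * them all on rds_tcp_conn_list once every allocation has succeeded.
 */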
static int rds_tcp_conn_alloc(struct rds_connection *conn, gfp_t gfp)
{
struct rds_tcp_connection *tc;
int i, j;
int ret = 0;
for (i = 0; i < RDS_MPATH_WORKERS; i++) {
tc = kmem_cache_alloc(rds_tcp_conn_slab, gfp);
if (!tc) {
ret = -ENOMEM;
goto fail;
}
mutex_init(&tc->t_conn_path_lock);
tc->t_sock = NULL;
tc->t_tinc = NULL;
tc->t_tinc_hdr_rem = sizeof(struct rds_header);
tc->t_tinc_data_rem = 0;
conn->c_path[i].cp_transport_data = tc;
tc->t_cpath = &conn->c_path[i];
tc->t_tcp_node_detached = true;
rdsdebug("rds_conn_path [%d] tc %p\n", i,
conn->c_path[i].cp_transport_data);
}
spin_lock_irq(&rds_tcp_conn_lock);
for (i = 0; i < RDS_MPATH_WORKERS; i++) {
tc = conn->c_path[i].cp_transport_data;
tc->t_tcp_node_detached = false;
list_add_tail(&tc->t_tcp_node, &rds_tcp_conn_list);
}
spin_unlock_irq(&rds_tcp_conn_lock);
fail:
if (ret) {
for (j = 0; j < i; j++)
rds_tcp_conn_free(conn->c_path[j].cp_transport_data);
}
return ret;
}
static bool list_has_conn(struct list_head *list, struct rds_connection *conn)
{
struct rds_tcp_connection *tc, *_tc;
list_for_each_entry_safe(tc, _tc, list, t_tcp_node) {
if (tc->t_cpath->cp_conn == conn)
return true;
}
return false;
}
static void rds_tcp_set_unloading(void)
{
atomic_set(&rds_tcp_unloading, 1);
}
static bool rds_tcp_is_unloading(struct rds_connection *conn)
{
return atomic_read(&rds_tcp_unloading) != 0;
}
static void rds_tcp_destroy_conns(void)
{
struct rds_tcp_connection *tc, *_tc;
LIST_HEAD(tmp_list);
/* avoid calling conn_destroy with irqs off */
spin_lock_irq(&rds_tcp_conn_lock);
list_for_each_entry_safe(tc, _tc, &rds_tcp_conn_list, t_tcp_node) {
if (!list_has_conn(&tmp_list, tc->t_cpath->cp_conn))
list_move_tail(&tc->t_tcp_node, &tmp_list);
}
spin_unlock_irq(&rds_tcp_conn_lock);
list_for_each_entry_safe(tc, _tc, &tmp_list, t_tcp_node)
rds_conn_destroy(tc->t_cpath->cp_conn);
}
static void rds_tcp_exit(void);
static u8 rds_tcp_get_tos_map(u8 tos)
{
/* all user tos mapped to default 0 for TCP transport */
return 0;
}
struct rds_transport rds_tcp_transport = {
.laddr_check = rds_tcp_laddr_check,
.xmit_path_prepare = rds_tcp_xmit_path_prepare,
.xmit_path_complete = rds_tcp_xmit_path_complete,
.xmit = rds_tcp_xmit,
.recv_path = rds_tcp_recv_path,
.conn_alloc = rds_tcp_conn_alloc,
.conn_free = rds_tcp_conn_free,
.conn_path_connect = rds_tcp_conn_path_connect,
.conn_path_shutdown = rds_tcp_conn_path_shutdown,
.inc_copy_to_user = rds_tcp_inc_copy_to_user,
.inc_free = rds_tcp_inc_free,
.stats_info_copy = rds_tcp_stats_info_copy,
.exit = rds_tcp_exit,
.get_tos_map = rds_tcp_get_tos_map,
.t_owner = THIS_MODULE,
.t_name = "tcp",
.t_type = RDS_TRANS_TCP,
.t_prefer_loopback = 1,
.t_mp_capable = 1,
.t_unloading = rds_tcp_is_unloading,
};
static unsigned int rds_tcp_netid;
/* per-network namespace private data for this module */
struct rds_tcp_net {
struct socket *rds_tcp_listen_sock;
struct work_struct rds_tcp_accept_w;
struct ctl_table_header *rds_tcp_sysctl;
struct ctl_table *ctl_table;
int sndbuf_size;
int rcvbuf_size;
};
/* All module specific customizations to the RDS-TCP socket should be done in
* rds_tcp_tune() and applied after socket creation.
*/
bool rds_tcp_tune(struct socket *sock)
{
struct sock *sk = sock->sk;
struct net *net = sock_net(sk);
struct rds_tcp_net *rtn;
tcp_sock_set_nodelay(sock->sk);
lock_sock(sk);
/* TCP timer functions might access net namespace even after
* a process which created this net namespace terminated.
*/
if (!sk->sk_net_refcnt) {
if (!maybe_get_net(net)) {
release_sock(sk);
return false;
}
/* Update ns_tracker to current stack trace and refcounted tracker */
__netns_tracker_free(net, &sk->ns_tracker, false);
sk->sk_net_refcnt = 1;
netns_tracker_alloc(net, &sk->ns_tracker, GFP_KERNEL);
sock_inuse_add(net, 1);
}
rtn = net_generic(net, rds_tcp_netid);
if (rtn->sndbuf_size > 0) {
sk->sk_sndbuf = rtn->sndbuf_size;
sk->sk_userlocks |= SOCK_SNDBUF_LOCK;
}
if (rtn->rcvbuf_size > 0) {
sk->sk_rcvbuf = rtn->rcvbuf_size;
sk->sk_userlocks |= SOCK_RCVBUF_LOCK;
}
release_sock(sk);
return true;
}
static void rds_tcp_accept_worker(struct work_struct *work)
{
struct rds_tcp_net *rtn = container_of(work,
struct rds_tcp_net,
rds_tcp_accept_w);
while (rds_tcp_accept_one(rtn->rds_tcp_listen_sock) == 0)
cond_resched();
}
void rds_tcp_accept_work(struct sock *sk)
{
struct net *net = sock_net(sk);
struct rds_tcp_net *rtn = net_generic(net, rds_tcp_netid);
queue_work(rds_wq, &rtn->rds_tcp_accept_w);
}
static __net_init int rds_tcp_init_net(struct net *net)
{
struct rds_tcp_net *rtn = net_generic(net, rds_tcp_netid);
struct ctl_table *tbl;
int err = 0;
memset(rtn, 0, sizeof(*rtn));
/* {snd, rcv}buf_size default to 0, which implies we let the
* stack pick the value, and permit auto-tuning of buffer size.
*/
if (net == &init_net) {
tbl = rds_tcp_sysctl_table;
} else {
tbl = kmemdup(rds_tcp_sysctl_table,
sizeof(rds_tcp_sysctl_table), GFP_KERNEL);
if (!tbl) {
			pr_warn("could not allocate sysctl table\n");
return -ENOMEM;
}
rtn->ctl_table = tbl;
}
tbl[RDS_TCP_SNDBUF].data = &rtn->sndbuf_size;
tbl[RDS_TCP_RCVBUF].data = &rtn->rcvbuf_size;
rtn->rds_tcp_sysctl = register_net_sysctl_sz(net, "net/rds/tcp", tbl,
ARRAY_SIZE(rds_tcp_sysctl_table));
if (!rtn->rds_tcp_sysctl) {
pr_warn("could not register sysctl\n");
err = -ENOMEM;
goto fail;
}
#if IS_ENABLED(CONFIG_IPV6)
rtn->rds_tcp_listen_sock = rds_tcp_listen_init(net, true);
#else
rtn->rds_tcp_listen_sock = rds_tcp_listen_init(net, false);
#endif
if (!rtn->rds_tcp_listen_sock) {
pr_warn("could not set up IPv6 listen sock\n");
#if IS_ENABLED(CONFIG_IPV6)
/* Try IPv4 as some systems disable IPv6 */
rtn->rds_tcp_listen_sock = rds_tcp_listen_init(net, false);
if (!rtn->rds_tcp_listen_sock) {
#endif
unregister_net_sysctl_table(rtn->rds_tcp_sysctl);
rtn->rds_tcp_sysctl = NULL;
err = -EAFNOSUPPORT;
goto fail;
#if IS_ENABLED(CONFIG_IPV6)
}
#endif
}
INIT_WORK(&rtn->rds_tcp_accept_w, rds_tcp_accept_worker);
return 0;
fail:
if (net != &init_net)
kfree(tbl);
return err;
}
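/* Per-netns teardown: stop the listen socket and destroy every RDS-TCP
 * connection that belongs to @net.
 */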
static void rds_tcp_kill_sock(struct net *net)
{
struct rds_tcp_connection *tc, *_tc;
LIST_HEAD(tmp_list);
struct rds_tcp_net *rtn = net_generic(net, rds_tcp_netid);
struct socket *lsock = rtn->rds_tcp_listen_sock;
rtn->rds_tcp_listen_sock = NULL;
rds_tcp_listen_stop(lsock, &rtn->rds_tcp_accept_w);
spin_lock_irq(&rds_tcp_conn_lock);
list_for_each_entry_safe(tc, _tc, &rds_tcp_conn_list, t_tcp_node) {
struct net *c_net = read_pnet(&tc->t_cpath->cp_conn->c_net);
if (net != c_net)
continue;
if (!list_has_conn(&tmp_list, tc->t_cpath->cp_conn)) {
list_move_tail(&tc->t_tcp_node, &tmp_list);
} else {
list_del(&tc->t_tcp_node);
tc->t_tcp_node_detached = true;
}
}
spin_unlock_irq(&rds_tcp_conn_lock);
list_for_each_entry_safe(tc, _tc, &tmp_list, t_tcp_node)
rds_conn_destroy(tc->t_cpath->cp_conn);
}
static void __net_exit rds_tcp_exit_net(struct net *net)
{
struct rds_tcp_net *rtn = net_generic(net, rds_tcp_netid);
rds_tcp_kill_sock(net);
if (rtn->rds_tcp_sysctl)
unregister_net_sysctl_table(rtn->rds_tcp_sysctl);
if (net != &init_net)
kfree(rtn->ctl_table);
}
static struct pernet_operations rds_tcp_net_ops = {
.init = rds_tcp_init_net,
.exit = rds_tcp_exit_net,
.id = &rds_tcp_netid,
.size = sizeof(struct rds_tcp_net),
};
void *rds_tcp_listen_sock_def_readable(struct net *net)
{
struct rds_tcp_net *rtn = net_generic(net, rds_tcp_netid);
struct socket *lsock = rtn->rds_tcp_listen_sock;
if (!lsock)
return NULL;
return lsock->sk->sk_user_data;
}
/* When sysctl is used to modify some kernel socket parameters, this
* function resets the RDS connections in that netns so that we can
* restart with new parameters. The assumption is that such reset
* events are few and far-between.
*/
static void rds_tcp_sysctl_reset(struct net *net)
{
struct rds_tcp_connection *tc, *_tc;
spin_lock_irq(&rds_tcp_conn_lock);
list_for_each_entry_safe(tc, _tc, &rds_tcp_conn_list, t_tcp_node) {
struct net *c_net = read_pnet(&tc->t_cpath->cp_conn->c_net);
if (net != c_net || !tc->t_sock)
continue;
/* reconnect with new parameters */
rds_conn_path_drop(tc->t_cpath, false);
}
spin_unlock_irq(&rds_tcp_conn_lock);
}
static int rds_tcp_skbuf_handler(struct ctl_table *ctl, int write,
void *buffer, size_t *lenp, loff_t *fpos)
{
struct net *net = current->nsproxy->net_ns;
int err;
err = proc_dointvec_minmax(ctl, write, buffer, lenp, fpos);
if (err < 0) {
pr_warn("Invalid input. Must be >= %d\n",
*(int *)(ctl->extra1));
return err;
}
if (write)
rds_tcp_sysctl_reset(net);
return 0;
}
static void rds_tcp_exit(void)
{
rds_tcp_set_unloading();
synchronize_rcu();
rds_info_deregister_func(RDS_INFO_TCP_SOCKETS, rds_tcp_tc_info);
#if IS_ENABLED(CONFIG_IPV6)
rds_info_deregister_func(RDS6_INFO_TCP_SOCKETS, rds6_tcp_tc_info);
#endif
unregister_pernet_device(&rds_tcp_net_ops);
rds_tcp_destroy_conns();
rds_trans_unregister(&rds_tcp_transport);
rds_tcp_recv_exit();
kmem_cache_destroy(rds_tcp_conn_slab);
}
module_exit(rds_tcp_exit);
static int __init rds_tcp_init(void)
{
int ret;
rds_tcp_conn_slab = kmem_cache_create("rds_tcp_connection",
sizeof(struct rds_tcp_connection),
0, 0, NULL);
if (!rds_tcp_conn_slab) {
ret = -ENOMEM;
goto out;
}
ret = rds_tcp_recv_init();
if (ret)
goto out_slab;
ret = register_pernet_device(&rds_tcp_net_ops);
if (ret)
goto out_recv;
rds_trans_register(&rds_tcp_transport);
rds_info_register_func(RDS_INFO_TCP_SOCKETS, rds_tcp_tc_info);
#if IS_ENABLED(CONFIG_IPV6)
rds_info_register_func(RDS6_INFO_TCP_SOCKETS, rds6_tcp_tc_info);
#endif
goto out;
out_recv:
rds_tcp_recv_exit();
out_slab:
kmem_cache_destroy(rds_tcp_conn_slab);
out:
return ret;
}
module_init(rds_tcp_init);
MODULE_AUTHOR("Oracle Corporation <[email protected]>");
MODULE_DESCRIPTION("RDS: TCP transport");
MODULE_LICENSE("Dual BSD/GPL");
| linux-master | net/rds/tcp.c |
/*
* Copyright (c) 2016 Oracle. All rights reserved.
*
* This software is available to you under a choice of one of two
* licenses. You may choose to be licensed under the terms of the GNU
* General Public License (GPL) Version 2, available from the file
* COPYING in the main directory of this source tree, or the
* OpenIB.org BSD license below:
*
* Redistribution and use in source and binary forms, with or
* without modification, are permitted provided that the following
* conditions are met:
*
* - Redistributions of source code must retain the above
* copyright notice, this list of conditions and the following
* disclaimer.
*
* - Redistributions in binary form must reproduce the above
* copyright notice, this list of conditions and the following
* disclaimer in the documentation and/or other materials
* provided with the distribution.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
* EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
* MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
* NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
* BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
* ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
* CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
* SOFTWARE.
*/
#include "ib_mr.h"
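/* Atomically transition an FRMR between states. When the MR leaves
 * FRMR_IS_INUSE, drop the connection's fastreg in-use count and wake
 * anyone waiting for the rings to drain.
 */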
static inline void
rds_transition_frwr_state(struct rds_ib_mr *ibmr,
enum rds_ib_fr_state old_state,
enum rds_ib_fr_state new_state)
{
if (cmpxchg(&ibmr->u.frmr.fr_state,
old_state, new_state) == old_state &&
old_state == FRMR_IS_INUSE) {
/* enforce order of ibmr->u.frmr.fr_state update
* before decrementing i_fastreg_inuse_count
*/
smp_mb__before_atomic();
atomic_dec(&ibmr->ic->i_fastreg_inuse_count);
if (waitqueue_active(&rds_ib_ring_empty_wait))
wake_up(&rds_ib_ring_empty_wait);
}
}
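/* Allocate an FRMR-backed MR, preferring to reuse one from the per-device
 * pool. @npages selects between the 8K and 1M message pools.
 */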
static struct rds_ib_mr *rds_ib_alloc_frmr(struct rds_ib_device *rds_ibdev,
int npages)
{
struct rds_ib_mr_pool *pool;
struct rds_ib_mr *ibmr = NULL;
struct rds_ib_frmr *frmr;
int err = 0;
if (npages <= RDS_MR_8K_MSG_SIZE)
pool = rds_ibdev->mr_8k_pool;
else
pool = rds_ibdev->mr_1m_pool;
ibmr = rds_ib_try_reuse_ibmr(pool);
if (ibmr)
return ibmr;
ibmr = kzalloc_node(sizeof(*ibmr), GFP_KERNEL,
rdsibdev_to_node(rds_ibdev));
if (!ibmr) {
err = -ENOMEM;
goto out_no_cigar;
}
frmr = &ibmr->u.frmr;
frmr->mr = ib_alloc_mr(rds_ibdev->pd, IB_MR_TYPE_MEM_REG,
pool->max_pages);
if (IS_ERR(frmr->mr)) {
		pr_warn("RDS/IB: %s failed to allocate MR\n", __func__);
err = PTR_ERR(frmr->mr);
goto out_no_cigar;
}
ibmr->pool = pool;
if (pool->pool_type == RDS_IB_MR_8K_POOL)
rds_ib_stats_inc(s_ib_rdma_mr_8k_alloc);
else
rds_ib_stats_inc(s_ib_rdma_mr_1m_alloc);
if (atomic_read(&pool->item_count) > pool->max_items_soft)
pool->max_items_soft = pool->max_items;
frmr->fr_state = FRMR_IS_FREE;
init_waitqueue_head(&frmr->fr_inv_done);
init_waitqueue_head(&frmr->fr_reg_done);
return ibmr;
out_no_cigar:
kfree(ibmr);
atomic_dec(&pool->item_count);
return ERR_PTR(err);
}
static void rds_ib_free_frmr(struct rds_ib_mr *ibmr, bool drop)
{
struct rds_ib_mr_pool *pool = ibmr->pool;
if (drop)
llist_add(&ibmr->llnode, &pool->drop_list);
else
llist_add(&ibmr->llnode, &pool->free_list);
atomic_add(ibmr->sg_len, &pool->free_pinned);
atomic_inc(&pool->dirty_count);
/* If we've pinned too many pages, request a flush */
if (atomic_read(&pool->free_pinned) >= pool->max_free_pinned ||
atomic_read(&pool->dirty_count) >= pool->max_items / 5)
queue_delayed_work(rds_ib_mr_wq, &pool->flush_worker, 10);
}
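/* Post an IB_WR_REG_MR for an already DMA-mapped scatterlist and wait for
 * the registration to complete before the MR is handed out.
 */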
static int rds_ib_post_reg_frmr(struct rds_ib_mr *ibmr)
{
struct rds_ib_frmr *frmr = &ibmr->u.frmr;
struct ib_reg_wr reg_wr;
int ret, off = 0;
while (atomic_dec_return(&ibmr->ic->i_fastreg_wrs) <= 0) {
atomic_inc(&ibmr->ic->i_fastreg_wrs);
cpu_relax();
}
ret = ib_map_mr_sg_zbva(frmr->mr, ibmr->sg, ibmr->sg_dma_len,
&off, PAGE_SIZE);
if (unlikely(ret != ibmr->sg_dma_len))
return ret < 0 ? ret : -EINVAL;
if (cmpxchg(&frmr->fr_state,
FRMR_IS_FREE, FRMR_IS_INUSE) != FRMR_IS_FREE)
return -EBUSY;
atomic_inc(&ibmr->ic->i_fastreg_inuse_count);
/* Perform a WR for the fast_reg_mr. Each individual page
* in the sg list is added to the fast reg page list and placed
* inside the fast_reg_mr WR. The key used is a rolling 8bit
* counter, which should guarantee uniqueness.
*/
ib_update_fast_reg_key(frmr->mr, ibmr->remap_count++);
frmr->fr_reg = true;
memset(®_wr, 0, sizeof(reg_wr));
reg_wr.wr.wr_id = (unsigned long)(void *)ibmr;
reg_wr.wr.opcode = IB_WR_REG_MR;
reg_wr.wr.num_sge = 0;
reg_wr.mr = frmr->mr;
reg_wr.key = frmr->mr->rkey;
reg_wr.access = IB_ACCESS_LOCAL_WRITE |
IB_ACCESS_REMOTE_READ |
IB_ACCESS_REMOTE_WRITE;
reg_wr.wr.send_flags = IB_SEND_SIGNALED;
ret = ib_post_send(ibmr->ic->i_cm_id->qp, ®_wr.wr, NULL);
if (unlikely(ret)) {
/* Failure here can be because of -ENOMEM as well */
rds_transition_frwr_state(ibmr, FRMR_IS_INUSE, FRMR_IS_STALE);
atomic_inc(&ibmr->ic->i_fastreg_wrs);
if (printk_ratelimit())
pr_warn("RDS/IB: %s returned error(%d)\n",
__func__, ret);
goto out;
}
/* Wait for the registration to complete in order to prevent an invalid
* access error resulting from a race between the memory region already
* being accessed while registration is still pending.
*/
wait_event(frmr->fr_reg_done, !frmr->fr_reg);
out:
return ret;
}
static int rds_ib_map_frmr(struct rds_ib_device *rds_ibdev,
struct rds_ib_mr_pool *pool,
struct rds_ib_mr *ibmr,
struct scatterlist *sg, unsigned int sg_len)
{
struct ib_device *dev = rds_ibdev->dev;
struct rds_ib_frmr *frmr = &ibmr->u.frmr;
int i;
u32 len;
int ret = 0;
/* We want to teardown old ibmr values here and fill it up with
* new sg values
*/
rds_ib_teardown_mr(ibmr);
ibmr->sg = sg;
ibmr->sg_len = sg_len;
ibmr->sg_dma_len = 0;
frmr->sg_byte_len = 0;
WARN_ON(ibmr->sg_dma_len);
ibmr->sg_dma_len = ib_dma_map_sg(dev, ibmr->sg, ibmr->sg_len,
DMA_BIDIRECTIONAL);
if (unlikely(!ibmr->sg_dma_len)) {
pr_warn("RDS/IB: %s failed!\n", __func__);
return -EBUSY;
}
frmr->sg_byte_len = 0;
frmr->dma_npages = 0;
len = 0;
ret = -EINVAL;
for (i = 0; i < ibmr->sg_dma_len; ++i) {
unsigned int dma_len = sg_dma_len(&ibmr->sg[i]);
u64 dma_addr = sg_dma_address(&ibmr->sg[i]);
frmr->sg_byte_len += dma_len;
if (dma_addr & ~PAGE_MASK) {
if (i > 0)
goto out_unmap;
else
++frmr->dma_npages;
}
if ((dma_addr + dma_len) & ~PAGE_MASK) {
if (i < ibmr->sg_dma_len - 1)
goto out_unmap;
else
++frmr->dma_npages;
}
len += dma_len;
}
frmr->dma_npages += len >> PAGE_SHIFT;
if (frmr->dma_npages > ibmr->pool->max_pages) {
ret = -EMSGSIZE;
goto out_unmap;
}
ret = rds_ib_post_reg_frmr(ibmr);
if (ret)
goto out_unmap;
if (ibmr->pool->pool_type == RDS_IB_MR_8K_POOL)
rds_ib_stats_inc(s_ib_rdma_mr_8k_used);
else
rds_ib_stats_inc(s_ib_rdma_mr_1m_used);
return ret;
out_unmap:
ib_dma_unmap_sg(rds_ibdev->dev, ibmr->sg, ibmr->sg_len,
DMA_BIDIRECTIONAL);
ibmr->sg_dma_len = 0;
return ret;
}
static int rds_ib_post_inv(struct rds_ib_mr *ibmr)
{
struct ib_send_wr *s_wr;
struct rds_ib_frmr *frmr = &ibmr->u.frmr;
struct rdma_cm_id *i_cm_id = ibmr->ic->i_cm_id;
int ret = -EINVAL;
if (!i_cm_id || !i_cm_id->qp || !frmr->mr)
goto out;
if (frmr->fr_state != FRMR_IS_INUSE)
goto out;
while (atomic_dec_return(&ibmr->ic->i_fastreg_wrs) <= 0) {
atomic_inc(&ibmr->ic->i_fastreg_wrs);
cpu_relax();
}
frmr->fr_inv = true;
s_wr = &frmr->fr_wr;
memset(s_wr, 0, sizeof(*s_wr));
s_wr->wr_id = (unsigned long)(void *)ibmr;
s_wr->opcode = IB_WR_LOCAL_INV;
s_wr->ex.invalidate_rkey = frmr->mr->rkey;
s_wr->send_flags = IB_SEND_SIGNALED;
ret = ib_post_send(i_cm_id->qp, s_wr, NULL);
if (unlikely(ret)) {
rds_transition_frwr_state(ibmr, FRMR_IS_INUSE, FRMR_IS_STALE);
frmr->fr_inv = false;
/* enforce order of frmr->fr_inv update
* before incrementing i_fastreg_wrs
*/
smp_mb__before_atomic();
atomic_inc(&ibmr->ic->i_fastreg_wrs);
pr_err("RDS/IB: %s returned error(%d)\n", __func__, ret);
goto out;
}
/* Wait for the FRMR_IS_FREE (or FRMR_IS_STALE) transition in order to
* 1) avoid a silly bouncing between "clean_list" and "drop_list"
	 * triggered by function "rds_ib_reg_frmr" as it releases frmr
* regions whose state is not "FRMR_IS_FREE" right away.
* 2) prevents an invalid access error in a race
* from a pending "IB_WR_LOCAL_INV" operation
* with a teardown ("dma_unmap_sg", "put_page")
* and de-registration ("ib_dereg_mr") of the corresponding
* memory region.
*/
wait_event(frmr->fr_inv_done, frmr->fr_state != FRMR_IS_INUSE);
out:
return ret;
}
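/* Completion handler shared by registration (IB_WR_REG_MR) and invalidation
 * (IB_WR_LOCAL_INV) work requests: update the FRMR state, wake any waiters
 * and return the work request credit.
 */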
void rds_ib_mr_cqe_handler(struct rds_ib_connection *ic, struct ib_wc *wc)
{
struct rds_ib_mr *ibmr = (void *)(unsigned long)wc->wr_id;
struct rds_ib_frmr *frmr = &ibmr->u.frmr;
if (wc->status != IB_WC_SUCCESS) {
rds_transition_frwr_state(ibmr, FRMR_IS_INUSE, FRMR_IS_STALE);
if (rds_conn_up(ic->conn))
rds_ib_conn_error(ic->conn,
"frmr completion <%pI4,%pI4> status %u(%s), vendor_err 0x%x, disconnecting and reconnecting\n",
&ic->conn->c_laddr,
&ic->conn->c_faddr,
wc->status,
ib_wc_status_msg(wc->status),
wc->vendor_err);
}
if (frmr->fr_inv) {
rds_transition_frwr_state(ibmr, FRMR_IS_INUSE, FRMR_IS_FREE);
frmr->fr_inv = false;
wake_up(&frmr->fr_inv_done);
}
if (frmr->fr_reg) {
frmr->fr_reg = false;
wake_up(&frmr->fr_reg_done);
}
/* enforce order of frmr->{fr_reg,fr_inv} update
* before incrementing i_fastreg_wrs
*/
smp_mb__before_atomic();
atomic_inc(&ic->i_fastreg_wrs);
}
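/* Invalidate the MRs on @list and destroy their DMA mappings. MRs are freed
 * until @goal is reached (stale MRs are always freed); MRs still marked
 * in use are kept on the list.
 */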
void rds_ib_unreg_frmr(struct list_head *list, unsigned int *nfreed,
unsigned long *unpinned, unsigned int goal)
{
struct rds_ib_mr *ibmr, *next;
struct rds_ib_frmr *frmr;
int ret = 0, ret2;
unsigned int freed = *nfreed;
	/* Invalidate every MR on the list that still has a DMA mapping */
list_for_each_entry(ibmr, list, unmap_list) {
if (ibmr->sg_dma_len) {
ret2 = rds_ib_post_inv(ibmr);
if (ret2 && !ret)
ret = ret2;
}
}
if (ret)
pr_warn("RDS/IB: %s failed (err=%d)\n", __func__, ret);
/* Now we can destroy the DMA mapping and unpin any pages */
list_for_each_entry_safe(ibmr, next, list, unmap_list) {
*unpinned += ibmr->sg_len;
frmr = &ibmr->u.frmr;
__rds_ib_teardown_mr(ibmr);
if (freed < goal || frmr->fr_state == FRMR_IS_STALE) {
/* Don't de-allocate if the MR is not free yet */
if (frmr->fr_state == FRMR_IS_INUSE)
continue;
if (ibmr->pool->pool_type == RDS_IB_MR_8K_POOL)
rds_ib_stats_inc(s_ib_rdma_mr_8k_free);
else
rds_ib_stats_inc(s_ib_rdma_mr_1m_free);
list_del(&ibmr->unmap_list);
if (frmr->mr)
ib_dereg_mr(frmr->mr);
kfree(ibmr);
freed++;
}
}
*nfreed = freed;
}
struct rds_ib_mr *rds_ib_reg_frmr(struct rds_ib_device *rds_ibdev,
struct rds_ib_connection *ic,
struct scatterlist *sg,
unsigned long nents, u32 *key)
{
struct rds_ib_mr *ibmr = NULL;
struct rds_ib_frmr *frmr;
int ret;
if (!ic) {
/* TODO: Add FRWR support for RDS_GET_MR using proxy qp*/
return ERR_PTR(-EOPNOTSUPP);
}
do {
if (ibmr)
rds_ib_free_frmr(ibmr, true);
ibmr = rds_ib_alloc_frmr(rds_ibdev, nents);
if (IS_ERR(ibmr))
return ibmr;
frmr = &ibmr->u.frmr;
} while (frmr->fr_state != FRMR_IS_FREE);
ibmr->ic = ic;
ibmr->device = rds_ibdev;
ret = rds_ib_map_frmr(rds_ibdev, ibmr->pool, ibmr, sg, nents);
if (ret == 0) {
*key = frmr->mr->rkey;
} else {
rds_ib_free_frmr(ibmr, false);
ibmr = ERR_PTR(ret);
}
return ibmr;
}
void rds_ib_free_frmr_list(struct rds_ib_mr *ibmr)
{
struct rds_ib_mr_pool *pool = ibmr->pool;
struct rds_ib_frmr *frmr = &ibmr->u.frmr;
if (frmr->fr_state == FRMR_IS_STALE)
llist_add(&ibmr->llnode, &pool->drop_list);
else
llist_add(&ibmr->llnode, &pool->free_list);
}
| linux-master | net/rds/ib_frmr.c |
/*
* Copyright (c) 2006, 2018 Oracle and/or its affiliates. All rights reserved.
*
* This software is available to you under a choice of one of two
* licenses. You may choose to be licensed under the terms of the GNU
* General Public License (GPL) Version 2, available from the file
* COPYING in the main directory of this source tree, or the
* OpenIB.org BSD license below:
*
* Redistribution and use in source and binary forms, with or
* without modification, are permitted provided that the following
* conditions are met:
*
* - Redistributions of source code must retain the above
* copyright notice, this list of conditions and the following
* disclaimer.
*
* - Redistributions in binary form must reproduce the above
* copyright notice, this list of conditions and the following
* disclaimer in the documentation and/or other materials
* provided with the distribution.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
* EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
* MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
* NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
* BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
* ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
* CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
* SOFTWARE.
*
*/
#include <linux/kernel.h>
#include <linux/gfp.h>
#include <linux/in.h>
#include <net/tcp.h>
#include <trace/events/sock.h>
#include "rds.h"
#include "tcp.h"
void rds_tcp_keepalive(struct socket *sock)
{
/* values below based on xs_udp_default_timeout */
int keepidle = 5; /* send a probe 'keepidle' secs after last data */
int keepcnt = 5; /* number of unack'ed probes before declaring dead */
sock_set_keepalive(sock->sk);
tcp_sock_set_keepcnt(sock->sk, keepcnt);
tcp_sock_set_keepidle(sock->sk, keepidle);
/* KEEPINTVL is the interval between successive probes. We follow
* the model in xs_tcp_finish_connecting() and re-use keepidle.
*/
tcp_sock_set_keepintvl(sock->sk, keepidle);
}
/* rds_tcp_accept_one_path(): if accepting on cp_index > 0, make sure the
* client's ipaddr < server's ipaddr. Otherwise, close the accepted
 * socket and force a reconnect from smaller -> larger ip addr. The reason
* we special case cp_index 0 is to allow the rds probe ping itself to itself
* get through efficiently.
* Since reconnects are only initiated from the node with the numerically
* smaller ip address, we recycle conns in RDS_CONN_ERROR on the passive side
* by moving them to CONNECTING in this function.
*/
static
struct rds_tcp_connection *rds_tcp_accept_one_path(struct rds_connection *conn)
{
int i;
int npaths = max_t(int, 1, conn->c_npaths);
/* for mprds, all paths MUST be initiated by the peer
* with the smaller address.
*/
if (rds_addr_cmp(&conn->c_faddr, &conn->c_laddr) >= 0) {
/* Make sure we initiate at least one path if this
* has not already been done; rds_start_mprds() will
* take care of additional paths, if necessary.
*/
if (npaths == 1)
rds_conn_path_connect_if_down(&conn->c_path[0]);
return NULL;
}
for (i = 0; i < npaths; i++) {
struct rds_conn_path *cp = &conn->c_path[i];
if (rds_conn_path_transition(cp, RDS_CONN_DOWN,
RDS_CONN_CONNECTING) ||
rds_conn_path_transition(cp, RDS_CONN_ERROR,
RDS_CONN_CONNECTING)) {
return cp->cp_transport_data;
}
}
return NULL;
}
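/* Accept one pending connection on the RDS-TCP listen socket, create or
 * look up the RDS connection for the peer and hand the new kernel socket
 * to one of its paths.
 */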
int rds_tcp_accept_one(struct socket *sock)
{
struct socket *new_sock = NULL;
struct rds_connection *conn;
int ret;
struct inet_sock *inet;
struct rds_tcp_connection *rs_tcp = NULL;
int conn_state;
struct rds_conn_path *cp;
struct in6_addr *my_addr, *peer_addr;
#if !IS_ENABLED(CONFIG_IPV6)
struct in6_addr saddr, daddr;
#endif
int dev_if = 0;
if (!sock) /* module unload or netns delete in progress */
return -ENETUNREACH;
ret = sock_create_lite(sock->sk->sk_family,
sock->sk->sk_type, sock->sk->sk_protocol,
&new_sock);
if (ret)
goto out;
ret = sock->ops->accept(sock, new_sock, O_NONBLOCK, true);
if (ret < 0)
goto out;
/* sock_create_lite() does not get a hold on the owner module so we
* need to do it here. Note that sock_release() uses sock->ops to
* determine if it needs to decrement the reference count. So set
* sock->ops after calling accept() in case that fails. And there's
* no need to do try_module_get() as the listener should have a hold
* already.
*/
new_sock->ops = sock->ops;
__module_get(new_sock->ops->owner);
rds_tcp_keepalive(new_sock);
if (!rds_tcp_tune(new_sock)) {
ret = -EINVAL;
goto out;
}
inet = inet_sk(new_sock->sk);
#if IS_ENABLED(CONFIG_IPV6)
my_addr = &new_sock->sk->sk_v6_rcv_saddr;
peer_addr = &new_sock->sk->sk_v6_daddr;
#else
ipv6_addr_set_v4mapped(inet->inet_saddr, &saddr);
ipv6_addr_set_v4mapped(inet->inet_daddr, &daddr);
my_addr = &saddr;
peer_addr = &daddr;
#endif
rdsdebug("accepted family %d tcp %pI6c:%u -> %pI6c:%u\n",
sock->sk->sk_family,
my_addr, ntohs(inet->inet_sport),
peer_addr, ntohs(inet->inet_dport));
#if IS_ENABLED(CONFIG_IPV6)
/* sk_bound_dev_if is not set if the peer address is not link local
* address. In this case, it happens that mcast_oif is set. So
* just use it.
*/
if ((ipv6_addr_type(my_addr) & IPV6_ADDR_LINKLOCAL) &&
!(ipv6_addr_type(peer_addr) & IPV6_ADDR_LINKLOCAL)) {
struct ipv6_pinfo *inet6;
inet6 = inet6_sk(new_sock->sk);
dev_if = inet6->mcast_oif;
} else {
dev_if = new_sock->sk->sk_bound_dev_if;
}
#endif
if (!rds_tcp_laddr_check(sock_net(sock->sk), peer_addr, dev_if)) {
/* local address connection is only allowed via loopback */
ret = -EOPNOTSUPP;
goto out;
}
conn = rds_conn_create(sock_net(sock->sk),
my_addr, peer_addr,
&rds_tcp_transport, 0, GFP_KERNEL, dev_if);
if (IS_ERR(conn)) {
ret = PTR_ERR(conn);
goto out;
}
/* An incoming SYN request came in, and TCP just accepted it.
*
* If the client reboots, this conn will need to be cleaned up.
* rds_tcp_state_change() will do that cleanup
*/
rs_tcp = rds_tcp_accept_one_path(conn);
if (!rs_tcp)
goto rst_nsk;
mutex_lock(&rs_tcp->t_conn_path_lock);
cp = rs_tcp->t_cpath;
conn_state = rds_conn_path_state(cp);
WARN_ON(conn_state == RDS_CONN_UP);
if (conn_state != RDS_CONN_CONNECTING && conn_state != RDS_CONN_ERROR)
goto rst_nsk;
if (rs_tcp->t_sock) {
/* Duelling SYN has been handled in rds_tcp_accept_one() */
rds_tcp_reset_callbacks(new_sock, cp);
/* rds_connect_path_complete() marks RDS_CONN_UP */
rds_connect_path_complete(cp, RDS_CONN_RESETTING);
} else {
rds_tcp_set_callbacks(new_sock, cp);
rds_connect_path_complete(cp, RDS_CONN_CONNECTING);
}
new_sock = NULL;
ret = 0;
if (conn->c_npaths == 0)
rds_send_ping(cp->cp_conn, cp->cp_index);
goto out;
rst_nsk:
/* reset the newly returned accept sock and bail.
* It is safe to set linger on new_sock because the RDS connection
* has not been brought up on new_sock, so no RDS-level data could
* be pending on it. By setting linger, we achieve the side-effect
* of avoiding TIME_WAIT state on new_sock.
*/
sock_no_linger(new_sock->sk);
kernel_sock_shutdown(new_sock, SHUT_RDWR);
ret = 0;
out:
if (rs_tcp)
mutex_unlock(&rs_tcp->t_conn_path_lock);
if (new_sock)
sock_release(new_sock);
return ret;
}
void rds_tcp_listen_data_ready(struct sock *sk)
{
void (*ready)(struct sock *sk);
trace_sk_data_ready(sk);
rdsdebug("listen data ready sk %p\n", sk);
read_lock_bh(&sk->sk_callback_lock);
ready = sk->sk_user_data;
if (!ready) { /* check for teardown race */
ready = sk->sk_data_ready;
goto out;
}
/*
* ->sk_data_ready is also called for a newly established child socket
* before it has been accepted and the acceptor has set up its
* data_ready handler. We only want to queue listen work for our
* listening socket.
*
* (*ready)() may be null if we are racing with netns delete, and
* the listen socket is being torn down.
*/
if (sk->sk_state == TCP_LISTEN)
rds_tcp_accept_work(sk);
else
ready = rds_tcp_listen_sock_def_readable(sock_net(sk));
out:
read_unlock_bh(&sk->sk_callback_lock);
if (ready)
ready(sk);
}
struct socket *rds_tcp_listen_init(struct net *net, bool isv6)
{
struct socket *sock = NULL;
struct sockaddr_storage ss;
struct sockaddr_in6 *sin6;
struct sockaddr_in *sin;
int addr_len;
int ret;
ret = sock_create_kern(net, isv6 ? PF_INET6 : PF_INET, SOCK_STREAM,
IPPROTO_TCP, &sock);
if (ret < 0) {
rdsdebug("could not create %s listener socket: %d\n",
isv6 ? "IPv6" : "IPv4", ret);
goto out;
}
sock->sk->sk_reuse = SK_CAN_REUSE;
tcp_sock_set_nodelay(sock->sk);
write_lock_bh(&sock->sk->sk_callback_lock);
sock->sk->sk_user_data = sock->sk->sk_data_ready;
sock->sk->sk_data_ready = rds_tcp_listen_data_ready;
write_unlock_bh(&sock->sk->sk_callback_lock);
if (isv6) {
sin6 = (struct sockaddr_in6 *)&ss;
sin6->sin6_family = PF_INET6;
sin6->sin6_addr = in6addr_any;
sin6->sin6_port = (__force u16)htons(RDS_TCP_PORT);
sin6->sin6_scope_id = 0;
sin6->sin6_flowinfo = 0;
addr_len = sizeof(*sin6);
} else {
sin = (struct sockaddr_in *)&ss;
sin->sin_family = PF_INET;
sin->sin_addr.s_addr = INADDR_ANY;
sin->sin_port = (__force u16)htons(RDS_TCP_PORT);
addr_len = sizeof(*sin);
}
ret = sock->ops->bind(sock, (struct sockaddr *)&ss, addr_len);
if (ret < 0) {
rdsdebug("could not bind %s listener socket: %d\n",
isv6 ? "IPv6" : "IPv4", ret);
goto out;
}
ret = sock->ops->listen(sock, 64);
if (ret < 0)
goto out;
return sock;
out:
if (sock)
sock_release(sock);
return NULL;
}
void rds_tcp_listen_stop(struct socket *sock, struct work_struct *acceptor)
{
struct sock *sk;
if (!sock)
return;
sk = sock->sk;
/* serialize with and prevent further callbacks */
lock_sock(sk);
write_lock_bh(&sk->sk_callback_lock);
if (sk->sk_user_data) {
sk->sk_data_ready = sk->sk_user_data;
sk->sk_user_data = NULL;
}
write_unlock_bh(&sk->sk_callback_lock);
release_sock(sk);
/* wait for accepts to stop and close the socket */
flush_workqueue(rds_wq);
flush_work(acceptor);
sock_release(sock);
}
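/*
 * For reference, the listen-socket callback handling above follows the usual
 * save-and-restore pattern. A rough sketch of the lifecycle (summary only,
 * not additional code):
 *
 *	rds_tcp_listen_init():       sk_user_data  = original sk_data_ready
 *	                             sk_data_ready = rds_tcp_listen_data_ready
 *	rds_tcp_listen_data_ready(): queue accept work while still listening,
 *	                             then invoke the saved callback
 *	rds_tcp_listen_stop():       restore sk_data_ready from sk_user_data,
 *	                             flush rds_wq and the acceptor, release sock
 */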
| linux-master | net/rds/tcp_listen.c |
/*
* Copyright (c) 2006, 2018 Oracle and/or its affiliates. All rights reserved.
*
* This software is available to you under a choice of one of two
* licenses. You may choose to be licensed under the terms of the GNU
* General Public License (GPL) Version 2, available from the file
* COPYING in the main directory of this source tree, or the
* OpenIB.org BSD license below:
*
* Redistribution and use in source and binary forms, with or
* without modification, are permitted provided that the following
* conditions are met:
*
* - Redistributions of source code must retain the above
* copyright notice, this list of conditions and the following
* disclaimer.
*
* - Redistributions in binary form must reproduce the above
* copyright notice, this list of conditions and the following
* disclaimer in the documentation and/or other materials
* provided with the distribution.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
* EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
* MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
* NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
* BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
* ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
* CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
* SOFTWARE.
*
*/
#include <linux/kernel.h>
#include <linux/list.h>
#include <linux/slab.h>
#include <linux/export.h>
#include <net/ipv6.h>
#include <net/inet6_hashtables.h>
#include <net/addrconf.h>
#include "rds.h"
#include "loop.h"
#define RDS_CONNECTION_HASH_BITS 12
#define RDS_CONNECTION_HASH_ENTRIES (1 << RDS_CONNECTION_HASH_BITS)
#define RDS_CONNECTION_HASH_MASK (RDS_CONNECTION_HASH_ENTRIES - 1)
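/* i.e. 1 << 12 == 4096 buckets; rds_conn_bucket() below keeps the low 12 bits
 * of the computed hash via RDS_CONNECTION_HASH_MASK (illustrative arithmetic
 * only).
 */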
/* converting this to RCU is a chore for another day.. */
static DEFINE_SPINLOCK(rds_conn_lock);
static unsigned long rds_conn_count;
static struct hlist_head rds_conn_hash[RDS_CONNECTION_HASH_ENTRIES];
static struct kmem_cache *rds_conn_slab;
static struct hlist_head *rds_conn_bucket(const struct in6_addr *laddr,
const struct in6_addr *faddr)
{
static u32 rds6_hash_secret __read_mostly;
static u32 rds_hash_secret __read_mostly;
u32 lhash, fhash, hash;
net_get_random_once(&rds_hash_secret, sizeof(rds_hash_secret));
net_get_random_once(&rds6_hash_secret, sizeof(rds6_hash_secret));
lhash = (__force u32)laddr->s6_addr32[3];
#if IS_ENABLED(CONFIG_IPV6)
fhash = __ipv6_addr_jhash(faddr, rds6_hash_secret);
#else
fhash = (__force u32)faddr->s6_addr32[3];
#endif
hash = __inet_ehashfn(lhash, 0, fhash, 0, rds_hash_secret);
return &rds_conn_hash[hash & RDS_CONNECTION_HASH_MASK];
}
#define rds_conn_info_set(var, test, suffix) do { \
if (test) \
var |= RDS_INFO_CONNECTION_FLAG_##suffix; \
} while (0)
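/*
 * For illustration (a sketch mirroring the info visitors further below),
 * a use such as
 *
 *	rds_conn_info_set(cinfo->flags,
 *			  atomic_read(&cp->cp_state) == RDS_CONN_UP,
 *			  CONNECTED);
 *
 * expands to
 *
 *	do {
 *		if (atomic_read(&cp->cp_state) == RDS_CONN_UP)
 *			cinfo->flags |= RDS_INFO_CONNECTION_FLAG_CONNECTED;
 *	} while (0);
 *
 * i.e. the suffix argument is token-pasted onto RDS_INFO_CONNECTION_FLAG_.
 */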
/* rcu read lock must be held or the connection spinlock */
static struct rds_connection *rds_conn_lookup(struct net *net,
struct hlist_head *head,
const struct in6_addr *laddr,
const struct in6_addr *faddr,
struct rds_transport *trans,
u8 tos, int dev_if)
{
struct rds_connection *conn, *ret = NULL;
hlist_for_each_entry_rcu(conn, head, c_hash_node) {
if (ipv6_addr_equal(&conn->c_faddr, faddr) &&
ipv6_addr_equal(&conn->c_laddr, laddr) &&
conn->c_trans == trans &&
conn->c_tos == tos &&
net == rds_conn_net(conn) &&
conn->c_dev_if == dev_if) {
ret = conn;
break;
}
}
rdsdebug("returning conn %p for %pI6c -> %pI6c\n", ret,
laddr, faddr);
return ret;
}
/*
* This is called by transports as they're bringing down a connection.
* It clears partial message state so that the transport can start sending
* and receiving over this connection again in the future. It is up to
* the transport to have serialized this call with its send and recv.
*/
static void rds_conn_path_reset(struct rds_conn_path *cp)
{
struct rds_connection *conn = cp->cp_conn;
rdsdebug("connection %pI6c to %pI6c reset\n",
&conn->c_laddr, &conn->c_faddr);
rds_stats_inc(s_conn_reset);
rds_send_path_reset(cp);
cp->cp_flags = 0;
/* Do not clear next_rx_seq here, else we cannot distinguish
* retransmitted packets from new packets, and will hand all
* of them to the application. That is not consistent with the
* reliability guarantees of RDS. */
}
static void __rds_conn_path_init(struct rds_connection *conn,
struct rds_conn_path *cp, bool is_outgoing)
{
spin_lock_init(&cp->cp_lock);
cp->cp_next_tx_seq = 1;
init_waitqueue_head(&cp->cp_waitq);
INIT_LIST_HEAD(&cp->cp_send_queue);
INIT_LIST_HEAD(&cp->cp_retrans);
cp->cp_conn = conn;
atomic_set(&cp->cp_state, RDS_CONN_DOWN);
cp->cp_send_gen = 0;
cp->cp_reconnect_jiffies = 0;
cp->cp_conn->c_proposed_version = RDS_PROTOCOL_VERSION;
INIT_DELAYED_WORK(&cp->cp_send_w, rds_send_worker);
INIT_DELAYED_WORK(&cp->cp_recv_w, rds_recv_worker);
INIT_DELAYED_WORK(&cp->cp_conn_w, rds_connect_worker);
INIT_WORK(&cp->cp_down_w, rds_shutdown_worker);
mutex_init(&cp->cp_cm_lock);
cp->cp_flags = 0;
}
/*
* There is only ever one 'conn' for a given pair of addresses in the
* system at a time. They contain messages to be retransmitted and so
* span the lifetime of the actual underlying transport connections.
*
* For now they are not garbage collected once they're created. They
* are torn down as the module is removed, if ever.
*/
static struct rds_connection *__rds_conn_create(struct net *net,
const struct in6_addr *laddr,
const struct in6_addr *faddr,
struct rds_transport *trans,
gfp_t gfp, u8 tos,
int is_outgoing,
int dev_if)
{
struct rds_connection *conn, *parent = NULL;
struct hlist_head *head = rds_conn_bucket(laddr, faddr);
struct rds_transport *loop_trans;
unsigned long flags;
int ret, i;
int npaths = (trans->t_mp_capable ? RDS_MPATH_WORKERS : 1);
rcu_read_lock();
conn = rds_conn_lookup(net, head, laddr, faddr, trans, tos, dev_if);
if (conn &&
conn->c_loopback &&
conn->c_trans != &rds_loop_transport &&
ipv6_addr_equal(laddr, faddr) &&
!is_outgoing) {
/* This is a looped back IB connection, and we're
* called by the code handling the incoming connect.
* We need a second connection object into which we
* can stick the other QP. */
parent = conn;
conn = parent->c_passive;
}
rcu_read_unlock();
if (conn)
goto out;
conn = kmem_cache_zalloc(rds_conn_slab, gfp);
if (!conn) {
conn = ERR_PTR(-ENOMEM);
goto out;
}
conn->c_path = kcalloc(npaths, sizeof(struct rds_conn_path), gfp);
if (!conn->c_path) {
kmem_cache_free(rds_conn_slab, conn);
conn = ERR_PTR(-ENOMEM);
goto out;
}
INIT_HLIST_NODE(&conn->c_hash_node);
conn->c_laddr = *laddr;
conn->c_isv6 = !ipv6_addr_v4mapped(laddr);
conn->c_faddr = *faddr;
conn->c_dev_if = dev_if;
conn->c_tos = tos;
#if IS_ENABLED(CONFIG_IPV6)
/* If the local address is link local, set c_bound_if to be the
* index used for this connection. Otherwise, set it to 0 as
* the socket is not bound to an interface. c_bound_if is used
* to look up a socket when a packet is received
*/
if (ipv6_addr_type(laddr) & IPV6_ADDR_LINKLOCAL)
conn->c_bound_if = dev_if;
else
#endif
conn->c_bound_if = 0;
rds_conn_net_set(conn, net);
ret = rds_cong_get_maps(conn);
if (ret) {
kfree(conn->c_path);
kmem_cache_free(rds_conn_slab, conn);
conn = ERR_PTR(ret);
goto out;
}
/*
* This is where a connection becomes loopback. If *any* RDS sockets
* can bind to the destination address then we'd rather the messages
* flow through loopback rather than either transport.
*/
loop_trans = rds_trans_get_preferred(net, faddr, conn->c_dev_if);
if (loop_trans) {
rds_trans_put(loop_trans);
conn->c_loopback = 1;
if (trans->t_prefer_loopback) {
if (likely(is_outgoing)) {
/* "outgoing" connection to local address.
* Protocol says it wants the connection
* handled by the loopback transport.
* This is what TCP does.
*/
trans = &rds_loop_transport;
} else {
/* No transport currently in use
* should end up here, but if it
* does, reset/destroy the connection.
*/
kfree(conn->c_path);
kmem_cache_free(rds_conn_slab, conn);
conn = ERR_PTR(-EOPNOTSUPP);
goto out;
}
}
}
conn->c_trans = trans;
init_waitqueue_head(&conn->c_hs_waitq);
for (i = 0; i < npaths; i++) {
__rds_conn_path_init(conn, &conn->c_path[i],
is_outgoing);
conn->c_path[i].cp_index = i;
}
rcu_read_lock();
if (rds_destroy_pending(conn))
ret = -ENETDOWN;
else
ret = trans->conn_alloc(conn, GFP_ATOMIC);
if (ret) {
rcu_read_unlock();
kfree(conn->c_path);
kmem_cache_free(rds_conn_slab, conn);
conn = ERR_PTR(ret);
goto out;
}
rdsdebug("allocated conn %p for %pI6c -> %pI6c over %s %s\n",
conn, laddr, faddr,
strnlen(trans->t_name, sizeof(trans->t_name)) ?
trans->t_name : "[unknown]", is_outgoing ? "(outgoing)" : "");
/*
* Since we ran without holding the conn lock, someone could
* have created the same conn (either normal or passive) in the
* interim. We check while holding the lock. If we won, we complete
* init and return our conn. If we lost, we rollback and return the
* other one.
*/
spin_lock_irqsave(&rds_conn_lock, flags);
if (parent) {
/* Creating passive conn */
if (parent->c_passive) {
trans->conn_free(conn->c_path[0].cp_transport_data);
kfree(conn->c_path);
kmem_cache_free(rds_conn_slab, conn);
conn = parent->c_passive;
} else {
parent->c_passive = conn;
rds_cong_add_conn(conn);
rds_conn_count++;
}
} else {
/* Creating normal conn */
struct rds_connection *found;
found = rds_conn_lookup(net, head, laddr, faddr, trans,
tos, dev_if);
if (found) {
struct rds_conn_path *cp;
int i;
for (i = 0; i < npaths; i++) {
cp = &conn->c_path[i];
/* The ->conn_alloc invocation may have
* allocated resource for all paths, so all
* of them may have to be freed here.
*/
if (cp->cp_transport_data)
trans->conn_free(cp->cp_transport_data);
}
kfree(conn->c_path);
kmem_cache_free(rds_conn_slab, conn);
conn = found;
} else {
conn->c_my_gen_num = rds_gen_num;
conn->c_peer_gen_num = 0;
hlist_add_head_rcu(&conn->c_hash_node, head);
rds_cong_add_conn(conn);
rds_conn_count++;
}
}
spin_unlock_irqrestore(&rds_conn_lock, flags);
rcu_read_unlock();
out:
return conn;
}
struct rds_connection *rds_conn_create(struct net *net,
const struct in6_addr *laddr,
const struct in6_addr *faddr,
struct rds_transport *trans, u8 tos,
gfp_t gfp, int dev_if)
{
return __rds_conn_create(net, laddr, faddr, trans, gfp, tos, 0, dev_if);
}
EXPORT_SYMBOL_GPL(rds_conn_create);
struct rds_connection *rds_conn_create_outgoing(struct net *net,
const struct in6_addr *laddr,
const struct in6_addr *faddr,
struct rds_transport *trans,
u8 tos, gfp_t gfp, int dev_if)
{
return __rds_conn_create(net, laddr, faddr, trans, gfp, tos, 1, dev_if);
}
EXPORT_SYMBOL_GPL(rds_conn_create_outgoing);
void rds_conn_shutdown(struct rds_conn_path *cp)
{
struct rds_connection *conn = cp->cp_conn;
/* shut it down unless it's down already */
if (!rds_conn_path_transition(cp, RDS_CONN_DOWN, RDS_CONN_DOWN)) {
/*
* Quiesce the connection mgmt handlers before we start tearing
* things down. We don't hold the mutex for the entire
* duration of the shutdown operation, else we may be
* deadlocking with the CM handler. Instead, the CM event
* handler is supposed to check for state DISCONNECTING
*/
mutex_lock(&cp->cp_cm_lock);
if (!rds_conn_path_transition(cp, RDS_CONN_UP,
RDS_CONN_DISCONNECTING) &&
!rds_conn_path_transition(cp, RDS_CONN_ERROR,
RDS_CONN_DISCONNECTING)) {
rds_conn_path_error(cp,
"shutdown called in state %d\n",
atomic_read(&cp->cp_state));
mutex_unlock(&cp->cp_cm_lock);
return;
}
mutex_unlock(&cp->cp_cm_lock);
wait_event(cp->cp_waitq,
!test_bit(RDS_IN_XMIT, &cp->cp_flags));
wait_event(cp->cp_waitq,
!test_bit(RDS_RECV_REFILL, &cp->cp_flags));
conn->c_trans->conn_path_shutdown(cp);
rds_conn_path_reset(cp);
if (!rds_conn_path_transition(cp, RDS_CONN_DISCONNECTING,
RDS_CONN_DOWN) &&
!rds_conn_path_transition(cp, RDS_CONN_ERROR,
RDS_CONN_DOWN)) {
/* This can happen - eg when we're in the middle of tearing
* down the connection, and someone unloads the rds module.
* Quite reproducible with loopback connections.
* Mostly harmless.
*
* Note that this also happens with rds-tcp because
* we could have triggered rds_conn_path_drop in irq
* mode from rds_tcp_state change on the receipt of
* a FIN, thus we need to recheck for RDS_CONN_ERROR
* here.
*/
rds_conn_path_error(cp, "%s: failed to transition "
"to state DOWN, current state "
"is %d\n", __func__,
atomic_read(&cp->cp_state));
return;
}
}
/* Then reconnect if it's still live.
* The passive side of an IB loopback connection is never added
* to the conn hash, so we never trigger a reconnect on this
* conn - the reconnect is always triggered by the active peer. */
cancel_delayed_work_sync(&cp->cp_conn_w);
rcu_read_lock();
if (!hlist_unhashed(&conn->c_hash_node)) {
rcu_read_unlock();
rds_queue_reconnect(cp);
} else {
rcu_read_unlock();
}
}
/* destroy a single rds_conn_path. rds_conn_destroy() iterates over
* all paths using rds_conn_path_destroy()
*/
static void rds_conn_path_destroy(struct rds_conn_path *cp)
{
struct rds_message *rm, *rtmp;
if (!cp->cp_transport_data)
return;
/* make sure lingering queued work won't try to ref the conn */
cancel_delayed_work_sync(&cp->cp_send_w);
cancel_delayed_work_sync(&cp->cp_recv_w);
rds_conn_path_drop(cp, true);
flush_work(&cp->cp_down_w);
/* tear down queued messages */
list_for_each_entry_safe(rm, rtmp,
&cp->cp_send_queue,
m_conn_item) {
list_del_init(&rm->m_conn_item);
BUG_ON(!list_empty(&rm->m_sock_item));
rds_message_put(rm);
}
if (cp->cp_xmit_rm)
rds_message_put(cp->cp_xmit_rm);
WARN_ON(delayed_work_pending(&cp->cp_send_w));
WARN_ON(delayed_work_pending(&cp->cp_recv_w));
WARN_ON(delayed_work_pending(&cp->cp_conn_w));
WARN_ON(work_pending(&cp->cp_down_w));
cp->cp_conn->c_trans->conn_free(cp->cp_transport_data);
}
/*
* Stop and free a connection.
*
* This can only be used in very limited circumstances. It assumes that once
* the conn has been shutdown that no one else is referencing the connection.
* We can only ensure this in the rmmod path in the current code.
*/
void rds_conn_destroy(struct rds_connection *conn)
{
unsigned long flags;
int i;
struct rds_conn_path *cp;
int npaths = (conn->c_trans->t_mp_capable ? RDS_MPATH_WORKERS : 1);
rdsdebug("freeing conn %p for %pI4 -> "
"%pI4\n", conn, &conn->c_laddr,
&conn->c_faddr);
/* Ensure conn will not be scheduled for reconnect */
spin_lock_irq(&rds_conn_lock);
hlist_del_init_rcu(&conn->c_hash_node);
spin_unlock_irq(&rds_conn_lock);
synchronize_rcu();
/* shut the connection down */
for (i = 0; i < npaths; i++) {
cp = &conn->c_path[i];
rds_conn_path_destroy(cp);
BUG_ON(!list_empty(&cp->cp_retrans));
}
/*
* The congestion maps aren't freed up here. They're
* freed by rds_cong_exit() after all the connections
* have been freed.
*/
rds_cong_remove_conn(conn);
kfree(conn->c_path);
kmem_cache_free(rds_conn_slab, conn);
spin_lock_irqsave(&rds_conn_lock, flags);
rds_conn_count--;
spin_unlock_irqrestore(&rds_conn_lock, flags);
}
EXPORT_SYMBOL_GPL(rds_conn_destroy);
static void __rds_inc_msg_cp(struct rds_incoming *inc,
struct rds_info_iterator *iter,
void *saddr, void *daddr, int flip, bool isv6)
{
#if IS_ENABLED(CONFIG_IPV6)
if (isv6)
rds6_inc_info_copy(inc, iter, saddr, daddr, flip);
else
#endif
rds_inc_info_copy(inc, iter, *(__be32 *)saddr,
*(__be32 *)daddr, flip);
}
static void rds_conn_message_info_cmn(struct socket *sock, unsigned int len,
struct rds_info_iterator *iter,
struct rds_info_lengths *lens,
int want_send, bool isv6)
{
struct hlist_head *head;
struct list_head *list;
struct rds_connection *conn;
struct rds_message *rm;
unsigned int total = 0;
unsigned long flags;
size_t i;
int j;
if (isv6)
len /= sizeof(struct rds6_info_message);
else
len /= sizeof(struct rds_info_message);
rcu_read_lock();
for (i = 0, head = rds_conn_hash; i < ARRAY_SIZE(rds_conn_hash);
i++, head++) {
hlist_for_each_entry_rcu(conn, head, c_hash_node) {
struct rds_conn_path *cp;
int npaths;
if (!isv6 && conn->c_isv6)
continue;
npaths = (conn->c_trans->t_mp_capable ?
RDS_MPATH_WORKERS : 1);
for (j = 0; j < npaths; j++) {
cp = &conn->c_path[j];
if (want_send)
list = &cp->cp_send_queue;
else
list = &cp->cp_retrans;
spin_lock_irqsave(&cp->cp_lock, flags);
/* XXX too lazy to maintain counts.. */
list_for_each_entry(rm, list, m_conn_item) {
total++;
if (total <= len)
__rds_inc_msg_cp(&rm->m_inc,
iter,
&conn->c_laddr,
&conn->c_faddr,
0, isv6);
}
spin_unlock_irqrestore(&cp->cp_lock, flags);
}
}
}
rcu_read_unlock();
lens->nr = total;
if (isv6)
lens->each = sizeof(struct rds6_info_message);
else
lens->each = sizeof(struct rds_info_message);
}
static void rds_conn_message_info(struct socket *sock, unsigned int len,
struct rds_info_iterator *iter,
struct rds_info_lengths *lens,
int want_send)
{
rds_conn_message_info_cmn(sock, len, iter, lens, want_send, false);
}
#if IS_ENABLED(CONFIG_IPV6)
static void rds6_conn_message_info(struct socket *sock, unsigned int len,
struct rds_info_iterator *iter,
struct rds_info_lengths *lens,
int want_send)
{
rds_conn_message_info_cmn(sock, len, iter, lens, want_send, true);
}
#endif
static void rds_conn_message_info_send(struct socket *sock, unsigned int len,
struct rds_info_iterator *iter,
struct rds_info_lengths *lens)
{
rds_conn_message_info(sock, len, iter, lens, 1);
}
#if IS_ENABLED(CONFIG_IPV6)
static void rds6_conn_message_info_send(struct socket *sock, unsigned int len,
struct rds_info_iterator *iter,
struct rds_info_lengths *lens)
{
rds6_conn_message_info(sock, len, iter, lens, 1);
}
#endif
static void rds_conn_message_info_retrans(struct socket *sock,
unsigned int len,
struct rds_info_iterator *iter,
struct rds_info_lengths *lens)
{
rds_conn_message_info(sock, len, iter, lens, 0);
}
#if IS_ENABLED(CONFIG_IPV6)
static void rds6_conn_message_info_retrans(struct socket *sock,
unsigned int len,
struct rds_info_iterator *iter,
struct rds_info_lengths *lens)
{
rds6_conn_message_info(sock, len, iter, lens, 0);
}
#endif
void rds_for_each_conn_info(struct socket *sock, unsigned int len,
struct rds_info_iterator *iter,
struct rds_info_lengths *lens,
int (*visitor)(struct rds_connection *, void *),
u64 *buffer,
size_t item_len)
{
struct hlist_head *head;
struct rds_connection *conn;
size_t i;
rcu_read_lock();
lens->nr = 0;
lens->each = item_len;
for (i = 0, head = rds_conn_hash; i < ARRAY_SIZE(rds_conn_hash);
i++, head++) {
hlist_for_each_entry_rcu(conn, head, c_hash_node) {
/* XXX no c_lock usage.. */
if (!visitor(conn, buffer))
continue;
/* We copy as much as we can fit in the buffer,
* but we count all items so that the caller
* can resize the buffer. */
if (len >= item_len) {
rds_info_copy(iter, buffer, item_len);
len -= item_len;
}
lens->nr++;
}
}
rcu_read_unlock();
}
EXPORT_SYMBOL_GPL(rds_for_each_conn_info);
static void rds_walk_conn_path_info(struct socket *sock, unsigned int len,
struct rds_info_iterator *iter,
struct rds_info_lengths *lens,
int (*visitor)(struct rds_conn_path *, void *),
u64 *buffer,
size_t item_len)
{
struct hlist_head *head;
struct rds_connection *conn;
size_t i;
rcu_read_lock();
lens->nr = 0;
lens->each = item_len;
for (i = 0, head = rds_conn_hash; i < ARRAY_SIZE(rds_conn_hash);
i++, head++) {
hlist_for_each_entry_rcu(conn, head, c_hash_node) {
struct rds_conn_path *cp;
/* XXX We only copy the information from the first
* path for now. The problem is that if there are
* more than one underlying paths, we cannot report
* information of all of them using the existing
* API. For example, there is only one next_tx_seq,
* which path's next_tx_seq should we report? It is
* a bug in the design of MPRDS.
*/
cp = conn->c_path;
/* XXX no cp_lock usage.. */
if (!visitor(cp, buffer))
continue;
/* We copy as much as we can fit in the buffer,
* but we count all items so that the caller
* can resize the buffer.
*/
if (len >= item_len) {
rds_info_copy(iter, buffer, item_len);
len -= item_len;
}
lens->nr++;
}
}
rcu_read_unlock();
}
static int rds_conn_info_visitor(struct rds_conn_path *cp, void *buffer)
{
struct rds_info_connection *cinfo = buffer;
struct rds_connection *conn = cp->cp_conn;
if (conn->c_isv6)
return 0;
cinfo->next_tx_seq = cp->cp_next_tx_seq;
cinfo->next_rx_seq = cp->cp_next_rx_seq;
cinfo->laddr = conn->c_laddr.s6_addr32[3];
cinfo->faddr = conn->c_faddr.s6_addr32[3];
cinfo->tos = conn->c_tos;
strncpy(cinfo->transport, conn->c_trans->t_name,
sizeof(cinfo->transport));
cinfo->flags = 0;
rds_conn_info_set(cinfo->flags, test_bit(RDS_IN_XMIT, &cp->cp_flags),
SENDING);
/* XXX Future: return the state rather than these funky bits */
rds_conn_info_set(cinfo->flags,
atomic_read(&cp->cp_state) == RDS_CONN_CONNECTING,
CONNECTING);
rds_conn_info_set(cinfo->flags,
atomic_read(&cp->cp_state) == RDS_CONN_UP,
CONNECTED);
return 1;
}
#if IS_ENABLED(CONFIG_IPV6)
static int rds6_conn_info_visitor(struct rds_conn_path *cp, void *buffer)
{
struct rds6_info_connection *cinfo6 = buffer;
struct rds_connection *conn = cp->cp_conn;
cinfo6->next_tx_seq = cp->cp_next_tx_seq;
cinfo6->next_rx_seq = cp->cp_next_rx_seq;
cinfo6->laddr = conn->c_laddr;
cinfo6->faddr = conn->c_faddr;
strncpy(cinfo6->transport, conn->c_trans->t_name,
sizeof(cinfo6->transport));
cinfo6->flags = 0;
rds_conn_info_set(cinfo6->flags, test_bit(RDS_IN_XMIT, &cp->cp_flags),
SENDING);
/* XXX Future: return the state rather than these funky bits */
rds_conn_info_set(cinfo6->flags,
atomic_read(&cp->cp_state) == RDS_CONN_CONNECTING,
CONNECTING);
rds_conn_info_set(cinfo6->flags,
atomic_read(&cp->cp_state) == RDS_CONN_UP,
CONNECTED);
/* Just return 1 as there is no error case. This is a helper function
* for rds_walk_conn_path_info() and it wants a return value.
*/
return 1;
}
#endif
static void rds_conn_info(struct socket *sock, unsigned int len,
struct rds_info_iterator *iter,
struct rds_info_lengths *lens)
{
u64 buffer[(sizeof(struct rds_info_connection) + 7) / 8];
rds_walk_conn_path_info(sock, len, iter, lens,
rds_conn_info_visitor,
buffer,
sizeof(struct rds_info_connection));
}
#if IS_ENABLED(CONFIG_IPV6)
static void rds6_conn_info(struct socket *sock, unsigned int len,
struct rds_info_iterator *iter,
struct rds_info_lengths *lens)
{
u64 buffer[(sizeof(struct rds6_info_connection) + 7) / 8];
rds_walk_conn_path_info(sock, len, iter, lens,
rds6_conn_info_visitor,
buffer,
sizeof(struct rds6_info_connection));
}
#endif
int rds_conn_init(void)
{
int ret;
ret = rds_loop_net_init(); /* register pernet callback */
if (ret)
return ret;
rds_conn_slab = kmem_cache_create("rds_connection",
sizeof(struct rds_connection),
0, 0, NULL);
if (!rds_conn_slab) {
rds_loop_net_exit();
return -ENOMEM;
}
rds_info_register_func(RDS_INFO_CONNECTIONS, rds_conn_info);
rds_info_register_func(RDS_INFO_SEND_MESSAGES,
rds_conn_message_info_send);
rds_info_register_func(RDS_INFO_RETRANS_MESSAGES,
rds_conn_message_info_retrans);
#if IS_ENABLED(CONFIG_IPV6)
rds_info_register_func(RDS6_INFO_CONNECTIONS, rds6_conn_info);
rds_info_register_func(RDS6_INFO_SEND_MESSAGES,
rds6_conn_message_info_send);
rds_info_register_func(RDS6_INFO_RETRANS_MESSAGES,
rds6_conn_message_info_retrans);
#endif
return 0;
}
void rds_conn_exit(void)
{
rds_loop_net_exit(); /* unregister pernet callback */
rds_loop_exit();
WARN_ON(!hlist_empty(rds_conn_hash));
kmem_cache_destroy(rds_conn_slab);
rds_info_deregister_func(RDS_INFO_CONNECTIONS, rds_conn_info);
rds_info_deregister_func(RDS_INFO_SEND_MESSAGES,
rds_conn_message_info_send);
rds_info_deregister_func(RDS_INFO_RETRANS_MESSAGES,
rds_conn_message_info_retrans);
#if IS_ENABLED(CONFIG_IPV6)
rds_info_deregister_func(RDS6_INFO_CONNECTIONS, rds6_conn_info);
rds_info_deregister_func(RDS6_INFO_SEND_MESSAGES,
rds6_conn_message_info_send);
rds_info_deregister_func(RDS6_INFO_RETRANS_MESSAGES,
rds6_conn_message_info_retrans);
#endif
}
/*
* Force a disconnect
*/
void rds_conn_path_drop(struct rds_conn_path *cp, bool destroy)
{
atomic_set(&cp->cp_state, RDS_CONN_ERROR);
rcu_read_lock();
if (!destroy && rds_destroy_pending(cp->cp_conn)) {
rcu_read_unlock();
return;
}
queue_work(rds_wq, &cp->cp_down_w);
rcu_read_unlock();
}
EXPORT_SYMBOL_GPL(rds_conn_path_drop);
void rds_conn_drop(struct rds_connection *conn)
{
WARN_ON(conn->c_trans->t_mp_capable);
rds_conn_path_drop(&conn->c_path[0], false);
}
EXPORT_SYMBOL_GPL(rds_conn_drop);
/*
* If the connection is down, trigger a connect. We may have scheduled a
* delayed reconnect however - in this case we should not interfere.
*/
void rds_conn_path_connect_if_down(struct rds_conn_path *cp)
{
rcu_read_lock();
if (rds_destroy_pending(cp->cp_conn)) {
rcu_read_unlock();
return;
}
if (rds_conn_path_state(cp) == RDS_CONN_DOWN &&
!test_and_set_bit(RDS_RECONNECT_PENDING, &cp->cp_flags))
queue_delayed_work(rds_wq, &cp->cp_conn_w, 0);
rcu_read_unlock();
}
EXPORT_SYMBOL_GPL(rds_conn_path_connect_if_down);
/* Check connectivity of all paths
*/
void rds_check_all_paths(struct rds_connection *conn)
{
int i = 0;
do {
rds_conn_path_connect_if_down(&conn->c_path[i]);
} while (++i < conn->c_npaths);
}
void rds_conn_connect_if_down(struct rds_connection *conn)
{
WARN_ON(conn->c_trans->t_mp_capable);
rds_conn_path_connect_if_down(&conn->c_path[0]);
}
EXPORT_SYMBOL_GPL(rds_conn_connect_if_down);
void
__rds_conn_path_error(struct rds_conn_path *cp, const char *fmt, ...)
{
va_list ap;
va_start(ap, fmt);
vprintk(fmt, ap);
va_end(ap);
rds_conn_path_drop(cp, false);
}
| linux-master | net/rds/connection.c |
/*
* Copyright (c) 2006 Oracle. All rights reserved.
*
* This software is available to you under a choice of one of two
* licenses. You may choose to be licensed under the terms of the GNU
* General Public License (GPL) Version 2, available from the file
* COPYING in the main directory of this source tree, or the
* OpenIB.org BSD license below:
*
* Redistribution and use in source and binary forms, with or
* without modification, are permitted provided that the following
* conditions are met:
*
* - Redistributions of source code must retain the above
* copyright notice, this list of conditions and the following
* disclaimer.
*
* - Redistributions in binary form must reproduce the above
* copyright notice, this list of conditions and the following
* disclaimer in the documentation and/or other materials
* provided with the distribution.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
* EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
* MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
* NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
* BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
* ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
* CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
* SOFTWARE.
*
*/
#include <linux/kernel.h>
#include "rds.h"
#include "ib.h"
/*
* Locking for IB rings.
* We assume that allocation is always protected by a mutex
* in the caller (this is a valid assumption for the current
* implementation).
*
* Freeing always happens in an interrupt, and hence only
* races with allocations, but not with other free()s.
*
* The interaction between allocation and freeing is that
* the alloc code has to determine the number of free entries.
* To this end, we maintain two counters; an allocation counter
* and a free counter. Both are allowed to run freely, and wrap
* around.
* The number of used entries is always (alloc_ctr - free_ctr) % NR.
*
* The current implementation makes free_ctr atomic. When the
* caller finds an allocation fails, it should set an "alloc fail"
* bit and retry the allocation. The "alloc fail" bit essentially tells
* the CQ completion handlers to wake it up after freeing some
* more entries.
*/
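/*
 * A worked example of the counter arithmetic above (illustrative only):
 * with w_nr = 1024, suppose the u32 counters have wrapped so that
 * w_alloc_ctr = 0x00000002 and w_free_ctr = 0xfffffffe. Then
 *
 *	used = w_alloc_ctr - w_free_ctr = 0x00000002 - 0xfffffffe = 4 (mod 2^32)
 *
 * which is the correct count even across the wrap, provided no more than
 * w_nr entries are ever outstanding at once.
 */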
/*
* This only happens on shutdown.
*/
DECLARE_WAIT_QUEUE_HEAD(rds_ib_ring_empty_wait);
void rds_ib_ring_init(struct rds_ib_work_ring *ring, u32 nr)
{
memset(ring, 0, sizeof(*ring));
ring->w_nr = nr;
rdsdebug("ring %p nr %u\n", ring, ring->w_nr);
}
static inline u32 __rds_ib_ring_used(struct rds_ib_work_ring *ring)
{
u32 diff;
/* This assumes that atomic_t has at least as many bits as u32 */
diff = ring->w_alloc_ctr - (u32) atomic_read(&ring->w_free_ctr);
BUG_ON(diff > ring->w_nr);
return diff;
}
void rds_ib_ring_resize(struct rds_ib_work_ring *ring, u32 nr)
{
/* We only ever get called from the connection setup code,
* prior to creating the QP. */
BUG_ON(__rds_ib_ring_used(ring));
ring->w_nr = nr;
}
static int __rds_ib_ring_empty(struct rds_ib_work_ring *ring)
{
return __rds_ib_ring_used(ring) == 0;
}
u32 rds_ib_ring_alloc(struct rds_ib_work_ring *ring, u32 val, u32 *pos)
{
u32 ret = 0, avail;
avail = ring->w_nr - __rds_ib_ring_used(ring);
rdsdebug("ring %p val %u next %u free %u\n", ring, val,
ring->w_alloc_ptr, avail);
if (val && avail) {
ret = min(val, avail);
*pos = ring->w_alloc_ptr;
ring->w_alloc_ptr = (ring->w_alloc_ptr + ret) % ring->w_nr;
ring->w_alloc_ctr += ret;
}
return ret;
}
void rds_ib_ring_free(struct rds_ib_work_ring *ring, u32 val)
{
ring->w_free_ptr = (ring->w_free_ptr + val) % ring->w_nr;
atomic_add(val, &ring->w_free_ctr);
if (__rds_ib_ring_empty(ring) &&
waitqueue_active(&rds_ib_ring_empty_wait))
wake_up(&rds_ib_ring_empty_wait);
}
void rds_ib_ring_unalloc(struct rds_ib_work_ring *ring, u32 val)
{
ring->w_alloc_ptr = (ring->w_alloc_ptr - val) % ring->w_nr;
ring->w_alloc_ctr -= val;
}
int rds_ib_ring_empty(struct rds_ib_work_ring *ring)
{
return __rds_ib_ring_empty(ring);
}
int rds_ib_ring_low(struct rds_ib_work_ring *ring)
{
return __rds_ib_ring_used(ring) <= (ring->w_nr >> 1);
}
/*
* returns the oldest allocated ring entry. This will be the next one
* freed. This can't be called if there are none allocated.
*/
u32 rds_ib_ring_oldest(struct rds_ib_work_ring *ring)
{
return ring->w_free_ptr;
}
/*
* returns the number of completed work requests.
*/
u32 rds_ib_ring_completed(struct rds_ib_work_ring *ring, u32 wr_id, u32 oldest)
{
u32 ret;
if (oldest <= (unsigned long long)wr_id)
ret = (unsigned long long)wr_id - oldest + 1;
else
ret = ring->w_nr - oldest + (unsigned long long)wr_id + 1;
rdsdebug("ring %p ret %u wr_id %u oldest %u\n", ring, ret,
wr_id, oldest);
return ret;
}
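/*
 * Worked example for rds_ib_ring_completed() (illustrative only): with
 * w_nr = 1024, oldest = 1020 and wr_id = 5, the completions cover entries
 * 1020..1023 and 0..5, so
 *
 *	ret = w_nr - oldest + wr_id + 1 = 1024 - 1020 + 5 + 1 = 10
 *
 * In the non-wrapped case (oldest <= wr_id) it is simply wr_id - oldest + 1.
 */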
| linux-master | net/rds/ib_ring.c |
/*
* Copyright (c) 2006, 2019 Oracle and/or its affiliates. All rights reserved.
*
* This software is available to you under a choice of one of two
* licenses. You may choose to be licensed under the terms of the GNU
* General Public License (GPL) Version 2, available from the file
* COPYING in the main directory of this source tree, or the
* OpenIB.org BSD license below:
*
* Redistribution and use in source and binary forms, with or
* without modification, are permitted provided that the following
* conditions are met:
*
* - Redistributions of source code must retain the above
* copyright notice, this list of conditions and the following
* disclaimer.
*
* - Redistributions in binary form must reproduce the above
* copyright notice, this list of conditions and the following
* disclaimer in the documentation and/or other materials
* provided with the distribution.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
* EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
* MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
* NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
* BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
* ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
* CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
* SOFTWARE.
*
*/
#include <linux/kernel.h>
#include <linux/sched/clock.h>
#include <linux/slab.h>
#include <linux/pci.h>
#include <linux/dma-mapping.h>
#include <rdma/rdma_cm.h>
#include "rds_single_path.h"
#include "rds.h"
#include "ib.h"
static struct kmem_cache *rds_ib_incoming_slab;
static struct kmem_cache *rds_ib_frag_slab;
static atomic_t rds_ib_allocation = ATOMIC_INIT(0);
void rds_ib_recv_init_ring(struct rds_ib_connection *ic)
{
struct rds_ib_recv_work *recv;
u32 i;
for (i = 0, recv = ic->i_recvs; i < ic->i_recv_ring.w_nr; i++, recv++) {
struct ib_sge *sge;
recv->r_ibinc = NULL;
recv->r_frag = NULL;
recv->r_wr.next = NULL;
recv->r_wr.wr_id = i;
recv->r_wr.sg_list = recv->r_sge;
recv->r_wr.num_sge = RDS_IB_RECV_SGE;
sge = &recv->r_sge[0];
sge->addr = ic->i_recv_hdrs_dma[i];
sge->length = sizeof(struct rds_header);
sge->lkey = ic->i_pd->local_dma_lkey;
sge = &recv->r_sge[1];
sge->addr = 0;
sge->length = RDS_FRAG_SIZE;
sge->lkey = ic->i_pd->local_dma_lkey;
}
}
/*
* The entire 'from' list, including the from element itself, is put on
* to the tail of the 'to' list.
*/
static void list_splice_entire_tail(struct list_head *from,
struct list_head *to)
{
struct list_head *from_last = from->prev;
list_splice_tail(from_last, to);
list_add_tail(from_last, to);
}
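/*
 * Illustration (sketch): if 'from' is A on a circular list A -> B -> C, then
 * from_last is C; list_splice_tail(C, to) splices A and B onto the tail of
 * 'to' (a splice excludes its "head" argument), and the explicit
 * list_add_tail(C, to) then appends C itself, so 'to' gains A, B, C in order.
 */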
static void rds_ib_cache_xfer_to_ready(struct rds_ib_refill_cache *cache)
{
struct list_head *tmp;
tmp = xchg(&cache->xfer, NULL);
if (tmp) {
if (cache->ready)
list_splice_entire_tail(tmp, cache->ready);
else
cache->ready = tmp;
}
}
static int rds_ib_recv_alloc_cache(struct rds_ib_refill_cache *cache, gfp_t gfp)
{
struct rds_ib_cache_head *head;
int cpu;
cache->percpu = alloc_percpu_gfp(struct rds_ib_cache_head, gfp);
if (!cache->percpu)
return -ENOMEM;
for_each_possible_cpu(cpu) {
head = per_cpu_ptr(cache->percpu, cpu);
head->first = NULL;
head->count = 0;
}
cache->xfer = NULL;
cache->ready = NULL;
return 0;
}
int rds_ib_recv_alloc_caches(struct rds_ib_connection *ic, gfp_t gfp)
{
int ret;
ret = rds_ib_recv_alloc_cache(&ic->i_cache_incs, gfp);
if (!ret) {
ret = rds_ib_recv_alloc_cache(&ic->i_cache_frags, gfp);
if (ret)
free_percpu(ic->i_cache_incs.percpu);
}
return ret;
}
static void rds_ib_cache_splice_all_lists(struct rds_ib_refill_cache *cache,
struct list_head *caller_list)
{
struct rds_ib_cache_head *head;
int cpu;
for_each_possible_cpu(cpu) {
head = per_cpu_ptr(cache->percpu, cpu);
if (head->first) {
list_splice_entire_tail(head->first, caller_list);
head->first = NULL;
}
}
if (cache->ready) {
list_splice_entire_tail(cache->ready, caller_list);
cache->ready = NULL;
}
}
void rds_ib_recv_free_caches(struct rds_ib_connection *ic)
{
struct rds_ib_incoming *inc;
struct rds_ib_incoming *inc_tmp;
struct rds_page_frag *frag;
struct rds_page_frag *frag_tmp;
LIST_HEAD(list);
rds_ib_cache_xfer_to_ready(&ic->i_cache_incs);
rds_ib_cache_splice_all_lists(&ic->i_cache_incs, &list);
free_percpu(ic->i_cache_incs.percpu);
list_for_each_entry_safe(inc, inc_tmp, &list, ii_cache_entry) {
list_del(&inc->ii_cache_entry);
WARN_ON(!list_empty(&inc->ii_frags));
kmem_cache_free(rds_ib_incoming_slab, inc);
atomic_dec(&rds_ib_allocation);
}
rds_ib_cache_xfer_to_ready(&ic->i_cache_frags);
rds_ib_cache_splice_all_lists(&ic->i_cache_frags, &list);
free_percpu(ic->i_cache_frags.percpu);
list_for_each_entry_safe(frag, frag_tmp, &list, f_cache_entry) {
list_del(&frag->f_cache_entry);
WARN_ON(!list_empty(&frag->f_item));
kmem_cache_free(rds_ib_frag_slab, frag);
}
}
/* fwd decl */
static void rds_ib_recv_cache_put(struct list_head *new_item,
struct rds_ib_refill_cache *cache);
static struct list_head *rds_ib_recv_cache_get(struct rds_ib_refill_cache *cache);
/* Recycle frag and attached recv buffer f_sg */
static void rds_ib_frag_free(struct rds_ib_connection *ic,
struct rds_page_frag *frag)
{
rdsdebug("frag %p page %p\n", frag, sg_page(&frag->f_sg));
rds_ib_recv_cache_put(&frag->f_cache_entry, &ic->i_cache_frags);
atomic_add(RDS_FRAG_SIZE / SZ_1K, &ic->i_cache_allocs);
rds_ib_stats_add(s_ib_recv_added_to_cache, RDS_FRAG_SIZE);
}
/* Recycle inc after freeing attached frags */
void rds_ib_inc_free(struct rds_incoming *inc)
{
struct rds_ib_incoming *ibinc;
struct rds_page_frag *frag;
struct rds_page_frag *pos;
struct rds_ib_connection *ic = inc->i_conn->c_transport_data;
ibinc = container_of(inc, struct rds_ib_incoming, ii_inc);
/* Free attached frags */
list_for_each_entry_safe(frag, pos, &ibinc->ii_frags, f_item) {
list_del_init(&frag->f_item);
rds_ib_frag_free(ic, frag);
}
BUG_ON(!list_empty(&ibinc->ii_frags));
rdsdebug("freeing ibinc %p inc %p\n", ibinc, inc);
rds_ib_recv_cache_put(&ibinc->ii_cache_entry, &ic->i_cache_incs);
}
static void rds_ib_recv_clear_one(struct rds_ib_connection *ic,
struct rds_ib_recv_work *recv)
{
if (recv->r_ibinc) {
rds_inc_put(&recv->r_ibinc->ii_inc);
recv->r_ibinc = NULL;
}
if (recv->r_frag) {
ib_dma_unmap_sg(ic->i_cm_id->device, &recv->r_frag->f_sg, 1, DMA_FROM_DEVICE);
rds_ib_frag_free(ic, recv->r_frag);
recv->r_frag = NULL;
}
}
void rds_ib_recv_clear_ring(struct rds_ib_connection *ic)
{
u32 i;
for (i = 0; i < ic->i_recv_ring.w_nr; i++)
rds_ib_recv_clear_one(ic, &ic->i_recvs[i]);
}
static struct rds_ib_incoming *rds_ib_refill_one_inc(struct rds_ib_connection *ic,
gfp_t slab_mask)
{
struct rds_ib_incoming *ibinc;
struct list_head *cache_item;
int avail_allocs;
cache_item = rds_ib_recv_cache_get(&ic->i_cache_incs);
if (cache_item) {
ibinc = container_of(cache_item, struct rds_ib_incoming, ii_cache_entry);
} else {
avail_allocs = atomic_add_unless(&rds_ib_allocation,
1, rds_ib_sysctl_max_recv_allocation);
if (!avail_allocs) {
rds_ib_stats_inc(s_ib_rx_alloc_limit);
return NULL;
}
ibinc = kmem_cache_alloc(rds_ib_incoming_slab, slab_mask);
if (!ibinc) {
atomic_dec(&rds_ib_allocation);
return NULL;
}
rds_ib_stats_inc(s_ib_rx_total_incs);
}
INIT_LIST_HEAD(&ibinc->ii_frags);
rds_inc_init(&ibinc->ii_inc, ic->conn, &ic->conn->c_faddr);
return ibinc;
}
static struct rds_page_frag *rds_ib_refill_one_frag(struct rds_ib_connection *ic,
gfp_t slab_mask, gfp_t page_mask)
{
struct rds_page_frag *frag;
struct list_head *cache_item;
int ret;
cache_item = rds_ib_recv_cache_get(&ic->i_cache_frags);
if (cache_item) {
frag = container_of(cache_item, struct rds_page_frag, f_cache_entry);
atomic_sub(RDS_FRAG_SIZE / SZ_1K, &ic->i_cache_allocs);
rds_ib_stats_add(s_ib_recv_removed_from_cache, RDS_FRAG_SIZE);
} else {
frag = kmem_cache_alloc(rds_ib_frag_slab, slab_mask);
if (!frag)
return NULL;
sg_init_table(&frag->f_sg, 1);
ret = rds_page_remainder_alloc(&frag->f_sg,
RDS_FRAG_SIZE, page_mask);
if (ret) {
kmem_cache_free(rds_ib_frag_slab, frag);
return NULL;
}
rds_ib_stats_inc(s_ib_rx_total_frags);
}
INIT_LIST_HEAD(&frag->f_item);
return frag;
}
static int rds_ib_recv_refill_one(struct rds_connection *conn,
struct rds_ib_recv_work *recv, gfp_t gfp)
{
struct rds_ib_connection *ic = conn->c_transport_data;
struct ib_sge *sge;
int ret = -ENOMEM;
gfp_t slab_mask = gfp;
gfp_t page_mask = gfp;
if (gfp & __GFP_DIRECT_RECLAIM) {
slab_mask = GFP_KERNEL;
page_mask = GFP_HIGHUSER;
}
if (!ic->i_cache_incs.ready)
rds_ib_cache_xfer_to_ready(&ic->i_cache_incs);
if (!ic->i_cache_frags.ready)
rds_ib_cache_xfer_to_ready(&ic->i_cache_frags);
/*
* ibinc was taken from recv if recv contained the start of a message.
* recvs that were continuations will still have this allocated.
*/
if (!recv->r_ibinc) {
recv->r_ibinc = rds_ib_refill_one_inc(ic, slab_mask);
if (!recv->r_ibinc)
goto out;
}
WARN_ON(recv->r_frag); /* leak! */
recv->r_frag = rds_ib_refill_one_frag(ic, slab_mask, page_mask);
if (!recv->r_frag)
goto out;
ret = ib_dma_map_sg(ic->i_cm_id->device, &recv->r_frag->f_sg,
1, DMA_FROM_DEVICE);
WARN_ON(ret != 1);
sge = &recv->r_sge[0];
sge->addr = ic->i_recv_hdrs_dma[recv - ic->i_recvs];
sge->length = sizeof(struct rds_header);
sge = &recv->r_sge[1];
sge->addr = sg_dma_address(&recv->r_frag->f_sg);
sge->length = sg_dma_len(&recv->r_frag->f_sg);
ret = 0;
out:
return ret;
}
static int acquire_refill(struct rds_connection *conn)
{
return test_and_set_bit(RDS_RECV_REFILL, &conn->c_flags) == 0;
}
static void release_refill(struct rds_connection *conn)
{
clear_bit(RDS_RECV_REFILL, &conn->c_flags);
smp_mb__after_atomic();
/* We don't use wait_on_bit()/wake_up_bit() because our waking is in a
* hot path and finding waiters is very rare. We don't want to walk
* the system-wide hashed waitqueue buckets in the fast path only to
* almost never find waiters.
*/
if (waitqueue_active(&conn->c_waitq))
wake_up_all(&conn->c_waitq);
}
/*
* This tries to allocate and post unused work requests after making sure that
* they have all the allocations they need to queue received fragments into
* sockets.
*/
void rds_ib_recv_refill(struct rds_connection *conn, int prefill, gfp_t gfp)
{
struct rds_ib_connection *ic = conn->c_transport_data;
struct rds_ib_recv_work *recv;
unsigned int posted = 0;
int ret = 0;
bool can_wait = !!(gfp & __GFP_DIRECT_RECLAIM);
bool must_wake = false;
u32 pos;
/* the goal here is to just make sure that someone, somewhere
* is posting buffers. If we can't get the refill lock,
* let them do their thing
*/
if (!acquire_refill(conn))
return;
while ((prefill || rds_conn_up(conn)) &&
rds_ib_ring_alloc(&ic->i_recv_ring, 1, &pos)) {
if (pos >= ic->i_recv_ring.w_nr) {
printk(KERN_NOTICE "Argh - ring alloc returned pos=%u\n",
pos);
break;
}
recv = &ic->i_recvs[pos];
ret = rds_ib_recv_refill_one(conn, recv, gfp);
if (ret) {
must_wake = true;
break;
}
rdsdebug("recv %p ibinc %p page %p addr %lu\n", recv,
recv->r_ibinc, sg_page(&recv->r_frag->f_sg),
(long)sg_dma_address(&recv->r_frag->f_sg));
/* XXX when can this fail? */
ret = ib_post_recv(ic->i_cm_id->qp, &recv->r_wr, NULL);
if (ret) {
rds_ib_conn_error(conn, "recv post on "
"%pI6c returned %d, disconnecting and "
"reconnecting\n", &conn->c_faddr,
ret);
break;
}
posted++;
if ((posted > 128 && need_resched()) || posted > 8192) {
must_wake = true;
break;
}
}
/* We're doing flow control - update the window. */
if (ic->i_flowctl && posted)
rds_ib_advertise_credits(conn, posted);
if (ret)
rds_ib_ring_unalloc(&ic->i_recv_ring, 1);
release_refill(conn);
/* if we're called from the softirq handler, we'll be GFP_NOWAIT.
* in this case the ring being low is going to lead to more interrupts
* and we can safely let the softirq code take care of it unless the
* ring is completely empty.
*
* if we're called from krdsd, we'll be GFP_KERNEL. In this case
* we might have raced with the softirq code while we had the refill
* lock held. Use rds_ib_ring_low() instead of ring_empty to decide
* if we should requeue.
*/
if (rds_conn_up(conn) &&
(must_wake ||
(can_wait && rds_ib_ring_low(&ic->i_recv_ring)) ||
rds_ib_ring_empty(&ic->i_recv_ring))) {
queue_delayed_work(rds_wq, &conn->c_recv_w, 1);
}
if (can_wait)
cond_resched();
}
/*
* We want to recycle several types of recv allocations, like incs and frags.
* To use this, the *_free() function passes in the ptr to a list_head within
* the recyclee, as well as the cache to put it on.
*
* First, we put the memory on a percpu list. When this reaches a certain size,
* We move it to an intermediate non-percpu list in a lockless manner, with some
* xchg/compxchg wizardry.
*
* N.B. Instead of a list_head as the anchor, we use a single pointer, which can
* be NULL and xchg'd. The list is actually empty when the pointer is NULL, and
* list_empty() will return true even when one element is actually present.
*/
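/*
 * Illustrative detail for the N.B. above (sketch only): after the first put
 * on an empty per-cpu list we INIT_LIST_HEAD(new_item), so the "list" is a
 * single node pointing at itself:
 *
 *	new_item->next == new_item->prev == new_item
 *
 * hence list_empty(new_item) is true even though one element is present;
 * emptiness is really tracked by the anchor pointer being NULL.
 */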
static void rds_ib_recv_cache_put(struct list_head *new_item,
struct rds_ib_refill_cache *cache)
{
unsigned long flags;
struct list_head *old, *chpfirst;
local_irq_save(flags);
chpfirst = __this_cpu_read(cache->percpu->first);
if (!chpfirst)
INIT_LIST_HEAD(new_item);
else /* put on front */
list_add_tail(new_item, chpfirst);
__this_cpu_write(cache->percpu->first, new_item);
__this_cpu_inc(cache->percpu->count);
if (__this_cpu_read(cache->percpu->count) < RDS_IB_RECYCLE_BATCH_COUNT)
goto end;
/*
* Return our per-cpu first list to the cache's xfer by atomically
* grabbing the current xfer list, appending it to our per-cpu list,
* and then atomically returning that entire list back to the
* cache's xfer list as long as it's still empty.
*/
do {
old = xchg(&cache->xfer, NULL);
if (old)
list_splice_entire_tail(old, chpfirst);
old = cmpxchg(&cache->xfer, NULL, chpfirst);
} while (old);
__this_cpu_write(cache->percpu->first, NULL);
__this_cpu_write(cache->percpu->count, 0);
end:
local_irq_restore(flags);
}
static struct list_head *rds_ib_recv_cache_get(struct rds_ib_refill_cache *cache)
{
struct list_head *head = cache->ready;
if (head) {
if (!list_empty(head)) {
cache->ready = head->next;
list_del_init(head);
} else
cache->ready = NULL;
}
return head;
}
int rds_ib_inc_copy_to_user(struct rds_incoming *inc, struct iov_iter *to)
{
struct rds_ib_incoming *ibinc;
struct rds_page_frag *frag;
unsigned long to_copy;
unsigned long frag_off = 0;
int copied = 0;
int ret;
u32 len;
ibinc = container_of(inc, struct rds_ib_incoming, ii_inc);
frag = list_entry(ibinc->ii_frags.next, struct rds_page_frag, f_item);
len = be32_to_cpu(inc->i_hdr.h_len);
while (iov_iter_count(to) && copied < len) {
if (frag_off == RDS_FRAG_SIZE) {
frag = list_entry(frag->f_item.next,
struct rds_page_frag, f_item);
frag_off = 0;
}
to_copy = min_t(unsigned long, iov_iter_count(to),
RDS_FRAG_SIZE - frag_off);
to_copy = min_t(unsigned long, to_copy, len - copied);
/* XXX needs + offset for multiple recvs per page */
rds_stats_add(s_copy_to_user, to_copy);
ret = copy_page_to_iter(sg_page(&frag->f_sg),
frag->f_sg.offset + frag_off,
to_copy,
to);
if (ret != to_copy)
return -EFAULT;
frag_off += to_copy;
copied += to_copy;
}
return copied;
}
/* ic starts out kzalloc()ed */
void rds_ib_recv_init_ack(struct rds_ib_connection *ic)
{
struct ib_send_wr *wr = &ic->i_ack_wr;
struct ib_sge *sge = &ic->i_ack_sge;
sge->addr = ic->i_ack_dma;
sge->length = sizeof(struct rds_header);
sge->lkey = ic->i_pd->local_dma_lkey;
wr->sg_list = sge;
wr->num_sge = 1;
wr->opcode = IB_WR_SEND;
wr->wr_id = RDS_IB_ACK_WR_ID;
wr->send_flags = IB_SEND_SIGNALED | IB_SEND_SOLICITED;
}
/*
* You'd think that with reliable IB connections you wouldn't need to ack
* messages that have been received. The problem is that IB hardware generates
* an ack message before it has DMAed the message into memory. This creates a
* potential message loss if the HCA is disabled for any reason between when it
* sends the ack and before the message is DMAed and processed. This is only a
* potential issue if another HCA is available for fail-over.
*
* When the remote host receives our ack they'll free the sent message from
* their send queue. To decrease the latency of this we always send an ack
* immediately after we've received messages.
*
* For simplicity, we only have one ack in flight at a time. This puts
* pressure on senders to have deep enough send queues to absorb the latency of
* a single ack frame being in flight. This might not be good enough.
*
* This is implemented by having a long-lived send_wr and sge which point to a
* statically allocated ack frame. This ack wr does not fall under the ring
* accounting that the tx and rx wrs do. The QP attribute specifically makes
* room for it beyond the ring size. Send completion notices its special
* wr_id and avoids working with the ring in that case.
*/
#ifndef KERNEL_HAS_ATOMIC64
void rds_ib_set_ack(struct rds_ib_connection *ic, u64 seq, int ack_required)
{
unsigned long flags;
spin_lock_irqsave(&ic->i_ack_lock, flags);
ic->i_ack_next = seq;
if (ack_required)
set_bit(IB_ACK_REQUESTED, &ic->i_ack_flags);
spin_unlock_irqrestore(&ic->i_ack_lock, flags);
}
static u64 rds_ib_get_ack(struct rds_ib_connection *ic)
{
unsigned long flags;
u64 seq;
clear_bit(IB_ACK_REQUESTED, &ic->i_ack_flags);
spin_lock_irqsave(&ic->i_ack_lock, flags);
seq = ic->i_ack_next;
spin_unlock_irqrestore(&ic->i_ack_lock, flags);
return seq;
}
#else
void rds_ib_set_ack(struct rds_ib_connection *ic, u64 seq, int ack_required)
{
atomic64_set(&ic->i_ack_next, seq);
if (ack_required) {
smp_mb__before_atomic();
set_bit(IB_ACK_REQUESTED, &ic->i_ack_flags);
}
}
static u64 rds_ib_get_ack(struct rds_ib_connection *ic)
{
clear_bit(IB_ACK_REQUESTED, &ic->i_ack_flags);
smp_mb__after_atomic();
return atomic64_read(&ic->i_ack_next);
}
#endif
static void rds_ib_send_ack(struct rds_ib_connection *ic, unsigned int adv_credits)
{
struct rds_header *hdr = ic->i_ack;
u64 seq;
int ret;
seq = rds_ib_get_ack(ic);
rdsdebug("send_ack: ic %p ack %llu\n", ic, (unsigned long long) seq);
ib_dma_sync_single_for_cpu(ic->rds_ibdev->dev, ic->i_ack_dma,
sizeof(*hdr), DMA_TO_DEVICE);
rds_message_populate_header(hdr, 0, 0, 0);
hdr->h_ack = cpu_to_be64(seq);
hdr->h_credit = adv_credits;
rds_message_make_checksum(hdr);
ib_dma_sync_single_for_device(ic->rds_ibdev->dev, ic->i_ack_dma,
sizeof(*hdr), DMA_TO_DEVICE);
ic->i_ack_queued = jiffies;
ret = ib_post_send(ic->i_cm_id->qp, &ic->i_ack_wr, NULL);
if (unlikely(ret)) {
/* Failed to send. Release the WR, and
* force another ACK.
*/
clear_bit(IB_ACK_IN_FLIGHT, &ic->i_ack_flags);
set_bit(IB_ACK_REQUESTED, &ic->i_ack_flags);
rds_ib_stats_inc(s_ib_ack_send_failure);
rds_ib_conn_error(ic->conn, "sending ack failed\n");
} else
rds_ib_stats_inc(s_ib_ack_sent);
}
/*
* There are 3 ways of getting acknowledgements to the peer:
* 1. We call rds_ib_attempt_ack from the recv completion handler
* to send an ACK-only frame.
* However, there can be only one such frame in the send queue
* at any time, so we may have to postpone it.
* 2. When another (data) packet is transmitted while there's
* an ACK in the queue, we piggyback the ACK sequence number
* on the data packet.
* 3. If the ACK WR is done sending, we get called from the
* send queue completion handler, and check whether there's
* another ACK pending (postponed because the WR was on the
* queue). If so, we transmit it.
*
* We maintain 2 variables:
* - i_ack_flags, which keeps track of whether the ACK WR
* is currently in the send queue or not (IB_ACK_IN_FLIGHT)
* - i_ack_next, which is the last sequence number we received
*
* Potentially, send queue and receive queue handlers can run concurrently.
* It would be nice to not have to use a spinlock to synchronize things,
* but the one problem that rules this out is that 64bit updates are
* not atomic on all platforms. Things would be a lot simpler if
* we had atomic64 or maybe cmpxchg64 everywhere.
*
* Reconnecting complicates this picture just slightly. When we
* reconnect, we may be seeing duplicate packets. The peer
* is retransmitting them, because it hasn't seen an ACK for
* them. It is important that we ACK these.
*
* ACK mitigation adds a header flag "ACK_REQUIRED"; any packet with
* this flag set *MUST* be acknowledged immediately.
*/
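/*
 * Rough summary of the flag interplay implemented below (sketch only):
 *
 *	rx completion:    rds_ib_set_ack() records the sequence in i_ack_next
 *	                  and may set IB_ACK_REQUESTED
 *	attempt_ack():    REQUESTED set && IN_FLIGHT newly taken -> post ACK WR
 *	data transmit:    rds_ib_piggyb_ack() clears REQUESTED and piggybacks
 *	                  the sequence number on the data packet instead
 *	ACK WR complete:  clear IN_FLIGHT and re-run rds_ib_attempt_ack()
 */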
/*
* When we get here, we're called from the recv queue handler.
* Check whether we ought to transmit an ACK.
*/
void rds_ib_attempt_ack(struct rds_ib_connection *ic)
{
unsigned int adv_credits;
if (!test_bit(IB_ACK_REQUESTED, &ic->i_ack_flags))
return;
if (test_and_set_bit(IB_ACK_IN_FLIGHT, &ic->i_ack_flags)) {
rds_ib_stats_inc(s_ib_ack_send_delayed);
return;
}
/* Can we get a send credit? */
if (!rds_ib_send_grab_credits(ic, 1, &adv_credits, 0, RDS_MAX_ADV_CREDIT)) {
rds_ib_stats_inc(s_ib_tx_throttle);
clear_bit(IB_ACK_IN_FLIGHT, &ic->i_ack_flags);
return;
}
clear_bit(IB_ACK_REQUESTED, &ic->i_ack_flags);
rds_ib_send_ack(ic, adv_credits);
}
/*
* We get here from the send completion handler, when the
* adapter tells us the ACK frame was sent.
*/
void rds_ib_ack_send_complete(struct rds_ib_connection *ic)
{
clear_bit(IB_ACK_IN_FLIGHT, &ic->i_ack_flags);
rds_ib_attempt_ack(ic);
}
/*
* This is called by the regular xmit code when it wants to piggyback
* an ACK on an outgoing frame.
*/
u64 rds_ib_piggyb_ack(struct rds_ib_connection *ic)
{
if (test_and_clear_bit(IB_ACK_REQUESTED, &ic->i_ack_flags))
rds_ib_stats_inc(s_ib_ack_send_piggybacked);
return rds_ib_get_ack(ic);
}
/*
* It's kind of lame that we're copying from the posted receive pages into
* long-lived bitmaps. We could have posted the bitmaps and rdma written into
* them. But receiving new congestion bitmaps should be a *rare* event, so
* hopefully we won't need to invest that complexity in making it more
* efficient. By copying we can share a simpler core with TCP which has to
* copy.
*/
static void rds_ib_cong_recv(struct rds_connection *conn,
struct rds_ib_incoming *ibinc)
{
struct rds_cong_map *map;
unsigned int map_off;
unsigned int map_page;
struct rds_page_frag *frag;
unsigned long frag_off;
unsigned long to_copy;
unsigned long copied;
__le64 uncongested = 0;
void *addr;
/* catch completely corrupt packets */
if (be32_to_cpu(ibinc->ii_inc.i_hdr.h_len) != RDS_CONG_MAP_BYTES)
return;
map = conn->c_fcong;
map_page = 0;
map_off = 0;
frag = list_entry(ibinc->ii_frags.next, struct rds_page_frag, f_item);
frag_off = 0;
copied = 0;
while (copied < RDS_CONG_MAP_BYTES) {
__le64 *src, *dst;
unsigned int k;
to_copy = min(RDS_FRAG_SIZE - frag_off, PAGE_SIZE - map_off);
BUG_ON(to_copy & 7); /* Must be 64bit aligned. */
addr = kmap_atomic(sg_page(&frag->f_sg));
src = addr + frag->f_sg.offset + frag_off;
dst = (void *)map->m_page_addrs[map_page] + map_off;
for (k = 0; k < to_copy; k += 8) {
/* Record ports that became uncongested, ie
* bits that changed from 0 to 1. */
uncongested |= ~(*src) & *dst;
*dst++ = *src++;
}
kunmap_atomic(addr);
copied += to_copy;
map_off += to_copy;
if (map_off == PAGE_SIZE) {
map_off = 0;
map_page++;
}
frag_off += to_copy;
if (frag_off == RDS_FRAG_SIZE) {
frag = list_entry(frag->f_item.next,
struct rds_page_frag, f_item);
frag_off = 0;
}
}
/* the congestion map is in little endian order */
rds_cong_map_updated(map, le64_to_cpu(uncongested));
}
static void rds_ib_process_recv(struct rds_connection *conn,
struct rds_ib_recv_work *recv, u32 data_len,
struct rds_ib_ack_state *state)
{
struct rds_ib_connection *ic = conn->c_transport_data;
struct rds_ib_incoming *ibinc = ic->i_ibinc;
struct rds_header *ihdr, *hdr;
dma_addr_t dma_addr = ic->i_recv_hdrs_dma[recv - ic->i_recvs];
/* XXX shut down the connection if port 0,0 are seen? */
rdsdebug("ic %p ibinc %p recv %p byte len %u\n", ic, ibinc, recv,
data_len);
if (data_len < sizeof(struct rds_header)) {
rds_ib_conn_error(conn, "incoming message "
"from %pI6c didn't include a "
"header, disconnecting and "
"reconnecting\n",
&conn->c_faddr);
return;
}
data_len -= sizeof(struct rds_header);
ihdr = ic->i_recv_hdrs[recv - ic->i_recvs];
ib_dma_sync_single_for_cpu(ic->rds_ibdev->dev, dma_addr,
sizeof(*ihdr), DMA_FROM_DEVICE);
/* Validate the checksum. */
if (!rds_message_verify_checksum(ihdr)) {
rds_ib_conn_error(conn, "incoming message "
"from %pI6c has corrupted header - "
"forcing a reconnect\n",
&conn->c_faddr);
rds_stats_inc(s_recv_drop_bad_checksum);
goto done;
}
/* Process the ACK sequence which comes with every packet */
state->ack_recv = be64_to_cpu(ihdr->h_ack);
state->ack_recv_valid = 1;
/* Process the credits update if there was one */
if (ihdr->h_credit)
rds_ib_send_add_credits(conn, ihdr->h_credit);
if (ihdr->h_sport == 0 && ihdr->h_dport == 0 && data_len == 0) {
		/* This is an ACK-only packet. The reason it gets
		 * special treatment here is that historically, ACKs
		 * were rather special beasts.
		 */
rds_ib_stats_inc(s_ib_ack_received);
/*
* Usually the frags make their way on to incs and are then freed as
* the inc is freed. We don't go that route, so we have to drop the
* page ref ourselves. We can't just leave the page on the recv
* because that confuses the dma mapping of pages and each recv's use
* of a partial page.
*
* FIXME: Fold this into the code path below.
*/
rds_ib_frag_free(ic, recv->r_frag);
recv->r_frag = NULL;
goto done;
}
/*
* If we don't already have an inc on the connection then this
* fragment has a header and starts a message.. copy its header
* into the inc and save the inc so we can hang upcoming fragments
* off its list.
*/
if (!ibinc) {
ibinc = recv->r_ibinc;
recv->r_ibinc = NULL;
ic->i_ibinc = ibinc;
hdr = &ibinc->ii_inc.i_hdr;
ibinc->ii_inc.i_rx_lat_trace[RDS_MSG_RX_HDR] =
local_clock();
memcpy(hdr, ihdr, sizeof(*hdr));
ic->i_recv_data_rem = be32_to_cpu(hdr->h_len);
ibinc->ii_inc.i_rx_lat_trace[RDS_MSG_RX_START] =
local_clock();
rdsdebug("ic %p ibinc %p rem %u flag 0x%x\n", ic, ibinc,
ic->i_recv_data_rem, hdr->h_flags);
} else {
hdr = &ibinc->ii_inc.i_hdr;
/* We can't just use memcmp here; fragments of a
* single message may carry different ACKs */
if (hdr->h_sequence != ihdr->h_sequence ||
hdr->h_len != ihdr->h_len ||
hdr->h_sport != ihdr->h_sport ||
hdr->h_dport != ihdr->h_dport) {
rds_ib_conn_error(conn,
"fragment header mismatch; forcing reconnect\n");
goto done;
}
}
list_add_tail(&recv->r_frag->f_item, &ibinc->ii_frags);
recv->r_frag = NULL;
if (ic->i_recv_data_rem > RDS_FRAG_SIZE)
ic->i_recv_data_rem -= RDS_FRAG_SIZE;
else {
ic->i_recv_data_rem = 0;
ic->i_ibinc = NULL;
if (ibinc->ii_inc.i_hdr.h_flags == RDS_FLAG_CONG_BITMAP) {
rds_ib_cong_recv(conn, ibinc);
} else {
rds_recv_incoming(conn, &conn->c_faddr, &conn->c_laddr,
&ibinc->ii_inc, GFP_ATOMIC);
state->ack_next = be64_to_cpu(hdr->h_sequence);
state->ack_next_valid = 1;
}
/* Evaluate the ACK_REQUIRED flag *after* we received
* the complete frame, and after bumping the next_rx
* sequence. */
if (hdr->h_flags & RDS_FLAG_ACK_REQUIRED) {
rds_stats_inc(s_recv_ack_required);
state->ack_required = 1;
}
rds_inc_put(&ibinc->ii_inc);
}
done:
ib_dma_sync_single_for_device(ic->rds_ibdev->dev, dma_addr,
sizeof(*ihdr), DMA_FROM_DEVICE);
}
void rds_ib_recv_cqe_handler(struct rds_ib_connection *ic,
struct ib_wc *wc,
struct rds_ib_ack_state *state)
{
struct rds_connection *conn = ic->conn;
struct rds_ib_recv_work *recv;
rdsdebug("wc wr_id 0x%llx status %u (%s) byte_len %u imm_data %u\n",
(unsigned long long)wc->wr_id, wc->status,
ib_wc_status_msg(wc->status), wc->byte_len,
be32_to_cpu(wc->ex.imm_data));
rds_ib_stats_inc(s_ib_rx_cq_event);
recv = &ic->i_recvs[rds_ib_ring_oldest(&ic->i_recv_ring)];
ib_dma_unmap_sg(ic->i_cm_id->device, &recv->r_frag->f_sg, 1,
DMA_FROM_DEVICE);
/* Also process recvs in connecting state because it is possible
* to get a recv completion _before_ the rdmacm ESTABLISHED
* event is processed.
*/
if (wc->status == IB_WC_SUCCESS) {
rds_ib_process_recv(conn, recv, wc->byte_len, state);
} else {
/* We expect errors as the qp is drained during shutdown */
if (rds_conn_up(conn) || rds_conn_connecting(conn))
rds_ib_conn_error(conn, "recv completion on <%pI6c,%pI6c, %d> had status %u (%s), vendor err 0x%x, disconnecting and reconnecting\n",
&conn->c_laddr, &conn->c_faddr,
conn->c_tos, wc->status,
ib_wc_status_msg(wc->status),
wc->vendor_err);
}
/* rds_ib_process_recv() doesn't always consume the frag, and
* we might not have called it at all if the wc didn't indicate
* success. We already unmapped the frag's pages, though, and
* the following rds_ib_ring_free() call tells the refill path
* that it will not find an allocated frag here. Make sure we
* keep that promise by freeing a frag that's still on the ring.
*/
if (recv->r_frag) {
rds_ib_frag_free(ic, recv->r_frag);
recv->r_frag = NULL;
}
rds_ib_ring_free(&ic->i_recv_ring, 1);
/* If we ever end up with a really empty receive ring, we're
* in deep trouble, as the sender will definitely see RNR
* timeouts. */
if (rds_ib_ring_empty(&ic->i_recv_ring))
rds_ib_stats_inc(s_ib_rx_ring_empty);
if (rds_ib_ring_low(&ic->i_recv_ring)) {
rds_ib_recv_refill(conn, 0, GFP_NOWAIT | __GFP_NOWARN);
rds_ib_stats_inc(s_ib_rx_refill_from_cq);
}
}
int rds_ib_recv_path(struct rds_conn_path *cp)
{
struct rds_connection *conn = cp->cp_conn;
struct rds_ib_connection *ic = conn->c_transport_data;
rdsdebug("conn %p\n", conn);
if (rds_conn_up(conn)) {
rds_ib_attempt_ack(ic);
rds_ib_recv_refill(conn, 0, GFP_KERNEL);
rds_ib_stats_inc(s_ib_rx_refill_from_thread);
}
return 0;
}
int rds_ib_recv_init(void)
{
struct sysinfo si;
int ret = -ENOMEM;
	/* Default to roughly one third of all available RAM for recv memory */
si_meminfo(&si);
rds_ib_sysctl_max_recv_allocation = si.totalram / 3 * PAGE_SIZE / RDS_FRAG_SIZE;
rds_ib_incoming_slab =
kmem_cache_create_usercopy("rds_ib_incoming",
sizeof(struct rds_ib_incoming),
0, SLAB_HWCACHE_ALIGN,
offsetof(struct rds_ib_incoming,
ii_inc.i_usercopy),
sizeof(struct rds_inc_usercopy),
NULL);
if (!rds_ib_incoming_slab)
goto out;
rds_ib_frag_slab = kmem_cache_create("rds_ib_frag",
sizeof(struct rds_page_frag),
0, SLAB_HWCACHE_ALIGN, NULL);
if (!rds_ib_frag_slab) {
kmem_cache_destroy(rds_ib_incoming_slab);
rds_ib_incoming_slab = NULL;
} else
ret = 0;
out:
return ret;
}
void rds_ib_recv_exit(void)
{
WARN_ON(atomic_read(&rds_ib_allocation));
kmem_cache_destroy(rds_ib_incoming_slab);
kmem_cache_destroy(rds_ib_frag_slab);
}
| linux-master | net/rds/ib_recv.c |
/*
* Copyright (c) 2006, 2018 Oracle and/or its affiliates. All rights reserved.
*
* This software is available to you under a choice of one of two
* licenses. You may choose to be licensed under the terms of the GNU
* General Public License (GPL) Version 2, available from the file
* COPYING in the main directory of this source tree, or the
* OpenIB.org BSD license below:
*
* Redistribution and use in source and binary forms, with or
* without modification, are permitted provided that the following
* conditions are met:
*
* - Redistributions of source code must retain the above
* copyright notice, this list of conditions and the following
* disclaimer.
*
* - Redistributions in binary form must reproduce the above
* copyright notice, this list of conditions and the following
* disclaimer in the documentation and/or other materials
* provided with the distribution.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
* EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
* MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
* NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
* BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
* ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
* CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
* SOFTWARE.
*
*/
#include <linux/kernel.h>
#include <linux/random.h>
#include <linux/export.h>
#include "rds.h"
/*
* All of connection management is simplified by serializing it through
* work queues that execute in a connection managing thread.
*
* TCP wants to send acks through sendpage() in response to data_ready(),
* but it needs a process context to do so.
*
* The receive paths need to allocate but can't drop packets (!) so we have
* a thread around to block allocating if the receive fast path sees an
* allocation failure.
*/
/* Grand Unified Theory of connection life cycle:
* At any point in time, the connection can be in one of these states:
* DOWN, CONNECTING, UP, DISCONNECTING, ERROR
*
* The following transitions are possible:
* ANY -> ERROR
* UP -> DISCONNECTING
* ERROR -> DISCONNECTING
* DISCONNECTING -> DOWN
* DOWN -> CONNECTING
* CONNECTING -> UP
*
* Transition to state DISCONNECTING/DOWN:
* - Inside the shutdown worker; synchronizes with xmit path
* through RDS_IN_XMIT, and with connection management callbacks
* via c_cm_lock.
*
* For receive callbacks, we rely on the underlying transport
* (TCP, IB/RDMA) to provide the necessary synchronisation.
*/
struct workqueue_struct *rds_wq;
EXPORT_SYMBOL_GPL(rds_wq);
void rds_connect_path_complete(struct rds_conn_path *cp, int curr)
{
if (!rds_conn_path_transition(cp, curr, RDS_CONN_UP)) {
printk(KERN_WARNING "%s: Cannot transition to state UP, "
"current state is %d\n",
__func__,
atomic_read(&cp->cp_state));
rds_conn_path_drop(cp, false);
return;
}
rdsdebug("conn %p for %pI6c to %pI6c complete\n",
cp->cp_conn, &cp->cp_conn->c_laddr, &cp->cp_conn->c_faddr);
cp->cp_reconnect_jiffies = 0;
set_bit(0, &cp->cp_conn->c_map_queued);
rcu_read_lock();
if (!rds_destroy_pending(cp->cp_conn)) {
queue_delayed_work(rds_wq, &cp->cp_send_w, 0);
queue_delayed_work(rds_wq, &cp->cp_recv_w, 0);
}
rcu_read_unlock();
cp->cp_conn->c_proposed_version = RDS_PROTOCOL_VERSION;
}
EXPORT_SYMBOL_GPL(rds_connect_path_complete);
void rds_connect_complete(struct rds_connection *conn)
{
rds_connect_path_complete(&conn->c_path[0], RDS_CONN_CONNECTING);
}
EXPORT_SYMBOL_GPL(rds_connect_complete);
/*
* This random exponential backoff is relied on to eventually resolve racing
* connects.
*
* If connect attempts race then both parties drop both connections and come
* here to wait for a random amount of time before trying again. Eventually
* the backoff range will be so much greater than the time it takes to
* establish a connection that one of the pair will establish the connection
* before the other's random delay fires.
*
* Connection attempts that arrive while a connection is already established
* are also considered to be racing connects. This lets a connection from
* a rebooted machine replace an existing stale connection before the transport
* notices that the connection has failed.
*
* We should *always* start with a random backoff; otherwise a broken connection
* will always take several iterations to be re-established.
*/
void rds_queue_reconnect(struct rds_conn_path *cp)
{
unsigned long rand;
struct rds_connection *conn = cp->cp_conn;
rdsdebug("conn %p for %pI6c to %pI6c reconnect jiffies %lu\n",
conn, &conn->c_laddr, &conn->c_faddr,
cp->cp_reconnect_jiffies);
/* let peer with smaller addr initiate reconnect, to avoid duels */
if (conn->c_trans->t_type == RDS_TRANS_TCP &&
rds_addr_cmp(&conn->c_laddr, &conn->c_faddr) >= 0)
return;
set_bit(RDS_RECONNECT_PENDING, &cp->cp_flags);
if (cp->cp_reconnect_jiffies == 0) {
cp->cp_reconnect_jiffies = rds_sysctl_reconnect_min_jiffies;
rcu_read_lock();
if (!rds_destroy_pending(cp->cp_conn))
queue_delayed_work(rds_wq, &cp->cp_conn_w, 0);
rcu_read_unlock();
return;
}
get_random_bytes(&rand, sizeof(rand));
rdsdebug("%lu delay %lu ceil conn %p for %pI6c -> %pI6c\n",
rand % cp->cp_reconnect_jiffies, cp->cp_reconnect_jiffies,
conn, &conn->c_laddr, &conn->c_faddr);
rcu_read_lock();
if (!rds_destroy_pending(cp->cp_conn))
queue_delayed_work(rds_wq, &cp->cp_conn_w,
rand % cp->cp_reconnect_jiffies);
rcu_read_unlock();
cp->cp_reconnect_jiffies = min(cp->cp_reconnect_jiffies * 2,
rds_sysctl_reconnect_max_jiffies);
}
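/*
 * Illustrative sketch (not from the original source) of the delay window
 * chosen by rds_queue_reconnect() above, writing "min" and "max" for the
 * reconnect_min/reconnect_max sysctl values:
 *
 *	1st attempt: queued immediately (cp_reconnect_jiffies was 0)
 *	2nd attempt: rand % min
 *	3rd attempt: rand % (2 * min)
 *	4th attempt: rand % (4 * min)
 *	...
 *	later attempts: rand % max, once the doubled window exceeds max
 */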
void rds_connect_worker(struct work_struct *work)
{
struct rds_conn_path *cp = container_of(work,
struct rds_conn_path,
cp_conn_w.work);
struct rds_connection *conn = cp->cp_conn;
int ret;
if (cp->cp_index > 0 &&
rds_addr_cmp(&cp->cp_conn->c_laddr, &cp->cp_conn->c_faddr) >= 0)
return;
clear_bit(RDS_RECONNECT_PENDING, &cp->cp_flags);
ret = rds_conn_path_transition(cp, RDS_CONN_DOWN, RDS_CONN_CONNECTING);
if (ret) {
ret = conn->c_trans->conn_path_connect(cp);
rdsdebug("conn %p for %pI6c to %pI6c dispatched, ret %d\n",
conn, &conn->c_laddr, &conn->c_faddr, ret);
if (ret) {
if (rds_conn_path_transition(cp,
RDS_CONN_CONNECTING,
RDS_CONN_DOWN))
rds_queue_reconnect(cp);
else
rds_conn_path_error(cp, "connect failed\n");
}
}
}
void rds_send_worker(struct work_struct *work)
{
struct rds_conn_path *cp = container_of(work,
struct rds_conn_path,
cp_send_w.work);
int ret;
if (rds_conn_path_state(cp) == RDS_CONN_UP) {
clear_bit(RDS_LL_SEND_FULL, &cp->cp_flags);
ret = rds_send_xmit(cp);
cond_resched();
rdsdebug("conn %p ret %d\n", cp->cp_conn, ret);
switch (ret) {
case -EAGAIN:
rds_stats_inc(s_send_immediate_retry);
queue_delayed_work(rds_wq, &cp->cp_send_w, 0);
break;
case -ENOMEM:
rds_stats_inc(s_send_delayed_retry);
queue_delayed_work(rds_wq, &cp->cp_send_w, 2);
break;
default:
break;
}
}
}
void rds_recv_worker(struct work_struct *work)
{
struct rds_conn_path *cp = container_of(work,
struct rds_conn_path,
cp_recv_w.work);
int ret;
if (rds_conn_path_state(cp) == RDS_CONN_UP) {
ret = cp->cp_conn->c_trans->recv_path(cp);
rdsdebug("conn %p ret %d\n", cp->cp_conn, ret);
switch (ret) {
case -EAGAIN:
rds_stats_inc(s_recv_immediate_retry);
queue_delayed_work(rds_wq, &cp->cp_recv_w, 0);
break;
case -ENOMEM:
rds_stats_inc(s_recv_delayed_retry);
queue_delayed_work(rds_wq, &cp->cp_recv_w, 2);
break;
default:
break;
}
}
}
void rds_shutdown_worker(struct work_struct *work)
{
struct rds_conn_path *cp = container_of(work,
struct rds_conn_path,
cp_down_w);
rds_conn_shutdown(cp);
}
void rds_threads_exit(void)
{
destroy_workqueue(rds_wq);
}
int rds_threads_init(void)
{
rds_wq = create_singlethread_workqueue("krdsd");
if (!rds_wq)
return -ENOMEM;
return 0;
}
/* Compare two IPv6 addresses. Return 0 if the two addresses are equal.
* Return 1 if the first is greater. Return -1 if the second is greater.
*/
int rds_addr_cmp(const struct in6_addr *addr1,
const struct in6_addr *addr2)
{
#if defined(CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS) && BITS_PER_LONG == 64
const __be64 *a1, *a2;
u64 x, y;
a1 = (__be64 *)addr1;
a2 = (__be64 *)addr2;
if (*a1 != *a2) {
if (be64_to_cpu(*a1) < be64_to_cpu(*a2))
return -1;
else
return 1;
} else {
x = be64_to_cpu(*++a1);
y = be64_to_cpu(*++a2);
if (x < y)
return -1;
else if (x > y)
return 1;
else
return 0;
}
#else
u32 a, b;
int i;
for (i = 0; i < 4; i++) {
if (addr1->s6_addr32[i] != addr2->s6_addr32[i]) {
a = ntohl(addr1->s6_addr32[i]);
b = ntohl(addr2->s6_addr32[i]);
if (a < b)
return -1;
else if (a > b)
return 1;
}
}
return 0;
#endif
}
EXPORT_SYMBOL_GPL(rds_addr_cmp);
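/*
 * Usage sketch (illustrative, not from the original source):
 *
 *	rds_addr_cmp(::ffff:10.0.0.1, ::ffff:10.0.0.2) -> -1
 *	rds_addr_cmp(a, a)                             ->  0
 *
 * which is how rds_queue_reconnect() above lets only the peer with the
 * numerically lower address initiate a TCP reconnect.
 */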
| linux-master | net/rds/threads.c |
/*
* Copyright (c) 2009, 2018 Oracle and/or its affiliates. All rights reserved.
*
* This software is available to you under a choice of one of two
* licenses. You may choose to be licensed under the terms of the GNU
* General Public License (GPL) Version 2, available from the file
* COPYING in the main directory of this source tree, or the
* OpenIB.org BSD license below:
*
* Redistribution and use in source and binary forms, with or
* without modification, are permitted provided that the following
* conditions are met:
*
* - Redistributions of source code must retain the above
* copyright notice, this list of conditions and the following
* disclaimer.
*
* - Redistributions in binary form must reproduce the above
* copyright notice, this list of conditions and the following
* disclaimer in the documentation and/or other materials
* provided with the distribution.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
* EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
* MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
* NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
* BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
* ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
* CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
* SOFTWARE.
*
*/
#include <linux/module.h>
#include <rdma/rdma_cm.h>
#include "rds_single_path.h"
#include "rdma_transport.h"
#include "ib.h"
/* Global IPv4 and IPv6 RDS RDMA listener cm_id */
static struct rdma_cm_id *rds_rdma_listen_id;
#if IS_ENABLED(CONFIG_IPV6)
static struct rdma_cm_id *rds6_rdma_listen_id;
#endif
/* Per IB specification 7.7.3, service level is a 4-bit field. */
#define TOS_TO_SL(tos) ((tos) & 0xF)
static int rds_rdma_cm_event_handler_cmn(struct rdma_cm_id *cm_id,
struct rdma_cm_event *event,
bool isv6)
{
/* this can be null in the listening path */
struct rds_connection *conn = cm_id->context;
struct rds_transport *trans;
int ret = 0;
int *err;
u8 len;
rdsdebug("conn %p id %p handling event %u (%s)\n", conn, cm_id,
event->event, rdma_event_msg(event->event));
if (cm_id->device->node_type == RDMA_NODE_IB_CA)
trans = &rds_ib_transport;
/* Prevent shutdown from tearing down the connection
* while we're executing. */
if (conn) {
mutex_lock(&conn->c_cm_lock);
/* If the connection is being shut down, bail out
* right away. We return 0 so cm_id doesn't get
* destroyed prematurely */
if (rds_conn_state(conn) == RDS_CONN_DISCONNECTING) {
/* Reject incoming connections while we're tearing
* down an existing one. */
if (event->event == RDMA_CM_EVENT_CONNECT_REQUEST)
ret = 1;
goto out;
}
}
switch (event->event) {
case RDMA_CM_EVENT_CONNECT_REQUEST:
ret = trans->cm_handle_connect(cm_id, event, isv6);
break;
case RDMA_CM_EVENT_ADDR_RESOLVED:
if (conn) {
rdma_set_service_type(cm_id, conn->c_tos);
rdma_set_min_rnr_timer(cm_id, IB_RNR_TIMER_000_32);
/* XXX do we need to clean up if this fails? */
ret = rdma_resolve_route(cm_id,
RDS_RDMA_RESOLVE_TIMEOUT_MS);
}
break;
case RDMA_CM_EVENT_ROUTE_RESOLVED:
/* Connection could have been dropped so make sure the
* cm_id is valid before proceeding
*/
if (conn) {
struct rds_ib_connection *ibic;
ibic = conn->c_transport_data;
if (ibic && ibic->i_cm_id == cm_id) {
cm_id->route.path_rec[0].sl =
TOS_TO_SL(conn->c_tos);
ret = trans->cm_initiate_connect(cm_id, isv6);
} else {
rds_conn_drop(conn);
}
}
break;
case RDMA_CM_EVENT_ESTABLISHED:
if (conn)
trans->cm_connect_complete(conn, event);
break;
case RDMA_CM_EVENT_REJECTED:
if (!conn)
break;
err = (int *)rdma_consumer_reject_data(cm_id, event, &len);
if (!err ||
(err && len >= sizeof(*err) &&
((*err) <= RDS_RDMA_REJ_INCOMPAT))) {
pr_warn("RDS/RDMA: conn <%pI6c, %pI6c> rejected, dropping connection\n",
&conn->c_laddr, &conn->c_faddr);
if (!conn->c_tos)
conn->c_proposed_version = RDS_PROTOCOL_COMPAT_VERSION;
rds_conn_drop(conn);
}
rdsdebug("Connection rejected: %s\n",
rdma_reject_msg(cm_id, event->status));
break;
case RDMA_CM_EVENT_ADDR_ERROR:
case RDMA_CM_EVENT_ROUTE_ERROR:
case RDMA_CM_EVENT_CONNECT_ERROR:
case RDMA_CM_EVENT_UNREACHABLE:
case RDMA_CM_EVENT_DEVICE_REMOVAL:
case RDMA_CM_EVENT_ADDR_CHANGE:
if (conn)
rds_conn_drop(conn);
break;
case RDMA_CM_EVENT_DISCONNECTED:
if (!conn)
break;
rdsdebug("DISCONNECT event - dropping connection "
"%pI6c->%pI6c\n", &conn->c_laddr,
&conn->c_faddr);
rds_conn_drop(conn);
break;
case RDMA_CM_EVENT_TIMEWAIT_EXIT:
if (conn) {
pr_info("RDS: RDMA_CM_EVENT_TIMEWAIT_EXIT event: dropping connection %pI6c->%pI6c\n",
&conn->c_laddr, &conn->c_faddr);
rds_conn_drop(conn);
}
break;
default:
/* things like device disconnect? */
printk(KERN_ERR "RDS: unknown event %u (%s)!\n",
event->event, rdma_event_msg(event->event));
break;
}
out:
if (conn)
mutex_unlock(&conn->c_cm_lock);
rdsdebug("id %p event %u (%s) handling ret %d\n", cm_id, event->event,
rdma_event_msg(event->event), ret);
return ret;
}
int rds_rdma_cm_event_handler(struct rdma_cm_id *cm_id,
struct rdma_cm_event *event)
{
return rds_rdma_cm_event_handler_cmn(cm_id, event, false);
}
#if IS_ENABLED(CONFIG_IPV6)
int rds6_rdma_cm_event_handler(struct rdma_cm_id *cm_id,
struct rdma_cm_event *event)
{
return rds_rdma_cm_event_handler_cmn(cm_id, event, true);
}
#endif
static int rds_rdma_listen_init_common(rdma_cm_event_handler handler,
struct sockaddr *sa,
struct rdma_cm_id **ret_cm_id)
{
struct rdma_cm_id *cm_id;
int ret;
cm_id = rdma_create_id(&init_net, handler, NULL,
RDMA_PS_TCP, IB_QPT_RC);
if (IS_ERR(cm_id)) {
ret = PTR_ERR(cm_id);
printk(KERN_ERR "RDS/RDMA: failed to setup listener, "
"rdma_create_id() returned %d\n", ret);
return ret;
}
/*
* XXX I bet this binds the cm_id to a device. If we want to support
* fail-over we'll have to take this into consideration.
*/
ret = rdma_bind_addr(cm_id, sa);
if (ret) {
printk(KERN_ERR "RDS/RDMA: failed to setup listener, "
"rdma_bind_addr() returned %d\n", ret);
goto out;
}
ret = rdma_listen(cm_id, 128);
if (ret) {
printk(KERN_ERR "RDS/RDMA: failed to setup listener, "
"rdma_listen() returned %d\n", ret);
goto out;
}
rdsdebug("cm %p listening on port %u\n", cm_id, RDS_PORT);
*ret_cm_id = cm_id;
cm_id = NULL;
out:
if (cm_id)
rdma_destroy_id(cm_id);
return ret;
}
/* Initialize the RDS RDMA listeners. We create two listeners for
 * compatibility reasons. The one on RDS_PORT is used for IPv4
 * requests only. The one on RDS_CM_PORT is used for IPv6 requests
 * only, so only an IPv6-enabled RDS module will communicate using
 * this port.
 */
static int rds_rdma_listen_init(void)
{
int ret;
#if IS_ENABLED(CONFIG_IPV6)
struct sockaddr_in6 sin6;
#endif
struct sockaddr_in sin;
sin.sin_family = PF_INET;
sin.sin_addr.s_addr = htonl(INADDR_ANY);
sin.sin_port = htons(RDS_PORT);
ret = rds_rdma_listen_init_common(rds_rdma_cm_event_handler,
(struct sockaddr *)&sin,
&rds_rdma_listen_id);
if (ret != 0)
return ret;
#if IS_ENABLED(CONFIG_IPV6)
sin6.sin6_family = PF_INET6;
sin6.sin6_addr = in6addr_any;
sin6.sin6_port = htons(RDS_CM_PORT);
sin6.sin6_scope_id = 0;
sin6.sin6_flowinfo = 0;
ret = rds_rdma_listen_init_common(rds6_rdma_cm_event_handler,
(struct sockaddr *)&sin6,
&rds6_rdma_listen_id);
/* Keep going even when IPv6 is not enabled in the system. */
if (ret != 0)
rdsdebug("Cannot set up IPv6 RDMA listener\n");
#endif
return 0;
}
static void rds_rdma_listen_stop(void)
{
if (rds_rdma_listen_id) {
rdsdebug("cm %p\n", rds_rdma_listen_id);
rdma_destroy_id(rds_rdma_listen_id);
rds_rdma_listen_id = NULL;
}
#if IS_ENABLED(CONFIG_IPV6)
if (rds6_rdma_listen_id) {
rdsdebug("cm %p\n", rds6_rdma_listen_id);
rdma_destroy_id(rds6_rdma_listen_id);
rds6_rdma_listen_id = NULL;
}
#endif
}
static int __init rds_rdma_init(void)
{
int ret;
ret = rds_ib_init();
if (ret)
goto out;
ret = rds_rdma_listen_init();
if (ret)
rds_ib_exit();
out:
return ret;
}
module_init(rds_rdma_init);
static void __exit rds_rdma_exit(void)
{
/* stop listening first to ensure no new connections are attempted */
rds_rdma_listen_stop();
rds_ib_exit();
}
module_exit(rds_rdma_exit);
MODULE_AUTHOR("Oracle Corporation <[email protected]>");
MODULE_DESCRIPTION("RDS: IB transport");
MODULE_LICENSE("Dual BSD/GPL");
| linux-master | net/rds/rdma_transport.c |
/*
* Copyright (c) 2006, 2020 Oracle and/or its affiliates.
*
* This software is available to you under a choice of one of two
* licenses. You may choose to be licensed under the terms of the GNU
* General Public License (GPL) Version 2, available from the file
* COPYING in the main directory of this source tree, or the
* OpenIB.org BSD license below:
*
* Redistribution and use in source and binary forms, with or
* without modification, are permitted provided that the following
* conditions are met:
*
* - Redistributions of source code must retain the above
* copyright notice, this list of conditions and the following
* disclaimer.
*
* - Redistributions in binary form must reproduce the above
* copyright notice, this list of conditions and the following
* disclaimer in the documentation and/or other materials
* provided with the distribution.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
* EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
* MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
* NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
* BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
* ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
* CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
* SOFTWARE.
*
*/
#include <linux/kernel.h>
#include <linux/slab.h>
#include <linux/export.h>
#include <linux/skbuff.h>
#include <linux/list.h>
#include <linux/errqueue.h>
#include "rds.h"
static unsigned int rds_exthdr_size[__RDS_EXTHDR_MAX] = {
[RDS_EXTHDR_NONE] = 0,
[RDS_EXTHDR_VERSION] = sizeof(struct rds_ext_header_version),
[RDS_EXTHDR_RDMA] = sizeof(struct rds_ext_header_rdma),
[RDS_EXTHDR_RDMA_DEST] = sizeof(struct rds_ext_header_rdma_dest),
[RDS_EXTHDR_NPATHS] = sizeof(u16),
[RDS_EXTHDR_GEN_NUM] = sizeof(u32),
};
void rds_message_addref(struct rds_message *rm)
{
rdsdebug("addref rm %p ref %d\n", rm, refcount_read(&rm->m_refcount));
refcount_inc(&rm->m_refcount);
}
EXPORT_SYMBOL_GPL(rds_message_addref);
static inline bool rds_zcookie_add(struct rds_msg_zcopy_info *info, u32 cookie)
{
struct rds_zcopy_cookies *ck = &info->zcookies;
int ncookies = ck->num;
if (ncookies == RDS_MAX_ZCOOKIES)
return false;
ck->cookies[ncookies] = cookie;
ck->num = ++ncookies;
return true;
}
static struct rds_msg_zcopy_info *rds_info_from_znotifier(struct rds_znotifier *znotif)
{
return container_of(znotif, struct rds_msg_zcopy_info, znotif);
}
void rds_notify_msg_zcopy_purge(struct rds_msg_zcopy_queue *q)
{
unsigned long flags;
LIST_HEAD(copy);
struct rds_msg_zcopy_info *info, *tmp;
spin_lock_irqsave(&q->lock, flags);
	list_splice(&q->zcookie_head, &copy);
INIT_LIST_HEAD(&q->zcookie_head);
spin_unlock_irqrestore(&q->lock, flags);
	list_for_each_entry_safe(info, tmp, &copy, rs_zcookie_next) {
list_del(&info->rs_zcookie_next);
kfree(info);
}
}
static void rds_rm_zerocopy_callback(struct rds_sock *rs,
struct rds_znotifier *znotif)
{
struct rds_msg_zcopy_info *info;
struct rds_msg_zcopy_queue *q;
u32 cookie = znotif->z_cookie;
struct rds_zcopy_cookies *ck;
struct list_head *head;
unsigned long flags;
mm_unaccount_pinned_pages(&znotif->z_mmp);
q = &rs->rs_zcookie_queue;
spin_lock_irqsave(&q->lock, flags);
head = &q->zcookie_head;
if (!list_empty(head)) {
info = list_first_entry(head, struct rds_msg_zcopy_info,
rs_zcookie_next);
if (rds_zcookie_add(info, cookie)) {
spin_unlock_irqrestore(&q->lock, flags);
kfree(rds_info_from_znotifier(znotif));
/* caller invokes rds_wake_sk_sleep() */
return;
}
}
info = rds_info_from_znotifier(znotif);
ck = &info->zcookies;
memset(ck, 0, sizeof(*ck));
WARN_ON(!rds_zcookie_add(info, cookie));
list_add_tail(&info->rs_zcookie_next, &q->zcookie_head);
spin_unlock_irqrestore(&q->lock, flags);
/* caller invokes rds_wake_sk_sleep() */
}
/*
* This relies on dma_map_sg() not touching sg[].page during merging.
*/
static void rds_message_purge(struct rds_message *rm)
{
unsigned long i, flags;
bool zcopy = false;
if (unlikely(test_bit(RDS_MSG_PAGEVEC, &rm->m_flags)))
return;
spin_lock_irqsave(&rm->m_rs_lock, flags);
if (rm->m_rs) {
struct rds_sock *rs = rm->m_rs;
if (rm->data.op_mmp_znotifier) {
zcopy = true;
rds_rm_zerocopy_callback(rs, rm->data.op_mmp_znotifier);
rds_wake_sk_sleep(rs);
rm->data.op_mmp_znotifier = NULL;
}
sock_put(rds_rs_to_sk(rs));
rm->m_rs = NULL;
}
spin_unlock_irqrestore(&rm->m_rs_lock, flags);
for (i = 0; i < rm->data.op_nents; i++) {
/* XXX will have to put_page for page refs */
if (!zcopy)
__free_page(sg_page(&rm->data.op_sg[i]));
else
put_page(sg_page(&rm->data.op_sg[i]));
}
rm->data.op_nents = 0;
if (rm->rdma.op_active)
rds_rdma_free_op(&rm->rdma);
if (rm->rdma.op_rdma_mr)
kref_put(&rm->rdma.op_rdma_mr->r_kref, __rds_put_mr_final);
if (rm->atomic.op_active)
rds_atomic_free_op(&rm->atomic);
if (rm->atomic.op_rdma_mr)
kref_put(&rm->atomic.op_rdma_mr->r_kref, __rds_put_mr_final);
}
void rds_message_put(struct rds_message *rm)
{
rdsdebug("put rm %p ref %d\n", rm, refcount_read(&rm->m_refcount));
WARN(!refcount_read(&rm->m_refcount), "danger refcount zero on %p\n", rm);
if (refcount_dec_and_test(&rm->m_refcount)) {
BUG_ON(!list_empty(&rm->m_sock_item));
BUG_ON(!list_empty(&rm->m_conn_item));
rds_message_purge(rm);
kfree(rm);
}
}
EXPORT_SYMBOL_GPL(rds_message_put);
void rds_message_populate_header(struct rds_header *hdr, __be16 sport,
__be16 dport, u64 seq)
{
hdr->h_flags = 0;
hdr->h_sport = sport;
hdr->h_dport = dport;
hdr->h_sequence = cpu_to_be64(seq);
hdr->h_exthdr[0] = RDS_EXTHDR_NONE;
}
EXPORT_SYMBOL_GPL(rds_message_populate_header);
int rds_message_add_extension(struct rds_header *hdr, unsigned int type,
const void *data, unsigned int len)
{
unsigned int ext_len = sizeof(u8) + len;
unsigned char *dst;
/* For now, refuse to add more than one extension header */
if (hdr->h_exthdr[0] != RDS_EXTHDR_NONE)
return 0;
if (type >= __RDS_EXTHDR_MAX || len != rds_exthdr_size[type])
return 0;
if (ext_len >= RDS_HEADER_EXT_SPACE)
return 0;
dst = hdr->h_exthdr;
*dst++ = type;
memcpy(dst, data, len);
dst[len] = RDS_EXTHDR_NONE;
return 1;
}
EXPORT_SYMBOL_GPL(rds_message_add_extension);
/*
* If a message has extension headers, retrieve them here.
* Call like this:
*
* unsigned int pos = 0;
*
* while (1) {
* buflen = sizeof(buffer);
* type = rds_message_next_extension(hdr, &pos, buffer, &buflen);
* if (type == RDS_EXTHDR_NONE)
* break;
* ...
* }
*/
int rds_message_next_extension(struct rds_header *hdr,
unsigned int *pos, void *buf, unsigned int *buflen)
{
unsigned int offset, ext_type, ext_len;
u8 *src = hdr->h_exthdr;
offset = *pos;
if (offset >= RDS_HEADER_EXT_SPACE)
goto none;
/* Get the extension type and length. For now, the
* length is implied by the extension type. */
ext_type = src[offset++];
if (ext_type == RDS_EXTHDR_NONE || ext_type >= __RDS_EXTHDR_MAX)
goto none;
ext_len = rds_exthdr_size[ext_type];
if (offset + ext_len > RDS_HEADER_EXT_SPACE)
goto none;
*pos = offset + ext_len;
if (ext_len < *buflen)
*buflen = ext_len;
memcpy(buf, src + offset, *buflen);
return ext_type;
none:
*pos = RDS_HEADER_EXT_SPACE;
*buflen = 0;
return RDS_EXTHDR_NONE;
}
int rds_message_add_rdma_dest_extension(struct rds_header *hdr, u32 r_key, u32 offset)
{
struct rds_ext_header_rdma_dest ext_hdr;
ext_hdr.h_rdma_rkey = cpu_to_be32(r_key);
ext_hdr.h_rdma_offset = cpu_to_be32(offset);
return rds_message_add_extension(hdr, RDS_EXTHDR_RDMA_DEST, &ext_hdr, sizeof(ext_hdr));
}
EXPORT_SYMBOL_GPL(rds_message_add_rdma_dest_extension);
/*
* Each rds_message is allocated with extra space for the scatterlist entries
* rds ops will need. This is to minimize memory allocation count. Then, each rds op
* can grab SGs when initializing its part of the rds_message.
*/
struct rds_message *rds_message_alloc(unsigned int extra_len, gfp_t gfp)
{
struct rds_message *rm;
if (extra_len > KMALLOC_MAX_SIZE - sizeof(struct rds_message))
return NULL;
rm = kzalloc(sizeof(struct rds_message) + extra_len, gfp);
if (!rm)
goto out;
rm->m_used_sgs = 0;
rm->m_total_sgs = extra_len / sizeof(struct scatterlist);
refcount_set(&rm->m_refcount, 1);
INIT_LIST_HEAD(&rm->m_sock_item);
INIT_LIST_HEAD(&rm->m_conn_item);
spin_lock_init(&rm->m_rs_lock);
init_waitqueue_head(&rm->m_flush_wait);
out:
return rm;
}
/*
* RDS ops use this to grab SG entries from the rm's sg pool.
*/
struct scatterlist *rds_message_alloc_sgs(struct rds_message *rm, int nents)
{
struct scatterlist *sg_first = (struct scatterlist *) &rm[1];
struct scatterlist *sg_ret;
if (nents <= 0) {
pr_warn("rds: alloc sgs failed! nents <= 0\n");
return ERR_PTR(-EINVAL);
}
if (rm->m_used_sgs + nents > rm->m_total_sgs) {
pr_warn("rds: alloc sgs failed! total %d used %d nents %d\n",
rm->m_total_sgs, rm->m_used_sgs, nents);
return ERR_PTR(-ENOMEM);
}
sg_ret = &sg_first[rm->m_used_sgs];
sg_init_table(sg_ret, nents);
rm->m_used_sgs += nents;
return sg_ret;
}
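/*
 * Usage sketch (illustrative, not from the original source; error
 * handling elided): a caller that needs three data SGs reserves them
 * when the message is allocated and then carves them out of the
 * trailing pool:
 *
 *	rm = rds_message_alloc(3 * sizeof(struct scatterlist), GFP_KERNEL);
 *	if (rm)
 *		rm->data.op_sg = rds_message_alloc_sgs(rm, 3);
 *
 * rds_message_map_pages() below follows this same pattern.
 */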
struct rds_message *rds_message_map_pages(unsigned long *page_addrs, unsigned int total_len)
{
struct rds_message *rm;
unsigned int i;
int num_sgs = DIV_ROUND_UP(total_len, PAGE_SIZE);
int extra_bytes = num_sgs * sizeof(struct scatterlist);
rm = rds_message_alloc(extra_bytes, GFP_NOWAIT);
if (!rm)
return ERR_PTR(-ENOMEM);
set_bit(RDS_MSG_PAGEVEC, &rm->m_flags);
rm->m_inc.i_hdr.h_len = cpu_to_be32(total_len);
rm->data.op_nents = DIV_ROUND_UP(total_len, PAGE_SIZE);
rm->data.op_sg = rds_message_alloc_sgs(rm, num_sgs);
if (IS_ERR(rm->data.op_sg)) {
void *err = ERR_CAST(rm->data.op_sg);
rds_message_put(rm);
return err;
}
for (i = 0; i < rm->data.op_nents; ++i) {
sg_set_page(&rm->data.op_sg[i],
virt_to_page((void *)page_addrs[i]),
PAGE_SIZE, 0);
}
return rm;
}
static int rds_message_zcopy_from_user(struct rds_message *rm, struct iov_iter *from)
{
struct scatterlist *sg;
int ret = 0;
int length = iov_iter_count(from);
struct rds_msg_zcopy_info *info;
rm->m_inc.i_hdr.h_len = cpu_to_be32(iov_iter_count(from));
/*
* now allocate and copy in the data payload.
*/
sg = rm->data.op_sg;
info = kzalloc(sizeof(*info), GFP_KERNEL);
if (!info)
return -ENOMEM;
INIT_LIST_HEAD(&info->rs_zcookie_next);
rm->data.op_mmp_znotifier = &info->znotif;
if (mm_account_pinned_pages(&rm->data.op_mmp_znotifier->z_mmp,
length)) {
ret = -ENOMEM;
goto err;
}
while (iov_iter_count(from)) {
struct page *pages;
size_t start;
ssize_t copied;
copied = iov_iter_get_pages2(from, &pages, PAGE_SIZE,
1, &start);
if (copied < 0) {
struct mmpin *mmp;
int i;
for (i = 0; i < rm->data.op_nents; i++)
put_page(sg_page(&rm->data.op_sg[i]));
mmp = &rm->data.op_mmp_znotifier->z_mmp;
mm_unaccount_pinned_pages(mmp);
ret = -EFAULT;
goto err;
}
length -= copied;
sg_set_page(sg, pages, copied, start);
rm->data.op_nents++;
sg++;
}
WARN_ON_ONCE(length != 0);
return ret;
err:
kfree(info);
rm->data.op_mmp_znotifier = NULL;
return ret;
}
int rds_message_copy_from_user(struct rds_message *rm, struct iov_iter *from,
bool zcopy)
{
unsigned long to_copy, nbytes;
unsigned long sg_off;
struct scatterlist *sg;
int ret = 0;
rm->m_inc.i_hdr.h_len = cpu_to_be32(iov_iter_count(from));
/* now allocate and copy in the data payload. */
sg = rm->data.op_sg;
sg_off = 0; /* Dear gcc, sg->page will be null from kzalloc. */
if (zcopy)
return rds_message_zcopy_from_user(rm, from);
while (iov_iter_count(from)) {
if (!sg_page(sg)) {
ret = rds_page_remainder_alloc(sg, iov_iter_count(from),
GFP_HIGHUSER);
if (ret)
return ret;
rm->data.op_nents++;
sg_off = 0;
}
to_copy = min_t(unsigned long, iov_iter_count(from),
sg->length - sg_off);
rds_stats_add(s_copy_from_user, to_copy);
nbytes = copy_page_from_iter(sg_page(sg), sg->offset + sg_off,
to_copy, from);
if (nbytes != to_copy)
return -EFAULT;
sg_off += to_copy;
if (sg_off == sg->length)
sg++;
}
return ret;
}
int rds_message_inc_copy_to_user(struct rds_incoming *inc, struct iov_iter *to)
{
struct rds_message *rm;
struct scatterlist *sg;
unsigned long to_copy;
unsigned long vec_off;
int copied;
int ret;
u32 len;
rm = container_of(inc, struct rds_message, m_inc);
len = be32_to_cpu(rm->m_inc.i_hdr.h_len);
sg = rm->data.op_sg;
vec_off = 0;
copied = 0;
while (iov_iter_count(to) && copied < len) {
to_copy = min_t(unsigned long, iov_iter_count(to),
sg->length - vec_off);
to_copy = min_t(unsigned long, to_copy, len - copied);
rds_stats_add(s_copy_to_user, to_copy);
ret = copy_page_to_iter(sg_page(sg), sg->offset + vec_off,
to_copy, to);
if (ret != to_copy)
return -EFAULT;
vec_off += to_copy;
copied += to_copy;
if (vec_off == sg->length) {
vec_off = 0;
sg++;
}
}
return copied;
}
/*
* If the message is still on the send queue, wait until the transport
* is done with it. This is particularly important for RDMA operations.
*/
void rds_message_wait(struct rds_message *rm)
{
wait_event_interruptible(rm->m_flush_wait,
!test_bit(RDS_MSG_MAPPED, &rm->m_flags));
}
void rds_message_unmapped(struct rds_message *rm)
{
clear_bit(RDS_MSG_MAPPED, &rm->m_flags);
wake_up_interruptible(&rm->m_flush_wait);
}
EXPORT_SYMBOL_GPL(rds_message_unmapped);
| linux-master | net/rds/message.c |
/*
* Copyright (c) 2006, 2017 Oracle and/or its affiliates. All rights reserved.
*
* This software is available to you under a choice of one of two
* licenses. You may choose to be licensed under the terms of the GNU
* General Public License (GPL) Version 2, available from the file
* COPYING in the main directory of this source tree, or the
* OpenIB.org BSD license below:
*
* Redistribution and use in source and binary forms, with or
* without modification, are permitted provided that the following
* conditions are met:
*
* - Redistributions of source code must retain the above
* copyright notice, this list of conditions and the following
* disclaimer.
*
* - Redistributions in binary form must reproduce the above
* copyright notice, this list of conditions and the following
* disclaimer in the documentation and/or other materials
* provided with the distribution.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
* EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
* MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
* NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
* BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
* ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
* CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
* SOFTWARE.
*
*/
#include <linux/kernel.h>
#include <linux/in.h>
#include <net/tcp.h>
#include "rds_single_path.h"
#include "rds.h"
#include "tcp.h"
void rds_tcp_xmit_path_prepare(struct rds_conn_path *cp)
{
struct rds_tcp_connection *tc = cp->cp_transport_data;
tcp_sock_set_cork(tc->t_sock->sk, true);
}
void rds_tcp_xmit_path_complete(struct rds_conn_path *cp)
{
struct rds_tcp_connection *tc = cp->cp_transport_data;
tcp_sock_set_cork(tc->t_sock->sk, false);
}
/* the core send_sem serializes this with other xmit and shutdown */
static int rds_tcp_sendmsg(struct socket *sock, void *data, unsigned int len)
{
struct kvec vec = {
.iov_base = data,
.iov_len = len,
};
struct msghdr msg = {
.msg_flags = MSG_DONTWAIT | MSG_NOSIGNAL,
};
return kernel_sendmsg(sock, &msg, &vec, 1, vec.iov_len);
}
/* the core send_sem serializes this with other xmit and shutdown */
int rds_tcp_xmit(struct rds_connection *conn, struct rds_message *rm,
unsigned int hdr_off, unsigned int sg, unsigned int off)
{
struct rds_conn_path *cp = rm->m_inc.i_conn_path;
struct rds_tcp_connection *tc = cp->cp_transport_data;
struct msghdr msg = {};
struct bio_vec bvec;
int done = 0;
int ret = 0;
if (hdr_off == 0) {
/*
* m_ack_seq is set to the sequence number of the last byte of
* header and data. see rds_tcp_is_acked().
*/
tc->t_last_sent_nxt = rds_tcp_write_seq(tc);
rm->m_ack_seq = tc->t_last_sent_nxt +
sizeof(struct rds_header) +
be32_to_cpu(rm->m_inc.i_hdr.h_len) - 1;
smp_mb__before_atomic();
set_bit(RDS_MSG_HAS_ACK_SEQ, &rm->m_flags);
tc->t_last_expected_una = rm->m_ack_seq + 1;
if (test_bit(RDS_MSG_RETRANSMITTED, &rm->m_flags))
rm->m_inc.i_hdr.h_flags |= RDS_FLAG_RETRANSMITTED;
rdsdebug("rm %p tcp nxt %u ack_seq %llu\n",
rm, rds_tcp_write_seq(tc),
(unsigned long long)rm->m_ack_seq);
}
if (hdr_off < sizeof(struct rds_header)) {
/* see rds_tcp_write_space() */
set_bit(SOCK_NOSPACE, &tc->t_sock->sk->sk_socket->flags);
ret = rds_tcp_sendmsg(tc->t_sock,
(void *)&rm->m_inc.i_hdr + hdr_off,
sizeof(rm->m_inc.i_hdr) - hdr_off);
if (ret < 0)
goto out;
done += ret;
if (hdr_off + done != sizeof(struct rds_header))
goto out;
}
while (sg < rm->data.op_nents) {
msg.msg_flags = MSG_SPLICE_PAGES | MSG_DONTWAIT | MSG_NOSIGNAL;
if (sg + 1 < rm->data.op_nents)
msg.msg_flags |= MSG_MORE;
bvec_set_page(&bvec, sg_page(&rm->data.op_sg[sg]),
rm->data.op_sg[sg].length - off,
rm->data.op_sg[sg].offset + off);
iov_iter_bvec(&msg.msg_iter, ITER_SOURCE, &bvec, 1,
rm->data.op_sg[sg].length - off);
ret = sock_sendmsg(tc->t_sock, &msg);
rdsdebug("tcp sendpage %p:%u:%u ret %d\n", (void *)sg_page(&rm->data.op_sg[sg]),
rm->data.op_sg[sg].offset + off, rm->data.op_sg[sg].length - off,
ret);
if (ret <= 0)
break;
off += ret;
done += ret;
if (off == rm->data.op_sg[sg].length) {
off = 0;
sg++;
}
}
out:
if (ret <= 0) {
/* write_space will hit after EAGAIN, all else fatal */
if (ret == -EAGAIN) {
rds_tcp_stats_inc(s_tcp_sndbuf_full);
ret = 0;
} else {
/* No need to disconnect/reconnect if path_drop
* has already been triggered, because, e.g., of
* an incoming RST.
*/
if (rds_conn_path_up(cp)) {
				pr_warn("RDS/tcp: send to %pI6c on cp [%d] returned %d, disconnecting and reconnecting\n",
					&conn->c_faddr, cp->cp_index, ret);
rds_conn_path_drop(cp, false);
}
}
}
if (done == 0)
done = ret;
return done;
}
/*
* rm->m_ack_seq is set to the tcp sequence number that corresponds to the
* last byte of the message, including the header. This means that the
* entire message has been received if rm->m_ack_seq is "before" the next
* unacked byte of the TCP sequence space. We have to do very careful
* wrapping 32bit comparisons here.
*/
static int rds_tcp_is_acked(struct rds_message *rm, uint64_t ack)
{
if (!test_bit(RDS_MSG_HAS_ACK_SEQ, &rm->m_flags))
return 0;
return (__s32)((u32)rm->m_ack_seq - (u32)ack) < 0;
}
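/*
 * Worked example (illustrative, not from the original source) of the
 * wrapping comparison above:
 *
 *	rm->m_ack_seq = 0xfffffff0, ack (snd_una) = 0x00000010
 *	(u32)(0xfffffff0 - 0x00000010) = 0xffffffe0, which is negative
 *	as an __s32, so the message is treated as acked even though the
 *	raw 32-bit values compare the "wrong" way round.
 */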
void rds_tcp_write_space(struct sock *sk)
{
void (*write_space)(struct sock *sk);
struct rds_conn_path *cp;
struct rds_tcp_connection *tc;
read_lock_bh(&sk->sk_callback_lock);
cp = sk->sk_user_data;
if (!cp) {
write_space = sk->sk_write_space;
goto out;
}
tc = cp->cp_transport_data;
rdsdebug("write_space for tc %p\n", tc);
write_space = tc->t_orig_write_space;
rds_tcp_stats_inc(s_tcp_write_space_calls);
rdsdebug("tcp una %u\n", rds_tcp_snd_una(tc));
tc->t_last_seen_una = rds_tcp_snd_una(tc);
rds_send_path_drop_acked(cp, rds_tcp_snd_una(tc), rds_tcp_is_acked);
rcu_read_lock();
if ((refcount_read(&sk->sk_wmem_alloc) << 1) <= sk->sk_sndbuf &&
!rds_destroy_pending(cp->cp_conn))
queue_delayed_work(rds_wq, &cp->cp_send_w, 0);
rcu_read_unlock();
out:
read_unlock_bh(&sk->sk_callback_lock);
/*
* write_space is only called when data leaves tcp's send queue if
* SOCK_NOSPACE is set. We set SOCK_NOSPACE every time we put
* data in tcp's send queue because we use write_space to parse the
* sequence numbers and notice that rds messages have been fully
* received.
*
* tcp's write_space clears SOCK_NOSPACE if the send queue has more
* than a certain amount of space. So we need to set it again *after*
* we call tcp's write_space or else we might only get called on the
* first of a series of incoming tcp acks.
*/
write_space(sk);
if (sk->sk_socket)
set_bit(SOCK_NOSPACE, &sk->sk_socket->flags);
}
| linux-master | net/rds/tcp_send.c |
// SPDX-License-Identifier: GPL-2.0-only
#include <linux/module.h>
#include <linux/sock_diag.h>
#include <linux/net.h>
#include <linux/netdevice.h>
#include <linux/packet_diag.h>
#include <linux/percpu.h>
#include <net/net_namespace.h>
#include <net/sock.h>
#include "internal.h"
static int pdiag_put_info(const struct packet_sock *po, struct sk_buff *nlskb)
{
struct packet_diag_info pinfo;
pinfo.pdi_index = po->ifindex;
pinfo.pdi_version = po->tp_version;
pinfo.pdi_reserve = po->tp_reserve;
pinfo.pdi_copy_thresh = po->copy_thresh;
pinfo.pdi_tstamp = READ_ONCE(po->tp_tstamp);
pinfo.pdi_flags = 0;
if (packet_sock_flag(po, PACKET_SOCK_RUNNING))
pinfo.pdi_flags |= PDI_RUNNING;
if (packet_sock_flag(po, PACKET_SOCK_AUXDATA))
pinfo.pdi_flags |= PDI_AUXDATA;
if (packet_sock_flag(po, PACKET_SOCK_ORIGDEV))
pinfo.pdi_flags |= PDI_ORIGDEV;
if (READ_ONCE(po->vnet_hdr_sz))
pinfo.pdi_flags |= PDI_VNETHDR;
if (packet_sock_flag(po, PACKET_SOCK_TP_LOSS))
pinfo.pdi_flags |= PDI_LOSS;
return nla_put(nlskb, PACKET_DIAG_INFO, sizeof(pinfo), &pinfo);
}
static int pdiag_put_mclist(const struct packet_sock *po, struct sk_buff *nlskb)
{
struct nlattr *mca;
struct packet_mclist *ml;
mca = nla_nest_start_noflag(nlskb, PACKET_DIAG_MCLIST);
if (!mca)
return -EMSGSIZE;
rtnl_lock();
for (ml = po->mclist; ml; ml = ml->next) {
struct packet_diag_mclist *dml;
dml = nla_reserve_nohdr(nlskb, sizeof(*dml));
if (!dml) {
rtnl_unlock();
nla_nest_cancel(nlskb, mca);
return -EMSGSIZE;
}
dml->pdmc_index = ml->ifindex;
dml->pdmc_type = ml->type;
dml->pdmc_alen = ml->alen;
dml->pdmc_count = ml->count;
BUILD_BUG_ON(sizeof(dml->pdmc_addr) != sizeof(ml->addr));
memcpy(dml->pdmc_addr, ml->addr, sizeof(ml->addr));
}
rtnl_unlock();
nla_nest_end(nlskb, mca);
return 0;
}
static int pdiag_put_ring(struct packet_ring_buffer *ring, int ver, int nl_type,
struct sk_buff *nlskb)
{
struct packet_diag_ring pdr;
if (!ring->pg_vec)
return 0;
pdr.pdr_block_size = ring->pg_vec_pages << PAGE_SHIFT;
pdr.pdr_block_nr = ring->pg_vec_len;
pdr.pdr_frame_size = ring->frame_size;
pdr.pdr_frame_nr = ring->frame_max + 1;
if (ver > TPACKET_V2) {
pdr.pdr_retire_tmo = ring->prb_bdqc.retire_blk_tov;
pdr.pdr_sizeof_priv = ring->prb_bdqc.blk_sizeof_priv;
pdr.pdr_features = ring->prb_bdqc.feature_req_word;
} else {
pdr.pdr_retire_tmo = 0;
pdr.pdr_sizeof_priv = 0;
pdr.pdr_features = 0;
}
return nla_put(nlskb, nl_type, sizeof(pdr), &pdr);
}
static int pdiag_put_rings_cfg(struct packet_sock *po, struct sk_buff *skb)
{
int ret;
mutex_lock(&po->pg_vec_lock);
ret = pdiag_put_ring(&po->rx_ring, po->tp_version,
PACKET_DIAG_RX_RING, skb);
if (!ret)
ret = pdiag_put_ring(&po->tx_ring, po->tp_version,
PACKET_DIAG_TX_RING, skb);
mutex_unlock(&po->pg_vec_lock);
return ret;
}
static int pdiag_put_fanout(struct packet_sock *po, struct sk_buff *nlskb)
{
int ret = 0;
mutex_lock(&fanout_mutex);
if (po->fanout) {
u32 val;
val = (u32)po->fanout->id | ((u32)po->fanout->type << 16);
ret = nla_put_u32(nlskb, PACKET_DIAG_FANOUT, val);
}
mutex_unlock(&fanout_mutex);
return ret;
}
static int sk_diag_fill(struct sock *sk, struct sk_buff *skb,
struct packet_diag_req *req,
bool may_report_filterinfo,
struct user_namespace *user_ns,
u32 portid, u32 seq, u32 flags, int sk_ino)
{
struct nlmsghdr *nlh;
struct packet_diag_msg *rp;
struct packet_sock *po = pkt_sk(sk);
nlh = nlmsg_put(skb, portid, seq, SOCK_DIAG_BY_FAMILY, sizeof(*rp), flags);
if (!nlh)
return -EMSGSIZE;
rp = nlmsg_data(nlh);
rp->pdiag_family = AF_PACKET;
rp->pdiag_type = sk->sk_type;
rp->pdiag_num = ntohs(READ_ONCE(po->num));
rp->pdiag_ino = sk_ino;
sock_diag_save_cookie(sk, rp->pdiag_cookie);
if ((req->pdiag_show & PACKET_SHOW_INFO) &&
pdiag_put_info(po, skb))
goto out_nlmsg_trim;
if ((req->pdiag_show & PACKET_SHOW_INFO) &&
nla_put_u32(skb, PACKET_DIAG_UID,
from_kuid_munged(user_ns, sock_i_uid(sk))))
goto out_nlmsg_trim;
if ((req->pdiag_show & PACKET_SHOW_MCLIST) &&
pdiag_put_mclist(po, skb))
goto out_nlmsg_trim;
if ((req->pdiag_show & PACKET_SHOW_RING_CFG) &&
pdiag_put_rings_cfg(po, skb))
goto out_nlmsg_trim;
if ((req->pdiag_show & PACKET_SHOW_FANOUT) &&
pdiag_put_fanout(po, skb))
goto out_nlmsg_trim;
if ((req->pdiag_show & PACKET_SHOW_MEMINFO) &&
sock_diag_put_meminfo(sk, skb, PACKET_DIAG_MEMINFO))
goto out_nlmsg_trim;
if ((req->pdiag_show & PACKET_SHOW_FILTER) &&
sock_diag_put_filterinfo(may_report_filterinfo, sk, skb,
PACKET_DIAG_FILTER))
goto out_nlmsg_trim;
nlmsg_end(skb, nlh);
return 0;
out_nlmsg_trim:
nlmsg_cancel(skb, nlh);
return -EMSGSIZE;
}
static int packet_diag_dump(struct sk_buff *skb, struct netlink_callback *cb)
{
int num = 0, s_num = cb->args[0];
struct packet_diag_req *req;
struct net *net;
struct sock *sk;
bool may_report_filterinfo;
net = sock_net(skb->sk);
req = nlmsg_data(cb->nlh);
may_report_filterinfo = netlink_net_capable(cb->skb, CAP_NET_ADMIN);
mutex_lock(&net->packet.sklist_lock);
sk_for_each(sk, &net->packet.sklist) {
if (!net_eq(sock_net(sk), net))
continue;
if (num < s_num)
goto next;
if (sk_diag_fill(sk, skb, req,
may_report_filterinfo,
sk_user_ns(NETLINK_CB(cb->skb).sk),
NETLINK_CB(cb->skb).portid,
cb->nlh->nlmsg_seq, NLM_F_MULTI,
sock_i_ino(sk)) < 0)
goto done;
next:
num++;
}
done:
mutex_unlock(&net->packet.sklist_lock);
cb->args[0] = num;
return skb->len;
}
static int packet_diag_handler_dump(struct sk_buff *skb, struct nlmsghdr *h)
{
int hdrlen = sizeof(struct packet_diag_req);
struct net *net = sock_net(skb->sk);
struct packet_diag_req *req;
if (nlmsg_len(h) < hdrlen)
return -EINVAL;
req = nlmsg_data(h);
/* Make it possible to support protocol filtering later */
if (req->sdiag_protocol)
return -EINVAL;
if (h->nlmsg_flags & NLM_F_DUMP) {
struct netlink_dump_control c = {
.dump = packet_diag_dump,
};
return netlink_dump_start(net->diag_nlsk, skb, h, &c);
} else
return -EOPNOTSUPP;
}
static const struct sock_diag_handler packet_diag_handler = {
.family = AF_PACKET,
.dump = packet_diag_handler_dump,
};
static int __init packet_diag_init(void)
{
return sock_diag_register(&packet_diag_handler);
}
static void __exit packet_diag_exit(void)
{
sock_diag_unregister(&packet_diag_handler);
}
module_init(packet_diag_init);
module_exit(packet_diag_exit);
MODULE_LICENSE("GPL");
MODULE_ALIAS_NET_PF_PROTO_TYPE(PF_NETLINK, NETLINK_SOCK_DIAG, 17 /* AF_PACKET */);
| linux-master | net/packet/diag.c |
// SPDX-License-Identifier: GPL-2.0-or-later
/*
* INET An implementation of the TCP/IP protocol suite for the LINUX
* operating system. INET is implemented using the BSD Socket
* interface as the means of communication with the user level.
*
* PACKET - implements raw packet sockets.
*
* Authors: Ross Biro
* Fred N. van Kempen, <[email protected]>
* Alan Cox, <[email protected]>
*
* Fixes:
* Alan Cox : verify_area() now used correctly
* Alan Cox : new skbuff lists, look ma no backlogs!
* Alan Cox : tidied skbuff lists.
* Alan Cox : Now uses generic datagram routines I
* added. Also fixed the peek/read crash
* from all old Linux datagram code.
* Alan Cox : Uses the improved datagram code.
* Alan Cox : Added NULL's for socket options.
* Alan Cox : Re-commented the code.
* Alan Cox : Use new kernel side addressing
* Rob Janssen : Correct MTU usage.
* Dave Platt : Counter leaks caused by incorrect
* interrupt locking and some slightly
* dubious gcc output. Can you read
* compiler: it said _VOLATILE_
* Richard Kooijman : Timestamp fixes.
* Alan Cox : New buffers. Use sk->mac.raw.
* Alan Cox : sendmsg/recvmsg support.
* Alan Cox : Protocol setting support
* Alexey Kuznetsov : Untied from IPv4 stack.
* Cyrus Durgin : Fixed kerneld for kmod.
* Michal Ostrowski : Module initialization cleanup.
* Ulises Alonso : Frame number limit removal and
* packet_set_ring memory leak.
* Eric Biederman : Allow for > 8 byte hardware addresses.
* The convention is that longer addresses
* will simply extend the hardware address
* byte arrays at the end of sockaddr_ll
* and packet_mreq.
* Johann Baudy : Added TX RING.
* Chetan Loke : Implemented TPACKET_V3 block abstraction
* layer.
* Copyright (C) 2011, <[email protected]>
*/
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
#include <linux/ethtool.h>
#include <linux/filter.h>
#include <linux/types.h>
#include <linux/mm.h>
#include <linux/capability.h>
#include <linux/fcntl.h>
#include <linux/socket.h>
#include <linux/in.h>
#include <linux/inet.h>
#include <linux/netdevice.h>
#include <linux/if_packet.h>
#include <linux/wireless.h>
#include <linux/kernel.h>
#include <linux/kmod.h>
#include <linux/slab.h>
#include <linux/vmalloc.h>
#include <net/net_namespace.h>
#include <net/ip.h>
#include <net/protocol.h>
#include <linux/skbuff.h>
#include <net/sock.h>
#include <linux/errno.h>
#include <linux/timer.h>
#include <linux/uaccess.h>
#include <asm/ioctls.h>
#include <asm/page.h>
#include <asm/cacheflush.h>
#include <asm/io.h>
#include <linux/proc_fs.h>
#include <linux/seq_file.h>
#include <linux/poll.h>
#include <linux/module.h>
#include <linux/init.h>
#include <linux/mutex.h>
#include <linux/if_vlan.h>
#include <linux/virtio_net.h>
#include <linux/errqueue.h>
#include <linux/net_tstamp.h>
#include <linux/percpu.h>
#ifdef CONFIG_INET
#include <net/inet_common.h>
#endif
#include <linux/bpf.h>
#include <net/compat.h>
#include <linux/netfilter_netdev.h>
#include "internal.h"
/*
Assumptions:
- If the device has no dev->header_ops->create, there is no LL header
visible above the device. In this case, its hard_header_len should be 0.
The device may prepend its own header internally. In this case, its
needed_headroom should be set to the space needed for it to add its
internal header.
For example, a WiFi driver pretending to be an Ethernet driver should
set its hard_header_len to be the Ethernet header length, and set its
needed_headroom to be (the real WiFi header length - the fake Ethernet
header length).
- packet socket receives packets with pulled ll header,
so that SOCK_RAW should push it back.
On receive:
-----------
Incoming, dev_has_header(dev) == true
mac_header -> ll header
data -> data
Outgoing, dev_has_header(dev) == true
mac_header -> ll header
data -> ll header
Incoming, dev_has_header(dev) == false
mac_header -> data
However drivers often make it point to the ll header.
This is incorrect because the ll header should be invisible to us.
data -> data
Outgoing, dev_has_header(dev) == false
mac_header -> data. ll header is invisible to us.
data -> data
Summary:
If dev_has_header(dev) == false we are unable to restore the ll header,
because it is invisible to us.
On transmit:
------------
dev_has_header(dev) == true
mac_header -> ll header
data -> ll header
dev_has_header(dev) == false (ll header is invisible to us)
mac_header -> data
data -> data
We should set network_header on output to the correct position,
packet classifier depends on it.
*/
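/*
 * Illustrative sketch (not from the original source) of the WiFi example
 * above; "real_wifi_hdr_len" is a hypothetical driver-specific value:
 *
 *	dev->hard_header_len = ETH_HLEN;
 *	dev->needed_headroom = real_wifi_hdr_len - ETH_HLEN;
 */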
/* Private packet socket structures. */
/* identical to struct packet_mreq except it has
* a longer address field.
*/
struct packet_mreq_max {
int mr_ifindex;
unsigned short mr_type;
unsigned short mr_alen;
unsigned char mr_address[MAX_ADDR_LEN];
};
union tpacket_uhdr {
struct tpacket_hdr *h1;
struct tpacket2_hdr *h2;
struct tpacket3_hdr *h3;
void *raw;
};
static int packet_set_ring(struct sock *sk, union tpacket_req_u *req_u,
int closing, int tx_ring);
#define V3_ALIGNMENT (8)
#define BLK_HDR_LEN (ALIGN(sizeof(struct tpacket_block_desc), V3_ALIGNMENT))
#define BLK_PLUS_PRIV(sz_of_priv) \
(BLK_HDR_LEN + ALIGN((sz_of_priv), V3_ALIGNMENT))
#define BLOCK_STATUS(x) ((x)->hdr.bh1.block_status)
#define BLOCK_NUM_PKTS(x) ((x)->hdr.bh1.num_pkts)
#define BLOCK_O2FP(x) ((x)->hdr.bh1.offset_to_first_pkt)
#define BLOCK_LEN(x) ((x)->hdr.bh1.blk_len)
#define BLOCK_SNUM(x) ((x)->hdr.bh1.seq_num)
#define BLOCK_O2PRIV(x) ((x)->offset_to_priv)
struct packet_sock;
static int tpacket_rcv(struct sk_buff *skb, struct net_device *dev,
struct packet_type *pt, struct net_device *orig_dev);
static void *packet_previous_frame(struct packet_sock *po,
struct packet_ring_buffer *rb,
int status);
static void packet_increment_head(struct packet_ring_buffer *buff);
static int prb_curr_blk_in_use(struct tpacket_block_desc *);
static void *prb_dispatch_next_block(struct tpacket_kbdq_core *,
struct packet_sock *);
static void prb_retire_current_block(struct tpacket_kbdq_core *,
struct packet_sock *, unsigned int status);
static int prb_queue_frozen(struct tpacket_kbdq_core *);
static void prb_open_block(struct tpacket_kbdq_core *,
struct tpacket_block_desc *);
static void prb_retire_rx_blk_timer_expired(struct timer_list *);
static void _prb_refresh_rx_retire_blk_timer(struct tpacket_kbdq_core *);
static void prb_fill_rxhash(struct tpacket_kbdq_core *, struct tpacket3_hdr *);
static void prb_clear_rxhash(struct tpacket_kbdq_core *,
struct tpacket3_hdr *);
static void prb_fill_vlan_info(struct tpacket_kbdq_core *,
struct tpacket3_hdr *);
static void packet_flush_mclist(struct sock *sk);
static u16 packet_pick_tx_queue(struct sk_buff *skb);
struct packet_skb_cb {
union {
struct sockaddr_pkt pkt;
union {
/* Trick: alias skb original length with
* ll.sll_family and ll.protocol in order
* to save room.
*/
unsigned int origlen;
struct sockaddr_ll ll;
};
} sa;
};
#define vio_le() virtio_legacy_is_little_endian()
#define PACKET_SKB_CB(__skb) ((struct packet_skb_cb *)((__skb)->cb))
#define GET_PBDQC_FROM_RB(x) ((struct tpacket_kbdq_core *)(&(x)->prb_bdqc))
#define GET_PBLOCK_DESC(x, bid) \
((struct tpacket_block_desc *)((x)->pkbdq[(bid)].buffer))
#define GET_CURR_PBLOCK_DESC_FROM_CORE(x) \
((struct tpacket_block_desc *)((x)->pkbdq[(x)->kactive_blk_num].buffer))
#define GET_NEXT_PRB_BLK_NUM(x) \
(((x)->kactive_blk_num < ((x)->knum_blocks-1)) ? \
((x)->kactive_blk_num+1) : 0)
static void __fanout_unlink(struct sock *sk, struct packet_sock *po);
static void __fanout_link(struct sock *sk, struct packet_sock *po);
#ifdef CONFIG_NETFILTER_EGRESS
static noinline struct sk_buff *nf_hook_direct_egress(struct sk_buff *skb)
{
struct sk_buff *next, *head = NULL, *tail;
int rc;
rcu_read_lock();
for (; skb != NULL; skb = next) {
next = skb->next;
skb_mark_not_on_list(skb);
if (!nf_hook_egress(skb, &rc, skb->dev))
continue;
if (!head)
head = skb;
else
tail->next = skb;
tail = skb;
}
rcu_read_unlock();
return head;
}
#endif
static int packet_xmit(const struct packet_sock *po, struct sk_buff *skb)
{
if (!packet_sock_flag(po, PACKET_SOCK_QDISC_BYPASS))
return dev_queue_xmit(skb);
#ifdef CONFIG_NETFILTER_EGRESS
if (nf_hook_egress_active()) {
skb = nf_hook_direct_egress(skb);
if (!skb)
return NET_XMIT_DROP;
}
#endif
return dev_direct_xmit(skb, packet_pick_tx_queue(skb));
}
static struct net_device *packet_cached_dev_get(struct packet_sock *po)
{
struct net_device *dev;
rcu_read_lock();
dev = rcu_dereference(po->cached_dev);
dev_hold(dev);
rcu_read_unlock();
return dev;
}
static void packet_cached_dev_assign(struct packet_sock *po,
struct net_device *dev)
{
rcu_assign_pointer(po->cached_dev, dev);
}
static void packet_cached_dev_reset(struct packet_sock *po)
{
RCU_INIT_POINTER(po->cached_dev, NULL);
}
static u16 packet_pick_tx_queue(struct sk_buff *skb)
{
struct net_device *dev = skb->dev;
const struct net_device_ops *ops = dev->netdev_ops;
int cpu = raw_smp_processor_id();
u16 queue_index;
#ifdef CONFIG_XPS
skb->sender_cpu = cpu + 1;
#endif
skb_record_rx_queue(skb, cpu % dev->real_num_tx_queues);
if (ops->ndo_select_queue) {
queue_index = ops->ndo_select_queue(dev, skb, NULL);
queue_index = netdev_cap_txqueue(dev, queue_index);
} else {
queue_index = netdev_pick_tx(dev, skb, NULL);
}
return queue_index;
}
/* __register_prot_hook must be invoked through register_prot_hook
* or from a context in which asynchronous accesses to the packet
 * socket are not possible (packet_create()).
*/
static void __register_prot_hook(struct sock *sk)
{
struct packet_sock *po = pkt_sk(sk);
if (!packet_sock_flag(po, PACKET_SOCK_RUNNING)) {
if (po->fanout)
__fanout_link(sk, po);
else
dev_add_pack(&po->prot_hook);
sock_hold(sk);
packet_sock_flag_set(po, PACKET_SOCK_RUNNING, 1);
}
}
static void register_prot_hook(struct sock *sk)
{
lockdep_assert_held_once(&pkt_sk(sk)->bind_lock);
__register_prot_hook(sk);
}
/* If the sync parameter is true, we will temporarily drop
* the po->bind_lock and do a synchronize_net to make sure no
* asynchronous packet processing paths still refer to the elements
* of po->prot_hook. If the sync parameter is false, it is the
 * caller's responsibility to take care of this.
*/
static void __unregister_prot_hook(struct sock *sk, bool sync)
{
struct packet_sock *po = pkt_sk(sk);
lockdep_assert_held_once(&po->bind_lock);
packet_sock_flag_set(po, PACKET_SOCK_RUNNING, 0);
if (po->fanout)
__fanout_unlink(sk, po);
else
__dev_remove_pack(&po->prot_hook);
__sock_put(sk);
if (sync) {
spin_unlock(&po->bind_lock);
synchronize_net();
spin_lock(&po->bind_lock);
}
}
static void unregister_prot_hook(struct sock *sk, bool sync)
{
struct packet_sock *po = pkt_sk(sk);
if (packet_sock_flag(po, PACKET_SOCK_RUNNING))
__unregister_prot_hook(sk, sync);
}
static inline struct page * __pure pgv_to_page(void *addr)
{
if (is_vmalloc_addr(addr))
return vmalloc_to_page(addr);
return virt_to_page(addr);
}
static void __packet_set_status(struct packet_sock *po, void *frame, int status)
{
union tpacket_uhdr h;
	/* The WRITE_ONCE() calls here are paired with READ_ONCE() in __packet_get_status */
h.raw = frame;
switch (po->tp_version) {
case TPACKET_V1:
WRITE_ONCE(h.h1->tp_status, status);
flush_dcache_page(pgv_to_page(&h.h1->tp_status));
break;
case TPACKET_V2:
WRITE_ONCE(h.h2->tp_status, status);
flush_dcache_page(pgv_to_page(&h.h2->tp_status));
break;
case TPACKET_V3:
WRITE_ONCE(h.h3->tp_status, status);
flush_dcache_page(pgv_to_page(&h.h3->tp_status));
break;
default:
WARN(1, "TPACKET version not supported.\n");
BUG();
}
smp_wmb();
}
static int __packet_get_status(const struct packet_sock *po, void *frame)
{
union tpacket_uhdr h;
smp_rmb();
	/* The READ_ONCE() calls here are paired with WRITE_ONCE() in __packet_set_status */
h.raw = frame;
switch (po->tp_version) {
case TPACKET_V1:
flush_dcache_page(pgv_to_page(&h.h1->tp_status));
return READ_ONCE(h.h1->tp_status);
case TPACKET_V2:
flush_dcache_page(pgv_to_page(&h.h2->tp_status));
return READ_ONCE(h.h2->tp_status);
case TPACKET_V3:
flush_dcache_page(pgv_to_page(&h.h3->tp_status));
return READ_ONCE(h.h3->tp_status);
default:
WARN(1, "TPACKET version not supported.\n");
BUG();
return 0;
}
}
static __u32 tpacket_get_timestamp(struct sk_buff *skb, struct timespec64 *ts,
unsigned int flags)
{
struct skb_shared_hwtstamps *shhwtstamps = skb_hwtstamps(skb);
if (shhwtstamps &&
(flags & SOF_TIMESTAMPING_RAW_HARDWARE) &&
ktime_to_timespec64_cond(shhwtstamps->hwtstamp, ts))
return TP_STATUS_TS_RAW_HARDWARE;
if ((flags & SOF_TIMESTAMPING_SOFTWARE) &&
ktime_to_timespec64_cond(skb_tstamp(skb), ts))
return TP_STATUS_TS_SOFTWARE;
return 0;
}
static __u32 __packet_set_timestamp(struct packet_sock *po, void *frame,
struct sk_buff *skb)
{
union tpacket_uhdr h;
struct timespec64 ts;
__u32 ts_status;
if (!(ts_status = tpacket_get_timestamp(skb, &ts, READ_ONCE(po->tp_tstamp))))
return 0;
h.raw = frame;
/*
* versions 1 through 3 overflow the timestamps in y2106, since they
* all store the seconds in a 32-bit unsigned integer.
* If we create a version 4, that should have a 64-bit timestamp,
* either 64-bit seconds + 32-bit nanoseconds, or just 64-bit
* nanoseconds.
*/
switch (po->tp_version) {
case TPACKET_V1:
h.h1->tp_sec = ts.tv_sec;
h.h1->tp_usec = ts.tv_nsec / NSEC_PER_USEC;
break;
case TPACKET_V2:
h.h2->tp_sec = ts.tv_sec;
h.h2->tp_nsec = ts.tv_nsec;
break;
case TPACKET_V3:
h.h3->tp_sec = ts.tv_sec;
h.h3->tp_nsec = ts.tv_nsec;
break;
default:
WARN(1, "TPACKET version not supported.\n");
BUG();
}
/* one flush is safe, as both fields always lie on the same cacheline */
flush_dcache_page(pgv_to_page(&h.h1->tp_sec));
smp_wmb();
return ts_status;
}
static void *packet_lookup_frame(const struct packet_sock *po,
const struct packet_ring_buffer *rb,
unsigned int position,
int status)
{
unsigned int pg_vec_pos, frame_offset;
union tpacket_uhdr h;
pg_vec_pos = position / rb->frames_per_block;
frame_offset = position % rb->frames_per_block;
h.raw = rb->pg_vec[pg_vec_pos].buffer +
(frame_offset * rb->frame_size);
if (status != __packet_get_status(po, h.raw))
return NULL;
return h.raw;
}
static void *packet_current_frame(struct packet_sock *po,
struct packet_ring_buffer *rb,
int status)
{
return packet_lookup_frame(po, rb, rb->head, status);
}
static void prb_del_retire_blk_timer(struct tpacket_kbdq_core *pkc)
{
del_timer_sync(&pkc->retire_blk_timer);
}
static void prb_shutdown_retire_blk_timer(struct packet_sock *po,
struct sk_buff_head *rb_queue)
{
struct tpacket_kbdq_core *pkc;
pkc = GET_PBDQC_FROM_RB(&po->rx_ring);
spin_lock_bh(&rb_queue->lock);
pkc->delete_blk_timer = 1;
spin_unlock_bh(&rb_queue->lock);
prb_del_retire_blk_timer(pkc);
}
static void prb_setup_retire_blk_timer(struct packet_sock *po)
{
struct tpacket_kbdq_core *pkc;
pkc = GET_PBDQC_FROM_RB(&po->rx_ring);
timer_setup(&pkc->retire_blk_timer, prb_retire_rx_blk_timer_expired,
0);
pkc->retire_blk_timer.expires = jiffies;
}
static int prb_calc_retire_blk_tmo(struct packet_sock *po,
int blk_size_in_bytes)
{
struct net_device *dev;
unsigned int mbits, div;
struct ethtool_link_ksettings ecmd;
int err;
rtnl_lock();
dev = __dev_get_by_index(sock_net(&po->sk), po->ifindex);
if (unlikely(!dev)) {
rtnl_unlock();
return DEFAULT_PRB_RETIRE_TOV;
}
err = __ethtool_get_link_ksettings(dev, &ecmd);
rtnl_unlock();
if (err)
return DEFAULT_PRB_RETIRE_TOV;
	/* If the link speed is that slow, you don't really
	 * need to worry about perf anyway
*/
if (ecmd.base.speed < SPEED_1000 ||
ecmd.base.speed == SPEED_UNKNOWN)
return DEFAULT_PRB_RETIRE_TOV;
div = ecmd.base.speed / 1000;
mbits = (blk_size_in_bytes * 8) / (1024 * 1024);
if (div)
mbits /= div;
if (div)
return mbits + 1;
return mbits;
}
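/* Rough arithmetic: div is the link speed in Gbit/s and mbits is the block
 * size in Mbit, so mbits / div approximates the milliseconds it takes to
 * fill one block at line rate. E.g. a 4 MiB block gives mbits = 32, hence
 * a timeout of ~33 ms on a 1 Gbit/s link and ~4 ms at 10 Gbit/s.
 */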
static void prb_init_ft_ops(struct tpacket_kbdq_core *p1,
union tpacket_req_u *req_u)
{
p1->feature_req_word = req_u->req3.tp_feature_req_word;
}
static void init_prb_bdqc(struct packet_sock *po,
struct packet_ring_buffer *rb,
struct pgv *pg_vec,
union tpacket_req_u *req_u)
{
struct tpacket_kbdq_core *p1 = GET_PBDQC_FROM_RB(rb);
struct tpacket_block_desc *pbd;
memset(p1, 0x0, sizeof(*p1));
p1->knxt_seq_num = 1;
p1->pkbdq = pg_vec;
pbd = (struct tpacket_block_desc *)pg_vec[0].buffer;
p1->pkblk_start = pg_vec[0].buffer;
p1->kblk_size = req_u->req3.tp_block_size;
p1->knum_blocks = req_u->req3.tp_block_nr;
p1->hdrlen = po->tp_hdrlen;
p1->version = po->tp_version;
p1->last_kactive_blk_num = 0;
po->stats.stats3.tp_freeze_q_cnt = 0;
if (req_u->req3.tp_retire_blk_tov)
p1->retire_blk_tov = req_u->req3.tp_retire_blk_tov;
else
p1->retire_blk_tov = prb_calc_retire_blk_tmo(po,
req_u->req3.tp_block_size);
p1->tov_in_jiffies = msecs_to_jiffies(p1->retire_blk_tov);
p1->blk_sizeof_priv = req_u->req3.tp_sizeof_priv;
rwlock_init(&p1->blk_fill_in_prog_lock);
p1->max_frame_len = p1->kblk_size - BLK_PLUS_PRIV(p1->blk_sizeof_priv);
prb_init_ft_ops(p1, req_u);
prb_setup_retire_blk_timer(po);
prb_open_block(p1, pbd);
}
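/* The values consumed by init_prb_bdqc() come from the tpacket_req3 that
 * userspace passes when creating a TPACKET_V3 ring; a rough sketch (fd is
 * an AF_PACKET socket, all sizes purely illustrative):
 *
 *	struct tpacket_req3 req = {
 *		.tp_block_size     = 1 << 22,
 *		.tp_block_nr       = 64,
 *		.tp_frame_size     = 1 << 11,
 *		.tp_frame_nr       = ((1 << 22) / (1 << 11)) * 64,
 *		.tp_retire_blk_tov = 60,
 *	};
 *	int version = TPACKET_V3;
 *	setsockopt(fd, SOL_PACKET, PACKET_VERSION, &version, sizeof(version));
 *	setsockopt(fd, SOL_PACKET, PACKET_RX_RING, &req, sizeof(req));
 *
 * tp_retire_blk_tov is the block timeout in milliseconds; leaving it zero
 * makes the kernel derive one from the link speed via
 * prb_calc_retire_blk_tmo() above.
 */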
/* Do NOT update the last_blk_num first.
* Assumes sk_buff_head lock is held.
*/
static void _prb_refresh_rx_retire_blk_timer(struct tpacket_kbdq_core *pkc)
{
mod_timer(&pkc->retire_blk_timer,
jiffies + pkc->tov_in_jiffies);
pkc->last_kactive_blk_num = pkc->kactive_blk_num;
}
/*
* Timer logic:
* 1) We refresh the timer only when we open a block.
* By doing this we don't waste cycles refreshing the timer
 *    on a packet-by-packet basis.
*
* With a 1MB block-size, on a 1Gbps line, it will take
* i) ~8 ms to fill a block + ii) memcpy etc.
* In this cut we are not accounting for the memcpy time.
*
* So, if the user sets the 'tmo' to 10ms then the timer
* will never fire while the block is still getting filled
* (which is what we want). However, the user could choose
* to close a block early and that's fine.
*
* But when the timer does fire, we check whether or not to refresh it.
* Since the tmo granularity is in msecs, it is not too expensive
 * to refresh the timer, let's say every 8 msecs.
* Either the user can set the 'tmo' or we can derive it based on
* a) line-speed and b) block-size.
* prb_calc_retire_blk_tmo() calculates the tmo.
*
*/
static void prb_retire_rx_blk_timer_expired(struct timer_list *t)
{
struct packet_sock *po =
from_timer(po, t, rx_ring.prb_bdqc.retire_blk_timer);
struct tpacket_kbdq_core *pkc = GET_PBDQC_FROM_RB(&po->rx_ring);
unsigned int frozen;
struct tpacket_block_desc *pbd;
spin_lock(&po->sk.sk_receive_queue.lock);
frozen = prb_queue_frozen(pkc);
pbd = GET_CURR_PBLOCK_DESC_FROM_CORE(pkc);
if (unlikely(pkc->delete_blk_timer))
goto out;
/* We only need to plug the race when the block is partially filled.
* tpacket_rcv:
* lock(); increment BLOCK_NUM_PKTS; unlock()
* copy_bits() is in progress ...
* timer fires on other cpu:
* we can't retire the current block because copy_bits
* is in progress.
*
*/
if (BLOCK_NUM_PKTS(pbd)) {
/* Waiting for skb_copy_bits to finish... */
write_lock(&pkc->blk_fill_in_prog_lock);
write_unlock(&pkc->blk_fill_in_prog_lock);
}
if (pkc->last_kactive_blk_num == pkc->kactive_blk_num) {
if (!frozen) {
if (!BLOCK_NUM_PKTS(pbd)) {
/* An empty block. Just refresh the timer. */
goto refresh_timer;
}
prb_retire_current_block(pkc, po, TP_STATUS_BLK_TMO);
if (!prb_dispatch_next_block(pkc, po))
goto refresh_timer;
else
goto out;
} else {
/* Case 1. Queue was frozen because user-space was
* lagging behind.
*/
if (prb_curr_blk_in_use(pbd)) {
/*
* Ok, user-space is still behind.
* So just refresh the timer.
*/
goto refresh_timer;
} else {
			/* Case 2. The queue was frozen, user-space caught up,
			 * now the link went idle && the timer fired.
			 * We don't have a block to close, so we open this
			 * block and restart the timer.
			 * Opening a block thaws the queue and restarts the timer;
			 * thawing/timer-refresh is a side effect.
*/
prb_open_block(pkc, pbd);
goto out;
}
}
}
refresh_timer:
_prb_refresh_rx_retire_blk_timer(pkc);
out:
spin_unlock(&po->sk.sk_receive_queue.lock);
}
static void prb_flush_block(struct tpacket_kbdq_core *pkc1,
struct tpacket_block_desc *pbd1, __u32 status)
{
/* Flush everything minus the block header */
#if ARCH_IMPLEMENTS_FLUSH_DCACHE_PAGE == 1
u8 *start, *end;
start = (u8 *)pbd1;
	/* Skip the block header (we know the header WILL fit in 4K) */
start += PAGE_SIZE;
end = (u8 *)PAGE_ALIGN((unsigned long)pkc1->pkblk_end);
for (; start < end; start += PAGE_SIZE)
flush_dcache_page(pgv_to_page(start));
smp_wmb();
#endif
/* Now update the block status. */
BLOCK_STATUS(pbd1) = status;
/* Flush the block header */
#if ARCH_IMPLEMENTS_FLUSH_DCACHE_PAGE == 1
start = (u8 *)pbd1;
flush_dcache_page(pgv_to_page(start));
smp_wmb();
#endif
}
/*
 * Side effects:
 *
 * 1) Flush the block.
 * 2) Increment kactive_blk_num.
 *
 * Note: We DON'T refresh the timer on purpose,
 * because almost always the next block will be opened.
*/
static void prb_close_block(struct tpacket_kbdq_core *pkc1,
struct tpacket_block_desc *pbd1,
struct packet_sock *po, unsigned int stat)
{
__u32 status = TP_STATUS_USER | stat;
struct tpacket3_hdr *last_pkt;
struct tpacket_hdr_v1 *h1 = &pbd1->hdr.bh1;
struct sock *sk = &po->sk;
if (atomic_read(&po->tp_drops))
status |= TP_STATUS_LOSING;
last_pkt = (struct tpacket3_hdr *)pkc1->prev;
last_pkt->tp_next_offset = 0;
/* Get the ts of the last pkt */
if (BLOCK_NUM_PKTS(pbd1)) {
h1->ts_last_pkt.ts_sec = last_pkt->tp_sec;
h1->ts_last_pkt.ts_nsec = last_pkt->tp_nsec;
} else {
/* Ok, we tmo'd - so get the current time.
*
* It shouldn't really happen as we don't close empty
* blocks. See prb_retire_rx_blk_timer_expired().
*/
struct timespec64 ts;
ktime_get_real_ts64(&ts);
h1->ts_last_pkt.ts_sec = ts.tv_sec;
h1->ts_last_pkt.ts_nsec = ts.tv_nsec;
}
smp_wmb();
/* Flush the block */
prb_flush_block(pkc1, pbd1, status);
sk->sk_data_ready(sk);
pkc1->kactive_blk_num = GET_NEXT_PRB_BLK_NUM(pkc1);
}
static void prb_thaw_queue(struct tpacket_kbdq_core *pkc)
{
pkc->reset_pending_on_curr_blk = 0;
}
/*
* Side effect of opening a block:
*
* 1) prb_queue is thawed.
* 2) retire_blk_timer is refreshed.
*
*/
static void prb_open_block(struct tpacket_kbdq_core *pkc1,
struct tpacket_block_desc *pbd1)
{
struct timespec64 ts;
struct tpacket_hdr_v1 *h1 = &pbd1->hdr.bh1;
smp_rmb();
	/* We could have just memset this, but we would lose the
	 * flexibility of making the priv area sticky
*/
BLOCK_SNUM(pbd1) = pkc1->knxt_seq_num++;
BLOCK_NUM_PKTS(pbd1) = 0;
BLOCK_LEN(pbd1) = BLK_PLUS_PRIV(pkc1->blk_sizeof_priv);
ktime_get_real_ts64(&ts);
h1->ts_first_pkt.ts_sec = ts.tv_sec;
h1->ts_first_pkt.ts_nsec = ts.tv_nsec;
pkc1->pkblk_start = (char *)pbd1;
pkc1->nxt_offset = pkc1->pkblk_start + BLK_PLUS_PRIV(pkc1->blk_sizeof_priv);
BLOCK_O2FP(pbd1) = (__u32)BLK_PLUS_PRIV(pkc1->blk_sizeof_priv);
BLOCK_O2PRIV(pbd1) = BLK_HDR_LEN;
pbd1->version = pkc1->version;
pkc1->prev = pkc1->nxt_offset;
pkc1->pkblk_end = pkc1->pkblk_start + pkc1->kblk_size;
prb_thaw_queue(pkc1);
_prb_refresh_rx_retire_blk_timer(pkc1);
smp_wmb();
}
/*
* Queue freeze logic:
* 1) Assume tp_block_nr = 8 blocks.
* 2) At time 't0', user opens Rx ring.
* 3) Some time past 't0', kernel starts filling blocks starting from 0 .. 7
* 4) user-space is either sleeping or processing block '0'.
* 5) tpacket_rcv is currently filling block '7', since there is no space left,
 *    it will close block-7, loop around and try to fill block '0'.
* call-flow:
* __packet_lookup_frame_in_block
* prb_retire_current_block()
* prb_dispatch_next_block()
* |->(BLOCK_STATUS == USER) evaluates to true
* 5.1) Since block-0 is currently in-use, we just freeze the queue.
* 6) Now there are two cases:
* 6.1) Link goes idle right after the queue is frozen.
* But remember, the last open_block() refreshed the timer.
 *      When this timer expires, it will refresh itself so that we can
 *      re-open block-0 in the near future.
* 6.2) Link is busy and keeps on receiving packets. This is a simple
* case and __packet_lookup_frame_in_block will check if block-0
* is free and can now be re-used.
*/
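/* Block ownership handshake: prb_close_block() hands a block to userspace
 * by setting block_status to TP_STATUS_USER (possibly OR'ed with flags
 * such as TP_STATUS_BLK_TMO or TP_STATUS_LOSING). Userspace is expected
 * to write TP_STATUS_KERNEL back once it has consumed the block; that is
 * what prb_curr_blk_in_use() and prb_dispatch_next_block() test for
 * before the block can be reused.
 */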
static void prb_freeze_queue(struct tpacket_kbdq_core *pkc,
struct packet_sock *po)
{
pkc->reset_pending_on_curr_blk = 1;
po->stats.stats3.tp_freeze_q_cnt++;
}
#define TOTAL_PKT_LEN_INCL_ALIGN(length) (ALIGN((length), V3_ALIGNMENT))
/*
* If the next block is free then we will dispatch it
* and return a good offset.
* Else, we will freeze the queue.
 * So, the caller must check the return value.
*/
static void *prb_dispatch_next_block(struct tpacket_kbdq_core *pkc,
struct packet_sock *po)
{
struct tpacket_block_desc *pbd;
smp_rmb();
/* 1. Get current block num */
pbd = GET_CURR_PBLOCK_DESC_FROM_CORE(pkc);
/* 2. If this block is currently in_use then freeze the queue */
if (TP_STATUS_USER & BLOCK_STATUS(pbd)) {
prb_freeze_queue(pkc, po);
return NULL;
}
/*
* 3.
* open this block and return the offset where the first packet
* needs to get stored.
*/
prb_open_block(pkc, pbd);
return (void *)pkc->nxt_offset;
}
static void prb_retire_current_block(struct tpacket_kbdq_core *pkc,
struct packet_sock *po, unsigned int status)
{
struct tpacket_block_desc *pbd = GET_CURR_PBLOCK_DESC_FROM_CORE(pkc);
/* retire/close the current block */
if (likely(TP_STATUS_KERNEL == BLOCK_STATUS(pbd))) {
/*
* Plug the case where copy_bits() is in progress on
* cpu-0 and tpacket_rcv() got invoked on cpu-1, didn't
* have space to copy the pkt in the current block and
* called prb_retire_current_block()
*
* We don't need to worry about the TMO case because
* the timer-handler already handled this case.
*/
if (!(status & TP_STATUS_BLK_TMO)) {
/* Waiting for skb_copy_bits to finish... */
write_lock(&pkc->blk_fill_in_prog_lock);
write_unlock(&pkc->blk_fill_in_prog_lock);
}
prb_close_block(pkc, pbd, po, status);
return;
}
}
static int prb_curr_blk_in_use(struct tpacket_block_desc *pbd)
{
return TP_STATUS_USER & BLOCK_STATUS(pbd);
}
static int prb_queue_frozen(struct tpacket_kbdq_core *pkc)
{
return pkc->reset_pending_on_curr_blk;
}
static void prb_clear_blk_fill_status(struct packet_ring_buffer *rb)
__releases(&pkc->blk_fill_in_prog_lock)
{
struct tpacket_kbdq_core *pkc = GET_PBDQC_FROM_RB(rb);
read_unlock(&pkc->blk_fill_in_prog_lock);
}
static void prb_fill_rxhash(struct tpacket_kbdq_core *pkc,
struct tpacket3_hdr *ppd)
{
ppd->hv1.tp_rxhash = skb_get_hash(pkc->skb);
}
static void prb_clear_rxhash(struct tpacket_kbdq_core *pkc,
struct tpacket3_hdr *ppd)
{
ppd->hv1.tp_rxhash = 0;
}
static void prb_fill_vlan_info(struct tpacket_kbdq_core *pkc,
struct tpacket3_hdr *ppd)
{
if (skb_vlan_tag_present(pkc->skb)) {
ppd->hv1.tp_vlan_tci = skb_vlan_tag_get(pkc->skb);
ppd->hv1.tp_vlan_tpid = ntohs(pkc->skb->vlan_proto);
ppd->tp_status = TP_STATUS_VLAN_VALID | TP_STATUS_VLAN_TPID_VALID;
} else {
ppd->hv1.tp_vlan_tci = 0;
ppd->hv1.tp_vlan_tpid = 0;
ppd->tp_status = TP_STATUS_AVAILABLE;
}
}
static void prb_run_all_ft_ops(struct tpacket_kbdq_core *pkc,
struct tpacket3_hdr *ppd)
{
ppd->hv1.tp_padding = 0;
prb_fill_vlan_info(pkc, ppd);
if (pkc->feature_req_word & TP_FT_REQ_FILL_RXHASH)
prb_fill_rxhash(pkc, ppd);
else
prb_clear_rxhash(pkc, ppd);
}
static void prb_fill_curr_block(char *curr,
struct tpacket_kbdq_core *pkc,
struct tpacket_block_desc *pbd,
unsigned int len)
__acquires(&pkc->blk_fill_in_prog_lock)
{
struct tpacket3_hdr *ppd;
ppd = (struct tpacket3_hdr *)curr;
ppd->tp_next_offset = TOTAL_PKT_LEN_INCL_ALIGN(len);
pkc->prev = curr;
pkc->nxt_offset += TOTAL_PKT_LEN_INCL_ALIGN(len);
BLOCK_LEN(pbd) += TOTAL_PKT_LEN_INCL_ALIGN(len);
BLOCK_NUM_PKTS(pbd) += 1;
read_lock(&pkc->blk_fill_in_prog_lock);
prb_run_all_ft_ops(pkc, ppd);
}
/* Assumes the caller holds sk->sk_receive_queue.lock */
static void *__packet_lookup_frame_in_block(struct packet_sock *po,
struct sk_buff *skb,
unsigned int len
)
{
struct tpacket_kbdq_core *pkc;
struct tpacket_block_desc *pbd;
char *curr, *end;
pkc = GET_PBDQC_FROM_RB(&po->rx_ring);
pbd = GET_CURR_PBLOCK_DESC_FROM_CORE(pkc);
/* Queue is frozen when user space is lagging behind */
if (prb_queue_frozen(pkc)) {
/*
		 * Check if the last block, which caused the queue to freeze,
		 * is still in use by user-space.
*/
if (prb_curr_blk_in_use(pbd)) {
/* Can't record this packet */
return NULL;
} else {
/*
* Ok, the block was released by user-space.
* Now let's open that block.
			 * Opening a block also thaws the queue;
			 * thawing is a side effect.
*/
prb_open_block(pkc, pbd);
}
}
smp_mb();
curr = pkc->nxt_offset;
pkc->skb = skb;
end = (char *)pbd + pkc->kblk_size;
/* first try the current block */
if (curr+TOTAL_PKT_LEN_INCL_ALIGN(len) < end) {
prb_fill_curr_block(curr, pkc, pbd, len);
return (void *)curr;
}
/* Ok, close the current block */
prb_retire_current_block(pkc, po, 0);
/* Now, try to dispatch the next block */
curr = (char *)prb_dispatch_next_block(pkc, po);
if (curr) {
pbd = GET_CURR_PBLOCK_DESC_FROM_CORE(pkc);
prb_fill_curr_block(curr, pkc, pbd, len);
return (void *)curr;
}
/*
	 * No free blocks are available. user_space hasn't caught up yet.
* Queue was just frozen and now this packet will get dropped.
*/
return NULL;
}
static void *packet_current_rx_frame(struct packet_sock *po,
struct sk_buff *skb,
int status, unsigned int len)
{
char *curr = NULL;
switch (po->tp_version) {
case TPACKET_V1:
case TPACKET_V2:
curr = packet_lookup_frame(po, &po->rx_ring,
po->rx_ring.head, status);
return curr;
case TPACKET_V3:
return __packet_lookup_frame_in_block(po, skb, len);
default:
WARN(1, "TPACKET version not supported\n");
BUG();
return NULL;
}
}
static void *prb_lookup_block(const struct packet_sock *po,
const struct packet_ring_buffer *rb,
unsigned int idx,
int status)
{
struct tpacket_kbdq_core *pkc = GET_PBDQC_FROM_RB(rb);
struct tpacket_block_desc *pbd = GET_PBLOCK_DESC(pkc, idx);
if (status != BLOCK_STATUS(pbd))
return NULL;
return pbd;
}
static int prb_previous_blk_num(struct packet_ring_buffer *rb)
{
unsigned int prev;
if (rb->prb_bdqc.kactive_blk_num)
prev = rb->prb_bdqc.kactive_blk_num-1;
else
prev = rb->prb_bdqc.knum_blocks-1;
return prev;
}
/* Assumes the caller holds the sk_receive_queue lock */
static void *__prb_previous_block(struct packet_sock *po,
struct packet_ring_buffer *rb,
int status)
{
unsigned int previous = prb_previous_blk_num(rb);
return prb_lookup_block(po, rb, previous, status);
}
static void *packet_previous_rx_frame(struct packet_sock *po,
struct packet_ring_buffer *rb,
int status)
{
if (po->tp_version <= TPACKET_V2)
return packet_previous_frame(po, rb, status);
return __prb_previous_block(po, rb, status);
}
static void packet_increment_rx_head(struct packet_sock *po,
struct packet_ring_buffer *rb)
{
switch (po->tp_version) {
case TPACKET_V1:
case TPACKET_V2:
return packet_increment_head(rb);
case TPACKET_V3:
default:
WARN(1, "TPACKET version not supported.\n");
BUG();
return;
}
}
static void *packet_previous_frame(struct packet_sock *po,
struct packet_ring_buffer *rb,
int status)
{
unsigned int previous = rb->head ? rb->head - 1 : rb->frame_max;
return packet_lookup_frame(po, rb, previous, status);
}
static void packet_increment_head(struct packet_ring_buffer *buff)
{
buff->head = buff->head != buff->frame_max ? buff->head+1 : 0;
}
static void packet_inc_pending(struct packet_ring_buffer *rb)
{
this_cpu_inc(*rb->pending_refcnt);
}
static void packet_dec_pending(struct packet_ring_buffer *rb)
{
this_cpu_dec(*rb->pending_refcnt);
}
static unsigned int packet_read_pending(const struct packet_ring_buffer *rb)
{
unsigned int refcnt = 0;
int cpu;
/* We don't use pending refcount in rx_ring. */
if (rb->pending_refcnt == NULL)
return 0;
for_each_possible_cpu(cpu)
refcnt += *per_cpu_ptr(rb->pending_refcnt, cpu);
return refcnt;
}
static int packet_alloc_pending(struct packet_sock *po)
{
po->rx_ring.pending_refcnt = NULL;
po->tx_ring.pending_refcnt = alloc_percpu(unsigned int);
if (unlikely(po->tx_ring.pending_refcnt == NULL))
return -ENOBUFS;
return 0;
}
static void packet_free_pending(struct packet_sock *po)
{
free_percpu(po->tx_ring.pending_refcnt);
}
#define ROOM_POW_OFF 2
#define ROOM_NONE 0x0
#define ROOM_LOW 0x1
#define ROOM_NORMAL 0x2
static bool __tpacket_has_room(const struct packet_sock *po, int pow_off)
{
int idx, len;
len = READ_ONCE(po->rx_ring.frame_max) + 1;
idx = READ_ONCE(po->rx_ring.head);
if (pow_off)
idx += len >> pow_off;
if (idx >= len)
idx -= len;
return packet_lookup_frame(po, &po->rx_ring, idx, TP_STATUS_KERNEL);
}
static bool __tpacket_v3_has_room(const struct packet_sock *po, int pow_off)
{
int idx, len;
len = READ_ONCE(po->rx_ring.prb_bdqc.knum_blocks);
idx = READ_ONCE(po->rx_ring.prb_bdqc.kactive_blk_num);
if (pow_off)
idx += len >> pow_off;
if (idx >= len)
idx -= len;
return prb_lookup_block(po, &po->rx_ring, idx, TP_STATUS_KERNEL);
}
static int __packet_rcv_has_room(const struct packet_sock *po,
const struct sk_buff *skb)
{
const struct sock *sk = &po->sk;
int ret = ROOM_NONE;
if (po->prot_hook.func != tpacket_rcv) {
int rcvbuf = READ_ONCE(sk->sk_rcvbuf);
int avail = rcvbuf - atomic_read(&sk->sk_rmem_alloc)
- (skb ? skb->truesize : 0);
if (avail > (rcvbuf >> ROOM_POW_OFF))
return ROOM_NORMAL;
else if (avail > 0)
return ROOM_LOW;
else
return ROOM_NONE;
}
if (po->tp_version == TPACKET_V3) {
if (__tpacket_v3_has_room(po, ROOM_POW_OFF))
ret = ROOM_NORMAL;
else if (__tpacket_v3_has_room(po, 0))
ret = ROOM_LOW;
} else {
if (__tpacket_has_room(po, ROOM_POW_OFF))
ret = ROOM_NORMAL;
else if (__tpacket_has_room(po, 0))
ret = ROOM_LOW;
}
return ret;
}
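/* Room classification used by the rollover fanout policy below: for a
 * mmap()ed ring, ROOM_NORMAL means the frame (V1/V2) or block (V3) a
 * quarter of the ring ahead of the current head is still owned by the
 * kernel, while ROOM_LOW only guarantees the slot at the head itself; for
 * a plain socket the same split is applied to sk_rcvbuf (more than a
 * quarter free vs. merely some space free).
 */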
static int packet_rcv_has_room(struct packet_sock *po, struct sk_buff *skb)
{
bool pressure;
int ret;
ret = __packet_rcv_has_room(po, skb);
pressure = ret != ROOM_NORMAL;
if (packet_sock_flag(po, PACKET_SOCK_PRESSURE) != pressure)
packet_sock_flag_set(po, PACKET_SOCK_PRESSURE, pressure);
return ret;
}
static void packet_rcv_try_clear_pressure(struct packet_sock *po)
{
if (packet_sock_flag(po, PACKET_SOCK_PRESSURE) &&
__packet_rcv_has_room(po, NULL) == ROOM_NORMAL)
packet_sock_flag_set(po, PACKET_SOCK_PRESSURE, false);
}
static void packet_sock_destruct(struct sock *sk)
{
skb_queue_purge(&sk->sk_error_queue);
WARN_ON(atomic_read(&sk->sk_rmem_alloc));
WARN_ON(refcount_read(&sk->sk_wmem_alloc));
if (!sock_flag(sk, SOCK_DEAD)) {
pr_err("Attempt to release alive packet socket: %p\n", sk);
return;
}
}
static bool fanout_flow_is_huge(struct packet_sock *po, struct sk_buff *skb)
{
u32 *history = po->rollover->history;
u32 victim, rxhash;
int i, count = 0;
rxhash = skb_get_hash(skb);
for (i = 0; i < ROLLOVER_HLEN; i++)
if (READ_ONCE(history[i]) == rxhash)
count++;
victim = get_random_u32_below(ROLLOVER_HLEN);
/* Avoid dirtying the cache line if possible */
if (READ_ONCE(history[victim]) != rxhash)
WRITE_ONCE(history[victim], rxhash);
return count > (ROLLOVER_HLEN >> 1);
}
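/* Fanout demux policies. Each helper below maps an skb to a socket index
 * in [0, num): HASH uses the symmetric flow hash, LB round-robins, CPU
 * uses the current CPU id, RND picks at random, QM uses the recorded
 * queue mapping, ROLLOVER walks the group looking for a socket with room,
 * and CBPF/EBPF run a user-supplied program whose return value selects
 * the index (taken modulo num).
 */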
static unsigned int fanout_demux_hash(struct packet_fanout *f,
struct sk_buff *skb,
unsigned int num)
{
return reciprocal_scale(__skb_get_hash_symmetric(skb), num);
}
static unsigned int fanout_demux_lb(struct packet_fanout *f,
struct sk_buff *skb,
unsigned int num)
{
unsigned int val = atomic_inc_return(&f->rr_cur);
return val % num;
}
static unsigned int fanout_demux_cpu(struct packet_fanout *f,
struct sk_buff *skb,
unsigned int num)
{
return smp_processor_id() % num;
}
static unsigned int fanout_demux_rnd(struct packet_fanout *f,
struct sk_buff *skb,
unsigned int num)
{
return get_random_u32_below(num);
}
static unsigned int fanout_demux_rollover(struct packet_fanout *f,
struct sk_buff *skb,
unsigned int idx, bool try_self,
unsigned int num)
{
struct packet_sock *po, *po_next, *po_skip = NULL;
unsigned int i, j, room = ROOM_NONE;
po = pkt_sk(rcu_dereference(f->arr[idx]));
if (try_self) {
room = packet_rcv_has_room(po, skb);
if (room == ROOM_NORMAL ||
(room == ROOM_LOW && !fanout_flow_is_huge(po, skb)))
return idx;
po_skip = po;
}
i = j = min_t(int, po->rollover->sock, num - 1);
do {
po_next = pkt_sk(rcu_dereference(f->arr[i]));
if (po_next != po_skip &&
!packet_sock_flag(po_next, PACKET_SOCK_PRESSURE) &&
packet_rcv_has_room(po_next, skb) == ROOM_NORMAL) {
if (i != j)
po->rollover->sock = i;
atomic_long_inc(&po->rollover->num);
if (room == ROOM_LOW)
atomic_long_inc(&po->rollover->num_huge);
return i;
}
if (++i == num)
i = 0;
} while (i != j);
atomic_long_inc(&po->rollover->num_failed);
return idx;
}
static unsigned int fanout_demux_qm(struct packet_fanout *f,
struct sk_buff *skb,
unsigned int num)
{
return skb_get_queue_mapping(skb) % num;
}
static unsigned int fanout_demux_bpf(struct packet_fanout *f,
struct sk_buff *skb,
unsigned int num)
{
struct bpf_prog *prog;
unsigned int ret = 0;
rcu_read_lock();
prog = rcu_dereference(f->bpf_prog);
if (prog)
ret = bpf_prog_run_clear_cb(prog, skb) % num;
rcu_read_unlock();
return ret;
}
static bool fanout_has_flag(struct packet_fanout *f, u16 flag)
{
return f->flags & (flag >> 8);
}
static int packet_rcv_fanout(struct sk_buff *skb, struct net_device *dev,
struct packet_type *pt, struct net_device *orig_dev)
{
struct packet_fanout *f = pt->af_packet_priv;
unsigned int num = READ_ONCE(f->num_members);
struct net *net = read_pnet(&f->net);
struct packet_sock *po;
unsigned int idx;
if (!net_eq(dev_net(dev), net) || !num) {
kfree_skb(skb);
return 0;
}
if (fanout_has_flag(f, PACKET_FANOUT_FLAG_DEFRAG)) {
skb = ip_check_defrag(net, skb, IP_DEFRAG_AF_PACKET);
if (!skb)
return 0;
}
switch (f->type) {
case PACKET_FANOUT_HASH:
default:
idx = fanout_demux_hash(f, skb, num);
break;
case PACKET_FANOUT_LB:
idx = fanout_demux_lb(f, skb, num);
break;
case PACKET_FANOUT_CPU:
idx = fanout_demux_cpu(f, skb, num);
break;
case PACKET_FANOUT_RND:
idx = fanout_demux_rnd(f, skb, num);
break;
case PACKET_FANOUT_QM:
idx = fanout_demux_qm(f, skb, num);
break;
case PACKET_FANOUT_ROLLOVER:
idx = fanout_demux_rollover(f, skb, 0, false, num);
break;
case PACKET_FANOUT_CBPF:
case PACKET_FANOUT_EBPF:
idx = fanout_demux_bpf(f, skb, num);
break;
}
if (fanout_has_flag(f, PACKET_FANOUT_FLAG_ROLLOVER))
idx = fanout_demux_rollover(f, skb, idx, true, num);
po = pkt_sk(rcu_dereference(f->arr[idx]));
return po->prot_hook.func(skb, dev, &po->prot_hook, orig_dev);
}
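/* Joining a fanout group from userspace is a single setsockopt() on a
 * (typically already bound) AF_PACKET socket; GROUP_ID here stands for an
 * application-chosen 16-bit id:
 *
 *	int val = GROUP_ID | (PACKET_FANOUT_HASH << 16);
 *	setsockopt(fd, SOL_PACKET, PACKET_FANOUT, &val, sizeof(val));
 *
 * fanout_add() below creates the struct packet_fanout on first use and
 * attaches later sockets that present the same id and type to the group.
 */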
DEFINE_MUTEX(fanout_mutex);
EXPORT_SYMBOL_GPL(fanout_mutex);
static LIST_HEAD(fanout_list);
static u16 fanout_next_id;
static void __fanout_link(struct sock *sk, struct packet_sock *po)
{
struct packet_fanout *f = po->fanout;
spin_lock(&f->lock);
rcu_assign_pointer(f->arr[f->num_members], sk);
smp_wmb();
f->num_members++;
if (f->num_members == 1)
dev_add_pack(&f->prot_hook);
spin_unlock(&f->lock);
}
static void __fanout_unlink(struct sock *sk, struct packet_sock *po)
{
struct packet_fanout *f = po->fanout;
int i;
spin_lock(&f->lock);
for (i = 0; i < f->num_members; i++) {
if (rcu_dereference_protected(f->arr[i],
lockdep_is_held(&f->lock)) == sk)
break;
}
BUG_ON(i >= f->num_members);
rcu_assign_pointer(f->arr[i],
rcu_dereference_protected(f->arr[f->num_members - 1],
lockdep_is_held(&f->lock)));
f->num_members--;
if (f->num_members == 0)
__dev_remove_pack(&f->prot_hook);
spin_unlock(&f->lock);
}
static bool match_fanout_group(struct packet_type *ptype, struct sock *sk)
{
if (sk->sk_family != PF_PACKET)
return false;
return ptype->af_packet_priv == pkt_sk(sk)->fanout;
}
static void fanout_init_data(struct packet_fanout *f)
{
switch (f->type) {
case PACKET_FANOUT_LB:
atomic_set(&f->rr_cur, 0);
break;
case PACKET_FANOUT_CBPF:
case PACKET_FANOUT_EBPF:
RCU_INIT_POINTER(f->bpf_prog, NULL);
break;
}
}
static void __fanout_set_data_bpf(struct packet_fanout *f, struct bpf_prog *new)
{
struct bpf_prog *old;
spin_lock(&f->lock);
old = rcu_dereference_protected(f->bpf_prog, lockdep_is_held(&f->lock));
rcu_assign_pointer(f->bpf_prog, new);
spin_unlock(&f->lock);
if (old) {
synchronize_net();
bpf_prog_destroy(old);
}
}
static int fanout_set_data_cbpf(struct packet_sock *po, sockptr_t data,
unsigned int len)
{
struct bpf_prog *new;
struct sock_fprog fprog;
int ret;
if (sock_flag(&po->sk, SOCK_FILTER_LOCKED))
return -EPERM;
ret = copy_bpf_fprog_from_user(&fprog, data, len);
if (ret)
return ret;
ret = bpf_prog_create_from_user(&new, &fprog, NULL, false);
if (ret)
return ret;
__fanout_set_data_bpf(po->fanout, new);
return 0;
}
static int fanout_set_data_ebpf(struct packet_sock *po, sockptr_t data,
unsigned int len)
{
struct bpf_prog *new;
u32 fd;
if (sock_flag(&po->sk, SOCK_FILTER_LOCKED))
return -EPERM;
if (len != sizeof(fd))
return -EINVAL;
if (copy_from_sockptr(&fd, data, len))
return -EFAULT;
new = bpf_prog_get_type(fd, BPF_PROG_TYPE_SOCKET_FILTER);
if (IS_ERR(new))
return PTR_ERR(new);
__fanout_set_data_bpf(po->fanout, new);
return 0;
}
static int fanout_set_data(struct packet_sock *po, sockptr_t data,
unsigned int len)
{
switch (po->fanout->type) {
case PACKET_FANOUT_CBPF:
return fanout_set_data_cbpf(po, data, len);
case PACKET_FANOUT_EBPF:
return fanout_set_data_ebpf(po, data, len);
default:
return -EINVAL;
}
}
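/* For the CBPF/EBPF fanout types the classifier is installed afterwards
 * through PACKET_FANOUT_DATA. E.g. for EBPF, where prog_fd is an int
 * assumed to hold the fd of a loaded BPF_PROG_TYPE_SOCKET_FILTER program:
 *
 *	setsockopt(fd, SOL_PACKET, PACKET_FANOUT_DATA,
 *		   &prog_fd, sizeof(prog_fd));
 *
 * fanout_set_data() dispatches on the group type, so the same option
 * expects a struct sock_fprog instead when the group is PACKET_FANOUT_CBPF.
 */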
static void fanout_release_data(struct packet_fanout *f)
{
switch (f->type) {
case PACKET_FANOUT_CBPF:
case PACKET_FANOUT_EBPF:
__fanout_set_data_bpf(f, NULL);
}
}
static bool __fanout_id_is_free(struct sock *sk, u16 candidate_id)
{
struct packet_fanout *f;
list_for_each_entry(f, &fanout_list, list) {
if (f->id == candidate_id &&
read_pnet(&f->net) == sock_net(sk)) {
return false;
}
}
return true;
}
static bool fanout_find_new_id(struct sock *sk, u16 *new_id)
{
u16 id = fanout_next_id;
do {
if (__fanout_id_is_free(sk, id)) {
*new_id = id;
fanout_next_id = id + 1;
return true;
}
id++;
} while (id != fanout_next_id);
return false;
}
static int fanout_add(struct sock *sk, struct fanout_args *args)
{
struct packet_rollover *rollover = NULL;
struct packet_sock *po = pkt_sk(sk);
u16 type_flags = args->type_flags;
struct packet_fanout *f, *match;
u8 type = type_flags & 0xff;
u8 flags = type_flags >> 8;
u16 id = args->id;
int err;
switch (type) {
case PACKET_FANOUT_ROLLOVER:
if (type_flags & PACKET_FANOUT_FLAG_ROLLOVER)
return -EINVAL;
break;
case PACKET_FANOUT_HASH:
case PACKET_FANOUT_LB:
case PACKET_FANOUT_CPU:
case PACKET_FANOUT_RND:
case PACKET_FANOUT_QM:
case PACKET_FANOUT_CBPF:
case PACKET_FANOUT_EBPF:
break;
default:
return -EINVAL;
}
mutex_lock(&fanout_mutex);
err = -EALREADY;
if (po->fanout)
goto out;
if (type == PACKET_FANOUT_ROLLOVER ||
(type_flags & PACKET_FANOUT_FLAG_ROLLOVER)) {
err = -ENOMEM;
rollover = kzalloc(sizeof(*rollover), GFP_KERNEL);
if (!rollover)
goto out;
atomic_long_set(&rollover->num, 0);
atomic_long_set(&rollover->num_huge, 0);
atomic_long_set(&rollover->num_failed, 0);
}
if (type_flags & PACKET_FANOUT_FLAG_UNIQUEID) {
if (id != 0) {
err = -EINVAL;
goto out;
}
if (!fanout_find_new_id(sk, &id)) {
err = -ENOMEM;
goto out;
}
/* ephemeral flag for the first socket in the group: drop it */
flags &= ~(PACKET_FANOUT_FLAG_UNIQUEID >> 8);
}
match = NULL;
list_for_each_entry(f, &fanout_list, list) {
if (f->id == id &&
read_pnet(&f->net) == sock_net(sk)) {
match = f;
break;
}
}
err = -EINVAL;
if (match) {
if (match->flags != flags)
goto out;
if (args->max_num_members &&
args->max_num_members != match->max_num_members)
goto out;
} else {
if (args->max_num_members > PACKET_FANOUT_MAX)
goto out;
if (!args->max_num_members)
/* legacy PACKET_FANOUT_MAX */
args->max_num_members = 256;
err = -ENOMEM;
match = kvzalloc(struct_size(match, arr, args->max_num_members),
GFP_KERNEL);
if (!match)
goto out;
write_pnet(&match->net, sock_net(sk));
match->id = id;
match->type = type;
match->flags = flags;
INIT_LIST_HEAD(&match->list);
spin_lock_init(&match->lock);
refcount_set(&match->sk_ref, 0);
fanout_init_data(match);
match->prot_hook.type = po->prot_hook.type;
match->prot_hook.dev = po->prot_hook.dev;
match->prot_hook.func = packet_rcv_fanout;
match->prot_hook.af_packet_priv = match;
match->prot_hook.af_packet_net = read_pnet(&match->net);
match->prot_hook.id_match = match_fanout_group;
match->max_num_members = args->max_num_members;
match->prot_hook.ignore_outgoing = type_flags & PACKET_FANOUT_FLAG_IGNORE_OUTGOING;
list_add(&match->list, &fanout_list);
}
err = -EINVAL;
spin_lock(&po->bind_lock);
if (packet_sock_flag(po, PACKET_SOCK_RUNNING) &&
match->type == type &&
match->prot_hook.type == po->prot_hook.type &&
match->prot_hook.dev == po->prot_hook.dev) {
err = -ENOSPC;
if (refcount_read(&match->sk_ref) < match->max_num_members) {
__dev_remove_pack(&po->prot_hook);
/* Paired with packet_setsockopt(PACKET_FANOUT_DATA) */
WRITE_ONCE(po->fanout, match);
po->rollover = rollover;
rollover = NULL;
refcount_set(&match->sk_ref, refcount_read(&match->sk_ref) + 1);
__fanout_link(sk, po);
err = 0;
}
}
spin_unlock(&po->bind_lock);
if (err && !refcount_read(&match->sk_ref)) {
list_del(&match->list);
kvfree(match);
}
out:
kfree(rollover);
mutex_unlock(&fanout_mutex);
return err;
}
/* If pkt_sk(sk)->fanout->sk_ref is zero, this function removes
* pkt_sk(sk)->fanout from fanout_list and returns pkt_sk(sk)->fanout.
* It is the responsibility of the caller to call fanout_release_data() and
* free the returned packet_fanout (after synchronize_net())
*/
static struct packet_fanout *fanout_release(struct sock *sk)
{
struct packet_sock *po = pkt_sk(sk);
struct packet_fanout *f;
mutex_lock(&fanout_mutex);
f = po->fanout;
if (f) {
po->fanout = NULL;
if (refcount_dec_and_test(&f->sk_ref))
list_del(&f->list);
else
f = NULL;
}
mutex_unlock(&fanout_mutex);
return f;
}
static bool packet_extra_vlan_len_allowed(const struct net_device *dev,
struct sk_buff *skb)
{
/* Earlier code assumed this would be a VLAN pkt, double-check
* this now that we have the actual packet in hand. We can only
* do this check on Ethernet devices.
*/
if (unlikely(dev->type != ARPHRD_ETHER))
return false;
skb_reset_mac_header(skb);
return likely(eth_hdr(skb)->h_proto == htons(ETH_P_8021Q));
}
static const struct proto_ops packet_ops;
static const struct proto_ops packet_ops_spkt;
static int packet_rcv_spkt(struct sk_buff *skb, struct net_device *dev,
struct packet_type *pt, struct net_device *orig_dev)
{
struct sock *sk;
struct sockaddr_pkt *spkt;
/*
* When we registered the protocol we saved the socket in the data
* field for just this event.
*/
sk = pt->af_packet_priv;
/*
* Yank back the headers [hope the device set this
* right or kerboom...]
*
* Incoming packets have ll header pulled,
* push it back.
*
* For outgoing ones skb->data == skb_mac_header(skb)
	 * so that this procedure is a no-op.
*/
if (skb->pkt_type == PACKET_LOOPBACK)
goto out;
if (!net_eq(dev_net(dev), sock_net(sk)))
goto out;
skb = skb_share_check(skb, GFP_ATOMIC);
if (skb == NULL)
goto oom;
/* drop any routing info */
skb_dst_drop(skb);
/* drop conntrack reference */
nf_reset_ct(skb);
spkt = &PACKET_SKB_CB(skb)->sa.pkt;
skb_push(skb, skb->data - skb_mac_header(skb));
/*
* The SOCK_PACKET socket receives _all_ frames.
*/
spkt->spkt_family = dev->type;
strscpy(spkt->spkt_device, dev->name, sizeof(spkt->spkt_device));
spkt->spkt_protocol = skb->protocol;
/*
* Charge the memory to the socket. This is done specifically
	 * to prevent sockets from using up all the memory.
*/
if (sock_queue_rcv_skb(sk, skb) == 0)
return 0;
out:
kfree_skb(skb);
oom:
return 0;
}
static void packet_parse_headers(struct sk_buff *skb, struct socket *sock)
{
int depth;
if ((!skb->protocol || skb->protocol == htons(ETH_P_ALL)) &&
sock->type == SOCK_RAW) {
skb_reset_mac_header(skb);
skb->protocol = dev_parse_header_protocol(skb);
}
/* Move network header to the right position for VLAN tagged packets */
if (likely(skb->dev->type == ARPHRD_ETHER) &&
eth_type_vlan(skb->protocol) &&
vlan_get_protocol_and_depth(skb, skb->protocol, &depth) != 0)
skb_set_network_header(skb, depth);
skb_probe_transport_header(skb);
}
/*
 * Output a raw packet to the device layer. This bypasses all the other
 * protocol layers, and you must therefore supply it with a complete frame
*/
static int packet_sendmsg_spkt(struct socket *sock, struct msghdr *msg,
size_t len)
{
struct sock *sk = sock->sk;
DECLARE_SOCKADDR(struct sockaddr_pkt *, saddr, msg->msg_name);
struct sk_buff *skb = NULL;
struct net_device *dev;
struct sockcm_cookie sockc;
__be16 proto = 0;
int err;
int extra_len = 0;
/*
* Get and verify the address.
*/
if (saddr) {
if (msg->msg_namelen < sizeof(struct sockaddr))
return -EINVAL;
if (msg->msg_namelen == sizeof(struct sockaddr_pkt))
proto = saddr->spkt_protocol;
} else
return -ENOTCONN; /* SOCK_PACKET must be sent giving an address */
/*
	 * Find the device first to size-check it
*/
saddr->spkt_device[sizeof(saddr->spkt_device) - 1] = 0;
retry:
rcu_read_lock();
dev = dev_get_by_name_rcu(sock_net(sk), saddr->spkt_device);
err = -ENODEV;
if (dev == NULL)
goto out_unlock;
err = -ENETDOWN;
if (!(dev->flags & IFF_UP))
goto out_unlock;
/*
	 * You may not queue a frame bigger than the MTU. This is the lowest-level
	 * raw protocol, and you must do your own fragmentation at this level.
*/
if (unlikely(sock_flag(sk, SOCK_NOFCS))) {
if (!netif_supports_nofcs(dev)) {
err = -EPROTONOSUPPORT;
goto out_unlock;
}
extra_len = 4; /* We're doing our own CRC */
}
err = -EMSGSIZE;
if (len > dev->mtu + dev->hard_header_len + VLAN_HLEN + extra_len)
goto out_unlock;
if (!skb) {
size_t reserved = LL_RESERVED_SPACE(dev);
int tlen = dev->needed_tailroom;
unsigned int hhlen = dev->header_ops ? dev->hard_header_len : 0;
rcu_read_unlock();
skb = sock_wmalloc(sk, len + reserved + tlen, 0, GFP_KERNEL);
if (skb == NULL)
return -ENOBUFS;
/* FIXME: Save some space for broken drivers that write a hard
* header at transmission time by themselves. PPP is the notable
* one here. This should really be fixed at the driver level.
*/
skb_reserve(skb, reserved);
skb_reset_network_header(skb);
/* Try to align data part correctly */
if (hhlen) {
skb->data -= hhlen;
skb->tail -= hhlen;
if (len < hhlen)
skb_reset_network_header(skb);
}
err = memcpy_from_msg(skb_put(skb, len), msg, len);
if (err)
goto out_free;
goto retry;
}
if (!dev_validate_header(dev, skb->data, len) || !skb->len) {
err = -EINVAL;
goto out_unlock;
}
if (len > (dev->mtu + dev->hard_header_len + extra_len) &&
!packet_extra_vlan_len_allowed(dev, skb)) {
err = -EMSGSIZE;
goto out_unlock;
}
sockcm_init(&sockc, sk);
if (msg->msg_controllen) {
err = sock_cmsg_send(sk, msg, &sockc);
if (unlikely(err))
goto out_unlock;
}
skb->protocol = proto;
skb->dev = dev;
skb->priority = READ_ONCE(sk->sk_priority);
skb->mark = READ_ONCE(sk->sk_mark);
skb->tstamp = sockc.transmit_time;
skb_setup_tx_timestamp(skb, sockc.tsflags);
if (unlikely(extra_len == 4))
skb->no_fcs = 1;
packet_parse_headers(skb, sock);
dev_queue_xmit(skb);
rcu_read_unlock();
return len;
out_unlock:
rcu_read_unlock();
out_free:
kfree_skb(skb);
return err;
}
static unsigned int run_filter(struct sk_buff *skb,
const struct sock *sk,
unsigned int res)
{
struct sk_filter *filter;
rcu_read_lock();
filter = rcu_dereference(sk->sk_filter);
if (filter != NULL)
res = bpf_prog_run_clear_cb(filter->prog, skb);
rcu_read_unlock();
return res;
}
static int packet_rcv_vnet(struct msghdr *msg, const struct sk_buff *skb,
size_t *len, int vnet_hdr_sz)
{
struct virtio_net_hdr_mrg_rxbuf vnet_hdr = { .num_buffers = 0 };
if (*len < vnet_hdr_sz)
return -EINVAL;
*len -= vnet_hdr_sz;
if (virtio_net_hdr_from_skb(skb, (struct virtio_net_hdr *)&vnet_hdr, vio_le(), true, 0))
return -EINVAL;
return memcpy_to_msg(msg, (void *)&vnet_hdr, vnet_hdr_sz);
}
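/* packet_rcv_vnet() only runs once the socket has opted in to virtio net
 * headers, e.g. from userspace:
 *
 *	int on = 1;
 *	setsockopt(fd, SOL_PACKET, PACKET_VNET_HDR, &on, sizeof(on));
 *
 * After that, every frame read from the socket is prefixed with a
 * vnet_hdr_sz-byte header describing offload state (checksum offsets,
 * GSO type), and sendmsg() is expected to supply the same header in front
 * of the frame (see packet_snd_vnet_parse() below).
 */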
/*
 * This function does lazy skb cloning in the hope that most packets
 * are discarded by BPF.
 *
 * Note the tricky part: we DO mangle the shared skb! skb->data, skb->len
 * and skb->cb are mangled. It works because (and until) packets
 * falling here are owned by the current CPU. Output packets are cloned
 * by dev_queue_xmit_nit(), input packets are processed by net_bh
 * sequentially, so if we return the skb to its original state on exit,
 * we will not harm anyone.
*/
static int packet_rcv(struct sk_buff *skb, struct net_device *dev,
struct packet_type *pt, struct net_device *orig_dev)
{
struct sock *sk;
struct sockaddr_ll *sll;
struct packet_sock *po;
u8 *skb_head = skb->data;
int skb_len = skb->len;
unsigned int snaplen, res;
bool is_drop_n_account = false;
if (skb->pkt_type == PACKET_LOOPBACK)
goto drop;
sk = pt->af_packet_priv;
po = pkt_sk(sk);
if (!net_eq(dev_net(dev), sock_net(sk)))
goto drop;
skb->dev = dev;
if (dev_has_header(dev)) {
/* The device has an explicit notion of ll header,
* exported to higher levels.
*
* Otherwise, the device hides details of its frame
* structure, so that corresponding packet head is
* never delivered to user.
*/
if (sk->sk_type != SOCK_DGRAM)
skb_push(skb, skb->data - skb_mac_header(skb));
else if (skb->pkt_type == PACKET_OUTGOING) {
/* Special case: outgoing packets have ll header at head */
skb_pull(skb, skb_network_offset(skb));
}
}
snaplen = skb->len;
res = run_filter(skb, sk, snaplen);
if (!res)
goto drop_n_restore;
if (snaplen > res)
snaplen = res;
if (atomic_read(&sk->sk_rmem_alloc) >= sk->sk_rcvbuf)
goto drop_n_acct;
if (skb_shared(skb)) {
struct sk_buff *nskb = skb_clone(skb, GFP_ATOMIC);
if (nskb == NULL)
goto drop_n_acct;
if (skb_head != skb->data) {
skb->data = skb_head;
skb->len = skb_len;
}
consume_skb(skb);
skb = nskb;
}
sock_skb_cb_check_size(sizeof(*PACKET_SKB_CB(skb)) + MAX_ADDR_LEN - 8);
sll = &PACKET_SKB_CB(skb)->sa.ll;
sll->sll_hatype = dev->type;
sll->sll_pkttype = skb->pkt_type;
if (unlikely(packet_sock_flag(po, PACKET_SOCK_ORIGDEV)))
sll->sll_ifindex = orig_dev->ifindex;
else
sll->sll_ifindex = dev->ifindex;
sll->sll_halen = dev_parse_header(skb, sll->sll_addr);
/* sll->sll_family and sll->sll_protocol are set in packet_recvmsg().
* Use their space for storing the original skb length.
*/
PACKET_SKB_CB(skb)->sa.origlen = skb->len;
if (pskb_trim(skb, snaplen))
goto drop_n_acct;
skb_set_owner_r(skb, sk);
skb->dev = NULL;
skb_dst_drop(skb);
/* drop conntrack reference */
nf_reset_ct(skb);
spin_lock(&sk->sk_receive_queue.lock);
po->stats.stats1.tp_packets++;
sock_skb_set_dropcount(sk, skb);
skb_clear_delivery_time(skb);
__skb_queue_tail(&sk->sk_receive_queue, skb);
spin_unlock(&sk->sk_receive_queue.lock);
sk->sk_data_ready(sk);
return 0;
drop_n_acct:
is_drop_n_account = true;
atomic_inc(&po->tp_drops);
atomic_inc(&sk->sk_drops);
drop_n_restore:
if (skb_head != skb->data && skb_shared(skb)) {
skb->data = skb_head;
skb->len = skb_len;
}
drop:
if (!is_drop_n_account)
consume_skb(skb);
else
kfree_skb(skb);
return 0;
}
static int tpacket_rcv(struct sk_buff *skb, struct net_device *dev,
struct packet_type *pt, struct net_device *orig_dev)
{
struct sock *sk;
struct packet_sock *po;
struct sockaddr_ll *sll;
union tpacket_uhdr h;
u8 *skb_head = skb->data;
int skb_len = skb->len;
unsigned int snaplen, res;
unsigned long status = TP_STATUS_USER;
unsigned short macoff, hdrlen;
unsigned int netoff;
struct sk_buff *copy_skb = NULL;
struct timespec64 ts;
__u32 ts_status;
bool is_drop_n_account = false;
unsigned int slot_id = 0;
int vnet_hdr_sz = 0;
/* struct tpacket{2,3}_hdr is aligned to a multiple of TPACKET_ALIGNMENT.
	 * We may add members to them up to the current aligned size without forcing
* userspace to call getsockopt(..., PACKET_HDRLEN, ...).
*/
BUILD_BUG_ON(TPACKET_ALIGN(sizeof(*h.h2)) != 32);
BUILD_BUG_ON(TPACKET_ALIGN(sizeof(*h.h3)) != 48);
if (skb->pkt_type == PACKET_LOOPBACK)
goto drop;
sk = pt->af_packet_priv;
po = pkt_sk(sk);
if (!net_eq(dev_net(dev), sock_net(sk)))
goto drop;
if (dev_has_header(dev)) {
if (sk->sk_type != SOCK_DGRAM)
skb_push(skb, skb->data - skb_mac_header(skb));
else if (skb->pkt_type == PACKET_OUTGOING) {
/* Special case: outgoing packets have ll header at head */
skb_pull(skb, skb_network_offset(skb));
}
}
snaplen = skb->len;
res = run_filter(skb, sk, snaplen);
if (!res)
goto drop_n_restore;
/* If we are flooded, just give up */
if (__packet_rcv_has_room(po, skb) == ROOM_NONE) {
atomic_inc(&po->tp_drops);
goto drop_n_restore;
}
if (skb->ip_summed == CHECKSUM_PARTIAL)
status |= TP_STATUS_CSUMNOTREADY;
else if (skb->pkt_type != PACKET_OUTGOING &&
skb_csum_unnecessary(skb))
status |= TP_STATUS_CSUM_VALID;
if (skb_is_gso(skb) && skb_is_gso_tcp(skb))
status |= TP_STATUS_GSO_TCP;
if (snaplen > res)
snaplen = res;
if (sk->sk_type == SOCK_DGRAM) {
macoff = netoff = TPACKET_ALIGN(po->tp_hdrlen) + 16 +
po->tp_reserve;
} else {
unsigned int maclen = skb_network_offset(skb);
netoff = TPACKET_ALIGN(po->tp_hdrlen +
(maclen < 16 ? 16 : maclen)) +
po->tp_reserve;
vnet_hdr_sz = READ_ONCE(po->vnet_hdr_sz);
if (vnet_hdr_sz)
netoff += vnet_hdr_sz;
macoff = netoff - maclen;
}
if (netoff > USHRT_MAX) {
atomic_inc(&po->tp_drops);
goto drop_n_restore;
}
if (po->tp_version <= TPACKET_V2) {
if (macoff + snaplen > po->rx_ring.frame_size) {
if (po->copy_thresh &&
atomic_read(&sk->sk_rmem_alloc) < sk->sk_rcvbuf) {
if (skb_shared(skb)) {
copy_skb = skb_clone(skb, GFP_ATOMIC);
} else {
copy_skb = skb_get(skb);
skb_head = skb->data;
}
if (copy_skb) {
memset(&PACKET_SKB_CB(copy_skb)->sa.ll, 0,
sizeof(PACKET_SKB_CB(copy_skb)->sa.ll));
skb_set_owner_r(copy_skb, sk);
}
}
snaplen = po->rx_ring.frame_size - macoff;
if ((int)snaplen < 0) {
snaplen = 0;
vnet_hdr_sz = 0;
}
}
} else if (unlikely(macoff + snaplen >
GET_PBDQC_FROM_RB(&po->rx_ring)->max_frame_len)) {
u32 nval;
nval = GET_PBDQC_FROM_RB(&po->rx_ring)->max_frame_len - macoff;
pr_err_once("tpacket_rcv: packet too big, clamped from %u to %u. macoff=%u\n",
snaplen, nval, macoff);
snaplen = nval;
if (unlikely((int)snaplen < 0)) {
snaplen = 0;
macoff = GET_PBDQC_FROM_RB(&po->rx_ring)->max_frame_len;
vnet_hdr_sz = 0;
}
}
spin_lock(&sk->sk_receive_queue.lock);
h.raw = packet_current_rx_frame(po, skb,
TP_STATUS_KERNEL, (macoff+snaplen));
if (!h.raw)
goto drop_n_account;
if (po->tp_version <= TPACKET_V2) {
slot_id = po->rx_ring.head;
if (test_bit(slot_id, po->rx_ring.rx_owner_map))
goto drop_n_account;
__set_bit(slot_id, po->rx_ring.rx_owner_map);
}
if (vnet_hdr_sz &&
virtio_net_hdr_from_skb(skb, h.raw + macoff -
sizeof(struct virtio_net_hdr),
vio_le(), true, 0)) {
if (po->tp_version == TPACKET_V3)
prb_clear_blk_fill_status(&po->rx_ring);
goto drop_n_account;
}
if (po->tp_version <= TPACKET_V2) {
packet_increment_rx_head(po, &po->rx_ring);
/*
* LOSING will be reported till you read the stats,
* because it's COR - Clear On Read.
		 * Anyway, this is done for V1/V2 only, as V3 doesn't need it
		 * at the packet level.
*/
if (atomic_read(&po->tp_drops))
status |= TP_STATUS_LOSING;
}
po->stats.stats1.tp_packets++;
if (copy_skb) {
status |= TP_STATUS_COPY;
skb_clear_delivery_time(copy_skb);
__skb_queue_tail(&sk->sk_receive_queue, copy_skb);
}
spin_unlock(&sk->sk_receive_queue.lock);
skb_copy_bits(skb, 0, h.raw + macoff, snaplen);
/* Always timestamp; prefer an existing software timestamp taken
* closer to the time of capture.
*/
ts_status = tpacket_get_timestamp(skb, &ts,
READ_ONCE(po->tp_tstamp) |
SOF_TIMESTAMPING_SOFTWARE);
if (!ts_status)
ktime_get_real_ts64(&ts);
status |= ts_status;
switch (po->tp_version) {
case TPACKET_V1:
h.h1->tp_len = skb->len;
h.h1->tp_snaplen = snaplen;
h.h1->tp_mac = macoff;
h.h1->tp_net = netoff;
h.h1->tp_sec = ts.tv_sec;
h.h1->tp_usec = ts.tv_nsec / NSEC_PER_USEC;
hdrlen = sizeof(*h.h1);
break;
case TPACKET_V2:
h.h2->tp_len = skb->len;
h.h2->tp_snaplen = snaplen;
h.h2->tp_mac = macoff;
h.h2->tp_net = netoff;
h.h2->tp_sec = ts.tv_sec;
h.h2->tp_nsec = ts.tv_nsec;
if (skb_vlan_tag_present(skb)) {
h.h2->tp_vlan_tci = skb_vlan_tag_get(skb);
h.h2->tp_vlan_tpid = ntohs(skb->vlan_proto);
status |= TP_STATUS_VLAN_VALID | TP_STATUS_VLAN_TPID_VALID;
} else {
h.h2->tp_vlan_tci = 0;
h.h2->tp_vlan_tpid = 0;
}
memset(h.h2->tp_padding, 0, sizeof(h.h2->tp_padding));
hdrlen = sizeof(*h.h2);
break;
case TPACKET_V3:
		/* tp_next_offset and the vlan fields are already populated above,
		 * so DON'T clear them here
*/
h.h3->tp_status |= status;
h.h3->tp_len = skb->len;
h.h3->tp_snaplen = snaplen;
h.h3->tp_mac = macoff;
h.h3->tp_net = netoff;
h.h3->tp_sec = ts.tv_sec;
h.h3->tp_nsec = ts.tv_nsec;
memset(h.h3->tp_padding, 0, sizeof(h.h3->tp_padding));
hdrlen = sizeof(*h.h3);
break;
default:
BUG();
}
sll = h.raw + TPACKET_ALIGN(hdrlen);
sll->sll_halen = dev_parse_header(skb, sll->sll_addr);
sll->sll_family = AF_PACKET;
sll->sll_hatype = dev->type;
sll->sll_protocol = skb->protocol;
sll->sll_pkttype = skb->pkt_type;
if (unlikely(packet_sock_flag(po, PACKET_SOCK_ORIGDEV)))
sll->sll_ifindex = orig_dev->ifindex;
else
sll->sll_ifindex = dev->ifindex;
smp_mb();
#if ARCH_IMPLEMENTS_FLUSH_DCACHE_PAGE == 1
if (po->tp_version <= TPACKET_V2) {
u8 *start, *end;
end = (u8 *) PAGE_ALIGN((unsigned long) h.raw +
macoff + snaplen);
for (start = h.raw; start < end; start += PAGE_SIZE)
flush_dcache_page(pgv_to_page(start));
}
smp_wmb();
#endif
if (po->tp_version <= TPACKET_V2) {
spin_lock(&sk->sk_receive_queue.lock);
__packet_set_status(po, h.raw, status);
__clear_bit(slot_id, po->rx_ring.rx_owner_map);
spin_unlock(&sk->sk_receive_queue.lock);
sk->sk_data_ready(sk);
} else if (po->tp_version == TPACKET_V3) {
prb_clear_blk_fill_status(&po->rx_ring);
}
drop_n_restore:
if (skb_head != skb->data && skb_shared(skb)) {
skb->data = skb_head;
skb->len = skb_len;
}
drop:
if (!is_drop_n_account)
consume_skb(skb);
else
kfree_skb(skb);
return 0;
drop_n_account:
spin_unlock(&sk->sk_receive_queue.lock);
atomic_inc(&po->tp_drops);
is_drop_n_account = true;
sk->sk_data_ready(sk);
kfree_skb(copy_skb);
goto drop_n_restore;
}
static void tpacket_destruct_skb(struct sk_buff *skb)
{
struct packet_sock *po = pkt_sk(skb->sk);
if (likely(po->tx_ring.pg_vec)) {
void *ph;
__u32 ts;
ph = skb_zcopy_get_nouarg(skb);
packet_dec_pending(&po->tx_ring);
ts = __packet_set_timestamp(po, ph, skb);
__packet_set_status(po, ph, TP_STATUS_AVAILABLE | ts);
if (!packet_read_pending(&po->tx_ring))
complete(&po->skb_completion);
}
sock_wfree(skb);
}
static int __packet_snd_vnet_parse(struct virtio_net_hdr *vnet_hdr, size_t len)
{
if ((vnet_hdr->flags & VIRTIO_NET_HDR_F_NEEDS_CSUM) &&
(__virtio16_to_cpu(vio_le(), vnet_hdr->csum_start) +
__virtio16_to_cpu(vio_le(), vnet_hdr->csum_offset) + 2 >
__virtio16_to_cpu(vio_le(), vnet_hdr->hdr_len)))
vnet_hdr->hdr_len = __cpu_to_virtio16(vio_le(),
__virtio16_to_cpu(vio_le(), vnet_hdr->csum_start) +
__virtio16_to_cpu(vio_le(), vnet_hdr->csum_offset) + 2);
if (__virtio16_to_cpu(vio_le(), vnet_hdr->hdr_len) > len)
return -EINVAL;
return 0;
}
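/* Worked example: for a TCP/IPv4 frame with VIRTIO_NET_HDR_F_NEEDS_CSUM,
 * csum_start is typically 34 (14-byte Ethernet + 20-byte IPv4 header) and
 * csum_offset is 16 (offset of the checksum field within the TCP header),
 * so hdr_len is raised to at least 34 + 16 + 2 = 52 and the frame itself
 * must be at least that long, otherwise the header is rejected.
 */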
static int packet_snd_vnet_parse(struct msghdr *msg, size_t *len,
struct virtio_net_hdr *vnet_hdr, int vnet_hdr_sz)
{
int ret;
if (*len < vnet_hdr_sz)
return -EINVAL;
*len -= vnet_hdr_sz;
if (!copy_from_iter_full(vnet_hdr, sizeof(*vnet_hdr), &msg->msg_iter))
return -EFAULT;
ret = __packet_snd_vnet_parse(vnet_hdr, *len);
if (ret)
return ret;
/* move iter to point to the start of mac header */
if (vnet_hdr_sz != sizeof(struct virtio_net_hdr))
iov_iter_advance(&msg->msg_iter, vnet_hdr_sz - sizeof(struct virtio_net_hdr));
return 0;
}
static int tpacket_fill_skb(struct packet_sock *po, struct sk_buff *skb,
void *frame, struct net_device *dev, void *data, int tp_len,
__be16 proto, unsigned char *addr, int hlen, int copylen,
const struct sockcm_cookie *sockc)
{
union tpacket_uhdr ph;
int to_write, offset, len, nr_frags, len_max;
struct socket *sock = po->sk.sk_socket;
struct page *page;
int err;
ph.raw = frame;
skb->protocol = proto;
skb->dev = dev;
skb->priority = READ_ONCE(po->sk.sk_priority);
skb->mark = READ_ONCE(po->sk.sk_mark);
skb->tstamp = sockc->transmit_time;
skb_setup_tx_timestamp(skb, sockc->tsflags);
skb_zcopy_set_nouarg(skb, ph.raw);
skb_reserve(skb, hlen);
skb_reset_network_header(skb);
to_write = tp_len;
if (sock->type == SOCK_DGRAM) {
err = dev_hard_header(skb, dev, ntohs(proto), addr,
NULL, tp_len);
if (unlikely(err < 0))
return -EINVAL;
} else if (copylen) {
int hdrlen = min_t(int, copylen, tp_len);
skb_push(skb, dev->hard_header_len);
skb_put(skb, copylen - dev->hard_header_len);
err = skb_store_bits(skb, 0, data, hdrlen);
if (unlikely(err))
return err;
if (!dev_validate_header(dev, skb->data, hdrlen))
return -EINVAL;
data += hdrlen;
to_write -= hdrlen;
}
offset = offset_in_page(data);
len_max = PAGE_SIZE - offset;
len = ((to_write > len_max) ? len_max : to_write);
skb->data_len = to_write;
skb->len += to_write;
skb->truesize += to_write;
refcount_add(to_write, &po->sk.sk_wmem_alloc);
while (likely(to_write)) {
nr_frags = skb_shinfo(skb)->nr_frags;
if (unlikely(nr_frags >= MAX_SKB_FRAGS)) {
pr_err("Packet exceed the number of skb frags(%u)\n",
(unsigned int)MAX_SKB_FRAGS);
return -EFAULT;
}
page = pgv_to_page(data);
data += len;
flush_dcache_page(page);
get_page(page);
skb_fill_page_desc(skb, nr_frags, page, offset, len);
to_write -= len;
offset = 0;
len_max = PAGE_SIZE;
len = ((to_write > len_max) ? len_max : to_write);
}
packet_parse_headers(skb, sock);
return tp_len;
}
static int tpacket_parse_header(struct packet_sock *po, void *frame,
int size_max, void **data)
{
union tpacket_uhdr ph;
int tp_len, off;
ph.raw = frame;
switch (po->tp_version) {
case TPACKET_V3:
if (ph.h3->tp_next_offset != 0) {
pr_warn_once("variable sized slot not supported");
return -EINVAL;
}
tp_len = ph.h3->tp_len;
break;
case TPACKET_V2:
tp_len = ph.h2->tp_len;
break;
default:
tp_len = ph.h1->tp_len;
break;
}
if (unlikely(tp_len > size_max)) {
pr_err("packet size is too long (%d > %d)\n", tp_len, size_max);
return -EMSGSIZE;
}
if (unlikely(packet_sock_flag(po, PACKET_SOCK_TX_HAS_OFF))) {
int off_min, off_max;
off_min = po->tp_hdrlen - sizeof(struct sockaddr_ll);
off_max = po->tx_ring.frame_size - tp_len;
if (po->sk.sk_type == SOCK_DGRAM) {
switch (po->tp_version) {
case TPACKET_V3:
off = ph.h3->tp_net;
break;
case TPACKET_V2:
off = ph.h2->tp_net;
break;
default:
off = ph.h1->tp_net;
break;
}
} else {
switch (po->tp_version) {
case TPACKET_V3:
off = ph.h3->tp_mac;
break;
case TPACKET_V2:
off = ph.h2->tp_mac;
break;
default:
off = ph.h1->tp_mac;
break;
}
}
if (unlikely((off < off_min) || (off_max < off)))
return -EINVAL;
} else {
off = po->tp_hdrlen - sizeof(struct sockaddr_ll);
}
*data = frame + off;
return tp_len;
}
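/* By default the payload of a TX ring slot starts at the fixed offset
 * computed in the else branch above. If the socket enabled
 * PACKET_TX_HAS_OFF, e.g.:
 *
 *	int one = 1;
 *	setsockopt(fd, SOL_PACKET, PACKET_TX_HAS_OFF, &one, sizeof(one));
 *
 * then userspace supplies the start offset per frame in tp_net
 * (SOCK_DGRAM) or tp_mac (SOCK_RAW), and tpacket_parse_header()
 * range-checks it against the slot size.
 */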
static int tpacket_snd(struct packet_sock *po, struct msghdr *msg)
{
struct sk_buff *skb = NULL;
struct net_device *dev;
struct virtio_net_hdr *vnet_hdr = NULL;
struct sockcm_cookie sockc;
__be16 proto;
int err, reserve = 0;
void *ph;
DECLARE_SOCKADDR(struct sockaddr_ll *, saddr, msg->msg_name);
bool need_wait = !(msg->msg_flags & MSG_DONTWAIT);
int vnet_hdr_sz = READ_ONCE(po->vnet_hdr_sz);
unsigned char *addr = NULL;
int tp_len, size_max;
void *data;
int len_sum = 0;
int status = TP_STATUS_AVAILABLE;
int hlen, tlen, copylen = 0;
long timeo = 0;
mutex_lock(&po->pg_vec_lock);
/* packet_sendmsg() check on tx_ring.pg_vec was lockless,
* so we need to confirm it under protection of pg_vec_lock.
*/
if (unlikely(!po->tx_ring.pg_vec)) {
err = -EBUSY;
goto out;
}
if (likely(saddr == NULL)) {
dev = packet_cached_dev_get(po);
proto = READ_ONCE(po->num);
} else {
err = -EINVAL;
if (msg->msg_namelen < sizeof(struct sockaddr_ll))
goto out;
if (msg->msg_namelen < (saddr->sll_halen
+ offsetof(struct sockaddr_ll,
sll_addr)))
goto out;
proto = saddr->sll_protocol;
dev = dev_get_by_index(sock_net(&po->sk), saddr->sll_ifindex);
if (po->sk.sk_socket->type == SOCK_DGRAM) {
if (dev && msg->msg_namelen < dev->addr_len +
offsetof(struct sockaddr_ll, sll_addr))
goto out_put;
addr = saddr->sll_addr;
}
}
err = -ENXIO;
if (unlikely(dev == NULL))
goto out;
err = -ENETDOWN;
if (unlikely(!(dev->flags & IFF_UP)))
goto out_put;
sockcm_init(&sockc, &po->sk);
if (msg->msg_controllen) {
err = sock_cmsg_send(&po->sk, msg, &sockc);
if (unlikely(err))
goto out_put;
}
if (po->sk.sk_socket->type == SOCK_RAW)
reserve = dev->hard_header_len;
size_max = po->tx_ring.frame_size
- (po->tp_hdrlen - sizeof(struct sockaddr_ll));
if ((size_max > dev->mtu + reserve + VLAN_HLEN) && !vnet_hdr_sz)
size_max = dev->mtu + reserve + VLAN_HLEN;
reinit_completion(&po->skb_completion);
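/* Walk the TX ring: for every frame marked TP_STATUS_SEND_REQUEST, build
* an skb around the ring pages and hand it to the device, waiting on
* skb_completion when blocking is allowed.
*/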
do {
ph = packet_current_frame(po, &po->tx_ring,
TP_STATUS_SEND_REQUEST);
if (unlikely(ph == NULL)) {
if (need_wait && skb) {
timeo = sock_sndtimeo(&po->sk, msg->msg_flags & MSG_DONTWAIT);
timeo = wait_for_completion_interruptible_timeout(&po->skb_completion, timeo);
if (timeo <= 0) {
err = !timeo ? -ETIMEDOUT : -ERESTARTSYS;
goto out_put;
}
}
/* check for additional frames */
continue;
}
skb = NULL;
tp_len = tpacket_parse_header(po, ph, size_max, &data);
if (tp_len < 0)
goto tpacket_error;
status = TP_STATUS_SEND_REQUEST;
hlen = LL_RESERVED_SPACE(dev);
tlen = dev->needed_tailroom;
if (vnet_hdr_sz) {
vnet_hdr = data;
data += vnet_hdr_sz;
tp_len -= vnet_hdr_sz;
if (tp_len < 0 ||
__packet_snd_vnet_parse(vnet_hdr, tp_len)) {
tp_len = -EINVAL;
goto tpacket_error;
}
copylen = __virtio16_to_cpu(vio_le(),
vnet_hdr->hdr_len);
}
copylen = max_t(int, copylen, dev->hard_header_len);
skb = sock_alloc_send_skb(&po->sk,
hlen + tlen + sizeof(struct sockaddr_ll) +
(copylen - dev->hard_header_len),
!need_wait, &err);
if (unlikely(skb == NULL)) {
/* we assume the socket was initially writeable ... */
if (likely(len_sum > 0))
err = len_sum;
goto out_status;
}
tp_len = tpacket_fill_skb(po, skb, ph, dev, data, tp_len, proto,
addr, hlen, copylen, &sockc);
if (likely(tp_len >= 0) &&
tp_len > dev->mtu + reserve &&
!vnet_hdr_sz &&
!packet_extra_vlan_len_allowed(dev, skb))
tp_len = -EMSGSIZE;
if (unlikely(tp_len < 0)) {
tpacket_error:
if (packet_sock_flag(po, PACKET_SOCK_TP_LOSS)) {
__packet_set_status(po, ph,
TP_STATUS_AVAILABLE);
packet_increment_head(&po->tx_ring);
kfree_skb(skb);
continue;
} else {
status = TP_STATUS_WRONG_FORMAT;
err = tp_len;
goto out_status;
}
}
if (vnet_hdr_sz) {
if (virtio_net_hdr_to_skb(skb, vnet_hdr, vio_le())) {
tp_len = -EINVAL;
goto tpacket_error;
}
virtio_net_hdr_set_proto(skb, vnet_hdr);
}
skb->destructor = tpacket_destruct_skb;
__packet_set_status(po, ph, TP_STATUS_SENDING);
packet_inc_pending(&po->tx_ring);
status = TP_STATUS_SEND_REQUEST;
err = packet_xmit(po, skb);
if (unlikely(err != 0)) {
if (err > 0)
err = net_xmit_errno(err);
if (err && __packet_get_status(po, ph) ==
TP_STATUS_AVAILABLE) {
/* skb was destructed already */
skb = NULL;
goto out_status;
}
/*
* skb was dropped but not destructed yet;
* let's treat it like congestion or err < 0
*/
err = 0;
}
packet_increment_head(&po->tx_ring);
len_sum += tp_len;
} while (likely((ph != NULL) ||
/* Note: packet_read_pending() might be slow if we have
* to call it as it's per_cpu variable, but in fast-path
* we already short-circuit the loop with the first
* condition, and luckily don't have to go that path
* anyway.
*/
(need_wait && packet_read_pending(&po->tx_ring))));
err = len_sum;
goto out_put;
out_status:
__packet_set_status(po, ph, status);
kfree_skb(skb);
out_put:
dev_put(dev);
out:
mutex_unlock(&po->pg_vec_lock);
return err;
}
static struct sk_buff *packet_alloc_skb(struct sock *sk, size_t prepad,
size_t reserve, size_t len,
size_t linear, int noblock,
int *err)
{
struct sk_buff *skb;
/* Under a page? Don't bother with paged skb. */
if (prepad + len < PAGE_SIZE || !linear)
linear = len;
if (len - linear > MAX_SKB_FRAGS * (PAGE_SIZE << PAGE_ALLOC_COSTLY_ORDER))
linear = len - MAX_SKB_FRAGS * (PAGE_SIZE << PAGE_ALLOC_COSTLY_ORDER);
skb = sock_alloc_send_pskb(sk, prepad + linear, len - linear, noblock,
err, PAGE_ALLOC_COSTLY_ORDER);
if (!skb)
return NULL;
skb_reserve(skb, reserve);
skb_put(skb, linear);
skb->data_len = len - linear;
skb->len += len - linear;
return skb;
}
static int packet_snd(struct socket *sock, struct msghdr *msg, size_t len)
{
struct sock *sk = sock->sk;
DECLARE_SOCKADDR(struct sockaddr_ll *, saddr, msg->msg_name);
struct sk_buff *skb;
struct net_device *dev;
__be16 proto;
unsigned char *addr = NULL;
int err, reserve = 0;
struct sockcm_cookie sockc;
struct virtio_net_hdr vnet_hdr = { 0 };
int offset = 0;
struct packet_sock *po = pkt_sk(sk);
int vnet_hdr_sz = READ_ONCE(po->vnet_hdr_sz);
int hlen, tlen, linear;
int extra_len = 0;
/*
* Get and verify the address.
*/
if (likely(saddr == NULL)) {
dev = packet_cached_dev_get(po);
proto = READ_ONCE(po->num);
} else {
err = -EINVAL;
if (msg->msg_namelen < sizeof(struct sockaddr_ll))
goto out;
if (msg->msg_namelen < (saddr->sll_halen + offsetof(struct sockaddr_ll, sll_addr)))
goto out;
proto = saddr->sll_protocol;
dev = dev_get_by_index(sock_net(sk), saddr->sll_ifindex);
if (sock->type == SOCK_DGRAM) {
if (dev && msg->msg_namelen < dev->addr_len +
offsetof(struct sockaddr_ll, sll_addr))
goto out_unlock;
addr = saddr->sll_addr;
}
}
err = -ENXIO;
if (unlikely(dev == NULL))
goto out_unlock;
err = -ENETDOWN;
if (unlikely(!(dev->flags & IFF_UP)))
goto out_unlock;
sockcm_init(&sockc, sk);
sockc.mark = READ_ONCE(sk->sk_mark);
if (msg->msg_controllen) {
err = sock_cmsg_send(sk, msg, &sockc);
if (unlikely(err))
goto out_unlock;
}
if (sock->type == SOCK_RAW)
reserve = dev->hard_header_len;
if (vnet_hdr_sz) {
err = packet_snd_vnet_parse(msg, &len, &vnet_hdr, vnet_hdr_sz);
if (err)
goto out_unlock;
}
if (unlikely(sock_flag(sk, SOCK_NOFCS))) {
if (!netif_supports_nofcs(dev)) {
err = -EPROTONOSUPPORT;
goto out_unlock;
}
extra_len = 4; /* We're doing our own CRC */
}
err = -EMSGSIZE;
if (!vnet_hdr.gso_type &&
(len > dev->mtu + reserve + VLAN_HLEN + extra_len))
goto out_unlock;
err = -ENOBUFS;
hlen = LL_RESERVED_SPACE(dev);
tlen = dev->needed_tailroom;
linear = __virtio16_to_cpu(vio_le(), vnet_hdr.hdr_len);
linear = max(linear, min_t(int, len, dev->hard_header_len));
skb = packet_alloc_skb(sk, hlen + tlen, hlen, len, linear,
msg->msg_flags & MSG_DONTWAIT, &err);
if (skb == NULL)
goto out_unlock;
skb_reset_network_header(skb);
err = -EINVAL;
if (sock->type == SOCK_DGRAM) {
offset = dev_hard_header(skb, dev, ntohs(proto), addr, NULL, len);
if (unlikely(offset < 0))
goto out_free;
} else if (reserve) {
skb_reserve(skb, -reserve);
if (len < reserve + sizeof(struct ipv6hdr) &&
dev->min_header_len != dev->hard_header_len)
skb_reset_network_header(skb);
}
/* Returns -EFAULT on error */
err = skb_copy_datagram_from_iter(skb, offset, &msg->msg_iter, len);
if (err)
goto out_free;
if ((sock->type == SOCK_RAW &&
!dev_validate_header(dev, skb->data, len)) || !skb->len) {
err = -EINVAL;
goto out_free;
}
skb_setup_tx_timestamp(skb, sockc.tsflags);
if (!vnet_hdr.gso_type && (len > dev->mtu + reserve + extra_len) &&
!packet_extra_vlan_len_allowed(dev, skb)) {
err = -EMSGSIZE;
goto out_free;
}
skb->protocol = proto;
skb->dev = dev;
skb->priority = READ_ONCE(sk->sk_priority);
skb->mark = sockc.mark;
skb->tstamp = sockc.transmit_time;
if (unlikely(extra_len == 4))
skb->no_fcs = 1;
packet_parse_headers(skb, sock);
if (vnet_hdr_sz) {
err = virtio_net_hdr_to_skb(skb, &vnet_hdr, vio_le());
if (err)
goto out_free;
len += vnet_hdr_sz;
virtio_net_hdr_set_proto(skb, &vnet_hdr);
}
err = packet_xmit(po, skb);
if (unlikely(err != 0)) {
if (err > 0)
err = net_xmit_errno(err);
if (err)
goto out_unlock;
}
dev_put(dev);
return len;
out_free:
kfree_skb(skb);
out_unlock:
dev_put(dev);
out:
return err;
}
static int packet_sendmsg(struct socket *sock, struct msghdr *msg, size_t len)
{
struct sock *sk = sock->sk;
struct packet_sock *po = pkt_sk(sk);
/* Reading tx_ring.pg_vec without holding pg_vec_lock is racy.
* tpacket_snd() will redo the check safely.
*/
if (data_race(po->tx_ring.pg_vec))
return tpacket_snd(po, msg);
return packet_snd(sock, msg, len);
}
/*
* Close a PACKET socket. This is fairly simple. We immediately go
* to 'closed' state and remove our protocol entry in the device list.
*/
static int packet_release(struct socket *sock)
{
struct sock *sk = sock->sk;
struct packet_sock *po;
struct packet_fanout *f;
struct net *net;
union tpacket_req_u req_u;
if (!sk)
return 0;
net = sock_net(sk);
po = pkt_sk(sk);
mutex_lock(&net->packet.sklist_lock);
sk_del_node_init_rcu(sk);
mutex_unlock(&net->packet.sklist_lock);
sock_prot_inuse_add(net, sk->sk_prot, -1);
spin_lock(&po->bind_lock);
unregister_prot_hook(sk, false);
packet_cached_dev_reset(po);
if (po->prot_hook.dev) {
netdev_put(po->prot_hook.dev, &po->prot_hook.dev_tracker);
po->prot_hook.dev = NULL;
}
spin_unlock(&po->bind_lock);
packet_flush_mclist(sk);
lock_sock(sk);
if (po->rx_ring.pg_vec) {
memset(&req_u, 0, sizeof(req_u));
packet_set_ring(sk, &req_u, 1, 0);
}
if (po->tx_ring.pg_vec) {
memset(&req_u, 0, sizeof(req_u));
packet_set_ring(sk, &req_u, 1, 1);
}
release_sock(sk);
f = fanout_release(sk);
synchronize_net();
kfree(po->rollover);
if (f) {
fanout_release_data(f);
kvfree(f);
}
/*
* Now the socket is dead. No more input will appear.
*/
sock_orphan(sk);
sock->sk = NULL;
/* Purge queues */
skb_queue_purge(&sk->sk_receive_queue);
packet_free_pending(po);
sock_put(sk);
return 0;
}
/*
* Attach a packet hook.
*/
static int packet_do_bind(struct sock *sk, const char *name, int ifindex,
__be16 proto)
{
struct packet_sock *po = pkt_sk(sk);
struct net_device *dev = NULL;
bool unlisted = false;
bool need_rehook;
int ret = 0;
lock_sock(sk);
spin_lock(&po->bind_lock);
if (!proto)
proto = po->num;
rcu_read_lock();
if (po->fanout) {
ret = -EINVAL;
goto out_unlock;
}
if (name) {
dev = dev_get_by_name_rcu(sock_net(sk), name);
if (!dev) {
ret = -ENODEV;
goto out_unlock;
}
} else if (ifindex) {
dev = dev_get_by_index_rcu(sock_net(sk), ifindex);
if (!dev) {
ret = -ENODEV;
goto out_unlock;
}
}
need_rehook = po->prot_hook.type != proto || po->prot_hook.dev != dev;
if (need_rehook) {
dev_hold(dev);
if (packet_sock_flag(po, PACKET_SOCK_RUNNING)) {
rcu_read_unlock();
/* prevents packet_notifier() from calling
* register_prot_hook()
*/
WRITE_ONCE(po->num, 0);
__unregister_prot_hook(sk, true);
rcu_read_lock();
if (dev)
unlisted = !dev_get_by_index_rcu(sock_net(sk),
dev->ifindex);
}
BUG_ON(packet_sock_flag(po, PACKET_SOCK_RUNNING));
WRITE_ONCE(po->num, proto);
po->prot_hook.type = proto;
netdev_put(po->prot_hook.dev, &po->prot_hook.dev_tracker);
if (unlikely(unlisted)) {
po->prot_hook.dev = NULL;
WRITE_ONCE(po->ifindex, -1);
packet_cached_dev_reset(po);
} else {
netdev_hold(dev, &po->prot_hook.dev_tracker,
GFP_ATOMIC);
po->prot_hook.dev = dev;
WRITE_ONCE(po->ifindex, dev ? dev->ifindex : 0);
packet_cached_dev_assign(po, dev);
}
dev_put(dev);
}
if (proto == 0 || !need_rehook)
goto out_unlock;
if (!unlisted && (!dev || (dev->flags & IFF_UP))) {
register_prot_hook(sk);
} else {
sk->sk_err = ENETDOWN;
if (!sock_flag(sk, SOCK_DEAD))
sk_error_report(sk);
}
out_unlock:
rcu_read_unlock();
spin_unlock(&po->bind_lock);
release_sock(sk);
return ret;
}
/*
* Bind a packet socket to a device
*/
static int packet_bind_spkt(struct socket *sock, struct sockaddr *uaddr,
int addr_len)
{
struct sock *sk = sock->sk;
char name[sizeof(uaddr->sa_data_min) + 1];
/*
* Check legality
*/
if (addr_len != sizeof(struct sockaddr))
return -EINVAL;
/* uaddr->sa_data comes from userspace; it's not guaranteed to be
* zero-terminated.
*/
memcpy(name, uaddr->sa_data, sizeof(uaddr->sa_data_min));
name[sizeof(uaddr->sa_data_min)] = 0;
return packet_do_bind(sk, name, 0, 0);
}
static int packet_bind(struct socket *sock, struct sockaddr *uaddr, int addr_len)
{
struct sockaddr_ll *sll = (struct sockaddr_ll *)uaddr;
struct sock *sk = sock->sk;
/*
* Check legality
*/
if (addr_len < sizeof(struct sockaddr_ll))
return -EINVAL;
if (sll->sll_family != AF_PACKET)
return -EINVAL;
return packet_do_bind(sk, NULL, sll->sll_ifindex, sll->sll_protocol);
}
static struct proto packet_proto = {
.name = "PACKET",
.owner = THIS_MODULE,
.obj_size = sizeof(struct packet_sock),
};
/*
* Create a packet socket.
*/
static int packet_create(struct net *net, struct socket *sock, int protocol,
int kern)
{
struct sock *sk;
struct packet_sock *po;
__be16 proto = (__force __be16)protocol; /* weird, but documented */
int err;
if (!ns_capable(net->user_ns, CAP_NET_RAW))
return -EPERM;
if (sock->type != SOCK_DGRAM && sock->type != SOCK_RAW &&
sock->type != SOCK_PACKET)
return -ESOCKTNOSUPPORT;
sock->state = SS_UNCONNECTED;
err = -ENOBUFS;
sk = sk_alloc(net, PF_PACKET, GFP_KERNEL, &packet_proto, kern);
if (sk == NULL)
goto out;
sock->ops = &packet_ops;
if (sock->type == SOCK_PACKET)
sock->ops = &packet_ops_spkt;
sock_init_data(sock, sk);
po = pkt_sk(sk);
init_completion(&po->skb_completion);
sk->sk_family = PF_PACKET;
po->num = proto;
err = packet_alloc_pending(po);
if (err)
goto out2;
packet_cached_dev_reset(po);
sk->sk_destruct = packet_sock_destruct;
/*
* Attach a protocol block
*/
spin_lock_init(&po->bind_lock);
mutex_init(&po->pg_vec_lock);
po->rollover = NULL;
po->prot_hook.func = packet_rcv;
if (sock->type == SOCK_PACKET)
po->prot_hook.func = packet_rcv_spkt;
po->prot_hook.af_packet_priv = sk;
po->prot_hook.af_packet_net = sock_net(sk);
if (proto) {
po->prot_hook.type = proto;
__register_prot_hook(sk);
}
mutex_lock(&net->packet.sklist_lock);
sk_add_node_tail_rcu(sk, &net->packet.sklist);
mutex_unlock(&net->packet.sklist_lock);
sock_prot_inuse_add(net, &packet_proto, 1);
return 0;
out2:
sk_free(sk);
out:
return err;
}
/*
* Pull a packet from our receive queue and hand it to the user.
* If necessary we block.
*/
static int packet_recvmsg(struct socket *sock, struct msghdr *msg, size_t len,
int flags)
{
struct sock *sk = sock->sk;
struct sk_buff *skb;
int copied, err;
int vnet_hdr_len = READ_ONCE(pkt_sk(sk)->vnet_hdr_sz);
unsigned int origlen = 0;
err = -EINVAL;
if (flags & ~(MSG_PEEK|MSG_DONTWAIT|MSG_TRUNC|MSG_CMSG_COMPAT|MSG_ERRQUEUE))
goto out;
#if 0
/* What error should we return now? EUNATTACH? */
if (pkt_sk(sk)->ifindex < 0)
return -ENODEV;
#endif
if (flags & MSG_ERRQUEUE) {
err = sock_recv_errqueue(sk, msg, len,
SOL_PACKET, PACKET_TX_TIMESTAMP);
goto out;
}
/*
* Call the generic datagram receiver. This handles all sorts
* of horrible races and re-entrancy so we can forget about it
* in the protocol layers.
*
* Now it will return ENETDOWN if the device has just gone down,
* but then it will block.
*/
skb = skb_recv_datagram(sk, flags, &err);
/*
* An error occurred, so return it. Because skb_recv_datagram()
* handles the blocking for us, we don't need to see or worry
* about blocking retries.
*/
if (skb == NULL)
goto out;
packet_rcv_try_clear_pressure(pkt_sk(sk));
if (vnet_hdr_len) {
err = packet_rcv_vnet(msg, skb, &len, vnet_hdr_len);
if (err)
goto out_free;
}
/* You lose any data beyond the buffer you gave. If this worries
* a user program, it can ask the device for its MTU
* anyway.
*/
copied = skb->len;
if (copied > len) {
copied = len;
msg->msg_flags |= MSG_TRUNC;
}
err = skb_copy_datagram_msg(skb, 0, msg, copied);
if (err)
goto out_free;
if (sock->type != SOCK_PACKET) {
struct sockaddr_ll *sll = &PACKET_SKB_CB(skb)->sa.ll;
/* Original length was stored in sockaddr_ll fields */
origlen = PACKET_SKB_CB(skb)->sa.origlen;
sll->sll_family = AF_PACKET;
sll->sll_protocol = skb->protocol;
}
sock_recv_cmsgs(msg, sk, skb);
if (msg->msg_name) {
const size_t max_len = min(sizeof(skb->cb),
sizeof(struct sockaddr_storage));
int copy_len;
/* If the address length field is there to be filled
* in, we fill it in now.
*/
if (sock->type == SOCK_PACKET) {
__sockaddr_check_size(sizeof(struct sockaddr_pkt));
msg->msg_namelen = sizeof(struct sockaddr_pkt);
copy_len = msg->msg_namelen;
} else {
struct sockaddr_ll *sll = &PACKET_SKB_CB(skb)->sa.ll;
msg->msg_namelen = sll->sll_halen +
offsetof(struct sockaddr_ll, sll_addr);
copy_len = msg->msg_namelen;
if (msg->msg_namelen < sizeof(struct sockaddr_ll)) {
memset(msg->msg_name +
offsetof(struct sockaddr_ll, sll_addr),
0, sizeof(sll->sll_addr));
msg->msg_namelen = sizeof(struct sockaddr_ll);
}
}
if (WARN_ON_ONCE(copy_len > max_len)) {
copy_len = max_len;
msg->msg_namelen = copy_len;
}
memcpy(msg->msg_name, &PACKET_SKB_CB(skb)->sa, copy_len);
}
if (packet_sock_flag(pkt_sk(sk), PACKET_SOCK_AUXDATA)) {
struct tpacket_auxdata aux;
aux.tp_status = TP_STATUS_USER;
if (skb->ip_summed == CHECKSUM_PARTIAL)
aux.tp_status |= TP_STATUS_CSUMNOTREADY;
else if (skb->pkt_type != PACKET_OUTGOING &&
skb_csum_unnecessary(skb))
aux.tp_status |= TP_STATUS_CSUM_VALID;
if (skb_is_gso(skb) && skb_is_gso_tcp(skb))
aux.tp_status |= TP_STATUS_GSO_TCP;
aux.tp_len = origlen;
aux.tp_snaplen = skb->len;
aux.tp_mac = 0;
aux.tp_net = skb_network_offset(skb);
if (skb_vlan_tag_present(skb)) {
aux.tp_vlan_tci = skb_vlan_tag_get(skb);
aux.tp_vlan_tpid = ntohs(skb->vlan_proto);
aux.tp_status |= TP_STATUS_VLAN_VALID | TP_STATUS_VLAN_TPID_VALID;
} else {
aux.tp_vlan_tci = 0;
aux.tp_vlan_tpid = 0;
}
put_cmsg(msg, SOL_PACKET, PACKET_AUXDATA, sizeof(aux), &aux);
}
/*
* Free or return the buffer as appropriate. Again this
* hides all the races and re-entrancy issues from us.
*/
err = vnet_hdr_len + ((flags&MSG_TRUNC) ? skb->len : copied);
out_free:
skb_free_datagram(sk, skb);
out:
return err;
}
static int packet_getname_spkt(struct socket *sock, struct sockaddr *uaddr,
int peer)
{
struct net_device *dev;
struct sock *sk = sock->sk;
if (peer)
return -EOPNOTSUPP;
uaddr->sa_family = AF_PACKET;
memset(uaddr->sa_data, 0, sizeof(uaddr->sa_data_min));
rcu_read_lock();
dev = dev_get_by_index_rcu(sock_net(sk), READ_ONCE(pkt_sk(sk)->ifindex));
if (dev)
strscpy(uaddr->sa_data, dev->name, sizeof(uaddr->sa_data_min));
rcu_read_unlock();
return sizeof(*uaddr);
}
static int packet_getname(struct socket *sock, struct sockaddr *uaddr,
int peer)
{
struct net_device *dev;
struct sock *sk = sock->sk;
struct packet_sock *po = pkt_sk(sk);
DECLARE_SOCKADDR(struct sockaddr_ll *, sll, uaddr);
int ifindex;
if (peer)
return -EOPNOTSUPP;
ifindex = READ_ONCE(po->ifindex);
sll->sll_family = AF_PACKET;
sll->sll_ifindex = ifindex;
sll->sll_protocol = READ_ONCE(po->num);
sll->sll_pkttype = 0;
rcu_read_lock();
dev = dev_get_by_index_rcu(sock_net(sk), ifindex);
if (dev) {
sll->sll_hatype = dev->type;
sll->sll_halen = dev->addr_len;
memcpy(sll->sll_addr_flex, dev->dev_addr, dev->addr_len);
} else {
sll->sll_hatype = 0; /* Bad: we have no ARPHRD_UNSPEC */
sll->sll_halen = 0;
}
rcu_read_unlock();
return offsetof(struct sockaddr_ll, sll_addr) + sll->sll_halen;
}
static int packet_dev_mc(struct net_device *dev, struct packet_mclist *i,
int what)
{
switch (i->type) {
case PACKET_MR_MULTICAST:
if (i->alen != dev->addr_len)
return -EINVAL;
if (what > 0)
return dev_mc_add(dev, i->addr);
else
return dev_mc_del(dev, i->addr);
break;
case PACKET_MR_PROMISC:
return dev_set_promiscuity(dev, what);
case PACKET_MR_ALLMULTI:
return dev_set_allmulti(dev, what);
case PACKET_MR_UNICAST:
if (i->alen != dev->addr_len)
return -EINVAL;
if (what > 0)
return dev_uc_add(dev, i->addr);
else
return dev_uc_del(dev, i->addr);
break;
default:
break;
}
return 0;
}
static void packet_dev_mclist_delete(struct net_device *dev,
struct packet_mclist **mlp)
{
struct packet_mclist *ml;
while ((ml = *mlp) != NULL) {
if (ml->ifindex == dev->ifindex) {
packet_dev_mc(dev, ml, -1);
*mlp = ml->next;
kfree(ml);
} else
mlp = &ml->next;
}
}
static int packet_mc_add(struct sock *sk, struct packet_mreq_max *mreq)
{
struct packet_sock *po = pkt_sk(sk);
struct packet_mclist *ml, *i;
struct net_device *dev;
int err;
rtnl_lock();
err = -ENODEV;
dev = __dev_get_by_index(sock_net(sk), mreq->mr_ifindex);
if (!dev)
goto done;
err = -EINVAL;
if (mreq->mr_alen > dev->addr_len)
goto done;
err = -ENOBUFS;
i = kmalloc(sizeof(*i), GFP_KERNEL);
if (i == NULL)
goto done;
err = 0;
for (ml = po->mclist; ml; ml = ml->next) {
if (ml->ifindex == mreq->mr_ifindex &&
ml->type == mreq->mr_type &&
ml->alen == mreq->mr_alen &&
memcmp(ml->addr, mreq->mr_address, ml->alen) == 0) {
ml->count++;
/* Free the new element ... */
kfree(i);
goto done;
}
}
i->type = mreq->mr_type;
i->ifindex = mreq->mr_ifindex;
i->alen = mreq->mr_alen;
memcpy(i->addr, mreq->mr_address, i->alen);
memset(i->addr + i->alen, 0, sizeof(i->addr) - i->alen);
i->count = 1;
i->next = po->mclist;
po->mclist = i;
err = packet_dev_mc(dev, i, 1);
if (err) {
po->mclist = i->next;
kfree(i);
}
done:
rtnl_unlock();
return err;
}
static int packet_mc_drop(struct sock *sk, struct packet_mreq_max *mreq)
{
struct packet_mclist *ml, **mlp;
rtnl_lock();
for (mlp = &pkt_sk(sk)->mclist; (ml = *mlp) != NULL; mlp = &ml->next) {
if (ml->ifindex == mreq->mr_ifindex &&
ml->type == mreq->mr_type &&
ml->alen == mreq->mr_alen &&
memcmp(ml->addr, mreq->mr_address, ml->alen) == 0) {
if (--ml->count == 0) {
struct net_device *dev;
*mlp = ml->next;
dev = __dev_get_by_index(sock_net(sk), ml->ifindex);
if (dev)
packet_dev_mc(dev, ml, -1);
kfree(ml);
}
break;
}
}
rtnl_unlock();
return 0;
}
static void packet_flush_mclist(struct sock *sk)
{
struct packet_sock *po = pkt_sk(sk);
struct packet_mclist *ml;
if (!po->mclist)
return;
rtnl_lock();
while ((ml = po->mclist) != NULL) {
struct net_device *dev;
po->mclist = ml->next;
dev = __dev_get_by_index(sock_net(sk), ml->ifindex);
if (dev != NULL)
packet_dev_mc(dev, ml, -1);
kfree(ml);
}
rtnl_unlock();
}
static int
packet_setsockopt(struct socket *sock, int level, int optname, sockptr_t optval,
unsigned int optlen)
{
struct sock *sk = sock->sk;
struct packet_sock *po = pkt_sk(sk);
int ret;
if (level != SOL_PACKET)
return -ENOPROTOOPT;
switch (optname) {
case PACKET_ADD_MEMBERSHIP:
case PACKET_DROP_MEMBERSHIP:
{
struct packet_mreq_max mreq;
int len = optlen;
memset(&mreq, 0, sizeof(mreq));
if (len < sizeof(struct packet_mreq))
return -EINVAL;
if (len > sizeof(mreq))
len = sizeof(mreq);
if (copy_from_sockptr(&mreq, optval, len))
return -EFAULT;
if (len < (mreq.mr_alen + offsetof(struct packet_mreq, mr_address)))
return -EINVAL;
if (optname == PACKET_ADD_MEMBERSHIP)
ret = packet_mc_add(sk, &mreq);
else
ret = packet_mc_drop(sk, &mreq);
return ret;
}
case PACKET_RX_RING:
case PACKET_TX_RING:
{
union tpacket_req_u req_u;
int len;
lock_sock(sk);
switch (po->tp_version) {
case TPACKET_V1:
case TPACKET_V2:
len = sizeof(req_u.req);
break;
case TPACKET_V3:
default:
len = sizeof(req_u.req3);
break;
}
if (optlen < len) {
ret = -EINVAL;
} else {
if (copy_from_sockptr(&req_u.req, optval, len))
ret = -EFAULT;
else
ret = packet_set_ring(sk, &req_u, 0,
optname == PACKET_TX_RING);
}
release_sock(sk);
return ret;
}
case PACKET_COPY_THRESH:
{
int val;
if (optlen != sizeof(val))
return -EINVAL;
if (copy_from_sockptr(&val, optval, sizeof(val)))
return -EFAULT;
pkt_sk(sk)->copy_thresh = val;
return 0;
}
case PACKET_VERSION:
{
int val;
if (optlen != sizeof(val))
return -EINVAL;
if (copy_from_sockptr(&val, optval, sizeof(val)))
return -EFAULT;
switch (val) {
case TPACKET_V1:
case TPACKET_V2:
case TPACKET_V3:
break;
default:
return -EINVAL;
}
lock_sock(sk);
if (po->rx_ring.pg_vec || po->tx_ring.pg_vec) {
ret = -EBUSY;
} else {
po->tp_version = val;
ret = 0;
}
release_sock(sk);
return ret;
}
case PACKET_RESERVE:
{
unsigned int val;
if (optlen != sizeof(val))
return -EINVAL;
if (copy_from_sockptr(&val, optval, sizeof(val)))
return -EFAULT;
if (val > INT_MAX)
return -EINVAL;
lock_sock(sk);
if (po->rx_ring.pg_vec || po->tx_ring.pg_vec) {
ret = -EBUSY;
} else {
po->tp_reserve = val;
ret = 0;
}
release_sock(sk);
return ret;
}
case PACKET_LOSS:
{
unsigned int val;
if (optlen != sizeof(val))
return -EINVAL;
if (copy_from_sockptr(&val, optval, sizeof(val)))
return -EFAULT;
lock_sock(sk);
if (po->rx_ring.pg_vec || po->tx_ring.pg_vec) {
ret = -EBUSY;
} else {
packet_sock_flag_set(po, PACKET_SOCK_TP_LOSS, val);
ret = 0;
}
release_sock(sk);
return ret;
}
case PACKET_AUXDATA:
{
int val;
if (optlen < sizeof(val))
return -EINVAL;
if (copy_from_sockptr(&val, optval, sizeof(val)))
return -EFAULT;
packet_sock_flag_set(po, PACKET_SOCK_AUXDATA, val);
return 0;
}
case PACKET_ORIGDEV:
{
int val;
if (optlen < sizeof(val))
return -EINVAL;
if (copy_from_sockptr(&val, optval, sizeof(val)))
return -EFAULT;
packet_sock_flag_set(po, PACKET_SOCK_ORIGDEV, val);
return 0;
}
case PACKET_VNET_HDR:
case PACKET_VNET_HDR_SZ:
{
int val, hdr_len;
if (sock->type != SOCK_RAW)
return -EINVAL;
if (optlen < sizeof(val))
return -EINVAL;
if (copy_from_sockptr(&val, optval, sizeof(val)))
return -EFAULT;
if (optname == PACKET_VNET_HDR_SZ) {
if (val && val != sizeof(struct virtio_net_hdr) &&
val != sizeof(struct virtio_net_hdr_mrg_rxbuf))
return -EINVAL;
hdr_len = val;
} else {
hdr_len = val ? sizeof(struct virtio_net_hdr) : 0;
}
lock_sock(sk);
if (po->rx_ring.pg_vec || po->tx_ring.pg_vec) {
ret = -EBUSY;
} else {
WRITE_ONCE(po->vnet_hdr_sz, hdr_len);
ret = 0;
}
release_sock(sk);
return ret;
}
case PACKET_TIMESTAMP:
{
int val;
if (optlen != sizeof(val))
return -EINVAL;
if (copy_from_sockptr(&val, optval, sizeof(val)))
return -EFAULT;
WRITE_ONCE(po->tp_tstamp, val);
return 0;
}
case PACKET_FANOUT:
{
struct fanout_args args = { 0 };
if (optlen != sizeof(int) && optlen != sizeof(args))
return -EINVAL;
if (copy_from_sockptr(&args, optval, optlen))
return -EFAULT;
return fanout_add(sk, &args);
}
case PACKET_FANOUT_DATA:
{
/* Paired with the WRITE_ONCE() in fanout_add() */
if (!READ_ONCE(po->fanout))
return -EINVAL;
return fanout_set_data(po, optval, optlen);
}
case PACKET_IGNORE_OUTGOING:
{
int val;
if (optlen != sizeof(val))
return -EINVAL;
if (copy_from_sockptr(&val, optval, sizeof(val)))
return -EFAULT;
if (val < 0 || val > 1)
return -EINVAL;
po->prot_hook.ignore_outgoing = !!val;
return 0;
}
case PACKET_TX_HAS_OFF:
{
unsigned int val;
if (optlen != sizeof(val))
return -EINVAL;
if (copy_from_sockptr(&val, optval, sizeof(val)))
return -EFAULT;
lock_sock(sk);
if (!po->rx_ring.pg_vec && !po->tx_ring.pg_vec)
packet_sock_flag_set(po, PACKET_SOCK_TX_HAS_OFF, val);
release_sock(sk);
return 0;
}
case PACKET_QDISC_BYPASS:
{
int val;
if (optlen != sizeof(val))
return -EINVAL;
if (copy_from_sockptr(&val, optval, sizeof(val)))
return -EFAULT;
packet_sock_flag_set(po, PACKET_SOCK_QDISC_BYPASS, val);
return 0;
}
default:
return -ENOPROTOOPT;
}
}
static int packet_getsockopt(struct socket *sock, int level, int optname,
char __user *optval, int __user *optlen)
{
int len;
int val, lv = sizeof(val);
struct sock *sk = sock->sk;
struct packet_sock *po = pkt_sk(sk);
void *data = &val;
union tpacket_stats_u st;
struct tpacket_rollover_stats rstats;
int drops;
if (level != SOL_PACKET)
return -ENOPROTOOPT;
if (get_user(len, optlen))
return -EFAULT;
if (len < 0)
return -EINVAL;
switch (optname) {
case PACKET_STATISTICS:
spin_lock_bh(&sk->sk_receive_queue.lock);
memcpy(&st, &po->stats, sizeof(st));
memset(&po->stats, 0, sizeof(po->stats));
spin_unlock_bh(&sk->sk_receive_queue.lock);
drops = atomic_xchg(&po->tp_drops, 0);
if (po->tp_version == TPACKET_V3) {
lv = sizeof(struct tpacket_stats_v3);
st.stats3.tp_drops = drops;
st.stats3.tp_packets += drops;
data = &st.stats3;
} else {
lv = sizeof(struct tpacket_stats);
st.stats1.tp_drops = drops;
st.stats1.tp_packets += drops;
data = &st.stats1;
}
break;
case PACKET_AUXDATA:
val = packet_sock_flag(po, PACKET_SOCK_AUXDATA);
break;
case PACKET_ORIGDEV:
val = packet_sock_flag(po, PACKET_SOCK_ORIGDEV);
break;
case PACKET_VNET_HDR:
val = !!READ_ONCE(po->vnet_hdr_sz);
break;
case PACKET_VNET_HDR_SZ:
val = READ_ONCE(po->vnet_hdr_sz);
break;
case PACKET_VERSION:
val = po->tp_version;
break;
case PACKET_HDRLEN:
if (len > sizeof(int))
len = sizeof(int);
if (len < sizeof(int))
return -EINVAL;
if (copy_from_user(&val, optval, len))
return -EFAULT;
switch (val) {
case TPACKET_V1:
val = sizeof(struct tpacket_hdr);
break;
case TPACKET_V2:
val = sizeof(struct tpacket2_hdr);
break;
case TPACKET_V3:
val = sizeof(struct tpacket3_hdr);
break;
default:
return -EINVAL;
}
break;
case PACKET_RESERVE:
val = po->tp_reserve;
break;
case PACKET_LOSS:
val = packet_sock_flag(po, PACKET_SOCK_TP_LOSS);
break;
case PACKET_TIMESTAMP:
val = READ_ONCE(po->tp_tstamp);
break;
case PACKET_FANOUT:
val = (po->fanout ?
((u32)po->fanout->id |
((u32)po->fanout->type << 16) |
((u32)po->fanout->flags << 24)) :
0);
break;
case PACKET_IGNORE_OUTGOING:
val = po->prot_hook.ignore_outgoing;
break;
case PACKET_ROLLOVER_STATS:
if (!po->rollover)
return -EINVAL;
rstats.tp_all = atomic_long_read(&po->rollover->num);
rstats.tp_huge = atomic_long_read(&po->rollover->num_huge);
rstats.tp_failed = atomic_long_read(&po->rollover->num_failed);
data = &rstats;
lv = sizeof(rstats);
break;
case PACKET_TX_HAS_OFF:
val = packet_sock_flag(po, PACKET_SOCK_TX_HAS_OFF);
break;
case PACKET_QDISC_BYPASS:
val = packet_sock_flag(po, PACKET_SOCK_QDISC_BYPASS);
break;
default:
return -ENOPROTOOPT;
}
if (len > lv)
len = lv;
if (put_user(len, optlen))
return -EFAULT;
if (copy_to_user(optval, data, len))
return -EFAULT;
return 0;
}
static int packet_notifier(struct notifier_block *this,
unsigned long msg, void *ptr)
{
struct sock *sk;
struct net_device *dev = netdev_notifier_info_to_dev(ptr);
struct net *net = dev_net(dev);
rcu_read_lock();
sk_for_each_rcu(sk, &net->packet.sklist) {
struct packet_sock *po = pkt_sk(sk);
switch (msg) {
case NETDEV_UNREGISTER:
if (po->mclist)
packet_dev_mclist_delete(dev, &po->mclist);
fallthrough;
case NETDEV_DOWN:
if (dev->ifindex == po->ifindex) {
spin_lock(&po->bind_lock);
if (packet_sock_flag(po, PACKET_SOCK_RUNNING)) {
__unregister_prot_hook(sk, false);
sk->sk_err = ENETDOWN;
if (!sock_flag(sk, SOCK_DEAD))
sk_error_report(sk);
}
if (msg == NETDEV_UNREGISTER) {
packet_cached_dev_reset(po);
WRITE_ONCE(po->ifindex, -1);
netdev_put(po->prot_hook.dev,
&po->prot_hook.dev_tracker);
po->prot_hook.dev = NULL;
}
spin_unlock(&po->bind_lock);
}
break;
case NETDEV_UP:
if (dev->ifindex == po->ifindex) {
spin_lock(&po->bind_lock);
if (po->num)
register_prot_hook(sk);
spin_unlock(&po->bind_lock);
}
break;
}
}
rcu_read_unlock();
return NOTIFY_DONE;
}
static int packet_ioctl(struct socket *sock, unsigned int cmd,
unsigned long arg)
{
struct sock *sk = sock->sk;
switch (cmd) {
case SIOCOUTQ:
{
int amount = sk_wmem_alloc_get(sk);
return put_user(amount, (int __user *)arg);
}
case SIOCINQ:
{
struct sk_buff *skb;
int amount = 0;
spin_lock_bh(&sk->sk_receive_queue.lock);
skb = skb_peek(&sk->sk_receive_queue);
if (skb)
amount = skb->len;
spin_unlock_bh(&sk->sk_receive_queue.lock);
return put_user(amount, (int __user *)arg);
}
#ifdef CONFIG_INET
case SIOCADDRT:
case SIOCDELRT:
case SIOCDARP:
case SIOCGARP:
case SIOCSARP:
case SIOCGIFADDR:
case SIOCSIFADDR:
case SIOCGIFBRDADDR:
case SIOCSIFBRDADDR:
case SIOCGIFNETMASK:
case SIOCSIFNETMASK:
case SIOCGIFDSTADDR:
case SIOCSIFDSTADDR:
case SIOCSIFFLAGS:
return inet_dgram_ops.ioctl(sock, cmd, arg);
#endif
default:
return -ENOIOCTLCMD;
}
return 0;
}
static __poll_t packet_poll(struct file *file, struct socket *sock,
poll_table *wait)
{
struct sock *sk = sock->sk;
struct packet_sock *po = pkt_sk(sk);
__poll_t mask = datagram_poll(file, sock, wait);
spin_lock_bh(&sk->sk_receive_queue.lock);
if (po->rx_ring.pg_vec) {
if (!packet_previous_rx_frame(po, &po->rx_ring,
TP_STATUS_KERNEL))
mask |= EPOLLIN | EPOLLRDNORM;
}
packet_rcv_try_clear_pressure(po);
spin_unlock_bh(&sk->sk_receive_queue.lock);
spin_lock_bh(&sk->sk_write_queue.lock);
if (po->tx_ring.pg_vec) {
if (packet_current_frame(po, &po->tx_ring, TP_STATUS_AVAILABLE))
mask |= EPOLLOUT | EPOLLWRNORM;
}
spin_unlock_bh(&sk->sk_write_queue.lock);
return mask;
}
/* Dirty? Well, I still haven't learned a better way to account
* for user mmaps.
*/
static void packet_mm_open(struct vm_area_struct *vma)
{
struct file *file = vma->vm_file;
struct socket *sock = file->private_data;
struct sock *sk = sock->sk;
if (sk)
atomic_inc(&pkt_sk(sk)->mapped);
}
static void packet_mm_close(struct vm_area_struct *vma)
{
struct file *file = vma->vm_file;
struct socket *sock = file->private_data;
struct sock *sk = sock->sk;
if (sk)
atomic_dec(&pkt_sk(sk)->mapped);
}
static const struct vm_operations_struct packet_mmap_ops = {
.open = packet_mm_open,
.close = packet_mm_close,
};
static void free_pg_vec(struct pgv *pg_vec, unsigned int order,
unsigned int len)
{
int i;
for (i = 0; i < len; i++) {
if (likely(pg_vec[i].buffer)) {
if (is_vmalloc_addr(pg_vec[i].buffer))
vfree(pg_vec[i].buffer);
else
free_pages((unsigned long)pg_vec[i].buffer,
order);
pg_vec[i].buffer = NULL;
}
}
kfree(pg_vec);
}
static char *alloc_one_pg_vec_page(unsigned long order)
{
char *buffer;
gfp_t gfp_flags = GFP_KERNEL | __GFP_COMP |
__GFP_ZERO | __GFP_NOWARN | __GFP_NORETRY;
buffer = (char *) __get_free_pages(gfp_flags, order);
if (buffer)
return buffer;
/* __get_free_pages failed, fall back to vmalloc */
buffer = vzalloc(array_size((1 << order), PAGE_SIZE));
if (buffer)
return buffer;
/* vmalloc failed, let's dig into swap here */
gfp_flags &= ~__GFP_NORETRY;
buffer = (char *) __get_free_pages(gfp_flags, order);
if (buffer)
return buffer;
/* complete and utter failure */
return NULL;
}
static struct pgv *alloc_pg_vec(struct tpacket_req *req, int order)
{
unsigned int block_nr = req->tp_block_nr;
struct pgv *pg_vec;
int i;
pg_vec = kcalloc(block_nr, sizeof(struct pgv), GFP_KERNEL | __GFP_NOWARN);
if (unlikely(!pg_vec))
goto out;
for (i = 0; i < block_nr; i++) {
pg_vec[i].buffer = alloc_one_pg_vec_page(order);
if (unlikely(!pg_vec[i].buffer))
goto out_free_pgvec;
}
out:
return pg_vec;
out_free_pgvec:
free_pg_vec(pg_vec, order, block_nr);
pg_vec = NULL;
goto out;
}
static int packet_set_ring(struct sock *sk, union tpacket_req_u *req_u,
int closing, int tx_ring)
{
struct pgv *pg_vec = NULL;
struct packet_sock *po = pkt_sk(sk);
unsigned long *rx_owner_map = NULL;
int was_running, order = 0;
struct packet_ring_buffer *rb;
struct sk_buff_head *rb_queue;
__be16 num;
int err;
/* Alias to keep code churn minimal */
struct tpacket_req *req = &req_u->req;
rb = tx_ring ? &po->tx_ring : &po->rx_ring;
rb_queue = tx_ring ? &sk->sk_write_queue : &sk->sk_receive_queue;
err = -EBUSY;
if (!closing) {
if (atomic_read(&po->mapped))
goto out;
if (packet_read_pending(rb))
goto out;
}
if (req->tp_block_nr) {
unsigned int min_frame_size;
/* Sanity tests and some calculations */
err = -EBUSY;
if (unlikely(rb->pg_vec))
goto out;
switch (po->tp_version) {
case TPACKET_V1:
po->tp_hdrlen = TPACKET_HDRLEN;
break;
case TPACKET_V2:
po->tp_hdrlen = TPACKET2_HDRLEN;
break;
case TPACKET_V3:
po->tp_hdrlen = TPACKET3_HDRLEN;
break;
}
err = -EINVAL;
if (unlikely((int)req->tp_block_size <= 0))
goto out;
if (unlikely(!PAGE_ALIGNED(req->tp_block_size)))
goto out;
min_frame_size = po->tp_hdrlen + po->tp_reserve;
if (po->tp_version >= TPACKET_V3 &&
req->tp_block_size <
BLK_PLUS_PRIV((u64)req_u->req3.tp_sizeof_priv) + min_frame_size)
goto out;
if (unlikely(req->tp_frame_size < min_frame_size))
goto out;
if (unlikely(req->tp_frame_size & (TPACKET_ALIGNMENT - 1)))
goto out;
rb->frames_per_block = req->tp_block_size / req->tp_frame_size;
if (unlikely(rb->frames_per_block == 0))
goto out;
if (unlikely(rb->frames_per_block > UINT_MAX / req->tp_block_nr))
goto out;
if (unlikely((rb->frames_per_block * req->tp_block_nr) !=
req->tp_frame_nr))
goto out;
err = -ENOMEM;
order = get_order(req->tp_block_size);
pg_vec = alloc_pg_vec(req, order);
if (unlikely(!pg_vec))
goto out;
switch (po->tp_version) {
case TPACKET_V3:
/* Block transmit is not supported yet */
if (!tx_ring) {
init_prb_bdqc(po, rb, pg_vec, req_u);
} else {
struct tpacket_req3 *req3 = &req_u->req3;
if (req3->tp_retire_blk_tov ||
req3->tp_sizeof_priv ||
req3->tp_feature_req_word) {
err = -EINVAL;
goto out_free_pg_vec;
}
}
break;
default:
if (!tx_ring) {
rx_owner_map = bitmap_alloc(req->tp_frame_nr,
GFP_KERNEL | __GFP_NOWARN | __GFP_ZERO);
if (!rx_owner_map)
goto out_free_pg_vec;
}
break;
}
}
/* Done */
else {
err = -EINVAL;
if (unlikely(req->tp_frame_nr))
goto out;
}
/* Detach socket from network */
spin_lock(&po->bind_lock);
was_running = packet_sock_flag(po, PACKET_SOCK_RUNNING);
num = po->num;
if (was_running) {
WRITE_ONCE(po->num, 0);
__unregister_prot_hook(sk, false);
}
spin_unlock(&po->bind_lock);
synchronize_net();
err = -EBUSY;
mutex_lock(&po->pg_vec_lock);
if (closing || atomic_read(&po->mapped) == 0) {
err = 0;
spin_lock_bh(&rb_queue->lock);
swap(rb->pg_vec, pg_vec);
if (po->tp_version <= TPACKET_V2)
swap(rb->rx_owner_map, rx_owner_map);
rb->frame_max = (req->tp_frame_nr - 1);
rb->head = 0;
rb->frame_size = req->tp_frame_size;
spin_unlock_bh(&rb_queue->lock);
swap(rb->pg_vec_order, order);
swap(rb->pg_vec_len, req->tp_block_nr);
rb->pg_vec_pages = req->tp_block_size/PAGE_SIZE;
po->prot_hook.func = (po->rx_ring.pg_vec) ?
tpacket_rcv : packet_rcv;
skb_queue_purge(rb_queue);
if (atomic_read(&po->mapped))
pr_err("packet_mmap: vma is busy: %d\n",
atomic_read(&po->mapped));
}
mutex_unlock(&po->pg_vec_lock);
spin_lock(&po->bind_lock);
if (was_running) {
WRITE_ONCE(po->num, num);
register_prot_hook(sk);
}
spin_unlock(&po->bind_lock);
if (pg_vec && (po->tp_version > TPACKET_V2)) {
/* Because we don't support block-based V3 on tx-ring */
if (!tx_ring)
prb_shutdown_retire_blk_timer(po, rb_queue);
}
out_free_pg_vec:
if (pg_vec) {
bitmap_free(rx_owner_map);
free_pg_vec(pg_vec, order, req->tp_block_nr);
}
out:
return err;
}
static int packet_mmap(struct file *file, struct socket *sock,
struct vm_area_struct *vma)
{
struct sock *sk = sock->sk;
struct packet_sock *po = pkt_sk(sk);
unsigned long size, expected_size;
struct packet_ring_buffer *rb;
unsigned long start;
int err = -EINVAL;
int i;
if (vma->vm_pgoff)
return -EINVAL;
mutex_lock(&po->pg_vec_lock);
expected_size = 0;
for (rb = &po->rx_ring; rb <= &po->tx_ring; rb++) {
if (rb->pg_vec) {
expected_size += rb->pg_vec_len
* rb->pg_vec_pages
* PAGE_SIZE;
}
}
if (expected_size == 0)
goto out;
size = vma->vm_end - vma->vm_start;
if (size != expected_size)
goto out;
start = vma->vm_start;
for (rb = &po->rx_ring; rb <= &po->tx_ring; rb++) {
if (rb->pg_vec == NULL)
continue;
for (i = 0; i < rb->pg_vec_len; i++) {
struct page *page;
void *kaddr = rb->pg_vec[i].buffer;
int pg_num;
for (pg_num = 0; pg_num < rb->pg_vec_pages; pg_num++) {
page = pgv_to_page(kaddr);
err = vm_insert_page(vma, start, page);
if (unlikely(err))
goto out;
start += PAGE_SIZE;
kaddr += PAGE_SIZE;
}
}
}
atomic_inc(&po->mapped);
vma->vm_ops = &packet_mmap_ops;
err = 0;
out:
mutex_unlock(&po->pg_vec_lock);
return err;
}
static const struct proto_ops packet_ops_spkt = {
.family = PF_PACKET,
.owner = THIS_MODULE,
.release = packet_release,
.bind = packet_bind_spkt,
.connect = sock_no_connect,
.socketpair = sock_no_socketpair,
.accept = sock_no_accept,
.getname = packet_getname_spkt,
.poll = datagram_poll,
.ioctl = packet_ioctl,
.gettstamp = sock_gettstamp,
.listen = sock_no_listen,
.shutdown = sock_no_shutdown,
.sendmsg = packet_sendmsg_spkt,
.recvmsg = packet_recvmsg,
.mmap = sock_no_mmap,
};
static const struct proto_ops packet_ops = {
.family = PF_PACKET,
.owner = THIS_MODULE,
.release = packet_release,
.bind = packet_bind,
.connect = sock_no_connect,
.socketpair = sock_no_socketpair,
.accept = sock_no_accept,
.getname = packet_getname,
.poll = packet_poll,
.ioctl = packet_ioctl,
.gettstamp = sock_gettstamp,
.listen = sock_no_listen,
.shutdown = sock_no_shutdown,
.setsockopt = packet_setsockopt,
.getsockopt = packet_getsockopt,
.sendmsg = packet_sendmsg,
.recvmsg = packet_recvmsg,
.mmap = packet_mmap,
};
static const struct net_proto_family packet_family_ops = {
.family = PF_PACKET,
.create = packet_create,
.owner = THIS_MODULE,
};
static struct notifier_block packet_netdev_notifier = {
.notifier_call = packet_notifier,
};
#ifdef CONFIG_PROC_FS
static void *packet_seq_start(struct seq_file *seq, loff_t *pos)
__acquires(RCU)
{
struct net *net = seq_file_net(seq);
rcu_read_lock();
return seq_hlist_start_head_rcu(&net->packet.sklist, *pos);
}
static void *packet_seq_next(struct seq_file *seq, void *v, loff_t *pos)
{
struct net *net = seq_file_net(seq);
return seq_hlist_next_rcu(v, &net->packet.sklist, pos);
}
static void packet_seq_stop(struct seq_file *seq, void *v)
__releases(RCU)
{
rcu_read_unlock();
}
static int packet_seq_show(struct seq_file *seq, void *v)
{
if (v == SEQ_START_TOKEN)
seq_printf(seq,
"%*sRefCnt Type Proto Iface R Rmem User Inode\n",
IS_ENABLED(CONFIG_64BIT) ? -17 : -9, "sk");
else {
struct sock *s = sk_entry(v);
const struct packet_sock *po = pkt_sk(s);
seq_printf(seq,
"%pK %-6d %-4d %04x %-5d %1d %-6u %-6u %-6lu\n",
s,
refcount_read(&s->sk_refcnt),
s->sk_type,
ntohs(READ_ONCE(po->num)),
READ_ONCE(po->ifindex),
packet_sock_flag(po, PACKET_SOCK_RUNNING),
atomic_read(&s->sk_rmem_alloc),
from_kuid_munged(seq_user_ns(seq), sock_i_uid(s)),
sock_i_ino(s));
}
return 0;
}
static const struct seq_operations packet_seq_ops = {
.start = packet_seq_start,
.next = packet_seq_next,
.stop = packet_seq_stop,
.show = packet_seq_show,
};
#endif
static int __net_init packet_net_init(struct net *net)
{
mutex_init(&net->packet.sklist_lock);
INIT_HLIST_HEAD(&net->packet.sklist);
#ifdef CONFIG_PROC_FS
if (!proc_create_net("packet", 0, net->proc_net, &packet_seq_ops,
sizeof(struct seq_net_private)))
return -ENOMEM;
#endif /* CONFIG_PROC_FS */
return 0;
}
static void __net_exit packet_net_exit(struct net *net)
{
remove_proc_entry("packet", net->proc_net);
WARN_ON_ONCE(!hlist_empty(&net->packet.sklist));
}
static struct pernet_operations packet_net_ops = {
.init = packet_net_init,
.exit = packet_net_exit,
};
static void __exit packet_exit(void)
{
sock_unregister(PF_PACKET);
proto_unregister(&packet_proto);
unregister_netdevice_notifier(&packet_netdev_notifier);
unregister_pernet_subsys(&packet_net_ops);
}
static int __init packet_init(void)
{
int rc;
rc = register_pernet_subsys(&packet_net_ops);
if (rc)
goto out;
rc = register_netdevice_notifier(&packet_netdev_notifier);
if (rc)
goto out_pernet;
rc = proto_register(&packet_proto, 0);
if (rc)
goto out_notifier;
rc = sock_register(&packet_family_ops);
if (rc)
goto out_proto;
return 0;
out_proto:
proto_unregister(&packet_proto);
out_notifier:
unregister_netdevice_notifier(&packet_netdev_notifier);
out_pernet:
unregister_pernet_subsys(&packet_net_ops);
out:
return rc;
}
module_init(packet_init);
module_exit(packet_exit);
MODULE_LICENSE("GPL");
MODULE_ALIAS_NETPROTO(PF_PACKET);
| linux-master | net/packet/af_packet.c |
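/*
 * Editorial addition, not part of the kernel tree above: a minimal userspace
 * sketch of how the AF_PACKET ring paths in af_packet.c are typically driven
 * (PACKET_VERSION and PACKET_RX_RING via packet_setsockopt(), the mapping via
 * packet_mmap(), and packet_bind()). The ring geometry and the "eth0"
 * interface name are illustrative assumptions; error handling is reduced to
 * perror(). Requires CAP_NET_RAW.
 */
#include <stdio.h>
#include <unistd.h>
#include <sys/socket.h>
#include <sys/mman.h>
#include <linux/if_packet.h>
#include <linux/if_ether.h>
#include <net/if.h>
#include <arpa/inet.h>

int main(void)
{
	int fd = socket(AF_PACKET, SOCK_RAW, htons(ETH_P_ALL));
	if (fd < 0) {
		perror("socket");
		return 1;
	}

	/* Select the TPACKET_V3 header format (PACKET_VERSION case). */
	int version = TPACKET_V3;
	if (setsockopt(fd, SOL_PACKET, PACKET_VERSION, &version, sizeof(version)) < 0) {
		perror("PACKET_VERSION");
		return 1;
	}

	/* Request an RX ring; geometry must satisfy the checks in packet_set_ring():
	 * block size page aligned, frame size TPACKET_ALIGNMENT aligned, and
	 * frame_nr == (block_size / frame_size) * block_nr.
	 */
	struct tpacket_req3 req = {
		.tp_block_size = 1 << 16,
		.tp_block_nr = 8,
		.tp_frame_size = 1 << 11,
		.tp_frame_nr = ((1 << 16) / (1 << 11)) * 8,
		.tp_retire_blk_tov = 60, /* block retire timeout, ms */
	};
	if (setsockopt(fd, SOL_PACKET, PACKET_RX_RING, &req, sizeof(req)) < 0) {
		perror("PACKET_RX_RING");
		return 1;
	}

	/* Map the ring; packet_mmap() insists the size equals block_nr * block_size. */
	size_t ring_sz = (size_t)req.tp_block_size * req.tp_block_nr;
	void *ring = mmap(NULL, ring_sz, PROT_READ | PROT_WRITE, MAP_SHARED, fd, 0);
	if (ring == MAP_FAILED) {
		perror("mmap");
		return 1;
	}

	/* Bind to one interface (packet_bind() -> packet_do_bind()). */
	struct sockaddr_ll ll = {
		.sll_family = AF_PACKET,
		.sll_protocol = htons(ETH_P_ALL),
		.sll_ifindex = (int)if_nametoindex("eth0"), /* assumed interface name */
	};
	if (bind(fd, (struct sockaddr *)&ll, sizeof(ll)) < 0) {
		perror("bind");
		return 1;
	}

	printf("TPACKET_V3 RX ring mapped at %p (%zu bytes)\n", ring, ring_sz);
	munmap(ring, ring_sz);
	close(fd);
	return 0;
}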
// SPDX-License-Identifier: GPL-2.0
/* Copyright 2011-2014 Autronica Fire and Security AS
*
* Author(s):
* 2011-2014 Arvid Brodin, [email protected]
*
* Event handling for HSR and PRP devices.
*/
#include <linux/netdevice.h>
#include <net/rtnetlink.h>
#include <linux/rculist.h>
#include <linux/timer.h>
#include <linux/etherdevice.h>
#include "hsr_main.h"
#include "hsr_device.h"
#include "hsr_netlink.h"
#include "hsr_framereg.h"
#include "hsr_slave.h"
static bool hsr_slave_empty(struct hsr_priv *hsr)
{
struct hsr_port *port;
hsr_for_each_port(hsr, port)
if (port->type != HSR_PT_MASTER)
return false;
return true;
}
static int hsr_netdev_notify(struct notifier_block *nb, unsigned long event,
void *ptr)
{
struct hsr_port *port, *master;
struct net_device *dev;
struct hsr_priv *hsr;
LIST_HEAD(list_kill);
int mtu_max;
int res;
dev = netdev_notifier_info_to_dev(ptr);
port = hsr_port_get_rtnl(dev);
if (!port) {
if (!is_hsr_master(dev))
return NOTIFY_DONE; /* Not an HSR device */
hsr = netdev_priv(dev);
port = hsr_port_get_hsr(hsr, HSR_PT_MASTER);
if (!port) {
/* Resend of notification concerning removed device? */
return NOTIFY_DONE;
}
} else {
hsr = port->hsr;
}
switch (event) {
case NETDEV_UP: /* Administrative state UP */
case NETDEV_DOWN: /* Administrative state DOWN */
case NETDEV_CHANGE: /* Link (carrier) state changes */
hsr_check_carrier_and_operstate(hsr);
break;
case NETDEV_CHANGENAME:
if (is_hsr_master(dev))
hsr_debugfs_rename(dev);
break;
case NETDEV_CHANGEADDR:
if (port->type == HSR_PT_MASTER) {
/* This should not happen since there's no
* ndo_set_mac_address() for HSR devices - i.e. not
* supported.
*/
break;
}
master = hsr_port_get_hsr(hsr, HSR_PT_MASTER);
if (port->type == HSR_PT_SLAVE_A) {
eth_hw_addr_set(master->dev, dev->dev_addr);
call_netdevice_notifiers(NETDEV_CHANGEADDR,
master->dev);
}
/* Make sure we recognize frames from ourselves in hsr_rcv() */
port = hsr_port_get_hsr(hsr, HSR_PT_SLAVE_B);
res = hsr_create_self_node(hsr,
master->dev->dev_addr,
port ?
port->dev->dev_addr :
master->dev->dev_addr);
if (res)
netdev_warn(master->dev,
"Could not update HSR node address.\n");
break;
case NETDEV_CHANGEMTU:
if (port->type == HSR_PT_MASTER)
break; /* Handled in ndo_change_mtu() */
mtu_max = hsr_get_max_mtu(port->hsr);
master = hsr_port_get_hsr(port->hsr, HSR_PT_MASTER);
master->dev->mtu = mtu_max;
break;
case NETDEV_UNREGISTER:
if (!is_hsr_master(dev)) {
master = hsr_port_get_hsr(port->hsr, HSR_PT_MASTER);
hsr_del_port(port);
if (hsr_slave_empty(master->hsr)) {
const struct rtnl_link_ops *ops;
ops = master->dev->rtnl_link_ops;
ops->dellink(master->dev, &list_kill);
unregister_netdevice_many(&list_kill);
}
}
break;
case NETDEV_PRE_TYPE_CHANGE:
/* HSR works only on Ethernet devices. Refuse to let a slave
* change its type.
*/
return NOTIFY_BAD;
}
return NOTIFY_DONE;
}
struct hsr_port *hsr_port_get_hsr(struct hsr_priv *hsr, enum hsr_port_type pt)
{
struct hsr_port *port;
hsr_for_each_port(hsr, port)
if (port->type == pt)
return port;
return NULL;
}
int hsr_get_version(struct net_device *dev, enum hsr_version *ver)
{
struct hsr_priv *hsr;
hsr = netdev_priv(dev);
*ver = hsr->prot_version;
return 0;
}
EXPORT_SYMBOL(hsr_get_version);
static struct notifier_block hsr_nb = {
.notifier_call = hsr_netdev_notify, /* Slave event notifications */
};
static int __init hsr_init(void)
{
int res;
BUILD_BUG_ON(sizeof(struct hsr_tag) != HSR_HLEN);
register_netdevice_notifier(&hsr_nb);
res = hsr_netlink_init();
return res;
}
static void __exit hsr_exit(void)
{
hsr_netlink_exit();
hsr_debugfs_remove_root();
unregister_netdevice_notifier(&hsr_nb);
}
module_init(hsr_init);
module_exit(hsr_exit);
MODULE_LICENSE("GPL");
| linux-master | net/hsr/hsr_main.c |
// SPDX-License-Identifier: GPL-2.0
/* Copyright 2011-2014 Autronica Fire and Security AS
*
* Author(s):
* 2011-2014 Arvid Brodin, [email protected]
*
* The HSR spec says never to forward the same frame twice on the same
* interface. A frame is identified by its source MAC address and its HSR
* sequence number. This code keeps track of senders and their sequence numbers
* to allow filtering of duplicate frames, and to detect HSR ring errors.
* Same code handles filtering of duplicates for PRP as well.
*/
#include <linux/if_ether.h>
#include <linux/etherdevice.h>
#include <linux/slab.h>
#include <linux/rculist.h>
#include "hsr_main.h"
#include "hsr_framereg.h"
#include "hsr_netlink.h"
/* seq_nr_after(a, b) - return true if a is after (higher in sequence than) b,
* false otherwise.
*/
static bool seq_nr_after(u16 a, u16 b)
{
/* Remove inconsistency where
* seq_nr_after(a, b) == seq_nr_before(a, b)
*/
if ((int)b - a == 32768)
return false;
return (((s16)(b - a)) < 0);
}
#define seq_nr_before(a, b) seq_nr_after((b), (a))
#define seq_nr_before_or_eq(a, b) (!seq_nr_after((a), (b)))
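/* Worked examples (editorial note): sequence numbers are u16 and wrap, so
 * "after" is decided modulo 2^16. seq_nr_after(1, 65535) computes
 * (s16)(65535 - 1) = -2 < 0 and returns true, i.e. 1 is newer than 65535
 * across the wrap. For values exactly half the space apart, e.g.
 * seq_nr_after(0, 32768), the (int)b - a == 32768 special case returns
 * false; without it both seq_nr_after(0, 32768) and seq_nr_before(0, 32768)
 * would be true.
 */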
bool hsr_addr_is_self(struct hsr_priv *hsr, unsigned char *addr)
{
struct hsr_self_node *sn;
bool ret = false;
rcu_read_lock();
sn = rcu_dereference(hsr->self_node);
if (!sn) {
WARN_ONCE(1, "HSR: No self node\n");
goto out;
}
if (ether_addr_equal(addr, sn->macaddress_A) ||
ether_addr_equal(addr, sn->macaddress_B))
ret = true;
out:
rcu_read_unlock();
return ret;
}
/* Search for mac entry. Caller must hold rcu read lock.
*/
static struct hsr_node *find_node_by_addr_A(struct list_head *node_db,
const unsigned char addr[ETH_ALEN])
{
struct hsr_node *node;
list_for_each_entry_rcu(node, node_db, mac_list) {
if (ether_addr_equal(node->macaddress_A, addr))
return node;
}
return NULL;
}
/* Helper for device init; the self_node is used in hsr_rcv() to recognize
* frames from self that have been looped over the HSR ring.
*/
int hsr_create_self_node(struct hsr_priv *hsr,
const unsigned char addr_a[ETH_ALEN],
const unsigned char addr_b[ETH_ALEN])
{
struct hsr_self_node *sn, *old;
sn = kmalloc(sizeof(*sn), GFP_KERNEL);
if (!sn)
return -ENOMEM;
ether_addr_copy(sn->macaddress_A, addr_a);
ether_addr_copy(sn->macaddress_B, addr_b);
spin_lock_bh(&hsr->list_lock);
old = rcu_replace_pointer(hsr->self_node, sn,
lockdep_is_held(&hsr->list_lock));
spin_unlock_bh(&hsr->list_lock);
if (old)
kfree_rcu(old, rcu_head);
return 0;
}
void hsr_del_self_node(struct hsr_priv *hsr)
{
struct hsr_self_node *old;
spin_lock_bh(&hsr->list_lock);
old = rcu_replace_pointer(hsr->self_node, NULL,
lockdep_is_held(&hsr->list_lock));
spin_unlock_bh(&hsr->list_lock);
if (old)
kfree_rcu(old, rcu_head);
}
void hsr_del_nodes(struct list_head *node_db)
{
struct hsr_node *node;
struct hsr_node *tmp;
list_for_each_entry_safe(node, tmp, node_db, mac_list)
kfree(node);
}
void prp_handle_san_frame(bool san, enum hsr_port_type port,
struct hsr_node *node)
{
/* Mark if the SAN node is over LAN_A or LAN_B */
if (port == HSR_PT_SLAVE_A) {
node->san_a = true;
return;
}
if (port == HSR_PT_SLAVE_B)
node->san_b = true;
}
/* Allocate an hsr_node and add it to node_db. 'addr' is the node's address_A;
* seq_out is used to initialize filtering of outgoing duplicate frames
* originating from the newly added node.
*/
static struct hsr_node *hsr_add_node(struct hsr_priv *hsr,
struct list_head *node_db,
unsigned char addr[],
u16 seq_out, bool san,
enum hsr_port_type rx_port)
{
struct hsr_node *new_node, *node;
unsigned long now;
int i;
new_node = kzalloc(sizeof(*new_node), GFP_ATOMIC);
if (!new_node)
return NULL;
ether_addr_copy(new_node->macaddress_A, addr);
spin_lock_init(&new_node->seq_out_lock);
/* We are only interested in time diffs here, so use current jiffies
* as initialization. (0 could trigger a spurious ring error warning).
*/
now = jiffies;
for (i = 0; i < HSR_PT_PORTS; i++) {
new_node->time_in[i] = now;
new_node->time_out[i] = now;
}
for (i = 0; i < HSR_PT_PORTS; i++)
new_node->seq_out[i] = seq_out;
if (san && hsr->proto_ops->handle_san_frame)
hsr->proto_ops->handle_san_frame(san, rx_port, new_node);
spin_lock_bh(&hsr->list_lock);
list_for_each_entry_rcu(node, node_db, mac_list,
lockdep_is_held(&hsr->list_lock)) {
if (ether_addr_equal(node->macaddress_A, addr))
goto out;
if (ether_addr_equal(node->macaddress_B, addr))
goto out;
}
list_add_tail_rcu(&new_node->mac_list, node_db);
spin_unlock_bh(&hsr->list_lock);
return new_node;
out:
spin_unlock_bh(&hsr->list_lock);
kfree(new_node);
return node;
}
void prp_update_san_info(struct hsr_node *node, bool is_sup)
{
if (!is_sup)
return;
node->san_a = false;
node->san_b = false;
}
/* Get the hsr_node from which 'skb' was sent.
*/
struct hsr_node *hsr_get_node(struct hsr_port *port, struct list_head *node_db,
struct sk_buff *skb, bool is_sup,
enum hsr_port_type rx_port)
{
struct hsr_priv *hsr = port->hsr;
struct hsr_node *node;
struct ethhdr *ethhdr;
struct prp_rct *rct;
bool san = false;
u16 seq_out;
if (!skb_mac_header_was_set(skb))
return NULL;
ethhdr = (struct ethhdr *)skb_mac_header(skb);
list_for_each_entry_rcu(node, node_db, mac_list) {
if (ether_addr_equal(node->macaddress_A, ethhdr->h_source)) {
if (hsr->proto_ops->update_san_info)
hsr->proto_ops->update_san_info(node, is_sup);
return node;
}
if (ether_addr_equal(node->macaddress_B, ethhdr->h_source)) {
if (hsr->proto_ops->update_san_info)
hsr->proto_ops->update_san_info(node, is_sup);
return node;
}
}
/* Any node connected to this HSR/PRP device may create a node
* entry here.
*/
if (ethhdr->h_proto == htons(ETH_P_PRP) ||
ethhdr->h_proto == htons(ETH_P_HSR)) {
/* Use the existing sequence_nr from the tag as starting point
* for filtering duplicate frames.
*/
seq_out = hsr_get_skb_sequence_nr(skb) - 1;
} else {
rct = skb_get_PRP_rct(skb);
if (rct && prp_check_lsdu_size(skb, rct, is_sup)) {
seq_out = prp_get_skb_sequence_nr(rct);
} else {
if (rx_port != HSR_PT_MASTER)
san = true;
seq_out = HSR_SEQNR_START;
}
}
return hsr_add_node(hsr, node_db, ethhdr->h_source, seq_out,
san, rx_port);
}
/* Use the supervision frame's info about a possible macaddress_B to merge
* nodes that have previously had their macaddress_B registered as a separate
* node.
*/
void hsr_handle_sup_frame(struct hsr_frame_info *frame)
{
struct hsr_node *node_curr = frame->node_src;
struct hsr_port *port_rcv = frame->port_rcv;
struct hsr_priv *hsr = port_rcv->hsr;
struct hsr_sup_payload *hsr_sp;
struct hsr_sup_tlv *hsr_sup_tlv;
struct hsr_node *node_real;
struct sk_buff *skb = NULL;
struct list_head *node_db;
struct ethhdr *ethhdr;
int i;
unsigned int pull_size = 0;
unsigned int total_pull_size = 0;
/* Here either frame->skb_hsr or frame->skb_prp should be
* valid, as a supervision frame will always have protocol
* header info.
*/
if (frame->skb_hsr)
skb = frame->skb_hsr;
else if (frame->skb_prp)
skb = frame->skb_prp;
else if (frame->skb_std)
skb = frame->skb_std;
if (!skb)
return;
/* Leave the ethernet header. */
pull_size = sizeof(struct ethhdr);
skb_pull(skb, pull_size);
total_pull_size += pull_size;
ethhdr = (struct ethhdr *)skb_mac_header(skb);
/* And leave the HSR tag. */
if (ethhdr->h_proto == htons(ETH_P_HSR)) {
pull_size = sizeof(struct hsr_tag);
skb_pull(skb, pull_size);
total_pull_size += pull_size;
}
/* And leave the HSR sup tag. */
pull_size = sizeof(struct hsr_sup_tag);
skb_pull(skb, pull_size);
total_pull_size += pull_size;
/* get HSR sup payload */
hsr_sp = (struct hsr_sup_payload *)skb->data;
/* Merge node_curr (registered on macaddress_B) into node_real */
node_db = &port_rcv->hsr->node_db;
node_real = find_node_by_addr_A(node_db, hsr_sp->macaddress_A);
if (!node_real)
/* No frame received from AddrA of this node yet */
node_real = hsr_add_node(hsr, node_db, hsr_sp->macaddress_A,
HSR_SEQNR_START - 1, true,
port_rcv->type);
if (!node_real)
goto done; /* No mem */
if (node_real == node_curr)
/* Node has already been merged */
goto done;
/* Leave the first HSR sup payload. */
pull_size = sizeof(struct hsr_sup_payload);
skb_pull(skb, pull_size);
total_pull_size += pull_size;
/* Get second supervision tlv */
hsr_sup_tlv = (struct hsr_sup_tlv *)skb->data;
/* And check if it is a redbox mac TLV */
if (hsr_sup_tlv->HSR_TLV_type == PRP_TLV_REDBOX_MAC) {
/* We could stop here after pushing hsr_sup_payload,
* or proceed and also handle macaddress_B for redboxes.
*/
/* Sanity check length */
if (hsr_sup_tlv->HSR_TLV_length != 6)
goto done;
/* Leave the second HSR sup tlv. */
pull_size = sizeof(struct hsr_sup_tlv);
skb_pull(skb, pull_size);
total_pull_size += pull_size;
/* Get redbox mac address. */
hsr_sp = (struct hsr_sup_payload *)skb->data;
/* Check if redbox mac and node mac are equal. */
if (!ether_addr_equal(node_real->macaddress_A, hsr_sp->macaddress_A)) {
/* This is a redbox supervision frame for a VDAN! */
goto done;
}
}
ether_addr_copy(node_real->macaddress_B, ethhdr->h_source);
spin_lock_bh(&node_real->seq_out_lock);
for (i = 0; i < HSR_PT_PORTS; i++) {
if (!node_curr->time_in_stale[i] &&
time_after(node_curr->time_in[i], node_real->time_in[i])) {
node_real->time_in[i] = node_curr->time_in[i];
node_real->time_in_stale[i] =
node_curr->time_in_stale[i];
}
if (seq_nr_after(node_curr->seq_out[i], node_real->seq_out[i]))
node_real->seq_out[i] = node_curr->seq_out[i];
}
spin_unlock_bh(&node_real->seq_out_lock);
node_real->addr_B_port = port_rcv->type;
spin_lock_bh(&hsr->list_lock);
if (!node_curr->removed) {
list_del_rcu(&node_curr->mac_list);
node_curr->removed = true;
kfree_rcu(node_curr, rcu_head);
}
spin_unlock_bh(&hsr->list_lock);
done:
/* Push back here */
skb_push(skb, total_pull_size);
}
/* 'skb' is a frame meant for this host, that is to be passed to upper layers.
*
* If the frame was sent by a node's B interface, replace the source
* address with that node's "official" address (macaddress_A) so that upper
* layers recognize where it came from.
*/
void hsr_addr_subst_source(struct hsr_node *node, struct sk_buff *skb)
{
if (!skb_mac_header_was_set(skb)) {
WARN_ONCE(1, "%s: Mac header not set\n", __func__);
return;
}
memcpy(ð_hdr(skb)->h_source, node->macaddress_A, ETH_ALEN);
}
/* 'skb' is a frame meant for another host.
* 'port' is the outgoing interface
*
 * Substitute the target (dest) MAC address if necessary, so that it matches the
* recipient interface MAC address, regardless of whether that is the
* recipient's A or B interface.
* This is needed to keep the packets flowing through switches that learn on
* which "side" the different interfaces are.
*/
void hsr_addr_subst_dest(struct hsr_node *node_src, struct sk_buff *skb,
struct hsr_port *port)
{
struct hsr_node *node_dst;
if (!skb_mac_header_was_set(skb)) {
WARN_ONCE(1, "%s: Mac header not set\n", __func__);
return;
}
if (!is_unicast_ether_addr(eth_hdr(skb)->h_dest))
return;
node_dst = find_node_by_addr_A(&port->hsr->node_db,
eth_hdr(skb)->h_dest);
if (!node_dst) {
if (port->hsr->prot_version != PRP_V1 && net_ratelimit())
netdev_err(skb->dev, "%s: Unknown node\n", __func__);
return;
}
if (port->type != node_dst->addr_B_port)
return;
if (is_valid_ether_addr(node_dst->macaddress_B))
ether_addr_copy(eth_hdr(skb)->h_dest, node_dst->macaddress_B);
}
void hsr_register_frame_in(struct hsr_node *node, struct hsr_port *port,
u16 sequence_nr)
{
/* Don't register incoming frames without a valid sequence number. This
	 * ensures entries of restarted nodes get pruned so that they can
* re-register and resume communications.
*/
if (!(port->dev->features & NETIF_F_HW_HSR_TAG_RM) &&
seq_nr_before(sequence_nr, node->seq_out[port->type]))
return;
node->time_in[port->type] = jiffies;
node->time_in_stale[port->type] = false;
}
/* 'skb' is an HSR Ethernet frame (with an HSR tag inserted), with a valid
* ethhdr->h_source address and skb->mac_header set.
*
* Return:
* 1 if frame can be shown to have been sent recently on this interface,
* 0 otherwise, or
* negative error code on error
*/
int hsr_register_frame_out(struct hsr_port *port, struct hsr_node *node,
u16 sequence_nr)
{
spin_lock_bh(&node->seq_out_lock);
if (seq_nr_before_or_eq(sequence_nr, node->seq_out[port->type]) &&
time_is_after_jiffies(node->time_out[port->type] +
msecs_to_jiffies(HSR_ENTRY_FORGET_TIME))) {
spin_unlock_bh(&node->seq_out_lock);
return 1;
}
node->time_out[port->type] = jiffies;
node->seq_out[port->type] = sequence_nr;
spin_unlock_bh(&node->seq_out_lock);
return 0;
}
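/* Illustrative sketch (not taken from the original sources): the duplicate
 * discard above depends on wrap-around-safe comparison of 16-bit sequence
 * numbers. The real seq_nr_before()/seq_nr_after() helpers are defined
 * elsewhere in this file and may differ in detail; a minimal standalone
 * version of the idea could look like this.
 */
static inline bool example_seq_nr_after(u16 a, u16 b)
{
	/* 'a' counts as after 'b' when it is ahead by less than half the
	 * sequence space, so e.g. 0x0001 is "after" 0xfffe despite the wrap.
	 */
	return (u16)(a - b) != 0 && (u16)(a - b) < 0x8000;
}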
static struct hsr_port *get_late_port(struct hsr_priv *hsr,
struct hsr_node *node)
{
if (node->time_in_stale[HSR_PT_SLAVE_A])
return hsr_port_get_hsr(hsr, HSR_PT_SLAVE_A);
if (node->time_in_stale[HSR_PT_SLAVE_B])
return hsr_port_get_hsr(hsr, HSR_PT_SLAVE_B);
if (time_after(node->time_in[HSR_PT_SLAVE_B],
node->time_in[HSR_PT_SLAVE_A] +
msecs_to_jiffies(MAX_SLAVE_DIFF)))
return hsr_port_get_hsr(hsr, HSR_PT_SLAVE_A);
if (time_after(node->time_in[HSR_PT_SLAVE_A],
node->time_in[HSR_PT_SLAVE_B] +
msecs_to_jiffies(MAX_SLAVE_DIFF)))
return hsr_port_get_hsr(hsr, HSR_PT_SLAVE_B);
return NULL;
}
/* Remove stale sequence_nr records. Called by the prune timer every
 * PRUNE_PERIOD (a few seconds).
 */
void hsr_prune_nodes(struct timer_list *t)
{
struct hsr_priv *hsr = from_timer(hsr, t, prune_timer);
struct hsr_node *node;
struct hsr_node *tmp;
struct hsr_port *port;
unsigned long timestamp;
unsigned long time_a, time_b;
spin_lock_bh(&hsr->list_lock);
list_for_each_entry_safe(node, tmp, &hsr->node_db, mac_list) {
		/* Don't prune own node. Neither time_in[HSR_PT_SLAVE_A]
		 * nor time_in[HSR_PT_SLAVE_B] will ever be updated for
		 * the master port, so without this check the master node
		 * would be repeatedly pruned, leading to packet loss.
		 */
if (hsr_addr_is_self(hsr, node->macaddress_A))
continue;
/* Shorthand */
time_a = node->time_in[HSR_PT_SLAVE_A];
time_b = node->time_in[HSR_PT_SLAVE_B];
/* Check for timestamps old enough to risk wrap-around */
if (time_after(jiffies, time_a + MAX_JIFFY_OFFSET / 2))
node->time_in_stale[HSR_PT_SLAVE_A] = true;
if (time_after(jiffies, time_b + MAX_JIFFY_OFFSET / 2))
node->time_in_stale[HSR_PT_SLAVE_B] = true;
/* Get age of newest frame from node.
* At least one time_in is OK here; nodes get pruned long
* before both time_ins can get stale
*/
timestamp = time_a;
if (node->time_in_stale[HSR_PT_SLAVE_A] ||
(!node->time_in_stale[HSR_PT_SLAVE_B] &&
time_after(time_b, time_a)))
timestamp = time_b;
/* Warn of ring error only as long as we get frames at all */
if (time_is_after_jiffies(timestamp +
msecs_to_jiffies(1.5 * MAX_SLAVE_DIFF))) {
rcu_read_lock();
port = get_late_port(hsr, node);
if (port)
hsr_nl_ringerror(hsr, node->macaddress_A, port);
rcu_read_unlock();
}
/* Prune old entries */
if (time_is_before_jiffies(timestamp +
msecs_to_jiffies(HSR_NODE_FORGET_TIME))) {
hsr_nl_nodedown(hsr, node->macaddress_A);
if (!node->removed) {
list_del_rcu(&node->mac_list);
node->removed = true;
/* Note that we need to free this entry later: */
kfree_rcu(node, rcu_head);
}
}
}
spin_unlock_bh(&hsr->list_lock);
/* Restart timer */
mod_timer(&hsr->prune_timer,
jiffies + msecs_to_jiffies(PRUNE_PERIOD));
}
void *hsr_get_next_node(struct hsr_priv *hsr, void *_pos,
unsigned char addr[ETH_ALEN])
{
struct hsr_node *node;
if (!_pos) {
node = list_first_or_null_rcu(&hsr->node_db,
struct hsr_node, mac_list);
if (node)
ether_addr_copy(addr, node->macaddress_A);
return node;
}
node = _pos;
list_for_each_entry_continue_rcu(node, &hsr->node_db, mac_list) {
ether_addr_copy(addr, node->macaddress_A);
return node;
}
return NULL;
}
int hsr_get_node_data(struct hsr_priv *hsr,
const unsigned char *addr,
unsigned char addr_b[ETH_ALEN],
unsigned int *addr_b_ifindex,
int *if1_age,
u16 *if1_seq,
int *if2_age,
u16 *if2_seq)
{
struct hsr_node *node;
struct hsr_port *port;
unsigned long tdiff;
node = find_node_by_addr_A(&hsr->node_db, addr);
if (!node)
return -ENOENT;
ether_addr_copy(addr_b, node->macaddress_B);
tdiff = jiffies - node->time_in[HSR_PT_SLAVE_A];
if (node->time_in_stale[HSR_PT_SLAVE_A])
*if1_age = INT_MAX;
#if HZ <= MSEC_PER_SEC
else if (tdiff > msecs_to_jiffies(INT_MAX))
*if1_age = INT_MAX;
#endif
else
*if1_age = jiffies_to_msecs(tdiff);
tdiff = jiffies - node->time_in[HSR_PT_SLAVE_B];
if (node->time_in_stale[HSR_PT_SLAVE_B])
*if2_age = INT_MAX;
#if HZ <= MSEC_PER_SEC
else if (tdiff > msecs_to_jiffies(INT_MAX))
*if2_age = INT_MAX;
#endif
else
*if2_age = jiffies_to_msecs(tdiff);
/* Present sequence numbers as if they were incoming on interface */
*if1_seq = node->seq_out[HSR_PT_SLAVE_B];
*if2_seq = node->seq_out[HSR_PT_SLAVE_A];
if (node->addr_B_port != HSR_PT_NONE) {
port = hsr_port_get_hsr(hsr, node->addr_B_port);
*addr_b_ifindex = port->dev->ifindex;
} else {
*addr_b_ifindex = -1;
}
return 0;
}
| linux-master | net/hsr/hsr_framereg.c |
// SPDX-License-Identifier: GPL-2.0
/* Copyright 2011-2014 Autronica Fire and Security AS
*
* Author(s):
* 2011-2014 Arvid Brodin, [email protected]
*
* Routines for handling Netlink messages for HSR and PRP.
*/
#include "hsr_netlink.h"
#include <linux/kernel.h>
#include <net/rtnetlink.h>
#include <net/genetlink.h>
#include "hsr_main.h"
#include "hsr_device.h"
#include "hsr_framereg.h"
static const struct nla_policy hsr_policy[IFLA_HSR_MAX + 1] = {
[IFLA_HSR_SLAVE1] = { .type = NLA_U32 },
[IFLA_HSR_SLAVE2] = { .type = NLA_U32 },
[IFLA_HSR_MULTICAST_SPEC] = { .type = NLA_U8 },
[IFLA_HSR_VERSION] = { .type = NLA_U8 },
[IFLA_HSR_SUPERVISION_ADDR] = { .len = ETH_ALEN },
[IFLA_HSR_SEQ_NR] = { .type = NLA_U16 },
[IFLA_HSR_PROTOCOL] = { .type = NLA_U8 },
};
/* Here, it seems a netdevice has already been allocated for us, and the
* hsr_dev_setup routine has been executed. Nice!
*/
static int hsr_newlink(struct net *src_net, struct net_device *dev,
struct nlattr *tb[], struct nlattr *data[],
struct netlink_ext_ack *extack)
{
enum hsr_version proto_version;
unsigned char multicast_spec;
u8 proto = HSR_PROTOCOL_HSR;
struct net_device *link[2];
if (!data) {
NL_SET_ERR_MSG_MOD(extack, "No slave devices specified");
return -EINVAL;
}
if (!data[IFLA_HSR_SLAVE1]) {
NL_SET_ERR_MSG_MOD(extack, "Slave1 device not specified");
return -EINVAL;
}
link[0] = __dev_get_by_index(src_net,
nla_get_u32(data[IFLA_HSR_SLAVE1]));
if (!link[0]) {
NL_SET_ERR_MSG_MOD(extack, "Slave1 does not exist");
return -EINVAL;
}
if (!data[IFLA_HSR_SLAVE2]) {
NL_SET_ERR_MSG_MOD(extack, "Slave2 device not specified");
return -EINVAL;
}
link[1] = __dev_get_by_index(src_net,
nla_get_u32(data[IFLA_HSR_SLAVE2]));
if (!link[1]) {
NL_SET_ERR_MSG_MOD(extack, "Slave2 does not exist");
return -EINVAL;
}
if (link[0] == link[1]) {
NL_SET_ERR_MSG_MOD(extack, "Slave1 and Slave2 are same");
return -EINVAL;
}
if (!data[IFLA_HSR_MULTICAST_SPEC])
multicast_spec = 0;
else
multicast_spec = nla_get_u8(data[IFLA_HSR_MULTICAST_SPEC]);
if (data[IFLA_HSR_PROTOCOL])
proto = nla_get_u8(data[IFLA_HSR_PROTOCOL]);
if (proto >= HSR_PROTOCOL_MAX) {
NL_SET_ERR_MSG_MOD(extack, "Unsupported protocol");
return -EINVAL;
}
if (!data[IFLA_HSR_VERSION]) {
proto_version = HSR_V0;
} else {
if (proto == HSR_PROTOCOL_PRP) {
NL_SET_ERR_MSG_MOD(extack, "PRP version unsupported");
return -EINVAL;
}
proto_version = nla_get_u8(data[IFLA_HSR_VERSION]);
if (proto_version > HSR_V1) {
NL_SET_ERR_MSG_MOD(extack,
"Only HSR version 0/1 supported");
return -EINVAL;
}
}
if (proto == HSR_PROTOCOL_PRP)
proto_version = PRP_V1;
return hsr_dev_finalize(dev, link, multicast_spec, proto_version, extack);
}
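/* Usage note (illustrative): this handler backs "ip link add ... type hsr"
 * in iproute2, e.g. something along the lines of
 *
 *   ip link add name hsr0 type hsr slave1 eth0 slave2 eth1
 *
 * where the slave, supervision, version and protocol options map to the
 * IFLA_HSR_SLAVE1/2, IFLA_HSR_MULTICAST_SPEC, IFLA_HSR_VERSION and
 * IFLA_HSR_PROTOCOL attributes handled above.
 */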
static void hsr_dellink(struct net_device *dev, struct list_head *head)
{
struct hsr_priv *hsr = netdev_priv(dev);
del_timer_sync(&hsr->prune_timer);
del_timer_sync(&hsr->announce_timer);
hsr_debugfs_term(hsr);
hsr_del_ports(hsr);
hsr_del_self_node(hsr);
hsr_del_nodes(&hsr->node_db);
unregister_netdevice_queue(dev, head);
}
static int hsr_fill_info(struct sk_buff *skb, const struct net_device *dev)
{
struct hsr_priv *hsr = netdev_priv(dev);
u8 proto = HSR_PROTOCOL_HSR;
struct hsr_port *port;
port = hsr_port_get_hsr(hsr, HSR_PT_SLAVE_A);
if (port) {
if (nla_put_u32(skb, IFLA_HSR_SLAVE1, port->dev->ifindex))
goto nla_put_failure;
}
port = hsr_port_get_hsr(hsr, HSR_PT_SLAVE_B);
if (port) {
if (nla_put_u32(skb, IFLA_HSR_SLAVE2, port->dev->ifindex))
goto nla_put_failure;
}
if (nla_put(skb, IFLA_HSR_SUPERVISION_ADDR, ETH_ALEN,
hsr->sup_multicast_addr) ||
nla_put_u16(skb, IFLA_HSR_SEQ_NR, hsr->sequence_nr))
goto nla_put_failure;
if (hsr->prot_version == PRP_V1)
proto = HSR_PROTOCOL_PRP;
if (nla_put_u8(skb, IFLA_HSR_PROTOCOL, proto))
goto nla_put_failure;
return 0;
nla_put_failure:
return -EMSGSIZE;
}
static struct rtnl_link_ops hsr_link_ops __read_mostly = {
.kind = "hsr",
.maxtype = IFLA_HSR_MAX,
.policy = hsr_policy,
.priv_size = sizeof(struct hsr_priv),
.setup = hsr_dev_setup,
.newlink = hsr_newlink,
.dellink = hsr_dellink,
.fill_info = hsr_fill_info,
};
/* attribute policy */
static const struct nla_policy hsr_genl_policy[HSR_A_MAX + 1] = {
[HSR_A_NODE_ADDR] = { .len = ETH_ALEN },
[HSR_A_NODE_ADDR_B] = { .len = ETH_ALEN },
[HSR_A_IFINDEX] = { .type = NLA_U32 },
[HSR_A_IF1_AGE] = { .type = NLA_U32 },
[HSR_A_IF2_AGE] = { .type = NLA_U32 },
[HSR_A_IF1_SEQ] = { .type = NLA_U16 },
[HSR_A_IF2_SEQ] = { .type = NLA_U16 },
};
static struct genl_family hsr_genl_family;
static const struct genl_multicast_group hsr_mcgrps[] = {
{ .name = "hsr-network", },
};
/* This is called if, for some node with MAC address addr, we only get frames
* over one of the slave interfaces. This would indicate an open network ring
* (i.e. a link has failed somewhere).
*/
void hsr_nl_ringerror(struct hsr_priv *hsr, unsigned char addr[ETH_ALEN],
struct hsr_port *port)
{
struct sk_buff *skb;
void *msg_head;
struct hsr_port *master;
int res;
skb = genlmsg_new(NLMSG_GOODSIZE, GFP_ATOMIC);
if (!skb)
goto fail;
msg_head = genlmsg_put(skb, 0, 0, &hsr_genl_family, 0,
HSR_C_RING_ERROR);
if (!msg_head)
goto nla_put_failure;
res = nla_put(skb, HSR_A_NODE_ADDR, ETH_ALEN, addr);
if (res < 0)
goto nla_put_failure;
res = nla_put_u32(skb, HSR_A_IFINDEX, port->dev->ifindex);
if (res < 0)
goto nla_put_failure;
genlmsg_end(skb, msg_head);
genlmsg_multicast(&hsr_genl_family, skb, 0, 0, GFP_ATOMIC);
return;
nla_put_failure:
kfree_skb(skb);
fail:
rcu_read_lock();
master = hsr_port_get_hsr(hsr, HSR_PT_MASTER);
netdev_warn(master->dev, "Could not send HSR ring error message\n");
rcu_read_unlock();
}
/* This is called when we haven't heard from the node with MAC address addr for
* some time (just before the node is removed from the node table/list).
*/
void hsr_nl_nodedown(struct hsr_priv *hsr, unsigned char addr[ETH_ALEN])
{
struct sk_buff *skb;
void *msg_head;
struct hsr_port *master;
int res;
skb = genlmsg_new(NLMSG_GOODSIZE, GFP_ATOMIC);
if (!skb)
goto fail;
msg_head = genlmsg_put(skb, 0, 0, &hsr_genl_family, 0, HSR_C_NODE_DOWN);
if (!msg_head)
goto nla_put_failure;
res = nla_put(skb, HSR_A_NODE_ADDR, ETH_ALEN, addr);
if (res < 0)
goto nla_put_failure;
genlmsg_end(skb, msg_head);
genlmsg_multicast(&hsr_genl_family, skb, 0, 0, GFP_ATOMIC);
return;
nla_put_failure:
kfree_skb(skb);
fail:
rcu_read_lock();
master = hsr_port_get_hsr(hsr, HSR_PT_MASTER);
netdev_warn(master->dev, "Could not send HSR node down\n");
rcu_read_unlock();
}
/* HSR_C_GET_NODE_STATUS lets userspace query the internal HSR node table
* about the status of a specific node in the network, defined by its MAC
* address.
*
* Input: hsr ifindex, node mac address
* Output: hsr ifindex, node mac address (copied from request),
* age of latest frame from node over slave 1, slave 2 [ms]
*/
static int hsr_get_node_status(struct sk_buff *skb_in, struct genl_info *info)
{
/* For receiving */
struct nlattr *na;
struct net_device *hsr_dev;
/* For sending */
struct sk_buff *skb_out;
void *msg_head;
struct hsr_priv *hsr;
struct hsr_port *port;
unsigned char hsr_node_addr_b[ETH_ALEN];
int hsr_node_if1_age;
u16 hsr_node_if1_seq;
int hsr_node_if2_age;
u16 hsr_node_if2_seq;
int addr_b_ifindex;
int res;
if (!info)
goto invalid;
na = info->attrs[HSR_A_IFINDEX];
if (!na)
goto invalid;
na = info->attrs[HSR_A_NODE_ADDR];
if (!na)
goto invalid;
rcu_read_lock();
hsr_dev = dev_get_by_index_rcu(genl_info_net(info),
nla_get_u32(info->attrs[HSR_A_IFINDEX]));
if (!hsr_dev)
goto rcu_unlock;
if (!is_hsr_master(hsr_dev))
goto rcu_unlock;
/* Send reply */
skb_out = genlmsg_new(NLMSG_GOODSIZE, GFP_ATOMIC);
if (!skb_out) {
res = -ENOMEM;
goto fail;
}
msg_head = genlmsg_put(skb_out, NETLINK_CB(skb_in).portid,
info->snd_seq, &hsr_genl_family, 0,
HSR_C_SET_NODE_STATUS);
if (!msg_head) {
res = -ENOMEM;
goto nla_put_failure;
}
res = nla_put_u32(skb_out, HSR_A_IFINDEX, hsr_dev->ifindex);
if (res < 0)
goto nla_put_failure;
hsr = netdev_priv(hsr_dev);
res = hsr_get_node_data(hsr,
(unsigned char *)
nla_data(info->attrs[HSR_A_NODE_ADDR]),
hsr_node_addr_b,
&addr_b_ifindex,
&hsr_node_if1_age,
&hsr_node_if1_seq,
&hsr_node_if2_age,
&hsr_node_if2_seq);
if (res < 0)
goto nla_put_failure;
res = nla_put(skb_out, HSR_A_NODE_ADDR, ETH_ALEN,
nla_data(info->attrs[HSR_A_NODE_ADDR]));
if (res < 0)
goto nla_put_failure;
if (addr_b_ifindex > -1) {
res = nla_put(skb_out, HSR_A_NODE_ADDR_B, ETH_ALEN,
hsr_node_addr_b);
if (res < 0)
goto nla_put_failure;
res = nla_put_u32(skb_out, HSR_A_ADDR_B_IFINDEX,
addr_b_ifindex);
if (res < 0)
goto nla_put_failure;
}
res = nla_put_u32(skb_out, HSR_A_IF1_AGE, hsr_node_if1_age);
if (res < 0)
goto nla_put_failure;
res = nla_put_u16(skb_out, HSR_A_IF1_SEQ, hsr_node_if1_seq);
if (res < 0)
goto nla_put_failure;
port = hsr_port_get_hsr(hsr, HSR_PT_SLAVE_A);
if (port)
res = nla_put_u32(skb_out, HSR_A_IF1_IFINDEX,
port->dev->ifindex);
if (res < 0)
goto nla_put_failure;
res = nla_put_u32(skb_out, HSR_A_IF2_AGE, hsr_node_if2_age);
if (res < 0)
goto nla_put_failure;
res = nla_put_u16(skb_out, HSR_A_IF2_SEQ, hsr_node_if2_seq);
if (res < 0)
goto nla_put_failure;
port = hsr_port_get_hsr(hsr, HSR_PT_SLAVE_B);
if (port)
res = nla_put_u32(skb_out, HSR_A_IF2_IFINDEX,
port->dev->ifindex);
if (res < 0)
goto nla_put_failure;
rcu_read_unlock();
genlmsg_end(skb_out, msg_head);
genlmsg_unicast(genl_info_net(info), skb_out, info->snd_portid);
return 0;
rcu_unlock:
rcu_read_unlock();
invalid:
netlink_ack(skb_in, nlmsg_hdr(skb_in), -EINVAL, NULL);
return 0;
nla_put_failure:
kfree_skb(skb_out);
/* Fall through */
fail:
rcu_read_unlock();
return res;
}
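/* Illustrative userspace sketch, not part of this kernel file: one possible
 * way to issue HSR_C_GET_NODE_STATUS over generic netlink with libnl-3. The
 * "HSR" family name and the HSR_A_* and HSR_C_* values match the definitions
 * used above; the uapi header path is an assumption, and error handling plus
 * parsing of the HSR_C_SET_NODE_STATUS reply are omitted for brevity.
 */
#if 0	/* userspace example only - never compiled into the kernel */
#include <net/if.h>
#include <netlink/netlink.h>
#include <netlink/genl/genl.h>
#include <netlink/genl/ctrl.h>
#include <linux/hsr_netlink.h>	/* assumed location of HSR_A_* and HSR_C_* */

static int hsr_query_node_status(const char *hsr_ifname,
				 const unsigned char mac[6])
{
	struct nl_sock *sk = nl_socket_alloc();
	struct nl_msg *msg;
	int family, err;

	if (!sk)
		return -1;
	genl_connect(sk);
	family = genl_ctrl_resolve(sk, "HSR");

	msg = nlmsg_alloc();
	if (!msg) {
		nl_socket_free(sk);
		return -1;
	}
	genlmsg_put(msg, NL_AUTO_PORT, NL_AUTO_SEQ, family, 0, 0,
		    HSR_C_GET_NODE_STATUS, 1);
	nla_put_u32(msg, HSR_A_IFINDEX, if_nametoindex(hsr_ifname));
	nla_put(msg, HSR_A_NODE_ADDR, 6, mac);

	err = nl_send_auto(sk, msg);	/* reply comes as HSR_C_SET_NODE_STATUS */

	nlmsg_free(msg);
	nl_socket_free(sk);
	return err < 0 ? err : 0;
}
#endif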
/* Get a list of MacAddressA of all nodes known to this node (including self).
*/
static int hsr_get_node_list(struct sk_buff *skb_in, struct genl_info *info)
{
unsigned char addr[ETH_ALEN];
struct net_device *hsr_dev;
struct sk_buff *skb_out;
struct hsr_priv *hsr;
bool restart = false;
struct nlattr *na;
void *pos = NULL;
void *msg_head;
int res;
if (!info)
goto invalid;
na = info->attrs[HSR_A_IFINDEX];
if (!na)
goto invalid;
rcu_read_lock();
hsr_dev = dev_get_by_index_rcu(genl_info_net(info),
nla_get_u32(info->attrs[HSR_A_IFINDEX]));
if (!hsr_dev)
goto rcu_unlock;
if (!is_hsr_master(hsr_dev))
goto rcu_unlock;
restart:
/* Send reply */
skb_out = genlmsg_new(GENLMSG_DEFAULT_SIZE, GFP_ATOMIC);
if (!skb_out) {
res = -ENOMEM;
goto fail;
}
msg_head = genlmsg_put(skb_out, NETLINK_CB(skb_in).portid,
info->snd_seq, &hsr_genl_family, 0,
HSR_C_SET_NODE_LIST);
if (!msg_head) {
res = -ENOMEM;
goto nla_put_failure;
}
if (!restart) {
res = nla_put_u32(skb_out, HSR_A_IFINDEX, hsr_dev->ifindex);
if (res < 0)
goto nla_put_failure;
}
hsr = netdev_priv(hsr_dev);
if (!pos)
pos = hsr_get_next_node(hsr, NULL, addr);
while (pos) {
res = nla_put(skb_out, HSR_A_NODE_ADDR, ETH_ALEN, addr);
if (res < 0) {
if (res == -EMSGSIZE) {
genlmsg_end(skb_out, msg_head);
genlmsg_unicast(genl_info_net(info), skb_out,
info->snd_portid);
restart = true;
goto restart;
}
goto nla_put_failure;
}
pos = hsr_get_next_node(hsr, pos, addr);
}
rcu_read_unlock();
genlmsg_end(skb_out, msg_head);
genlmsg_unicast(genl_info_net(info), skb_out, info->snd_portid);
return 0;
rcu_unlock:
rcu_read_unlock();
invalid:
netlink_ack(skb_in, nlmsg_hdr(skb_in), -EINVAL, NULL);
return 0;
nla_put_failure:
nlmsg_free(skb_out);
/* Fall through */
fail:
rcu_read_unlock();
return res;
}
static const struct genl_small_ops hsr_ops[] = {
{
.cmd = HSR_C_GET_NODE_STATUS,
.validate = GENL_DONT_VALIDATE_STRICT | GENL_DONT_VALIDATE_DUMP,
.flags = 0,
.doit = hsr_get_node_status,
.dumpit = NULL,
},
{
.cmd = HSR_C_GET_NODE_LIST,
.validate = GENL_DONT_VALIDATE_STRICT | GENL_DONT_VALIDATE_DUMP,
.flags = 0,
.doit = hsr_get_node_list,
.dumpit = NULL,
},
};
static struct genl_family hsr_genl_family __ro_after_init = {
.hdrsize = 0,
.name = "HSR",
.version = 1,
.maxattr = HSR_A_MAX,
.policy = hsr_genl_policy,
.netnsok = true,
.module = THIS_MODULE,
.small_ops = hsr_ops,
.n_small_ops = ARRAY_SIZE(hsr_ops),
.resv_start_op = HSR_C_SET_NODE_LIST + 1,
.mcgrps = hsr_mcgrps,
.n_mcgrps = ARRAY_SIZE(hsr_mcgrps),
};
int __init hsr_netlink_init(void)
{
int rc;
rc = rtnl_link_register(&hsr_link_ops);
if (rc)
goto fail_rtnl_link_register;
rc = genl_register_family(&hsr_genl_family);
if (rc)
goto fail_genl_register_family;
hsr_debugfs_create_root();
return 0;
fail_genl_register_family:
rtnl_link_unregister(&hsr_link_ops);
fail_rtnl_link_register:
return rc;
}
void __exit hsr_netlink_exit(void)
{
genl_unregister_family(&hsr_genl_family);
rtnl_link_unregister(&hsr_link_ops);
}
MODULE_ALIAS_RTNL_LINK("hsr");
| linux-master | net/hsr/hsr_netlink.c |
// SPDX-License-Identifier: GPL-2.0
/* Copyright 2011-2014 Autronica Fire and Security AS
*
* Author(s):
* 2011-2014 Arvid Brodin, [email protected]
* This file contains device methods for creating, using and destroying
* virtual HSR or PRP devices.
*/
#include <linux/netdevice.h>
#include <linux/skbuff.h>
#include <linux/etherdevice.h>
#include <linux/rtnetlink.h>
#include <linux/pkt_sched.h>
#include "hsr_device.h"
#include "hsr_slave.h"
#include "hsr_framereg.h"
#include "hsr_main.h"
#include "hsr_forward.h"
static bool is_admin_up(struct net_device *dev)
{
return dev && (dev->flags & IFF_UP);
}
static bool is_slave_up(struct net_device *dev)
{
return dev && is_admin_up(dev) && netif_oper_up(dev);
}
static void __hsr_set_operstate(struct net_device *dev, int transition)
{
write_lock(&dev_base_lock);
if (dev->operstate != transition) {
dev->operstate = transition;
write_unlock(&dev_base_lock);
netdev_state_change(dev);
} else {
write_unlock(&dev_base_lock);
}
}
static void hsr_set_operstate(struct hsr_port *master, bool has_carrier)
{
if (!is_admin_up(master->dev)) {
__hsr_set_operstate(master->dev, IF_OPER_DOWN);
return;
}
if (has_carrier)
__hsr_set_operstate(master->dev, IF_OPER_UP);
else
__hsr_set_operstate(master->dev, IF_OPER_LOWERLAYERDOWN);
}
static bool hsr_check_carrier(struct hsr_port *master)
{
struct hsr_port *port;
ASSERT_RTNL();
hsr_for_each_port(master->hsr, port) {
if (port->type != HSR_PT_MASTER && is_slave_up(port->dev)) {
netif_carrier_on(master->dev);
return true;
}
}
netif_carrier_off(master->dev);
return false;
}
static void hsr_check_announce(struct net_device *hsr_dev,
unsigned char old_operstate)
{
struct hsr_priv *hsr;
hsr = netdev_priv(hsr_dev);
if (hsr_dev->operstate == IF_OPER_UP && old_operstate != IF_OPER_UP) {
/* Went up */
hsr->announce_count = 0;
mod_timer(&hsr->announce_timer,
jiffies + msecs_to_jiffies(HSR_ANNOUNCE_INTERVAL));
}
if (hsr_dev->operstate != IF_OPER_UP && old_operstate == IF_OPER_UP)
/* Went down */
del_timer(&hsr->announce_timer);
}
void hsr_check_carrier_and_operstate(struct hsr_priv *hsr)
{
struct hsr_port *master;
unsigned char old_operstate;
bool has_carrier;
master = hsr_port_get_hsr(hsr, HSR_PT_MASTER);
/* netif_stacked_transfer_operstate() cannot be used here since
* it doesn't set IF_OPER_LOWERLAYERDOWN (?)
*/
old_operstate = master->dev->operstate;
has_carrier = hsr_check_carrier(master);
hsr_set_operstate(master, has_carrier);
hsr_check_announce(master->dev, old_operstate);
}
int hsr_get_max_mtu(struct hsr_priv *hsr)
{
unsigned int mtu_max;
struct hsr_port *port;
mtu_max = ETH_DATA_LEN;
hsr_for_each_port(hsr, port)
if (port->type != HSR_PT_MASTER)
mtu_max = min(port->dev->mtu, mtu_max);
if (mtu_max < HSR_HLEN)
return 0;
return mtu_max - HSR_HLEN;
}
static int hsr_dev_change_mtu(struct net_device *dev, int new_mtu)
{
struct hsr_priv *hsr;
hsr = netdev_priv(dev);
if (new_mtu > hsr_get_max_mtu(hsr)) {
netdev_info(dev, "A HSR master's MTU cannot be greater than the smallest MTU of its slaves minus the HSR Tag length (%d octets).\n",
HSR_HLEN);
return -EINVAL;
}
dev->mtu = new_mtu;
return 0;
}
static int hsr_dev_open(struct net_device *dev)
{
struct hsr_priv *hsr;
struct hsr_port *port;
char designation;
hsr = netdev_priv(dev);
designation = '\0';
hsr_for_each_port(hsr, port) {
if (port->type == HSR_PT_MASTER)
continue;
switch (port->type) {
case HSR_PT_SLAVE_A:
designation = 'A';
break;
case HSR_PT_SLAVE_B:
designation = 'B';
break;
default:
designation = '?';
}
if (!is_slave_up(port->dev))
netdev_warn(dev, "Slave %c (%s) is not up; please bring it up to get a fully working HSR network\n",
designation, port->dev->name);
}
if (designation == '\0')
netdev_warn(dev, "No slave devices configured\n");
return 0;
}
static int hsr_dev_close(struct net_device *dev)
{
/* Nothing to do here. */
return 0;
}
static netdev_features_t hsr_features_recompute(struct hsr_priv *hsr,
netdev_features_t features)
{
netdev_features_t mask;
struct hsr_port *port;
mask = features;
/* Mask out all features that, if supported by one device, should be
* enabled for all devices (see NETIF_F_ONE_FOR_ALL).
*
* Anything that's off in mask will not be enabled - so only things
* that were in features originally, and also is in NETIF_F_ONE_FOR_ALL,
* may become enabled.
*/
features &= ~NETIF_F_ONE_FOR_ALL;
hsr_for_each_port(hsr, port)
features = netdev_increment_features(features,
port->dev->features,
mask);
return features;
}
static netdev_features_t hsr_fix_features(struct net_device *dev,
netdev_features_t features)
{
struct hsr_priv *hsr = netdev_priv(dev);
return hsr_features_recompute(hsr, features);
}
static netdev_tx_t hsr_dev_xmit(struct sk_buff *skb, struct net_device *dev)
{
struct hsr_priv *hsr = netdev_priv(dev);
struct hsr_port *master;
master = hsr_port_get_hsr(hsr, HSR_PT_MASTER);
if (master) {
skb->dev = master->dev;
skb_reset_mac_header(skb);
skb_reset_mac_len(skb);
spin_lock_bh(&hsr->seqnr_lock);
hsr_forward_skb(skb, master);
spin_unlock_bh(&hsr->seqnr_lock);
} else {
dev_core_stats_tx_dropped_inc(dev);
dev_kfree_skb_any(skb);
}
return NETDEV_TX_OK;
}
static const struct header_ops hsr_header_ops = {
.create = eth_header,
.parse = eth_header_parse,
};
static struct sk_buff *hsr_init_skb(struct hsr_port *master)
{
struct hsr_priv *hsr = master->hsr;
struct sk_buff *skb;
int hlen, tlen;
hlen = LL_RESERVED_SPACE(master->dev);
tlen = master->dev->needed_tailroom;
	/* The skb size is the same for PRP and HSR frames; the only
	 * difference is that PRP uses a trailer while HSR uses a
	 * header.
	 */
skb = dev_alloc_skb(sizeof(struct hsr_sup_tag) +
sizeof(struct hsr_sup_payload) + hlen + tlen);
if (!skb)
return skb;
skb_reserve(skb, hlen);
skb->dev = master->dev;
skb->priority = TC_PRIO_CONTROL;
if (dev_hard_header(skb, skb->dev, ETH_P_PRP,
hsr->sup_multicast_addr,
skb->dev->dev_addr, skb->len) <= 0)
goto out;
skb_reset_mac_header(skb);
skb_reset_mac_len(skb);
skb_reset_network_header(skb);
skb_reset_transport_header(skb);
return skb;
out:
kfree_skb(skb);
return NULL;
}
static void send_hsr_supervision_frame(struct hsr_port *master,
unsigned long *interval)
{
struct hsr_priv *hsr = master->hsr;
__u8 type = HSR_TLV_LIFE_CHECK;
struct hsr_sup_payload *hsr_sp;
struct hsr_sup_tag *hsr_stag;
struct sk_buff *skb;
*interval = msecs_to_jiffies(HSR_LIFE_CHECK_INTERVAL);
if (hsr->announce_count < 3 && hsr->prot_version == 0) {
type = HSR_TLV_ANNOUNCE;
*interval = msecs_to_jiffies(HSR_ANNOUNCE_INTERVAL);
hsr->announce_count++;
}
skb = hsr_init_skb(master);
if (!skb) {
WARN_ONCE(1, "HSR: Could not send supervision frame\n");
return;
}
hsr_stag = skb_put(skb, sizeof(struct hsr_sup_tag));
set_hsr_stag_path(hsr_stag, (hsr->prot_version ? 0x0 : 0xf));
set_hsr_stag_HSR_ver(hsr_stag, hsr->prot_version);
/* From HSRv1 on we have separate supervision sequence numbers. */
spin_lock_bh(&hsr->seqnr_lock);
if (hsr->prot_version > 0) {
hsr_stag->sequence_nr = htons(hsr->sup_sequence_nr);
hsr->sup_sequence_nr++;
} else {
hsr_stag->sequence_nr = htons(hsr->sequence_nr);
hsr->sequence_nr++;
}
hsr_stag->tlv.HSR_TLV_type = type;
/* TODO: Why 12 in HSRv0? */
hsr_stag->tlv.HSR_TLV_length = hsr->prot_version ?
sizeof(struct hsr_sup_payload) : 12;
/* Payload: MacAddressA */
hsr_sp = skb_put(skb, sizeof(struct hsr_sup_payload));
ether_addr_copy(hsr_sp->macaddress_A, master->dev->dev_addr);
if (skb_put_padto(skb, ETH_ZLEN)) {
spin_unlock_bh(&hsr->seqnr_lock);
return;
}
hsr_forward_skb(skb, master);
spin_unlock_bh(&hsr->seqnr_lock);
return;
}
static void send_prp_supervision_frame(struct hsr_port *master,
unsigned long *interval)
{
struct hsr_priv *hsr = master->hsr;
struct hsr_sup_payload *hsr_sp;
struct hsr_sup_tag *hsr_stag;
struct sk_buff *skb;
skb = hsr_init_skb(master);
if (!skb) {
WARN_ONCE(1, "PRP: Could not send supervision frame\n");
return;
}
*interval = msecs_to_jiffies(HSR_LIFE_CHECK_INTERVAL);
hsr_stag = skb_put(skb, sizeof(struct hsr_sup_tag));
set_hsr_stag_path(hsr_stag, (hsr->prot_version ? 0x0 : 0xf));
set_hsr_stag_HSR_ver(hsr_stag, (hsr->prot_version ? 1 : 0));
/* From HSRv1 on we have separate supervision sequence numbers. */
spin_lock_bh(&hsr->seqnr_lock);
hsr_stag->sequence_nr = htons(hsr->sup_sequence_nr);
hsr->sup_sequence_nr++;
hsr_stag->tlv.HSR_TLV_type = PRP_TLV_LIFE_CHECK_DD;
hsr_stag->tlv.HSR_TLV_length = sizeof(struct hsr_sup_payload);
/* Payload: MacAddressA */
hsr_sp = skb_put(skb, sizeof(struct hsr_sup_payload));
ether_addr_copy(hsr_sp->macaddress_A, master->dev->dev_addr);
if (skb_put_padto(skb, ETH_ZLEN)) {
spin_unlock_bh(&hsr->seqnr_lock);
return;
}
hsr_forward_skb(skb, master);
spin_unlock_bh(&hsr->seqnr_lock);
}
/* Announce (supervision frame) timer function
*/
static void hsr_announce(struct timer_list *t)
{
struct hsr_priv *hsr;
struct hsr_port *master;
unsigned long interval;
hsr = from_timer(hsr, t, announce_timer);
rcu_read_lock();
master = hsr_port_get_hsr(hsr, HSR_PT_MASTER);
hsr->proto_ops->send_sv_frame(master, &interval);
if (is_admin_up(master->dev))
mod_timer(&hsr->announce_timer, jiffies + interval);
rcu_read_unlock();
}
void hsr_del_ports(struct hsr_priv *hsr)
{
struct hsr_port *port;
port = hsr_port_get_hsr(hsr, HSR_PT_SLAVE_A);
if (port)
hsr_del_port(port);
port = hsr_port_get_hsr(hsr, HSR_PT_SLAVE_B);
if (port)
hsr_del_port(port);
port = hsr_port_get_hsr(hsr, HSR_PT_MASTER);
if (port)
hsr_del_port(port);
}
static const struct net_device_ops hsr_device_ops = {
.ndo_change_mtu = hsr_dev_change_mtu,
.ndo_open = hsr_dev_open,
.ndo_stop = hsr_dev_close,
.ndo_start_xmit = hsr_dev_xmit,
.ndo_fix_features = hsr_fix_features,
};
static struct device_type hsr_type = {
.name = "hsr",
};
static struct hsr_proto_ops hsr_ops = {
.send_sv_frame = send_hsr_supervision_frame,
.create_tagged_frame = hsr_create_tagged_frame,
.get_untagged_frame = hsr_get_untagged_frame,
.drop_frame = hsr_drop_frame,
.fill_frame_info = hsr_fill_frame_info,
.invalid_dan_ingress_frame = hsr_invalid_dan_ingress_frame,
};
static struct hsr_proto_ops prp_ops = {
.send_sv_frame = send_prp_supervision_frame,
.create_tagged_frame = prp_create_tagged_frame,
.get_untagged_frame = prp_get_untagged_frame,
.drop_frame = prp_drop_frame,
.fill_frame_info = prp_fill_frame_info,
.handle_san_frame = prp_handle_san_frame,
.update_san_info = prp_update_san_info,
};
void hsr_dev_setup(struct net_device *dev)
{
eth_hw_addr_random(dev);
ether_setup(dev);
dev->min_mtu = 0;
dev->header_ops = &hsr_header_ops;
dev->netdev_ops = &hsr_device_ops;
SET_NETDEV_DEVTYPE(dev, &hsr_type);
dev->priv_flags |= IFF_NO_QUEUE | IFF_DISABLE_NETPOLL;
dev->needs_free_netdev = true;
dev->hw_features = NETIF_F_SG | NETIF_F_FRAGLIST | NETIF_F_HIGHDMA |
NETIF_F_GSO_MASK | NETIF_F_HW_CSUM |
NETIF_F_HW_VLAN_CTAG_TX;
dev->features = dev->hw_features;
/* Prevent recursive tx locking */
dev->features |= NETIF_F_LLTX;
/* VLAN on top of HSR needs testing and probably some work on
* hsr_header_create() etc.
*/
dev->features |= NETIF_F_VLAN_CHALLENGED;
/* Not sure about this. Taken from bridge code. netdev_features.h says
* it means "Does not change network namespaces".
*/
dev->features |= NETIF_F_NETNS_LOCAL;
}
/* Return true if dev is a HSR master; return false otherwise.
*/
bool is_hsr_master(struct net_device *dev)
{
return (dev->netdev_ops->ndo_start_xmit == hsr_dev_xmit);
}
EXPORT_SYMBOL(is_hsr_master);
/* Default multicast address for HSR Supervision frames */
static const unsigned char def_multicast_addr[ETH_ALEN] __aligned(2) = {
0x01, 0x15, 0x4e, 0x00, 0x01, 0x00
};
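/* Note: the last byte of this address is overwritten with the user-supplied
 * IFLA_HSR_MULTICAST_SPEC value in hsr_dev_finalize() below.
 */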
int hsr_dev_finalize(struct net_device *hsr_dev, struct net_device *slave[2],
unsigned char multicast_spec, u8 protocol_version,
struct netlink_ext_ack *extack)
{
bool unregister = false;
struct hsr_priv *hsr;
int res;
hsr = netdev_priv(hsr_dev);
INIT_LIST_HEAD(&hsr->ports);
INIT_LIST_HEAD(&hsr->node_db);
spin_lock_init(&hsr->list_lock);
eth_hw_addr_set(hsr_dev, slave[0]->dev_addr);
/* initialize protocol specific functions */
if (protocol_version == PRP_V1) {
		/* For PRP, the most significant 3 bits of lan_id hold
		 * the net_id (PRP_LAN_ID).
		 */
hsr->net_id = PRP_LAN_ID << 1;
hsr->proto_ops = &prp_ops;
} else {
hsr->proto_ops = &hsr_ops;
}
/* Make sure we recognize frames from ourselves in hsr_rcv() */
res = hsr_create_self_node(hsr, hsr_dev->dev_addr,
slave[1]->dev_addr);
if (res < 0)
return res;
spin_lock_init(&hsr->seqnr_lock);
	/* Start near overflow, to make wrap-around bugs easier to find: */
hsr->sequence_nr = HSR_SEQNR_START;
hsr->sup_sequence_nr = HSR_SUP_SEQNR_START;
timer_setup(&hsr->announce_timer, hsr_announce, 0);
timer_setup(&hsr->prune_timer, hsr_prune_nodes, 0);
ether_addr_copy(hsr->sup_multicast_addr, def_multicast_addr);
hsr->sup_multicast_addr[ETH_ALEN - 1] = multicast_spec;
hsr->prot_version = protocol_version;
/* Make sure the 1st call to netif_carrier_on() gets through */
netif_carrier_off(hsr_dev);
res = hsr_add_port(hsr, hsr_dev, HSR_PT_MASTER, extack);
if (res)
goto err_add_master;
/* HSR forwarding offload supported in lower device? */
if ((slave[0]->features & NETIF_F_HW_HSR_FWD) &&
(slave[1]->features & NETIF_F_HW_HSR_FWD))
hsr->fwd_offloaded = true;
res = register_netdevice(hsr_dev);
if (res)
goto err_unregister;
unregister = true;
res = hsr_add_port(hsr, slave[0], HSR_PT_SLAVE_A, extack);
if (res)
goto err_unregister;
res = hsr_add_port(hsr, slave[1], HSR_PT_SLAVE_B, extack);
if (res)
goto err_unregister;
hsr_debugfs_init(hsr, hsr_dev);
mod_timer(&hsr->prune_timer, jiffies + msecs_to_jiffies(PRUNE_PERIOD));
return 0;
err_unregister:
hsr_del_ports(hsr);
err_add_master:
hsr_del_self_node(hsr);
if (unregister)
unregister_netdevice(hsr_dev);
return res;
}
| linux-master | net/hsr/hsr_device.c |
// SPDX-License-Identifier: GPL-2.0
/* Copyright 2011-2014 Autronica Fire and Security AS
*
* Author(s):
* 2011-2014 Arvid Brodin, [email protected]
*
 * Frame handler and other utility functions for HSR and PRP.
*/
#include "hsr_slave.h"
#include <linux/etherdevice.h>
#include <linux/if_arp.h>
#include <linux/if_vlan.h>
#include "hsr_main.h"
#include "hsr_device.h"
#include "hsr_forward.h"
#include "hsr_framereg.h"
bool hsr_invalid_dan_ingress_frame(__be16 protocol)
{
return (protocol != htons(ETH_P_PRP) && protocol != htons(ETH_P_HSR));
}
static rx_handler_result_t hsr_handle_frame(struct sk_buff **pskb)
{
struct sk_buff *skb = *pskb;
struct hsr_port *port;
struct hsr_priv *hsr;
__be16 protocol;
/* Packets from dev_loopback_xmit() do not have L2 header, bail out */
if (unlikely(skb->pkt_type == PACKET_LOOPBACK))
return RX_HANDLER_PASS;
if (!skb_mac_header_was_set(skb)) {
WARN_ONCE(1, "%s: skb invalid", __func__);
return RX_HANDLER_PASS;
}
port = hsr_port_get_rcu(skb->dev);
if (!port)
goto finish_pass;
hsr = port->hsr;
if (hsr_addr_is_self(port->hsr, eth_hdr(skb)->h_source)) {
/* Directly kill frames sent by ourselves */
kfree_skb(skb);
goto finish_consume;
}
/* For HSR, only tagged frames are expected (unless the device offloads
* HSR tag removal), but for PRP there could be non tagged frames as
* well from Single attached nodes (SANs).
*/
protocol = eth_hdr(skb)->h_proto;
if (!(port->dev->features & NETIF_F_HW_HSR_TAG_RM) &&
hsr->proto_ops->invalid_dan_ingress_frame &&
hsr->proto_ops->invalid_dan_ingress_frame(protocol))
goto finish_pass;
skb_push(skb, ETH_HLEN);
skb_reset_mac_header(skb);
if ((!hsr->prot_version && protocol == htons(ETH_P_PRP)) ||
protocol == htons(ETH_P_HSR))
skb_set_network_header(skb, ETH_HLEN + HSR_HLEN);
skb_reset_mac_len(skb);
hsr_forward_skb(skb, port);
finish_consume:
return RX_HANDLER_CONSUMED;
finish_pass:
return RX_HANDLER_PASS;
}
bool hsr_port_exists(const struct net_device *dev)
{
return rcu_access_pointer(dev->rx_handler) == hsr_handle_frame;
}
static int hsr_check_dev_ok(struct net_device *dev,
struct netlink_ext_ack *extack)
{
	/* Don't allow HSR on non-Ethernet-like devices */
if ((dev->flags & IFF_LOOPBACK) || dev->type != ARPHRD_ETHER ||
dev->addr_len != ETH_ALEN) {
NL_SET_ERR_MSG_MOD(extack, "Cannot use loopback or non-ethernet device as HSR slave.");
return -EINVAL;
}
/* Don't allow enslaving hsr devices */
if (is_hsr_master(dev)) {
NL_SET_ERR_MSG_MOD(extack,
"Cannot create trees of HSR devices.");
return -EINVAL;
}
if (hsr_port_exists(dev)) {
NL_SET_ERR_MSG_MOD(extack,
"This device is already a HSR slave.");
return -EINVAL;
}
if (is_vlan_dev(dev)) {
NL_SET_ERR_MSG_MOD(extack, "HSR on top of VLAN is not yet supported in this driver.");
return -EINVAL;
}
if (dev->priv_flags & IFF_DONT_BRIDGE) {
NL_SET_ERR_MSG_MOD(extack,
"This device does not support bridging.");
return -EOPNOTSUPP;
}
/* HSR over bonded devices has not been tested, but I'm not sure it
* won't work...
*/
return 0;
}
/* Setup device to be added to the HSR bridge. */
static int hsr_portdev_setup(struct hsr_priv *hsr, struct net_device *dev,
struct hsr_port *port,
struct netlink_ext_ack *extack)
{
struct net_device *hsr_dev;
struct hsr_port *master;
int res;
	/* Don't use promiscuous mode for offload, since L2 frame forwarding
	 * happens in the offloading hardware.
	 */
if (!port->hsr->fwd_offloaded) {
res = dev_set_promiscuity(dev, 1);
if (res)
return res;
}
master = hsr_port_get_hsr(hsr, HSR_PT_MASTER);
hsr_dev = master->dev;
res = netdev_upper_dev_link(dev, hsr_dev, extack);
if (res)
goto fail_upper_dev_link;
res = netdev_rx_handler_register(dev, hsr_handle_frame, port);
if (res)
goto fail_rx_handler;
dev_disable_lro(dev);
return 0;
fail_rx_handler:
netdev_upper_dev_unlink(dev, hsr_dev);
fail_upper_dev_link:
if (!port->hsr->fwd_offloaded)
dev_set_promiscuity(dev, -1);
return res;
}
int hsr_add_port(struct hsr_priv *hsr, struct net_device *dev,
enum hsr_port_type type, struct netlink_ext_ack *extack)
{
struct hsr_port *port, *master;
int res;
if (type != HSR_PT_MASTER) {
res = hsr_check_dev_ok(dev, extack);
if (res)
return res;
}
port = hsr_port_get_hsr(hsr, type);
if (port)
return -EBUSY; /* This port already exists */
port = kzalloc(sizeof(*port), GFP_KERNEL);
if (!port)
return -ENOMEM;
port->hsr = hsr;
port->dev = dev;
port->type = type;
if (type != HSR_PT_MASTER) {
res = hsr_portdev_setup(hsr, dev, port, extack);
if (res)
goto fail_dev_setup;
}
list_add_tail_rcu(&port->port_list, &hsr->ports);
synchronize_rcu();
master = hsr_port_get_hsr(hsr, HSR_PT_MASTER);
netdev_update_features(master->dev);
dev_set_mtu(master->dev, hsr_get_max_mtu(hsr));
return 0;
fail_dev_setup:
kfree(port);
return res;
}
void hsr_del_port(struct hsr_port *port)
{
struct hsr_priv *hsr;
struct hsr_port *master;
hsr = port->hsr;
master = hsr_port_get_hsr(hsr, HSR_PT_MASTER);
list_del_rcu(&port->port_list);
if (port != master) {
netdev_update_features(master->dev);
dev_set_mtu(master->dev, hsr_get_max_mtu(hsr));
netdev_rx_handler_unregister(port->dev);
dev_set_promiscuity(port->dev, -1);
netdev_upper_dev_unlink(port->dev, master->dev);
}
synchronize_rcu();
kfree(port);
}
| linux-master | net/hsr/hsr_slave.c |
// SPDX-License-Identifier: GPL-2.0
/* Copyright 2011-2014 Autronica Fire and Security AS
*
* Author(s):
* 2011-2014 Arvid Brodin, [email protected]
*
* Frame router for HSR and PRP.
*/
#include "hsr_forward.h"
#include <linux/types.h>
#include <linux/skbuff.h>
#include <linux/etherdevice.h>
#include <linux/if_vlan.h>
#include "hsr_main.h"
#include "hsr_framereg.h"
struct hsr_node;
/* The uses I can see for these HSR supervision frames are:
* 1) Use the frames that are sent after node initialization ("HSR_TLV.Type =
* 22") to reset any sequence_nr counters belonging to that node. Useful if
* the other node's counter has been reset for some reason.
* --
* Or not - resetting the counter and bridging the frame would create a
* loop, unfortunately.
*
* 2) Use the LifeCheck frames to detect ring breaks. I.e. if no LifeCheck
* frame is received from a particular node, we know something is wrong.
* We just register these (as with normal frames) and throw them away.
*
* 3) Allow different MAC addresses for the two slave interfaces, using the
* MacAddressA field.
*/
static bool is_supervision_frame(struct hsr_priv *hsr, struct sk_buff *skb)
{
struct ethhdr *eth_hdr;
struct hsr_sup_tag *hsr_sup_tag;
struct hsrv1_ethhdr_sp *hsr_V1_hdr;
struct hsr_sup_tlv *hsr_sup_tlv;
u16 total_length = 0;
WARN_ON_ONCE(!skb_mac_header_was_set(skb));
eth_hdr = (struct ethhdr *)skb_mac_header(skb);
/* Correct addr? */
if (!ether_addr_equal(eth_hdr->h_dest,
hsr->sup_multicast_addr))
return false;
	/* Correct ether type? */
if (!(eth_hdr->h_proto == htons(ETH_P_PRP) ||
eth_hdr->h_proto == htons(ETH_P_HSR)))
return false;
/* Get the supervision header from correct location. */
if (eth_hdr->h_proto == htons(ETH_P_HSR)) { /* Okay HSRv1. */
total_length = sizeof(struct hsrv1_ethhdr_sp);
if (!pskb_may_pull(skb, total_length))
return false;
hsr_V1_hdr = (struct hsrv1_ethhdr_sp *)skb_mac_header(skb);
if (hsr_V1_hdr->hsr.encap_proto != htons(ETH_P_PRP))
return false;
hsr_sup_tag = &hsr_V1_hdr->hsr_sup;
} else {
total_length = sizeof(struct hsrv0_ethhdr_sp);
if (!pskb_may_pull(skb, total_length))
return false;
hsr_sup_tag =
&((struct hsrv0_ethhdr_sp *)skb_mac_header(skb))->hsr_sup;
}
if (hsr_sup_tag->tlv.HSR_TLV_type != HSR_TLV_ANNOUNCE &&
hsr_sup_tag->tlv.HSR_TLV_type != HSR_TLV_LIFE_CHECK &&
hsr_sup_tag->tlv.HSR_TLV_type != PRP_TLV_LIFE_CHECK_DD &&
hsr_sup_tag->tlv.HSR_TLV_type != PRP_TLV_LIFE_CHECK_DA)
return false;
if (hsr_sup_tag->tlv.HSR_TLV_length != 12 &&
hsr_sup_tag->tlv.HSR_TLV_length != sizeof(struct hsr_sup_payload))
return false;
/* Get next tlv */
total_length += sizeof(struct hsr_sup_tlv) + hsr_sup_tag->tlv.HSR_TLV_length;
if (!pskb_may_pull(skb, total_length))
return false;
skb_pull(skb, total_length);
hsr_sup_tlv = (struct hsr_sup_tlv *)skb->data;
skb_push(skb, total_length);
/* if this is a redbox supervision frame we need to verify
* that more data is available
*/
if (hsr_sup_tlv->HSR_TLV_type == PRP_TLV_REDBOX_MAC) {
/* tlv length must be a length of a mac address */
if (hsr_sup_tlv->HSR_TLV_length != sizeof(struct hsr_sup_payload))
return false;
/* make sure another tlv follows */
total_length += sizeof(struct hsr_sup_tlv) + hsr_sup_tlv->HSR_TLV_length;
if (!pskb_may_pull(skb, total_length))
return false;
/* get next tlv */
skb_pull(skb, total_length);
hsr_sup_tlv = (struct hsr_sup_tlv *)skb->data;
skb_push(skb, total_length);
}
/* end of tlvs must follow at the end */
if (hsr_sup_tlv->HSR_TLV_type == HSR_TLV_EOT &&
hsr_sup_tlv->HSR_TLV_length != 0)
return false;
return true;
}
static struct sk_buff *create_stripped_skb_hsr(struct sk_buff *skb_in,
struct hsr_frame_info *frame)
{
struct sk_buff *skb;
int copylen;
unsigned char *dst, *src;
skb_pull(skb_in, HSR_HLEN);
skb = __pskb_copy(skb_in, skb_headroom(skb_in) - HSR_HLEN, GFP_ATOMIC);
skb_push(skb_in, HSR_HLEN);
if (!skb)
return NULL;
skb_reset_mac_header(skb);
if (skb->ip_summed == CHECKSUM_PARTIAL)
skb->csum_start -= HSR_HLEN;
copylen = 2 * ETH_ALEN;
if (frame->is_vlan)
copylen += VLAN_HLEN;
src = skb_mac_header(skb_in);
dst = skb_mac_header(skb);
memcpy(dst, src, copylen);
skb->protocol = eth_hdr(skb)->h_proto;
return skb;
}
struct sk_buff *hsr_get_untagged_frame(struct hsr_frame_info *frame,
struct hsr_port *port)
{
if (!frame->skb_std) {
if (frame->skb_hsr)
frame->skb_std =
create_stripped_skb_hsr(frame->skb_hsr, frame);
else
netdev_warn_once(port->dev,
"Unexpected frame received in hsr_get_untagged_frame()\n");
if (!frame->skb_std)
return NULL;
}
return skb_clone(frame->skb_std, GFP_ATOMIC);
}
struct sk_buff *prp_get_untagged_frame(struct hsr_frame_info *frame,
struct hsr_port *port)
{
if (!frame->skb_std) {
if (frame->skb_prp) {
/* trim the skb by len - HSR_HLEN to exclude RCT */
skb_trim(frame->skb_prp,
frame->skb_prp->len - HSR_HLEN);
frame->skb_std =
__pskb_copy(frame->skb_prp,
skb_headroom(frame->skb_prp),
GFP_ATOMIC);
} else {
/* Unexpected */
WARN_ONCE(1, "%s:%d: Unexpected frame received (port_src %s)\n",
__FILE__, __LINE__, port->dev->name);
return NULL;
}
}
return skb_clone(frame->skb_std, GFP_ATOMIC);
}
static void prp_set_lan_id(struct prp_rct *trailer,
struct hsr_port *port)
{
int lane_id;
if (port->type == HSR_PT_SLAVE_A)
lane_id = 0;
else
lane_id = 1;
/* Add net_id in the upper 3 bits of lane_id */
lane_id |= port->hsr->net_id;
set_prp_lan_id(trailer, lane_id);
}
/* Tailroom for PRP rct should have been created before calling this */
static struct sk_buff *prp_fill_rct(struct sk_buff *skb,
struct hsr_frame_info *frame,
struct hsr_port *port)
{
struct prp_rct *trailer;
int min_size = ETH_ZLEN;
int lsdu_size;
if (!skb)
return skb;
if (frame->is_vlan)
min_size = VLAN_ETH_ZLEN;
if (skb_put_padto(skb, min_size))
return NULL;
trailer = (struct prp_rct *)skb_put(skb, HSR_HLEN);
lsdu_size = skb->len - 14;
if (frame->is_vlan)
lsdu_size -= 4;
prp_set_lan_id(trailer, port);
set_prp_LSDU_size(trailer, lsdu_size);
trailer->sequence_nr = htons(frame->sequence_nr);
trailer->PRP_suffix = htons(ETH_P_PRP);
skb->protocol = eth_hdr(skb)->h_proto;
return skb;
}
static void hsr_set_path_id(struct hsr_ethhdr *hsr_ethhdr,
struct hsr_port *port)
{
int path_id;
if (port->type == HSR_PT_SLAVE_A)
path_id = 0;
else
path_id = 1;
set_hsr_tag_path(&hsr_ethhdr->hsr_tag, path_id);
}
static struct sk_buff *hsr_fill_tag(struct sk_buff *skb,
struct hsr_frame_info *frame,
struct hsr_port *port, u8 proto_version)
{
struct hsr_ethhdr *hsr_ethhdr;
int lsdu_size;
/* pad to minimum packet size which is 60 + 6 (HSR tag) */
if (skb_put_padto(skb, ETH_ZLEN + HSR_HLEN))
return NULL;
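	/* The 14 below is the Ethernet header length; a VLAN tag, if present,
	 * is subtracted as well.
	 */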
lsdu_size = skb->len - 14;
if (frame->is_vlan)
lsdu_size -= 4;
hsr_ethhdr = (struct hsr_ethhdr *)skb_mac_header(skb);
hsr_set_path_id(hsr_ethhdr, port);
set_hsr_tag_LSDU_size(&hsr_ethhdr->hsr_tag, lsdu_size);
hsr_ethhdr->hsr_tag.sequence_nr = htons(frame->sequence_nr);
hsr_ethhdr->hsr_tag.encap_proto = hsr_ethhdr->ethhdr.h_proto;
hsr_ethhdr->ethhdr.h_proto = htons(proto_version ?
ETH_P_HSR : ETH_P_PRP);
skb->protocol = hsr_ethhdr->ethhdr.h_proto;
return skb;
}
/* If the original frame was an HSR tagged frame, just clone it to be sent
* unchanged. Otherwise, create a private frame especially tagged for 'port'.
*/
struct sk_buff *hsr_create_tagged_frame(struct hsr_frame_info *frame,
struct hsr_port *port)
{
unsigned char *dst, *src;
struct sk_buff *skb;
int movelen;
if (frame->skb_hsr) {
struct hsr_ethhdr *hsr_ethhdr =
(struct hsr_ethhdr *)skb_mac_header(frame->skb_hsr);
/* set the lane id properly */
hsr_set_path_id(hsr_ethhdr, port);
return skb_clone(frame->skb_hsr, GFP_ATOMIC);
} else if (port->dev->features & NETIF_F_HW_HSR_TAG_INS) {
return skb_clone(frame->skb_std, GFP_ATOMIC);
}
/* Create the new skb with enough headroom to fit the HSR tag */
skb = __pskb_copy(frame->skb_std,
skb_headroom(frame->skb_std) + HSR_HLEN, GFP_ATOMIC);
if (!skb)
return NULL;
skb_reset_mac_header(skb);
if (skb->ip_summed == CHECKSUM_PARTIAL)
skb->csum_start += HSR_HLEN;
movelen = ETH_HLEN;
if (frame->is_vlan)
movelen += VLAN_HLEN;
src = skb_mac_header(skb);
dst = skb_push(skb, HSR_HLEN);
memmove(dst, src, movelen);
skb_reset_mac_header(skb);
	/* skb_put_padto() frees the skb on error, and hsr_fill_tag() returns
	 * NULL in that case.
	 */
return hsr_fill_tag(skb, frame, port, port->hsr->prot_version);
}
struct sk_buff *prp_create_tagged_frame(struct hsr_frame_info *frame,
struct hsr_port *port)
{
struct sk_buff *skb;
if (frame->skb_prp) {
struct prp_rct *trailer = skb_get_PRP_rct(frame->skb_prp);
if (trailer) {
prp_set_lan_id(trailer, port);
} else {
WARN_ONCE(!trailer, "errored PRP skb");
return NULL;
}
return skb_clone(frame->skb_prp, GFP_ATOMIC);
} else if (port->dev->features & NETIF_F_HW_HSR_TAG_INS) {
return skb_clone(frame->skb_std, GFP_ATOMIC);
}
skb = skb_copy_expand(frame->skb_std, 0,
skb_tailroom(frame->skb_std) + HSR_HLEN,
GFP_ATOMIC);
prp_fill_rct(skb, frame, port);
return skb;
}
static void hsr_deliver_master(struct sk_buff *skb, struct net_device *dev,
struct hsr_node *node_src)
{
bool was_multicast_frame;
int res, recv_len;
was_multicast_frame = (skb->pkt_type == PACKET_MULTICAST);
hsr_addr_subst_source(node_src, skb);
skb_pull(skb, ETH_HLEN);
recv_len = skb->len;
res = netif_rx(skb);
if (res == NET_RX_DROP) {
dev->stats.rx_dropped++;
} else {
dev->stats.rx_packets++;
dev->stats.rx_bytes += recv_len;
if (was_multicast_frame)
dev->stats.multicast++;
}
}
static int hsr_xmit(struct sk_buff *skb, struct hsr_port *port,
struct hsr_frame_info *frame)
{
if (frame->port_rcv->type == HSR_PT_MASTER) {
hsr_addr_subst_dest(frame->node_src, skb, port);
		/* Address substitution (IEC62439-3 pp 26, 50): replace the MAC
		 * address of the outgoing frame with that of the outgoing slave.
		 */
ether_addr_copy(eth_hdr(skb)->h_source, port->dev->dev_addr);
}
return dev_queue_xmit(skb);
}
bool prp_drop_frame(struct hsr_frame_info *frame, struct hsr_port *port)
{
return ((frame->port_rcv->type == HSR_PT_SLAVE_A &&
port->type == HSR_PT_SLAVE_B) ||
(frame->port_rcv->type == HSR_PT_SLAVE_B &&
port->type == HSR_PT_SLAVE_A));
}
bool hsr_drop_frame(struct hsr_frame_info *frame, struct hsr_port *port)
{
if (port->dev->features & NETIF_F_HW_HSR_FWD)
return prp_drop_frame(frame, port);
return false;
}
/* Forward the frame through all devices except:
* - Back through the receiving device
 * - If it's an HSR frame: through a device where it has passed before
 * - If it's a PRP frame: through another PRP slave device (no bridge)
* - To the local HSR master only if the frame is directly addressed to it, or
* a non-supervision multicast or broadcast frame.
*
* HSR slave devices should insert a HSR tag into the frame, or forward the
* frame unchanged if it's already tagged. Interlink devices should strip HSR
* tags if they're of the non-HSR type (but only after duplicate discard). The
* master device always strips HSR tags.
*/
static void hsr_forward_do(struct hsr_frame_info *frame)
{
struct hsr_port *port;
struct sk_buff *skb;
bool sent = false;
hsr_for_each_port(frame->port_rcv->hsr, port) {
struct hsr_priv *hsr = port->hsr;
/* Don't send frame back the way it came */
if (port == frame->port_rcv)
continue;
/* Don't deliver locally unless we should */
if (port->type == HSR_PT_MASTER && !frame->is_local_dest)
continue;
/* Deliver frames directly addressed to us to master only */
if (port->type != HSR_PT_MASTER && frame->is_local_exclusive)
continue;
/* If hardware duplicate generation is enabled, only send out
* one port.
*/
if ((port->dev->features & NETIF_F_HW_HSR_DUP) && sent)
continue;
		/* Don't send the frame over a port where it has been sent
		 * before. For frames from a SAN, this check is skipped.
		 */
if (!frame->is_from_san &&
hsr_register_frame_out(port, frame->node_src,
frame->sequence_nr))
continue;
if (frame->is_supervision && port->type == HSR_PT_MASTER) {
hsr_handle_sup_frame(frame);
continue;
}
		/* Check if the frame is to be dropped, e.g. for PRP there is
		 * no forwarding between the slave ports.
		 */
if (hsr->proto_ops->drop_frame &&
hsr->proto_ops->drop_frame(frame, port))
continue;
if (port->type != HSR_PT_MASTER)
skb = hsr->proto_ops->create_tagged_frame(frame, port);
else
skb = hsr->proto_ops->get_untagged_frame(frame, port);
if (!skb) {
frame->port_rcv->dev->stats.rx_dropped++;
continue;
}
skb->dev = port->dev;
if (port->type == HSR_PT_MASTER) {
hsr_deliver_master(skb, port->dev, frame->node_src);
} else {
if (!hsr_xmit(skb, port, frame))
sent = true;
}
}
}
static void check_local_dest(struct hsr_priv *hsr, struct sk_buff *skb,
struct hsr_frame_info *frame)
{
if (hsr_addr_is_self(hsr, eth_hdr(skb)->h_dest)) {
frame->is_local_exclusive = true;
skb->pkt_type = PACKET_HOST;
} else {
frame->is_local_exclusive = false;
}
if (skb->pkt_type == PACKET_HOST ||
skb->pkt_type == PACKET_MULTICAST ||
skb->pkt_type == PACKET_BROADCAST) {
frame->is_local_dest = true;
} else {
frame->is_local_dest = false;
}
}
static void handle_std_frame(struct sk_buff *skb,
struct hsr_frame_info *frame)
{
struct hsr_port *port = frame->port_rcv;
struct hsr_priv *hsr = port->hsr;
frame->skb_hsr = NULL;
frame->skb_prp = NULL;
frame->skb_std = skb;
if (port->type != HSR_PT_MASTER) {
frame->is_from_san = true;
} else {
/* Sequence nr for the master node */
lockdep_assert_held(&hsr->seqnr_lock);
frame->sequence_nr = hsr->sequence_nr;
hsr->sequence_nr++;
}
}
int hsr_fill_frame_info(__be16 proto, struct sk_buff *skb,
struct hsr_frame_info *frame)
{
struct hsr_port *port = frame->port_rcv;
struct hsr_priv *hsr = port->hsr;
/* HSRv0 supervisory frames double as a tag so treat them as tagged. */
if ((!hsr->prot_version && proto == htons(ETH_P_PRP)) ||
proto == htons(ETH_P_HSR)) {
/* Check if skb contains hsr_ethhdr */
if (skb->mac_len < sizeof(struct hsr_ethhdr))
return -EINVAL;
/* HSR tagged frame :- Data or Supervision */
frame->skb_std = NULL;
frame->skb_prp = NULL;
frame->skb_hsr = skb;
frame->sequence_nr = hsr_get_skb_sequence_nr(skb);
return 0;
}
/* Standard frame or PRP from master port */
handle_std_frame(skb, frame);
return 0;
}
int prp_fill_frame_info(__be16 proto, struct sk_buff *skb,
struct hsr_frame_info *frame)
{
/* Supervision frame */
struct prp_rct *rct = skb_get_PRP_rct(skb);
if (rct &&
prp_check_lsdu_size(skb, rct, frame->is_supervision)) {
frame->skb_hsr = NULL;
frame->skb_std = NULL;
frame->skb_prp = skb;
frame->sequence_nr = prp_get_skb_sequence_nr(rct);
return 0;
}
handle_std_frame(skb, frame);
return 0;
}
static int fill_frame_info(struct hsr_frame_info *frame,
struct sk_buff *skb, struct hsr_port *port)
{
struct hsr_priv *hsr = port->hsr;
struct hsr_vlan_ethhdr *vlan_hdr;
struct ethhdr *ethhdr;
__be16 proto;
int ret;
/* Check if skb contains ethhdr */
if (skb->mac_len < sizeof(struct ethhdr))
return -EINVAL;
memset(frame, 0, sizeof(*frame));
frame->is_supervision = is_supervision_frame(port->hsr, skb);
frame->node_src = hsr_get_node(port, &hsr->node_db, skb,
frame->is_supervision,
port->type);
if (!frame->node_src)
return -1; /* Unknown node and !is_supervision, or no mem */
ethhdr = (struct ethhdr *)skb_mac_header(skb);
frame->is_vlan = false;
proto = ethhdr->h_proto;
if (proto == htons(ETH_P_8021Q))
frame->is_vlan = true;
if (frame->is_vlan) {
vlan_hdr = (struct hsr_vlan_ethhdr *)ethhdr;
proto = vlan_hdr->vlanhdr.h_vlan_encapsulated_proto;
/* FIXME: */
netdev_warn_once(skb->dev, "VLAN not yet supported");
return -EINVAL;
}
frame->is_from_san = false;
frame->port_rcv = port;
ret = hsr->proto_ops->fill_frame_info(proto, skb, frame);
if (ret)
return ret;
check_local_dest(port->hsr, skb, frame);
return 0;
}
/* Must be called holding rcu read lock (because of the port parameter) */
void hsr_forward_skb(struct sk_buff *skb, struct hsr_port *port)
{
struct hsr_frame_info frame;
rcu_read_lock();
if (fill_frame_info(&frame, skb, port) < 0)
goto out_drop;
hsr_register_frame_in(frame.node_src, port, frame.sequence_nr);
hsr_forward_do(&frame);
rcu_read_unlock();
	/* This gets called for frames arriving on the slaves as well as for
	 * egress from the master port, so only check and update the master
	 * port's tx stats here.
	 */
if (port->type == HSR_PT_MASTER) {
port->dev->stats.tx_packets++;
port->dev->stats.tx_bytes += skb->len;
}
kfree_skb(frame.skb_hsr);
kfree_skb(frame.skb_prp);
kfree_skb(frame.skb_std);
return;
out_drop:
rcu_read_unlock();
port->dev->stats.tx_dropped++;
kfree_skb(skb);
}
| linux-master | net/hsr/hsr_forward.c |
// SPDX-License-Identifier: GPL-2.0-only
/*
* debugfs code for HSR & PRP
* Copyright (C) 2019 Texas Instruments Incorporated
*
* Author(s):
* Murali Karicheri <[email protected]>
*/
#include <linux/module.h>
#include <linux/errno.h>
#include <linux/debugfs.h>
#include "hsr_main.h"
#include "hsr_framereg.h"
static struct dentry *hsr_debugfs_root_dir;
/* hsr_node_table_show - Formats and prints node_table entries */
static int
hsr_node_table_show(struct seq_file *sfp, void *data)
{
struct hsr_priv *priv = (struct hsr_priv *)sfp->private;
struct hsr_node *node;
seq_printf(sfp, "Node Table entries for (%s) device\n",
(priv->prot_version == PRP_V1 ? "PRP" : "HSR"));
seq_puts(sfp, "MAC-Address-A, MAC-Address-B, time_in[A], ");
seq_puts(sfp, "time_in[B], Address-B port, ");
if (priv->prot_version == PRP_V1)
seq_puts(sfp, "SAN-A, SAN-B, DAN-P\n");
else
seq_puts(sfp, "DAN-H\n");
rcu_read_lock();
list_for_each_entry_rcu(node, &priv->node_db, mac_list) {
/* skip self node */
if (hsr_addr_is_self(priv, node->macaddress_A))
continue;
seq_printf(sfp, "%pM ", &node->macaddress_A[0]);
seq_printf(sfp, "%pM ", &node->macaddress_B[0]);
seq_printf(sfp, "%10lx, ", node->time_in[HSR_PT_SLAVE_A]);
seq_printf(sfp, "%10lx, ", node->time_in[HSR_PT_SLAVE_B]);
seq_printf(sfp, "%14x, ", node->addr_B_port);
if (priv->prot_version == PRP_V1)
seq_printf(sfp, "%5x, %5x, %5x\n",
node->san_a, node->san_b,
(node->san_a == 0 && node->san_b == 0));
else
seq_printf(sfp, "%5x\n", 1);
}
rcu_read_unlock();
return 0;
}
DEFINE_SHOW_ATTRIBUTE(hsr_node_table);
void hsr_debugfs_rename(struct net_device *dev)
{
struct hsr_priv *priv = netdev_priv(dev);
struct dentry *d;
d = debugfs_rename(hsr_debugfs_root_dir, priv->node_tbl_root,
hsr_debugfs_root_dir, dev->name);
if (IS_ERR(d))
netdev_warn(dev, "failed to rename\n");
else
priv->node_tbl_root = d;
}
/* hsr_debugfs_init - create hsr node_table file for dumping
* the node table
*
* Description:
* When debugfs is configured this routine sets up the node_table file per
* hsr device for dumping the node_table entries
*/
void hsr_debugfs_init(struct hsr_priv *priv, struct net_device *hsr_dev)
{
struct dentry *de = NULL;
de = debugfs_create_dir(hsr_dev->name, hsr_debugfs_root_dir);
if (IS_ERR(de)) {
pr_err("Cannot create hsr debugfs directory\n");
return;
}
priv->node_tbl_root = de;
de = debugfs_create_file("node_table", S_IFREG | 0444,
priv->node_tbl_root, priv,
&hsr_node_table_fops);
if (IS_ERR(de)) {
pr_err("Cannot create hsr node_table file\n");
debugfs_remove(priv->node_tbl_root);
priv->node_tbl_root = NULL;
return;
}
}
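/* Usage note (illustrative): with debugfs mounted in the usual location, the
 * table created above can typically be read with e.g.
 *
 *   cat /sys/kernel/debug/hsr/hsr0/node_table
 *
 * where "hsr0" is the name of the HSR/PRP master device.
 */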
/* hsr_debugfs_term - Tear down debugfs infrastructure
*
* Description:
* When Debugfs is configured this routine removes debugfs file system
* elements that are specific to hsr
*/
void
hsr_debugfs_term(struct hsr_priv *priv)
{
debugfs_remove_recursive(priv->node_tbl_root);
priv->node_tbl_root = NULL;
}
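/* hsr_debugfs_create_root - Create the top level "hsr" debugfs directory
* under which the per-device directories are placed
*/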
void hsr_debugfs_create_root(void)
{
hsr_debugfs_root_dir = debugfs_create_dir("hsr", NULL);
if (IS_ERR(hsr_debugfs_root_dir)) {
pr_err("Cannot create hsr debugfs root directory\n");
hsr_debugfs_root_dir = NULL;
}
}
void hsr_debugfs_remove_root(void)
{
/* debugfs_remove() internally checks NULL and ERROR */
debugfs_remove(hsr_debugfs_root_dir);
}
| linux-master | net/hsr/hsr_debugfs.c |
// SPDX-License-Identifier: GPL-2.0-only
/*
* Copyright (c) 2015 Nicira, Inc.
*/
#include <linux/module.h>
#include <linux/openvswitch.h>
#include <linux/tcp.h>
#include <linux/udp.h>
#include <linux/sctp.h>
#include <linux/static_key.h>
#include <linux/string_helpers.h>
#include <net/ip.h>
#include <net/genetlink.h>
#include <net/netfilter/nf_conntrack_core.h>
#include <net/netfilter/nf_conntrack_count.h>
#include <net/netfilter/nf_conntrack_helper.h>
#include <net/netfilter/nf_conntrack_labels.h>
#include <net/netfilter/nf_conntrack_seqadj.h>
#include <net/netfilter/nf_conntrack_timeout.h>
#include <net/netfilter/nf_conntrack_zones.h>
#include <net/netfilter/ipv6/nf_defrag_ipv6.h>
#include <net/ipv6_frag.h>
#if IS_ENABLED(CONFIG_NF_NAT)
#include <net/netfilter/nf_nat.h>
#endif
#include <net/netfilter/nf_conntrack_act_ct.h>
#include "datapath.h"
#include "drop.h"
#include "conntrack.h"
#include "flow.h"
#include "flow_netlink.h"
struct ovs_ct_len_tbl {
int maxlen;
int minlen;
};
/* Metadata mark for masked write to conntrack mark */
struct md_mark {
u32 value;
u32 mask;
};
/* Metadata label for masked write to conntrack label. */
struct md_labels {
struct ovs_key_ct_labels value;
struct ovs_key_ct_labels mask;
};
enum ovs_ct_nat {
OVS_CT_NAT = 1 << 0, /* NAT for committed connections only. */
OVS_CT_SRC_NAT = 1 << 1, /* Source NAT for NEW connections. */
OVS_CT_DST_NAT = 1 << 2, /* Destination NAT for NEW connections. */
};
/* Conntrack action context for execution. */
struct ovs_conntrack_info {
struct nf_conntrack_helper *helper;
struct nf_conntrack_zone zone;
struct nf_conn *ct;
u8 commit : 1;
u8 nat : 3; /* enum ovs_ct_nat */
u8 force : 1;
u8 have_eventmask : 1;
u16 family;
u32 eventmask; /* Mask of 1 << IPCT_*. */
struct md_mark mark;
struct md_labels labels;
char timeout[CTNL_TIMEOUT_NAME_MAX];
struct nf_ct_timeout *nf_ct_timeout;
#if IS_ENABLED(CONFIG_NF_NAT)
struct nf_nat_range2 range; /* Only present for SRC NAT and DST NAT. */
#endif
};
#if IS_ENABLED(CONFIG_NETFILTER_CONNCOUNT)
#define OVS_CT_LIMIT_UNLIMITED 0
#define OVS_CT_LIMIT_DEFAULT OVS_CT_LIMIT_UNLIMITED
#define CT_LIMIT_HASH_BUCKETS 512
static DEFINE_STATIC_KEY_FALSE(ovs_ct_limit_enabled);
struct ovs_ct_limit {
/* Elements in ovs_ct_limit_info->limits hash table */
struct hlist_node hlist_node;
struct rcu_head rcu;
u16 zone;
u32 limit;
};
struct ovs_ct_limit_info {
u32 default_limit;
struct hlist_head *limits;
struct nf_conncount_data *data;
};
static const struct nla_policy ct_limit_policy[OVS_CT_LIMIT_ATTR_MAX + 1] = {
[OVS_CT_LIMIT_ATTR_ZONE_LIMIT] = { .type = NLA_NESTED, },
};
#endif
static bool labels_nonzero(const struct ovs_key_ct_labels *labels);
static void __ovs_ct_free_action(struct ovs_conntrack_info *ct_info);
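/* Map the Ethernet type of the flow key to the netfilter protocol family. */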
static u16 key_to_nfproto(const struct sw_flow_key *key)
{
switch (ntohs(key->eth.type)) {
case ETH_P_IP:
return NFPROTO_IPV4;
case ETH_P_IPV6:
return NFPROTO_IPV6;
default:
return NFPROTO_UNSPEC;
}
}
/* Map SKB connection state into the values used by flow definition. */
static u8 ovs_ct_get_state(enum ip_conntrack_info ctinfo)
{
u8 ct_state = OVS_CS_F_TRACKED;
switch (ctinfo) {
case IP_CT_ESTABLISHED_REPLY:
case IP_CT_RELATED_REPLY:
ct_state |= OVS_CS_F_REPLY_DIR;
break;
default:
break;
}
switch (ctinfo) {
case IP_CT_ESTABLISHED:
case IP_CT_ESTABLISHED_REPLY:
ct_state |= OVS_CS_F_ESTABLISHED;
break;
case IP_CT_RELATED:
case IP_CT_RELATED_REPLY:
ct_state |= OVS_CS_F_RELATED;
break;
case IP_CT_NEW:
ct_state |= OVS_CS_F_NEW;
break;
default:
break;
}
return ct_state;
}
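/* Return the conntrack mark of 'ct', or 0 if conntrack marks are not
* compiled in or no conntrack entry is given.
*/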
static u32 ovs_ct_get_mark(const struct nf_conn *ct)
{
#if IS_ENABLED(CONFIG_NF_CONNTRACK_MARK)
return ct ? READ_ONCE(ct->mark) : 0;
#else
return 0;
#endif
}
/* Guard against conntrack labels max size shrinking below 128 bits. */
#if NF_CT_LABELS_MAX_SIZE < 16
#error NF_CT_LABELS_MAX_SIZE must be at least 16 bytes
#endif
static void ovs_ct_get_labels(const struct nf_conn *ct,
struct ovs_key_ct_labels *labels)
{
struct nf_conn_labels *cl = ct ? nf_ct_labels_find(ct) : NULL;
if (cl)
memcpy(labels, cl->bits, OVS_CT_LABELS_LEN);
else
memset(labels, 0, OVS_CT_LABELS_LEN);
}
static void __ovs_ct_update_key_orig_tp(struct sw_flow_key *key,
const struct nf_conntrack_tuple *orig,
u8 icmp_proto)
{
key->ct_orig_proto = orig->dst.protonum;
if (orig->dst.protonum == icmp_proto) {
key->ct.orig_tp.src = htons(orig->dst.u.icmp.type);
key->ct.orig_tp.dst = htons(orig->dst.u.icmp.code);
} else {
key->ct.orig_tp.src = orig->src.u.all;
key->ct.orig_tp.dst = orig->dst.u.all;
}
}
static void __ovs_ct_update_key(struct sw_flow_key *key, u8 state,
const struct nf_conntrack_zone *zone,
const struct nf_conn *ct)
{
key->ct_state = state;
key->ct_zone = zone->id;
key->ct.mark = ovs_ct_get_mark(ct);
ovs_ct_get_labels(ct, &key->ct.labels);
if (ct) {
const struct nf_conntrack_tuple *orig;
/* Use the master if we have one. */
if (ct->master)
ct = ct->master;
orig = &ct->tuplehash[IP_CT_DIR_ORIGINAL].tuple;
/* IP version must match with the master connection. */
if (key->eth.type == htons(ETH_P_IP) &&
nf_ct_l3num(ct) == NFPROTO_IPV4) {
key->ipv4.ct_orig.src = orig->src.u3.ip;
key->ipv4.ct_orig.dst = orig->dst.u3.ip;
__ovs_ct_update_key_orig_tp(key, orig, IPPROTO_ICMP);
return;
} else if (key->eth.type == htons(ETH_P_IPV6) &&
!sw_flow_key_is_nd(key) &&
nf_ct_l3num(ct) == NFPROTO_IPV6) {
key->ipv6.ct_orig.src = orig->src.u3.in6;
key->ipv6.ct_orig.dst = orig->dst.u3.in6;
__ovs_ct_update_key_orig_tp(key, orig, NEXTHDR_ICMP);
return;
}
}
/* Clear 'ct_orig_proto' to mark the non-existence of conntrack
* original direction key fields.
*/
key->ct_orig_proto = 0;
}
/* Update 'key' based on skb->_nfct. If 'post_ct' is true, then OVS has
* previously sent the packet to conntrack via the ct action. If
* 'keep_nat_flags' is true, the existing NAT flags are retained, else they are
* initialized from the connection status.
*/
static void ovs_ct_update_key(const struct sk_buff *skb,
const struct ovs_conntrack_info *info,
struct sw_flow_key *key, bool post_ct,
bool keep_nat_flags)
{
const struct nf_conntrack_zone *zone = &nf_ct_zone_dflt;
enum ip_conntrack_info ctinfo;
struct nf_conn *ct;
u8 state = 0;
ct = nf_ct_get(skb, &ctinfo);
if (ct) {
state = ovs_ct_get_state(ctinfo);
/* All unconfirmed entries are NEW connections. */
if (!nf_ct_is_confirmed(ct))
state |= OVS_CS_F_NEW;
/* OVS persists the related flag for the duration of the
* connection.
*/
if (ct->master)
state |= OVS_CS_F_RELATED;
if (keep_nat_flags) {
state |= key->ct_state & OVS_CS_F_NAT_MASK;
} else {
if (ct->status & IPS_SRC_NAT)
state |= OVS_CS_F_SRC_NAT;
if (ct->status & IPS_DST_NAT)
state |= OVS_CS_F_DST_NAT;
}
zone = nf_ct_zone(ct);
} else if (post_ct) {
state = OVS_CS_F_TRACKED | OVS_CS_F_INVALID;
if (info)
zone = &info->zone;
}
__ovs_ct_update_key(key, state, zone, ct);
}
/* This is called to initialize CT key fields possibly coming in from the local
* stack.
*/
void ovs_ct_fill_key(const struct sk_buff *skb,
struct sw_flow_key *key,
bool post_ct)
{
ovs_ct_update_key(skb, NULL, key, post_ct, false);
}
int ovs_ct_put_key(const struct sw_flow_key *swkey,
const struct sw_flow_key *output, struct sk_buff *skb)
{
if (nla_put_u32(skb, OVS_KEY_ATTR_CT_STATE, output->ct_state))
return -EMSGSIZE;
if (IS_ENABLED(CONFIG_NF_CONNTRACK_ZONES) &&
nla_put_u16(skb, OVS_KEY_ATTR_CT_ZONE, output->ct_zone))
return -EMSGSIZE;
if (IS_ENABLED(CONFIG_NF_CONNTRACK_MARK) &&
nla_put_u32(skb, OVS_KEY_ATTR_CT_MARK, output->ct.mark))
return -EMSGSIZE;
if (IS_ENABLED(CONFIG_NF_CONNTRACK_LABELS) &&
nla_put(skb, OVS_KEY_ATTR_CT_LABELS, sizeof(output->ct.labels),
&output->ct.labels))
return -EMSGSIZE;
if (swkey->ct_orig_proto) {
if (swkey->eth.type == htons(ETH_P_IP)) {
struct ovs_key_ct_tuple_ipv4 orig;
memset(&orig, 0, sizeof(orig));
orig.ipv4_src = output->ipv4.ct_orig.src;
orig.ipv4_dst = output->ipv4.ct_orig.dst;
orig.src_port = output->ct.orig_tp.src;
orig.dst_port = output->ct.orig_tp.dst;
orig.ipv4_proto = output->ct_orig_proto;
if (nla_put(skb, OVS_KEY_ATTR_CT_ORIG_TUPLE_IPV4,
sizeof(orig), &orig))
return -EMSGSIZE;
} else if (swkey->eth.type == htons(ETH_P_IPV6)) {
struct ovs_key_ct_tuple_ipv6 orig;
memset(&orig, 0, sizeof(orig));
memcpy(orig.ipv6_src, output->ipv6.ct_orig.src.s6_addr32,
sizeof(orig.ipv6_src));
memcpy(orig.ipv6_dst, output->ipv6.ct_orig.dst.s6_addr32,
sizeof(orig.ipv6_dst));
orig.src_port = output->ct.orig_tp.src;
orig.dst_port = output->ct.orig_tp.dst;
orig.ipv6_proto = output->ct_orig_proto;
if (nla_put(skb, OVS_KEY_ATTR_CT_ORIG_TUPLE_IPV6,
sizeof(orig), &orig))
return -EMSGSIZE;
}
}
return 0;
}
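/* Perform a masked write of 'ct_mark' to the conntrack mark and mirror the
* result into the flow key. A mark change on a confirmed connection is
* added to the conntrack event cache.
*/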
static int ovs_ct_set_mark(struct nf_conn *ct, struct sw_flow_key *key,
u32 ct_mark, u32 mask)
{
#if IS_ENABLED(CONFIG_NF_CONNTRACK_MARK)
u32 new_mark;
new_mark = ct_mark | (READ_ONCE(ct->mark) & ~(mask));
if (READ_ONCE(ct->mark) != new_mark) {
WRITE_ONCE(ct->mark, new_mark);
if (nf_ct_is_confirmed(ct))
nf_conntrack_event_cache(IPCT_MARK, ct);
key->ct.mark = new_mark;
}
return 0;
#else
return -ENOTSUPP;
#endif
}
static struct nf_conn_labels *ovs_ct_get_conn_labels(struct nf_conn *ct)
{
struct nf_conn_labels *cl;
cl = nf_ct_labels_find(ct);
if (!cl) {
nf_ct_labels_ext_add(ct);
cl = nf_ct_labels_find(ct);
}
return cl;
}
/* Initialize labels for a new, yet to be committed conntrack entry. Note that
* since the new connection is not yet confirmed, and thus no-one else has
* access to its labels, we simply write them over.
*/
static int ovs_ct_init_labels(struct nf_conn *ct, struct sw_flow_key *key,
const struct ovs_key_ct_labels *labels,
const struct ovs_key_ct_labels *mask)
{
struct nf_conn_labels *cl, *master_cl;
bool have_mask = labels_nonzero(mask);
/* Inherit master's labels to the related connection? */
master_cl = ct->master ? nf_ct_labels_find(ct->master) : NULL;
if (!master_cl && !have_mask)
return 0; /* Nothing to do. */
cl = ovs_ct_get_conn_labels(ct);
if (!cl)
return -ENOSPC;
/* Inherit the master's labels, if any. */
if (master_cl)
*cl = *master_cl;
if (have_mask) {
u32 *dst = (u32 *)cl->bits;
int i;
for (i = 0; i < OVS_CT_LABELS_LEN_32; i++)
dst[i] = (dst[i] & ~mask->ct_labels_32[i]) |
(labels->ct_labels_32[i]
& mask->ct_labels_32[i]);
}
/* Labels are included in the IPCTNL_MSG_CT_NEW event only if the
* IPCT_LABEL bit is set in the event cache.
*/
nf_conntrack_event_cache(IPCT_LABEL, ct);
memcpy(&key->ct.labels, cl->bits, OVS_CT_LABELS_LEN);
return 0;
}
static int ovs_ct_set_labels(struct nf_conn *ct, struct sw_flow_key *key,
const struct ovs_key_ct_labels *labels,
const struct ovs_key_ct_labels *mask)
{
struct nf_conn_labels *cl;
int err;
cl = ovs_ct_get_conn_labels(ct);
if (!cl)
return -ENOSPC;
err = nf_connlabels_replace(ct, labels->ct_labels_32,
mask->ct_labels_32,
OVS_CT_LABELS_LEN_32);
if (err)
return err;
memcpy(&key->ct.labels, cl->bits, OVS_CT_LABELS_LEN);
return 0;
}
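/* Reassemble IP fragments in the given zone and refresh the L3/L4 fields
* of the flow key from the reassembled packet.
*/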
static int ovs_ct_handle_fragments(struct net *net, struct sw_flow_key *key,
u16 zone, int family, struct sk_buff *skb)
{
struct ovs_skb_cb ovs_cb = *OVS_CB(skb);
int err;
err = nf_ct_handle_fragments(net, skb, zone, family, &key->ip.proto, &ovs_cb.mru);
if (err)
return err;
/* The key extracted from the fragment that completed this datagram
* likely didn't have an L4 header, so regenerate it.
*/
ovs_flow_key_update_l3l4(skb, key);
key->ip.frag = OVS_FRAG_TYPE_NONE;
*OVS_CB(skb) = ovs_cb;
return 0;
}
/* This replicates logic from nf_conntrack_core.c that is not exported. */
static enum ip_conntrack_info
ovs_ct_get_info(const struct nf_conntrack_tuple_hash *h)
{
const struct nf_conn *ct = nf_ct_tuplehash_to_ctrack(h);
if (NF_CT_DIRECTION(h) == IP_CT_DIR_REPLY)
return IP_CT_ESTABLISHED_REPLY;
/* Once we've had two way comms, always ESTABLISHED. */
if (test_bit(IPS_SEEN_REPLY_BIT, &ct->status))
return IP_CT_ESTABLISHED;
if (test_bit(IPS_EXPECTED_BIT, &ct->status))
return IP_CT_RELATED;
return IP_CT_NEW;
}
/* Find an existing connection which this packet belongs to without
* re-attributing statistics or modifying the connection state. This allows an
* skb->_nfct lost due to an upcall to be recovered during actions execution.
*
* Must be called with rcu_read_lock.
*
* On success, populates skb->_nfct and returns the connection. Returns NULL
* if there is no existing entry.
*/
static struct nf_conn *
ovs_ct_find_existing(struct net *net, const struct nf_conntrack_zone *zone,
u8 l3num, struct sk_buff *skb, bool natted)
{
struct nf_conntrack_tuple tuple;
struct nf_conntrack_tuple_hash *h;
struct nf_conn *ct;
if (!nf_ct_get_tuplepr(skb, skb_network_offset(skb), l3num,
net, &tuple)) {
pr_debug("ovs_ct_find_existing: Can't get tuple\n");
return NULL;
}
/* Must invert the tuple if skb has been transformed by NAT. */
if (natted) {
struct nf_conntrack_tuple inverse;
if (!nf_ct_invert_tuple(&inverse, &tuple)) {
pr_debug("ovs_ct_find_existing: Inversion failed!\n");
return NULL;
}
tuple = inverse;
}
/* look for tuple match */
h = nf_conntrack_find_get(net, zone, &tuple);
if (!h)
return NULL; /* Not found. */
ct = nf_ct_tuplehash_to_ctrack(h);
/* Inverted packet tuple matches the reverse direction conntrack tuple,
* select the other tuplehash to get the right 'ctinfo' bits for this
* packet.
*/
if (natted)
h = &ct->tuplehash[!h->tuple.dst.dir];
nf_ct_set(skb, ct, ovs_ct_get_info(h));
return ct;
}
static
struct nf_conn *ovs_ct_executed(struct net *net,
const struct sw_flow_key *key,
const struct ovs_conntrack_info *info,
struct sk_buff *skb,
bool *ct_executed)
{
struct nf_conn *ct = NULL;
/* If no ct, check if we have evidence that an existing conntrack entry
* might be found for this skb. This happens when we lose a skb->_nfct
* due to an upcall, or if the direction is being forced. If the
* connection was not confirmed, it is not cached and needs to be run
* through conntrack again.
*/
*ct_executed = (key->ct_state & OVS_CS_F_TRACKED) &&
!(key->ct_state & OVS_CS_F_INVALID) &&
(key->ct_zone == info->zone.id);
if (*ct_executed || (!key->ct_state && info->force)) {
ct = ovs_ct_find_existing(net, &info->zone, info->family, skb,
!!(key->ct_state &
OVS_CS_F_NAT_MASK));
}
return ct;
}
/* Determine whether skb->_nfct is equal to the result of conntrack lookup. */
static bool skb_nfct_cached(struct net *net,
const struct sw_flow_key *key,
const struct ovs_conntrack_info *info,
struct sk_buff *skb)
{
enum ip_conntrack_info ctinfo;
struct nf_conn *ct;
bool ct_executed = true;
ct = nf_ct_get(skb, &ctinfo);
if (!ct)
ct = ovs_ct_executed(net, key, info, skb, &ct_executed);
if (ct)
nf_ct_get(skb, &ctinfo);
else
return false;
if (!net_eq(net, read_pnet(&ct->ct_net)))
return false;
if (!nf_ct_zone_equal_any(info->ct, nf_ct_zone(ct)))
return false;
if (info->helper) {
struct nf_conn_help *help;
help = nf_ct_ext_find(ct, NF_CT_EXT_HELPER);
if (help && rcu_access_pointer(help->helper) != info->helper)
return false;
}
if (info->nf_ct_timeout) {
struct nf_conn_timeout *timeout_ext;
timeout_ext = nf_ct_timeout_find(ct);
if (!timeout_ext || info->nf_ct_timeout !=
rcu_dereference(timeout_ext->timeout))
return false;
}
/* Force conntrack entry direction to the current packet? */
if (info->force && CTINFO2DIR(ctinfo) != IP_CT_DIR_ORIGINAL) {
/* Delete the conntrack entry if confirmed, else just release
* the reference.
*/
if (nf_ct_is_confirmed(ct))
nf_ct_delete(ct, 0, 0);
nf_ct_put(ct);
nf_ct_set(skb, NULL, 0);
return false;
}
return ct_executed;
}
#if IS_ENABLED(CONFIG_NF_NAT)
static void ovs_nat_update_key(struct sw_flow_key *key,
const struct sk_buff *skb,
enum nf_nat_manip_type maniptype)
{
if (maniptype == NF_NAT_MANIP_SRC) {
__be16 src;
key->ct_state |= OVS_CS_F_SRC_NAT;
if (key->eth.type == htons(ETH_P_IP))
key->ipv4.addr.src = ip_hdr(skb)->saddr;
else if (key->eth.type == htons(ETH_P_IPV6))
memcpy(&key->ipv6.addr.src, &ipv6_hdr(skb)->saddr,
sizeof(key->ipv6.addr.src));
else
return;
if (key->ip.proto == IPPROTO_UDP)
src = udp_hdr(skb)->source;
else if (key->ip.proto == IPPROTO_TCP)
src = tcp_hdr(skb)->source;
else if (key->ip.proto == IPPROTO_SCTP)
src = sctp_hdr(skb)->source;
else
return;
key->tp.src = src;
} else {
__be16 dst;
key->ct_state |= OVS_CS_F_DST_NAT;
if (key->eth.type == htons(ETH_P_IP))
key->ipv4.addr.dst = ip_hdr(skb)->daddr;
else if (key->eth.type == htons(ETH_P_IPV6))
memcpy(&key->ipv6.addr.dst, &ipv6_hdr(skb)->daddr,
sizeof(key->ipv6.addr.dst));
else
return;
if (key->ip.proto == IPPROTO_UDP)
dst = udp_hdr(skb)->dest;
else if (key->ip.proto == IPPROTO_TCP)
dst = tcp_hdr(skb)->dest;
else if (key->ip.proto == IPPROTO_SCTP)
dst = sctp_hdr(skb)->dest;
else
return;
key->tp.dst = dst;
}
}
/* Returns NF_DROP if the packet should be dropped, NF_ACCEPT otherwise. */
static int ovs_ct_nat(struct net *net, struct sw_flow_key *key,
const struct ovs_conntrack_info *info,
struct sk_buff *skb, struct nf_conn *ct,
enum ip_conntrack_info ctinfo)
{
int err, action = 0;
if (!(info->nat & OVS_CT_NAT))
return NF_ACCEPT;
if (info->nat & OVS_CT_SRC_NAT)
action |= BIT(NF_NAT_MANIP_SRC);
if (info->nat & OVS_CT_DST_NAT)
action |= BIT(NF_NAT_MANIP_DST);
err = nf_ct_nat(skb, ct, ctinfo, &action, &info->range, info->commit);
if (action & BIT(NF_NAT_MANIP_SRC))
ovs_nat_update_key(key, skb, NF_NAT_MANIP_SRC);
if (action & BIT(NF_NAT_MANIP_DST))
ovs_nat_update_key(key, skb, NF_NAT_MANIP_DST);
return err;
}
#else /* !CONFIG_NF_NAT */
static int ovs_ct_nat(struct net *net, struct sw_flow_key *key,
const struct ovs_conntrack_info *info,
struct sk_buff *skb, struct nf_conn *ct,
enum ip_conntrack_info ctinfo)
{
return NF_ACCEPT;
}
#endif
/* Pass 'skb' through conntrack in 'net', using zone configured in 'info', if
* not done already. Update key with new CT state after passing the packet
* through conntrack.
* Note that if the packet is deemed invalid by conntrack, skb->_nfct will be
* set to NULL and 0 will be returned.
*/
static int __ovs_ct_lookup(struct net *net, struct sw_flow_key *key,
const struct ovs_conntrack_info *info,
struct sk_buff *skb)
{
/* If we are recirculating packets to match on conntrack fields and
* committing with a separate conntrack action, then we don't need to
* actually run the packet through conntrack twice unless it's for a
* different zone.
*/
bool cached = skb_nfct_cached(net, key, info, skb);
enum ip_conntrack_info ctinfo;
struct nf_conn *ct;
if (!cached) {
struct nf_hook_state state = {
.hook = NF_INET_PRE_ROUTING,
.pf = info->family,
.net = net,
};
struct nf_conn *tmpl = info->ct;
int err;
/* Associate skb with specified zone. */
if (tmpl) {
ct = nf_ct_get(skb, &ctinfo);
nf_ct_put(ct);
nf_conntrack_get(&tmpl->ct_general);
nf_ct_set(skb, tmpl, IP_CT_NEW);
}
err = nf_conntrack_in(skb, &state);
if (err != NF_ACCEPT)
return -ENOENT;
/* Clear CT state NAT flags to mark that we have not yet done
* NAT after the nf_conntrack_in() call. We can actually clear
* the whole state, as it will be re-initialized below.
*/
key->ct_state = 0;
/* Update the key, but keep the NAT flags. */
ovs_ct_update_key(skb, info, key, true, true);
}
ct = nf_ct_get(skb, &ctinfo);
if (ct) {
bool add_helper = false;
/* Packets starting a new connection must be NATted before the
* helper, so that the helper knows about the NAT. We enforce
* this by delaying both NAT and helper calls for unconfirmed
* connections until the committing CT action. For later
* packets NAT and Helper may be called in either order.
*
* NAT will be done only if the CT action has NAT, and only
* once per packet (per zone), as guarded by the NAT bits in
* the key->ct_state.
*/
if (info->nat && !(key->ct_state & OVS_CS_F_NAT_MASK) &&
(nf_ct_is_confirmed(ct) || info->commit) &&
ovs_ct_nat(net, key, info, skb, ct, ctinfo) != NF_ACCEPT) {
return -EINVAL;
}
/* Userspace may decide to perform a ct lookup without a helper
* specified followed by a (recirculate and) commit with one,
* or attach a helper in a later commit. Therefore, for
* connections which we will commit, we may need to attach
* the helper here.
*/
if (!nf_ct_is_confirmed(ct) && info->commit &&
info->helper && !nfct_help(ct)) {
int err = __nf_ct_try_assign_helper(ct, info->ct,
GFP_ATOMIC);
if (err)
return err;
add_helper = true;
/* helper installed, add seqadj if NAT is required */
if (info->nat && !nfct_seqadj(ct)) {
if (!nfct_seqadj_ext_add(ct))
return -EINVAL;
}
}
/* Call the helper only if:
* - nf_conntrack_in() was executed above ("!cached") or a
* helper was just attached ("add_helper") for a confirmed
* connection, or
* - When committing an unconfirmed connection.
*/
if ((nf_ct_is_confirmed(ct) ? !cached || add_helper :
info->commit) &&
nf_ct_helper(skb, ct, ctinfo, info->family) != NF_ACCEPT) {
return -EINVAL;
}
if (nf_ct_protonum(ct) == IPPROTO_TCP &&
nf_ct_is_confirmed(ct) && nf_conntrack_tcp_established(ct)) {
/* Be liberal for tcp packets so that out-of-window
* packets are not marked invalid.
*/
nf_ct_set_tcp_be_liberal(ct);
}
nf_conn_act_ct_ext_fill(skb, ct, ctinfo);
}
return 0;
}
/* Lookup connection and read fields into key. */
static int ovs_ct_lookup(struct net *net, struct sw_flow_key *key,
const struct ovs_conntrack_info *info,
struct sk_buff *skb)
{
struct nf_conn *ct;
int err;
err = __ovs_ct_lookup(net, key, info, skb);
if (err)
return err;
ct = (struct nf_conn *)skb_nfct(skb);
if (ct)
nf_ct_deliver_cached_events(ct);
return 0;
}
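/* Return true if any bit of the 128-bit conntrack label value is set. */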
static bool labels_nonzero(const struct ovs_key_ct_labels *labels)
{
size_t i;
for (i = 0; i < OVS_CT_LABELS_LEN_32; i++)
if (labels->ct_labels_32[i])
return true;
return false;
}
#if IS_ENABLED(CONFIG_NETFILTER_CONNCOUNT)
static struct hlist_head *ct_limit_hash_bucket(
const struct ovs_ct_limit_info *info, u16 zone)
{
return &info->limits[zone & (CT_LIMIT_HASH_BUCKETS - 1)];
}
/* Call with ovs_mutex */
static void ct_limit_set(const struct ovs_ct_limit_info *info,
struct ovs_ct_limit *new_ct_limit)
{
struct ovs_ct_limit *ct_limit;
struct hlist_head *head;
head = ct_limit_hash_bucket(info, new_ct_limit->zone);
hlist_for_each_entry_rcu(ct_limit, head, hlist_node) {
if (ct_limit->zone == new_ct_limit->zone) {
hlist_replace_rcu(&ct_limit->hlist_node,
&new_ct_limit->hlist_node);
kfree_rcu(ct_limit, rcu);
return;
}
}
hlist_add_head_rcu(&new_ct_limit->hlist_node, head);
}
/* Call with ovs_mutex */
static void ct_limit_del(const struct ovs_ct_limit_info *info, u16 zone)
{
struct ovs_ct_limit *ct_limit;
struct hlist_head *head;
struct hlist_node *n;
head = ct_limit_hash_bucket(info, zone);
hlist_for_each_entry_safe(ct_limit, n, head, hlist_node) {
if (ct_limit->zone == zone) {
hlist_del_rcu(&ct_limit->hlist_node);
kfree_rcu(ct_limit, rcu);
return;
}
}
}
/* Call with RCU read lock */
static u32 ct_limit_get(const struct ovs_ct_limit_info *info, u16 zone)
{
struct ovs_ct_limit *ct_limit;
struct hlist_head *head;
head = ct_limit_hash_bucket(info, zone);
hlist_for_each_entry_rcu(ct_limit, head, hlist_node) {
if (ct_limit->zone == zone)
return ct_limit->limit;
}
return info->default_limit;
}
static int ovs_ct_check_limit(struct net *net,
const struct ovs_conntrack_info *info,
const struct nf_conntrack_tuple *tuple)
{
struct ovs_net *ovs_net = net_generic(net, ovs_net_id);
const struct ovs_ct_limit_info *ct_limit_info = ovs_net->ct_limit_info;
u32 per_zone_limit, connections;
u32 conncount_key;
conncount_key = info->zone.id;
per_zone_limit = ct_limit_get(ct_limit_info, info->zone.id);
if (per_zone_limit == OVS_CT_LIMIT_UNLIMITED)
return 0;
connections = nf_conncount_count(net, ct_limit_info->data,
&conncount_key, tuple, &info->zone);
if (connections > per_zone_limit)
return -ENOMEM;
return 0;
}
#endif
/* Lookup connection and confirm if unconfirmed. */
static int ovs_ct_commit(struct net *net, struct sw_flow_key *key,
const struct ovs_conntrack_info *info,
struct sk_buff *skb)
{
enum ip_conntrack_info ctinfo;
struct nf_conn *ct;
int err;
err = __ovs_ct_lookup(net, key, info, skb);
if (err)
return err;
/* The connection could be invalid, in which case this is a no-op. */
ct = nf_ct_get(skb, &ctinfo);
if (!ct)
return 0;
#if IS_ENABLED(CONFIG_NETFILTER_CONNCOUNT)
if (static_branch_unlikely(&ovs_ct_limit_enabled)) {
if (!nf_ct_is_confirmed(ct)) {
err = ovs_ct_check_limit(net, info,
&ct->tuplehash[IP_CT_DIR_ORIGINAL].tuple);
if (err) {
net_warn_ratelimited("openvswitch: zone: %u "
"exceeds conntrack limit\n",
info->zone.id);
return err;
}
}
}
#endif
/* Set the conntrack event mask if given. NEW and DELETE events have
* their own groups, but the NFNLGRP_CONNTRACK_UPDATE group listener
* typically would receive many kinds of updates. Setting the event
* mask allows those events to be filtered. The set event mask will
* remain in effect for the lifetime of the connection unless changed
* by a further CT action with both the commit flag and the eventmask
* option. */
if (info->have_eventmask) {
struct nf_conntrack_ecache *cache = nf_ct_ecache_find(ct);
if (cache)
cache->ctmask = info->eventmask;
}
/* Apply changes before confirming the connection so that the initial
* conntrack NEW netlink event carries the values given in the CT
* action.
*/
if (info->mark.mask) {
err = ovs_ct_set_mark(ct, key, info->mark.value,
info->mark.mask);
if (err)
return err;
}
if (!nf_ct_is_confirmed(ct)) {
err = ovs_ct_init_labels(ct, key, &info->labels.value,
&info->labels.mask);
if (err)
return err;
nf_conn_act_ct_ext_add(ct);
} else if (IS_ENABLED(CONFIG_NF_CONNTRACK_LABELS) &&
labels_nonzero(&info->labels.mask)) {
err = ovs_ct_set_labels(ct, key, &info->labels.value,
&info->labels.mask);
if (err)
return err;
}
/* This will take care of sending queued events even if the connection
* is already confirmed.
*/
if (nf_conntrack_confirm(skb) != NF_ACCEPT)
return -EINVAL;
return 0;
}
/* Returns 0 on success, -EINPROGRESS if 'skb' is stolen, or other nonzero
* value if 'skb' is freed.
*/
int ovs_ct_execute(struct net *net, struct sk_buff *skb,
struct sw_flow_key *key,
const struct ovs_conntrack_info *info)
{
int nh_ofs;
int err;
/* The conntrack module expects to be working at L3. */
nh_ofs = skb_network_offset(skb);
skb_pull_rcsum(skb, nh_ofs);
err = nf_ct_skb_network_trim(skb, info->family);
if (err) {
kfree_skb(skb);
return err;
}
if (key->ip.frag != OVS_FRAG_TYPE_NONE) {
err = ovs_ct_handle_fragments(net, key, info->zone.id,
info->family, skb);
if (err)
return err;
}
if (info->commit)
err = ovs_ct_commit(net, key, info, skb);
else
err = ovs_ct_lookup(net, key, info, skb);
skb_push_rcsum(skb, nh_ofs);
if (err)
ovs_kfree_skb_reason(skb, OVS_DROP_CONNTRACK);
return err;
}
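/* Drop any conntrack state attached to 'skb', mark it as untracked and,
* if a flow key is given, refresh its conntrack fields.
*/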
int ovs_ct_clear(struct sk_buff *skb, struct sw_flow_key *key)
{
enum ip_conntrack_info ctinfo;
struct nf_conn *ct;
ct = nf_ct_get(skb, &ctinfo);
nf_ct_put(ct);
nf_ct_set(skb, NULL, IP_CT_UNTRACKED);
if (key)
ovs_ct_fill_key(skb, key, false);
return 0;
}
#if IS_ENABLED(CONFIG_NF_NAT)
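/* Parse the nested OVS_NAT_ATTR_* attributes of a CT action into the NAT
* flags and address/port range of 'info'.
*/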
static int parse_nat(const struct nlattr *attr,
struct ovs_conntrack_info *info, bool log)
{
struct nlattr *a;
int rem;
bool have_ip_max = false;
bool have_proto_max = false;
bool ip_vers = (info->family == NFPROTO_IPV6);
nla_for_each_nested(a, attr, rem) {
static const int ovs_nat_attr_lens[OVS_NAT_ATTR_MAX + 1][2] = {
[OVS_NAT_ATTR_SRC] = {0, 0},
[OVS_NAT_ATTR_DST] = {0, 0},
[OVS_NAT_ATTR_IP_MIN] = {sizeof(struct in_addr),
sizeof(struct in6_addr)},
[OVS_NAT_ATTR_IP_MAX] = {sizeof(struct in_addr),
sizeof(struct in6_addr)},
[OVS_NAT_ATTR_PROTO_MIN] = {sizeof(u16), sizeof(u16)},
[OVS_NAT_ATTR_PROTO_MAX] = {sizeof(u16), sizeof(u16)},
[OVS_NAT_ATTR_PERSISTENT] = {0, 0},
[OVS_NAT_ATTR_PROTO_HASH] = {0, 0},
[OVS_NAT_ATTR_PROTO_RANDOM] = {0, 0},
};
int type = nla_type(a);
if (type > OVS_NAT_ATTR_MAX) {
OVS_NLERR(log, "Unknown NAT attribute (type=%d, max=%d)",
type, OVS_NAT_ATTR_MAX);
return -EINVAL;
}
if (nla_len(a) != ovs_nat_attr_lens[type][ip_vers]) {
OVS_NLERR(log, "NAT attribute type %d has unexpected length (%d != %d)",
type, nla_len(a),
ovs_nat_attr_lens[type][ip_vers]);
return -EINVAL;
}
switch (type) {
case OVS_NAT_ATTR_SRC:
case OVS_NAT_ATTR_DST:
if (info->nat) {
OVS_NLERR(log, "Only one type of NAT may be specified");
return -ERANGE;
}
info->nat |= OVS_CT_NAT;
info->nat |= ((type == OVS_NAT_ATTR_SRC)
? OVS_CT_SRC_NAT : OVS_CT_DST_NAT);
break;
case OVS_NAT_ATTR_IP_MIN:
nla_memcpy(&info->range.min_addr, a,
sizeof(info->range.min_addr));
info->range.flags |= NF_NAT_RANGE_MAP_IPS;
break;
case OVS_NAT_ATTR_IP_MAX:
have_ip_max = true;
nla_memcpy(&info->range.max_addr, a,
sizeof(info->range.max_addr));
info->range.flags |= NF_NAT_RANGE_MAP_IPS;
break;
case OVS_NAT_ATTR_PROTO_MIN:
info->range.min_proto.all = htons(nla_get_u16(a));
info->range.flags |= NF_NAT_RANGE_PROTO_SPECIFIED;
break;
case OVS_NAT_ATTR_PROTO_MAX:
have_proto_max = true;
info->range.max_proto.all = htons(nla_get_u16(a));
info->range.flags |= NF_NAT_RANGE_PROTO_SPECIFIED;
break;
case OVS_NAT_ATTR_PERSISTENT:
info->range.flags |= NF_NAT_RANGE_PERSISTENT;
break;
case OVS_NAT_ATTR_PROTO_HASH:
info->range.flags |= NF_NAT_RANGE_PROTO_RANDOM;
break;
case OVS_NAT_ATTR_PROTO_RANDOM:
info->range.flags |= NF_NAT_RANGE_PROTO_RANDOM_FULLY;
break;
default:
OVS_NLERR(log, "Unknown nat attribute (%d)", type);
return -EINVAL;
}
}
if (rem > 0) {
OVS_NLERR(log, "NAT attribute has %d unknown bytes", rem);
return -EINVAL;
}
if (!info->nat) {
/* Do not allow flags if no type is given. */
if (info->range.flags) {
OVS_NLERR(log,
"NAT flags may be given only when NAT range (SRC or DST) is also specified."
);
return -EINVAL;
}
info->nat = OVS_CT_NAT; /* NAT existing connections. */
} else if (!info->commit) {
OVS_NLERR(log,
"NAT attributes may be specified only when CT COMMIT flag is also specified."
);
return -EINVAL;
}
/* Allow missing IP_MAX. */
if (info->range.flags & NF_NAT_RANGE_MAP_IPS && !have_ip_max) {
memcpy(&info->range.max_addr, &info->range.min_addr,
sizeof(info->range.max_addr));
}
/* Allow missing PROTO_MAX. */
if (info->range.flags & NF_NAT_RANGE_PROTO_SPECIFIED &&
!have_proto_max) {
info->range.max_proto.all = info->range.min_proto.all;
}
return 0;
}
#endif
static const struct ovs_ct_len_tbl ovs_ct_attr_lens[OVS_CT_ATTR_MAX + 1] = {
[OVS_CT_ATTR_COMMIT] = { .minlen = 0, .maxlen = 0 },
[OVS_CT_ATTR_FORCE_COMMIT] = { .minlen = 0, .maxlen = 0 },
[OVS_CT_ATTR_ZONE] = { .minlen = sizeof(u16),
.maxlen = sizeof(u16) },
[OVS_CT_ATTR_MARK] = { .minlen = sizeof(struct md_mark),
.maxlen = sizeof(struct md_mark) },
[OVS_CT_ATTR_LABELS] = { .minlen = sizeof(struct md_labels),
.maxlen = sizeof(struct md_labels) },
[OVS_CT_ATTR_HELPER] = { .minlen = 1,
.maxlen = NF_CT_HELPER_NAME_LEN },
#if IS_ENABLED(CONFIG_NF_NAT)
/* NAT length is checked when parsing the nested attributes. */
[OVS_CT_ATTR_NAT] = { .minlen = 0, .maxlen = INT_MAX },
#endif
[OVS_CT_ATTR_EVENTMASK] = { .minlen = sizeof(u32),
.maxlen = sizeof(u32) },
[OVS_CT_ATTR_TIMEOUT] = { .minlen = 1,
.maxlen = CTNL_TIMEOUT_NAME_MAX },
};
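/* Parse the nested OVS_CT_ATTR_* attributes of a CT action into 'info',
* returning the helper name, if any, through 'helper'.
*/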
static int parse_ct(const struct nlattr *attr, struct ovs_conntrack_info *info,
const char **helper, bool log)
{
struct nlattr *a;
int rem;
nla_for_each_nested(a, attr, rem) {
int type = nla_type(a);
int maxlen;
int minlen;
if (type > OVS_CT_ATTR_MAX) {
OVS_NLERR(log,
"Unknown conntrack attr (type=%d, max=%d)",
type, OVS_CT_ATTR_MAX);
return -EINVAL;
}
maxlen = ovs_ct_attr_lens[type].maxlen;
minlen = ovs_ct_attr_lens[type].minlen;
if (nla_len(a) < minlen || nla_len(a) > maxlen) {
OVS_NLERR(log,
"Conntrack attr type has unexpected length (type=%d, length=%d, expected=%d)",
type, nla_len(a), maxlen);
return -EINVAL;
}
switch (type) {
case OVS_CT_ATTR_FORCE_COMMIT:
info->force = true;
fallthrough;
case OVS_CT_ATTR_COMMIT:
info->commit = true;
break;
#ifdef CONFIG_NF_CONNTRACK_ZONES
case OVS_CT_ATTR_ZONE:
info->zone.id = nla_get_u16(a);
break;
#endif
#ifdef CONFIG_NF_CONNTRACK_MARK
case OVS_CT_ATTR_MARK: {
struct md_mark *mark = nla_data(a);
if (!mark->mask) {
OVS_NLERR(log, "ct_mark mask cannot be 0");
return -EINVAL;
}
info->mark = *mark;
break;
}
#endif
#ifdef CONFIG_NF_CONNTRACK_LABELS
case OVS_CT_ATTR_LABELS: {
struct md_labels *labels = nla_data(a);
if (!labels_nonzero(&labels->mask)) {
OVS_NLERR(log, "ct_labels mask cannot be 0");
return -EINVAL;
}
info->labels = *labels;
break;
}
#endif
case OVS_CT_ATTR_HELPER:
*helper = nla_data(a);
if (!string_is_terminated(*helper, nla_len(a))) {
OVS_NLERR(log, "Invalid conntrack helper");
return -EINVAL;
}
break;
#if IS_ENABLED(CONFIG_NF_NAT)
case OVS_CT_ATTR_NAT: {
int err = parse_nat(a, info, log);
if (err)
return err;
break;
}
#endif
case OVS_CT_ATTR_EVENTMASK:
info->have_eventmask = true;
info->eventmask = nla_get_u32(a);
break;
#ifdef CONFIG_NF_CONNTRACK_TIMEOUT
case OVS_CT_ATTR_TIMEOUT:
memcpy(info->timeout, nla_data(a), nla_len(a));
if (!string_is_terminated(info->timeout, nla_len(a))) {
OVS_NLERR(log, "Invalid conntrack timeout");
return -EINVAL;
}
break;
#endif
default:
OVS_NLERR(log, "Unknown conntrack attr (%d)",
type);
return -EINVAL;
}
}
#ifdef CONFIG_NF_CONNTRACK_MARK
if (!info->commit && info->mark.mask) {
OVS_NLERR(log,
"Setting conntrack mark requires 'commit' flag.");
return -EINVAL;
}
#endif
#ifdef CONFIG_NF_CONNTRACK_LABELS
if (!info->commit && labels_nonzero(&info->labels.mask)) {
OVS_NLERR(log,
"Setting conntrack labels requires 'commit' flag.");
return -EINVAL;
}
#endif
if (rem > 0) {
OVS_NLERR(log, "Conntrack attr has %d unknown bytes", rem);
return -EINVAL;
}
return 0;
}
bool ovs_ct_verify(struct net *net, enum ovs_key_attr attr)
{
if (attr == OVS_KEY_ATTR_CT_STATE)
return true;
if (IS_ENABLED(CONFIG_NF_CONNTRACK_ZONES) &&
attr == OVS_KEY_ATTR_CT_ZONE)
return true;
if (IS_ENABLED(CONFIG_NF_CONNTRACK_MARK) &&
attr == OVS_KEY_ATTR_CT_MARK)
return true;
if (IS_ENABLED(CONFIG_NF_CONNTRACK_LABELS) &&
attr == OVS_KEY_ATTR_CT_LABELS) {
struct ovs_net *ovs_net = net_generic(net, ovs_net_id);
return ovs_net->xt_label;
}
return false;
}
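/* Validate a CT action coming from userspace and copy it into the flow's
* action list, allocating the conntrack template and, if requested, the
* timeout policy and helper.
*/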
int ovs_ct_copy_action(struct net *net, const struct nlattr *attr,
const struct sw_flow_key *key,
struct sw_flow_actions **sfa, bool log)
{
struct ovs_conntrack_info ct_info;
const char *helper = NULL;
u16 family;
int err;
family = key_to_nfproto(key);
if (family == NFPROTO_UNSPEC) {
OVS_NLERR(log, "ct family unspecified");
return -EINVAL;
}
memset(&ct_info, 0, sizeof(ct_info));
ct_info.family = family;
nf_ct_zone_init(&ct_info.zone, NF_CT_DEFAULT_ZONE_ID,
NF_CT_DEFAULT_ZONE_DIR, 0);
err = parse_ct(attr, &ct_info, &helper, log);
if (err)
return err;
/* Set up template for tracking connections in specific zones. */
ct_info.ct = nf_ct_tmpl_alloc(net, &ct_info.zone, GFP_KERNEL);
if (!ct_info.ct) {
OVS_NLERR(log, "Failed to allocate conntrack template");
return -ENOMEM;
}
if (ct_info.timeout[0]) {
if (nf_ct_set_timeout(net, ct_info.ct, family, key->ip.proto,
ct_info.timeout))
pr_info_ratelimited("Failed to associated timeout "
"policy `%s'\n", ct_info.timeout);
else
ct_info.nf_ct_timeout = rcu_dereference(
nf_ct_timeout_find(ct_info.ct)->timeout);
}
if (helper) {
err = nf_ct_add_helper(ct_info.ct, helper, ct_info.family,
key->ip.proto, ct_info.nat, &ct_info.helper);
if (err) {
OVS_NLERR(log, "Failed to add %s helper %d", helper, err);
goto err_free_ct;
}
}
err = ovs_nla_add_action(sfa, OVS_ACTION_ATTR_CT, &ct_info,
sizeof(ct_info), log);
if (err)
goto err_free_ct;
if (ct_info.commit)
__set_bit(IPS_CONFIRMED_BIT, &ct_info.ct->status);
return 0;
err_free_ct:
__ovs_ct_free_action(&ct_info);
return err;
}
#if IS_ENABLED(CONFIG_NF_NAT)
static bool ovs_ct_nat_to_attr(const struct ovs_conntrack_info *info,
struct sk_buff *skb)
{
struct nlattr *start;
start = nla_nest_start_noflag(skb, OVS_CT_ATTR_NAT);
if (!start)
return false;
if (info->nat & OVS_CT_SRC_NAT) {
if (nla_put_flag(skb, OVS_NAT_ATTR_SRC))
return false;
} else if (info->nat & OVS_CT_DST_NAT) {
if (nla_put_flag(skb, OVS_NAT_ATTR_DST))
return false;
} else {
goto out;
}
if (info->range.flags & NF_NAT_RANGE_MAP_IPS) {
if (IS_ENABLED(CONFIG_NF_NAT) &&
info->family == NFPROTO_IPV4) {
if (nla_put_in_addr(skb, OVS_NAT_ATTR_IP_MIN,
info->range.min_addr.ip) ||
(info->range.max_addr.ip
!= info->range.min_addr.ip &&
(nla_put_in_addr(skb, OVS_NAT_ATTR_IP_MAX,
info->range.max_addr.ip))))
return false;
} else if (IS_ENABLED(CONFIG_IPV6) &&
info->family == NFPROTO_IPV6) {
if (nla_put_in6_addr(skb, OVS_NAT_ATTR_IP_MIN,
&info->range.min_addr.in6) ||
(memcmp(&info->range.max_addr.in6,
&info->range.min_addr.in6,
sizeof(info->range.max_addr.in6)) &&
(nla_put_in6_addr(skb, OVS_NAT_ATTR_IP_MAX,
&info->range.max_addr.in6))))
return false;
} else {
return false;
}
}
if (info->range.flags & NF_NAT_RANGE_PROTO_SPECIFIED &&
(nla_put_u16(skb, OVS_NAT_ATTR_PROTO_MIN,
ntohs(info->range.min_proto.all)) ||
(info->range.max_proto.all != info->range.min_proto.all &&
nla_put_u16(skb, OVS_NAT_ATTR_PROTO_MAX,
ntohs(info->range.max_proto.all)))))
return false;
if (info->range.flags & NF_NAT_RANGE_PERSISTENT &&
nla_put_flag(skb, OVS_NAT_ATTR_PERSISTENT))
return false;
if (info->range.flags & NF_NAT_RANGE_PROTO_RANDOM &&
nla_put_flag(skb, OVS_NAT_ATTR_PROTO_HASH))
return false;
if (info->range.flags & NF_NAT_RANGE_PROTO_RANDOM_FULLY &&
nla_put_flag(skb, OVS_NAT_ATTR_PROTO_RANDOM))
return false;
out:
nla_nest_end(skb, start);
return true;
}
#endif
int ovs_ct_action_to_attr(const struct ovs_conntrack_info *ct_info,
struct sk_buff *skb)
{
struct nlattr *start;
start = nla_nest_start_noflag(skb, OVS_ACTION_ATTR_CT);
if (!start)
return -EMSGSIZE;
if (ct_info->commit && nla_put_flag(skb, ct_info->force
? OVS_CT_ATTR_FORCE_COMMIT
: OVS_CT_ATTR_COMMIT))
return -EMSGSIZE;
if (IS_ENABLED(CONFIG_NF_CONNTRACK_ZONES) &&
nla_put_u16(skb, OVS_CT_ATTR_ZONE, ct_info->zone.id))
return -EMSGSIZE;
if (IS_ENABLED(CONFIG_NF_CONNTRACK_MARK) && ct_info->mark.mask &&
nla_put(skb, OVS_CT_ATTR_MARK, sizeof(ct_info->mark),
&ct_info->mark))
return -EMSGSIZE;
if (IS_ENABLED(CONFIG_NF_CONNTRACK_LABELS) &&
labels_nonzero(&ct_info->labels.mask) &&
nla_put(skb, OVS_CT_ATTR_LABELS, sizeof(ct_info->labels),
&ct_info->labels))
return -EMSGSIZE;
if (ct_info->helper) {
if (nla_put_string(skb, OVS_CT_ATTR_HELPER,
ct_info->helper->name))
return -EMSGSIZE;
}
if (ct_info->have_eventmask &&
nla_put_u32(skb, OVS_CT_ATTR_EVENTMASK, ct_info->eventmask))
return -EMSGSIZE;
if (ct_info->timeout[0]) {
if (nla_put_string(skb, OVS_CT_ATTR_TIMEOUT, ct_info->timeout))
return -EMSGSIZE;
}
#if IS_ENABLED(CONFIG_NF_NAT)
if (ct_info->nat && !ovs_ct_nat_to_attr(ct_info, skb))
return -EMSGSIZE;
#endif
nla_nest_end(skb, start);
return 0;
}
void ovs_ct_free_action(const struct nlattr *a)
{
struct ovs_conntrack_info *ct_info = nla_data(a);
__ovs_ct_free_action(ct_info);
}
static void __ovs_ct_free_action(struct ovs_conntrack_info *ct_info)
{
if (ct_info->helper) {
#if IS_ENABLED(CONFIG_NF_NAT)
if (ct_info->nat)
nf_nat_helper_put(ct_info->helper);
#endif
nf_conntrack_helper_put(ct_info->helper);
}
if (ct_info->ct) {
if (ct_info->timeout[0])
nf_ct_destroy_timeout(ct_info->ct);
nf_ct_tmpl_free(ct_info->ct);
}
}
#if IS_ENABLED(CONFIG_NETFILTER_CONNCOUNT)
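/* Allocate the per-netns conntrack limit state: the default limit, the
* per-zone limit hash table and the nf_conncount data.
*/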
static int ovs_ct_limit_init(struct net *net, struct ovs_net *ovs_net)
{
int i, err;
ovs_net->ct_limit_info = kmalloc(sizeof(*ovs_net->ct_limit_info),
GFP_KERNEL);
if (!ovs_net->ct_limit_info)
return -ENOMEM;
ovs_net->ct_limit_info->default_limit = OVS_CT_LIMIT_DEFAULT;
ovs_net->ct_limit_info->limits =
kmalloc_array(CT_LIMIT_HASH_BUCKETS, sizeof(struct hlist_head),
GFP_KERNEL);
if (!ovs_net->ct_limit_info->limits) {
kfree(ovs_net->ct_limit_info);
return -ENOMEM;
}
for (i = 0; i < CT_LIMIT_HASH_BUCKETS; i++)
INIT_HLIST_HEAD(&ovs_net->ct_limit_info->limits[i]);
ovs_net->ct_limit_info->data =
nf_conncount_init(net, NFPROTO_INET, sizeof(u32));
if (IS_ERR(ovs_net->ct_limit_info->data)) {
err = PTR_ERR(ovs_net->ct_limit_info->data);
kfree(ovs_net->ct_limit_info->limits);
kfree(ovs_net->ct_limit_info);
pr_err("openvswitch: failed to init nf_conncount %d\n", err);
return err;
}
return 0;
}
static void ovs_ct_limit_exit(struct net *net, struct ovs_net *ovs_net)
{
const struct ovs_ct_limit_info *info = ovs_net->ct_limit_info;
int i;
nf_conncount_destroy(net, NFPROTO_INET, info->data);
for (i = 0; i < CT_LIMIT_HASH_BUCKETS; ++i) {
struct hlist_head *head = &info->limits[i];
struct ovs_ct_limit *ct_limit;
hlist_for_each_entry_rcu(ct_limit, head, hlist_node,
lockdep_ovsl_is_held())
kfree_rcu(ct_limit, rcu);
}
kfree(info->limits);
kfree(info);
}
static struct sk_buff *
ovs_ct_limit_cmd_reply_start(struct genl_info *info, u8 cmd,
struct ovs_header **ovs_reply_header)
{
struct ovs_header *ovs_header = genl_info_userhdr(info);
struct sk_buff *skb;
skb = genlmsg_new(NLMSG_DEFAULT_SIZE, GFP_KERNEL);
if (!skb)
return ERR_PTR(-ENOMEM);
*ovs_reply_header = genlmsg_put(skb, info->snd_portid,
info->snd_seq,
&dp_ct_limit_genl_family, 0, cmd);
if (!*ovs_reply_header) {
nlmsg_free(skb);
return ERR_PTR(-EMSGSIZE);
}
(*ovs_reply_header)->dp_ifindex = ovs_header->dp_ifindex;
return skb;
}
static bool check_zone_id(int zone_id, u16 *pzone)
{
if (zone_id >= 0 && zone_id <= 65535) {
*pzone = (u16)zone_id;
return true;
}
return false;
}
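/* Walk the zone limit entries in 'nla_zone_limit' and install or update
* the corresponding per-zone (or default) limits under ovs_mutex.
*/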
static int ovs_ct_limit_set_zone_limit(struct nlattr *nla_zone_limit,
struct ovs_ct_limit_info *info)
{
struct ovs_zone_limit *zone_limit;
int rem;
u16 zone;
rem = NLA_ALIGN(nla_len(nla_zone_limit));
zone_limit = (struct ovs_zone_limit *)nla_data(nla_zone_limit);
while (rem >= sizeof(*zone_limit)) {
if (unlikely(zone_limit->zone_id ==
OVS_ZONE_LIMIT_DEFAULT_ZONE)) {
ovs_lock();
info->default_limit = zone_limit->limit;
ovs_unlock();
} else if (unlikely(!check_zone_id(
zone_limit->zone_id, &zone))) {
OVS_NLERR(true, "zone id is out of range");
} else {
struct ovs_ct_limit *ct_limit;
ct_limit = kmalloc(sizeof(*ct_limit),
GFP_KERNEL_ACCOUNT);
if (!ct_limit)
return -ENOMEM;
ct_limit->zone = zone;
ct_limit->limit = zone_limit->limit;
ovs_lock();
ct_limit_set(info, ct_limit);
ovs_unlock();
}
rem -= NLA_ALIGN(sizeof(*zone_limit));
zone_limit = (struct ovs_zone_limit *)((u8 *)zone_limit +
NLA_ALIGN(sizeof(*zone_limit)));
}
if (rem)
OVS_NLERR(true, "set zone limit has %d unknown bytes", rem);
return 0;
}
static int ovs_ct_limit_del_zone_limit(struct nlattr *nla_zone_limit,
struct ovs_ct_limit_info *info)
{
struct ovs_zone_limit *zone_limit;
int rem;
u16 zone;
rem = NLA_ALIGN(nla_len(nla_zone_limit));
zone_limit = (struct ovs_zone_limit *)nla_data(nla_zone_limit);
while (rem >= sizeof(*zone_limit)) {
if (unlikely(zone_limit->zone_id ==
OVS_ZONE_LIMIT_DEFAULT_ZONE)) {
ovs_lock();
info->default_limit = OVS_CT_LIMIT_DEFAULT;
ovs_unlock();
} else if (unlikely(!check_zone_id(
zone_limit->zone_id, &zone))) {
OVS_NLERR(true, "zone id is out of range");
} else {
ovs_lock();
ct_limit_del(info, zone);
ovs_unlock();
}
rem -= NLA_ALIGN(sizeof(*zone_limit));
zone_limit = (struct ovs_zone_limit *)((u8 *)zone_limit +
NLA_ALIGN(sizeof(*zone_limit)));
}
if (rem)
OVS_NLERR(true, "del zone limit has %d unknown bytes", rem);
return 0;
}
static int ovs_ct_limit_get_default_limit(struct ovs_ct_limit_info *info,
struct sk_buff *reply)
{
struct ovs_zone_limit zone_limit = {
.zone_id = OVS_ZONE_LIMIT_DEFAULT_ZONE,
.limit = info->default_limit,
};
return nla_put_nohdr(reply, sizeof(zone_limit), &zone_limit);
}
static int __ovs_ct_limit_get_zone_limit(struct net *net,
struct nf_conncount_data *data,
u16 zone_id, u32 limit,
struct sk_buff *reply)
{
struct nf_conntrack_zone ct_zone;
struct ovs_zone_limit zone_limit;
u32 conncount_key = zone_id;
zone_limit.zone_id = zone_id;
zone_limit.limit = limit;
nf_ct_zone_init(&ct_zone, zone_id, NF_CT_DEFAULT_ZONE_DIR, 0);
zone_limit.count = nf_conncount_count(net, data, &conncount_key, NULL,
&ct_zone);
return nla_put_nohdr(reply, sizeof(zone_limit), &zone_limit);
}
static int ovs_ct_limit_get_zone_limit(struct net *net,
struct nlattr *nla_zone_limit,
struct ovs_ct_limit_info *info,
struct sk_buff *reply)
{
struct ovs_zone_limit *zone_limit;
int rem, err;
u32 limit;
u16 zone;
rem = NLA_ALIGN(nla_len(nla_zone_limit));
zone_limit = (struct ovs_zone_limit *)nla_data(nla_zone_limit);
while (rem >= sizeof(*zone_limit)) {
if (unlikely(zone_limit->zone_id ==
OVS_ZONE_LIMIT_DEFAULT_ZONE)) {
err = ovs_ct_limit_get_default_limit(info, reply);
if (err)
return err;
} else if (unlikely(!check_zone_id(zone_limit->zone_id,
&zone))) {
OVS_NLERR(true, "zone id is out of range");
} else {
rcu_read_lock();
limit = ct_limit_get(info, zone);
rcu_read_unlock();
err = __ovs_ct_limit_get_zone_limit(
net, info->data, zone, limit, reply);
if (err)
return err;
}
rem -= NLA_ALIGN(sizeof(*zone_limit));
zone_limit = (struct ovs_zone_limit *)((u8 *)zone_limit +
NLA_ALIGN(sizeof(*zone_limit)));
}
if (rem)
OVS_NLERR(true, "get zone limit has %d unknown bytes", rem);
return 0;
}
static int ovs_ct_limit_get_all_zone_limit(struct net *net,
struct ovs_ct_limit_info *info,
struct sk_buff *reply)
{
struct ovs_ct_limit *ct_limit;
struct hlist_head *head;
int i, err = 0;
err = ovs_ct_limit_get_default_limit(info, reply);
if (err)
return err;
rcu_read_lock();
for (i = 0; i < CT_LIMIT_HASH_BUCKETS; ++i) {
head = &info->limits[i];
hlist_for_each_entry_rcu(ct_limit, head, hlist_node) {
err = __ovs_ct_limit_get_zone_limit(net, info->data,
ct_limit->zone, ct_limit->limit, reply);
if (err)
goto exit_err;
}
}
exit_err:
rcu_read_unlock();
return err;
}
static int ovs_ct_limit_cmd_set(struct sk_buff *skb, struct genl_info *info)
{
struct nlattr **a = info->attrs;
struct sk_buff *reply;
struct ovs_header *ovs_reply_header;
struct ovs_net *ovs_net = net_generic(sock_net(skb->sk), ovs_net_id);
struct ovs_ct_limit_info *ct_limit_info = ovs_net->ct_limit_info;
int err;
reply = ovs_ct_limit_cmd_reply_start(info, OVS_CT_LIMIT_CMD_SET,
&ovs_reply_header);
if (IS_ERR(reply))
return PTR_ERR(reply);
if (!a[OVS_CT_LIMIT_ATTR_ZONE_LIMIT]) {
err = -EINVAL;
goto exit_err;
}
err = ovs_ct_limit_set_zone_limit(a[OVS_CT_LIMIT_ATTR_ZONE_LIMIT],
ct_limit_info);
if (err)
goto exit_err;
static_branch_enable(&ovs_ct_limit_enabled);
genlmsg_end(reply, ovs_reply_header);
return genlmsg_reply(reply, info);
exit_err:
nlmsg_free(reply);
return err;
}
static int ovs_ct_limit_cmd_del(struct sk_buff *skb, struct genl_info *info)
{
struct nlattr **a = info->attrs;
struct sk_buff *reply;
struct ovs_header *ovs_reply_header;
struct ovs_net *ovs_net = net_generic(sock_net(skb->sk), ovs_net_id);
struct ovs_ct_limit_info *ct_limit_info = ovs_net->ct_limit_info;
int err;
reply = ovs_ct_limit_cmd_reply_start(info, OVS_CT_LIMIT_CMD_DEL,
&ovs_reply_header);
if (IS_ERR(reply))
return PTR_ERR(reply);
if (!a[OVS_CT_LIMIT_ATTR_ZONE_LIMIT]) {
err = -EINVAL;
goto exit_err;
}
err = ovs_ct_limit_del_zone_limit(a[OVS_CT_LIMIT_ATTR_ZONE_LIMIT],
ct_limit_info);
if (err)
goto exit_err;
genlmsg_end(reply, ovs_reply_header);
return genlmsg_reply(reply, info);
exit_err:
nlmsg_free(reply);
return err;
}
static int ovs_ct_limit_cmd_get(struct sk_buff *skb, struct genl_info *info)
{
struct nlattr **a = info->attrs;
struct nlattr *nla_reply;
struct sk_buff *reply;
struct ovs_header *ovs_reply_header;
struct net *net = sock_net(skb->sk);
struct ovs_net *ovs_net = net_generic(net, ovs_net_id);
struct ovs_ct_limit_info *ct_limit_info = ovs_net->ct_limit_info;
int err;
reply = ovs_ct_limit_cmd_reply_start(info, OVS_CT_LIMIT_CMD_GET,
&ovs_reply_header);
if (IS_ERR(reply))
return PTR_ERR(reply);
nla_reply = nla_nest_start_noflag(reply, OVS_CT_LIMIT_ATTR_ZONE_LIMIT);
if (!nla_reply) {
err = -EMSGSIZE;
goto exit_err;
}
if (a[OVS_CT_LIMIT_ATTR_ZONE_LIMIT]) {
err = ovs_ct_limit_get_zone_limit(
net, a[OVS_CT_LIMIT_ATTR_ZONE_LIMIT], ct_limit_info,
reply);
if (err)
goto exit_err;
} else {
err = ovs_ct_limit_get_all_zone_limit(net, ct_limit_info,
reply);
if (err)
goto exit_err;
}
nla_nest_end(reply, nla_reply);
genlmsg_end(reply, ovs_reply_header);
return genlmsg_reply(reply, info);
exit_err:
nlmsg_free(reply);
return err;
}
static const struct genl_small_ops ct_limit_genl_ops[] = {
{ .cmd = OVS_CT_LIMIT_CMD_SET,
.validate = GENL_DONT_VALIDATE_STRICT | GENL_DONT_VALIDATE_DUMP,
.flags = GENL_UNS_ADMIN_PERM, /* Requires CAP_NET_ADMIN
* privilege.
*/
.doit = ovs_ct_limit_cmd_set,
},
{ .cmd = OVS_CT_LIMIT_CMD_DEL,
.validate = GENL_DONT_VALIDATE_STRICT | GENL_DONT_VALIDATE_DUMP,
.flags = GENL_UNS_ADMIN_PERM, /* Requires CAP_NET_ADMIN
* privilege.
*/
.doit = ovs_ct_limit_cmd_del,
},
{ .cmd = OVS_CT_LIMIT_CMD_GET,
.validate = GENL_DONT_VALIDATE_STRICT | GENL_DONT_VALIDATE_DUMP,
.flags = 0, /* OK for unprivileged users. */
.doit = ovs_ct_limit_cmd_get,
},
};
static const struct genl_multicast_group ovs_ct_limit_multicast_group = {
.name = OVS_CT_LIMIT_MCGROUP,
};
struct genl_family dp_ct_limit_genl_family __ro_after_init = {
.hdrsize = sizeof(struct ovs_header),
.name = OVS_CT_LIMIT_FAMILY,
.version = OVS_CT_LIMIT_VERSION,
.maxattr = OVS_CT_LIMIT_ATTR_MAX,
.policy = ct_limit_policy,
.netnsok = true,
.parallel_ops = true,
.small_ops = ct_limit_genl_ops,
.n_small_ops = ARRAY_SIZE(ct_limit_genl_ops),
.resv_start_op = OVS_CT_LIMIT_CMD_GET + 1,
.mcgrps = &ovs_ct_limit_multicast_group,
.n_mcgrps = 1,
.module = THIS_MODULE,
};
#endif
int ovs_ct_init(struct net *net)
{
unsigned int n_bits = sizeof(struct ovs_key_ct_labels) * BITS_PER_BYTE;
struct ovs_net *ovs_net = net_generic(net, ovs_net_id);
if (nf_connlabels_get(net, n_bits - 1)) {
ovs_net->xt_label = false;
OVS_NLERR(true, "Failed to set connlabel length");
} else {
ovs_net->xt_label = true;
}
#if IS_ENABLED(CONFIG_NETFILTER_CONNCOUNT)
return ovs_ct_limit_init(net, ovs_net);
#else
return 0;
#endif
}
void ovs_ct_exit(struct net *net)
{
struct ovs_net *ovs_net = net_generic(net, ovs_net_id);
#if IS_ENABLED(CONFIG_NETFILTER_CONNCOUNT)
ovs_ct_limit_exit(net, ovs_net);
#endif
if (ovs_net->xt_label)
nf_connlabels_put(net);
}
| linux-master | net/openvswitch/conntrack.c |
// SPDX-License-Identifier: GPL-2.0-only
/*
* Copyright (c) 2007-2014 Nicira, Inc.
*/
#include <linux/uaccess.h>
#include <linux/netdevice.h>
#include <linux/etherdevice.h>
#include <linux/if_ether.h>
#include <linux/if_vlan.h>
#include <net/llc_pdu.h>
#include <linux/kernel.h>
#include <linux/jhash.h>
#include <linux/jiffies.h>
#include <linux/llc.h>
#include <linux/module.h>
#include <linux/in.h>
#include <linux/rcupdate.h>
#include <linux/cpumask.h>
#include <linux/if_arp.h>
#include <linux/ip.h>
#include <linux/ipv6.h>
#include <linux/mpls.h>
#include <linux/sctp.h>
#include <linux/smp.h>
#include <linux/tcp.h>
#include <linux/udp.h>
#include <linux/icmp.h>
#include <linux/icmpv6.h>
#include <linux/rculist.h>
#include <net/ip.h>
#include <net/ip_tunnels.h>
#include <net/ipv6.h>
#include <net/mpls.h>
#include <net/ndisc.h>
#include <net/nsh.h>
#include <net/pkt_cls.h>
#include <net/netfilter/nf_conntrack_zones.h>
#include "conntrack.h"
#include "datapath.h"
#include "flow.h"
#include "flow_netlink.h"
#include "vport.h"
u64 ovs_flow_used_time(unsigned long flow_jiffies)
{
struct timespec64 cur_ts;
u64 cur_ms, idle_ms;
ktime_get_ts64(&cur_ts);
idle_ms = jiffies_to_msecs(jiffies - flow_jiffies);
cur_ms = (u64)(u32)cur_ts.tv_sec * MSEC_PER_SEC +
cur_ts.tv_nsec / NSEC_PER_MSEC;
return cur_ms - idle_ms;
}
#define TCP_FLAGS_BE16(tp) (*(__be16 *)&tcp_flag_word(tp) & htons(0x0FFF))
void ovs_flow_stats_update(struct sw_flow *flow, __be16 tcp_flags,
const struct sk_buff *skb)
{
struct sw_flow_stats *stats;
unsigned int cpu = smp_processor_id();
int len = skb->len + (skb_vlan_tag_present(skb) ? VLAN_HLEN : 0);
stats = rcu_dereference(flow->stats[cpu]);
/* Check if we already have CPU-specific stats. */
if (likely(stats)) {
spin_lock(&stats->lock);
/* Mark if we write on the pre-allocated stats. */
if (cpu == 0 && unlikely(flow->stats_last_writer != cpu))
flow->stats_last_writer = cpu;
} else {
stats = rcu_dereference(flow->stats[0]); /* Pre-allocated. */
spin_lock(&stats->lock);
/* If the current CPU is the only writer on the
* pre-allocated stats keep using them.
*/
if (unlikely(flow->stats_last_writer != cpu)) {
/* A previous locker may have already allocated the
* stats, so we need to check again. If CPU-specific
* stats were already allocated, we update the pre-
* allocated stats as we have already locked them.
*/
if (likely(flow->stats_last_writer != -1) &&
likely(!rcu_access_pointer(flow->stats[cpu]))) {
/* Try to allocate CPU-specific stats. */
struct sw_flow_stats *new_stats;
new_stats =
kmem_cache_alloc_node(flow_stats_cache,
GFP_NOWAIT |
__GFP_THISNODE |
__GFP_NOWARN |
__GFP_NOMEMALLOC,
numa_node_id());
if (likely(new_stats)) {
new_stats->used = jiffies;
new_stats->packet_count = 1;
new_stats->byte_count = len;
new_stats->tcp_flags = tcp_flags;
spin_lock_init(&new_stats->lock);
rcu_assign_pointer(flow->stats[cpu],
new_stats);
cpumask_set_cpu(cpu,
flow->cpu_used_mask);
goto unlock;
}
}
flow->stats_last_writer = cpu;
}
}
stats->used = jiffies;
stats->packet_count++;
stats->byte_count += len;
stats->tcp_flags |= tcp_flags;
unlock:
spin_unlock(&stats->lock);
}
/* Must be called with rcu_read_lock or ovs_mutex. */
void ovs_flow_stats_get(const struct sw_flow *flow,
struct ovs_flow_stats *ovs_stats,
unsigned long *used, __be16 *tcp_flags)
{
int cpu;
*used = 0;
*tcp_flags = 0;
memset(ovs_stats, 0, sizeof(*ovs_stats));
/* We open code this to make sure cpu 0 is always considered */
for (cpu = 0; cpu < nr_cpu_ids;
cpu = cpumask_next(cpu, flow->cpu_used_mask)) {
struct sw_flow_stats *stats = rcu_dereference_ovsl(flow->stats[cpu]);
if (stats) {
/* Local CPU may write on non-local stats, so we must
* block bottom-halves here.
*/
spin_lock_bh(&stats->lock);
if (!*used || time_after(stats->used, *used))
*used = stats->used;
*tcp_flags |= stats->tcp_flags;
ovs_stats->n_packets += stats->packet_count;
ovs_stats->n_bytes += stats->byte_count;
spin_unlock_bh(&stats->lock);
}
}
}
/* Called with ovs_mutex. */
void ovs_flow_stats_clear(struct sw_flow *flow)
{
int cpu;
/* We open code this to make sure cpu 0 is always considered */
for (cpu = 0; cpu < nr_cpu_ids;
cpu = cpumask_next(cpu, flow->cpu_used_mask)) {
struct sw_flow_stats *stats = ovsl_dereference(flow->stats[cpu]);
if (stats) {
spin_lock_bh(&stats->lock);
stats->used = 0;
stats->packet_count = 0;
stats->byte_count = 0;
stats->tcp_flags = 0;
spin_unlock_bh(&stats->lock);
}
}
}
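/* Ensure that at least 'len' bytes of the packet are present and pulled
* into the linear data area.
*/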
static int check_header(struct sk_buff *skb, int len)
{
if (unlikely(skb->len < len))
return -EINVAL;
if (unlikely(!pskb_may_pull(skb, len)))
return -ENOMEM;
return 0;
}
static bool arphdr_ok(struct sk_buff *skb)
{
return pskb_may_pull(skb, skb_network_offset(skb) +
sizeof(struct arp_eth_header));
}
static int check_iphdr(struct sk_buff *skb)
{
unsigned int nh_ofs = skb_network_offset(skb);
unsigned int ip_len;
int err;
err = check_header(skb, nh_ofs + sizeof(struct iphdr));
if (unlikely(err))
return err;
ip_len = ip_hdrlen(skb);
if (unlikely(ip_len < sizeof(struct iphdr) ||
skb->len < nh_ofs + ip_len))
return -EINVAL;
skb_set_transport_header(skb, nh_ofs + ip_len);
return 0;
}
static bool tcphdr_ok(struct sk_buff *skb)
{
int th_ofs = skb_transport_offset(skb);
int tcp_len;
if (unlikely(!pskb_may_pull(skb, th_ofs + sizeof(struct tcphdr))))
return false;
tcp_len = tcp_hdrlen(skb);
if (unlikely(tcp_len < sizeof(struct tcphdr) ||
skb->len < th_ofs + tcp_len))
return false;
return true;
}
static bool udphdr_ok(struct sk_buff *skb)
{
return pskb_may_pull(skb, skb_transport_offset(skb) +
sizeof(struct udphdr));
}
static bool sctphdr_ok(struct sk_buff *skb)
{
return pskb_may_pull(skb, skb_transport_offset(skb) +
sizeof(struct sctphdr));
}
static bool icmphdr_ok(struct sk_buff *skb)
{
return pskb_may_pull(skb, skb_transport_offset(skb) +
sizeof(struct icmphdr));
}
/**
* get_ipv6_ext_hdrs() - Parses packet and sets IPv6 extension header flags.
*
* @skb: buffer where extension header data starts in packet
* @nh: ipv6 header
* @ext_hdrs: flags are stored here
*
* OFPIEH12_UNREP is set if more than one of a given IPv6 extension header
* is unexpectedly encountered. (Two destination options headers may be
* expected and would not cause this bit to be set.)
*
* OFPIEH12_UNSEQ is set if IPv6 extension headers were not in the order
* preferred (but not required) by RFC 2460:
*
* When more than one extension header is used in the same packet, it is
* recommended that those headers appear in the following order:
* IPv6 header
* Hop-by-Hop Options header
* Destination Options header
* Routing header
* Fragment header
* Authentication header
* Encapsulating Security Payload header
* Destination Options header
* upper-layer header
*/
static void get_ipv6_ext_hdrs(struct sk_buff *skb, struct ipv6hdr *nh,
u16 *ext_hdrs)
{
u8 next_type = nh->nexthdr;
unsigned int start = skb_network_offset(skb) + sizeof(struct ipv6hdr);
int dest_options_header_count = 0;
*ext_hdrs = 0;
while (ipv6_ext_hdr(next_type)) {
struct ipv6_opt_hdr _hdr, *hp;
switch (next_type) {
case IPPROTO_NONE:
*ext_hdrs |= OFPIEH12_NONEXT;
/* stop parsing */
return;
case IPPROTO_ESP:
if (*ext_hdrs & OFPIEH12_ESP)
*ext_hdrs |= OFPIEH12_UNREP;
if ((*ext_hdrs & ~(OFPIEH12_HOP | OFPIEH12_DEST |
OFPIEH12_ROUTER | OFPIEH12_FRAG |
OFPIEH12_AUTH | OFPIEH12_UNREP)) ||
dest_options_header_count >= 2) {
*ext_hdrs |= OFPIEH12_UNSEQ;
}
*ext_hdrs |= OFPIEH12_ESP;
break;
case IPPROTO_AH:
if (*ext_hdrs & OFPIEH12_AUTH)
*ext_hdrs |= OFPIEH12_UNREP;
if ((*ext_hdrs &
~(OFPIEH12_HOP | OFPIEH12_DEST | OFPIEH12_ROUTER |
			       OFPIEH12_FRAG | OFPIEH12_UNREP)) ||
dest_options_header_count >= 2) {
*ext_hdrs |= OFPIEH12_UNSEQ;
}
*ext_hdrs |= OFPIEH12_AUTH;
break;
case IPPROTO_DSTOPTS:
if (dest_options_header_count == 0) {
if (*ext_hdrs &
~(OFPIEH12_HOP | OFPIEH12_UNREP))
*ext_hdrs |= OFPIEH12_UNSEQ;
*ext_hdrs |= OFPIEH12_DEST;
} else if (dest_options_header_count == 1) {
if (*ext_hdrs &
~(OFPIEH12_HOP | OFPIEH12_DEST |
OFPIEH12_ROUTER | OFPIEH12_FRAG |
OFPIEH12_AUTH | OFPIEH12_ESP |
OFPIEH12_UNREP)) {
*ext_hdrs |= OFPIEH12_UNSEQ;
}
} else {
*ext_hdrs |= OFPIEH12_UNREP;
}
dest_options_header_count++;
break;
case IPPROTO_FRAGMENT:
if (*ext_hdrs & OFPIEH12_FRAG)
*ext_hdrs |= OFPIEH12_UNREP;
if ((*ext_hdrs & ~(OFPIEH12_HOP |
OFPIEH12_DEST |
OFPIEH12_ROUTER |
OFPIEH12_UNREP)) ||
dest_options_header_count >= 2) {
*ext_hdrs |= OFPIEH12_UNSEQ;
}
*ext_hdrs |= OFPIEH12_FRAG;
break;
case IPPROTO_ROUTING:
if (*ext_hdrs & OFPIEH12_ROUTER)
*ext_hdrs |= OFPIEH12_UNREP;
if ((*ext_hdrs & ~(OFPIEH12_HOP |
OFPIEH12_DEST |
OFPIEH12_UNREP)) ||
dest_options_header_count >= 2) {
*ext_hdrs |= OFPIEH12_UNSEQ;
}
*ext_hdrs |= OFPIEH12_ROUTER;
break;
case IPPROTO_HOPOPTS:
if (*ext_hdrs & OFPIEH12_HOP)
*ext_hdrs |= OFPIEH12_UNREP;
/* OFPIEH12_HOP is set to 1 if a hop-by-hop IPv6
* extension header is present as the first
* extension header in the packet.
*/
if (*ext_hdrs == 0)
*ext_hdrs |= OFPIEH12_HOP;
else
*ext_hdrs |= OFPIEH12_UNSEQ;
break;
default:
return;
}
hp = skb_header_pointer(skb, start, sizeof(_hdr), &_hdr);
if (!hp)
break;
next_type = hp->nexthdr;
start += ipv6_optlen(hp);
}
}
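/* Parse the IPv6 header and any extension headers, filling in the IPv6,
 * fragmentation and extension-header fields of @key and setting the
 * transport header offset.  Returns the total length of the IPv6 header
 * plus extension headers on success, 0 for non-first fragments, or a
 * negative errno value on error.
 */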
static int parse_ipv6hdr(struct sk_buff *skb, struct sw_flow_key *key)
{
unsigned short frag_off;
unsigned int payload_ofs = 0;
unsigned int nh_ofs = skb_network_offset(skb);
unsigned int nh_len;
struct ipv6hdr *nh;
int err, nexthdr, flags = 0;
err = check_header(skb, nh_ofs + sizeof(*nh));
if (unlikely(err))
return err;
nh = ipv6_hdr(skb);
get_ipv6_ext_hdrs(skb, nh, &key->ipv6.exthdrs);
key->ip.proto = NEXTHDR_NONE;
key->ip.tos = ipv6_get_dsfield(nh);
key->ip.ttl = nh->hop_limit;
key->ipv6.label = *(__be32 *)nh & htonl(IPV6_FLOWINFO_FLOWLABEL);
key->ipv6.addr.src = nh->saddr;
key->ipv6.addr.dst = nh->daddr;
nexthdr = ipv6_find_hdr(skb, &payload_ofs, -1, &frag_off, &flags);
if (flags & IP6_FH_F_FRAG) {
if (frag_off) {
key->ip.frag = OVS_FRAG_TYPE_LATER;
key->ip.proto = NEXTHDR_FRAGMENT;
return 0;
}
key->ip.frag = OVS_FRAG_TYPE_FIRST;
} else {
key->ip.frag = OVS_FRAG_TYPE_NONE;
}
/* Delayed handling of error in ipv6_find_hdr() as it
* always sets flags and frag_off to a valid value which may be
* used to set key->ip.frag above.
*/
if (unlikely(nexthdr < 0))
return -EPROTO;
nh_len = payload_ofs - nh_ofs;
skb_set_transport_header(skb, nh_ofs + nh_len);
key->ip.proto = nexthdr;
return nh_len;
}
static bool icmp6hdr_ok(struct sk_buff *skb)
{
return pskb_may_pull(skb, skb_transport_offset(skb) +
sizeof(struct icmp6hdr));
}
/**
* parse_vlan_tag - Parse vlan tag from vlan header.
* @skb: skb containing frame to parse
* @key_vh: pointer to parsed vlan tag
* @untag_vlan: should the vlan header be removed from the frame
*
 * Return: a negative errno value on error,
 * %0 if it encounters a non-vlan or incomplete packet,
 * %1 after successfully parsing the vlan tag.
*/
static int parse_vlan_tag(struct sk_buff *skb, struct vlan_head *key_vh,
bool untag_vlan)
{
struct vlan_head *vh = (struct vlan_head *)skb->data;
if (likely(!eth_type_vlan(vh->tpid)))
return 0;
if (unlikely(skb->len < sizeof(struct vlan_head) + sizeof(__be16)))
return 0;
if (unlikely(!pskb_may_pull(skb, sizeof(struct vlan_head) +
sizeof(__be16))))
return -ENOMEM;
vh = (struct vlan_head *)skb->data;
key_vh->tci = vh->tci | htons(VLAN_CFI_MASK);
key_vh->tpid = vh->tpid;
if (unlikely(untag_vlan)) {
int offset = skb->data - skb_mac_header(skb);
u16 tci;
int err;
__skb_push(skb, offset);
err = __skb_vlan_pop(skb, &tci);
__skb_pull(skb, offset);
if (err)
return err;
__vlan_hwaccel_put_tag(skb, key_vh->tpid, tci);
} else {
__skb_pull(skb, sizeof(struct vlan_head));
}
return 1;
}
static void clear_vlan(struct sw_flow_key *key)
{
key->eth.vlan.tci = 0;
key->eth.vlan.tpid = 0;
key->eth.cvlan.tci = 0;
key->eth.cvlan.tpid = 0;
}
static int parse_vlan(struct sk_buff *skb, struct sw_flow_key *key)
{
int res;
if (skb_vlan_tag_present(skb)) {
key->eth.vlan.tci = htons(skb->vlan_tci) | htons(VLAN_CFI_MASK);
key->eth.vlan.tpid = skb->vlan_proto;
} else {
/* Parse outer vlan tag in the non-accelerated case. */
res = parse_vlan_tag(skb, &key->eth.vlan, true);
if (res <= 0)
return res;
}
/* Parse inner vlan tag. */
res = parse_vlan_tag(skb, &key->eth.cvlan, false);
if (res <= 0)
return res;
return 0;
}
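/* Consume the EtherType (and, for 802.2/LLC-SNAP frames, the SNAP header)
 * from the start of the skb data and return the effective EtherType: the
 * encapsulated protocol for Ethernet II and SNAP frames,
 * htons(ETH_P_802_2) for other LLC frames, or htons(0) if the SNAP header
 * cannot be pulled.
 */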
static __be16 parse_ethertype(struct sk_buff *skb)
{
struct llc_snap_hdr {
u8 dsap; /* Always 0xAA */
u8 ssap; /* Always 0xAA */
u8 ctrl;
u8 oui[3];
__be16 ethertype;
};
struct llc_snap_hdr *llc;
__be16 proto;
proto = *(__be16 *) skb->data;
__skb_pull(skb, sizeof(__be16));
if (eth_proto_is_802_3(proto))
return proto;
if (skb->len < sizeof(struct llc_snap_hdr))
return htons(ETH_P_802_2);
if (unlikely(!pskb_may_pull(skb, sizeof(struct llc_snap_hdr))))
return htons(0);
llc = (struct llc_snap_hdr *) skb->data;
if (llc->dsap != LLC_SAP_SNAP ||
llc->ssap != LLC_SAP_SNAP ||
(llc->oui[0] | llc->oui[1] | llc->oui[2]) != 0)
return htons(ETH_P_802_2);
__skb_pull(skb, sizeof(struct llc_snap_hdr));
if (eth_proto_is_802_3(llc->ethertype))
return llc->ethertype;
return htons(ETH_P_802_2);
}
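/* Extract the ICMPv6 type and code into the transport port fields of @key
 * and, for neighbour solicitation/advertisement messages, parse the ND
 * target address and link-layer address options.  Returns 0 on success
 * (clearing the ND fields again if the options are malformed) or -ENOMEM
 * if the skb cannot be linearized.
 */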
static int parse_icmpv6(struct sk_buff *skb, struct sw_flow_key *key,
int nh_len)
{
struct icmp6hdr *icmp = icmp6_hdr(skb);
/* The ICMPv6 type and code fields use the 16-bit transport port
* fields, so we need to store them in 16-bit network byte order.
*/
key->tp.src = htons(icmp->icmp6_type);
key->tp.dst = htons(icmp->icmp6_code);
memset(&key->ipv6.nd, 0, sizeof(key->ipv6.nd));
if (icmp->icmp6_code == 0 &&
(icmp->icmp6_type == NDISC_NEIGHBOUR_SOLICITATION ||
icmp->icmp6_type == NDISC_NEIGHBOUR_ADVERTISEMENT)) {
int icmp_len = skb->len - skb_transport_offset(skb);
struct nd_msg *nd;
int offset;
/* In order to process neighbor discovery options, we need the
* entire packet.
*/
if (unlikely(icmp_len < sizeof(*nd)))
return 0;
if (unlikely(skb_linearize(skb)))
return -ENOMEM;
nd = (struct nd_msg *)skb_transport_header(skb);
key->ipv6.nd.target = nd->target;
icmp_len -= sizeof(*nd);
offset = 0;
while (icmp_len >= 8) {
struct nd_opt_hdr *nd_opt =
(struct nd_opt_hdr *)(nd->opt + offset);
int opt_len = nd_opt->nd_opt_len * 8;
if (unlikely(!opt_len || opt_len > icmp_len))
return 0;
/* Store the link layer address if the appropriate
* option is provided. It is considered an error if
* the same link layer option is specified twice.
*/
if (nd_opt->nd_opt_type == ND_OPT_SOURCE_LL_ADDR
&& opt_len == 8) {
if (unlikely(!is_zero_ether_addr(key->ipv6.nd.sll)))
goto invalid;
ether_addr_copy(key->ipv6.nd.sll,
&nd->opt[offset+sizeof(*nd_opt)]);
} else if (nd_opt->nd_opt_type == ND_OPT_TARGET_LL_ADDR
&& opt_len == 8) {
if (unlikely(!is_zero_ether_addr(key->ipv6.nd.tll)))
goto invalid;
ether_addr_copy(key->ipv6.nd.tll,
&nd->opt[offset+sizeof(*nd_opt)]);
}
icmp_len -= opt_len;
offset += opt_len;
}
}
return 0;
invalid:
memset(&key->ipv6.nd.target, 0, sizeof(key->ipv6.nd.target));
memset(key->ipv6.nd.sll, 0, sizeof(key->ipv6.nd.sll));
memset(key->ipv6.nd.tll, 0, sizeof(key->ipv6.nd.tll));
return 0;
}
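/* Parse the Network Service Header at the network offset into the NSH
 * fields of @key.  Returns 0 on success, -EINVAL for an unsupported
 * version, metadata type or length, or another negative errno value if
 * the header cannot be pulled.
 */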
static int parse_nsh(struct sk_buff *skb, struct sw_flow_key *key)
{
struct nshhdr *nh;
unsigned int nh_ofs = skb_network_offset(skb);
u8 version, length;
int err;
err = check_header(skb, nh_ofs + NSH_BASE_HDR_LEN);
if (unlikely(err))
return err;
nh = nsh_hdr(skb);
version = nsh_get_ver(nh);
length = nsh_hdr_len(nh);
if (version != 0)
return -EINVAL;
err = check_header(skb, nh_ofs + length);
if (unlikely(err))
return err;
nh = nsh_hdr(skb);
key->nsh.base.flags = nsh_get_flags(nh);
key->nsh.base.ttl = nsh_get_ttl(nh);
key->nsh.base.mdtype = nh->mdtype;
key->nsh.base.np = nh->np;
key->nsh.base.path_hdr = nh->path_hdr;
switch (key->nsh.base.mdtype) {
case NSH_M_TYPE1:
if (length != NSH_M_TYPE1_LEN)
return -EINVAL;
memcpy(key->nsh.context, nh->md1.context,
sizeof(nh->md1));
break;
case NSH_M_TYPE2:
memset(key->nsh.context, 0,
sizeof(nh->md1));
break;
default:
return -EINVAL;
}
return 0;
}
/**
* key_extract_l3l4 - extracts L3/L4 header information.
* @skb: sk_buff that contains the frame, with skb->data pointing to the
* L3 header
* @key: output flow key
*
* Return: %0 if successful, otherwise a negative errno value.
*/
static int key_extract_l3l4(struct sk_buff *skb, struct sw_flow_key *key)
{
int error;
/* Network layer. */
if (key->eth.type == htons(ETH_P_IP)) {
struct iphdr *nh;
__be16 offset;
error = check_iphdr(skb);
if (unlikely(error)) {
memset(&key->ip, 0, sizeof(key->ip));
memset(&key->ipv4, 0, sizeof(key->ipv4));
if (error == -EINVAL) {
skb->transport_header = skb->network_header;
error = 0;
}
return error;
}
nh = ip_hdr(skb);
key->ipv4.addr.src = nh->saddr;
key->ipv4.addr.dst = nh->daddr;
key->ip.proto = nh->protocol;
key->ip.tos = nh->tos;
key->ip.ttl = nh->ttl;
offset = nh->frag_off & htons(IP_OFFSET);
if (offset) {
key->ip.frag = OVS_FRAG_TYPE_LATER;
memset(&key->tp, 0, sizeof(key->tp));
return 0;
}
if (nh->frag_off & htons(IP_MF) ||
skb_shinfo(skb)->gso_type & SKB_GSO_UDP)
key->ip.frag = OVS_FRAG_TYPE_FIRST;
else
key->ip.frag = OVS_FRAG_TYPE_NONE;
/* Transport layer. */
if (key->ip.proto == IPPROTO_TCP) {
if (tcphdr_ok(skb)) {
struct tcphdr *tcp = tcp_hdr(skb);
key->tp.src = tcp->source;
key->tp.dst = tcp->dest;
key->tp.flags = TCP_FLAGS_BE16(tcp);
} else {
memset(&key->tp, 0, sizeof(key->tp));
}
} else if (key->ip.proto == IPPROTO_UDP) {
if (udphdr_ok(skb)) {
struct udphdr *udp = udp_hdr(skb);
key->tp.src = udp->source;
key->tp.dst = udp->dest;
} else {
memset(&key->tp, 0, sizeof(key->tp));
}
} else if (key->ip.proto == IPPROTO_SCTP) {
if (sctphdr_ok(skb)) {
struct sctphdr *sctp = sctp_hdr(skb);
key->tp.src = sctp->source;
key->tp.dst = sctp->dest;
} else {
memset(&key->tp, 0, sizeof(key->tp));
}
} else if (key->ip.proto == IPPROTO_ICMP) {
if (icmphdr_ok(skb)) {
struct icmphdr *icmp = icmp_hdr(skb);
/* The ICMP type and code fields use the 16-bit
* transport port fields, so we need to store
* them in 16-bit network byte order. */
key->tp.src = htons(icmp->type);
key->tp.dst = htons(icmp->code);
} else {
memset(&key->tp, 0, sizeof(key->tp));
}
}
} else if (key->eth.type == htons(ETH_P_ARP) ||
key->eth.type == htons(ETH_P_RARP)) {
struct arp_eth_header *arp;
bool arp_available = arphdr_ok(skb);
arp = (struct arp_eth_header *)skb_network_header(skb);
if (arp_available &&
arp->ar_hrd == htons(ARPHRD_ETHER) &&
arp->ar_pro == htons(ETH_P_IP) &&
arp->ar_hln == ETH_ALEN &&
arp->ar_pln == 4) {
/* We only match on the lower 8 bits of the opcode. */
if (ntohs(arp->ar_op) <= 0xff)
key->ip.proto = ntohs(arp->ar_op);
else
key->ip.proto = 0;
memcpy(&key->ipv4.addr.src, arp->ar_sip, sizeof(key->ipv4.addr.src));
memcpy(&key->ipv4.addr.dst, arp->ar_tip, sizeof(key->ipv4.addr.dst));
ether_addr_copy(key->ipv4.arp.sha, arp->ar_sha);
ether_addr_copy(key->ipv4.arp.tha, arp->ar_tha);
} else {
memset(&key->ip, 0, sizeof(key->ip));
memset(&key->ipv4, 0, sizeof(key->ipv4));
}
} else if (eth_p_mpls(key->eth.type)) {
u8 label_count = 1;
memset(&key->mpls, 0, sizeof(key->mpls));
skb_set_inner_network_header(skb, skb->mac_len);
while (1) {
__be32 lse;
error = check_header(skb, skb->mac_len +
label_count * MPLS_HLEN);
if (unlikely(error))
return 0;
memcpy(&lse, skb_inner_network_header(skb), MPLS_HLEN);
if (label_count <= MPLS_LABEL_DEPTH)
memcpy(&key->mpls.lse[label_count - 1], &lse,
MPLS_HLEN);
skb_set_inner_network_header(skb, skb->mac_len +
label_count * MPLS_HLEN);
if (lse & htonl(MPLS_LS_S_MASK))
break;
label_count++;
}
if (label_count > MPLS_LABEL_DEPTH)
label_count = MPLS_LABEL_DEPTH;
key->mpls.num_labels_mask = GENMASK(label_count - 1, 0);
} else if (key->eth.type == htons(ETH_P_IPV6)) {
int nh_len; /* IPv6 Header + Extensions */
nh_len = parse_ipv6hdr(skb, key);
if (unlikely(nh_len < 0)) {
switch (nh_len) {
case -EINVAL:
memset(&key->ip, 0, sizeof(key->ip));
memset(&key->ipv6.addr, 0, sizeof(key->ipv6.addr));
fallthrough;
case -EPROTO:
skb->transport_header = skb->network_header;
error = 0;
break;
default:
error = nh_len;
}
return error;
}
if (key->ip.frag == OVS_FRAG_TYPE_LATER) {
memset(&key->tp, 0, sizeof(key->tp));
return 0;
}
if (skb_shinfo(skb)->gso_type & SKB_GSO_UDP)
key->ip.frag = OVS_FRAG_TYPE_FIRST;
/* Transport layer. */
if (key->ip.proto == NEXTHDR_TCP) {
if (tcphdr_ok(skb)) {
struct tcphdr *tcp = tcp_hdr(skb);
key->tp.src = tcp->source;
key->tp.dst = tcp->dest;
key->tp.flags = TCP_FLAGS_BE16(tcp);
} else {
memset(&key->tp, 0, sizeof(key->tp));
}
} else if (key->ip.proto == NEXTHDR_UDP) {
if (udphdr_ok(skb)) {
struct udphdr *udp = udp_hdr(skb);
key->tp.src = udp->source;
key->tp.dst = udp->dest;
} else {
memset(&key->tp, 0, sizeof(key->tp));
}
} else if (key->ip.proto == NEXTHDR_SCTP) {
if (sctphdr_ok(skb)) {
struct sctphdr *sctp = sctp_hdr(skb);
key->tp.src = sctp->source;
key->tp.dst = sctp->dest;
} else {
memset(&key->tp, 0, sizeof(key->tp));
}
} else if (key->ip.proto == NEXTHDR_ICMP) {
if (icmp6hdr_ok(skb)) {
error = parse_icmpv6(skb, key, nh_len);
if (error)
return error;
} else {
memset(&key->tp, 0, sizeof(key->tp));
}
}
} else if (key->eth.type == htons(ETH_P_NSH)) {
error = parse_nsh(skb, key);
if (error)
return error;
}
return 0;
}
/**
* key_extract - extracts a flow key from an Ethernet frame.
* @skb: sk_buff that contains the frame, with skb->data pointing to the
* Ethernet header
* @key: output flow key
*
* The caller must ensure that skb->len >= ETH_HLEN.
*
* Initializes @skb header fields as follows:
*
* - skb->mac_header: the L2 header.
*
* - skb->network_header: just past the L2 header, or just past the
* VLAN header, to the first byte of the L2 payload.
*
* - skb->transport_header: If key->eth.type is ETH_P_IP or ETH_P_IPV6
* on output, then just past the IP header, if one is present and
* of a correct length, otherwise the same as skb->network_header.
* For other key->eth.type values it is left untouched.
*
* - skb->protocol: the type of the data starting at skb->network_header.
 *   Equal to key->eth.type.
*
* Return: %0 if successful, otherwise a negative errno value.
*/
static int key_extract(struct sk_buff *skb, struct sw_flow_key *key)
{
struct ethhdr *eth;
/* Flags are always used as part of stats */
key->tp.flags = 0;
skb_reset_mac_header(skb);
/* Link layer. */
clear_vlan(key);
if (ovs_key_mac_proto(key) == MAC_PROTO_NONE) {
if (unlikely(eth_type_vlan(skb->protocol)))
return -EINVAL;
skb_reset_network_header(skb);
key->eth.type = skb->protocol;
} else {
eth = eth_hdr(skb);
ether_addr_copy(key->eth.src, eth->h_source);
ether_addr_copy(key->eth.dst, eth->h_dest);
__skb_pull(skb, 2 * ETH_ALEN);
/* We are going to push all headers that we pull, so no need to
* update skb->csum here.
*/
if (unlikely(parse_vlan(skb, key)))
return -ENOMEM;
key->eth.type = parse_ethertype(skb);
if (unlikely(key->eth.type == htons(0)))
return -ENOMEM;
/* Multiple tagged packets need to retain TPID to satisfy
* skb_vlan_pop(), which will later shift the ethertype into
* skb->protocol.
*/
if (key->eth.cvlan.tci & htons(VLAN_CFI_MASK))
skb->protocol = key->eth.cvlan.tpid;
else
skb->protocol = key->eth.type;
skb_reset_network_header(skb);
__skb_push(skb, skb->data - skb_mac_header(skb));
}
skb_reset_mac_len(skb);
/* Fill out L3/L4 key info, if any */
return key_extract_l3l4(skb, key);
}
/* Conntrack fragment handling expects the L3 headers to already be parsed,
 * so provide a helper that refreshes only the L3/L4 parts of the flow key.
 */
int ovs_flow_key_update_l3l4(struct sk_buff *skb, struct sw_flow_key *key)
{
return key_extract_l3l4(skb, key);
}
int ovs_flow_key_update(struct sk_buff *skb, struct sw_flow_key *key)
{
int res;
res = key_extract(skb, key);
if (!res)
key->mac_proto &= ~SW_FLOW_KEY_INVALID;
return res;
}
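/* Determine the MAC protocol of the packet from the type of the receiving
 * device: Ethernet devices (and ARPHRD_NONE devices carrying ETH_P_TEB)
 * yield MAC_PROTO_ETHERNET, other ARPHRD_NONE devices yield MAC_PROTO_NONE,
 * and anything else is rejected with -EINVAL.
 */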
static int key_extract_mac_proto(struct sk_buff *skb)
{
switch (skb->dev->type) {
case ARPHRD_ETHER:
return MAC_PROTO_ETHERNET;
case ARPHRD_NONE:
if (skb->protocol == htons(ETH_P_TEB))
return MAC_PROTO_ETHERNET;
return MAC_PROTO_NONE;
}
WARN_ON_ONCE(1);
return -EINVAL;
}
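/* Extract a complete flow key for a received packet: tunnel metadata from
 * @tun_info (if any), skb metadata such as priority, input port and mark,
 * TC recirculation/conntrack state where available, and finally the packet
 * headers via key_extract().  Returns 0 on success or a negative errno
 * value.
 */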
int ovs_flow_key_extract(const struct ip_tunnel_info *tun_info,
struct sk_buff *skb, struct sw_flow_key *key)
{
#if IS_ENABLED(CONFIG_NET_TC_SKB_EXT)
struct tc_skb_ext *tc_ext;
#endif
bool post_ct = false, post_ct_snat = false, post_ct_dnat = false;
int res, err;
u16 zone = 0;
/* Extract metadata from packet. */
if (tun_info) {
key->tun_proto = ip_tunnel_info_af(tun_info);
memcpy(&key->tun_key, &tun_info->key, sizeof(key->tun_key));
if (tun_info->options_len) {
BUILD_BUG_ON((1 << (sizeof(tun_info->options_len) *
8)) - 1
> sizeof(key->tun_opts));
ip_tunnel_info_opts_get(TUN_METADATA_OPTS(key, tun_info->options_len),
tun_info);
key->tun_opts_len = tun_info->options_len;
} else {
key->tun_opts_len = 0;
}
} else {
key->tun_proto = 0;
key->tun_opts_len = 0;
memset(&key->tun_key, 0, sizeof(key->tun_key));
}
key->phy.priority = skb->priority;
key->phy.in_port = OVS_CB(skb)->input_vport->port_no;
key->phy.skb_mark = skb->mark;
key->ovs_flow_hash = 0;
res = key_extract_mac_proto(skb);
if (res < 0)
return res;
key->mac_proto = res;
#if IS_ENABLED(CONFIG_NET_TC_SKB_EXT)
if (tc_skb_ext_tc_enabled()) {
tc_ext = skb_ext_find(skb, TC_SKB_EXT);
key->recirc_id = tc_ext && !tc_ext->act_miss ?
tc_ext->chain : 0;
OVS_CB(skb)->mru = tc_ext ? tc_ext->mru : 0;
post_ct = tc_ext ? tc_ext->post_ct : false;
post_ct_snat = post_ct ? tc_ext->post_ct_snat : false;
post_ct_dnat = post_ct ? tc_ext->post_ct_dnat : false;
zone = post_ct ? tc_ext->zone : 0;
} else {
key->recirc_id = 0;
}
#else
key->recirc_id = 0;
#endif
err = key_extract(skb, key);
if (!err) {
ovs_ct_fill_key(skb, key, post_ct); /* Must be after key_extract(). */
if (post_ct) {
if (!skb_get_nfct(skb)) {
key->ct_zone = zone;
} else {
if (!post_ct_dnat)
key->ct_state &= ~OVS_CS_F_DST_NAT;
if (!post_ct_snat)
key->ct_state &= ~OVS_CS_F_SRC_NAT;
}
}
}
return err;
}
int ovs_flow_key_extract_userspace(struct net *net, const struct nlattr *attr,
struct sk_buff *skb,
struct sw_flow_key *key, bool log)
{
const struct nlattr *a[OVS_KEY_ATTR_MAX + 1];
u64 attrs = 0;
int err;
err = parse_flow_nlattrs(attr, a, &attrs, log);
if (err)
return -EINVAL;
/* Extract metadata from netlink attributes. */
err = ovs_nla_get_flow_metadata(net, a, attrs, key, log);
if (err)
return err;
	/* key_extract() assumes that skb->protocol is set up for
	 * layer 3 packets, which is the case for other callers,
	 * in particular packets received from the network stack.
	 * Here the correct value can be set from the metadata
	 * extracted above.
	 * For L2 packets the key's eth type is zero; skb->protocol
	 * is then set to the correct value later, during key_extract().
*/
skb->protocol = key->eth.type;
err = key_extract(skb, key);
if (err)
return err;
/* Check that we have conntrack original direction tuple metadata only
* for packets for which it makes sense. Otherwise the key may be
* corrupted due to overlapping key fields.
*/
if (attrs & (1 << OVS_KEY_ATTR_CT_ORIG_TUPLE_IPV4) &&
key->eth.type != htons(ETH_P_IP))
return -EINVAL;
if (attrs & (1 << OVS_KEY_ATTR_CT_ORIG_TUPLE_IPV6) &&
(key->eth.type != htons(ETH_P_IPV6) ||
sw_flow_key_is_nd(key)))
return -EINVAL;
return 0;
}
| linux-master | net/openvswitch/flow.c |
// SPDX-License-Identifier: GPL-2.0-only
/*
* Copyright (c) 2007-2012 Nicira, Inc.
*/
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
#include <linux/if_arp.h>
#include <linux/if_bridge.h>
#include <linux/if_vlan.h>
#include <linux/kernel.h>
#include <linux/llc.h>
#include <linux/rtnetlink.h>
#include <linux/skbuff.h>
#include <linux/openvswitch.h>
#include <linux/export.h>
#include <net/ip_tunnels.h>
#include <net/rtnetlink.h>
#include "datapath.h"
#include "vport.h"
#include "vport-internal_dev.h"
#include "vport-netdev.h"
static struct vport_ops ovs_netdev_vport_ops;
/* Must be called with rcu_read_lock. */
static void netdev_port_receive(struct sk_buff *skb)
{
struct vport *vport;
vport = ovs_netdev_get_vport(skb->dev);
if (unlikely(!vport))
goto error;
if (unlikely(skb_warn_if_lro(skb)))
goto error;
/* Make our own copy of the packet. Otherwise we will mangle the
* packet for anyone who came before us (e.g. tcpdump via AF_PACKET).
*/
skb = skb_share_check(skb, GFP_ATOMIC);
if (unlikely(!skb))
return;
if (skb->dev->type == ARPHRD_ETHER)
skb_push_rcsum(skb, ETH_HLEN);
ovs_vport_receive(vport, skb, skb_tunnel_info(skb));
return;
error:
kfree_skb(skb);
}
/* Called with rcu_read_lock and bottom-halves disabled. */
static rx_handler_result_t netdev_frame_hook(struct sk_buff **pskb)
{
struct sk_buff *skb = *pskb;
if (unlikely(skb->pkt_type == PACKET_LOOPBACK))
return RX_HANDLER_PASS;
netdev_port_receive(skb);
return RX_HANDLER_CONSUMED;
}
static struct net_device *get_dpdev(const struct datapath *dp)
{
struct vport *local;
local = ovs_vport_ovsl(dp, OVSP_LOCAL);
return local->dev;
}
struct vport *ovs_netdev_link(struct vport *vport, const char *name)
{
int err;
vport->dev = dev_get_by_name(ovs_dp_get_net(vport->dp), name);
if (!vport->dev) {
err = -ENODEV;
goto error_free_vport;
}
netdev_tracker_alloc(vport->dev, &vport->dev_tracker, GFP_KERNEL);
if (vport->dev->flags & IFF_LOOPBACK ||
(vport->dev->type != ARPHRD_ETHER &&
vport->dev->type != ARPHRD_NONE) ||
ovs_is_internal_dev(vport->dev)) {
err = -EINVAL;
goto error_put;
}
rtnl_lock();
err = netdev_master_upper_dev_link(vport->dev,
get_dpdev(vport->dp),
NULL, NULL, NULL);
if (err)
goto error_unlock;
err = netdev_rx_handler_register(vport->dev, netdev_frame_hook,
vport);
if (err)
goto error_master_upper_dev_unlink;
dev_disable_lro(vport->dev);
dev_set_promiscuity(vport->dev, 1);
vport->dev->priv_flags |= IFF_OVS_DATAPATH;
rtnl_unlock();
return vport;
error_master_upper_dev_unlink:
netdev_upper_dev_unlink(vport->dev, get_dpdev(vport->dp));
error_unlock:
rtnl_unlock();
error_put:
netdev_put(vport->dev, &vport->dev_tracker);
error_free_vport:
ovs_vport_free(vport);
return ERR_PTR(err);
}
EXPORT_SYMBOL_GPL(ovs_netdev_link);
static struct vport *netdev_create(const struct vport_parms *parms)
{
struct vport *vport;
vport = ovs_vport_alloc(0, &ovs_netdev_vport_ops, parms);
if (IS_ERR(vport))
return vport;
return ovs_netdev_link(vport, parms->name);
}
static void vport_netdev_free(struct rcu_head *rcu)
{
struct vport *vport = container_of(rcu, struct vport, rcu);
netdev_put(vport->dev, &vport->dev_tracker);
ovs_vport_free(vport);
}
void ovs_netdev_detach_dev(struct vport *vport)
{
ASSERT_RTNL();
vport->dev->priv_flags &= ~IFF_OVS_DATAPATH;
netdev_rx_handler_unregister(vport->dev);
netdev_upper_dev_unlink(vport->dev,
netdev_master_upper_dev_get(vport->dev));
dev_set_promiscuity(vport->dev, -1);
}
static void netdev_destroy(struct vport *vport)
{
rtnl_lock();
if (netif_is_ovs_port(vport->dev))
ovs_netdev_detach_dev(vport);
rtnl_unlock();
call_rcu(&vport->rcu, vport_netdev_free);
}
void ovs_netdev_tunnel_destroy(struct vport *vport)
{
rtnl_lock();
if (netif_is_ovs_port(vport->dev))
ovs_netdev_detach_dev(vport);
/* We can be invoked by both explicit vport deletion and
* underlying netdev deregistration; delete the link only
* if it's not already shutting down.
*/
if (vport->dev->reg_state == NETREG_REGISTERED)
rtnl_delete_link(vport->dev, 0, NULL);
netdev_put(vport->dev, &vport->dev_tracker);
vport->dev = NULL;
rtnl_unlock();
call_rcu(&vport->rcu, vport_netdev_free);
}
EXPORT_SYMBOL_GPL(ovs_netdev_tunnel_destroy);
/* Returns NULL if this device is not attached to a datapath. */
struct vport *ovs_netdev_get_vport(struct net_device *dev)
{
if (likely(netif_is_ovs_port(dev)))
return (struct vport *)
rcu_dereference_rtnl(dev->rx_handler_data);
else
return NULL;
}
static struct vport_ops ovs_netdev_vport_ops = {
.type = OVS_VPORT_TYPE_NETDEV,
.create = netdev_create,
.destroy = netdev_destroy,
.send = dev_queue_xmit,
};
int __init ovs_netdev_init(void)
{
return ovs_vport_ops_register(&ovs_netdev_vport_ops);
}
void ovs_netdev_exit(void)
{
ovs_vport_ops_unregister(&ovs_netdev_vport_ops);
}
| linux-master | net/openvswitch/vport-netdev.c |
// SPDX-License-Identifier: GPL-2.0-only
/*
* Copyright (c) 2007-2014 Nicira, Inc.
*/
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
#include <linux/if.h>
#include <linux/skbuff.h>
#include <linux/ip.h>
#include <linux/if_tunnel.h>
#include <linux/if_vlan.h>
#include <linux/in.h>
#include <linux/in_route.h>
#include <linux/inetdevice.h>
#include <linux/jhash.h>
#include <linux/list.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/workqueue.h>
#include <linux/rculist.h>
#include <net/route.h>
#include <net/xfrm.h>
#include <net/icmp.h>
#include <net/ip.h>
#include <net/ip_tunnels.h>
#include <net/gre.h>
#include <net/net_namespace.h>
#include <net/netns/generic.h>
#include <net/protocol.h>
#include "datapath.h"
#include "vport.h"
#include "vport-netdev.h"
static struct vport_ops ovs_gre_vport_ops;
static struct vport *gre_tnl_create(const struct vport_parms *parms)
{
struct net *net = ovs_dp_get_net(parms->dp);
struct net_device *dev;
struct vport *vport;
int err;
vport = ovs_vport_alloc(0, &ovs_gre_vport_ops, parms);
if (IS_ERR(vport))
return vport;
rtnl_lock();
dev = gretap_fb_dev_create(net, parms->name, NET_NAME_USER);
if (IS_ERR(dev)) {
rtnl_unlock();
ovs_vport_free(vport);
return ERR_CAST(dev);
}
err = dev_change_flags(dev, dev->flags | IFF_UP, NULL);
if (err < 0) {
rtnl_delete_link(dev, 0, NULL);
rtnl_unlock();
ovs_vport_free(vport);
return ERR_PTR(err);
}
rtnl_unlock();
return vport;
}
static struct vport *gre_create(const struct vport_parms *parms)
{
struct vport *vport;
vport = gre_tnl_create(parms);
if (IS_ERR(vport))
return vport;
return ovs_netdev_link(vport, parms->name);
}
static struct vport_ops ovs_gre_vport_ops = {
.type = OVS_VPORT_TYPE_GRE,
.create = gre_create,
.send = dev_queue_xmit,
.destroy = ovs_netdev_tunnel_destroy,
};
static int __init ovs_gre_tnl_init(void)
{
return ovs_vport_ops_register(&ovs_gre_vport_ops);
}
static void __exit ovs_gre_tnl_exit(void)
{
ovs_vport_ops_unregister(&ovs_gre_vport_ops);
}
module_init(ovs_gre_tnl_init);
module_exit(ovs_gre_tnl_exit);
MODULE_DESCRIPTION("OVS: GRE switching port");
MODULE_LICENSE("GPL");
MODULE_ALIAS("vport-type-3");
| linux-master | net/openvswitch/vport-gre.c |
// SPDX-License-Identifier: GPL-2.0
/* bug in tracepoint.h, it should include this */
#include <linux/module.h>
/* sparse isn't too happy with all macros... */
#ifndef __CHECKER__
#define CREATE_TRACE_POINTS
#include "openvswitch_trace.h"
#endif
| linux-master | net/openvswitch/openvswitch_trace.c |
// SPDX-License-Identifier: GPL-2.0-only
/*
* Copyright (c) 2007-2017 Nicira, Inc.
*/
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
#include "flow.h"
#include "datapath.h"
#include <linux/uaccess.h>
#include <linux/netdevice.h>
#include <linux/etherdevice.h>
#include <linux/if_ether.h>
#include <linux/if_vlan.h>
#include <net/llc_pdu.h>
#include <linux/kernel.h>
#include <linux/jhash.h>
#include <linux/jiffies.h>
#include <linux/llc.h>
#include <linux/module.h>
#include <linux/in.h>
#include <linux/rcupdate.h>
#include <linux/if_arp.h>
#include <linux/ip.h>
#include <linux/ipv6.h>
#include <linux/sctp.h>
#include <linux/tcp.h>
#include <linux/udp.h>
#include <linux/icmp.h>
#include <linux/icmpv6.h>
#include <linux/rculist.h>
#include <net/geneve.h>
#include <net/ip.h>
#include <net/ipv6.h>
#include <net/ndisc.h>
#include <net/mpls.h>
#include <net/vxlan.h>
#include <net/tun_proto.h>
#include <net/erspan.h>
#include "drop.h"
#include "flow_netlink.h"
struct ovs_len_tbl {
int len;
const struct ovs_len_tbl *next;
};
#define OVS_ATTR_NESTED -1
#define OVS_ATTR_VARIABLE -2
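/* Return true if executing the given action list may modify the flow key
 * or the packet headers it was extracted from; output, recirc, truncate,
 * userspace and drop actions are the only ones treated as side-effect
 * free here.
 */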
static bool actions_may_change_flow(const struct nlattr *actions)
{
struct nlattr *nla;
int rem;
nla_for_each_nested(nla, actions, rem) {
u16 action = nla_type(nla);
switch (action) {
case OVS_ACTION_ATTR_OUTPUT:
case OVS_ACTION_ATTR_RECIRC:
case OVS_ACTION_ATTR_TRUNC:
case OVS_ACTION_ATTR_USERSPACE:
case OVS_ACTION_ATTR_DROP:
break;
case OVS_ACTION_ATTR_CT:
case OVS_ACTION_ATTR_CT_CLEAR:
case OVS_ACTION_ATTR_HASH:
case OVS_ACTION_ATTR_POP_ETH:
case OVS_ACTION_ATTR_POP_MPLS:
case OVS_ACTION_ATTR_POP_NSH:
case OVS_ACTION_ATTR_POP_VLAN:
case OVS_ACTION_ATTR_PUSH_ETH:
case OVS_ACTION_ATTR_PUSH_MPLS:
case OVS_ACTION_ATTR_PUSH_NSH:
case OVS_ACTION_ATTR_PUSH_VLAN:
case OVS_ACTION_ATTR_SAMPLE:
case OVS_ACTION_ATTR_SET:
case OVS_ACTION_ATTR_SET_MASKED:
case OVS_ACTION_ATTR_METER:
case OVS_ACTION_ATTR_CHECK_PKT_LEN:
case OVS_ACTION_ATTR_ADD_MPLS:
case OVS_ACTION_ATTR_DEC_TTL:
default:
return true;
}
}
return false;
}
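/* Widen the key (or mask) range of @match so that it covers the @size
 * bytes starting at @offset, rounded out to long-sized boundaries.
 */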
static void update_range(struct sw_flow_match *match,
size_t offset, size_t size, bool is_mask)
{
struct sw_flow_key_range *range;
size_t start = rounddown(offset, sizeof(long));
size_t end = roundup(offset + size, sizeof(long));
if (!is_mask)
range = &match->range;
else
range = &match->mask->range;
if (range->start == range->end) {
range->start = start;
range->end = end;
return;
}
if (range->start > start)
range->start = start;
if (range->end < end)
range->end = end;
}
#define SW_FLOW_KEY_PUT(match, field, value, is_mask) \
do { \
update_range(match, offsetof(struct sw_flow_key, field), \
sizeof((match)->key->field), is_mask); \
if (is_mask) \
(match)->mask->key.field = value; \
else \
(match)->key->field = value; \
} while (0)
#define SW_FLOW_KEY_MEMCPY_OFFSET(match, offset, value_p, len, is_mask) \
do { \
update_range(match, offset, len, is_mask); \
if (is_mask) \
memcpy((u8 *)&(match)->mask->key + offset, value_p, \
len); \
else \
memcpy((u8 *)(match)->key + offset, value_p, len); \
} while (0)
#define SW_FLOW_KEY_MEMCPY(match, field, value_p, len, is_mask) \
SW_FLOW_KEY_MEMCPY_OFFSET(match, offsetof(struct sw_flow_key, field), \
value_p, len, is_mask)
#define SW_FLOW_KEY_MEMSET_FIELD(match, field, value, is_mask) \
do { \
update_range(match, offsetof(struct sw_flow_key, field), \
sizeof((match)->key->field), is_mask); \
if (is_mask) \
memset((u8 *)&(match)->mask->key.field, value, \
sizeof((match)->mask->key.field)); \
else \
memset((u8 *)&(match)->key->field, value, \
sizeof((match)->key->field)); \
} while (0)
static bool match_validate(const struct sw_flow_match *match,
u64 key_attrs, u64 mask_attrs, bool log)
{
u64 key_expected = 0;
u64 mask_allowed = key_attrs; /* At most allow all key attributes */
	/* The following mask attributes are allowed only if they
	 * pass the validation tests.
	 */
mask_allowed &= ~((1 << OVS_KEY_ATTR_IPV4)
| (1 << OVS_KEY_ATTR_CT_ORIG_TUPLE_IPV4)
| (1 << OVS_KEY_ATTR_IPV6)
| (1 << OVS_KEY_ATTR_CT_ORIG_TUPLE_IPV6)
| (1 << OVS_KEY_ATTR_TCP)
| (1 << OVS_KEY_ATTR_TCP_FLAGS)
| (1 << OVS_KEY_ATTR_UDP)
| (1 << OVS_KEY_ATTR_SCTP)
| (1 << OVS_KEY_ATTR_ICMP)
| (1 << OVS_KEY_ATTR_ICMPV6)
| (1 << OVS_KEY_ATTR_ARP)
| (1 << OVS_KEY_ATTR_ND)
| (1 << OVS_KEY_ATTR_MPLS)
| (1 << OVS_KEY_ATTR_NSH));
/* Always allowed mask fields. */
mask_allowed |= ((1 << OVS_KEY_ATTR_TUNNEL)
| (1 << OVS_KEY_ATTR_IN_PORT)
| (1 << OVS_KEY_ATTR_ETHERTYPE));
/* Check key attributes. */
if (match->key->eth.type == htons(ETH_P_ARP)
|| match->key->eth.type == htons(ETH_P_RARP)) {
key_expected |= 1 << OVS_KEY_ATTR_ARP;
if (match->mask && (match->mask->key.eth.type == htons(0xffff)))
mask_allowed |= 1 << OVS_KEY_ATTR_ARP;
}
if (eth_p_mpls(match->key->eth.type)) {
key_expected |= 1 << OVS_KEY_ATTR_MPLS;
if (match->mask && (match->mask->key.eth.type == htons(0xffff)))
mask_allowed |= 1 << OVS_KEY_ATTR_MPLS;
}
if (match->key->eth.type == htons(ETH_P_IP)) {
key_expected |= 1 << OVS_KEY_ATTR_IPV4;
if (match->mask && match->mask->key.eth.type == htons(0xffff)) {
mask_allowed |= 1 << OVS_KEY_ATTR_IPV4;
mask_allowed |= 1 << OVS_KEY_ATTR_CT_ORIG_TUPLE_IPV4;
}
if (match->key->ip.frag != OVS_FRAG_TYPE_LATER) {
if (match->key->ip.proto == IPPROTO_UDP) {
key_expected |= 1 << OVS_KEY_ATTR_UDP;
if (match->mask && (match->mask->key.ip.proto == 0xff))
mask_allowed |= 1 << OVS_KEY_ATTR_UDP;
}
if (match->key->ip.proto == IPPROTO_SCTP) {
key_expected |= 1 << OVS_KEY_ATTR_SCTP;
if (match->mask && (match->mask->key.ip.proto == 0xff))
mask_allowed |= 1 << OVS_KEY_ATTR_SCTP;
}
if (match->key->ip.proto == IPPROTO_TCP) {
key_expected |= 1 << OVS_KEY_ATTR_TCP;
key_expected |= 1 << OVS_KEY_ATTR_TCP_FLAGS;
if (match->mask && (match->mask->key.ip.proto == 0xff)) {
mask_allowed |= 1 << OVS_KEY_ATTR_TCP;
mask_allowed |= 1 << OVS_KEY_ATTR_TCP_FLAGS;
}
}
if (match->key->ip.proto == IPPROTO_ICMP) {
key_expected |= 1 << OVS_KEY_ATTR_ICMP;
if (match->mask && (match->mask->key.ip.proto == 0xff))
mask_allowed |= 1 << OVS_KEY_ATTR_ICMP;
}
}
}
if (match->key->eth.type == htons(ETH_P_IPV6)) {
key_expected |= 1 << OVS_KEY_ATTR_IPV6;
if (match->mask && match->mask->key.eth.type == htons(0xffff)) {
mask_allowed |= 1 << OVS_KEY_ATTR_IPV6;
mask_allowed |= 1 << OVS_KEY_ATTR_CT_ORIG_TUPLE_IPV6;
}
if (match->key->ip.frag != OVS_FRAG_TYPE_LATER) {
if (match->key->ip.proto == IPPROTO_UDP) {
key_expected |= 1 << OVS_KEY_ATTR_UDP;
if (match->mask && (match->mask->key.ip.proto == 0xff))
mask_allowed |= 1 << OVS_KEY_ATTR_UDP;
}
if (match->key->ip.proto == IPPROTO_SCTP) {
key_expected |= 1 << OVS_KEY_ATTR_SCTP;
if (match->mask && (match->mask->key.ip.proto == 0xff))
mask_allowed |= 1 << OVS_KEY_ATTR_SCTP;
}
if (match->key->ip.proto == IPPROTO_TCP) {
key_expected |= 1 << OVS_KEY_ATTR_TCP;
key_expected |= 1 << OVS_KEY_ATTR_TCP_FLAGS;
if (match->mask && (match->mask->key.ip.proto == 0xff)) {
mask_allowed |= 1 << OVS_KEY_ATTR_TCP;
mask_allowed |= 1 << OVS_KEY_ATTR_TCP_FLAGS;
}
}
if (match->key->ip.proto == IPPROTO_ICMPV6) {
key_expected |= 1 << OVS_KEY_ATTR_ICMPV6;
if (match->mask && (match->mask->key.ip.proto == 0xff))
mask_allowed |= 1 << OVS_KEY_ATTR_ICMPV6;
if (match->key->tp.src ==
htons(NDISC_NEIGHBOUR_SOLICITATION) ||
match->key->tp.src == htons(NDISC_NEIGHBOUR_ADVERTISEMENT)) {
key_expected |= 1 << OVS_KEY_ATTR_ND;
/* Original direction conntrack tuple
* uses the same space as the ND fields
* in the key, so both are not allowed
* at the same time.
*/
mask_allowed &= ~(1ULL << OVS_KEY_ATTR_CT_ORIG_TUPLE_IPV6);
if (match->mask && (match->mask->key.tp.src == htons(0xff)))
mask_allowed |= 1 << OVS_KEY_ATTR_ND;
}
}
}
}
if (match->key->eth.type == htons(ETH_P_NSH)) {
key_expected |= 1 << OVS_KEY_ATTR_NSH;
if (match->mask &&
match->mask->key.eth.type == htons(0xffff)) {
mask_allowed |= 1 << OVS_KEY_ATTR_NSH;
}
}
if ((key_attrs & key_expected) != key_expected) {
/* Key attributes check failed. */
OVS_NLERR(log, "Missing key (keys=%llx, expected=%llx)",
(unsigned long long)key_attrs,
(unsigned long long)key_expected);
return false;
}
if ((mask_attrs & mask_allowed) != mask_attrs) {
/* Mask attributes check failed. */
OVS_NLERR(log, "Unexpected mask (mask=%llx, allowed=%llx)",
(unsigned long long)mask_attrs,
(unsigned long long)mask_allowed);
return false;
}
return true;
}
size_t ovs_tun_key_attr_size(void)
{
/* Whenever adding new OVS_TUNNEL_KEY_ FIELDS, we should consider
* updating this function.
*/
return nla_total_size_64bit(8) /* OVS_TUNNEL_KEY_ATTR_ID */
+ nla_total_size(16) /* OVS_TUNNEL_KEY_ATTR_IPV[46]_SRC */
+ nla_total_size(16) /* OVS_TUNNEL_KEY_ATTR_IPV[46]_DST */
+ nla_total_size(1) /* OVS_TUNNEL_KEY_ATTR_TOS */
+ nla_total_size(1) /* OVS_TUNNEL_KEY_ATTR_TTL */
+ nla_total_size(0) /* OVS_TUNNEL_KEY_ATTR_DONT_FRAGMENT */
+ nla_total_size(0) /* OVS_TUNNEL_KEY_ATTR_CSUM */
+ nla_total_size(0) /* OVS_TUNNEL_KEY_ATTR_OAM */
+ nla_total_size(256) /* OVS_TUNNEL_KEY_ATTR_GENEVE_OPTS */
/* OVS_TUNNEL_KEY_ATTR_VXLAN_OPTS and
		 * OVS_TUNNEL_KEY_ATTR_ERSPAN_OPTS are mutually exclusive with
* OVS_TUNNEL_KEY_ATTR_GENEVE_OPTS and covered by it.
*/
+ nla_total_size(2) /* OVS_TUNNEL_KEY_ATTR_TP_SRC */
+ nla_total_size(2); /* OVS_TUNNEL_KEY_ATTR_TP_DST */
}
static size_t ovs_nsh_key_attr_size(void)
{
/* Whenever adding new OVS_NSH_KEY_ FIELDS, we should consider
* updating this function.
*/
return nla_total_size(NSH_BASE_HDR_LEN) /* OVS_NSH_KEY_ATTR_BASE */
/* OVS_NSH_KEY_ATTR_MD1 and OVS_NSH_KEY_ATTR_MD2 are
* mutually exclusive, so the bigger one can cover
	 * the smaller one.
*/
+ nla_total_size(NSH_CTX_HDRS_MAX_LEN);
}
size_t ovs_key_attr_size(void)
{
/* Whenever adding new OVS_KEY_ FIELDS, we should consider
* updating this function.
*/
BUILD_BUG_ON(OVS_KEY_ATTR_MAX != 32);
return nla_total_size(4) /* OVS_KEY_ATTR_PRIORITY */
+ nla_total_size(0) /* OVS_KEY_ATTR_TUNNEL */
+ ovs_tun_key_attr_size()
+ nla_total_size(4) /* OVS_KEY_ATTR_IN_PORT */
+ nla_total_size(4) /* OVS_KEY_ATTR_SKB_MARK */
+ nla_total_size(4) /* OVS_KEY_ATTR_DP_HASH */
+ nla_total_size(4) /* OVS_KEY_ATTR_RECIRC_ID */
+ nla_total_size(4) /* OVS_KEY_ATTR_CT_STATE */
+ nla_total_size(2) /* OVS_KEY_ATTR_CT_ZONE */
+ nla_total_size(4) /* OVS_KEY_ATTR_CT_MARK */
+ nla_total_size(16) /* OVS_KEY_ATTR_CT_LABELS */
+ nla_total_size(40) /* OVS_KEY_ATTR_CT_ORIG_TUPLE_IPV6 */
+ nla_total_size(0) /* OVS_KEY_ATTR_NSH */
+ ovs_nsh_key_attr_size()
+ nla_total_size(12) /* OVS_KEY_ATTR_ETHERNET */
+ nla_total_size(2) /* OVS_KEY_ATTR_ETHERTYPE */
+ nla_total_size(4) /* OVS_KEY_ATTR_VLAN */
+ nla_total_size(0) /* OVS_KEY_ATTR_ENCAP */
+ nla_total_size(2) /* OVS_KEY_ATTR_ETHERTYPE */
+ nla_total_size(40) /* OVS_KEY_ATTR_IPV6 */
+ nla_total_size(2) /* OVS_KEY_ATTR_ICMPV6 */
+ nla_total_size(28) /* OVS_KEY_ATTR_ND */
+ nla_total_size(2); /* OVS_KEY_ATTR_IPV6_EXTHDRS */
}
static const struct ovs_len_tbl ovs_vxlan_ext_key_lens[OVS_VXLAN_EXT_MAX + 1] = {
[OVS_VXLAN_EXT_GBP] = { .len = sizeof(u32) },
};
static const struct ovs_len_tbl ovs_tunnel_key_lens[OVS_TUNNEL_KEY_ATTR_MAX + 1] = {
[OVS_TUNNEL_KEY_ATTR_ID] = { .len = sizeof(u64) },
[OVS_TUNNEL_KEY_ATTR_IPV4_SRC] = { .len = sizeof(u32) },
[OVS_TUNNEL_KEY_ATTR_IPV4_DST] = { .len = sizeof(u32) },
[OVS_TUNNEL_KEY_ATTR_TOS] = { .len = 1 },
[OVS_TUNNEL_KEY_ATTR_TTL] = { .len = 1 },
[OVS_TUNNEL_KEY_ATTR_DONT_FRAGMENT] = { .len = 0 },
[OVS_TUNNEL_KEY_ATTR_CSUM] = { .len = 0 },
[OVS_TUNNEL_KEY_ATTR_TP_SRC] = { .len = sizeof(u16) },
[OVS_TUNNEL_KEY_ATTR_TP_DST] = { .len = sizeof(u16) },
[OVS_TUNNEL_KEY_ATTR_OAM] = { .len = 0 },
[OVS_TUNNEL_KEY_ATTR_GENEVE_OPTS] = { .len = OVS_ATTR_VARIABLE },
[OVS_TUNNEL_KEY_ATTR_VXLAN_OPTS] = { .len = OVS_ATTR_NESTED,
.next = ovs_vxlan_ext_key_lens },
[OVS_TUNNEL_KEY_ATTR_IPV6_SRC] = { .len = sizeof(struct in6_addr) },
[OVS_TUNNEL_KEY_ATTR_IPV6_DST] = { .len = sizeof(struct in6_addr) },
[OVS_TUNNEL_KEY_ATTR_ERSPAN_OPTS] = { .len = OVS_ATTR_VARIABLE },
[OVS_TUNNEL_KEY_ATTR_IPV4_INFO_BRIDGE] = { .len = 0 },
};
static const struct ovs_len_tbl
ovs_nsh_key_attr_lens[OVS_NSH_KEY_ATTR_MAX + 1] = {
[OVS_NSH_KEY_ATTR_BASE] = { .len = sizeof(struct ovs_nsh_key_base) },
[OVS_NSH_KEY_ATTR_MD1] = { .len = sizeof(struct ovs_nsh_key_md1) },
[OVS_NSH_KEY_ATTR_MD2] = { .len = OVS_ATTR_VARIABLE },
};
/* The size of the argument for each %OVS_KEY_ATTR_* Netlink attribute. */
static const struct ovs_len_tbl ovs_key_lens[OVS_KEY_ATTR_MAX + 1] = {
[OVS_KEY_ATTR_ENCAP] = { .len = OVS_ATTR_NESTED },
[OVS_KEY_ATTR_PRIORITY] = { .len = sizeof(u32) },
[OVS_KEY_ATTR_IN_PORT] = { .len = sizeof(u32) },
[OVS_KEY_ATTR_SKB_MARK] = { .len = sizeof(u32) },
[OVS_KEY_ATTR_ETHERNET] = { .len = sizeof(struct ovs_key_ethernet) },
[OVS_KEY_ATTR_VLAN] = { .len = sizeof(__be16) },
[OVS_KEY_ATTR_ETHERTYPE] = { .len = sizeof(__be16) },
[OVS_KEY_ATTR_IPV4] = { .len = sizeof(struct ovs_key_ipv4) },
[OVS_KEY_ATTR_IPV6] = { .len = sizeof(struct ovs_key_ipv6) },
[OVS_KEY_ATTR_TCP] = { .len = sizeof(struct ovs_key_tcp) },
[OVS_KEY_ATTR_TCP_FLAGS] = { .len = sizeof(__be16) },
[OVS_KEY_ATTR_UDP] = { .len = sizeof(struct ovs_key_udp) },
[OVS_KEY_ATTR_SCTP] = { .len = sizeof(struct ovs_key_sctp) },
[OVS_KEY_ATTR_ICMP] = { .len = sizeof(struct ovs_key_icmp) },
[OVS_KEY_ATTR_ICMPV6] = { .len = sizeof(struct ovs_key_icmpv6) },
[OVS_KEY_ATTR_ARP] = { .len = sizeof(struct ovs_key_arp) },
[OVS_KEY_ATTR_ND] = { .len = sizeof(struct ovs_key_nd) },
[OVS_KEY_ATTR_RECIRC_ID] = { .len = sizeof(u32) },
[OVS_KEY_ATTR_DP_HASH] = { .len = sizeof(u32) },
[OVS_KEY_ATTR_TUNNEL] = { .len = OVS_ATTR_NESTED,
.next = ovs_tunnel_key_lens, },
[OVS_KEY_ATTR_MPLS] = { .len = OVS_ATTR_VARIABLE },
[OVS_KEY_ATTR_CT_STATE] = { .len = sizeof(u32) },
[OVS_KEY_ATTR_CT_ZONE] = { .len = sizeof(u16) },
[OVS_KEY_ATTR_CT_MARK] = { .len = sizeof(u32) },
[OVS_KEY_ATTR_CT_LABELS] = { .len = sizeof(struct ovs_key_ct_labels) },
[OVS_KEY_ATTR_CT_ORIG_TUPLE_IPV4] = {
.len = sizeof(struct ovs_key_ct_tuple_ipv4) },
[OVS_KEY_ATTR_CT_ORIG_TUPLE_IPV6] = {
.len = sizeof(struct ovs_key_ct_tuple_ipv6) },
[OVS_KEY_ATTR_NSH] = { .len = OVS_ATTR_NESTED,
.next = ovs_nsh_key_attr_lens, },
[OVS_KEY_ATTR_IPV6_EXTHDRS] = {
.len = sizeof(struct ovs_key_ipv6_exthdrs) },
};
static bool check_attr_len(unsigned int attr_len, unsigned int expected_len)
{
return expected_len == attr_len ||
expected_len == OVS_ATTR_NESTED ||
expected_len == OVS_ATTR_VARIABLE;
}
static bool is_all_zero(const u8 *fp, size_t size)
{
int i;
if (!fp)
return false;
for (i = 0; i < size; i++)
if (fp[i])
return false;
return true;
}
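/* Walk the nested OVS_KEY_ATTR_* attributes in @attr, validating their
 * types and lengths, storing a pointer to each in @a[] and setting the
 * corresponding bit in @attrsp.  When @nz is true (mask parsing),
 * all-zero attributes are skipped.  Returns 0 on success or -EINVAL.
 */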
static int __parse_flow_nlattrs(const struct nlattr *attr,
const struct nlattr *a[],
u64 *attrsp, bool log, bool nz)
{
const struct nlattr *nla;
u64 attrs;
int rem;
attrs = *attrsp;
nla_for_each_nested(nla, attr, rem) {
u16 type = nla_type(nla);
int expected_len;
if (type > OVS_KEY_ATTR_MAX) {
OVS_NLERR(log, "Key type %d is out of range max %d",
type, OVS_KEY_ATTR_MAX);
return -EINVAL;
}
if (type == OVS_KEY_ATTR_PACKET_TYPE ||
type == OVS_KEY_ATTR_ND_EXTENSIONS ||
type == OVS_KEY_ATTR_TUNNEL_INFO) {
OVS_NLERR(log, "Key type %d is not supported", type);
return -EINVAL;
}
if (attrs & (1ULL << type)) {
OVS_NLERR(log, "Duplicate key (type %d).", type);
return -EINVAL;
}
expected_len = ovs_key_lens[type].len;
if (!check_attr_len(nla_len(nla), expected_len)) {
OVS_NLERR(log, "Key %d has unexpected len %d expected %d",
type, nla_len(nla), expected_len);
return -EINVAL;
}
if (!nz || !is_all_zero(nla_data(nla), nla_len(nla))) {
attrs |= 1ULL << type;
a[type] = nla;
}
}
if (rem) {
OVS_NLERR(log, "Message has %d unknown bytes.", rem);
return -EINVAL;
}
*attrsp = attrs;
return 0;
}
static int parse_flow_mask_nlattrs(const struct nlattr *attr,
const struct nlattr *a[], u64 *attrsp,
bool log)
{
return __parse_flow_nlattrs(attr, a, attrsp, log, true);
}
int parse_flow_nlattrs(const struct nlattr *attr, const struct nlattr *a[],
u64 *attrsp, bool log)
{
return __parse_flow_nlattrs(attr, a, attrsp, log, false);
}
static int genev_tun_opt_from_nlattr(const struct nlattr *a,
struct sw_flow_match *match, bool is_mask,
bool log)
{
unsigned long opt_key_offset;
if (nla_len(a) > sizeof(match->key->tun_opts)) {
OVS_NLERR(log, "Geneve option length err (len %d, max %zu).",
nla_len(a), sizeof(match->key->tun_opts));
return -EINVAL;
}
if (nla_len(a) % 4 != 0) {
OVS_NLERR(log, "Geneve opt len %d is not a multiple of 4.",
nla_len(a));
return -EINVAL;
}
/* We need to record the length of the options passed
* down, otherwise packets with the same format but
* additional options will be silently matched.
*/
if (!is_mask) {
SW_FLOW_KEY_PUT(match, tun_opts_len, nla_len(a),
false);
} else {
/* This is somewhat unusual because it looks at
* both the key and mask while parsing the
* attributes (and by extension assumes the key
* is parsed first). Normally, we would verify
* that each is the correct length and that the
* attributes line up in the validate function.
* However, that is difficult because this is
* variable length and we won't have the
* information later.
*/
if (match->key->tun_opts_len != nla_len(a)) {
OVS_NLERR(log, "Geneve option len %d != mask len %d",
match->key->tun_opts_len, nla_len(a));
return -EINVAL;
}
SW_FLOW_KEY_PUT(match, tun_opts_len, 0xff, true);
}
opt_key_offset = TUN_METADATA_OFFSET(nla_len(a));
SW_FLOW_KEY_MEMCPY_OFFSET(match, opt_key_offset, nla_data(a),
nla_len(a), is_mask);
return 0;
}
static int vxlan_tun_opt_from_nlattr(const struct nlattr *attr,
struct sw_flow_match *match, bool is_mask,
bool log)
{
struct nlattr *a;
int rem;
unsigned long opt_key_offset;
struct vxlan_metadata opts;
BUILD_BUG_ON(sizeof(opts) > sizeof(match->key->tun_opts));
memset(&opts, 0, sizeof(opts));
nla_for_each_nested(a, attr, rem) {
int type = nla_type(a);
if (type > OVS_VXLAN_EXT_MAX) {
OVS_NLERR(log, "VXLAN extension %d out of range max %d",
type, OVS_VXLAN_EXT_MAX);
return -EINVAL;
}
if (!check_attr_len(nla_len(a),
ovs_vxlan_ext_key_lens[type].len)) {
OVS_NLERR(log, "VXLAN extension %d has unexpected len %d expected %d",
type, nla_len(a),
ovs_vxlan_ext_key_lens[type].len);
return -EINVAL;
}
switch (type) {
case OVS_VXLAN_EXT_GBP:
opts.gbp = nla_get_u32(a);
break;
default:
OVS_NLERR(log, "Unknown VXLAN extension attribute %d",
type);
return -EINVAL;
}
}
if (rem) {
OVS_NLERR(log, "VXLAN extension message has %d unknown bytes.",
rem);
return -EINVAL;
}
if (!is_mask)
SW_FLOW_KEY_PUT(match, tun_opts_len, sizeof(opts), false);
else
SW_FLOW_KEY_PUT(match, tun_opts_len, 0xff, true);
opt_key_offset = TUN_METADATA_OFFSET(sizeof(opts));
SW_FLOW_KEY_MEMCPY_OFFSET(match, opt_key_offset, &opts, sizeof(opts),
is_mask);
return 0;
}
static int erspan_tun_opt_from_nlattr(const struct nlattr *a,
struct sw_flow_match *match, bool is_mask,
bool log)
{
unsigned long opt_key_offset;
BUILD_BUG_ON(sizeof(struct erspan_metadata) >
sizeof(match->key->tun_opts));
if (nla_len(a) > sizeof(match->key->tun_opts)) {
OVS_NLERR(log, "ERSPAN option length err (len %d, max %zu).",
nla_len(a), sizeof(match->key->tun_opts));
return -EINVAL;
}
if (!is_mask)
SW_FLOW_KEY_PUT(match, tun_opts_len,
sizeof(struct erspan_metadata), false);
else
SW_FLOW_KEY_PUT(match, tun_opts_len, 0xff, true);
opt_key_offset = TUN_METADATA_OFFSET(nla_len(a));
SW_FLOW_KEY_MEMCPY_OFFSET(match, opt_key_offset, nla_data(a),
nla_len(a), is_mask);
return 0;
}
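/* Parse OVS_TUNNEL_KEY_ATTR_* attributes into the tunnel key of @match.
 * Returns the attribute type of the tunnel options that were parsed
 * (GENEVE, VXLAN or ERSPAN), 0 if no options were present, or a negative
 * errno value on error.
 */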
static int ip_tun_from_nlattr(const struct nlattr *attr,
struct sw_flow_match *match, bool is_mask,
bool log)
{
bool ttl = false, ipv4 = false, ipv6 = false;
bool info_bridge_mode = false;
__be16 tun_flags = 0;
int opts_type = 0;
struct nlattr *a;
int rem;
nla_for_each_nested(a, attr, rem) {
int type = nla_type(a);
int err;
if (type > OVS_TUNNEL_KEY_ATTR_MAX) {
OVS_NLERR(log, "Tunnel attr %d out of range max %d",
type, OVS_TUNNEL_KEY_ATTR_MAX);
return -EINVAL;
}
if (!check_attr_len(nla_len(a),
ovs_tunnel_key_lens[type].len)) {
OVS_NLERR(log, "Tunnel attr %d has unexpected len %d expected %d",
type, nla_len(a), ovs_tunnel_key_lens[type].len);
return -EINVAL;
}
switch (type) {
case OVS_TUNNEL_KEY_ATTR_ID:
SW_FLOW_KEY_PUT(match, tun_key.tun_id,
nla_get_be64(a), is_mask);
tun_flags |= TUNNEL_KEY;
break;
case OVS_TUNNEL_KEY_ATTR_IPV4_SRC:
SW_FLOW_KEY_PUT(match, tun_key.u.ipv4.src,
nla_get_in_addr(a), is_mask);
ipv4 = true;
break;
case OVS_TUNNEL_KEY_ATTR_IPV4_DST:
SW_FLOW_KEY_PUT(match, tun_key.u.ipv4.dst,
nla_get_in_addr(a), is_mask);
ipv4 = true;
break;
case OVS_TUNNEL_KEY_ATTR_IPV6_SRC:
SW_FLOW_KEY_PUT(match, tun_key.u.ipv6.src,
nla_get_in6_addr(a), is_mask);
ipv6 = true;
break;
case OVS_TUNNEL_KEY_ATTR_IPV6_DST:
SW_FLOW_KEY_PUT(match, tun_key.u.ipv6.dst,
nla_get_in6_addr(a), is_mask);
ipv6 = true;
break;
case OVS_TUNNEL_KEY_ATTR_TOS:
SW_FLOW_KEY_PUT(match, tun_key.tos,
nla_get_u8(a), is_mask);
break;
case OVS_TUNNEL_KEY_ATTR_TTL:
SW_FLOW_KEY_PUT(match, tun_key.ttl,
nla_get_u8(a), is_mask);
ttl = true;
break;
case OVS_TUNNEL_KEY_ATTR_DONT_FRAGMENT:
tun_flags |= TUNNEL_DONT_FRAGMENT;
break;
case OVS_TUNNEL_KEY_ATTR_CSUM:
tun_flags |= TUNNEL_CSUM;
break;
case OVS_TUNNEL_KEY_ATTR_TP_SRC:
SW_FLOW_KEY_PUT(match, tun_key.tp_src,
nla_get_be16(a), is_mask);
break;
case OVS_TUNNEL_KEY_ATTR_TP_DST:
SW_FLOW_KEY_PUT(match, tun_key.tp_dst,
nla_get_be16(a), is_mask);
break;
case OVS_TUNNEL_KEY_ATTR_OAM:
tun_flags |= TUNNEL_OAM;
break;
case OVS_TUNNEL_KEY_ATTR_GENEVE_OPTS:
if (opts_type) {
OVS_NLERR(log, "Multiple metadata blocks provided");
return -EINVAL;
}
err = genev_tun_opt_from_nlattr(a, match, is_mask, log);
if (err)
return err;
tun_flags |= TUNNEL_GENEVE_OPT;
opts_type = type;
break;
case OVS_TUNNEL_KEY_ATTR_VXLAN_OPTS:
if (opts_type) {
OVS_NLERR(log, "Multiple metadata blocks provided");
return -EINVAL;
}
err = vxlan_tun_opt_from_nlattr(a, match, is_mask, log);
if (err)
return err;
tun_flags |= TUNNEL_VXLAN_OPT;
opts_type = type;
break;
case OVS_TUNNEL_KEY_ATTR_PAD:
break;
case OVS_TUNNEL_KEY_ATTR_ERSPAN_OPTS:
if (opts_type) {
OVS_NLERR(log, "Multiple metadata blocks provided");
return -EINVAL;
}
err = erspan_tun_opt_from_nlattr(a, match, is_mask,
log);
if (err)
return err;
tun_flags |= TUNNEL_ERSPAN_OPT;
opts_type = type;
break;
case OVS_TUNNEL_KEY_ATTR_IPV4_INFO_BRIDGE:
info_bridge_mode = true;
ipv4 = true;
break;
default:
OVS_NLERR(log, "Unknown IP tunnel attribute %d",
type);
return -EINVAL;
}
}
SW_FLOW_KEY_PUT(match, tun_key.tun_flags, tun_flags, is_mask);
if (is_mask)
SW_FLOW_KEY_MEMSET_FIELD(match, tun_proto, 0xff, true);
else
SW_FLOW_KEY_PUT(match, tun_proto, ipv6 ? AF_INET6 : AF_INET,
false);
if (rem > 0) {
OVS_NLERR(log, "IP tunnel attribute has %d unknown bytes.",
rem);
return -EINVAL;
}
if (ipv4 && ipv6) {
OVS_NLERR(log, "Mixed IPv4 and IPv6 tunnel attributes");
return -EINVAL;
}
if (!is_mask) {
if (!ipv4 && !ipv6) {
OVS_NLERR(log, "IP tunnel dst address not specified");
return -EINVAL;
}
if (ipv4) {
if (info_bridge_mode) {
if (match->key->tun_key.u.ipv4.src ||
match->key->tun_key.u.ipv4.dst ||
match->key->tun_key.tp_src ||
match->key->tun_key.tp_dst ||
match->key->tun_key.ttl ||
match->key->tun_key.tos ||
tun_flags & ~TUNNEL_KEY) {
OVS_NLERR(log, "IPv4 tun info is not correct");
return -EINVAL;
}
} else if (!match->key->tun_key.u.ipv4.dst) {
OVS_NLERR(log, "IPv4 tunnel dst address is zero");
return -EINVAL;
}
}
if (ipv6 && ipv6_addr_any(&match->key->tun_key.u.ipv6.dst)) {
OVS_NLERR(log, "IPv6 tunnel dst address is zero");
return -EINVAL;
}
if (!ttl && !info_bridge_mode) {
OVS_NLERR(log, "IP tunnel TTL not specified.");
return -EINVAL;
}
}
return opts_type;
}
static int vxlan_opt_to_nlattr(struct sk_buff *skb,
const void *tun_opts, int swkey_tun_opts_len)
{
const struct vxlan_metadata *opts = tun_opts;
struct nlattr *nla;
nla = nla_nest_start_noflag(skb, OVS_TUNNEL_KEY_ATTR_VXLAN_OPTS);
if (!nla)
return -EMSGSIZE;
if (nla_put_u32(skb, OVS_VXLAN_EXT_GBP, opts->gbp) < 0)
return -EMSGSIZE;
nla_nest_end(skb, nla);
return 0;
}
static int __ip_tun_to_nlattr(struct sk_buff *skb,
const struct ip_tunnel_key *output,
const void *tun_opts, int swkey_tun_opts_len,
unsigned short tun_proto, u8 mode)
{
if (output->tun_flags & TUNNEL_KEY &&
nla_put_be64(skb, OVS_TUNNEL_KEY_ATTR_ID, output->tun_id,
OVS_TUNNEL_KEY_ATTR_PAD))
return -EMSGSIZE;
if (mode & IP_TUNNEL_INFO_BRIDGE)
return nla_put_flag(skb, OVS_TUNNEL_KEY_ATTR_IPV4_INFO_BRIDGE)
? -EMSGSIZE : 0;
switch (tun_proto) {
case AF_INET:
if (output->u.ipv4.src &&
nla_put_in_addr(skb, OVS_TUNNEL_KEY_ATTR_IPV4_SRC,
output->u.ipv4.src))
return -EMSGSIZE;
if (output->u.ipv4.dst &&
nla_put_in_addr(skb, OVS_TUNNEL_KEY_ATTR_IPV4_DST,
output->u.ipv4.dst))
return -EMSGSIZE;
break;
case AF_INET6:
if (!ipv6_addr_any(&output->u.ipv6.src) &&
nla_put_in6_addr(skb, OVS_TUNNEL_KEY_ATTR_IPV6_SRC,
&output->u.ipv6.src))
return -EMSGSIZE;
if (!ipv6_addr_any(&output->u.ipv6.dst) &&
nla_put_in6_addr(skb, OVS_TUNNEL_KEY_ATTR_IPV6_DST,
&output->u.ipv6.dst))
return -EMSGSIZE;
break;
}
if (output->tos &&
nla_put_u8(skb, OVS_TUNNEL_KEY_ATTR_TOS, output->tos))
return -EMSGSIZE;
if (nla_put_u8(skb, OVS_TUNNEL_KEY_ATTR_TTL, output->ttl))
return -EMSGSIZE;
if ((output->tun_flags & TUNNEL_DONT_FRAGMENT) &&
nla_put_flag(skb, OVS_TUNNEL_KEY_ATTR_DONT_FRAGMENT))
return -EMSGSIZE;
if ((output->tun_flags & TUNNEL_CSUM) &&
nla_put_flag(skb, OVS_TUNNEL_KEY_ATTR_CSUM))
return -EMSGSIZE;
if (output->tp_src &&
nla_put_be16(skb, OVS_TUNNEL_KEY_ATTR_TP_SRC, output->tp_src))
return -EMSGSIZE;
if (output->tp_dst &&
nla_put_be16(skb, OVS_TUNNEL_KEY_ATTR_TP_DST, output->tp_dst))
return -EMSGSIZE;
if ((output->tun_flags & TUNNEL_OAM) &&
nla_put_flag(skb, OVS_TUNNEL_KEY_ATTR_OAM))
return -EMSGSIZE;
if (swkey_tun_opts_len) {
if (output->tun_flags & TUNNEL_GENEVE_OPT &&
nla_put(skb, OVS_TUNNEL_KEY_ATTR_GENEVE_OPTS,
swkey_tun_opts_len, tun_opts))
return -EMSGSIZE;
else if (output->tun_flags & TUNNEL_VXLAN_OPT &&
vxlan_opt_to_nlattr(skb, tun_opts, swkey_tun_opts_len))
return -EMSGSIZE;
else if (output->tun_flags & TUNNEL_ERSPAN_OPT &&
nla_put(skb, OVS_TUNNEL_KEY_ATTR_ERSPAN_OPTS,
swkey_tun_opts_len, tun_opts))
return -EMSGSIZE;
}
return 0;
}
static int ip_tun_to_nlattr(struct sk_buff *skb,
const struct ip_tunnel_key *output,
const void *tun_opts, int swkey_tun_opts_len,
unsigned short tun_proto, u8 mode)
{
struct nlattr *nla;
int err;
nla = nla_nest_start_noflag(skb, OVS_KEY_ATTR_TUNNEL);
if (!nla)
return -EMSGSIZE;
err = __ip_tun_to_nlattr(skb, output, tun_opts, swkey_tun_opts_len,
tun_proto, mode);
if (err)
return err;
nla_nest_end(skb, nla);
return 0;
}
int ovs_nla_put_tunnel_info(struct sk_buff *skb,
struct ip_tunnel_info *tun_info)
{
return __ip_tun_to_nlattr(skb, &tun_info->key,
ip_tunnel_info_opts(tun_info),
tun_info->options_len,
ip_tunnel_info_af(tun_info), tun_info->mode);
}
static int encode_vlan_from_nlattrs(struct sw_flow_match *match,
const struct nlattr *a[],
bool is_mask, bool inner)
{
__be16 tci = 0;
__be16 tpid = 0;
if (a[OVS_KEY_ATTR_VLAN])
tci = nla_get_be16(a[OVS_KEY_ATTR_VLAN]);
if (a[OVS_KEY_ATTR_ETHERTYPE])
tpid = nla_get_be16(a[OVS_KEY_ATTR_ETHERTYPE]);
if (likely(!inner)) {
SW_FLOW_KEY_PUT(match, eth.vlan.tpid, tpid, is_mask);
SW_FLOW_KEY_PUT(match, eth.vlan.tci, tci, is_mask);
} else {
SW_FLOW_KEY_PUT(match, eth.cvlan.tpid, tpid, is_mask);
SW_FLOW_KEY_PUT(match, eth.cvlan.tci, tci, is_mask);
}
return 0;
}
static int validate_vlan_from_nlattrs(const struct sw_flow_match *match,
u64 key_attrs, bool inner,
const struct nlattr **a, bool log)
{
__be16 tci = 0;
if (!((key_attrs & (1 << OVS_KEY_ATTR_ETHERNET)) &&
(key_attrs & (1 << OVS_KEY_ATTR_ETHERTYPE)) &&
eth_type_vlan(nla_get_be16(a[OVS_KEY_ATTR_ETHERTYPE])))) {
/* Not a VLAN. */
return 0;
}
if (!((key_attrs & (1 << OVS_KEY_ATTR_VLAN)) &&
(key_attrs & (1 << OVS_KEY_ATTR_ENCAP)))) {
OVS_NLERR(log, "Invalid %s frame", (inner) ? "C-VLAN" : "VLAN");
return -EINVAL;
}
if (a[OVS_KEY_ATTR_VLAN])
tci = nla_get_be16(a[OVS_KEY_ATTR_VLAN]);
if (!(tci & htons(VLAN_CFI_MASK))) {
if (tci) {
OVS_NLERR(log, "%s TCI does not have VLAN_CFI_MASK bit set.",
(inner) ? "C-VLAN" : "VLAN");
return -EINVAL;
} else if (nla_len(a[OVS_KEY_ATTR_ENCAP])) {
/* Corner case for truncated VLAN header. */
OVS_NLERR(log, "Truncated %s header has non-zero encap attribute.",
(inner) ? "C-VLAN" : "VLAN");
return -EINVAL;
}
}
return 1;
}
static int validate_vlan_mask_from_nlattrs(const struct sw_flow_match *match,
u64 key_attrs, bool inner,
const struct nlattr **a, bool log)
{
__be16 tci = 0;
__be16 tpid = 0;
bool encap_valid = !!(match->key->eth.vlan.tci &
htons(VLAN_CFI_MASK));
bool i_encap_valid = !!(match->key->eth.cvlan.tci &
htons(VLAN_CFI_MASK));
if (!(key_attrs & (1 << OVS_KEY_ATTR_ENCAP))) {
/* Not a VLAN. */
return 0;
}
if ((!inner && !encap_valid) || (inner && !i_encap_valid)) {
OVS_NLERR(log, "Encap mask attribute is set for non-%s frame.",
(inner) ? "C-VLAN" : "VLAN");
return -EINVAL;
}
if (a[OVS_KEY_ATTR_VLAN])
tci = nla_get_be16(a[OVS_KEY_ATTR_VLAN]);
if (a[OVS_KEY_ATTR_ETHERTYPE])
tpid = nla_get_be16(a[OVS_KEY_ATTR_ETHERTYPE]);
if (tpid != htons(0xffff)) {
OVS_NLERR(log, "Must have an exact match on %s TPID (mask=%x).",
(inner) ? "C-VLAN" : "VLAN", ntohs(tpid));
return -EINVAL;
}
if (!(tci & htons(VLAN_CFI_MASK))) {
OVS_NLERR(log, "%s TCI mask does not have exact match for VLAN_CFI_MASK bit.",
(inner) ? "C-VLAN" : "VLAN");
return -EINVAL;
}
return 1;
}
static int __parse_vlan_from_nlattrs(struct sw_flow_match *match,
u64 *key_attrs, bool inner,
const struct nlattr **a, bool is_mask,
bool log)
{
int err;
const struct nlattr *encap;
if (!is_mask)
err = validate_vlan_from_nlattrs(match, *key_attrs, inner,
a, log);
else
err = validate_vlan_mask_from_nlattrs(match, *key_attrs, inner,
a, log);
if (err <= 0)
return err;
err = encode_vlan_from_nlattrs(match, a, is_mask, inner);
if (err)
return err;
*key_attrs &= ~(1 << OVS_KEY_ATTR_ENCAP);
*key_attrs &= ~(1 << OVS_KEY_ATTR_VLAN);
*key_attrs &= ~(1 << OVS_KEY_ATTR_ETHERTYPE);
encap = a[OVS_KEY_ATTR_ENCAP];
if (!is_mask)
err = parse_flow_nlattrs(encap, a, key_attrs, log);
else
err = parse_flow_mask_nlattrs(encap, a, key_attrs, log);
return err;
}
static int parse_vlan_from_nlattrs(struct sw_flow_match *match,
u64 *key_attrs, const struct nlattr **a,
bool is_mask, bool log)
{
int err;
bool encap_valid = false;
err = __parse_vlan_from_nlattrs(match, key_attrs, false, a,
is_mask, log);
if (err)
return err;
encap_valid = !!(match->key->eth.vlan.tci & htons(VLAN_CFI_MASK));
if (encap_valid) {
err = __parse_vlan_from_nlattrs(match, key_attrs, true, a,
is_mask, log);
if (err)
return err;
}
return 0;
}
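/* EtherType handling: masks must exact-match the EtherType, and keys must
 * carry a value of at least ETH_P_802_3_MIN.
 */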
static int parse_eth_type_from_nlattrs(struct sw_flow_match *match,
u64 *attrs, const struct nlattr **a,
bool is_mask, bool log)
{
__be16 eth_type;
eth_type = nla_get_be16(a[OVS_KEY_ATTR_ETHERTYPE]);
if (is_mask) {
/* Always exact match EtherType. */
eth_type = htons(0xffff);
} else if (!eth_proto_is_802_3(eth_type)) {
OVS_NLERR(log, "EtherType %x is less than min %x",
ntohs(eth_type), ETH_P_802_3_MIN);
return -EINVAL;
}
SW_FLOW_KEY_PUT(match, eth.type, eth_type, is_mask);
*attrs &= ~(1 << OVS_KEY_ATTR_ETHERTYPE);
return 0;
}
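/* Extract the metadata portion of the flow key (hash, recirc_id, priority,
 * in_port, skb_mark, tunnel and conntrack state) from the netlink attributes,
 * clearing the corresponding bits in '*attrs' as they are consumed.
 */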
static int metadata_from_nlattrs(struct net *net, struct sw_flow_match *match,
u64 *attrs, const struct nlattr **a,
bool is_mask, bool log)
{
u8 mac_proto = MAC_PROTO_ETHERNET;
if (*attrs & (1 << OVS_KEY_ATTR_DP_HASH)) {
u32 hash_val = nla_get_u32(a[OVS_KEY_ATTR_DP_HASH]);
SW_FLOW_KEY_PUT(match, ovs_flow_hash, hash_val, is_mask);
*attrs &= ~(1 << OVS_KEY_ATTR_DP_HASH);
}
if (*attrs & (1 << OVS_KEY_ATTR_RECIRC_ID)) {
u32 recirc_id = nla_get_u32(a[OVS_KEY_ATTR_RECIRC_ID]);
SW_FLOW_KEY_PUT(match, recirc_id, recirc_id, is_mask);
*attrs &= ~(1 << OVS_KEY_ATTR_RECIRC_ID);
}
if (*attrs & (1 << OVS_KEY_ATTR_PRIORITY)) {
SW_FLOW_KEY_PUT(match, phy.priority,
nla_get_u32(a[OVS_KEY_ATTR_PRIORITY]), is_mask);
*attrs &= ~(1 << OVS_KEY_ATTR_PRIORITY);
}
if (*attrs & (1 << OVS_KEY_ATTR_IN_PORT)) {
u32 in_port = nla_get_u32(a[OVS_KEY_ATTR_IN_PORT]);
if (is_mask) {
in_port = 0xffffffff; /* Always exact match in_port. */
} else if (in_port >= DP_MAX_PORTS) {
OVS_NLERR(log, "Port %d exceeds max allowable %d",
in_port, DP_MAX_PORTS);
return -EINVAL;
}
SW_FLOW_KEY_PUT(match, phy.in_port, in_port, is_mask);
*attrs &= ~(1 << OVS_KEY_ATTR_IN_PORT);
} else if (!is_mask) {
SW_FLOW_KEY_PUT(match, phy.in_port, DP_MAX_PORTS, is_mask);
}
if (*attrs & (1 << OVS_KEY_ATTR_SKB_MARK)) {
uint32_t mark = nla_get_u32(a[OVS_KEY_ATTR_SKB_MARK]);
SW_FLOW_KEY_PUT(match, phy.skb_mark, mark, is_mask);
*attrs &= ~(1 << OVS_KEY_ATTR_SKB_MARK);
}
if (*attrs & (1 << OVS_KEY_ATTR_TUNNEL)) {
if (ip_tun_from_nlattr(a[OVS_KEY_ATTR_TUNNEL], match,
is_mask, log) < 0)
return -EINVAL;
*attrs &= ~(1 << OVS_KEY_ATTR_TUNNEL);
}
if (*attrs & (1 << OVS_KEY_ATTR_CT_STATE) &&
ovs_ct_verify(net, OVS_KEY_ATTR_CT_STATE)) {
u32 ct_state = nla_get_u32(a[OVS_KEY_ATTR_CT_STATE]);
if (ct_state & ~CT_SUPPORTED_MASK) {
OVS_NLERR(log, "ct_state flags %08x unsupported",
ct_state);
return -EINVAL;
}
SW_FLOW_KEY_PUT(match, ct_state, ct_state, is_mask);
*attrs &= ~(1ULL << OVS_KEY_ATTR_CT_STATE);
}
if (*attrs & (1 << OVS_KEY_ATTR_CT_ZONE) &&
ovs_ct_verify(net, OVS_KEY_ATTR_CT_ZONE)) {
u16 ct_zone = nla_get_u16(a[OVS_KEY_ATTR_CT_ZONE]);
SW_FLOW_KEY_PUT(match, ct_zone, ct_zone, is_mask);
*attrs &= ~(1ULL << OVS_KEY_ATTR_CT_ZONE);
}
if (*attrs & (1 << OVS_KEY_ATTR_CT_MARK) &&
ovs_ct_verify(net, OVS_KEY_ATTR_CT_MARK)) {
u32 mark = nla_get_u32(a[OVS_KEY_ATTR_CT_MARK]);
SW_FLOW_KEY_PUT(match, ct.mark, mark, is_mask);
*attrs &= ~(1ULL << OVS_KEY_ATTR_CT_MARK);
}
if (*attrs & (1 << OVS_KEY_ATTR_CT_LABELS) &&
ovs_ct_verify(net, OVS_KEY_ATTR_CT_LABELS)) {
const struct ovs_key_ct_labels *cl;
cl = nla_data(a[OVS_KEY_ATTR_CT_LABELS]);
SW_FLOW_KEY_MEMCPY(match, ct.labels, cl->ct_labels,
sizeof(*cl), is_mask);
*attrs &= ~(1ULL << OVS_KEY_ATTR_CT_LABELS);
}
if (*attrs & (1ULL << OVS_KEY_ATTR_CT_ORIG_TUPLE_IPV4)) {
const struct ovs_key_ct_tuple_ipv4 *ct;
ct = nla_data(a[OVS_KEY_ATTR_CT_ORIG_TUPLE_IPV4]);
SW_FLOW_KEY_PUT(match, ipv4.ct_orig.src, ct->ipv4_src, is_mask);
SW_FLOW_KEY_PUT(match, ipv4.ct_orig.dst, ct->ipv4_dst, is_mask);
SW_FLOW_KEY_PUT(match, ct.orig_tp.src, ct->src_port, is_mask);
SW_FLOW_KEY_PUT(match, ct.orig_tp.dst, ct->dst_port, is_mask);
SW_FLOW_KEY_PUT(match, ct_orig_proto, ct->ipv4_proto, is_mask);
*attrs &= ~(1ULL << OVS_KEY_ATTR_CT_ORIG_TUPLE_IPV4);
}
if (*attrs & (1ULL << OVS_KEY_ATTR_CT_ORIG_TUPLE_IPV6)) {
const struct ovs_key_ct_tuple_ipv6 *ct;
ct = nla_data(a[OVS_KEY_ATTR_CT_ORIG_TUPLE_IPV6]);
SW_FLOW_KEY_MEMCPY(match, ipv6.ct_orig.src, &ct->ipv6_src,
sizeof(match->key->ipv6.ct_orig.src),
is_mask);
SW_FLOW_KEY_MEMCPY(match, ipv6.ct_orig.dst, &ct->ipv6_dst,
sizeof(match->key->ipv6.ct_orig.dst),
is_mask);
SW_FLOW_KEY_PUT(match, ct.orig_tp.src, ct->src_port, is_mask);
SW_FLOW_KEY_PUT(match, ct.orig_tp.dst, ct->dst_port, is_mask);
SW_FLOW_KEY_PUT(match, ct_orig_proto, ct->ipv6_proto, is_mask);
*attrs &= ~(1ULL << OVS_KEY_ATTR_CT_ORIG_TUPLE_IPV6);
}
/* For layer 3 packets the Ethernet type is provided
* and treated as metadata but no MAC addresses are provided.
*/
if (!(*attrs & (1ULL << OVS_KEY_ATTR_ETHERNET)) &&
(*attrs & (1ULL << OVS_KEY_ATTR_ETHERTYPE)))
mac_proto = MAC_PROTO_NONE;
/* Always exact match mac_proto */
SW_FLOW_KEY_PUT(match, mac_proto, is_mask ? 0xff : mac_proto, is_mask);
if (mac_proto == MAC_PROTO_NONE)
return parse_eth_type_from_nlattrs(match, attrs, a, is_mask,
log);
return 0;
}
int nsh_hdr_from_nlattr(const struct nlattr *attr,
struct nshhdr *nh, size_t size)
{
struct nlattr *a;
int rem;
u8 flags = 0;
u8 ttl = 0;
int mdlen = 0;
/* validate_nsh() has already checked this, so we needn't do a
 * duplicate check here.
 */
if (size < NSH_BASE_HDR_LEN)
return -ENOBUFS;
nla_for_each_nested(a, attr, rem) {
int type = nla_type(a);
switch (type) {
case OVS_NSH_KEY_ATTR_BASE: {
const struct ovs_nsh_key_base *base = nla_data(a);
flags = base->flags;
ttl = base->ttl;
nh->np = base->np;
nh->mdtype = base->mdtype;
nh->path_hdr = base->path_hdr;
break;
}
case OVS_NSH_KEY_ATTR_MD1:
mdlen = nla_len(a);
if (mdlen > size - NSH_BASE_HDR_LEN)
return -ENOBUFS;
memcpy(&nh->md1, nla_data(a), mdlen);
break;
case OVS_NSH_KEY_ATTR_MD2:
mdlen = nla_len(a);
if (mdlen > size - NSH_BASE_HDR_LEN)
return -ENOBUFS;
memcpy(&nh->md2, nla_data(a), mdlen);
break;
default:
return -EINVAL;
}
}
/* nsh header length = NSH_BASE_HDR_LEN + mdlen */
nh->ver_flags_ttl_len = 0;
nsh_set_flags_ttl_len(nh, flags, ttl, NSH_BASE_HDR_LEN + mdlen);
return 0;
}
int nsh_key_from_nlattr(const struct nlattr *attr,
struct ovs_key_nsh *nsh, struct ovs_key_nsh *nsh_mask)
{
struct nlattr *a;
int rem;
/* validate_nsh() has already checked this, so we needn't do a
 * duplicate check here.
 */
nla_for_each_nested(a, attr, rem) {
int type = nla_type(a);
switch (type) {
case OVS_NSH_KEY_ATTR_BASE: {
const struct ovs_nsh_key_base *base = nla_data(a);
const struct ovs_nsh_key_base *base_mask = base + 1;
nsh->base = *base;
nsh_mask->base = *base_mask;
break;
}
case OVS_NSH_KEY_ATTR_MD1: {
const struct ovs_nsh_key_md1 *md1 = nla_data(a);
const struct ovs_nsh_key_md1 *md1_mask = md1 + 1;
memcpy(nsh->context, md1->context, sizeof(*md1));
memcpy(nsh_mask->context, md1_mask->context,
sizeof(*md1_mask));
break;
}
case OVS_NSH_KEY_ATTR_MD2:
/* Not supported yet */
return -ENOTSUPP;
default:
return -EINVAL;
}
}
return 0;
}
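/* Validate the nested NSH attributes and store them into 'match'; also used
 * by validate_nsh() with a scratch match to check push_nsh and set(nsh)
 * arguments.
 */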
static int nsh_key_put_from_nlattr(const struct nlattr *attr,
struct sw_flow_match *match, bool is_mask,
bool is_push_nsh, bool log)
{
struct nlattr *a;
int rem;
bool has_base = false;
bool has_md1 = false;
bool has_md2 = false;
u8 mdtype = 0;
int mdlen = 0;
if (WARN_ON(is_push_nsh && is_mask))
return -EINVAL;
nla_for_each_nested(a, attr, rem) {
int type = nla_type(a);
int i;
if (type > OVS_NSH_KEY_ATTR_MAX) {
OVS_NLERR(log, "nsh attr %d is out of range max %d",
type, OVS_NSH_KEY_ATTR_MAX);
return -EINVAL;
}
if (!check_attr_len(nla_len(a),
ovs_nsh_key_attr_lens[type].len)) {
OVS_NLERR(
log,
"nsh attr %d has unexpected len %d expected %d",
type,
nla_len(a),
ovs_nsh_key_attr_lens[type].len
);
return -EINVAL;
}
switch (type) {
case OVS_NSH_KEY_ATTR_BASE: {
const struct ovs_nsh_key_base *base = nla_data(a);
has_base = true;
mdtype = base->mdtype;
SW_FLOW_KEY_PUT(match, nsh.base.flags,
base->flags, is_mask);
SW_FLOW_KEY_PUT(match, nsh.base.ttl,
base->ttl, is_mask);
SW_FLOW_KEY_PUT(match, nsh.base.mdtype,
base->mdtype, is_mask);
SW_FLOW_KEY_PUT(match, nsh.base.np,
base->np, is_mask);
SW_FLOW_KEY_PUT(match, nsh.base.path_hdr,
base->path_hdr, is_mask);
break;
}
case OVS_NSH_KEY_ATTR_MD1: {
const struct ovs_nsh_key_md1 *md1 = nla_data(a);
has_md1 = true;
for (i = 0; i < NSH_MD1_CONTEXT_SIZE; i++)
SW_FLOW_KEY_PUT(match, nsh.context[i],
md1->context[i], is_mask);
break;
}
case OVS_NSH_KEY_ATTR_MD2:
if (!is_push_nsh) /* MD type 2 is not supported yet */
return -ENOTSUPP;
has_md2 = true;
mdlen = nla_len(a);
if (mdlen > NSH_CTX_HDRS_MAX_LEN || mdlen <= 0) {
OVS_NLERR(
log,
"Invalid MD length %d for MD type %d",
mdlen,
mdtype
);
return -EINVAL;
}
break;
default:
OVS_NLERR(log, "Unknown nsh attribute %d",
type);
return -EINVAL;
}
}
if (rem > 0) {
OVS_NLERR(log, "nsh attribute has %d unknown bytes.", rem);
return -EINVAL;
}
if (has_md1 && has_md2) {
OVS_NLERR(
1,
"invalid nsh attribute: md1 and md2 are exclusive."
);
return -EINVAL;
}
if (!is_mask) {
if ((has_md1 && mdtype != NSH_M_TYPE1) ||
(has_md2 && mdtype != NSH_M_TYPE2)) {
OVS_NLERR(1, "nsh attribute has unmatched MD type %d.",
mdtype);
return -EINVAL;
}
if (is_push_nsh &&
(!has_base || (!has_md1 && !has_md2))) {
OVS_NLERR(
1,
"push_nsh: missing base or metadata attributes"
);
return -EINVAL;
}
}
return 0;
}
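/* Fill in the remaining (packet-derived) parts of the flow key or mask from
 * the netlink attributes left over after metadata_from_nlattrs().
 */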
static int ovs_key_from_nlattrs(struct net *net, struct sw_flow_match *match,
u64 attrs, const struct nlattr **a,
bool is_mask, bool log)
{
int err;
err = metadata_from_nlattrs(net, match, &attrs, a, is_mask, log);
if (err)
return err;
if (attrs & (1 << OVS_KEY_ATTR_ETHERNET)) {
const struct ovs_key_ethernet *eth_key;
eth_key = nla_data(a[OVS_KEY_ATTR_ETHERNET]);
SW_FLOW_KEY_MEMCPY(match, eth.src,
eth_key->eth_src, ETH_ALEN, is_mask);
SW_FLOW_KEY_MEMCPY(match, eth.dst,
eth_key->eth_dst, ETH_ALEN, is_mask);
attrs &= ~(1 << OVS_KEY_ATTR_ETHERNET);
if (attrs & (1 << OVS_KEY_ATTR_VLAN)) {
/* VLAN attribute is always parsed before getting here since it
* may occur multiple times.
*/
OVS_NLERR(log, "VLAN attribute unexpected.");
return -EINVAL;
}
if (attrs & (1 << OVS_KEY_ATTR_ETHERTYPE)) {
err = parse_eth_type_from_nlattrs(match, &attrs, a, is_mask,
log);
if (err)
return err;
} else if (!is_mask) {
SW_FLOW_KEY_PUT(match, eth.type, htons(ETH_P_802_2), is_mask);
}
} else if (!match->key->eth.type) {
OVS_NLERR(log, "Either Ethernet header or EtherType is required.");
return -EINVAL;
}
if (attrs & (1 << OVS_KEY_ATTR_IPV4)) {
const struct ovs_key_ipv4 *ipv4_key;
ipv4_key = nla_data(a[OVS_KEY_ATTR_IPV4]);
if (!is_mask && ipv4_key->ipv4_frag > OVS_FRAG_TYPE_MAX) {
OVS_NLERR(log, "IPv4 frag type %d is out of range max %d",
ipv4_key->ipv4_frag, OVS_FRAG_TYPE_MAX);
return -EINVAL;
}
SW_FLOW_KEY_PUT(match, ip.proto,
ipv4_key->ipv4_proto, is_mask);
SW_FLOW_KEY_PUT(match, ip.tos,
ipv4_key->ipv4_tos, is_mask);
SW_FLOW_KEY_PUT(match, ip.ttl,
ipv4_key->ipv4_ttl, is_mask);
SW_FLOW_KEY_PUT(match, ip.frag,
ipv4_key->ipv4_frag, is_mask);
SW_FLOW_KEY_PUT(match, ipv4.addr.src,
ipv4_key->ipv4_src, is_mask);
SW_FLOW_KEY_PUT(match, ipv4.addr.dst,
ipv4_key->ipv4_dst, is_mask);
attrs &= ~(1 << OVS_KEY_ATTR_IPV4);
}
if (attrs & (1 << OVS_KEY_ATTR_IPV6)) {
const struct ovs_key_ipv6 *ipv6_key;
ipv6_key = nla_data(a[OVS_KEY_ATTR_IPV6]);
if (!is_mask && ipv6_key->ipv6_frag > OVS_FRAG_TYPE_MAX) {
OVS_NLERR(log, "IPv6 frag type %d is out of range max %d",
ipv6_key->ipv6_frag, OVS_FRAG_TYPE_MAX);
return -EINVAL;
}
if (!is_mask && ipv6_key->ipv6_label & htonl(0xFFF00000)) {
OVS_NLERR(log, "IPv6 flow label %x is out of range (max=%x)",
ntohl(ipv6_key->ipv6_label), (1 << 20) - 1);
return -EINVAL;
}
SW_FLOW_KEY_PUT(match, ipv6.label,
ipv6_key->ipv6_label, is_mask);
SW_FLOW_KEY_PUT(match, ip.proto,
ipv6_key->ipv6_proto, is_mask);
SW_FLOW_KEY_PUT(match, ip.tos,
ipv6_key->ipv6_tclass, is_mask);
SW_FLOW_KEY_PUT(match, ip.ttl,
ipv6_key->ipv6_hlimit, is_mask);
SW_FLOW_KEY_PUT(match, ip.frag,
ipv6_key->ipv6_frag, is_mask);
SW_FLOW_KEY_MEMCPY(match, ipv6.addr.src,
ipv6_key->ipv6_src,
sizeof(match->key->ipv6.addr.src),
is_mask);
SW_FLOW_KEY_MEMCPY(match, ipv6.addr.dst,
ipv6_key->ipv6_dst,
sizeof(match->key->ipv6.addr.dst),
is_mask);
attrs &= ~(1 << OVS_KEY_ATTR_IPV6);
}
if (attrs & (1ULL << OVS_KEY_ATTR_IPV6_EXTHDRS)) {
const struct ovs_key_ipv6_exthdrs *ipv6_exthdrs_key;
ipv6_exthdrs_key = nla_data(a[OVS_KEY_ATTR_IPV6_EXTHDRS]);
SW_FLOW_KEY_PUT(match, ipv6.exthdrs,
ipv6_exthdrs_key->hdrs, is_mask);
attrs &= ~(1ULL << OVS_KEY_ATTR_IPV6_EXTHDRS);
}
if (attrs & (1 << OVS_KEY_ATTR_ARP)) {
const struct ovs_key_arp *arp_key;
arp_key = nla_data(a[OVS_KEY_ATTR_ARP]);
if (!is_mask && (arp_key->arp_op & htons(0xff00))) {
OVS_NLERR(log, "Unknown ARP opcode (opcode=%d).",
arp_key->arp_op);
return -EINVAL;
}
SW_FLOW_KEY_PUT(match, ipv4.addr.src,
arp_key->arp_sip, is_mask);
SW_FLOW_KEY_PUT(match, ipv4.addr.dst,
arp_key->arp_tip, is_mask);
SW_FLOW_KEY_PUT(match, ip.proto,
ntohs(arp_key->arp_op), is_mask);
SW_FLOW_KEY_MEMCPY(match, ipv4.arp.sha,
arp_key->arp_sha, ETH_ALEN, is_mask);
SW_FLOW_KEY_MEMCPY(match, ipv4.arp.tha,
arp_key->arp_tha, ETH_ALEN, is_mask);
attrs &= ~(1 << OVS_KEY_ATTR_ARP);
}
if (attrs & (1 << OVS_KEY_ATTR_NSH)) {
if (nsh_key_put_from_nlattr(a[OVS_KEY_ATTR_NSH], match,
is_mask, false, log) < 0)
return -EINVAL;
attrs &= ~(1 << OVS_KEY_ATTR_NSH);
}
if (attrs & (1 << OVS_KEY_ATTR_MPLS)) {
const struct ovs_key_mpls *mpls_key;
u32 hdr_len;
u32 label_count, label_count_mask, i;
mpls_key = nla_data(a[OVS_KEY_ATTR_MPLS]);
hdr_len = nla_len(a[OVS_KEY_ATTR_MPLS]);
label_count = hdr_len / sizeof(struct ovs_key_mpls);
if (label_count == 0 || label_count > MPLS_LABEL_DEPTH ||
hdr_len % sizeof(struct ovs_key_mpls))
return -EINVAL;
label_count_mask = GENMASK(label_count - 1, 0);
for (i = 0 ; i < label_count; i++)
SW_FLOW_KEY_PUT(match, mpls.lse[i],
mpls_key[i].mpls_lse, is_mask);
SW_FLOW_KEY_PUT(match, mpls.num_labels_mask,
label_count_mask, is_mask);
attrs &= ~(1 << OVS_KEY_ATTR_MPLS);
}
if (attrs & (1 << OVS_KEY_ATTR_TCP)) {
const struct ovs_key_tcp *tcp_key;
tcp_key = nla_data(a[OVS_KEY_ATTR_TCP]);
SW_FLOW_KEY_PUT(match, tp.src, tcp_key->tcp_src, is_mask);
SW_FLOW_KEY_PUT(match, tp.dst, tcp_key->tcp_dst, is_mask);
attrs &= ~(1 << OVS_KEY_ATTR_TCP);
}
if (attrs & (1 << OVS_KEY_ATTR_TCP_FLAGS)) {
SW_FLOW_KEY_PUT(match, tp.flags,
nla_get_be16(a[OVS_KEY_ATTR_TCP_FLAGS]),
is_mask);
attrs &= ~(1 << OVS_KEY_ATTR_TCP_FLAGS);
}
if (attrs & (1 << OVS_KEY_ATTR_UDP)) {
const struct ovs_key_udp *udp_key;
udp_key = nla_data(a[OVS_KEY_ATTR_UDP]);
SW_FLOW_KEY_PUT(match, tp.src, udp_key->udp_src, is_mask);
SW_FLOW_KEY_PUT(match, tp.dst, udp_key->udp_dst, is_mask);
attrs &= ~(1 << OVS_KEY_ATTR_UDP);
}
if (attrs & (1 << OVS_KEY_ATTR_SCTP)) {
const struct ovs_key_sctp *sctp_key;
sctp_key = nla_data(a[OVS_KEY_ATTR_SCTP]);
SW_FLOW_KEY_PUT(match, tp.src, sctp_key->sctp_src, is_mask);
SW_FLOW_KEY_PUT(match, tp.dst, sctp_key->sctp_dst, is_mask);
attrs &= ~(1 << OVS_KEY_ATTR_SCTP);
}
if (attrs & (1 << OVS_KEY_ATTR_ICMP)) {
const struct ovs_key_icmp *icmp_key;
icmp_key = nla_data(a[OVS_KEY_ATTR_ICMP]);
SW_FLOW_KEY_PUT(match, tp.src,
htons(icmp_key->icmp_type), is_mask);
SW_FLOW_KEY_PUT(match, tp.dst,
htons(icmp_key->icmp_code), is_mask);
attrs &= ~(1 << OVS_KEY_ATTR_ICMP);
}
if (attrs & (1 << OVS_KEY_ATTR_ICMPV6)) {
const struct ovs_key_icmpv6 *icmpv6_key;
icmpv6_key = nla_data(a[OVS_KEY_ATTR_ICMPV6]);
SW_FLOW_KEY_PUT(match, tp.src,
htons(icmpv6_key->icmpv6_type), is_mask);
SW_FLOW_KEY_PUT(match, tp.dst,
htons(icmpv6_key->icmpv6_code), is_mask);
attrs &= ~(1 << OVS_KEY_ATTR_ICMPV6);
}
if (attrs & (1 << OVS_KEY_ATTR_ND)) {
const struct ovs_key_nd *nd_key;
nd_key = nla_data(a[OVS_KEY_ATTR_ND]);
SW_FLOW_KEY_MEMCPY(match, ipv6.nd.target,
nd_key->nd_target,
sizeof(match->key->ipv6.nd.target),
is_mask);
SW_FLOW_KEY_MEMCPY(match, ipv6.nd.sll,
nd_key->nd_sll, ETH_ALEN, is_mask);
SW_FLOW_KEY_MEMCPY(match, ipv6.nd.tll,
nd_key->nd_tll, ETH_ALEN, is_mask);
attrs &= ~(1 << OVS_KEY_ATTR_ND);
}
if (attrs != 0) {
OVS_NLERR(log, "Unknown key attributes %llx",
(unsigned long long)attrs);
return -EINVAL;
}
return 0;
}
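/* Recursively overwrite the payload of every attribute in a validated nested
 * attribute stream with 'val', clamping CT state to the supported bits; used
 * via mask_set_nlattr() to turn a copy of the key into an exact-match mask.
 */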
static void nlattr_set(struct nlattr *attr, u8 val,
const struct ovs_len_tbl *tbl)
{
struct nlattr *nla;
int rem;
/* The nlattr stream should already have been validated */
nla_for_each_nested(nla, attr, rem) {
if (tbl[nla_type(nla)].len == OVS_ATTR_NESTED)
nlattr_set(nla, val, tbl[nla_type(nla)].next ? : tbl);
else
memset(nla_data(nla), val, nla_len(nla));
if (nla_type(nla) == OVS_KEY_ATTR_CT_STATE)
*(u32 *)nla_data(nla) &= CT_SUPPORTED_MASK;
}
}
static void mask_set_nlattr(struct nlattr *attr, u8 val)
{
nlattr_set(attr, val, ovs_key_lens);
}
/**
* ovs_nla_get_match - parses Netlink attributes into a flow key and
* mask. In case the 'mask' is NULL, the flow is treated as an exact
* match flow. Otherwise, it is treated as a wildcarded flow, except
* that the mask does not include any don't-care bits.
* @net: Used to determine per-namespace field support.
* @match: receives the extracted flow match information.
* @nla_key: Netlink attribute holding nested %OVS_KEY_ATTR_* Netlink attribute
* sequence. The fields should be those of the packet that triggered the
* creation of this flow.
* @nla_mask: Optional. Netlink attribute holding nested %OVS_KEY_ATTR_*
* Netlink attributes that specify the mask field of the wildcarded flow.
* @log: Boolean to allow kernel error logging. Normally true, but when
* probing for feature compatibility this should be passed in as false to
* suppress unnecessary error logging.
*/
int ovs_nla_get_match(struct net *net, struct sw_flow_match *match,
const struct nlattr *nla_key,
const struct nlattr *nla_mask,
bool log)
{
const struct nlattr *a[OVS_KEY_ATTR_MAX + 1];
struct nlattr *newmask = NULL;
u64 key_attrs = 0;
u64 mask_attrs = 0;
int err;
err = parse_flow_nlattrs(nla_key, a, &key_attrs, log);
if (err)
return err;
err = parse_vlan_from_nlattrs(match, &key_attrs, a, false, log);
if (err)
return err;
err = ovs_key_from_nlattrs(net, match, key_attrs, a, false, log);
if (err)
return err;
if (match->mask) {
if (!nla_mask) {
/* Create an exact match mask. We need to set to 0xff
* all the 'match->mask' fields that have been touched
* in 'match->key'. We cannot simply memset
* 'match->mask', because padding bytes and fields not
* specified in 'match->key' should be left to 0.
* Instead, we use a stream of netlink attributes,
* copied from 'key' and set to 0xff.
* ovs_key_from_nlattrs() will take care of filling
* 'match->mask' appropriately.
*/
newmask = kmemdup(nla_key,
nla_total_size(nla_len(nla_key)),
GFP_KERNEL);
if (!newmask)
return -ENOMEM;
mask_set_nlattr(newmask, 0xff);
/* The userspace does not send tunnel attributes that
* are 0, but we should not wildcard them nonetheless.
*/
if (match->key->tun_proto)
SW_FLOW_KEY_MEMSET_FIELD(match, tun_key,
0xff, true);
nla_mask = newmask;
}
err = parse_flow_mask_nlattrs(nla_mask, a, &mask_attrs, log);
if (err)
goto free_newmask;
/* Always match on tci. */
SW_FLOW_KEY_PUT(match, eth.vlan.tci, htons(0xffff), true);
SW_FLOW_KEY_PUT(match, eth.cvlan.tci, htons(0xffff), true);
err = parse_vlan_from_nlattrs(match, &mask_attrs, a, true, log);
if (err)
goto free_newmask;
err = ovs_key_from_nlattrs(net, match, mask_attrs, a, true,
log);
if (err)
goto free_newmask;
}
if (!match_validate(match, key_attrs, mask_attrs, log))
err = -EINVAL;
free_newmask:
kfree(newmask);
return err;
}
static size_t get_ufid_len(const struct nlattr *attr, bool log)
{
size_t len;
if (!attr)
return 0;
len = nla_len(attr);
if (len < 1 || len > MAX_UFID_LENGTH) {
OVS_NLERR(log, "ufid size %u bytes exceeds the range (1, %d)",
nla_len(attr), MAX_UFID_LENGTH);
return 0;
}
return len;
}
/* Initializes 'sfid', returning true if 'attr' contains a valid UFID,
* or false otherwise.
*/
bool ovs_nla_get_ufid(struct sw_flow_id *sfid, const struct nlattr *attr,
bool log)
{
sfid->ufid_len = get_ufid_len(attr, log);
if (sfid->ufid_len)
memcpy(sfid->ufid, nla_data(attr), sfid->ufid_len);
return sfid->ufid_len;
}
int ovs_nla_get_identifier(struct sw_flow_id *sfid, const struct nlattr *ufid,
const struct sw_flow_key *key, bool log)
{
struct sw_flow_key *new_key;
if (ovs_nla_get_ufid(sfid, ufid, log))
return 0;
/* If UFID was not provided, use unmasked key. */
new_key = kmalloc(sizeof(*new_key), GFP_KERNEL);
if (!new_key)
return -ENOMEM;
memcpy(new_key, key, sizeof(*key));
sfid->unmasked_key = new_key;
return 0;
}
u32 ovs_nla_get_ufid_flags(const struct nlattr *attr)
{
return attr ? nla_get_u32(attr) : 0;
}
/**
* ovs_nla_get_flow_metadata - parses Netlink attributes into a flow key.
* @net: Network namespace.
* @key: Receives extracted in_port, priority, tun_key, skb_mark and conntrack
* metadata.
* @a: Array of netlink attributes holding parsed %OVS_KEY_ATTR_* Netlink
* attributes.
* @attrs: Bit mask for the netlink attributes included in @a.
* @log: Boolean to allow kernel error logging. Normally true, but when
* probing for feature compatibility this should be passed in as false to
* suppress unnecessary error logging.
*
* This parses a series of Netlink attributes that form a flow key, which must
* take the same form accepted by flow_from_nlattrs(), but only enough of it to
* get the metadata, that is, the parts of the flow key that cannot be
* extracted from the packet itself.
*
* This must be called before the packet key fields are filled in 'key'.
*/
int ovs_nla_get_flow_metadata(struct net *net,
const struct nlattr *a[OVS_KEY_ATTR_MAX + 1],
u64 attrs, struct sw_flow_key *key, bool log)
{
struct sw_flow_match match;
memset(&match, 0, sizeof(match));
match.key = key;
key->ct_state = 0;
key->ct_zone = 0;
key->ct_orig_proto = 0;
memset(&key->ct, 0, sizeof(key->ct));
memset(&key->ipv4.ct_orig, 0, sizeof(key->ipv4.ct_orig));
memset(&key->ipv6.ct_orig, 0, sizeof(key->ipv6.ct_orig));
key->phy.in_port = DP_MAX_PORTS;
return metadata_from_nlattrs(net, &match, &attrs, a, false, log);
}
static int ovs_nla_put_vlan(struct sk_buff *skb, const struct vlan_head *vh,
bool is_mask)
{
__be16 eth_type = !is_mask ? vh->tpid : htons(0xffff);
if (nla_put_be16(skb, OVS_KEY_ATTR_ETHERTYPE, eth_type) ||
nla_put_be16(skb, OVS_KEY_ATTR_VLAN, vh->tci))
return -EMSGSIZE;
return 0;
}
static int nsh_key_to_nlattr(const struct ovs_key_nsh *nsh, bool is_mask,
struct sk_buff *skb)
{
struct nlattr *start;
start = nla_nest_start_noflag(skb, OVS_KEY_ATTR_NSH);
if (!start)
return -EMSGSIZE;
if (nla_put(skb, OVS_NSH_KEY_ATTR_BASE, sizeof(nsh->base), &nsh->base))
goto nla_put_failure;
if (is_mask || nsh->base.mdtype == NSH_M_TYPE1) {
if (nla_put(skb, OVS_NSH_KEY_ATTR_MD1,
sizeof(nsh->context), nsh->context))
goto nla_put_failure;
}
/* Don't support MD type 2 yet */
nla_nest_end(skb, start);
return 0;
nla_put_failure:
return -EMSGSIZE;
}
static int __ovs_nla_put_key(const struct sw_flow_key *swkey,
const struct sw_flow_key *output, bool is_mask,
struct sk_buff *skb)
{
struct ovs_key_ethernet *eth_key;
struct nlattr *nla;
struct nlattr *encap = NULL;
struct nlattr *in_encap = NULL;
if (nla_put_u32(skb, OVS_KEY_ATTR_RECIRC_ID, output->recirc_id))
goto nla_put_failure;
if (nla_put_u32(skb, OVS_KEY_ATTR_DP_HASH, output->ovs_flow_hash))
goto nla_put_failure;
if (nla_put_u32(skb, OVS_KEY_ATTR_PRIORITY, output->phy.priority))
goto nla_put_failure;
if ((swkey->tun_proto || is_mask)) {
const void *opts = NULL;
if (output->tun_key.tun_flags & TUNNEL_OPTIONS_PRESENT)
opts = TUN_METADATA_OPTS(output, swkey->tun_opts_len);
if (ip_tun_to_nlattr(skb, &output->tun_key, opts,
swkey->tun_opts_len, swkey->tun_proto, 0))
goto nla_put_failure;
}
if (swkey->phy.in_port == DP_MAX_PORTS) {
if (is_mask && (output->phy.in_port == 0xffff))
if (nla_put_u32(skb, OVS_KEY_ATTR_IN_PORT, 0xffffffff))
goto nla_put_failure;
} else {
u16 upper_u16;
upper_u16 = !is_mask ? 0 : 0xffff;
if (nla_put_u32(skb, OVS_KEY_ATTR_IN_PORT,
(upper_u16 << 16) | output->phy.in_port))
goto nla_put_failure;
}
if (nla_put_u32(skb, OVS_KEY_ATTR_SKB_MARK, output->phy.skb_mark))
goto nla_put_failure;
if (ovs_ct_put_key(swkey, output, skb))
goto nla_put_failure;
if (ovs_key_mac_proto(swkey) == MAC_PROTO_ETHERNET) {
nla = nla_reserve(skb, OVS_KEY_ATTR_ETHERNET, sizeof(*eth_key));
if (!nla)
goto nla_put_failure;
eth_key = nla_data(nla);
ether_addr_copy(eth_key->eth_src, output->eth.src);
ether_addr_copy(eth_key->eth_dst, output->eth.dst);
if (swkey->eth.vlan.tci || eth_type_vlan(swkey->eth.type)) {
if (ovs_nla_put_vlan(skb, &output->eth.vlan, is_mask))
goto nla_put_failure;
encap = nla_nest_start_noflag(skb, OVS_KEY_ATTR_ENCAP);
if (!swkey->eth.vlan.tci)
goto unencap;
if (swkey->eth.cvlan.tci || eth_type_vlan(swkey->eth.type)) {
if (ovs_nla_put_vlan(skb, &output->eth.cvlan, is_mask))
goto nla_put_failure;
in_encap = nla_nest_start_noflag(skb,
OVS_KEY_ATTR_ENCAP);
if (!swkey->eth.cvlan.tci)
goto unencap;
}
}
if (swkey->eth.type == htons(ETH_P_802_2)) {
/*
* Ethertype 802.2 is represented in the netlink with omitted
* OVS_KEY_ATTR_ETHERTYPE in the flow key attribute, and
* 0xffff in the mask attribute. Ethertype can also
* be wildcarded.
*/
if (is_mask && output->eth.type)
if (nla_put_be16(skb, OVS_KEY_ATTR_ETHERTYPE,
output->eth.type))
goto nla_put_failure;
goto unencap;
}
}
if (nla_put_be16(skb, OVS_KEY_ATTR_ETHERTYPE, output->eth.type))
goto nla_put_failure;
if (eth_type_vlan(swkey->eth.type)) {
/* There are 3 VLAN tags; we don't know anything about the rest
 * of the packet, so truncate here.
 */
WARN_ON_ONCE(!(encap && in_encap));
goto unencap;
}
if (swkey->eth.type == htons(ETH_P_IP)) {
struct ovs_key_ipv4 *ipv4_key;
nla = nla_reserve(skb, OVS_KEY_ATTR_IPV4, sizeof(*ipv4_key));
if (!nla)
goto nla_put_failure;
ipv4_key = nla_data(nla);
ipv4_key->ipv4_src = output->ipv4.addr.src;
ipv4_key->ipv4_dst = output->ipv4.addr.dst;
ipv4_key->ipv4_proto = output->ip.proto;
ipv4_key->ipv4_tos = output->ip.tos;
ipv4_key->ipv4_ttl = output->ip.ttl;
ipv4_key->ipv4_frag = output->ip.frag;
} else if (swkey->eth.type == htons(ETH_P_IPV6)) {
struct ovs_key_ipv6 *ipv6_key;
struct ovs_key_ipv6_exthdrs *ipv6_exthdrs_key;
nla = nla_reserve(skb, OVS_KEY_ATTR_IPV6, sizeof(*ipv6_key));
if (!nla)
goto nla_put_failure;
ipv6_key = nla_data(nla);
memcpy(ipv6_key->ipv6_src, &output->ipv6.addr.src,
sizeof(ipv6_key->ipv6_src));
memcpy(ipv6_key->ipv6_dst, &output->ipv6.addr.dst,
sizeof(ipv6_key->ipv6_dst));
ipv6_key->ipv6_label = output->ipv6.label;
ipv6_key->ipv6_proto = output->ip.proto;
ipv6_key->ipv6_tclass = output->ip.tos;
ipv6_key->ipv6_hlimit = output->ip.ttl;
ipv6_key->ipv6_frag = output->ip.frag;
nla = nla_reserve(skb, OVS_KEY_ATTR_IPV6_EXTHDRS,
sizeof(*ipv6_exthdrs_key));
if (!nla)
goto nla_put_failure;
ipv6_exthdrs_key = nla_data(nla);
ipv6_exthdrs_key->hdrs = output->ipv6.exthdrs;
} else if (swkey->eth.type == htons(ETH_P_NSH)) {
if (nsh_key_to_nlattr(&output->nsh, is_mask, skb))
goto nla_put_failure;
} else if (swkey->eth.type == htons(ETH_P_ARP) ||
swkey->eth.type == htons(ETH_P_RARP)) {
struct ovs_key_arp *arp_key;
nla = nla_reserve(skb, OVS_KEY_ATTR_ARP, sizeof(*arp_key));
if (!nla)
goto nla_put_failure;
arp_key = nla_data(nla);
memset(arp_key, 0, sizeof(struct ovs_key_arp));
arp_key->arp_sip = output->ipv4.addr.src;
arp_key->arp_tip = output->ipv4.addr.dst;
arp_key->arp_op = htons(output->ip.proto);
ether_addr_copy(arp_key->arp_sha, output->ipv4.arp.sha);
ether_addr_copy(arp_key->arp_tha, output->ipv4.arp.tha);
} else if (eth_p_mpls(swkey->eth.type)) {
u8 i, num_labels;
struct ovs_key_mpls *mpls_key;
num_labels = hweight_long(output->mpls.num_labels_mask);
nla = nla_reserve(skb, OVS_KEY_ATTR_MPLS,
num_labels * sizeof(*mpls_key));
if (!nla)
goto nla_put_failure;
mpls_key = nla_data(nla);
for (i = 0; i < num_labels; i++)
mpls_key[i].mpls_lse = output->mpls.lse[i];
}
if ((swkey->eth.type == htons(ETH_P_IP) ||
swkey->eth.type == htons(ETH_P_IPV6)) &&
swkey->ip.frag != OVS_FRAG_TYPE_LATER) {
if (swkey->ip.proto == IPPROTO_TCP) {
struct ovs_key_tcp *tcp_key;
nla = nla_reserve(skb, OVS_KEY_ATTR_TCP, sizeof(*tcp_key));
if (!nla)
goto nla_put_failure;
tcp_key = nla_data(nla);
tcp_key->tcp_src = output->tp.src;
tcp_key->tcp_dst = output->tp.dst;
if (nla_put_be16(skb, OVS_KEY_ATTR_TCP_FLAGS,
output->tp.flags))
goto nla_put_failure;
} else if (swkey->ip.proto == IPPROTO_UDP) {
struct ovs_key_udp *udp_key;
nla = nla_reserve(skb, OVS_KEY_ATTR_UDP, sizeof(*udp_key));
if (!nla)
goto nla_put_failure;
udp_key = nla_data(nla);
udp_key->udp_src = output->tp.src;
udp_key->udp_dst = output->tp.dst;
} else if (swkey->ip.proto == IPPROTO_SCTP) {
struct ovs_key_sctp *sctp_key;
nla = nla_reserve(skb, OVS_KEY_ATTR_SCTP, sizeof(*sctp_key));
if (!nla)
goto nla_put_failure;
sctp_key = nla_data(nla);
sctp_key->sctp_src = output->tp.src;
sctp_key->sctp_dst = output->tp.dst;
} else if (swkey->eth.type == htons(ETH_P_IP) &&
swkey->ip.proto == IPPROTO_ICMP) {
struct ovs_key_icmp *icmp_key;
nla = nla_reserve(skb, OVS_KEY_ATTR_ICMP, sizeof(*icmp_key));
if (!nla)
goto nla_put_failure;
icmp_key = nla_data(nla);
icmp_key->icmp_type = ntohs(output->tp.src);
icmp_key->icmp_code = ntohs(output->tp.dst);
} else if (swkey->eth.type == htons(ETH_P_IPV6) &&
swkey->ip.proto == IPPROTO_ICMPV6) {
struct ovs_key_icmpv6 *icmpv6_key;
nla = nla_reserve(skb, OVS_KEY_ATTR_ICMPV6,
sizeof(*icmpv6_key));
if (!nla)
goto nla_put_failure;
icmpv6_key = nla_data(nla);
icmpv6_key->icmpv6_type = ntohs(output->tp.src);
icmpv6_key->icmpv6_code = ntohs(output->tp.dst);
if (swkey->tp.src == htons(NDISC_NEIGHBOUR_SOLICITATION) ||
swkey->tp.src == htons(NDISC_NEIGHBOUR_ADVERTISEMENT)) {
struct ovs_key_nd *nd_key;
nla = nla_reserve(skb, OVS_KEY_ATTR_ND, sizeof(*nd_key));
if (!nla)
goto nla_put_failure;
nd_key = nla_data(nla);
memcpy(nd_key->nd_target, &output->ipv6.nd.target,
sizeof(nd_key->nd_target));
ether_addr_copy(nd_key->nd_sll, output->ipv6.nd.sll);
ether_addr_copy(nd_key->nd_tll, output->ipv6.nd.tll);
}
}
}
unencap:
if (in_encap)
nla_nest_end(skb, in_encap);
if (encap)
nla_nest_end(skb, encap);
return 0;
nla_put_failure:
return -EMSGSIZE;
}
int ovs_nla_put_key(const struct sw_flow_key *swkey,
const struct sw_flow_key *output, int attr, bool is_mask,
struct sk_buff *skb)
{
int err;
struct nlattr *nla;
nla = nla_nest_start_noflag(skb, attr);
if (!nla)
return -EMSGSIZE;
err = __ovs_nla_put_key(swkey, output, is_mask, skb);
if (err)
return err;
nla_nest_end(skb, nla);
return 0;
}
/* Called with ovs_mutex or RCU read lock. */
int ovs_nla_put_identifier(const struct sw_flow *flow, struct sk_buff *skb)
{
if (ovs_identifier_is_ufid(&flow->id))
return nla_put(skb, OVS_FLOW_ATTR_UFID, flow->id.ufid_len,
flow->id.ufid);
return ovs_nla_put_key(flow->id.unmasked_key, flow->id.unmasked_key,
OVS_FLOW_ATTR_KEY, false, skb);
}
/* Called with ovs_mutex or RCU read lock. */
int ovs_nla_put_masked_key(const struct sw_flow *flow, struct sk_buff *skb)
{
return ovs_nla_put_key(&flow->key, &flow->key,
OVS_FLOW_ATTR_KEY, false, skb);
}
/* Called with ovs_mutex or RCU read lock. */
int ovs_nla_put_mask(const struct sw_flow *flow, struct sk_buff *skb)
{
return ovs_nla_put_key(&flow->key, &flow->mask->key,
OVS_FLOW_ATTR_MASK, true, skb);
}
#define MAX_ACTIONS_BUFSIZE (32 * 1024)
static struct sw_flow_actions *nla_alloc_flow_actions(int size)
{
struct sw_flow_actions *sfa;
WARN_ON_ONCE(size > MAX_ACTIONS_BUFSIZE);
sfa = kmalloc(kmalloc_size_roundup(sizeof(*sfa) + size), GFP_KERNEL);
if (!sfa)
return ERR_PTR(-ENOMEM);
sfa->actions_len = 0;
return sfa;
}
static void ovs_nla_free_nested_actions(const struct nlattr *actions, int len);
static void ovs_nla_free_check_pkt_len_action(const struct nlattr *action)
{
const struct nlattr *a;
int rem;
nla_for_each_nested(a, action, rem) {
switch (nla_type(a)) {
case OVS_CHECK_PKT_LEN_ATTR_ACTIONS_IF_LESS_EQUAL:
case OVS_CHECK_PKT_LEN_ATTR_ACTIONS_IF_GREATER:
ovs_nla_free_nested_actions(nla_data(a), nla_len(a));
break;
}
}
}
static void ovs_nla_free_clone_action(const struct nlattr *action)
{
const struct nlattr *a = nla_data(action);
int rem = nla_len(action);
switch (nla_type(a)) {
case OVS_CLONE_ATTR_EXEC:
/* The real list of actions follows this attribute. */
a = nla_next(a, &rem);
ovs_nla_free_nested_actions(a, rem);
break;
}
}
static void ovs_nla_free_dec_ttl_action(const struct nlattr *action)
{
const struct nlattr *a = nla_data(action);
switch (nla_type(a)) {
case OVS_DEC_TTL_ATTR_ACTION:
ovs_nla_free_nested_actions(nla_data(a), nla_len(a));
break;
}
}
static void ovs_nla_free_sample_action(const struct nlattr *action)
{
const struct nlattr *a = nla_data(action);
int rem = nla_len(action);
switch (nla_type(a)) {
case OVS_SAMPLE_ATTR_ARG:
/* The real list of actions follows this attribute. */
a = nla_next(a, &rem);
ovs_nla_free_nested_actions(a, rem);
break;
}
}
static void ovs_nla_free_set_action(const struct nlattr *a)
{
const struct nlattr *ovs_key = nla_data(a);
struct ovs_tunnel_info *ovs_tun;
switch (nla_type(ovs_key)) {
case OVS_KEY_ATTR_TUNNEL_INFO:
ovs_tun = nla_data(ovs_key);
dst_release((struct dst_entry *)ovs_tun->tun_dst);
break;
}
}
static void ovs_nla_free_nested_actions(const struct nlattr *actions, int len)
{
const struct nlattr *a;
int rem;
/* Whenever new actions are added, consider whether this function
 * needs to be updated as well.
 */
BUILD_BUG_ON(OVS_ACTION_ATTR_MAX != 24);
if (!actions)
return;
nla_for_each_attr(a, actions, len, rem) {
switch (nla_type(a)) {
case OVS_ACTION_ATTR_CHECK_PKT_LEN:
ovs_nla_free_check_pkt_len_action(a);
break;
case OVS_ACTION_ATTR_CLONE:
ovs_nla_free_clone_action(a);
break;
case OVS_ACTION_ATTR_CT:
ovs_ct_free_action(a);
break;
case OVS_ACTION_ATTR_DEC_TTL:
ovs_nla_free_dec_ttl_action(a);
break;
case OVS_ACTION_ATTR_SAMPLE:
ovs_nla_free_sample_action(a);
break;
case OVS_ACTION_ATTR_SET:
ovs_nla_free_set_action(a);
break;
}
}
}
void ovs_nla_free_flow_actions(struct sw_flow_actions *sf_acts)
{
if (!sf_acts)
return;
ovs_nla_free_nested_actions(sf_acts->actions, sf_acts->actions_len);
kfree(sf_acts);
}
static void __ovs_nla_free_flow_actions(struct rcu_head *head)
{
ovs_nla_free_flow_actions(container_of(head, struct sw_flow_actions, rcu));
}
/* Schedules 'sf_acts' to be freed after the next RCU grace period.
* The caller must hold rcu_read_lock for this to be sensible. */
void ovs_nla_free_flow_actions_rcu(struct sw_flow_actions *sf_acts)
{
call_rcu(&sf_acts->rcu, __ovs_nla_free_flow_actions);
}
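/* Reserve 'attr_len' bytes (netlink-aligned) at the end of the actions
 * buffer, reallocating and at least doubling it as needed up to
 * MAX_ACTIONS_BUFSIZE, and return a pointer to the reserved space.
 */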
static struct nlattr *reserve_sfa_size(struct sw_flow_actions **sfa,
int attr_len, bool log)
{
struct sw_flow_actions *acts;
int new_acts_size;
size_t req_size = NLA_ALIGN(attr_len);
int next_offset = offsetof(struct sw_flow_actions, actions) +
(*sfa)->actions_len;
if (req_size <= (ksize(*sfa) - next_offset))
goto out;
new_acts_size = max(next_offset + req_size, ksize(*sfa) * 2);
if (new_acts_size > MAX_ACTIONS_BUFSIZE) {
if ((next_offset + req_size) > MAX_ACTIONS_BUFSIZE) {
OVS_NLERR(log, "Flow action size exceeds max %u",
MAX_ACTIONS_BUFSIZE);
return ERR_PTR(-EMSGSIZE);
}
new_acts_size = MAX_ACTIONS_BUFSIZE;
}
acts = nla_alloc_flow_actions(new_acts_size);
if (IS_ERR(acts))
return (void *)acts;
memcpy(acts->actions, (*sfa)->actions, (*sfa)->actions_len);
acts->actions_len = (*sfa)->actions_len;
acts->orig_len = (*sfa)->orig_len;
kfree(*sfa);
*sfa = acts;
out:
(*sfa)->actions_len += req_size;
return (struct nlattr *) ((unsigned char *)(*sfa) + next_offset);
}
static struct nlattr *__add_action(struct sw_flow_actions **sfa,
int attrtype, void *data, int len, bool log)
{
struct nlattr *a;
a = reserve_sfa_size(sfa, nla_attr_size(len), log);
if (IS_ERR(a))
return a;
a->nla_type = attrtype;
a->nla_len = nla_attr_size(len);
if (data)
memcpy(nla_data(a), data, len);
memset((unsigned char *) a + a->nla_len, 0, nla_padlen(len));
return a;
}
int ovs_nla_add_action(struct sw_flow_actions **sfa, int attrtype, void *data,
int len, bool log)
{
struct nlattr *a;
a = __add_action(sfa, attrtype, data, len, log);
return PTR_ERR_OR_ZERO(a);
}
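/* Emit an empty attribute header for a nested action and return its offset;
 * add_nested_action_end() later fixes up the length once the payload is in
 * place.
 */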
static inline int add_nested_action_start(struct sw_flow_actions **sfa,
int attrtype, bool log)
{
int used = (*sfa)->actions_len;
int err;
err = ovs_nla_add_action(sfa, attrtype, NULL, 0, log);
if (err)
return err;
return used;
}
static inline void add_nested_action_end(struct sw_flow_actions *sfa,
int st_offset)
{
struct nlattr *a = (struct nlattr *) ((unsigned char *)sfa->actions +
st_offset);
a->nla_len = sfa->actions_len - st_offset;
}
static int __ovs_nla_copy_actions(struct net *net, const struct nlattr *attr,
const struct sw_flow_key *key,
struct sw_flow_actions **sfa,
__be16 eth_type, __be16 vlan_tci,
u32 mpls_label_count, bool log);
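/* Validate a SAMPLE action and copy it in the internal layout: a sample_arg
 * (probability plus an 'execute in place' flag) followed by the copied
 * nested actions.
 */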
static int validate_and_copy_sample(struct net *net, const struct nlattr *attr,
const struct sw_flow_key *key,
struct sw_flow_actions **sfa,
__be16 eth_type, __be16 vlan_tci,
u32 mpls_label_count, bool log, bool last)
{
const struct nlattr *attrs[OVS_SAMPLE_ATTR_MAX + 1];
const struct nlattr *probability, *actions;
const struct nlattr *a;
int rem, start, err;
struct sample_arg arg;
memset(attrs, 0, sizeof(attrs));
nla_for_each_nested(a, attr, rem) {
int type = nla_type(a);
if (!type || type > OVS_SAMPLE_ATTR_MAX || attrs[type])
return -EINVAL;
attrs[type] = a;
}
if (rem)
return -EINVAL;
probability = attrs[OVS_SAMPLE_ATTR_PROBABILITY];
if (!probability || nla_len(probability) != sizeof(u32))
return -EINVAL;
actions = attrs[OVS_SAMPLE_ATTR_ACTIONS];
if (!actions || (nla_len(actions) && nla_len(actions) < NLA_HDRLEN))
return -EINVAL;
/* validation done, copy sample action. */
start = add_nested_action_start(sfa, OVS_ACTION_ATTR_SAMPLE, log);
if (start < 0)
return start;
/* When both skb and flow may be changed, put the sample
* into a deferred fifo. On the other hand, if only skb
* may be modified, the actions can be executed in place.
*
* Do this analysis at the flow installation time.
* Set 'clone_action->exec' to true if the actions can be
* executed without being deferred.
*
* If the sample is the last action, it can always be executed
* rather than deferred.
*/
arg.exec = last || !actions_may_change_flow(actions);
arg.probability = nla_get_u32(probability);
err = ovs_nla_add_action(sfa, OVS_SAMPLE_ATTR_ARG, &arg, sizeof(arg),
log);
if (err)
return err;
err = __ovs_nla_copy_actions(net, actions, key, sfa,
eth_type, vlan_tci, mpls_label_count, log);
if (err)
return err;
add_nested_action_end(*sfa, start);
return 0;
}
static int validate_and_copy_dec_ttl(struct net *net,
const struct nlattr *attr,
const struct sw_flow_key *key,
struct sw_flow_actions **sfa,
__be16 eth_type, __be16 vlan_tci,
u32 mpls_label_count, bool log)
{
const struct nlattr *attrs[OVS_DEC_TTL_ATTR_MAX + 1];
int start, action_start, err, rem;
const struct nlattr *a, *actions;
memset(attrs, 0, sizeof(attrs));
nla_for_each_nested(a, attr, rem) {
int type = nla_type(a);
/* Ignore unknown attributes to be future proof. */
if (type > OVS_DEC_TTL_ATTR_MAX)
continue;
if (!type || attrs[type]) {
OVS_NLERR(log, "Duplicate or invalid key (type %d).",
type);
return -EINVAL;
}
attrs[type] = a;
}
if (rem) {
OVS_NLERR(log, "Message has %d unknown bytes.", rem);
return -EINVAL;
}
actions = attrs[OVS_DEC_TTL_ATTR_ACTION];
if (!actions || (nla_len(actions) && nla_len(actions) < NLA_HDRLEN)) {
OVS_NLERR(log, "Missing valid actions attribute.");
return -EINVAL;
}
start = add_nested_action_start(sfa, OVS_ACTION_ATTR_DEC_TTL, log);
if (start < 0)
return start;
action_start = add_nested_action_start(sfa, OVS_DEC_TTL_ATTR_ACTION, log);
if (action_start < 0)
return action_start;
err = __ovs_nla_copy_actions(net, actions, key, sfa, eth_type,
vlan_tci, mpls_label_count, log);
if (err)
return err;
add_nested_action_end(*sfa, action_start);
add_nested_action_end(*sfa, start);
return 0;
}
static int validate_and_copy_clone(struct net *net,
const struct nlattr *attr,
const struct sw_flow_key *key,
struct sw_flow_actions **sfa,
__be16 eth_type, __be16 vlan_tci,
u32 mpls_label_count, bool log, bool last)
{
int start, err;
u32 exec;
if (nla_len(attr) && nla_len(attr) < NLA_HDRLEN)
return -EINVAL;
start = add_nested_action_start(sfa, OVS_ACTION_ATTR_CLONE, log);
if (start < 0)
return start;
exec = last || !actions_may_change_flow(attr);
err = ovs_nla_add_action(sfa, OVS_CLONE_ATTR_EXEC, &exec,
sizeof(exec), log);
if (err)
return err;
err = __ovs_nla_copy_actions(net, attr, key, sfa,
eth_type, vlan_tci, mpls_label_count, log);
if (err)
return err;
add_nested_action_end(*sfa, start);
return 0;
}
void ovs_match_init(struct sw_flow_match *match,
struct sw_flow_key *key,
bool reset_key,
struct sw_flow_mask *mask)
{
memset(match, 0, sizeof(*match));
match->key = key;
match->mask = mask;
if (reset_key)
memset(key, 0, sizeof(*key));
if (mask) {
memset(&mask->key, 0, sizeof(mask->key));
mask->range.start = mask->range.end = 0;
}
}
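/* Walk the Geneve options in the tunnel key, checking their framing and
 * setting TUNNEL_CRIT_OPT if any critical option is present.
 */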
static int validate_geneve_opts(struct sw_flow_key *key)
{
struct geneve_opt *option;
int opts_len = key->tun_opts_len;
bool crit_opt = false;
option = (struct geneve_opt *)TUN_METADATA_OPTS(key, key->tun_opts_len);
while (opts_len > 0) {
int len;
if (opts_len < sizeof(*option))
return -EINVAL;
len = sizeof(*option) + option->length * 4;
if (len > opts_len)
return -EINVAL;
crit_opt |= !!(option->type & GENEVE_CRIT_OPT_TYPE);
option = (struct geneve_opt *)((u8 *)option + len);
opts_len -= len;
}
key->tun_key.tun_flags |= crit_opt ? TUNNEL_CRIT_OPT : 0;
return 0;
}
static int validate_and_copy_set_tun(const struct nlattr *attr,
struct sw_flow_actions **sfa, bool log)
{
struct sw_flow_match match;
struct sw_flow_key key;
struct metadata_dst *tun_dst;
struct ip_tunnel_info *tun_info;
struct ovs_tunnel_info *ovs_tun;
struct nlattr *a;
int err = 0, start, opts_type;
__be16 dst_opt_type;
dst_opt_type = 0;
ovs_match_init(&match, &key, true, NULL);
opts_type = ip_tun_from_nlattr(nla_data(attr), &match, false, log);
if (opts_type < 0)
return opts_type;
if (key.tun_opts_len) {
switch (opts_type) {
case OVS_TUNNEL_KEY_ATTR_GENEVE_OPTS:
err = validate_geneve_opts(&key);
if (err < 0)
return err;
dst_opt_type = TUNNEL_GENEVE_OPT;
break;
case OVS_TUNNEL_KEY_ATTR_VXLAN_OPTS:
dst_opt_type = TUNNEL_VXLAN_OPT;
break;
case OVS_TUNNEL_KEY_ATTR_ERSPAN_OPTS:
dst_opt_type = TUNNEL_ERSPAN_OPT;
break;
}
}
start = add_nested_action_start(sfa, OVS_ACTION_ATTR_SET, log);
if (start < 0)
return start;
tun_dst = metadata_dst_alloc(key.tun_opts_len, METADATA_IP_TUNNEL,
GFP_KERNEL);
if (!tun_dst)
return -ENOMEM;
err = dst_cache_init(&tun_dst->u.tun_info.dst_cache, GFP_KERNEL);
if (err) {
dst_release((struct dst_entry *)tun_dst);
return err;
}
a = __add_action(sfa, OVS_KEY_ATTR_TUNNEL_INFO, NULL,
sizeof(*ovs_tun), log);
if (IS_ERR(a)) {
dst_release((struct dst_entry *)tun_dst);
return PTR_ERR(a);
}
ovs_tun = nla_data(a);
ovs_tun->tun_dst = tun_dst;
tun_info = &tun_dst->u.tun_info;
tun_info->mode = IP_TUNNEL_INFO_TX;
if (key.tun_proto == AF_INET6)
tun_info->mode |= IP_TUNNEL_INFO_IPV6;
else if (key.tun_proto == AF_INET && key.tun_key.u.ipv4.dst == 0)
tun_info->mode |= IP_TUNNEL_INFO_BRIDGE;
tun_info->key = key.tun_key;
/* We need to store the options in the action itself since
* everything else will go away after flow setup. We can append
* it to tun_info and then point there.
*/
ip_tunnel_info_opts_set(tun_info,
TUN_METADATA_OPTS(&key, key.tun_opts_len),
key.tun_opts_len, dst_opt_type);
add_nested_action_end(*sfa, start);
return err;
}
static bool validate_nsh(const struct nlattr *attr, bool is_mask,
bool is_push_nsh, bool log)
{
struct sw_flow_match match;
struct sw_flow_key key;
int ret = 0;
ovs_match_init(&match, &key, true, NULL);
ret = nsh_key_put_from_nlattr(attr, &match, is_mask,
is_push_nsh, log);
return !ret;
}
/* Return false if there are any non-masked bits set.
* Mask follows data immediately, before any netlink padding.
*/
static bool validate_masked(u8 *data, int len)
{
u8 *mask = data + len;
while (len--)
if (*data++ & ~*mask++)
return false;
return true;
}
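/* Validate a SET or SET_MASKED action against the flow key. Tunnel sets are
 * copied via validate_and_copy_set_tun(); all other non-masked sets are
 * converted to masked SET_TO_MASKED actions, in both cases with '*skip_copy'
 * set so the original attribute is not copied again.
 */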
static int validate_set(const struct nlattr *a,
const struct sw_flow_key *flow_key,
struct sw_flow_actions **sfa, bool *skip_copy,
u8 mac_proto, __be16 eth_type, bool masked, bool log)
{
const struct nlattr *ovs_key = nla_data(a);
int key_type = nla_type(ovs_key);
size_t key_len;
/* There can be only one key in an action */
if (nla_total_size(nla_len(ovs_key)) != nla_len(a))
return -EINVAL;
key_len = nla_len(ovs_key);
if (masked)
key_len /= 2;
if (key_type > OVS_KEY_ATTR_MAX ||
!check_attr_len(key_len, ovs_key_lens[key_type].len))
return -EINVAL;
if (masked && !validate_masked(nla_data(ovs_key), key_len))
return -EINVAL;
switch (key_type) {
case OVS_KEY_ATTR_PRIORITY:
case OVS_KEY_ATTR_SKB_MARK:
case OVS_KEY_ATTR_CT_MARK:
case OVS_KEY_ATTR_CT_LABELS:
break;
case OVS_KEY_ATTR_ETHERNET:
if (mac_proto != MAC_PROTO_ETHERNET)
return -EINVAL;
break;
case OVS_KEY_ATTR_TUNNEL: {
int err;
if (masked)
return -EINVAL; /* Masked tunnel set not supported. */
*skip_copy = true;
err = validate_and_copy_set_tun(a, sfa, log);
if (err)
return err;
break;
}
case OVS_KEY_ATTR_IPV4: {
const struct ovs_key_ipv4 *ipv4_key;
if (eth_type != htons(ETH_P_IP))
return -EINVAL;
ipv4_key = nla_data(ovs_key);
if (masked) {
const struct ovs_key_ipv4 *mask = ipv4_key + 1;
/* Non-writeable fields. */
if (mask->ipv4_proto || mask->ipv4_frag)
return -EINVAL;
} else {
if (ipv4_key->ipv4_proto != flow_key->ip.proto)
return -EINVAL;
if (ipv4_key->ipv4_frag != flow_key->ip.frag)
return -EINVAL;
}
break;
}
case OVS_KEY_ATTR_IPV6: {
const struct ovs_key_ipv6 *ipv6_key;
if (eth_type != htons(ETH_P_IPV6))
return -EINVAL;
ipv6_key = nla_data(ovs_key);
if (masked) {
const struct ovs_key_ipv6 *mask = ipv6_key + 1;
/* Non-writeable fields. */
if (mask->ipv6_proto || mask->ipv6_frag)
return -EINVAL;
/* Invalid bits in the flow label mask? */
if (ntohl(mask->ipv6_label) & 0xFFF00000)
return -EINVAL;
} else {
if (ipv6_key->ipv6_proto != flow_key->ip.proto)
return -EINVAL;
if (ipv6_key->ipv6_frag != flow_key->ip.frag)
return -EINVAL;
}
if (ntohl(ipv6_key->ipv6_label) & 0xFFF00000)
return -EINVAL;
break;
}
case OVS_KEY_ATTR_TCP:
if ((eth_type != htons(ETH_P_IP) &&
eth_type != htons(ETH_P_IPV6)) ||
flow_key->ip.proto != IPPROTO_TCP)
return -EINVAL;
break;
case OVS_KEY_ATTR_UDP:
if ((eth_type != htons(ETH_P_IP) &&
eth_type != htons(ETH_P_IPV6)) ||
flow_key->ip.proto != IPPROTO_UDP)
return -EINVAL;
break;
case OVS_KEY_ATTR_MPLS:
if (!eth_p_mpls(eth_type))
return -EINVAL;
break;
case OVS_KEY_ATTR_SCTP:
if ((eth_type != htons(ETH_P_IP) &&
eth_type != htons(ETH_P_IPV6)) ||
flow_key->ip.proto != IPPROTO_SCTP)
return -EINVAL;
break;
case OVS_KEY_ATTR_NSH:
if (eth_type != htons(ETH_P_NSH))
return -EINVAL;
if (!validate_nsh(nla_data(a), masked, false, log))
return -EINVAL;
break;
default:
return -EINVAL;
}
/* Convert non-masked non-tunnel set actions to masked set actions. */
if (!masked && key_type != OVS_KEY_ATTR_TUNNEL) {
int start, len = key_len * 2;
struct nlattr *at;
*skip_copy = true;
start = add_nested_action_start(sfa,
OVS_ACTION_ATTR_SET_TO_MASKED,
log);
if (start < 0)
return start;
at = __add_action(sfa, key_type, NULL, len, log);
if (IS_ERR(at))
return PTR_ERR(at);
memcpy(nla_data(at), nla_data(ovs_key), key_len); /* Key. */
memset(nla_data(at) + key_len, 0xff, key_len); /* Mask. */
/* Clear non-writeable bits from otherwise writeable fields. */
if (key_type == OVS_KEY_ATTR_IPV6) {
struct ovs_key_ipv6 *mask = nla_data(at) + key_len;
mask->ipv6_label &= htonl(0x000FFFFF);
}
add_nested_action_end(*sfa, start);
}
return 0;
}
static int validate_userspace(const struct nlattr *attr)
{
static const struct nla_policy userspace_policy[OVS_USERSPACE_ATTR_MAX + 1] = {
[OVS_USERSPACE_ATTR_PID] = {.type = NLA_U32 },
[OVS_USERSPACE_ATTR_USERDATA] = {.type = NLA_UNSPEC },
[OVS_USERSPACE_ATTR_EGRESS_TUN_PORT] = {.type = NLA_U32 },
};
struct nlattr *a[OVS_USERSPACE_ATTR_MAX + 1];
int error;
error = nla_parse_nested_deprecated(a, OVS_USERSPACE_ATTR_MAX, attr,
userspace_policy, NULL);
if (error)
return error;
if (!a[OVS_USERSPACE_ATTR_PID] ||
!nla_get_u32(a[OVS_USERSPACE_ATTR_PID]))
return -EINVAL;
return 0;
}
static const struct nla_policy cpl_policy[OVS_CHECK_PKT_LEN_ATTR_MAX + 1] = {
[OVS_CHECK_PKT_LEN_ATTR_PKT_LEN] = {.type = NLA_U16 },
[OVS_CHECK_PKT_LEN_ATTR_ACTIONS_IF_GREATER] = {.type = NLA_NESTED },
[OVS_CHECK_PKT_LEN_ATTR_ACTIONS_IF_LESS_EQUAL] = {.type = NLA_NESTED },
};
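/* Validate a CHECK_PKT_LEN action and copy it in the internal layout:
 * the argument struct followed by the two nested action lists.
 */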
static int validate_and_copy_check_pkt_len(struct net *net,
const struct nlattr *attr,
const struct sw_flow_key *key,
struct sw_flow_actions **sfa,
__be16 eth_type, __be16 vlan_tci,
u32 mpls_label_count,
bool log, bool last)
{
const struct nlattr *acts_if_greater, *acts_if_lesser_eq;
struct nlattr *a[OVS_CHECK_PKT_LEN_ATTR_MAX + 1];
struct check_pkt_len_arg arg;
int nested_acts_start;
int start, err;
err = nla_parse_deprecated_strict(a, OVS_CHECK_PKT_LEN_ATTR_MAX,
nla_data(attr), nla_len(attr),
cpl_policy, NULL);
if (err)
return err;
if (!a[OVS_CHECK_PKT_LEN_ATTR_PKT_LEN] ||
!nla_get_u16(a[OVS_CHECK_PKT_LEN_ATTR_PKT_LEN]))
return -EINVAL;
acts_if_lesser_eq = a[OVS_CHECK_PKT_LEN_ATTR_ACTIONS_IF_LESS_EQUAL];
acts_if_greater = a[OVS_CHECK_PKT_LEN_ATTR_ACTIONS_IF_GREATER];
/* Both of the nested actions should be present. */
if (!acts_if_greater || !acts_if_lesser_eq)
return -EINVAL;
/* validation done, copy the nested actions. */
start = add_nested_action_start(sfa, OVS_ACTION_ATTR_CHECK_PKT_LEN,
log);
if (start < 0)
return start;
arg.pkt_len = nla_get_u16(a[OVS_CHECK_PKT_LEN_ATTR_PKT_LEN]);
arg.exec_for_lesser_equal =
last || !actions_may_change_flow(acts_if_lesser_eq);
arg.exec_for_greater =
last || !actions_may_change_flow(acts_if_greater);
err = ovs_nla_add_action(sfa, OVS_CHECK_PKT_LEN_ATTR_ARG, &arg,
sizeof(arg), log);
if (err)
return err;
nested_acts_start = add_nested_action_start(sfa,
OVS_CHECK_PKT_LEN_ATTR_ACTIONS_IF_LESS_EQUAL, log);
if (nested_acts_start < 0)
return nested_acts_start;
err = __ovs_nla_copy_actions(net, acts_if_lesser_eq, key, sfa,
eth_type, vlan_tci, mpls_label_count, log);
if (err)
return err;
add_nested_action_end(*sfa, nested_acts_start);
nested_acts_start = add_nested_action_start(sfa,
OVS_CHECK_PKT_LEN_ATTR_ACTIONS_IF_GREATER, log);
if (nested_acts_start < 0)
return nested_acts_start;
err = __ovs_nla_copy_actions(net, acts_if_greater, key, sfa,
eth_type, vlan_tci, mpls_label_count, log);
if (err)
return err;
add_nested_action_end(*sfa, nested_acts_start);
add_nested_action_end(*sfa, start);
return 0;
}
static int copy_action(const struct nlattr *from,
struct sw_flow_actions **sfa, bool log)
{
int totlen = NLA_ALIGN(from->nla_len);
struct nlattr *to;
to = reserve_sfa_size(sfa, from->nla_len, log);
if (IS_ERR(to))
return PTR_ERR(to);
memcpy(to, from, totlen);
return 0;
}
static int __ovs_nla_copy_actions(struct net *net, const struct nlattr *attr,
const struct sw_flow_key *key,
struct sw_flow_actions **sfa,
__be16 eth_type, __be16 vlan_tci,
u32 mpls_label_count, bool log)
{
u8 mac_proto = ovs_key_mac_proto(key);
const struct nlattr *a;
int rem, err;
nla_for_each_nested(a, attr, rem) {
/* Expected argument lengths, (u32)-1 for variable length. */
static const u32 action_lens[OVS_ACTION_ATTR_MAX + 1] = {
[OVS_ACTION_ATTR_OUTPUT] = sizeof(u32),
[OVS_ACTION_ATTR_RECIRC] = sizeof(u32),
[OVS_ACTION_ATTR_USERSPACE] = (u32)-1,
[OVS_ACTION_ATTR_PUSH_MPLS] = sizeof(struct ovs_action_push_mpls),
[OVS_ACTION_ATTR_POP_MPLS] = sizeof(__be16),
[OVS_ACTION_ATTR_PUSH_VLAN] = sizeof(struct ovs_action_push_vlan),
[OVS_ACTION_ATTR_POP_VLAN] = 0,
[OVS_ACTION_ATTR_SET] = (u32)-1,
[OVS_ACTION_ATTR_SET_MASKED] = (u32)-1,
[OVS_ACTION_ATTR_SAMPLE] = (u32)-1,
[OVS_ACTION_ATTR_HASH] = sizeof(struct ovs_action_hash),
[OVS_ACTION_ATTR_CT] = (u32)-1,
[OVS_ACTION_ATTR_CT_CLEAR] = 0,
[OVS_ACTION_ATTR_TRUNC] = sizeof(struct ovs_action_trunc),
[OVS_ACTION_ATTR_PUSH_ETH] = sizeof(struct ovs_action_push_eth),
[OVS_ACTION_ATTR_POP_ETH] = 0,
[OVS_ACTION_ATTR_PUSH_NSH] = (u32)-1,
[OVS_ACTION_ATTR_POP_NSH] = 0,
[OVS_ACTION_ATTR_METER] = sizeof(u32),
[OVS_ACTION_ATTR_CLONE] = (u32)-1,
[OVS_ACTION_ATTR_CHECK_PKT_LEN] = (u32)-1,
[OVS_ACTION_ATTR_ADD_MPLS] = sizeof(struct ovs_action_add_mpls),
[OVS_ACTION_ATTR_DEC_TTL] = (u32)-1,
[OVS_ACTION_ATTR_DROP] = sizeof(u32),
};
const struct ovs_action_push_vlan *vlan;
int type = nla_type(a);
bool skip_copy;
if (type > OVS_ACTION_ATTR_MAX ||
(action_lens[type] != nla_len(a) &&
action_lens[type] != (u32)-1))
return -EINVAL;
skip_copy = false;
switch (type) {
case OVS_ACTION_ATTR_UNSPEC:
return -EINVAL;
case OVS_ACTION_ATTR_USERSPACE:
err = validate_userspace(a);
if (err)
return err;
break;
case OVS_ACTION_ATTR_OUTPUT:
if (nla_get_u32(a) >= DP_MAX_PORTS)
return -EINVAL;
break;
case OVS_ACTION_ATTR_TRUNC: {
const struct ovs_action_trunc *trunc = nla_data(a);
if (trunc->max_len < ETH_HLEN)
return -EINVAL;
break;
}
case OVS_ACTION_ATTR_HASH: {
const struct ovs_action_hash *act_hash = nla_data(a);
switch (act_hash->hash_alg) {
case OVS_HASH_ALG_L4:
fallthrough;
case OVS_HASH_ALG_SYM_L4:
break;
default:
return -EINVAL;
}
break;
}
case OVS_ACTION_ATTR_POP_VLAN:
if (mac_proto != MAC_PROTO_ETHERNET)
return -EINVAL;
vlan_tci = htons(0);
break;
case OVS_ACTION_ATTR_PUSH_VLAN:
if (mac_proto != MAC_PROTO_ETHERNET)
return -EINVAL;
vlan = nla_data(a);
if (!eth_type_vlan(vlan->vlan_tpid))
return -EINVAL;
if (!(vlan->vlan_tci & htons(VLAN_CFI_MASK)))
return -EINVAL;
vlan_tci = vlan->vlan_tci;
break;
case OVS_ACTION_ATTR_RECIRC:
break;
case OVS_ACTION_ATTR_ADD_MPLS: {
const struct ovs_action_add_mpls *mpls = nla_data(a);
if (!eth_p_mpls(mpls->mpls_ethertype))
return -EINVAL;
if (mpls->tun_flags & OVS_MPLS_L3_TUNNEL_FLAG_MASK) {
if (vlan_tci & htons(VLAN_CFI_MASK) ||
(eth_type != htons(ETH_P_IP) &&
eth_type != htons(ETH_P_IPV6) &&
eth_type != htons(ETH_P_ARP) &&
eth_type != htons(ETH_P_RARP) &&
!eth_p_mpls(eth_type)))
return -EINVAL;
mpls_label_count++;
} else {
if (mac_proto == MAC_PROTO_ETHERNET) {
mpls_label_count = 1;
mac_proto = MAC_PROTO_NONE;
} else {
mpls_label_count++;
}
}
eth_type = mpls->mpls_ethertype;
break;
}
case OVS_ACTION_ATTR_PUSH_MPLS: {
const struct ovs_action_push_mpls *mpls = nla_data(a);
if (!eth_p_mpls(mpls->mpls_ethertype))
return -EINVAL;
/* Only allow pushing MPLS onto a limited set of EtherTypes, i.e.
 * packets that have a known tag order.
 */
if (vlan_tci & htons(VLAN_CFI_MASK) ||
(eth_type != htons(ETH_P_IP) &&
eth_type != htons(ETH_P_IPV6) &&
eth_type != htons(ETH_P_ARP) &&
eth_type != htons(ETH_P_RARP) &&
!eth_p_mpls(eth_type)))
return -EINVAL;
eth_type = mpls->mpls_ethertype;
mpls_label_count++;
break;
}
case OVS_ACTION_ATTR_POP_MPLS: {
__be16 proto;
if (vlan_tci & htons(VLAN_CFI_MASK) ||
!eth_p_mpls(eth_type))
return -EINVAL;
/* Disallow subsequent L2.5+ set actions and mpls_pop
* actions once the last MPLS label in the packet is
* popped as there is no check here to ensure that
* the new eth type is valid and thus set actions could
* write off the end of the packet or otherwise corrupt
* it.
*
* Support for these actions is planned using packet
* recirculation.
*/
proto = nla_get_be16(a);
if (proto == htons(ETH_P_TEB) &&
mac_proto != MAC_PROTO_NONE)
return -EINVAL;
mpls_label_count--;
if (!eth_p_mpls(proto) || !mpls_label_count)
eth_type = htons(0);
else
eth_type = proto;
break;
}
case OVS_ACTION_ATTR_SET:
err = validate_set(a, key, sfa,
&skip_copy, mac_proto, eth_type,
false, log);
if (err)
return err;
break;
case OVS_ACTION_ATTR_SET_MASKED:
err = validate_set(a, key, sfa,
&skip_copy, mac_proto, eth_type,
true, log);
if (err)
return err;
break;
case OVS_ACTION_ATTR_SAMPLE: {
bool last = nla_is_last(a, rem);
err = validate_and_copy_sample(net, a, key, sfa,
eth_type, vlan_tci,
mpls_label_count,
log, last);
if (err)
return err;
skip_copy = true;
break;
}
case OVS_ACTION_ATTR_CT:
err = ovs_ct_copy_action(net, a, key, sfa, log);
if (err)
return err;
skip_copy = true;
break;
case OVS_ACTION_ATTR_CT_CLEAR:
break;
case OVS_ACTION_ATTR_PUSH_ETH:
/* Disallow pushing an Ethernet header if one
* is already present */
if (mac_proto != MAC_PROTO_NONE)
return -EINVAL;
mac_proto = MAC_PROTO_ETHERNET;
break;
case OVS_ACTION_ATTR_POP_ETH:
if (mac_proto != MAC_PROTO_ETHERNET)
return -EINVAL;
if (vlan_tci & htons(VLAN_CFI_MASK))
return -EINVAL;
mac_proto = MAC_PROTO_NONE;
break;
case OVS_ACTION_ATTR_PUSH_NSH:
if (mac_proto != MAC_PROTO_ETHERNET) {
u8 next_proto;
next_proto = tun_p_from_eth_p(eth_type);
if (!next_proto)
return -EINVAL;
}
mac_proto = MAC_PROTO_NONE;
if (!validate_nsh(nla_data(a), false, true, true))
return -EINVAL;
break;
case OVS_ACTION_ATTR_POP_NSH: {
__be16 inner_proto;
if (eth_type != htons(ETH_P_NSH))
return -EINVAL;
inner_proto = tun_p_to_eth_p(key->nsh.base.np);
if (!inner_proto)
return -EINVAL;
if (key->nsh.base.np == TUN_P_ETHERNET)
mac_proto = MAC_PROTO_ETHERNET;
else
mac_proto = MAC_PROTO_NONE;
break;
}
case OVS_ACTION_ATTR_METER:
/* Non-existent meters are simply ignored. */
break;
case OVS_ACTION_ATTR_CLONE: {
bool last = nla_is_last(a, rem);
err = validate_and_copy_clone(net, a, key, sfa,
eth_type, vlan_tci,
mpls_label_count,
log, last);
if (err)
return err;
skip_copy = true;
break;
}
case OVS_ACTION_ATTR_CHECK_PKT_LEN: {
bool last = nla_is_last(a, rem);
err = validate_and_copy_check_pkt_len(net, a, key, sfa,
eth_type,
vlan_tci,
mpls_label_count,
log, last);
if (err)
return err;
skip_copy = true;
break;
}
case OVS_ACTION_ATTR_DEC_TTL:
err = validate_and_copy_dec_ttl(net, a, key, sfa,
eth_type, vlan_tci,
mpls_label_count, log);
if (err)
return err;
skip_copy = true;
break;
case OVS_ACTION_ATTR_DROP:
if (!nla_is_last(a, rem))
return -EINVAL;
break;
default:
OVS_NLERR(log, "Unknown Action type %d", type);
return -EINVAL;
}
if (!skip_copy) {
err = copy_action(a, sfa, log);
if (err)
return err;
}
}
if (rem > 0)
return -EINVAL;
return 0;
}
/* 'key' must be the masked key. */
int ovs_nla_copy_actions(struct net *net, const struct nlattr *attr,
const struct sw_flow_key *key,
struct sw_flow_actions **sfa, bool log)
{
int err;
u32 mpls_label_count = 0;
*sfa = nla_alloc_flow_actions(min(nla_len(attr), MAX_ACTIONS_BUFSIZE));
if (IS_ERR(*sfa))
return PTR_ERR(*sfa);
if (eth_p_mpls(key->eth.type))
mpls_label_count = hweight_long(key->mpls.num_labels_mask);
(*sfa)->orig_len = nla_len(attr);
err = __ovs_nla_copy_actions(net, attr, key, sfa, key->eth.type,
key->eth.vlan.tci, mpls_label_count, log);
if (err)
ovs_nla_free_flow_actions(*sfa);
return err;
}
static int sample_action_to_attr(const struct nlattr *attr,
struct sk_buff *skb)
{
struct nlattr *start, *ac_start = NULL, *sample_arg;
int err = 0, rem = nla_len(attr);
const struct sample_arg *arg;
struct nlattr *actions;
start = nla_nest_start_noflag(skb, OVS_ACTION_ATTR_SAMPLE);
if (!start)
return -EMSGSIZE;
sample_arg = nla_data(attr);
arg = nla_data(sample_arg);
actions = nla_next(sample_arg, &rem);
if (nla_put_u32(skb, OVS_SAMPLE_ATTR_PROBABILITY, arg->probability)) {
err = -EMSGSIZE;
goto out;
}
ac_start = nla_nest_start_noflag(skb, OVS_SAMPLE_ATTR_ACTIONS);
if (!ac_start) {
err = -EMSGSIZE;
goto out;
}
err = ovs_nla_put_actions(actions, rem, skb);
out:
if (err) {
nla_nest_cancel(skb, ac_start);
nla_nest_cancel(skb, start);
} else {
nla_nest_end(skb, ac_start);
nla_nest_end(skb, start);
}
return err;
}
static int clone_action_to_attr(const struct nlattr *attr,
struct sk_buff *skb)
{
struct nlattr *start;
int err = 0, rem = nla_len(attr);
start = nla_nest_start_noflag(skb, OVS_ACTION_ATTR_CLONE);
if (!start)
return -EMSGSIZE;
/* Skip OVS_CLONE_ATTR_EXEC, which is always the first attribute. */
attr = nla_next(nla_data(attr), &rem);
err = ovs_nla_put_actions(attr, rem, skb);
if (err)
nla_nest_cancel(skb, start);
else
nla_nest_end(skb, start);
return err;
}
static int check_pkt_len_action_to_attr(const struct nlattr *attr,
struct sk_buff *skb)
{
struct nlattr *start, *ac_start = NULL;
const struct check_pkt_len_arg *arg;
const struct nlattr *a, *cpl_arg;
int err = 0, rem = nla_len(attr);
start = nla_nest_start_noflag(skb, OVS_ACTION_ATTR_CHECK_PKT_LEN);
if (!start)
return -EMSGSIZE;
/* The first nested attribute in 'attr' is always
* 'OVS_CHECK_PKT_LEN_ATTR_ARG'.
*/
cpl_arg = nla_data(attr);
arg = nla_data(cpl_arg);
if (nla_put_u16(skb, OVS_CHECK_PKT_LEN_ATTR_PKT_LEN, arg->pkt_len)) {
err = -EMSGSIZE;
goto out;
}
/* Second nested attribute in 'attr' is always
* 'OVS_CHECK_PKT_LEN_ATTR_ACTIONS_IF_LESS_EQUAL'.
*/
a = nla_next(cpl_arg, &rem);
ac_start = nla_nest_start_noflag(skb,
OVS_CHECK_PKT_LEN_ATTR_ACTIONS_IF_LESS_EQUAL);
if (!ac_start) {
err = -EMSGSIZE;
goto out;
}
err = ovs_nla_put_actions(nla_data(a), nla_len(a), skb);
if (err) {
nla_nest_cancel(skb, ac_start);
goto out;
} else {
nla_nest_end(skb, ac_start);
}
/* Third nested attribute in 'attr' is always
* OVS_CHECK_PKT_LEN_ATTR_ACTIONS_IF_GREATER.
*/
a = nla_next(a, &rem);
ac_start = nla_nest_start_noflag(skb,
OVS_CHECK_PKT_LEN_ATTR_ACTIONS_IF_GREATER);
if (!ac_start) {
err = -EMSGSIZE;
goto out;
}
err = ovs_nla_put_actions(nla_data(a), nla_len(a), skb);
if (err) {
nla_nest_cancel(skb, ac_start);
goto out;
} else {
nla_nest_end(skb, ac_start);
}
nla_nest_end(skb, start);
return 0;
out:
nla_nest_cancel(skb, start);
return err;
}
static int dec_ttl_action_to_attr(const struct nlattr *attr,
struct sk_buff *skb)
{
struct nlattr *start, *action_start;
const struct nlattr *a;
int err = 0, rem;
start = nla_nest_start_noflag(skb, OVS_ACTION_ATTR_DEC_TTL);
if (!start)
return -EMSGSIZE;
nla_for_each_attr(a, nla_data(attr), nla_len(attr), rem) {
switch (nla_type(a)) {
case OVS_DEC_TTL_ATTR_ACTION:
action_start = nla_nest_start_noflag(skb, OVS_DEC_TTL_ATTR_ACTION);
if (!action_start) {
err = -EMSGSIZE;
goto out;
}
err = ovs_nla_put_actions(nla_data(a), nla_len(a), skb);
if (err)
goto out;
nla_nest_end(skb, action_start);
break;
default:
/* Ignore all other options for future compatibility. */
break;
}
}
nla_nest_end(skb, start);
return 0;
out:
nla_nest_cancel(skb, start);
return err;
}
static int set_action_to_attr(const struct nlattr *a, struct sk_buff *skb)
{
const struct nlattr *ovs_key = nla_data(a);
int key_type = nla_type(ovs_key);
struct nlattr *start;
int err;
switch (key_type) {
case OVS_KEY_ATTR_TUNNEL_INFO: {
struct ovs_tunnel_info *ovs_tun = nla_data(ovs_key);
struct ip_tunnel_info *tun_info = &ovs_tun->tun_dst->u.tun_info;
start = nla_nest_start_noflag(skb, OVS_ACTION_ATTR_SET);
if (!start)
return -EMSGSIZE;
err = ip_tun_to_nlattr(skb, &tun_info->key,
ip_tunnel_info_opts(tun_info),
tun_info->options_len,
ip_tunnel_info_af(tun_info), tun_info->mode);
if (err)
return err;
nla_nest_end(skb, start);
break;
}
default:
if (nla_put(skb, OVS_ACTION_ATTR_SET, nla_len(a), ovs_key))
return -EMSGSIZE;
break;
}
return 0;
}
static int masked_set_action_to_set_action_attr(const struct nlattr *a,
struct sk_buff *skb)
{
const struct nlattr *ovs_key = nla_data(a);
struct nlattr *nla;
size_t key_len = nla_len(ovs_key) / 2;
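/* A masked set action stores the value immediately followed by an
 * equal-length mask, so the value occupies the first half of the
 * attribute payload.
 */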
/* Revert the conversion we did from a non-masked set action to
* masked set action.
*/
nla = nla_nest_start_noflag(skb, OVS_ACTION_ATTR_SET);
if (!nla)
return -EMSGSIZE;
if (nla_put(skb, nla_type(ovs_key), key_len, nla_data(ovs_key)))
return -EMSGSIZE;
nla_nest_end(skb, nla);
return 0;
}
int ovs_nla_put_actions(const struct nlattr *attr, int len, struct sk_buff *skb)
{
const struct nlattr *a;
int rem, err;
nla_for_each_attr(a, attr, len, rem) {
int type = nla_type(a);
switch (type) {
case OVS_ACTION_ATTR_SET:
err = set_action_to_attr(a, skb);
if (err)
return err;
break;
case OVS_ACTION_ATTR_SET_TO_MASKED:
err = masked_set_action_to_set_action_attr(a, skb);
if (err)
return err;
break;
case OVS_ACTION_ATTR_SAMPLE:
err = sample_action_to_attr(a, skb);
if (err)
return err;
break;
case OVS_ACTION_ATTR_CT:
err = ovs_ct_action_to_attr(nla_data(a), skb);
if (err)
return err;
break;
case OVS_ACTION_ATTR_CLONE:
err = clone_action_to_attr(a, skb);
if (err)
return err;
break;
case OVS_ACTION_ATTR_CHECK_PKT_LEN:
err = check_pkt_len_action_to_attr(a, skb);
if (err)
return err;
break;
case OVS_ACTION_ATTR_DEC_TTL:
err = dec_ttl_action_to_attr(a, skb);
if (err)
return err;
break;
default:
if (nla_put(skb, type, nla_len(a), nla_data(a)))
return -EMSGSIZE;
break;
}
}
return 0;
}
| linux-master | net/openvswitch/flow_netlink.c |
// SPDX-License-Identifier: GPL-2.0-only
/*
* Copyright (c) 2007-2017 Nicira, Inc.
*/
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
#include <linux/skbuff.h>
#include <linux/in.h>
#include <linux/ip.h>
#include <linux/openvswitch.h>
#include <linux/sctp.h>
#include <linux/tcp.h>
#include <linux/udp.h>
#include <linux/in6.h>
#include <linux/if_arp.h>
#include <linux/if_vlan.h>
#include <net/dst.h>
#include <net/gso.h>
#include <net/ip.h>
#include <net/ipv6.h>
#include <net/ip6_fib.h>
#include <net/checksum.h>
#include <net/dsfield.h>
#include <net/mpls.h>
#include <net/sctp/checksum.h>
#include "datapath.h"
#include "drop.h"
#include "flow.h"
#include "conntrack.h"
#include "vport.h"
#include "flow_netlink.h"
#include "openvswitch_trace.h"
struct deferred_action {
struct sk_buff *skb;
const struct nlattr *actions;
int actions_len;
/* Store pkt_key clone when creating deferred action. */
struct sw_flow_key pkt_key;
};
#define MAX_L2_LEN (VLAN_ETH_HLEN + 3 * MPLS_HLEN)
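/* MAX_L2_LEN bounds the L2 header bytes saved in ovs_frag_data below:
 * an Ethernet + VLAN header plus up to three MPLS labels.
 */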
struct ovs_frag_data {
unsigned long dst;
struct vport *vport;
struct ovs_skb_cb cb;
__be16 inner_protocol;
u16 network_offset; /* valid only for MPLS */
u16 vlan_tci;
__be16 vlan_proto;
unsigned int l2_len;
u8 mac_proto;
u8 l2_data[MAX_L2_LEN];
};
static DEFINE_PER_CPU(struct ovs_frag_data, ovs_frag_data_storage);
#define DEFERRED_ACTION_FIFO_SIZE 10
#define OVS_RECURSION_LIMIT 5
#define OVS_DEFERRED_ACTION_THRESHOLD (OVS_RECURSION_LIMIT - 2)
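/* Nested action execution (sample/clone/recirc) is bounded: up to
 * OVS_DEFERRED_ACTION_THRESHOLD levels run inline using pre-allocated
 * per-CPU flow keys, deeper nesting is deferred to the per-CPU
 * action_fifo and replayed at the top level, and anything beyond
 * OVS_RECURSION_LIMIT is dropped (see ovs_execute_actions()).
 */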
struct action_fifo {
int head;
int tail;
/* Deferred action fifo queue storage. */
struct deferred_action fifo[DEFERRED_ACTION_FIFO_SIZE];
};
struct action_flow_keys {
struct sw_flow_key key[OVS_DEFERRED_ACTION_THRESHOLD];
};
static struct action_fifo __percpu *action_fifos;
static struct action_flow_keys __percpu *flow_keys;
static DEFINE_PER_CPU(int, exec_actions_level);
/* Make a clone of the 'key', using the pre-allocated percpu 'flow_keys'
 * space. Return NULL if out of key space.
 */
static struct sw_flow_key *clone_key(const struct sw_flow_key *key_)
{
struct action_flow_keys *keys = this_cpu_ptr(flow_keys);
int level = this_cpu_read(exec_actions_level);
struct sw_flow_key *key = NULL;
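/* exec_actions_level is at least 1 here (ovs_execute_actions()
 * increments it before any action runs), so level - 1 selects the
 * pre-allocated key slot for the current recursion depth.
 */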
if (level <= OVS_DEFERRED_ACTION_THRESHOLD) {
key = &keys->key[level - 1];
*key = *key_;
}
return key;
}
static void action_fifo_init(struct action_fifo *fifo)
{
fifo->head = 0;
fifo->tail = 0;
}
static bool action_fifo_is_empty(const struct action_fifo *fifo)
{
return (fifo->head == fifo->tail);
}
static struct deferred_action *action_fifo_get(struct action_fifo *fifo)
{
if (action_fifo_is_empty(fifo))
return NULL;
return &fifo->fifo[fifo->tail++];
}
static struct deferred_action *action_fifo_put(struct action_fifo *fifo)
{
if (fifo->head >= DEFERRED_ACTION_FIFO_SIZE - 1)
return NULL;
return &fifo->fifo[fifo->head++];
}
/* Queue a deferred action; returns the new entry, or NULL if the fifo is full. */
static struct deferred_action *add_deferred_actions(struct sk_buff *skb,
const struct sw_flow_key *key,
const struct nlattr *actions,
const int actions_len)
{
struct action_fifo *fifo;
struct deferred_action *da;
fifo = this_cpu_ptr(action_fifos);
da = action_fifo_put(fifo);
if (da) {
da->skb = skb;
da->actions = actions;
da->actions_len = actions_len;
da->pkt_key = *key;
}
return da;
}
static void invalidate_flow_key(struct sw_flow_key *key)
{
key->mac_proto |= SW_FLOW_KEY_INVALID;
}
static bool is_flow_key_valid(const struct sw_flow_key *key)
{
return !(key->mac_proto & SW_FLOW_KEY_INVALID);
}
static int clone_execute(struct datapath *dp, struct sk_buff *skb,
struct sw_flow_key *key,
u32 recirc_id,
const struct nlattr *actions, int len,
bool last, bool clone_flow_key);
static int do_execute_actions(struct datapath *dp, struct sk_buff *skb,
struct sw_flow_key *key,
const struct nlattr *attr, int len);
static int push_mpls(struct sk_buff *skb, struct sw_flow_key *key,
__be32 mpls_lse, __be16 mpls_ethertype, __u16 mac_len)
{
int err;
err = skb_mpls_push(skb, mpls_lse, mpls_ethertype, mac_len, !!mac_len);
if (err)
return err;
if (!mac_len)
key->mac_proto = MAC_PROTO_NONE;
invalidate_flow_key(key);
return 0;
}
static int pop_mpls(struct sk_buff *skb, struct sw_flow_key *key,
const __be16 ethertype)
{
int err;
err = skb_mpls_pop(skb, ethertype, skb->mac_len,
ovs_key_mac_proto(key) == MAC_PROTO_ETHERNET);
if (err)
return err;
if (ethertype == htons(ETH_P_TEB))
key->mac_proto = MAC_PROTO_ETHERNET;
invalidate_flow_key(key);
return 0;
}
static int set_mpls(struct sk_buff *skb, struct sw_flow_key *flow_key,
const __be32 *mpls_lse, const __be32 *mask)
{
struct mpls_shim_hdr *stack;
__be32 lse;
int err;
if (!pskb_may_pull(skb, skb_network_offset(skb) + MPLS_HLEN))
return -ENOMEM;
stack = mpls_hdr(skb);
lse = OVS_MASKED(stack->label_stack_entry, *mpls_lse, *mask);
err = skb_mpls_update_lse(skb, lse);
if (err)
return err;
flow_key->mpls.lse[0] = lse;
return 0;
}
static int pop_vlan(struct sk_buff *skb, struct sw_flow_key *key)
{
int err;
err = skb_vlan_pop(skb);
if (skb_vlan_tag_present(skb)) {
invalidate_flow_key(key);
} else {
key->eth.vlan.tci = 0;
key->eth.vlan.tpid = 0;
}
return err;
}
static int push_vlan(struct sk_buff *skb, struct sw_flow_key *key,
const struct ovs_action_push_vlan *vlan)
{
if (skb_vlan_tag_present(skb)) {
invalidate_flow_key(key);
} else {
key->eth.vlan.tci = vlan->vlan_tci;
key->eth.vlan.tpid = vlan->vlan_tpid;
}
return skb_vlan_push(skb, vlan->vlan_tpid,
ntohs(vlan->vlan_tci) & ~VLAN_CFI_MASK);
}
/* 'src' is already properly masked. */
static void ether_addr_copy_masked(u8 *dst_, const u8 *src_, const u8 *mask_)
{
u16 *dst = (u16 *)dst_;
const u16 *src = (const u16 *)src_;
const u16 *mask = (const u16 *)mask_;
OVS_SET_MASKED(dst[0], src[0], mask[0]);
OVS_SET_MASKED(dst[1], src[1], mask[1]);
OVS_SET_MASKED(dst[2], src[2], mask[2]);
}
static int set_eth_addr(struct sk_buff *skb, struct sw_flow_key *flow_key,
const struct ovs_key_ethernet *key,
const struct ovs_key_ethernet *mask)
{
int err;
err = skb_ensure_writable(skb, ETH_HLEN);
if (unlikely(err))
return err;
skb_postpull_rcsum(skb, eth_hdr(skb), ETH_ALEN * 2);
ether_addr_copy_masked(eth_hdr(skb)->h_source, key->eth_src,
mask->eth_src);
ether_addr_copy_masked(eth_hdr(skb)->h_dest, key->eth_dst,
mask->eth_dst);
skb_postpush_rcsum(skb, eth_hdr(skb), ETH_ALEN * 2);
ether_addr_copy(flow_key->eth.src, eth_hdr(skb)->h_source);
ether_addr_copy(flow_key->eth.dst, eth_hdr(skb)->h_dest);
return 0;
}
/* pop_eth does not support VLAN packets as this action is never called
* for them.
*/
static int pop_eth(struct sk_buff *skb, struct sw_flow_key *key)
{
int err;
err = skb_eth_pop(skb);
if (err)
return err;
/* safe right before invalidate_flow_key */
key->mac_proto = MAC_PROTO_NONE;
invalidate_flow_key(key);
return 0;
}
static int push_eth(struct sk_buff *skb, struct sw_flow_key *key,
const struct ovs_action_push_eth *ethh)
{
int err;
err = skb_eth_push(skb, ethh->addresses.eth_dst,
ethh->addresses.eth_src);
if (err)
return err;
/* safe right before invalidate_flow_key */
key->mac_proto = MAC_PROTO_ETHERNET;
invalidate_flow_key(key);
return 0;
}
static int push_nsh(struct sk_buff *skb, struct sw_flow_key *key,
const struct nshhdr *nh)
{
int err;
err = nsh_push(skb, nh);
if (err)
return err;
/* safe right before invalidate_flow_key */
key->mac_proto = MAC_PROTO_NONE;
invalidate_flow_key(key);
return 0;
}
static int pop_nsh(struct sk_buff *skb, struct sw_flow_key *key)
{
int err;
err = nsh_pop(skb);
if (err)
return err;
/* safe right before invalidate_flow_key */
if (skb->protocol == htons(ETH_P_TEB))
key->mac_proto = MAC_PROTO_ETHERNET;
else
key->mac_proto = MAC_PROTO_NONE;
invalidate_flow_key(key);
return 0;
}
static void update_ip_l4_checksum(struct sk_buff *skb, struct iphdr *nh,
__be32 addr, __be32 new_addr)
{
int transport_len = skb->len - skb_transport_offset(skb);
if (nh->frag_off & htons(IP_OFFSET))
return;
if (nh->protocol == IPPROTO_TCP) {
if (likely(transport_len >= sizeof(struct tcphdr)))
inet_proto_csum_replace4(&tcp_hdr(skb)->check, skb,
addr, new_addr, true);
} else if (nh->protocol == IPPROTO_UDP) {
if (likely(transport_len >= sizeof(struct udphdr))) {
struct udphdr *uh = udp_hdr(skb);
if (uh->check || skb->ip_summed == CHECKSUM_PARTIAL) {
inet_proto_csum_replace4(&uh->check, skb,
addr, new_addr, true);
if (!uh->check)
uh->check = CSUM_MANGLED_0;
}
}
}
}
static void set_ip_addr(struct sk_buff *skb, struct iphdr *nh,
__be32 *addr, __be32 new_addr)
{
update_ip_l4_checksum(skb, nh, *addr, new_addr);
csum_replace4(&nh->check, *addr, new_addr);
skb_clear_hash(skb);
ovs_ct_clear(skb, NULL);
*addr = new_addr;
}
static void update_ipv6_checksum(struct sk_buff *skb, u8 l4_proto,
__be32 addr[4], const __be32 new_addr[4])
{
int transport_len = skb->len - skb_transport_offset(skb);
if (l4_proto == NEXTHDR_TCP) {
if (likely(transport_len >= sizeof(struct tcphdr)))
inet_proto_csum_replace16(&tcp_hdr(skb)->check, skb,
addr, new_addr, true);
} else if (l4_proto == NEXTHDR_UDP) {
if (likely(transport_len >= sizeof(struct udphdr))) {
struct udphdr *uh = udp_hdr(skb);
if (uh->check || skb->ip_summed == CHECKSUM_PARTIAL) {
inet_proto_csum_replace16(&uh->check, skb,
addr, new_addr, true);
if (!uh->check)
uh->check = CSUM_MANGLED_0;
}
}
} else if (l4_proto == NEXTHDR_ICMP) {
if (likely(transport_len >= sizeof(struct icmp6hdr)))
inet_proto_csum_replace16(&icmp6_hdr(skb)->icmp6_cksum,
skb, addr, new_addr, true);
}
}
static void mask_ipv6_addr(const __be32 old[4], const __be32 addr[4],
const __be32 mask[4], __be32 masked[4])
{
masked[0] = OVS_MASKED(old[0], addr[0], mask[0]);
masked[1] = OVS_MASKED(old[1], addr[1], mask[1]);
masked[2] = OVS_MASKED(old[2], addr[2], mask[2]);
masked[3] = OVS_MASKED(old[3], addr[3], mask[3]);
}
static void set_ipv6_addr(struct sk_buff *skb, u8 l4_proto,
__be32 addr[4], const __be32 new_addr[4],
bool recalculate_csum)
{
if (recalculate_csum)
update_ipv6_checksum(skb, l4_proto, addr, new_addr);
skb_clear_hash(skb);
ovs_ct_clear(skb, NULL);
memcpy(addr, new_addr, sizeof(__be32[4]));
}
static void set_ipv6_dsfield(struct sk_buff *skb, struct ipv6hdr *nh, u8 ipv6_tclass, u8 mask)
{
u8 old_ipv6_tclass = ipv6_get_dsfield(nh);
ipv6_tclass = OVS_MASKED(old_ipv6_tclass, ipv6_tclass, mask);
if (skb->ip_summed == CHECKSUM_COMPLETE)
csum_replace(&skb->csum, (__force __wsum)(old_ipv6_tclass << 12),
(__force __wsum)(ipv6_tclass << 12));
ipv6_change_dsfield(nh, ~mask, ipv6_tclass);
}
static void set_ipv6_fl(struct sk_buff *skb, struct ipv6hdr *nh, u32 fl, u32 mask)
{
u32 ofl;
ofl = nh->flow_lbl[0] << 16 | nh->flow_lbl[1] << 8 | nh->flow_lbl[2];
fl = OVS_MASKED(ofl, fl, mask);
/* Bits 21-24 are always unmasked, so this retains their values. */
nh->flow_lbl[0] = (u8)(fl >> 16);
nh->flow_lbl[1] = (u8)(fl >> 8);
nh->flow_lbl[2] = (u8)fl;
if (skb->ip_summed == CHECKSUM_COMPLETE)
csum_replace(&skb->csum, (__force __wsum)htonl(ofl), (__force __wsum)htonl(fl));
}
static void set_ipv6_ttl(struct sk_buff *skb, struct ipv6hdr *nh, u8 new_ttl, u8 mask)
{
new_ttl = OVS_MASKED(nh->hop_limit, new_ttl, mask);
if (skb->ip_summed == CHECKSUM_COMPLETE)
csum_replace(&skb->csum, (__force __wsum)(nh->hop_limit << 8),
(__force __wsum)(new_ttl << 8));
nh->hop_limit = new_ttl;
}
static void set_ip_ttl(struct sk_buff *skb, struct iphdr *nh, u8 new_ttl,
u8 mask)
{
new_ttl = OVS_MASKED(nh->ttl, new_ttl, mask);
csum_replace2(&nh->check, htons(nh->ttl << 8), htons(new_ttl << 8));
nh->ttl = new_ttl;
}
static int set_ipv4(struct sk_buff *skb, struct sw_flow_key *flow_key,
const struct ovs_key_ipv4 *key,
const struct ovs_key_ipv4 *mask)
{
struct iphdr *nh;
__be32 new_addr;
int err;
err = skb_ensure_writable(skb, skb_network_offset(skb) +
sizeof(struct iphdr));
if (unlikely(err))
return err;
nh = ip_hdr(skb);
/* Setting an IP address is typically only a side effect of
 * matching on it in the current userspace implementation, so it
 * makes sense to check whether the value actually changed.
 */
if (mask->ipv4_src) {
new_addr = OVS_MASKED(nh->saddr, key->ipv4_src, mask->ipv4_src);
if (unlikely(new_addr != nh->saddr)) {
set_ip_addr(skb, nh, &nh->saddr, new_addr);
flow_key->ipv4.addr.src = new_addr;
}
}
if (mask->ipv4_dst) {
new_addr = OVS_MASKED(nh->daddr, key->ipv4_dst, mask->ipv4_dst);
if (unlikely(new_addr != nh->daddr)) {
set_ip_addr(skb, nh, &nh->daddr, new_addr);
flow_key->ipv4.addr.dst = new_addr;
}
}
if (mask->ipv4_tos) {
ipv4_change_dsfield(nh, ~mask->ipv4_tos, key->ipv4_tos);
flow_key->ip.tos = nh->tos;
}
if (mask->ipv4_ttl) {
set_ip_ttl(skb, nh, key->ipv4_ttl, mask->ipv4_ttl);
flow_key->ip.ttl = nh->ttl;
}
return 0;
}
static bool is_ipv6_mask_nonzero(const __be32 addr[4])
{
return !!(addr[0] | addr[1] | addr[2] | addr[3]);
}
static int set_ipv6(struct sk_buff *skb, struct sw_flow_key *flow_key,
const struct ovs_key_ipv6 *key,
const struct ovs_key_ipv6 *mask)
{
struct ipv6hdr *nh;
int err;
err = skb_ensure_writable(skb, skb_network_offset(skb) +
sizeof(struct ipv6hdr));
if (unlikely(err))
return err;
nh = ipv6_hdr(skb);
/* Setting an IP address is typically only a side effect of
 * matching on it in the current userspace implementation, so it
 * makes sense to check whether the value actually changed.
 */
if (is_ipv6_mask_nonzero(mask->ipv6_src)) {
__be32 *saddr = (__be32 *)&nh->saddr;
__be32 masked[4];
mask_ipv6_addr(saddr, key->ipv6_src, mask->ipv6_src, masked);
if (unlikely(memcmp(saddr, masked, sizeof(masked)))) {
set_ipv6_addr(skb, flow_key->ip.proto, saddr, masked,
true);
memcpy(&flow_key->ipv6.addr.src, masked,
sizeof(flow_key->ipv6.addr.src));
}
}
if (is_ipv6_mask_nonzero(mask->ipv6_dst)) {
unsigned int offset = 0;
int flags = IP6_FH_F_SKIP_RH;
bool recalc_csum = true;
__be32 *daddr = (__be32 *)&nh->daddr;
__be32 masked[4];
mask_ipv6_addr(daddr, key->ipv6_dst, mask->ipv6_dst, masked);
if (unlikely(memcmp(daddr, masked, sizeof(masked)))) {
if (ipv6_ext_hdr(nh->nexthdr))
recalc_csum = (ipv6_find_hdr(skb, &offset,
NEXTHDR_ROUTING,
NULL, &flags)
!= NEXTHDR_ROUTING);
set_ipv6_addr(skb, flow_key->ip.proto, daddr, masked,
recalc_csum);
memcpy(&flow_key->ipv6.addr.dst, masked,
sizeof(flow_key->ipv6.addr.dst));
}
}
if (mask->ipv6_tclass) {
set_ipv6_dsfield(skb, nh, key->ipv6_tclass, mask->ipv6_tclass);
flow_key->ip.tos = ipv6_get_dsfield(nh);
}
if (mask->ipv6_label) {
set_ipv6_fl(skb, nh, ntohl(key->ipv6_label),
ntohl(mask->ipv6_label));
flow_key->ipv6.label =
*(__be32 *)nh & htonl(IPV6_FLOWINFO_FLOWLABEL);
}
if (mask->ipv6_hlimit) {
set_ipv6_ttl(skb, nh, key->ipv6_hlimit, mask->ipv6_hlimit);
flow_key->ip.ttl = nh->hop_limit;
}
return 0;
}
static int set_nsh(struct sk_buff *skb, struct sw_flow_key *flow_key,
const struct nlattr *a)
{
struct nshhdr *nh;
size_t length;
int err;
u8 flags;
u8 ttl;
int i;
struct ovs_key_nsh key;
struct ovs_key_nsh mask;
err = nsh_key_from_nlattr(a, &key, &mask);
if (err)
return err;
/* Make sure the NSH base header is there */
if (!pskb_may_pull(skb, skb_network_offset(skb) + NSH_BASE_HDR_LEN))
return -ENOMEM;
nh = nsh_hdr(skb);
length = nsh_hdr_len(nh);
/* Make sure the whole NSH header is there */
err = skb_ensure_writable(skb, skb_network_offset(skb) +
length);
if (unlikely(err))
return err;
nh = nsh_hdr(skb);
skb_postpull_rcsum(skb, nh, length);
flags = nsh_get_flags(nh);
flags = OVS_MASKED(flags, key.base.flags, mask.base.flags);
flow_key->nsh.base.flags = flags;
ttl = nsh_get_ttl(nh);
ttl = OVS_MASKED(ttl, key.base.ttl, mask.base.ttl);
flow_key->nsh.base.ttl = ttl;
nsh_set_flags_and_ttl(nh, flags, ttl);
nh->path_hdr = OVS_MASKED(nh->path_hdr, key.base.path_hdr,
mask.base.path_hdr);
flow_key->nsh.base.path_hdr = nh->path_hdr;
switch (nh->mdtype) {
case NSH_M_TYPE1:
for (i = 0; i < NSH_MD1_CONTEXT_SIZE; i++) {
nh->md1.context[i] =
OVS_MASKED(nh->md1.context[i], key.context[i],
mask.context[i]);
}
memcpy(flow_key->nsh.context, nh->md1.context,
sizeof(nh->md1.context));
break;
case NSH_M_TYPE2:
memset(flow_key->nsh.context, 0,
sizeof(flow_key->nsh.context));
break;
default:
return -EINVAL;
}
skb_postpush_rcsum(skb, nh, length);
return 0;
}
/* Must follow skb_ensure_writable() since that can move the skb data. */
static void set_tp_port(struct sk_buff *skb, __be16 *port,
__be16 new_port, __sum16 *check)
{
ovs_ct_clear(skb, NULL);
inet_proto_csum_replace2(check, skb, *port, new_port, false);
*port = new_port;
}
static int set_udp(struct sk_buff *skb, struct sw_flow_key *flow_key,
const struct ovs_key_udp *key,
const struct ovs_key_udp *mask)
{
struct udphdr *uh;
__be16 src, dst;
int err;
err = skb_ensure_writable(skb, skb_transport_offset(skb) +
sizeof(struct udphdr));
if (unlikely(err))
return err;
uh = udp_hdr(skb);
/* Either of the masks is non-zero, so do not bother checking them. */
src = OVS_MASKED(uh->source, key->udp_src, mask->udp_src);
dst = OVS_MASKED(uh->dest, key->udp_dst, mask->udp_dst);
if (uh->check && skb->ip_summed != CHECKSUM_PARTIAL) {
if (likely(src != uh->source)) {
set_tp_port(skb, &uh->source, src, &uh->check);
flow_key->tp.src = src;
}
if (likely(dst != uh->dest)) {
set_tp_port(skb, &uh->dest, dst, &uh->check);
flow_key->tp.dst = dst;
}
if (unlikely(!uh->check))
uh->check = CSUM_MANGLED_0;
} else {
uh->source = src;
uh->dest = dst;
flow_key->tp.src = src;
flow_key->tp.dst = dst;
ovs_ct_clear(skb, NULL);
}
skb_clear_hash(skb);
return 0;
}
static int set_tcp(struct sk_buff *skb, struct sw_flow_key *flow_key,
const struct ovs_key_tcp *key,
const struct ovs_key_tcp *mask)
{
struct tcphdr *th;
__be16 src, dst;
int err;
err = skb_ensure_writable(skb, skb_transport_offset(skb) +
sizeof(struct tcphdr));
if (unlikely(err))
return err;
th = tcp_hdr(skb);
src = OVS_MASKED(th->source, key->tcp_src, mask->tcp_src);
if (likely(src != th->source)) {
set_tp_port(skb, &th->source, src, &th->check);
flow_key->tp.src = src;
}
dst = OVS_MASKED(th->dest, key->tcp_dst, mask->tcp_dst);
if (likely(dst != th->dest)) {
set_tp_port(skb, &th->dest, dst, &th->check);
flow_key->tp.dst = dst;
}
skb_clear_hash(skb);
return 0;
}
static int set_sctp(struct sk_buff *skb, struct sw_flow_key *flow_key,
const struct ovs_key_sctp *key,
const struct ovs_key_sctp *mask)
{
unsigned int sctphoff = skb_transport_offset(skb);
struct sctphdr *sh;
__le32 old_correct_csum, new_csum, old_csum;
int err;
err = skb_ensure_writable(skb, sctphoff + sizeof(struct sctphdr));
if (unlikely(err))
return err;
sh = sctp_hdr(skb);
old_csum = sh->checksum;
old_correct_csum = sctp_compute_cksum(skb, sctphoff);
sh->source = OVS_MASKED(sh->source, key->sctp_src, mask->sctp_src);
sh->dest = OVS_MASKED(sh->dest, key->sctp_dst, mask->sctp_dst);
new_csum = sctp_compute_cksum(skb, sctphoff);
/* Carry any checksum errors through. */
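/* XOR-ing in (old_csum ^ old_correct_csum) preserves whatever error the
 * packet already carried, so an already-corrupt packet stays detectably
 * corrupt after the port rewrite.
 */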
sh->checksum = old_csum ^ old_correct_csum ^ new_csum;
skb_clear_hash(skb);
ovs_ct_clear(skb, NULL);
flow_key->tp.src = sh->source;
flow_key->tp.dst = sh->dest;
return 0;
}
static int ovs_vport_output(struct net *net, struct sock *sk,
struct sk_buff *skb)
{
struct ovs_frag_data *data = this_cpu_ptr(&ovs_frag_data_storage);
struct vport *vport = data->vport;
if (skb_cow_head(skb, data->l2_len) < 0) {
kfree_skb_reason(skb, SKB_DROP_REASON_NOMEM);
return -ENOMEM;
}
__skb_dst_copy(skb, data->dst);
*OVS_CB(skb) = data->cb;
skb->inner_protocol = data->inner_protocol;
if (data->vlan_tci & VLAN_CFI_MASK)
__vlan_hwaccel_put_tag(skb, data->vlan_proto, data->vlan_tci & ~VLAN_CFI_MASK);
else
__vlan_hwaccel_clear_tag(skb);
/* Reconstruct the MAC header. */
skb_push(skb, data->l2_len);
memcpy(skb->data, &data->l2_data, data->l2_len);
skb_postpush_rcsum(skb, skb->data, data->l2_len);
skb_reset_mac_header(skb);
if (eth_p_mpls(skb->protocol)) {
skb->inner_network_header = skb->network_header;
skb_set_network_header(skb, data->network_offset);
skb_reset_mac_len(skb);
}
ovs_vport_send(vport, skb, data->mac_proto);
return 0;
}
static unsigned int
ovs_dst_get_mtu(const struct dst_entry *dst)
{
return dst->dev->mtu;
}
static struct dst_ops ovs_dst_ops = {
.family = AF_UNSPEC,
.mtu = ovs_dst_get_mtu,
};
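/* ovs_fragment() below builds a temporary on-stack dst that uses these
 * ops, so ip_do_fragment()/ipv6_fragment() can query the egress device
 * MTU via ovs_dst_get_mtu() without a real route lookup.
 */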
/* prepare_frag() is called once per (larger-than-MTU) frame; its counterpart,
 * ovs_vport_output(), then restores the saved L2 state once per resulting
 * fragment.
 */
static void prepare_frag(struct vport *vport, struct sk_buff *skb,
u16 orig_network_offset, u8 mac_proto)
{
unsigned int hlen = skb_network_offset(skb);
struct ovs_frag_data *data;
data = this_cpu_ptr(&ovs_frag_data_storage);
data->dst = skb->_skb_refdst;
data->vport = vport;
data->cb = *OVS_CB(skb);
data->inner_protocol = skb->inner_protocol;
data->network_offset = orig_network_offset;
if (skb_vlan_tag_present(skb))
data->vlan_tci = skb_vlan_tag_get(skb) | VLAN_CFI_MASK;
else
data->vlan_tci = 0;
data->vlan_proto = skb->vlan_proto;
data->mac_proto = mac_proto;
data->l2_len = hlen;
memcpy(&data->l2_data, skb->data, hlen);
memset(IPCB(skb), 0, sizeof(struct inet_skb_parm));
skb_pull(skb, hlen);
}
static void ovs_fragment(struct net *net, struct vport *vport,
struct sk_buff *skb, u16 mru,
struct sw_flow_key *key)
{
enum ovs_drop_reason reason;
u16 orig_network_offset = 0;
if (eth_p_mpls(skb->protocol)) {
orig_network_offset = skb_network_offset(skb);
skb->network_header = skb->inner_network_header;
}
if (skb_network_offset(skb) > MAX_L2_LEN) {
OVS_NLERR(1, "L2 header too long to fragment");
reason = OVS_DROP_FRAG_L2_TOO_LONG;
goto err;
}
if (key->eth.type == htons(ETH_P_IP)) {
struct rtable ovs_rt = { 0 };
unsigned long orig_dst;
prepare_frag(vport, skb, orig_network_offset,
ovs_key_mac_proto(key));
dst_init(&ovs_rt.dst, &ovs_dst_ops, NULL, 1,
DST_OBSOLETE_NONE, DST_NOCOUNT);
ovs_rt.dst.dev = vport->dev;
orig_dst = skb->_skb_refdst;
skb_dst_set_noref(skb, &ovs_rt.dst);
IPCB(skb)->frag_max_size = mru;
ip_do_fragment(net, skb->sk, skb, ovs_vport_output);
refdst_drop(orig_dst);
} else if (key->eth.type == htons(ETH_P_IPV6)) {
unsigned long orig_dst;
struct rt6_info ovs_rt;
prepare_frag(vport, skb, orig_network_offset,
ovs_key_mac_proto(key));
memset(&ovs_rt, 0, sizeof(ovs_rt));
dst_init(&ovs_rt.dst, &ovs_dst_ops, NULL, 1,
DST_OBSOLETE_NONE, DST_NOCOUNT);
ovs_rt.dst.dev = vport->dev;
orig_dst = skb->_skb_refdst;
skb_dst_set_noref(skb, &ovs_rt.dst);
IP6CB(skb)->frag_max_size = mru;
ipv6_stub->ipv6_fragment(net, skb->sk, skb, ovs_vport_output);
refdst_drop(orig_dst);
} else {
WARN_ONCE(1, "Failed fragment ->%s: eth=%04x, MRU=%d, MTU=%d.",
ovs_vport_name(vport), ntohs(key->eth.type), mru,
vport->dev->mtu);
reason = OVS_DROP_FRAG_INVALID_PROTO;
goto err;
}
return;
err:
ovs_kfree_skb_reason(skb, reason);
}
static void do_output(struct datapath *dp, struct sk_buff *skb, int out_port,
struct sw_flow_key *key)
{
struct vport *vport = ovs_vport_rcu(dp, out_port);
if (likely(vport && netif_carrier_ok(vport->dev))) {
u16 mru = OVS_CB(skb)->mru;
u32 cutlen = OVS_CB(skb)->cutlen;
if (unlikely(cutlen > 0)) {
if (skb->len - cutlen > ovs_mac_header_len(key))
pskb_trim(skb, skb->len - cutlen);
else
pskb_trim(skb, ovs_mac_header_len(key));
}
if (likely(!mru ||
(skb->len <= mru + vport->dev->hard_header_len))) {
ovs_vport_send(vport, skb, ovs_key_mac_proto(key));
} else if (mru <= vport->dev->mtu) {
struct net *net = read_pnet(&dp->net);
ovs_fragment(net, vport, skb, mru, key);
} else {
kfree_skb_reason(skb, SKB_DROP_REASON_PKT_TOO_BIG);
}
} else {
kfree_skb_reason(skb, SKB_DROP_REASON_DEV_READY);
}
}
static int output_userspace(struct datapath *dp, struct sk_buff *skb,
struct sw_flow_key *key, const struct nlattr *attr,
const struct nlattr *actions, int actions_len,
uint32_t cutlen)
{
struct dp_upcall_info upcall;
const struct nlattr *a;
int rem;
memset(&upcall, 0, sizeof(upcall));
upcall.cmd = OVS_PACKET_CMD_ACTION;
upcall.mru = OVS_CB(skb)->mru;
for (a = nla_data(attr), rem = nla_len(attr); rem > 0;
a = nla_next(a, &rem)) {
switch (nla_type(a)) {
case OVS_USERSPACE_ATTR_USERDATA:
upcall.userdata = a;
break;
case OVS_USERSPACE_ATTR_PID:
if (dp->user_features &
OVS_DP_F_DISPATCH_UPCALL_PER_CPU)
upcall.portid =
ovs_dp_get_upcall_portid(dp,
smp_processor_id());
else
upcall.portid = nla_get_u32(a);
break;
case OVS_USERSPACE_ATTR_EGRESS_TUN_PORT: {
/* Get egress tunnel info. */
struct vport *vport;
vport = ovs_vport_rcu(dp, nla_get_u32(a));
if (vport) {
int err;
err = dev_fill_metadata_dst(vport->dev, skb);
if (!err)
upcall.egress_tun_info = skb_tunnel_info(skb);
}
break;
}
case OVS_USERSPACE_ATTR_ACTIONS: {
/* Include actions. */
upcall.actions = actions;
upcall.actions_len = actions_len;
break;
}
} /* End of switch. */
}
return ovs_dp_upcall(dp, skb, key, &upcall, cutlen);
}
static int dec_ttl_exception_handler(struct datapath *dp, struct sk_buff *skb,
struct sw_flow_key *key,
const struct nlattr *attr)
{
/* The first attribute is always 'OVS_DEC_TTL_ATTR_ACTION'. */
struct nlattr *actions = nla_data(attr);
if (nla_len(actions))
return clone_execute(dp, skb, key, 0, nla_data(actions),
nla_len(actions), true, false);
ovs_kfree_skb_reason(skb, OVS_DROP_IP_TTL);
return 0;
}
/* When 'last' is true, sample() should always consume the 'skb'.
 * Otherwise, sample() should keep 'skb' intact regardless of what
 * actions are executed within sample().
 */
static int sample(struct datapath *dp, struct sk_buff *skb,
struct sw_flow_key *key, const struct nlattr *attr,
bool last)
{
struct nlattr *actions;
struct nlattr *sample_arg;
int rem = nla_len(attr);
const struct sample_arg *arg;
bool clone_flow_key;
/* The first action is always 'OVS_SAMPLE_ATTR_ARG'. */
sample_arg = nla_data(attr);
arg = nla_data(sample_arg);
actions = nla_next(sample_arg, &rem);
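/* arg->probability is scaled across the full u32 range: U32_MAX means
 * always sample, 0 means never, and otherwise the packet is sampled
 * when a random u32 is not larger than it.
 */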
if ((arg->probability != U32_MAX) &&
(!arg->probability || get_random_u32() > arg->probability)) {
if (last)
ovs_kfree_skb_reason(skb, OVS_DROP_LAST_ACTION);
return 0;
}
clone_flow_key = !arg->exec;
return clone_execute(dp, skb, key, 0, actions, rem, last,
clone_flow_key);
}
/* When 'last' is true, clone() should always consume the 'skb'.
 * Otherwise, clone() should keep 'skb' intact regardless of what
 * actions are executed within clone().
 */
static int clone(struct datapath *dp, struct sk_buff *skb,
struct sw_flow_key *key, const struct nlattr *attr,
bool last)
{
struct nlattr *actions;
struct nlattr *clone_arg;
int rem = nla_len(attr);
bool dont_clone_flow_key;
/* The first action is always 'OVS_CLONE_ATTR_EXEC'. */
clone_arg = nla_data(attr);
dont_clone_flow_key = nla_get_u32(clone_arg);
actions = nla_next(clone_arg, &rem);
return clone_execute(dp, skb, key, 0, actions, rem, last,
!dont_clone_flow_key);
}
static void execute_hash(struct sk_buff *skb, struct sw_flow_key *key,
const struct nlattr *attr)
{
struct ovs_action_hash *hash_act = nla_data(attr);
u32 hash = 0;
if (hash_act->hash_alg == OVS_HASH_ALG_L4) {
/* OVS_HASH_ALG_L4 hashing type. */
hash = skb_get_hash(skb);
} else if (hash_act->hash_alg == OVS_HASH_ALG_SYM_L4) {
/* OVS_HASH_ALG_SYM_L4 hashing type. NOTE: this doesn't
* extend past an encapsulated header.
*/
hash = __skb_get_hash_symmetric(skb);
}
hash = jhash_1word(hash, hash_act->hash_basis);
if (!hash)
hash = 0x1;
key->ovs_flow_hash = hash;
}
static int execute_set_action(struct sk_buff *skb,
struct sw_flow_key *flow_key,
const struct nlattr *a)
{
/* Only tunnel set execution is supported without a mask. */
if (nla_type(a) == OVS_KEY_ATTR_TUNNEL_INFO) {
struct ovs_tunnel_info *tun = nla_data(a);
skb_dst_drop(skb);
dst_hold((struct dst_entry *)tun->tun_dst);
skb_dst_set(skb, (struct dst_entry *)tun->tun_dst);
return 0;
}
return -EINVAL;
}
/* Mask is at the midpoint of the data. */
#define get_mask(a, type) ((const type)nla_data(a) + 1)
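/* Example layout for OVS_KEY_ATTR_IPV4:
 *   | struct ovs_key_ipv4 (value) | struct ovs_key_ipv4 (mask) |
 *   ^ nla_data(a)                 ^ get_mask(a, struct ovs_key_ipv4 *)
 */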
static int execute_masked_set_action(struct sk_buff *skb,
struct sw_flow_key *flow_key,
const struct nlattr *a)
{
int err = 0;
switch (nla_type(a)) {
case OVS_KEY_ATTR_PRIORITY:
OVS_SET_MASKED(skb->priority, nla_get_u32(a),
*get_mask(a, u32 *));
flow_key->phy.priority = skb->priority;
break;
case OVS_KEY_ATTR_SKB_MARK:
OVS_SET_MASKED(skb->mark, nla_get_u32(a), *get_mask(a, u32 *));
flow_key->phy.skb_mark = skb->mark;
break;
case OVS_KEY_ATTR_TUNNEL_INFO:
/* Masked data not supported for tunnel. */
err = -EINVAL;
break;
case OVS_KEY_ATTR_ETHERNET:
err = set_eth_addr(skb, flow_key, nla_data(a),
get_mask(a, struct ovs_key_ethernet *));
break;
case OVS_KEY_ATTR_NSH:
err = set_nsh(skb, flow_key, a);
break;
case OVS_KEY_ATTR_IPV4:
err = set_ipv4(skb, flow_key, nla_data(a),
get_mask(a, struct ovs_key_ipv4 *));
break;
case OVS_KEY_ATTR_IPV6:
err = set_ipv6(skb, flow_key, nla_data(a),
get_mask(a, struct ovs_key_ipv6 *));
break;
case OVS_KEY_ATTR_TCP:
err = set_tcp(skb, flow_key, nla_data(a),
get_mask(a, struct ovs_key_tcp *));
break;
case OVS_KEY_ATTR_UDP:
err = set_udp(skb, flow_key, nla_data(a),
get_mask(a, struct ovs_key_udp *));
break;
case OVS_KEY_ATTR_SCTP:
err = set_sctp(skb, flow_key, nla_data(a),
get_mask(a, struct ovs_key_sctp *));
break;
case OVS_KEY_ATTR_MPLS:
err = set_mpls(skb, flow_key, nla_data(a), get_mask(a,
__be32 *));
break;
case OVS_KEY_ATTR_CT_STATE:
case OVS_KEY_ATTR_CT_ZONE:
case OVS_KEY_ATTR_CT_MARK:
case OVS_KEY_ATTR_CT_LABELS:
case OVS_KEY_ATTR_CT_ORIG_TUPLE_IPV4:
case OVS_KEY_ATTR_CT_ORIG_TUPLE_IPV6:
err = -EINVAL;
break;
}
return err;
}
static int execute_recirc(struct datapath *dp, struct sk_buff *skb,
struct sw_flow_key *key,
const struct nlattr *a, bool last)
{
u32 recirc_id;
if (!is_flow_key_valid(key)) {
int err;
err = ovs_flow_key_update(skb, key);
if (err)
return err;
}
BUG_ON(!is_flow_key_valid(key));
recirc_id = nla_get_u32(a);
return clone_execute(dp, skb, key, recirc_id, NULL, 0, last, true);
}
static int execute_check_pkt_len(struct datapath *dp, struct sk_buff *skb,
struct sw_flow_key *key,
const struct nlattr *attr, bool last)
{
struct ovs_skb_cb *ovs_cb = OVS_CB(skb);
const struct nlattr *actions, *cpl_arg;
int len, max_len, rem = nla_len(attr);
const struct check_pkt_len_arg *arg;
bool clone_flow_key;
/* The first netlink attribute in 'attr' is always
* 'OVS_CHECK_PKT_LEN_ATTR_ARG'.
*/
cpl_arg = nla_data(attr);
arg = nla_data(cpl_arg);
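/* For locally reassembled fragments use the original maximum receive
 * unit plus the L2 header as the effective length; otherwise use the
 * actual skb length.
 */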
len = ovs_cb->mru ? ovs_cb->mru + skb->mac_len : skb->len;
max_len = arg->pkt_len;
if ((skb_is_gso(skb) && skb_gso_validate_mac_len(skb, max_len)) ||
len <= max_len) {
/* Second netlink attribute in 'attr' is always
* 'OVS_CHECK_PKT_LEN_ATTR_ACTIONS_IF_LESS_EQUAL'.
*/
actions = nla_next(cpl_arg, &rem);
clone_flow_key = !arg->exec_for_lesser_equal;
} else {
/* Third netlink attribute in 'attr' is always
* 'OVS_CHECK_PKT_LEN_ATTR_ACTIONS_IF_GREATER'.
*/
actions = nla_next(cpl_arg, &rem);
actions = nla_next(actions, &rem);
clone_flow_key = !arg->exec_for_greater;
}
return clone_execute(dp, skb, key, 0, nla_data(actions),
nla_len(actions), last, clone_flow_key);
}
static int execute_dec_ttl(struct sk_buff *skb, struct sw_flow_key *key)
{
int err;
if (skb->protocol == htons(ETH_P_IPV6)) {
struct ipv6hdr *nh;
err = skb_ensure_writable(skb, skb_network_offset(skb) +
sizeof(*nh));
if (unlikely(err))
return err;
nh = ipv6_hdr(skb);
if (nh->hop_limit <= 1)
return -EHOSTUNREACH;
key->ip.ttl = --nh->hop_limit;
} else if (skb->protocol == htons(ETH_P_IP)) {
struct iphdr *nh;
u8 old_ttl;
err = skb_ensure_writable(skb, skb_network_offset(skb) +
sizeof(*nh));
if (unlikely(err))
return err;
nh = ip_hdr(skb);
if (nh->ttl <= 1)
return -EHOSTUNREACH;
old_ttl = nh->ttl--;
csum_replace2(&nh->check, htons(old_ttl << 8),
htons(nh->ttl << 8));
key->ip.ttl = nh->ttl;
}
return 0;
}
/* Execute a list of actions against 'skb'. */
static int do_execute_actions(struct datapath *dp, struct sk_buff *skb,
struct sw_flow_key *key,
const struct nlattr *attr, int len)
{
const struct nlattr *a;
int rem;
for (a = attr, rem = len; rem > 0;
a = nla_next(a, &rem)) {
int err = 0;
if (trace_ovs_do_execute_action_enabled())
trace_ovs_do_execute_action(dp, skb, key, a, rem);
/* Actions that rightfully have to consume the skb should do it
* and return directly.
*/
switch (nla_type(a)) {
case OVS_ACTION_ATTR_OUTPUT: {
int port = nla_get_u32(a);
struct sk_buff *clone;
/* Every output action needs a separate clone
 * of 'skb'. If the output action is the last
 * action, the clone can be avoided.
 */
if (nla_is_last(a, rem)) {
do_output(dp, skb, port, key);
/* 'skb' has been used for output.
*/
return 0;
}
clone = skb_clone(skb, GFP_ATOMIC);
if (clone)
do_output(dp, clone, port, key);
OVS_CB(skb)->cutlen = 0;
break;
}
case OVS_ACTION_ATTR_TRUNC: {
struct ovs_action_trunc *trunc = nla_data(a);
if (skb->len > trunc->max_len)
OVS_CB(skb)->cutlen = skb->len - trunc->max_len;
break;
}
case OVS_ACTION_ATTR_USERSPACE:
output_userspace(dp, skb, key, a, attr,
len, OVS_CB(skb)->cutlen);
OVS_CB(skb)->cutlen = 0;
if (nla_is_last(a, rem)) {
consume_skb(skb);
return 0;
}
break;
case OVS_ACTION_ATTR_HASH:
execute_hash(skb, key, a);
break;
case OVS_ACTION_ATTR_PUSH_MPLS: {
struct ovs_action_push_mpls *mpls = nla_data(a);
err = push_mpls(skb, key, mpls->mpls_lse,
mpls->mpls_ethertype, skb->mac_len);
break;
}
case OVS_ACTION_ATTR_ADD_MPLS: {
struct ovs_action_add_mpls *mpls = nla_data(a);
__u16 mac_len = 0;
if (mpls->tun_flags & OVS_MPLS_L3_TUNNEL_FLAG_MASK)
mac_len = skb->mac_len;
err = push_mpls(skb, key, mpls->mpls_lse,
mpls->mpls_ethertype, mac_len);
break;
}
case OVS_ACTION_ATTR_POP_MPLS:
err = pop_mpls(skb, key, nla_get_be16(a));
break;
case OVS_ACTION_ATTR_PUSH_VLAN:
err = push_vlan(skb, key, nla_data(a));
break;
case OVS_ACTION_ATTR_POP_VLAN:
err = pop_vlan(skb, key);
break;
case OVS_ACTION_ATTR_RECIRC: {
bool last = nla_is_last(a, rem);
err = execute_recirc(dp, skb, key, a, last);
if (last) {
/* If this is the last action, the skb has
* been consumed or freed.
* Return immediately.
*/
return err;
}
break;
}
case OVS_ACTION_ATTR_SET:
err = execute_set_action(skb, key, nla_data(a));
break;
case OVS_ACTION_ATTR_SET_MASKED:
case OVS_ACTION_ATTR_SET_TO_MASKED:
err = execute_masked_set_action(skb, key, nla_data(a));
break;
case OVS_ACTION_ATTR_SAMPLE: {
bool last = nla_is_last(a, rem);
err = sample(dp, skb, key, a, last);
if (last)
return err;
break;
}
case OVS_ACTION_ATTR_CT:
if (!is_flow_key_valid(key)) {
err = ovs_flow_key_update(skb, key);
if (err)
return err;
}
err = ovs_ct_execute(ovs_dp_get_net(dp), skb, key,
nla_data(a));
/* Hide stolen IP fragments from user space. */
if (err)
return err == -EINPROGRESS ? 0 : err;
break;
case OVS_ACTION_ATTR_CT_CLEAR:
err = ovs_ct_clear(skb, key);
break;
case OVS_ACTION_ATTR_PUSH_ETH:
err = push_eth(skb, key, nla_data(a));
break;
case OVS_ACTION_ATTR_POP_ETH:
err = pop_eth(skb, key);
break;
case OVS_ACTION_ATTR_PUSH_NSH: {
u8 buffer[NSH_HDR_MAX_LEN];
struct nshhdr *nh = (struct nshhdr *)buffer;
err = nsh_hdr_from_nlattr(nla_data(a), nh,
NSH_HDR_MAX_LEN);
if (unlikely(err))
break;
err = push_nsh(skb, key, nh);
break;
}
case OVS_ACTION_ATTR_POP_NSH:
err = pop_nsh(skb, key);
break;
case OVS_ACTION_ATTR_METER:
if (ovs_meter_execute(dp, skb, key, nla_get_u32(a))) {
ovs_kfree_skb_reason(skb, OVS_DROP_METER);
return 0;
}
break;
case OVS_ACTION_ATTR_CLONE: {
bool last = nla_is_last(a, rem);
err = clone(dp, skb, key, a, last);
if (last)
return err;
break;
}
case OVS_ACTION_ATTR_CHECK_PKT_LEN: {
bool last = nla_is_last(a, rem);
err = execute_check_pkt_len(dp, skb, key, a, last);
if (last)
return err;
break;
}
case OVS_ACTION_ATTR_DEC_TTL:
err = execute_dec_ttl(skb, key);
if (err == -EHOSTUNREACH)
return dec_ttl_exception_handler(dp, skb,
key, a);
break;
case OVS_ACTION_ATTR_DROP: {
enum ovs_drop_reason reason = nla_get_u32(a)
? OVS_DROP_EXPLICIT_WITH_ERROR
: OVS_DROP_EXPLICIT;
ovs_kfree_skb_reason(skb, reason);
return 0;
}
}
if (unlikely(err)) {
ovs_kfree_skb_reason(skb, OVS_DROP_ACTION_ERROR);
return err;
}
}
ovs_kfree_skb_reason(skb, OVS_DROP_LAST_ACTION);
return 0;
}
/* Execute the actions on a clone of the packet. The execution does not
 * affect the original 'skb' or the original 'key'.
 *
 * The execution may be deferred in case the actions cannot be executed
 * immediately.
 */
static int clone_execute(struct datapath *dp, struct sk_buff *skb,
struct sw_flow_key *key, u32 recirc_id,
const struct nlattr *actions, int len,
bool last, bool clone_flow_key)
{
struct deferred_action *da;
struct sw_flow_key *clone;
skb = last ? skb : skb_clone(skb, GFP_ATOMIC);
if (!skb) {
/* Out of memory, skip this action.
*/
return 0;
}
/* When clone_flow_key is false, the 'key' will not be changed
 * by the actions, so the 'key' can be used directly.
 * Otherwise, try to clone the key from the next recursion level of
 * 'flow_keys'. If the clone is successful, execute the actions
 * without deferring.
 */
clone = clone_flow_key ? clone_key(key) : key;
if (clone) {
int err = 0;
if (actions) { /* Sample action */
if (clone_flow_key)
__this_cpu_inc(exec_actions_level);
err = do_execute_actions(dp, skb, clone,
actions, len);
if (clone_flow_key)
__this_cpu_dec(exec_actions_level);
} else { /* Recirc action */
clone->recirc_id = recirc_id;
ovs_dp_process_packet(skb, clone);
}
return err;
}
/* Out of 'flow_keys' space. Defer actions */
da = add_deferred_actions(skb, key, actions, len);
if (da) {
if (!actions) { /* Recirc action */
key = &da->pkt_key;
key->recirc_id = recirc_id;
}
} else {
/* Out of per CPU action FIFO space. Drop the 'skb' and
* log an error.
*/
ovs_kfree_skb_reason(skb, OVS_DROP_DEFERRED_LIMIT);
if (net_ratelimit()) {
if (actions) { /* Sample action */
pr_warn("%s: deferred action limit reached, drop sample action\n",
ovs_dp_name(dp));
} else { /* Recirc action */
pr_warn("%s: deferred action limit reached, drop recirc action (recirc_id=%#x)\n",
ovs_dp_name(dp), recirc_id);
}
}
}
return 0;
}
static void process_deferred_actions(struct datapath *dp)
{
struct action_fifo *fifo = this_cpu_ptr(action_fifos);
/* Do not touch the FIFO if there are no deferred actions. */
if (action_fifo_is_empty(fifo))
return;
/* Finish executing all deferred actions. */
do {
struct deferred_action *da = action_fifo_get(fifo);
struct sk_buff *skb = da->skb;
struct sw_flow_key *key = &da->pkt_key;
const struct nlattr *actions = da->actions;
int actions_len = da->actions_len;
if (actions)
do_execute_actions(dp, skb, key, actions, actions_len);
else
ovs_dp_process_packet(skb, key);
} while (!action_fifo_is_empty(fifo));
/* Reset FIFO for the next packet. */
action_fifo_init(fifo);
}
/* Execute a list of actions against 'skb'. */
int ovs_execute_actions(struct datapath *dp, struct sk_buff *skb,
const struct sw_flow_actions *acts,
struct sw_flow_key *key)
{
int err, level;
level = __this_cpu_inc_return(exec_actions_level);
if (unlikely(level > OVS_RECURSION_LIMIT)) {
net_crit_ratelimited("ovs: recursion limit reached on datapath %s, probable configuration error\n",
ovs_dp_name(dp));
ovs_kfree_skb_reason(skb, OVS_DROP_RECURSION_LIMIT);
err = -ENETDOWN;
goto out;
}
OVS_CB(skb)->acts_origlen = acts->orig_len;
err = do_execute_actions(dp, skb, key,
acts->actions, acts->actions_len);
if (level == 1)
process_deferred_actions(dp);
out:
__this_cpu_dec(exec_actions_level);
return err;
}
int action_fifos_init(void)
{
action_fifos = alloc_percpu(struct action_fifo);
if (!action_fifos)
return -ENOMEM;
flow_keys = alloc_percpu(struct action_flow_keys);
if (!flow_keys) {
free_percpu(action_fifos);
return -ENOMEM;
}
return 0;
}
void action_fifos_exit(void)
{
free_percpu(action_fifos);
free_percpu(flow_keys);
}
| linux-master | net/openvswitch/actions.c |
// SPDX-License-Identifier: GPL-2.0-or-later
/*
* Copyright (c) 2014 Nicira, Inc.
*/
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
#include <linux/in.h>
#include <linux/ip.h>
#include <linux/net.h>
#include <linux/rculist.h>
#include <linux/udp.h>
#include <linux/if_vlan.h>
#include <linux/module.h>
#include <net/geneve.h>
#include <net/icmp.h>
#include <net/ip.h>
#include <net/route.h>
#include <net/udp.h>
#include <net/xfrm.h>
#include "datapath.h"
#include "vport.h"
#include "vport-netdev.h"
static struct vport_ops ovs_geneve_vport_ops;
/**
* struct geneve_port - Keeps track of open UDP ports
* @dst_port: destination port.
*/
struct geneve_port {
u16 dst_port;
};
static inline struct geneve_port *geneve_vport(const struct vport *vport)
{
return vport_priv(vport);
}
static int geneve_get_options(const struct vport *vport,
struct sk_buff *skb)
{
struct geneve_port *geneve_port = geneve_vport(vport);
if (nla_put_u16(skb, OVS_TUNNEL_ATTR_DST_PORT, geneve_port->dst_port))
return -EMSGSIZE;
return 0;
}
static struct vport *geneve_tnl_create(const struct vport_parms *parms)
{
struct net *net = ovs_dp_get_net(parms->dp);
struct nlattr *options = parms->options;
struct geneve_port *geneve_port;
struct net_device *dev;
struct vport *vport;
struct nlattr *a;
u16 dst_port;
int err;
if (!options) {
err = -EINVAL;
goto error;
}
a = nla_find_nested(options, OVS_TUNNEL_ATTR_DST_PORT);
if (a && nla_len(a) == sizeof(u16)) {
dst_port = nla_get_u16(a);
} else {
/* Require destination port from userspace. */
err = -EINVAL;
goto error;
}
vport = ovs_vport_alloc(sizeof(struct geneve_port),
&ovs_geneve_vport_ops, parms);
if (IS_ERR(vport))
return vport;
geneve_port = geneve_vport(vport);
geneve_port->dst_port = dst_port;
rtnl_lock();
dev = geneve_dev_create_fb(net, parms->name, NET_NAME_USER, dst_port);
if (IS_ERR(dev)) {
rtnl_unlock();
ovs_vport_free(vport);
return ERR_CAST(dev);
}
err = dev_change_flags(dev, dev->flags | IFF_UP, NULL);
if (err < 0) {
rtnl_delete_link(dev, 0, NULL);
rtnl_unlock();
ovs_vport_free(vport);
goto error;
}
rtnl_unlock();
return vport;
error:
return ERR_PTR(err);
}
static struct vport *geneve_create(const struct vport_parms *parms)
{
struct vport *vport;
vport = geneve_tnl_create(parms);
if (IS_ERR(vport))
return vport;
return ovs_netdev_link(vport, parms->name);
}
static struct vport_ops ovs_geneve_vport_ops = {
.type = OVS_VPORT_TYPE_GENEVE,
.create = geneve_create,
.destroy = ovs_netdev_tunnel_destroy,
.get_options = geneve_get_options,
.send = dev_queue_xmit,
};
static int __init ovs_geneve_tnl_init(void)
{
return ovs_vport_ops_register(&ovs_geneve_vport_ops);
}
static void __exit ovs_geneve_tnl_exit(void)
{
ovs_vport_ops_unregister(&ovs_geneve_vport_ops);
}
module_init(ovs_geneve_tnl_init);
module_exit(ovs_geneve_tnl_exit);
MODULE_DESCRIPTION("OVS: Geneve switching port");
MODULE_LICENSE("GPL");
MODULE_ALIAS("vport-type-5");
| linux-master | net/openvswitch/vport-geneve.c |
// SPDX-License-Identifier: GPL-2.0-only
/*
* Copyright (c) 2007-2014 Nicira, Inc.
*/
#include "flow.h"
#include "datapath.h"
#include "flow_netlink.h"
#include <linux/uaccess.h>
#include <linux/netdevice.h>
#include <linux/etherdevice.h>
#include <linux/if_ether.h>
#include <linux/if_vlan.h>
#include <net/llc_pdu.h>
#include <linux/kernel.h>
#include <linux/jhash.h>
#include <linux/jiffies.h>
#include <linux/llc.h>
#include <linux/module.h>
#include <linux/in.h>
#include <linux/rcupdate.h>
#include <linux/cpumask.h>
#include <linux/if_arp.h>
#include <linux/ip.h>
#include <linux/ipv6.h>
#include <linux/sctp.h>
#include <linux/tcp.h>
#include <linux/udp.h>
#include <linux/icmp.h>
#include <linux/icmpv6.h>
#include <linux/rculist.h>
#include <linux/sort.h>
#include <net/ip.h>
#include <net/ipv6.h>
#include <net/ndisc.h>
#define TBL_MIN_BUCKETS 1024
#define MASK_ARRAY_SIZE_MIN 16
#define REHASH_INTERVAL (10 * 60 * HZ)
#define MC_DEFAULT_HASH_ENTRIES 256
#define MC_HASH_SHIFT 8
#define MC_HASH_SEGS ((sizeof(uint32_t) * 8) / MC_HASH_SHIFT)
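/* The 32-bit flow hash is consumed MC_HASH_SHIFT bits at a time, giving
 * a lookup up to MC_HASH_SEGS candidate slots in the per-CPU mask cache
 * (the cache probing happens in the lookup path, not shown in this
 * excerpt).
 */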
static struct kmem_cache *flow_cache;
struct kmem_cache *flow_stats_cache __read_mostly;
static u16 range_n_bytes(const struct sw_flow_key_range *range)
{
return range->end - range->start;
}
void ovs_flow_mask_key(struct sw_flow_key *dst, const struct sw_flow_key *src,
bool full, const struct sw_flow_mask *mask)
{
int start = full ? 0 : mask->range.start;
int len = full ? sizeof *dst : range_n_bytes(&mask->range);
const long *m = (const long *)((const u8 *)&mask->key + start);
const long *s = (const long *)((const u8 *)src + start);
long *d = (long *)((u8 *)dst + start);
int i;
/* If 'full' is true then all of 'dst' is fully initialized. Otherwise
 * the memory outside of 'mask->range' is left uninitialized. This can
 * be used as an optimization when further operations on 'dst' only use
 * contents within 'mask->range'.
 */
for (i = 0; i < len; i += sizeof(long))
*d++ = *s++ & *m++;
}
struct sw_flow *ovs_flow_alloc(void)
{
struct sw_flow *flow;
struct sw_flow_stats *stats;
flow = kmem_cache_zalloc(flow_cache, GFP_KERNEL);
if (!flow)
return ERR_PTR(-ENOMEM);
flow->stats_last_writer = -1;
flow->cpu_used_mask = (struct cpumask *)&flow->stats[nr_cpu_ids];
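/* The cpumask lives directly after the per-CPU stats pointer array
 * inside the same flow_cache object; the cache is sized for this layout
 * at module init (not shown in this excerpt).
 */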
/* Initialize the default stat node. */
stats = kmem_cache_alloc_node(flow_stats_cache,
GFP_KERNEL | __GFP_ZERO,
node_online(0) ? 0 : NUMA_NO_NODE);
if (!stats)
goto err;
spin_lock_init(&stats->lock);
RCU_INIT_POINTER(flow->stats[0], stats);
cpumask_set_cpu(0, flow->cpu_used_mask);
return flow;
err:
kmem_cache_free(flow_cache, flow);
return ERR_PTR(-ENOMEM);
}
int ovs_flow_tbl_count(const struct flow_table *table)
{
return table->count;
}
static void flow_free(struct sw_flow *flow)
{
int cpu;
if (ovs_identifier_is_key(&flow->id))
kfree(flow->id.unmasked_key);
if (flow->sf_acts)
ovs_nla_free_flow_actions((struct sw_flow_actions __force *)
flow->sf_acts);
/* We open code this to make sure cpu 0 is always considered */
for (cpu = 0; cpu < nr_cpu_ids;
cpu = cpumask_next(cpu, flow->cpu_used_mask)) {
if (flow->stats[cpu])
kmem_cache_free(flow_stats_cache,
(struct sw_flow_stats __force *)flow->stats[cpu]);
}
kmem_cache_free(flow_cache, flow);
}
static void rcu_free_flow_callback(struct rcu_head *rcu)
{
struct sw_flow *flow = container_of(rcu, struct sw_flow, rcu);
flow_free(flow);
}
void ovs_flow_free(struct sw_flow *flow, bool deferred)
{
if (!flow)
return;
if (deferred)
call_rcu(&flow->rcu, rcu_free_flow_callback);
else
flow_free(flow);
}
static void __table_instance_destroy(struct table_instance *ti)
{
kvfree(ti->buckets);
kfree(ti);
}
static struct table_instance *table_instance_alloc(int new_size)
{
struct table_instance *ti = kmalloc(sizeof(*ti), GFP_KERNEL);
int i;
if (!ti)
return NULL;
ti->buckets = kvmalloc_array(new_size, sizeof(struct hlist_head),
GFP_KERNEL);
if (!ti->buckets) {
kfree(ti);
return NULL;
}
for (i = 0; i < new_size; i++)
INIT_HLIST_HEAD(&ti->buckets[i]);
ti->n_buckets = new_size;
ti->node_ver = 0;
get_random_bytes(&ti->hash_seed, sizeof(u32));
return ti;
}
static void __mask_array_destroy(struct mask_array *ma)
{
free_percpu(ma->masks_usage_stats);
kfree(ma);
}
static void mask_array_rcu_cb(struct rcu_head *rcu)
{
struct mask_array *ma = container_of(rcu, struct mask_array, rcu);
__mask_array_destroy(ma);
}
static void tbl_mask_array_reset_counters(struct mask_array *ma)
{
int i, cpu;
/* As the per-CPU counters are not atomic, we cannot reset them from
 * another CPU. To still have an approximately zero-based counter, we
 * store the value at reset time and subtract it later when processing.
 */
for (i = 0; i < ma->max; i++) {
ma->masks_usage_zero_cntr[i] = 0;
for_each_possible_cpu(cpu) {
struct mask_array_stats *stats;
unsigned int start;
u64 counter;
stats = per_cpu_ptr(ma->masks_usage_stats, cpu);
do {
start = u64_stats_fetch_begin(&stats->syncp);
counter = stats->usage_cntrs[i];
} while (u64_stats_fetch_retry(&stats->syncp, start));
ma->masks_usage_zero_cntr[i] += counter;
}
}
}
static struct mask_array *tbl_mask_array_alloc(int size)
{
struct mask_array *new;
size = max(MASK_ARRAY_SIZE_MIN, size);
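/* Single allocation: struct mask_array, then 'size' pointers for the
 * flexible masks[] array, then 'size' u64 zero-baseline counters
 * (masks_usage_zero_cntr points just past the pointer array).
 */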
new = kzalloc(sizeof(struct mask_array) +
sizeof(struct sw_flow_mask *) * size +
sizeof(u64) * size, GFP_KERNEL);
if (!new)
return NULL;
new->masks_usage_zero_cntr = (u64 *)((u8 *)new +
sizeof(struct mask_array) +
sizeof(struct sw_flow_mask *) *
size);
new->masks_usage_stats = __alloc_percpu(sizeof(struct mask_array_stats) +
sizeof(u64) * size,
__alignof__(u64));
if (!new->masks_usage_stats) {
kfree(new);
return NULL;
}
new->count = 0;
new->max = size;
return new;
}
static int tbl_mask_array_realloc(struct flow_table *tbl, int size)
{
struct mask_array *old;
struct mask_array *new;
new = tbl_mask_array_alloc(size);
if (!new)
return -ENOMEM;
old = ovsl_dereference(tbl->mask_array);
if (old) {
int i;
for (i = 0; i < old->max; i++) {
if (ovsl_dereference(old->masks[i]))
new->masks[new->count++] = old->masks[i];
}
call_rcu(&old->rcu, mask_array_rcu_cb);
}
rcu_assign_pointer(tbl->mask_array, new);
return 0;
}
static int tbl_mask_array_add_mask(struct flow_table *tbl,
struct sw_flow_mask *new)
{
struct mask_array *ma = ovsl_dereference(tbl->mask_array);
int err, ma_count = READ_ONCE(ma->count);
if (ma_count >= ma->max) {
err = tbl_mask_array_realloc(tbl, ma->max +
MASK_ARRAY_SIZE_MIN);
if (err)
return err;
ma = ovsl_dereference(tbl->mask_array);
} else {
/* On every add or delete we need to reset the counters so
* every new mask gets a fair chance of being prioritized.
*/
tbl_mask_array_reset_counters(ma);
}
BUG_ON(ovsl_dereference(ma->masks[ma_count]));
rcu_assign_pointer(ma->masks[ma_count], new);
WRITE_ONCE(ma->count, ma_count + 1);
return 0;
}
static void tbl_mask_array_del_mask(struct flow_table *tbl,
struct sw_flow_mask *mask)
{
struct mask_array *ma = ovsl_dereference(tbl->mask_array);
int i, ma_count = READ_ONCE(ma->count);
/* Remove the deleted mask pointers from the array */
for (i = 0; i < ma_count; i++) {
if (mask == ovsl_dereference(ma->masks[i]))
goto found;
}
BUG();
return;
found:
WRITE_ONCE(ma->count, ma_count - 1);
rcu_assign_pointer(ma->masks[i], ma->masks[ma_count - 1]);
RCU_INIT_POINTER(ma->masks[ma_count - 1], NULL);
kfree_rcu(mask, rcu);
/* Shrink the mask array if necessary. */
if (ma->max >= (MASK_ARRAY_SIZE_MIN * 2) &&
ma_count <= (ma->max / 3))
tbl_mask_array_realloc(tbl, ma->max / 2);
else
tbl_mask_array_reset_counters(ma);
}
/* Remove 'mask' from the mask list, if it is not needed any more. */
static void flow_mask_remove(struct flow_table *tbl, struct sw_flow_mask *mask)
{
if (mask) {
/* ovs-lock is required to protect mask-refcount and
* mask list.
*/
ASSERT_OVSL();
BUG_ON(!mask->ref_count);
mask->ref_count--;
if (!mask->ref_count)
tbl_mask_array_del_mask(tbl, mask);
}
}
static void __mask_cache_destroy(struct mask_cache *mc)
{
free_percpu(mc->mask_cache);
kfree(mc);
}
static void mask_cache_rcu_cb(struct rcu_head *rcu)
{
struct mask_cache *mc = container_of(rcu, struct mask_cache, rcu);
__mask_cache_destroy(mc);
}
static struct mask_cache *tbl_mask_cache_alloc(u32 size)
{
struct mask_cache_entry __percpu *cache = NULL;
struct mask_cache *new;
/* Only allow size to be 0 or a power of 2, and it must not exceed the
 * percpu allocation size.
 */
if ((!is_power_of_2(size) && size != 0) ||
(size * sizeof(struct mask_cache_entry)) > PCPU_MIN_UNIT_SIZE)
return NULL;
new = kzalloc(sizeof(*new), GFP_KERNEL);
if (!new)
return NULL;
new->cache_size = size;
if (new->cache_size > 0) {
cache = __alloc_percpu(array_size(sizeof(struct mask_cache_entry),
new->cache_size),
__alignof__(struct mask_cache_entry));
if (!cache) {
kfree(new);
return NULL;
}
}
new->mask_cache = cache;
return new;
}
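/* Sizing sketch: a cache size of 0 disables the cache entirely, so
* every lookup falls through to the full mask scan in flow_lookup(). A
* non-zero size must be a power of two so that indexing can use a plain
* "hash & (cache_size - 1)" mask, and the per-CPU allocator bounds it:
* size * sizeof(struct mask_cache_entry) may not exceed
* PCPU_MIN_UNIT_SIZE, which with the small two-word entry works out to
* a few thousand entries per CPU.
*/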
int ovs_flow_tbl_masks_cache_resize(struct flow_table *table, u32 size)
{
struct mask_cache *mc = rcu_dereference_ovsl(table->mask_cache);
struct mask_cache *new;
if (size == mc->cache_size)
return 0;
if ((!is_power_of_2(size) && size != 0) ||
(size * sizeof(struct mask_cache_entry)) > PCPU_MIN_UNIT_SIZE)
return -EINVAL;
new = tbl_mask_cache_alloc(size);
if (!new)
return -ENOMEM;
rcu_assign_pointer(table->mask_cache, new);
call_rcu(&mc->rcu, mask_cache_rcu_cb);
return 0;
}
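/* A minimal caller sketch (hypothetical, not one of the call sites in
* this file), assuming 'dp' is a valid datapath:
*
*	ovs_lock();
*	err = ovs_flow_tbl_masks_cache_resize(&dp->table, 512);
*	ovs_unlock();
*
* The new per-CPU cache starts out cold, which only costs a full mask
* scan on the first misses, and the old cache is freed via call_rcu()
* once concurrent readers are done with it.
*/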
int ovs_flow_tbl_init(struct flow_table *table)
{
struct table_instance *ti, *ufid_ti;
struct mask_cache *mc;
struct mask_array *ma;
mc = tbl_mask_cache_alloc(MC_DEFAULT_HASH_ENTRIES);
if (!mc)
return -ENOMEM;
ma = tbl_mask_array_alloc(MASK_ARRAY_SIZE_MIN);
if (!ma)
goto free_mask_cache;
ti = table_instance_alloc(TBL_MIN_BUCKETS);
if (!ti)
goto free_mask_array;
ufid_ti = table_instance_alloc(TBL_MIN_BUCKETS);
if (!ufid_ti)
goto free_ti;
rcu_assign_pointer(table->ti, ti);
rcu_assign_pointer(table->ufid_ti, ufid_ti);
rcu_assign_pointer(table->mask_array, ma);
rcu_assign_pointer(table->mask_cache, mc);
table->last_rehash = jiffies;
table->count = 0;
table->ufid_count = 0;
return 0;
free_ti:
__table_instance_destroy(ti);
free_mask_array:
__mask_array_destroy(ma);
free_mask_cache:
__mask_cache_destroy(mc);
return -ENOMEM;
}
static void flow_tbl_destroy_rcu_cb(struct rcu_head *rcu)
{
struct table_instance *ti;
ti = container_of(rcu, struct table_instance, rcu);
__table_instance_destroy(ti);
}
static void table_instance_flow_free(struct flow_table *table,
struct table_instance *ti,
struct table_instance *ufid_ti,
struct sw_flow *flow)
{
hlist_del_rcu(&flow->flow_table.node[ti->node_ver]);
table->count--;
if (ovs_identifier_is_ufid(&flow->id)) {
hlist_del_rcu(&flow->ufid_table.node[ufid_ti->node_ver]);
table->ufid_count--;
}
flow_mask_remove(table, flow->mask);
}
/* Must be called with OVS mutex held. */
void table_instance_flow_flush(struct flow_table *table,
struct table_instance *ti,
struct table_instance *ufid_ti)
{
int i;
for (i = 0; i < ti->n_buckets; i++) {
struct hlist_head *head = &ti->buckets[i];
struct hlist_node *n;
struct sw_flow *flow;
hlist_for_each_entry_safe(flow, n, head,
flow_table.node[ti->node_ver]) {
table_instance_flow_free(table, ti, ufid_ti,
flow);
ovs_flow_free(flow, true);
}
}
if (WARN_ON(table->count != 0 ||
table->ufid_count != 0)) {
table->count = 0;
table->ufid_count = 0;
}
}
static void table_instance_destroy(struct table_instance *ti,
struct table_instance *ufid_ti)
{
call_rcu(&ti->rcu, flow_tbl_destroy_rcu_cb);
call_rcu(&ufid_ti->rcu, flow_tbl_destroy_rcu_cb);
}
/* No need for locking; this function is called from an RCU callback or
* the error path.
*/
void ovs_flow_tbl_destroy(struct flow_table *table)
{
struct table_instance *ti = rcu_dereference_raw(table->ti);
struct table_instance *ufid_ti = rcu_dereference_raw(table->ufid_ti);
struct mask_cache *mc = rcu_dereference_raw(table->mask_cache);
struct mask_array *ma = rcu_dereference_raw(table->mask_array);
call_rcu(&mc->rcu, mask_cache_rcu_cb);
call_rcu(&ma->rcu, mask_array_rcu_cb);
table_instance_destroy(ti, ufid_ti);
}
struct sw_flow *ovs_flow_tbl_dump_next(struct table_instance *ti,
u32 *bucket, u32 *last)
{
struct sw_flow *flow;
struct hlist_head *head;
int ver;
int i;
ver = ti->node_ver;
while (*bucket < ti->n_buckets) {
i = 0;
head = &ti->buckets[*bucket];
hlist_for_each_entry_rcu(flow, head, flow_table.node[ver]) {
if (i < *last) {
i++;
continue;
}
*last = i + 1;
return flow;
}
(*bucket)++;
*last = 0;
}
return NULL;
}
static struct hlist_head *find_bucket(struct table_instance *ti, u32 hash)
{
hash = jhash_1word(hash, ti->hash_seed);
return &ti->buckets[hash & (ti->n_buckets - 1)];
}
static void table_instance_insert(struct table_instance *ti,
struct sw_flow *flow)
{
struct hlist_head *head;
head = find_bucket(ti, flow->flow_table.hash);
hlist_add_head_rcu(&flow->flow_table.node[ti->node_ver], head);
}
static void ufid_table_instance_insert(struct table_instance *ti,
struct sw_flow *flow)
{
struct hlist_head *head;
head = find_bucket(ti, flow->ufid_table.hash);
hlist_add_head_rcu(&flow->ufid_table.node[ti->node_ver], head);
}
static void flow_table_copy_flows(struct table_instance *old,
struct table_instance *new, bool ufid)
{
int old_ver;
int i;
old_ver = old->node_ver;
new->node_ver = !old_ver;
/* Insert in new table. */
for (i = 0; i < old->n_buckets; i++) {
struct sw_flow *flow;
struct hlist_head *head = &old->buckets[i];
if (ufid)
hlist_for_each_entry_rcu(flow, head,
ufid_table.node[old_ver],
lockdep_ovsl_is_held())
ufid_table_instance_insert(new, flow);
else
hlist_for_each_entry_rcu(flow, head,
flow_table.node[old_ver],
lockdep_ovsl_is_held())
table_instance_insert(new, flow);
}
}
static struct table_instance *table_instance_rehash(struct table_instance *ti,
int n_buckets, bool ufid)
{
struct table_instance *new_ti;
new_ti = table_instance_alloc(n_buckets);
if (!new_ti)
return NULL;
flow_table_copy_flows(ti, new_ti, ufid);
return new_ti;
}
int ovs_flow_tbl_flush(struct flow_table *flow_table)
{
struct table_instance *old_ti, *new_ti;
struct table_instance *old_ufid_ti, *new_ufid_ti;
new_ti = table_instance_alloc(TBL_MIN_BUCKETS);
if (!new_ti)
return -ENOMEM;
new_ufid_ti = table_instance_alloc(TBL_MIN_BUCKETS);
if (!new_ufid_ti)
goto err_free_ti;
old_ti = ovsl_dereference(flow_table->ti);
old_ufid_ti = ovsl_dereference(flow_table->ufid_ti);
rcu_assign_pointer(flow_table->ti, new_ti);
rcu_assign_pointer(flow_table->ufid_ti, new_ufid_ti);
flow_table->last_rehash = jiffies;
table_instance_flow_flush(flow_table, old_ti, old_ufid_ti);
table_instance_destroy(old_ti, old_ufid_ti);
return 0;
err_free_ti:
__table_instance_destroy(new_ti);
return -ENOMEM;
}
static u32 flow_hash(const struct sw_flow_key *key,
const struct sw_flow_key_range *range)
{
const u32 *hash_key = (const u32 *)((const u8 *)key + range->start);
/* Make sure the number of hash bytes is a multiple of sizeof(u32). */
int hash_u32s = range_n_bytes(range) >> 2;
return jhash2(hash_key, hash_u32s, 0);
}
static int flow_key_start(const struct sw_flow_key *key)
{
if (key->tun_proto)
return 0;
else
return rounddown(offsetof(struct sw_flow_key, phy),
sizeof(long));
}
static bool cmp_key(const struct sw_flow_key *key1,
const struct sw_flow_key *key2,
int key_start, int key_end)
{
const long *cp1 = (const long *)((const u8 *)key1 + key_start);
const long *cp2 = (const long *)((const u8 *)key2 + key_start);
int i;
for (i = key_start; i < key_end; i += sizeof(long))
if (*cp1++ ^ *cp2++)
return false;
return true;
}
static bool flow_cmp_masked_key(const struct sw_flow *flow,
const struct sw_flow_key *key,
const struct sw_flow_key_range *range)
{
return cmp_key(&flow->key, key, range->start, range->end);
}
static bool ovs_flow_cmp_unmasked_key(const struct sw_flow *flow,
const struct sw_flow_match *match)
{
struct sw_flow_key *key = match->key;
int key_start = flow_key_start(key);
int key_end = match->range.end;
BUG_ON(ovs_identifier_is_ufid(&flow->id));
return cmp_key(flow->id.unmasked_key, key, key_start, key_end);
}
static struct sw_flow *masked_flow_lookup(struct table_instance *ti,
const struct sw_flow_key *unmasked,
const struct sw_flow_mask *mask,
u32 *n_mask_hit)
{
struct sw_flow *flow;
struct hlist_head *head;
u32 hash;
struct sw_flow_key masked_key;
ovs_flow_mask_key(&masked_key, unmasked, false, mask);
hash = flow_hash(&masked_key, &mask->range);
head = find_bucket(ti, hash);
(*n_mask_hit)++;
hlist_for_each_entry_rcu(flow, head, flow_table.node[ti->node_ver],
lockdep_ovsl_is_held()) {
if (flow->mask == mask && flow->flow_table.hash == hash &&
flow_cmp_masked_key(flow, &masked_key, &mask->range))
return flow;
}
return NULL;
}
/* Flow lookup does a full lookup on the flow table. It starts with the
* mask at the index passed in via *index.
* This function MUST be called with BH disabled due to the use
* of CPU-specific variables.
*/
static struct sw_flow *flow_lookup(struct flow_table *tbl,
struct table_instance *ti,
struct mask_array *ma,
const struct sw_flow_key *key,
u32 *n_mask_hit,
u32 *n_cache_hit,
u32 *index)
{
struct mask_array_stats *stats = this_cpu_ptr(ma->masks_usage_stats);
struct sw_flow *flow;
struct sw_flow_mask *mask;
int i;
if (likely(*index < ma->max)) {
mask = rcu_dereference_ovsl(ma->masks[*index]);
if (mask) {
flow = masked_flow_lookup(ti, key, mask, n_mask_hit);
if (flow) {
u64_stats_update_begin(&stats->syncp);
stats->usage_cntrs[*index]++;
u64_stats_update_end(&stats->syncp);
(*n_cache_hit)++;
return flow;
}
}
}
for (i = 0; i < ma->max; i++) {
if (i == *index)
continue;
mask = rcu_dereference_ovsl(ma->masks[i]);
if (unlikely(!mask))
break;
flow = masked_flow_lookup(ti, key, mask, n_mask_hit);
if (flow) { /* Found */
*index = i;
u64_stats_update_begin(&stats->syncp);
stats->usage_cntrs[*index]++;
u64_stats_update_end(&stats->syncp);
return flow;
}
}
return NULL;
}
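/* The *index argument doubles as an in/out hint: the caller (usually a
* per-CPU mask cache entry) passes in the slot that matched last time,
* and on a hit only that one mask is tried. On a miss the remaining
* slots are scanned in array order, which ovs_flow_masks_rebalance()
* keeps sorted by recent usage, and *index is rewritten to point at the
* mask that actually matched so the cache entry is corrected in place.
*/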
/*
* mask_cache maps a flow to its probable mask. The cache is not tightly
* coupled to the mask list, so updates to the mask list can leave
* inconsistent entries in the mask cache.
* This is a per-CPU cache divided into MC_HASH_SEGS segments.
* In case of a hash collision the entry is hashed into the next segment.
*/
struct sw_flow *ovs_flow_tbl_lookup_stats(struct flow_table *tbl,
const struct sw_flow_key *key,
u32 skb_hash,
u32 *n_mask_hit,
u32 *n_cache_hit)
{
struct mask_cache *mc = rcu_dereference(tbl->mask_cache);
struct mask_array *ma = rcu_dereference(tbl->mask_array);
struct table_instance *ti = rcu_dereference(tbl->ti);
struct mask_cache_entry *entries, *ce;
struct sw_flow *flow;
u32 hash;
int seg;
*n_mask_hit = 0;
*n_cache_hit = 0;
if (unlikely(!skb_hash || mc->cache_size == 0)) {
u32 mask_index = 0;
u32 cache = 0;
return flow_lookup(tbl, ti, ma, key, n_mask_hit, &cache,
&mask_index);
}
/* Pre and post recirculation flows usually have the same skb_hash
* value. To avoid hash collisions, rehash the 'skb_hash' with
* 'recirc_id'. */
if (key->recirc_id)
skb_hash = jhash_1word(skb_hash, key->recirc_id);
ce = NULL;
hash = skb_hash;
entries = this_cpu_ptr(mc->mask_cache);
/* Find the cache entry 'ce' to operate on. */
for (seg = 0; seg < MC_HASH_SEGS; seg++) {
int index = hash & (mc->cache_size - 1);
struct mask_cache_entry *e;
e = &entries[index];
if (e->skb_hash == skb_hash) {
flow = flow_lookup(tbl, ti, ma, key, n_mask_hit,
n_cache_hit, &e->mask_index);
if (!flow)
e->skb_hash = 0;
return flow;
}
if (!ce || e->skb_hash < ce->skb_hash)
ce = e; /* A better replacement cache candidate. */
hash >>= MC_HASH_SHIFT;
}
/* Cache miss, do full lookup. */
flow = flow_lookup(tbl, ti, ma, key, n_mask_hit, n_cache_hit,
&ce->mask_index);
if (flow)
ce->skb_hash = skb_hash;
*n_cache_hit = 0;
return flow;
}
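/* Probe sketch: the per-CPU cache is probed MC_HASH_SEGS times; each
* probe indexes with the current low bits of the hash
* ("hash & (cache_size - 1)") and then shifts the hash right by
* MC_HASH_SHIFT, so successive probes generally land on different
* slots. If none of the probed slots holds this skb_hash, the one with
* the smallest stored skb_hash is chosen as the replacement victim and
* is refilled only after the full lookup actually finds a flow.
*/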
struct sw_flow *ovs_flow_tbl_lookup(struct flow_table *tbl,
const struct sw_flow_key *key)
{
struct table_instance *ti = rcu_dereference_ovsl(tbl->ti);
struct mask_array *ma = rcu_dereference_ovsl(tbl->mask_array);
u32 __always_unused n_mask_hit;
u32 __always_unused n_cache_hit;
struct sw_flow *flow;
u32 index = 0;
/* This function gets called through the netlink interface and therefore
* is preemptible. However, the flow_lookup() function needs to be called
* with BH disabled due to CPU-specific variables.
*/
local_bh_disable();
flow = flow_lookup(tbl, ti, ma, key, &n_mask_hit, &n_cache_hit, &index);
local_bh_enable();
return flow;
}
struct sw_flow *ovs_flow_tbl_lookup_exact(struct flow_table *tbl,
const struct sw_flow_match *match)
{
struct mask_array *ma = ovsl_dereference(tbl->mask_array);
int i;
/* Always called under ovs-mutex. */
for (i = 0; i < ma->max; i++) {
struct table_instance *ti = rcu_dereference_ovsl(tbl->ti);
u32 __always_unused n_mask_hit;
struct sw_flow_mask *mask;
struct sw_flow *flow;
mask = ovsl_dereference(ma->masks[i]);
if (!mask)
continue;
flow = masked_flow_lookup(ti, match->key, mask, &n_mask_hit);
if (flow && ovs_identifier_is_key(&flow->id) &&
ovs_flow_cmp_unmasked_key(flow, match)) {
return flow;
}
}
return NULL;
}
static u32 ufid_hash(const struct sw_flow_id *sfid)
{
return jhash(sfid->ufid, sfid->ufid_len, 0);
}
static bool ovs_flow_cmp_ufid(const struct sw_flow *flow,
const struct sw_flow_id *sfid)
{
if (flow->id.ufid_len != sfid->ufid_len)
return false;
return !memcmp(flow->id.ufid, sfid->ufid, sfid->ufid_len);
}
bool ovs_flow_cmp(const struct sw_flow *flow,
const struct sw_flow_match *match)
{
if (ovs_identifier_is_ufid(&flow->id))
return flow_cmp_masked_key(flow, match->key, &match->range);
return ovs_flow_cmp_unmasked_key(flow, match);
}
struct sw_flow *ovs_flow_tbl_lookup_ufid(struct flow_table *tbl,
const struct sw_flow_id *ufid)
{
struct table_instance *ti = rcu_dereference_ovsl(tbl->ufid_ti);
struct sw_flow *flow;
struct hlist_head *head;
u32 hash;
hash = ufid_hash(ufid);
head = find_bucket(ti, hash);
hlist_for_each_entry_rcu(flow, head, ufid_table.node[ti->node_ver],
lockdep_ovsl_is_held()) {
if (flow->ufid_table.hash == hash &&
ovs_flow_cmp_ufid(flow, ufid))
return flow;
}
return NULL;
}
int ovs_flow_tbl_num_masks(const struct flow_table *table)
{
struct mask_array *ma = rcu_dereference_ovsl(table->mask_array);
return READ_ONCE(ma->count);
}
u32 ovs_flow_tbl_masks_cache_size(const struct flow_table *table)
{
struct mask_cache *mc = rcu_dereference_ovsl(table->mask_cache);
return READ_ONCE(mc->cache_size);
}
static struct table_instance *table_instance_expand(struct table_instance *ti,
bool ufid)
{
return table_instance_rehash(ti, ti->n_buckets * 2, ufid);
}
/* Must be called with OVS mutex held. */
void ovs_flow_tbl_remove(struct flow_table *table, struct sw_flow *flow)
{
struct table_instance *ti = ovsl_dereference(table->ti);
struct table_instance *ufid_ti = ovsl_dereference(table->ufid_ti);
BUG_ON(table->count == 0);
table_instance_flow_free(table, ti, ufid_ti, flow);
}
static struct sw_flow_mask *mask_alloc(void)
{
struct sw_flow_mask *mask;
mask = kmalloc(sizeof(*mask), GFP_KERNEL);
if (mask)
mask->ref_count = 1;
return mask;
}
static bool mask_equal(const struct sw_flow_mask *a,
const struct sw_flow_mask *b)
{
const u8 *a_ = (const u8 *)&a->key + a->range.start;
const u8 *b_ = (const u8 *)&b->key + b->range.start;
return (a->range.end == b->range.end)
&& (a->range.start == b->range.start)
&& (memcmp(a_, b_, range_n_bytes(&a->range)) == 0);
}
static struct sw_flow_mask *flow_mask_find(const struct flow_table *tbl,
const struct sw_flow_mask *mask)
{
struct mask_array *ma;
int i;
ma = ovsl_dereference(tbl->mask_array);
for (i = 0; i < ma->max; i++) {
struct sw_flow_mask *t;
t = ovsl_dereference(ma->masks[i]);
if (t && mask_equal(mask, t))
return t;
}
return NULL;
}
/* Add 'mask' into the mask list, if it is not already there. */
static int flow_mask_insert(struct flow_table *tbl, struct sw_flow *flow,
const struct sw_flow_mask *new)
{
struct sw_flow_mask *mask;
mask = flow_mask_find(tbl, new);
if (!mask) {
/* Allocate a new mask if none exists. */
mask = mask_alloc();
if (!mask)
return -ENOMEM;
mask->key = new->key;
mask->range = new->range;
/* Add mask to mask-list. */
if (tbl_mask_array_add_mask(tbl, mask)) {
kfree(mask);
return -ENOMEM;
}
} else {
BUG_ON(!mask->ref_count);
mask->ref_count++;
}
flow->mask = mask;
return 0;
}
/* Must be called with OVS mutex held. */
static void flow_key_insert(struct flow_table *table, struct sw_flow *flow)
{
struct table_instance *new_ti = NULL;
struct table_instance *ti;
flow->flow_table.hash = flow_hash(&flow->key, &flow->mask->range);
ti = ovsl_dereference(table->ti);
table_instance_insert(ti, flow);
table->count++;
/* Expand table, if necessary, to make room. */
if (table->count > ti->n_buckets)
new_ti = table_instance_expand(ti, false);
else if (time_after(jiffies, table->last_rehash + REHASH_INTERVAL))
new_ti = table_instance_rehash(ti, ti->n_buckets, false);
if (new_ti) {
rcu_assign_pointer(table->ti, new_ti);
call_rcu(&ti->rcu, flow_tbl_destroy_rcu_cb);
table->last_rehash = jiffies;
}
}
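/* Growth and rehash sketch: the key table doubles its bucket count as
* soon as it holds more flows than buckets, and even without growth an
* insert that lands more than REHASH_INTERVAL after the last rehash
* rebuilds the flows into a fresh table instance, which keeps unlucky
* (or deliberately colliding) bucket chains from persisting. The old
* instance is freed through call_rcu() once readers have moved on.
*/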
/* Must be called with OVS mutex held. */
static void flow_ufid_insert(struct flow_table *table, struct sw_flow *flow)
{
struct table_instance *ti;
flow->ufid_table.hash = ufid_hash(&flow->id);
ti = ovsl_dereference(table->ufid_ti);
ufid_table_instance_insert(ti, flow);
table->ufid_count++;
/* Expand table, if necessary, to make room. */
if (table->ufid_count > ti->n_buckets) {
struct table_instance *new_ti;
new_ti = table_instance_expand(ti, true);
if (new_ti) {
rcu_assign_pointer(table->ufid_ti, new_ti);
call_rcu(&ti->rcu, flow_tbl_destroy_rcu_cb);
}
}
}
/* Must be called with OVS mutex held. */
int ovs_flow_tbl_insert(struct flow_table *table, struct sw_flow *flow,
const struct sw_flow_mask *mask)
{
int err;
err = flow_mask_insert(table, flow, mask);
if (err)
return err;
flow_key_insert(table, flow);
if (ovs_identifier_is_ufid(&flow->id))
flow_ufid_insert(table, flow);
return 0;
}
static int compare_mask_and_count(const void *a, const void *b)
{
const struct mask_count *mc_a = a;
const struct mask_count *mc_b = b;
return (s64)mc_b->counter - (s64)mc_a->counter;
}
/* Must be called with OVS mutex held. */
void ovs_flow_masks_rebalance(struct flow_table *table)
{
struct mask_array *ma = rcu_dereference_ovsl(table->mask_array);
struct mask_count *masks_and_count;
struct mask_array *new;
int masks_entries = 0;
int i;
/* Build array of all current entries with use counters. */
masks_and_count = kmalloc_array(ma->max, sizeof(*masks_and_count),
GFP_KERNEL);
if (!masks_and_count)
return;
for (i = 0; i < ma->max; i++) {
struct sw_flow_mask *mask;
int cpu;
mask = rcu_dereference_ovsl(ma->masks[i]);
if (unlikely(!mask))
break;
masks_and_count[i].index = i;
masks_and_count[i].counter = 0;
for_each_possible_cpu(cpu) {
struct mask_array_stats *stats;
unsigned int start;
u64 counter;
stats = per_cpu_ptr(ma->masks_usage_stats, cpu);
do {
start = u64_stats_fetch_begin(&stats->syncp);
counter = stats->usage_cntrs[i];
} while (u64_stats_fetch_retry(&stats->syncp, start));
masks_and_count[i].counter += counter;
}
/* Subtract the zero count value. */
masks_and_count[i].counter -= ma->masks_usage_zero_cntr[i];
/* Rather than calling tbl_mask_array_reset_counters()
* below when no change is needed, do it inline here.
*/
ma->masks_usage_zero_cntr[i] += masks_and_count[i].counter;
}
if (i == 0)
goto free_mask_entries;
/* Sort the entries */
masks_entries = i;
sort(masks_and_count, masks_entries, sizeof(*masks_and_count),
compare_mask_and_count, NULL);
/* If the order is the same, nothing to do... */
for (i = 0; i < masks_entries; i++) {
if (i != masks_and_count[i].index)
break;
}
if (i == masks_entries)
goto free_mask_entries;
/* Rebuild the new list in order of usage. */
new = tbl_mask_array_alloc(ma->max);
if (!new)
goto free_mask_entries;
for (i = 0; i < masks_entries; i++) {
int index = masks_and_count[i].index;
if (ovsl_dereference(ma->masks[index]))
new->masks[new->count++] = ma->masks[index];
}
rcu_assign_pointer(table->mask_array, new);
call_rcu(&ma->rcu, mask_array_rcu_cb);
free_mask_entries:
kfree(masks_and_count);
}
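/* Rebalance sketch with small numbers: suppose masks 0, 1 and 2 saw 10,
* 500 and 40 hits since the previous pass. After summing the per-CPU
* counters and subtracting the zero baseline, the sort yields the index
* order [1, 2, 0], a new mask_array is built in that order and published
* with RCU, and the next cold scan in flow_lookup() tries the 500-hit
* mask first. When the usage order already matches the current array
* order the rebuild is skipped entirely.
*/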
/* Initializes the flow module.
* Returns zero if successful or a negative error code. */
int ovs_flow_init(void)
{
BUILD_BUG_ON(__alignof__(struct sw_flow_key) % __alignof__(long));
BUILD_BUG_ON(sizeof(struct sw_flow_key) % sizeof(long));
flow_cache = kmem_cache_create("sw_flow", sizeof(struct sw_flow)
+ (nr_cpu_ids
* sizeof(struct sw_flow_stats *))
+ cpumask_size(),
0, 0, NULL);
if (flow_cache == NULL)
return -ENOMEM;
flow_stats_cache
= kmem_cache_create("sw_flow_stats", sizeof(struct sw_flow_stats),
0, SLAB_HWCACHE_ALIGN, NULL);
if (flow_stats_cache == NULL) {
kmem_cache_destroy(flow_cache);
flow_cache = NULL;
return -ENOMEM;
}
return 0;
}
/* Uninitializes the flow module. */
void ovs_flow_exit(void)
{
kmem_cache_destroy(flow_stats_cache);
kmem_cache_destroy(flow_cache);
}
| linux-master | net/openvswitch/flow_table.c |
// SPDX-License-Identifier: GPL-2.0-only
/*
* Copyright (c) 2014 Nicira, Inc.
* Copyright (c) 2013 Cisco Systems, Inc.
*/
#include <linux/kernel.h>
#include <linux/skbuff.h>
#include <linux/openvswitch.h>
#include <linux/module.h>
#include <net/udp.h>
#include <net/ip_tunnels.h>
#include <net/rtnetlink.h>
#include <net/vxlan.h>
#include "datapath.h"
#include "vport.h"
#include "vport-netdev.h"
static struct vport_ops ovs_vxlan_netdev_vport_ops;
static int vxlan_get_options(const struct vport *vport, struct sk_buff *skb)
{
struct vxlan_dev *vxlan = netdev_priv(vport->dev);
__be16 dst_port = vxlan->cfg.dst_port;
if (nla_put_u16(skb, OVS_TUNNEL_ATTR_DST_PORT, ntohs(dst_port)))
return -EMSGSIZE;
if (vxlan->cfg.flags & VXLAN_F_GBP) {
struct nlattr *exts;
exts = nla_nest_start_noflag(skb, OVS_TUNNEL_ATTR_EXTENSION);
if (!exts)
return -EMSGSIZE;
if (vxlan->cfg.flags & VXLAN_F_GBP &&
nla_put_flag(skb, OVS_VXLAN_EXT_GBP))
return -EMSGSIZE;
nla_nest_end(skb, exts);
}
return 0;
}
static const struct nla_policy exts_policy[OVS_VXLAN_EXT_MAX + 1] = {
[OVS_VXLAN_EXT_GBP] = { .type = NLA_FLAG, },
};
static int vxlan_configure_exts(struct vport *vport, struct nlattr *attr,
struct vxlan_config *conf)
{
struct nlattr *exts[OVS_VXLAN_EXT_MAX + 1];
int err;
if (nla_len(attr) < sizeof(struct nlattr))
return -EINVAL;
err = nla_parse_nested_deprecated(exts, OVS_VXLAN_EXT_MAX, attr,
exts_policy, NULL);
if (err < 0)
return err;
if (exts[OVS_VXLAN_EXT_GBP])
conf->flags |= VXLAN_F_GBP;
return 0;
}
static struct vport *vxlan_tnl_create(const struct vport_parms *parms)
{
struct net *net = ovs_dp_get_net(parms->dp);
struct nlattr *options = parms->options;
struct net_device *dev;
struct vport *vport;
struct nlattr *a;
int err;
struct vxlan_config conf = {
.no_share = true,
.flags = VXLAN_F_COLLECT_METADATA | VXLAN_F_UDP_ZERO_CSUM6_RX,
/* Don't restrict the packets that can be sent by MTU */
.mtu = IP_MAX_MTU,
};
if (!options) {
err = -EINVAL;
goto error;
}
a = nla_find_nested(options, OVS_TUNNEL_ATTR_DST_PORT);
if (a && nla_len(a) == sizeof(u16)) {
conf.dst_port = htons(nla_get_u16(a));
} else {
/* Require destination port from userspace. */
err = -EINVAL;
goto error;
}
vport = ovs_vport_alloc(0, &ovs_vxlan_netdev_vport_ops, parms);
if (IS_ERR(vport))
return vport;
a = nla_find_nested(options, OVS_TUNNEL_ATTR_EXTENSION);
if (a) {
err = vxlan_configure_exts(vport, a, &conf);
if (err) {
ovs_vport_free(vport);
goto error;
}
}
rtnl_lock();
dev = vxlan_dev_create(net, parms->name, NET_NAME_USER, &conf);
if (IS_ERR(dev)) {
rtnl_unlock();
ovs_vport_free(vport);
return ERR_CAST(dev);
}
err = dev_change_flags(dev, dev->flags | IFF_UP, NULL);
if (err < 0) {
rtnl_delete_link(dev, 0, NULL);
rtnl_unlock();
ovs_vport_free(vport);
goto error;
}
rtnl_unlock();
return vport;
error:
return ERR_PTR(err);
}
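/* Options sketch: the only mandatory option is a nested u16
* OVS_TUNNEL_ATTR_DST_PORT carrying the UDP destination port (e.g. the
* IANA-assigned 4789), and an optional OVS_TUNNEL_ATTR_EXTENSION nest
* with the OVS_VXLAN_EXT_GBP flag turns on Group Based Policy via
* VXLAN_F_GBP. The rest of struct vxlan_config is fixed here:
* collect-metadata mode, no device sharing, zero-checksum IPv6 UDP
* receive and an MTU of IP_MAX_MTU so the tunnel device itself does not
* limit packet size.
*/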
static struct vport *vxlan_create(const struct vport_parms *parms)
{
struct vport *vport;
vport = vxlan_tnl_create(parms);
if (IS_ERR(vport))
return vport;
return ovs_netdev_link(vport, parms->name);
}
static struct vport_ops ovs_vxlan_netdev_vport_ops = {
.type = OVS_VPORT_TYPE_VXLAN,
.create = vxlan_create,
.destroy = ovs_netdev_tunnel_destroy,
.get_options = vxlan_get_options,
.send = dev_queue_xmit,
};
static int __init ovs_vxlan_tnl_init(void)
{
return ovs_vport_ops_register(&ovs_vxlan_netdev_vport_ops);
}
static void __exit ovs_vxlan_tnl_exit(void)
{
ovs_vport_ops_unregister(&ovs_vxlan_netdev_vport_ops);
}
module_init(ovs_vxlan_tnl_init);
module_exit(ovs_vxlan_tnl_exit);
MODULE_DESCRIPTION("OVS: VXLAN switching port");
MODULE_LICENSE("GPL");
MODULE_ALIAS("vport-type-4");
| linux-master | net/openvswitch/vport-vxlan.c |
// SPDX-License-Identifier: GPL-2.0-only
/*
* Copyright (c) 2007-2012 Nicira, Inc.
*/
#include <linux/netdevice.h>
#include <net/genetlink.h>
#include <net/netns/generic.h>
#include "datapath.h"
#include "vport-internal_dev.h"
#include "vport-netdev.h"
static void dp_detach_port_notify(struct vport *vport)
{
struct sk_buff *notify;
struct datapath *dp;
dp = vport->dp;
notify = ovs_vport_cmd_build_info(vport, ovs_dp_get_net(dp),
0, 0, OVS_VPORT_CMD_DEL);
ovs_dp_detach_port(vport);
if (IS_ERR(notify)) {
genl_set_err(&dp_vport_genl_family, ovs_dp_get_net(dp), 0,
0, PTR_ERR(notify));
return;
}
genlmsg_multicast_netns(&dp_vport_genl_family,
ovs_dp_get_net(dp), notify, 0,
0, GFP_KERNEL);
}
void ovs_dp_notify_wq(struct work_struct *work)
{
struct ovs_net *ovs_net = container_of(work, struct ovs_net, dp_notify_work);
struct datapath *dp;
ovs_lock();
list_for_each_entry(dp, &ovs_net->dps, list_node) {
int i;
for (i = 0; i < DP_VPORT_HASH_BUCKETS; i++) {
struct vport *vport;
struct hlist_node *n;
hlist_for_each_entry_safe(vport, n, &dp->ports[i], dp_hash_node) {
if (vport->ops->type == OVS_VPORT_TYPE_INTERNAL)
continue;
if (!(netif_is_ovs_port(vport->dev)))
dp_detach_port_notify(vport);
}
}
}
ovs_unlock();
}
static int dp_device_event(struct notifier_block *unused, unsigned long event,
void *ptr)
{
struct ovs_net *ovs_net;
struct net_device *dev = netdev_notifier_info_to_dev(ptr);
struct vport *vport = NULL;
if (!ovs_is_internal_dev(dev))
vport = ovs_netdev_get_vport(dev);
if (!vport)
return NOTIFY_DONE;
if (event == NETDEV_UNREGISTER) {
/* upper_dev_unlink and decrement promisc immediately */
ovs_netdev_detach_dev(vport);
/* schedule vport destroy, dev_put and genl notification */
ovs_net = net_generic(dev_net(dev), ovs_net_id);
queue_work(system_wq, &ovs_net->dp_notify_work);
}
return NOTIFY_DONE;
}
struct notifier_block ovs_dp_device_notifier = {
.notifier_call = dp_device_event
};
| linux-master | net/openvswitch/dp_notify.c |
// SPDX-License-Identifier: GPL-2.0-only
/*
* Copyright (c) 2007-2014 Nicira, Inc.
*/
#include <linux/etherdevice.h>
#include <linux/if.h>
#include <linux/if_vlan.h>
#include <linux/jhash.h>
#include <linux/kernel.h>
#include <linux/list.h>
#include <linux/mutex.h>
#include <linux/percpu.h>
#include <linux/rcupdate.h>
#include <linux/rtnetlink.h>
#include <linux/compat.h>
#include <net/net_namespace.h>
#include <linux/module.h>
#include "datapath.h"
#include "vport.h"
#include "vport-internal_dev.h"
static LIST_HEAD(vport_ops_list);
/* Protected by RCU read lock for reading, ovs_mutex for writing. */
static struct hlist_head *dev_table;
#define VPORT_HASH_BUCKETS 1024
/**
* ovs_vport_init - initialize vport subsystem
*
* Called at module load time to initialize the vport subsystem.
*/
int ovs_vport_init(void)
{
dev_table = kcalloc(VPORT_HASH_BUCKETS, sizeof(struct hlist_head),
GFP_KERNEL);
if (!dev_table)
return -ENOMEM;
return 0;
}
/**
* ovs_vport_exit - shutdown vport subsystem
*
* Called at module exit time to shutdown the vport subsystem.
*/
void ovs_vport_exit(void)
{
kfree(dev_table);
}
static struct hlist_head *hash_bucket(const struct net *net, const char *name)
{
unsigned int hash = jhash(name, strlen(name), (unsigned long) net);
return &dev_table[hash & (VPORT_HASH_BUCKETS - 1)];
}
int __ovs_vport_ops_register(struct vport_ops *ops)
{
int err = -EEXIST;
struct vport_ops *o;
ovs_lock();
list_for_each_entry(o, &vport_ops_list, list)
if (ops->type == o->type)
goto errout;
list_add_tail(&ops->list, &vport_ops_list);
err = 0;
errout:
ovs_unlock();
return err;
}
EXPORT_SYMBOL_GPL(__ovs_vport_ops_register);
void ovs_vport_ops_unregister(struct vport_ops *ops)
{
ovs_lock();
list_del(&ops->list);
ovs_unlock();
}
EXPORT_SYMBOL_GPL(ovs_vport_ops_unregister);
/**
* ovs_vport_locate - find a port that has already been created
*
* @net: network namespace
* @name: name of port to find
*
* Must be called with ovs or RCU read lock.
*/
struct vport *ovs_vport_locate(const struct net *net, const char *name)
{
struct hlist_head *bucket = hash_bucket(net, name);
struct vport *vport;
hlist_for_each_entry_rcu(vport, bucket, hash_node,
lockdep_ovsl_is_held())
if (!strcmp(name, ovs_vport_name(vport)) &&
net_eq(ovs_dp_get_net(vport->dp), net))
return vport;
return NULL;
}
/**
* ovs_vport_alloc - allocate and initialize new vport
*
* @priv_size: Size of private data area to allocate.
* @ops: vport device ops
* @parms: information about new vport.
*
* Allocate and initialize a new vport defined by @ops. The vport will contain
* a private data area of size @priv_size that can be accessed using
* vport_priv(). Some parameters of the vport will be initialized from @parms.
* @vports that are no longer needed should be released with
* vport_free().
*/
struct vport *ovs_vport_alloc(int priv_size, const struct vport_ops *ops,
const struct vport_parms *parms)
{
struct vport *vport;
size_t alloc_size;
int err;
alloc_size = sizeof(struct vport);
if (priv_size) {
alloc_size = ALIGN(alloc_size, VPORT_ALIGN);
alloc_size += priv_size;
}
vport = kzalloc(alloc_size, GFP_KERNEL);
if (!vport)
return ERR_PTR(-ENOMEM);
vport->upcall_stats = netdev_alloc_pcpu_stats(struct vport_upcall_stats_percpu);
if (!vport->upcall_stats) {
err = -ENOMEM;
goto err_kfree_vport;
}
vport->dp = parms->dp;
vport->port_no = parms->port_no;
vport->ops = ops;
INIT_HLIST_NODE(&vport->dp_hash_node);
if (ovs_vport_set_upcall_portids(vport, parms->upcall_portids)) {
err = -EINVAL;
goto err_free_percpu;
}
return vport;
err_free_percpu:
free_percpu(vport->upcall_stats);
err_kfree_vport:
kfree(vport);
return ERR_PTR(err);
}
EXPORT_SYMBOL_GPL(ovs_vport_alloc);
/**
* ovs_vport_free - uninitialize and free vport
*
* @vport: vport to free
*
* Frees a vport allocated with vport_alloc() when it is no longer needed.
*
* The caller must ensure that an RCU grace period has passed since the last
* time @vport was in a datapath.
*/
void ovs_vport_free(struct vport *vport)
{
/* vport is freed from an RCU callback or the error path; therefore
* it is safe to use a raw dereference.
*/
kfree(rcu_dereference_raw(vport->upcall_portids));
free_percpu(vport->upcall_stats);
kfree(vport);
}
EXPORT_SYMBOL_GPL(ovs_vport_free);
static struct vport_ops *ovs_vport_lookup(const struct vport_parms *parms)
{
struct vport_ops *ops;
list_for_each_entry(ops, &vport_ops_list, list)
if (ops->type == parms->type)
return ops;
return NULL;
}
/**
* ovs_vport_add - add vport device (for kernel callers)
*
* @parms: Information about new vport.
*
* Creates a new vport with the specified configuration (which is dependent on
* device type). ovs_mutex must be held.
*/
struct vport *ovs_vport_add(const struct vport_parms *parms)
{
struct vport_ops *ops;
struct vport *vport;
ops = ovs_vport_lookup(parms);
if (ops) {
struct hlist_head *bucket;
if (!try_module_get(ops->owner))
return ERR_PTR(-EAFNOSUPPORT);
vport = ops->create(parms);
if (IS_ERR(vport)) {
module_put(ops->owner);
return vport;
}
bucket = hash_bucket(ovs_dp_get_net(vport->dp),
ovs_vport_name(vport));
hlist_add_head_rcu(&vport->hash_node, bucket);
return vport;
}
/* Unlock to attempt the module load, and return -EAGAIN if the load
* was successful, since we need to restart the port addition
* workflow.
*/
ovs_unlock();
request_module("vport-type-%d", parms->type);
ovs_lock();
if (!ovs_vport_lookup(parms))
return ERR_PTR(-EAFNOSUPPORT);
else
return ERR_PTR(-EAGAIN);
}
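/* Autoload sketch: when no registered vport_ops matches parms->type,
* ovs_mutex is dropped and "vport-type-<N>" is requested, the alias the
* tunnel modules advertise (e.g. MODULE_ALIAS("vport-type-4") in
* vport-vxlan.c for OVS_VPORT_TYPE_VXLAN). If the ops show up after the
* lock is retaken, -EAGAIN tells the caller to restart the whole port
* addition; otherwise the type is reported as unsupported.
*/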
/**
* ovs_vport_set_options - modify existing vport device (for kernel callers)
*
* @vport: vport to modify.
* @options: New configuration.
*
* Modifies an existing device with the specified configuration (which is
* dependent on device type). ovs_mutex must be held.
*/
int ovs_vport_set_options(struct vport *vport, struct nlattr *options)
{
if (!vport->ops->set_options)
return -EOPNOTSUPP;
return vport->ops->set_options(vport, options);
}
/**
* ovs_vport_del - delete existing vport device
*
* @vport: vport to delete.
*
* Detaches @vport from its datapath and destroys it. ovs_mutex must
* be held.
*/
void ovs_vport_del(struct vport *vport)
{
hlist_del_rcu(&vport->hash_node);
module_put(vport->ops->owner);
vport->ops->destroy(vport);
}
/**
* ovs_vport_get_stats - retrieve device stats
*
* @vport: vport from which to retrieve the stats
* @stats: location to store stats
*
* Retrieves transmit, receive, and error stats for the given device.
*
* Must be called with ovs_mutex or rcu_read_lock.
*/
void ovs_vport_get_stats(struct vport *vport, struct ovs_vport_stats *stats)
{
const struct rtnl_link_stats64 *dev_stats;
struct rtnl_link_stats64 temp;
dev_stats = dev_get_stats(vport->dev, &temp);
stats->rx_errors = dev_stats->rx_errors;
stats->tx_errors = dev_stats->tx_errors;
stats->tx_dropped = dev_stats->tx_dropped;
stats->rx_dropped = dev_stats->rx_dropped;
stats->rx_bytes = dev_stats->rx_bytes;
stats->rx_packets = dev_stats->rx_packets;
stats->tx_bytes = dev_stats->tx_bytes;
stats->tx_packets = dev_stats->tx_packets;
}
/**
* ovs_vport_get_upcall_stats - retrieve upcall stats
*
* @vport: vport from which to retrieve the stats.
* @skb: sk_buff where upcall stats should be appended.
*
* Retrieves upcall stats for the given device.
*
* Must be called with ovs_mutex or rcu_read_lock.
*/
int ovs_vport_get_upcall_stats(struct vport *vport, struct sk_buff *skb)
{
struct nlattr *nla;
int i;
__u64 tx_success = 0;
__u64 tx_fail = 0;
for_each_possible_cpu(i) {
const struct vport_upcall_stats_percpu *stats;
unsigned int start;
stats = per_cpu_ptr(vport->upcall_stats, i);
do {
start = u64_stats_fetch_begin(&stats->syncp);
tx_success += u64_stats_read(&stats->n_success);
tx_fail += u64_stats_read(&stats->n_fail);
} while (u64_stats_fetch_retry(&stats->syncp, start));
}
nla = nla_nest_start_noflag(skb, OVS_VPORT_ATTR_UPCALL_STATS);
if (!nla)
return -EMSGSIZE;
if (nla_put_u64_64bit(skb, OVS_VPORT_UPCALL_ATTR_SUCCESS, tx_success,
OVS_VPORT_ATTR_PAD)) {
nla_nest_cancel(skb, nla);
return -EMSGSIZE;
}
if (nla_put_u64_64bit(skb, OVS_VPORT_UPCALL_ATTR_FAIL, tx_fail,
OVS_VPORT_ATTR_PAD)) {
nla_nest_cancel(skb, nla);
return -EMSGSIZE;
}
nla_nest_end(skb, nla);
return 0;
}
/**
* ovs_vport_get_options - retrieve device options
*
* @vport: vport from which to retrieve the options.
* @skb: sk_buff where options should be appended.
*
* Retrieves the configuration of the given device, appending an
* %OVS_VPORT_ATTR_OPTIONS attribute that in turn contains nested
* vport-specific attributes to @skb.
*
* Returns 0 if successful, -EMSGSIZE if @skb has insufficient room, or another
* negative error code if a real error occurred. If an error occurs, @skb is
* left unmodified.
*
* Must be called with ovs_mutex or rcu_read_lock.
*/
int ovs_vport_get_options(const struct vport *vport, struct sk_buff *skb)
{
struct nlattr *nla;
int err;
if (!vport->ops->get_options)
return 0;
nla = nla_nest_start_noflag(skb, OVS_VPORT_ATTR_OPTIONS);
if (!nla)
return -EMSGSIZE;
err = vport->ops->get_options(vport, skb);
if (err) {
nla_nest_cancel(skb, nla);
return err;
}
nla_nest_end(skb, nla);
return 0;
}
/**
* ovs_vport_set_upcall_portids - set upcall portids of @vport.
*
* @vport: vport to modify.
* @ids: new configuration, an array of port ids.
*
* Sets the vport's upcall_portids to @ids.
*
* Returns 0 if successful, -EINVAL if @ids is zero length or cannot be parsed
* as an array of U32.
*
* Must be called with ovs_mutex.
*/
int ovs_vport_set_upcall_portids(struct vport *vport, const struct nlattr *ids)
{
struct vport_portids *old, *vport_portids;
if (!nla_len(ids) || nla_len(ids) % sizeof(u32))
return -EINVAL;
old = ovsl_dereference(vport->upcall_portids);
vport_portids = kmalloc(sizeof(*vport_portids) + nla_len(ids),
GFP_KERNEL);
if (!vport_portids)
return -ENOMEM;
vport_portids->n_ids = nla_len(ids) / sizeof(u32);
vport_portids->rn_ids = reciprocal_value(vport_portids->n_ids);
nla_memcpy(vport_portids->ids, ids, nla_len(ids));
rcu_assign_pointer(vport->upcall_portids, vport_portids);
if (old)
kfree_rcu(old, rcu);
return 0;
}
/**
* ovs_vport_get_upcall_portids - get the upcall_portids of @vport.
*
* @vport: vport from which to retrieve the portids.
* @skb: sk_buff where portids should be appended.
*
* Retrieves the configuration of the given vport, appending the
* %OVS_VPORT_ATTR_UPCALL_PID attribute which is the array of upcall
* portids to @skb.
*
* Returns 0 if successful, -EMSGSIZE if @skb has insufficient room.
* If an error occurs, @skb is left unmodified. Must be called with
* ovs_mutex or rcu_read_lock.
*/
int ovs_vport_get_upcall_portids(const struct vport *vport,
struct sk_buff *skb)
{
struct vport_portids *ids;
ids = rcu_dereference_ovsl(vport->upcall_portids);
if (vport->dp->user_features & OVS_DP_F_VPORT_PIDS)
return nla_put(skb, OVS_VPORT_ATTR_UPCALL_PID,
ids->n_ids * sizeof(u32), (void *)ids->ids);
else
return nla_put_u32(skb, OVS_VPORT_ATTR_UPCALL_PID, ids->ids[0]);
}
/**
* ovs_vport_find_upcall_portid - find the upcall portid to send upcall.
*
* @vport: vport from which the missed packet is received.
* @skb: skb that the missed packet was received.
*
* Uses the skb_get_hash() to select the upcall portid to send the
* upcall.
*
* Returns the portid of the target socket. Must be called with rcu_read_lock.
*/
u32 ovs_vport_find_upcall_portid(const struct vport *vport,
struct sk_buff *skb)
{
struct vport_portids *ids;
u32 ids_index;
u32 hash;
ids = rcu_dereference(vport->upcall_portids);
/* If there is only one portid, select it in the fast-path. */
if (ids->n_ids == 1)
return ids->ids[0];
hash = skb_get_hash(skb);
ids_index = hash - ids->n_ids * reciprocal_divide(hash, ids->rn_ids);
return ids->ids[ids_index];
}
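/* The ids_index computation is simply "hash % ids->n_ids" written with
* reciprocal_divide() to avoid a hardware division on the fast path:
* hash - n_ids * (hash / n_ids) is the remainder, e.g. hash == 10 with
* three configured portids selects ids->ids[1] (10 - 3 * 3 == 1).
*/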
/**
* ovs_vport_receive - pass up received packet to the datapath for processing
*
* @vport: vport that received the packet
* @skb: skb that was received
* @tun_info: tunnel (if any) that carried packet
*
* Must be called with rcu_read_lock. The packet cannot be shared and
* skb->data should point to the Ethernet header.
*/
int ovs_vport_receive(struct vport *vport, struct sk_buff *skb,
const struct ip_tunnel_info *tun_info)
{
struct sw_flow_key key;
int error;
OVS_CB(skb)->input_vport = vport;
OVS_CB(skb)->mru = 0;
OVS_CB(skb)->cutlen = 0;
if (unlikely(dev_net(skb->dev) != ovs_dp_get_net(vport->dp))) {
u32 mark;
mark = skb->mark;
skb_scrub_packet(skb, true);
skb->mark = mark;
tun_info = NULL;
}
/* Extract flow from 'skb' into 'key'. */
error = ovs_flow_key_extract(tun_info, skb, &key);
if (unlikely(error)) {
kfree_skb(skb);
return error;
}
ovs_dp_process_packet(skb, &key);
return 0;
}
static int packet_length(const struct sk_buff *skb,
struct net_device *dev)
{
int length = skb->len - dev->hard_header_len;
if (!skb_vlan_tag_present(skb) &&
eth_type_vlan(skb->protocol))
length -= VLAN_HLEN;
/* Don't subtract for multiple VLAN tags. Most (all?) drivers allow
* (ETH_HLEN + VLAN_HLEN) in addition to the mtu value, but almost none
* account for 802.1ad, e.g. is_skb_forwardable().
*/
*/
return length > 0 ? length : 0;
}
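/* Worked example: a 1518-byte 802.1Q frame whose tag was not offloaded
* by the NIC, on a netdev with a 14-byte hard_header_len and a 1500-byte
* MTU, yields 1518 - 14 - VLAN_HLEN = 1500, so ovs_vport_send() below
* does not treat it as an over-MTU packet.
*/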
void ovs_vport_send(struct vport *vport, struct sk_buff *skb, u8 mac_proto)
{
int mtu = vport->dev->mtu;
switch (vport->dev->type) {
case ARPHRD_NONE:
if (mac_proto == MAC_PROTO_ETHERNET) {
skb_reset_network_header(skb);
skb_reset_mac_len(skb);
skb->protocol = htons(ETH_P_TEB);
} else if (mac_proto != MAC_PROTO_NONE) {
WARN_ON_ONCE(1);
goto drop;
}
break;
case ARPHRD_ETHER:
if (mac_proto != MAC_PROTO_ETHERNET)
goto drop;
break;
default:
goto drop;
}
if (unlikely(packet_length(skb, vport->dev) > mtu &&
!skb_is_gso(skb))) {
vport->dev->stats.tx_errors++;
if (vport->dev->flags & IFF_UP)
net_warn_ratelimited("%s: dropped over-mtu packet: "
"%d > %d\n", vport->dev->name,
packet_length(skb, vport->dev),
mtu);
goto drop;
}
skb->dev = vport->dev;
skb_clear_tstamp(skb);
vport->ops->send(skb);
return;
drop:
kfree_skb(skb);
}
| linux-master | net/openvswitch/vport.c |
// SPDX-License-Identifier: GPL-2.0-only
/*
* Copyright (c) 2007-2014 Nicira, Inc.
*/
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
#include <linux/init.h>
#include <linux/module.h>
#include <linux/if_arp.h>
#include <linux/if_vlan.h>
#include <linux/in.h>
#include <linux/ip.h>
#include <linux/jhash.h>
#include <linux/delay.h>
#include <linux/time.h>
#include <linux/etherdevice.h>
#include <linux/genetlink.h>
#include <linux/kernel.h>
#include <linux/kthread.h>
#include <linux/mutex.h>
#include <linux/percpu.h>
#include <linux/rcupdate.h>
#include <linux/tcp.h>
#include <linux/udp.h>
#include <linux/ethtool.h>
#include <linux/wait.h>
#include <asm/div64.h>
#include <linux/highmem.h>
#include <linux/netfilter_bridge.h>
#include <linux/netfilter_ipv4.h>
#include <linux/inetdevice.h>
#include <linux/list.h>
#include <linux/openvswitch.h>
#include <linux/rculist.h>
#include <linux/dmi.h>
#include <net/genetlink.h>
#include <net/gso.h>
#include <net/net_namespace.h>
#include <net/netns/generic.h>
#include <net/pkt_cls.h>
#include "datapath.h"
#include "drop.h"
#include "flow.h"
#include "flow_table.h"
#include "flow_netlink.h"
#include "meter.h"
#include "openvswitch_trace.h"
#include "vport-internal_dev.h"
#include "vport-netdev.h"
unsigned int ovs_net_id __read_mostly;
static struct genl_family dp_packet_genl_family;
static struct genl_family dp_flow_genl_family;
static struct genl_family dp_datapath_genl_family;
static const struct nla_policy flow_policy[];
static const struct genl_multicast_group ovs_dp_flow_multicast_group = {
.name = OVS_FLOW_MCGROUP,
};
static const struct genl_multicast_group ovs_dp_datapath_multicast_group = {
.name = OVS_DATAPATH_MCGROUP,
};
static const struct genl_multicast_group ovs_dp_vport_multicast_group = {
.name = OVS_VPORT_MCGROUP,
};
/* Check if we need to build a reply message.
* OVS userspace sets the NLM_F_ECHO flag if it needs the reply. */
static bool ovs_must_notify(struct genl_family *family, struct genl_info *info,
unsigned int group)
{
return info->nlhdr->nlmsg_flags & NLM_F_ECHO ||
genl_has_listeners(family, genl_info_net(info), group);
}
static void ovs_notify(struct genl_family *family,
struct sk_buff *skb, struct genl_info *info)
{
genl_notify(family, skb, info, 0, GFP_KERNEL);
}
/**
* DOC: Locking:
*
* All writes, e.g. writes to device state (add/remove datapath or port, set
* operations on vports, etc.) and writes to other state (flow table
* modifications, setting miscellaneous datapath parameters, etc.), are
* protected by ovs_lock.
*
* Reads are protected by RCU.
*
* There are a few special cases (mostly stats) that have their own
* synchronization but they nest under all of above and don't interact with
* each other.
*
* The RTNL lock nests inside ovs_mutex.
*/
static DEFINE_MUTEX(ovs_mutex);
void ovs_lock(void)
{
mutex_lock(&ovs_mutex);
}
void ovs_unlock(void)
{
mutex_unlock(&ovs_mutex);
}
#ifdef CONFIG_LOCKDEP
int lockdep_ovsl_is_held(void)
{
if (debug_locks)
return lockdep_is_held(&ovs_mutex);
else
return 1;
}
#endif
static struct vport *new_vport(const struct vport_parms *);
static int queue_gso_packets(struct datapath *dp, struct sk_buff *,
const struct sw_flow_key *,
const struct dp_upcall_info *,
uint32_t cutlen);
static int queue_userspace_packet(struct datapath *dp, struct sk_buff *,
const struct sw_flow_key *,
const struct dp_upcall_info *,
uint32_t cutlen);
static void ovs_dp_masks_rebalance(struct work_struct *work);
static int ovs_dp_set_upcall_portids(struct datapath *, const struct nlattr *);
/* Must be called with rcu_read_lock or ovs_mutex. */
const char *ovs_dp_name(const struct datapath *dp)
{
struct vport *vport = ovs_vport_ovsl_rcu(dp, OVSP_LOCAL);
return ovs_vport_name(vport);
}
static int get_dpifindex(const struct datapath *dp)
{
struct vport *local;
int ifindex;
rcu_read_lock();
local = ovs_vport_rcu(dp, OVSP_LOCAL);
if (local)
ifindex = local->dev->ifindex;
else
ifindex = 0;
rcu_read_unlock();
return ifindex;
}
static void destroy_dp_rcu(struct rcu_head *rcu)
{
struct datapath *dp = container_of(rcu, struct datapath, rcu);
ovs_flow_tbl_destroy(&dp->table);
free_percpu(dp->stats_percpu);
kfree(dp->ports);
ovs_meters_exit(dp);
kfree(rcu_dereference_raw(dp->upcall_portids));
kfree(dp);
}
static struct hlist_head *vport_hash_bucket(const struct datapath *dp,
u16 port_no)
{
return &dp->ports[port_no & (DP_VPORT_HASH_BUCKETS - 1)];
}
/* Called with ovs_mutex or RCU read lock. */
struct vport *ovs_lookup_vport(const struct datapath *dp, u16 port_no)
{
struct vport *vport;
struct hlist_head *head;
head = vport_hash_bucket(dp, port_no);
hlist_for_each_entry_rcu(vport, head, dp_hash_node,
lockdep_ovsl_is_held()) {
if (vport->port_no == port_no)
return vport;
}
return NULL;
}
/* Called with ovs_mutex. */
static struct vport *new_vport(const struct vport_parms *parms)
{
struct vport *vport;
vport = ovs_vport_add(parms);
if (!IS_ERR(vport)) {
struct datapath *dp = parms->dp;
struct hlist_head *head = vport_hash_bucket(dp, vport->port_no);
hlist_add_head_rcu(&vport->dp_hash_node, head);
}
return vport;
}
static void ovs_vport_update_upcall_stats(struct sk_buff *skb,
const struct dp_upcall_info *upcall_info,
bool upcall_result)
{
struct vport *p = OVS_CB(skb)->input_vport;
struct vport_upcall_stats_percpu *stats;
if (upcall_info->cmd != OVS_PACKET_CMD_MISS &&
upcall_info->cmd != OVS_PACKET_CMD_ACTION)
return;
stats = this_cpu_ptr(p->upcall_stats);
u64_stats_update_begin(&stats->syncp);
if (upcall_result)
u64_stats_inc(&stats->n_success);
else
u64_stats_inc(&stats->n_fail);
u64_stats_update_end(&stats->syncp);
}
void ovs_dp_detach_port(struct vport *p)
{
ASSERT_OVSL();
/* First drop references to device. */
hlist_del_rcu(&p->dp_hash_node);
/* Then destroy it. */
ovs_vport_del(p);
}
/* Must be called with rcu_read_lock. */
void ovs_dp_process_packet(struct sk_buff *skb, struct sw_flow_key *key)
{
const struct vport *p = OVS_CB(skb)->input_vport;
struct datapath *dp = p->dp;
struct sw_flow *flow;
struct sw_flow_actions *sf_acts;
struct dp_stats_percpu *stats;
u64 *stats_counter;
u32 n_mask_hit;
u32 n_cache_hit;
int error;
stats = this_cpu_ptr(dp->stats_percpu);
/* Look up flow. */
flow = ovs_flow_tbl_lookup_stats(&dp->table, key, skb_get_hash(skb),
&n_mask_hit, &n_cache_hit);
if (unlikely(!flow)) {
struct dp_upcall_info upcall;
memset(&upcall, 0, sizeof(upcall));
upcall.cmd = OVS_PACKET_CMD_MISS;
if (dp->user_features & OVS_DP_F_DISPATCH_UPCALL_PER_CPU)
upcall.portid =
ovs_dp_get_upcall_portid(dp, smp_processor_id());
else
upcall.portid = ovs_vport_find_upcall_portid(p, skb);
upcall.mru = OVS_CB(skb)->mru;
error = ovs_dp_upcall(dp, skb, key, &upcall, 0);
switch (error) {
case 0:
case -EAGAIN:
case -ERESTARTSYS:
case -EINTR:
consume_skb(skb);
break;
default:
kfree_skb(skb);
break;
}
stats_counter = &stats->n_missed;
goto out;
}
ovs_flow_stats_update(flow, key->tp.flags, skb);
sf_acts = rcu_dereference(flow->sf_acts);
error = ovs_execute_actions(dp, skb, sf_acts, key);
if (unlikely(error))
net_dbg_ratelimited("ovs: action execution error on datapath %s: %d\n",
ovs_dp_name(dp), error);
stats_counter = &stats->n_hit;
out:
/* Update datapath statistics. */
u64_stats_update_begin(&stats->syncp);
(*stats_counter)++;
stats->n_mask_hit += n_mask_hit;
stats->n_cache_hit += n_cache_hit;
u64_stats_update_end(&stats->syncp);
}
int ovs_dp_upcall(struct datapath *dp, struct sk_buff *skb,
const struct sw_flow_key *key,
const struct dp_upcall_info *upcall_info,
uint32_t cutlen)
{
struct dp_stats_percpu *stats;
int err;
if (trace_ovs_dp_upcall_enabled())
trace_ovs_dp_upcall(dp, skb, key, upcall_info);
if (upcall_info->portid == 0) {
err = -ENOTCONN;
goto err;
}
if (!skb_is_gso(skb))
err = queue_userspace_packet(dp, skb, key, upcall_info, cutlen);
else
err = queue_gso_packets(dp, skb, key, upcall_info, cutlen);
ovs_vport_update_upcall_stats(skb, upcall_info, !err);
if (err)
goto err;
return 0;
err:
stats = this_cpu_ptr(dp->stats_percpu);
u64_stats_update_begin(&stats->syncp);
stats->n_lost++;
u64_stats_update_end(&stats->syncp);
return err;
}
static int queue_gso_packets(struct datapath *dp, struct sk_buff *skb,
const struct sw_flow_key *key,
const struct dp_upcall_info *upcall_info,
uint32_t cutlen)
{
unsigned int gso_type = skb_shinfo(skb)->gso_type;
struct sw_flow_key later_key;
struct sk_buff *segs, *nskb;
int err;
BUILD_BUG_ON(sizeof(*OVS_CB(skb)) > SKB_GSO_CB_OFFSET);
segs = __skb_gso_segment(skb, NETIF_F_SG, false);
if (IS_ERR(segs))
return PTR_ERR(segs);
if (segs == NULL)
return -EINVAL;
if (gso_type & SKB_GSO_UDP) {
/* The initial flow key extracted by ovs_flow_key_extract()
* in this case is for a first fragment, so we need to
* properly mark later fragments.
*/
later_key = *key;
later_key.ip.frag = OVS_FRAG_TYPE_LATER;
}
/* Queue all of the segments. */
skb_list_walk_safe(segs, skb, nskb) {
if (gso_type & SKB_GSO_UDP && skb != segs)
key = &later_key;
err = queue_userspace_packet(dp, skb, key, upcall_info, cutlen);
if (err)
break;
}
/* Free all of the segments. */
skb_list_walk_safe(segs, skb, nskb) {
if (err)
kfree_skb(skb);
else
consume_skb(skb);
}
return err;
}
static size_t upcall_msg_size(const struct dp_upcall_info *upcall_info,
unsigned int hdrlen, int actions_attrlen)
{
size_t size = NLMSG_ALIGN(sizeof(struct ovs_header))
+ nla_total_size(hdrlen) /* OVS_PACKET_ATTR_PACKET */
+ nla_total_size(ovs_key_attr_size()) /* OVS_PACKET_ATTR_KEY */
+ nla_total_size(sizeof(unsigned int)) /* OVS_PACKET_ATTR_LEN */
+ nla_total_size(sizeof(u64)); /* OVS_PACKET_ATTR_HASH */
/* OVS_PACKET_ATTR_USERDATA */
if (upcall_info->userdata)
size += NLA_ALIGN(upcall_info->userdata->nla_len);
/* OVS_PACKET_ATTR_EGRESS_TUN_KEY */
if (upcall_info->egress_tun_info)
size += nla_total_size(ovs_tun_key_attr_size());
/* OVS_PACKET_ATTR_ACTIONS */
if (upcall_info->actions_len)
size += nla_total_size(actions_attrlen);
/* OVS_PACKET_ATTR_MRU */
if (upcall_info->mru)
size += nla_total_size(sizeof(upcall_info->mru));
return size;
}
static void pad_packet(struct datapath *dp, struct sk_buff *skb)
{
if (!(dp->user_features & OVS_DP_F_UNALIGNED)) {
size_t plen = NLA_ALIGN(skb->len) - skb->len;
if (plen > 0)
skb_put_zero(skb, plen);
}
}
static int queue_userspace_packet(struct datapath *dp, struct sk_buff *skb,
const struct sw_flow_key *key,
const struct dp_upcall_info *upcall_info,
uint32_t cutlen)
{
struct ovs_header *upcall;
struct sk_buff *nskb = NULL;
struct sk_buff *user_skb = NULL; /* to be queued to userspace */
struct nlattr *nla;
size_t len;
unsigned int hlen;
int err, dp_ifindex;
u64 hash;
dp_ifindex = get_dpifindex(dp);
if (!dp_ifindex)
return -ENODEV;
if (skb_vlan_tag_present(skb)) {
nskb = skb_clone(skb, GFP_ATOMIC);
if (!nskb)
return -ENOMEM;
nskb = __vlan_hwaccel_push_inside(nskb);
if (!nskb)
return -ENOMEM;
skb = nskb;
}
if (nla_attr_size(skb->len) > USHRT_MAX) {
err = -EFBIG;
goto out;
}
/* Complete checksum if needed */
if (skb->ip_summed == CHECKSUM_PARTIAL &&
(err = skb_csum_hwoffload_help(skb, 0)))
goto out;
/* Older versions of OVS user space enforce alignment of the last
* Netlink attribute to NLA_ALIGNTO which would require extensive
* padding logic. Only perform zerocopy if padding is not required.
*/
if (dp->user_features & OVS_DP_F_UNALIGNED)
hlen = skb_zerocopy_headlen(skb);
else
hlen = skb->len;
len = upcall_msg_size(upcall_info, hlen - cutlen,
OVS_CB(skb)->acts_origlen);
user_skb = genlmsg_new(len, GFP_ATOMIC);
if (!user_skb) {
err = -ENOMEM;
goto out;
}
upcall = genlmsg_put(user_skb, 0, 0, &dp_packet_genl_family,
0, upcall_info->cmd);
if (!upcall) {
err = -EINVAL;
goto out;
}
upcall->dp_ifindex = dp_ifindex;
err = ovs_nla_put_key(key, key, OVS_PACKET_ATTR_KEY, false, user_skb);
if (err)
goto out;
if (upcall_info->userdata)
__nla_put(user_skb, OVS_PACKET_ATTR_USERDATA,
nla_len(upcall_info->userdata),
nla_data(upcall_info->userdata));
if (upcall_info->egress_tun_info) {
nla = nla_nest_start_noflag(user_skb,
OVS_PACKET_ATTR_EGRESS_TUN_KEY);
if (!nla) {
err = -EMSGSIZE;
goto out;
}
err = ovs_nla_put_tunnel_info(user_skb,
upcall_info->egress_tun_info);
if (err)
goto out;
nla_nest_end(user_skb, nla);
}
if (upcall_info->actions_len) {
nla = nla_nest_start_noflag(user_skb, OVS_PACKET_ATTR_ACTIONS);
if (!nla) {
err = -EMSGSIZE;
goto out;
}
err = ovs_nla_put_actions(upcall_info->actions,
upcall_info->actions_len,
user_skb);
if (!err)
nla_nest_end(user_skb, nla);
else
nla_nest_cancel(user_skb, nla);
}
/* Add OVS_PACKET_ATTR_MRU */
if (upcall_info->mru &&
nla_put_u16(user_skb, OVS_PACKET_ATTR_MRU, upcall_info->mru)) {
err = -ENOBUFS;
goto out;
}
/* Add OVS_PACKET_ATTR_LEN when packet is truncated */
if (cutlen > 0 &&
nla_put_u32(user_skb, OVS_PACKET_ATTR_LEN, skb->len)) {
err = -ENOBUFS;
goto out;
}
/* Add OVS_PACKET_ATTR_HASH */
hash = skb_get_hash_raw(skb);
if (skb->sw_hash)
hash |= OVS_PACKET_HASH_SW_BIT;
if (skb->l4_hash)
hash |= OVS_PACKET_HASH_L4_BIT;
if (nla_put(user_skb, OVS_PACKET_ATTR_HASH, sizeof (u64), &hash)) {
err = -ENOBUFS;
goto out;
}
/* Only reserve room for the attribute header; the packet data is added
* in skb_zerocopy(). */
if (!(nla = nla_reserve(user_skb, OVS_PACKET_ATTR_PACKET, 0))) {
err = -ENOBUFS;
goto out;
}
nla->nla_len = nla_attr_size(skb->len - cutlen);
err = skb_zerocopy(user_skb, skb, skb->len - cutlen, hlen);
if (err)
goto out;
/* Pad OVS_PACKET_ATTR_PACKET if linear copy was performed */
pad_packet(dp, user_skb);
((struct nlmsghdr *) user_skb->data)->nlmsg_len = user_skb->len;
err = genlmsg_unicast(ovs_dp_get_net(dp), user_skb, upcall_info->portid);
user_skb = NULL;
out:
if (err)
skb_tx_error(skb);
consume_skb(user_skb);
consume_skb(nskb);
return err;
}
static int ovs_packet_cmd_execute(struct sk_buff *skb, struct genl_info *info)
{
struct ovs_header *ovs_header = genl_info_userhdr(info);
struct net *net = sock_net(skb->sk);
struct nlattr **a = info->attrs;
struct sw_flow_actions *acts;
struct sk_buff *packet;
struct sw_flow *flow;
struct sw_flow_actions *sf_acts;
struct datapath *dp;
struct vport *input_vport;
u16 mru = 0;
u64 hash;
int len;
int err;
bool log = !a[OVS_PACKET_ATTR_PROBE];
err = -EINVAL;
if (!a[OVS_PACKET_ATTR_PACKET] || !a[OVS_PACKET_ATTR_KEY] ||
!a[OVS_PACKET_ATTR_ACTIONS])
goto err;
len = nla_len(a[OVS_PACKET_ATTR_PACKET]);
packet = __dev_alloc_skb(NET_IP_ALIGN + len, GFP_KERNEL);
err = -ENOMEM;
if (!packet)
goto err;
skb_reserve(packet, NET_IP_ALIGN);
nla_memcpy(__skb_put(packet, len), a[OVS_PACKET_ATTR_PACKET], len);
/* Set packet's mru */
if (a[OVS_PACKET_ATTR_MRU]) {
mru = nla_get_u16(a[OVS_PACKET_ATTR_MRU]);
packet->ignore_df = 1;
}
OVS_CB(packet)->mru = mru;
if (a[OVS_PACKET_ATTR_HASH]) {
hash = nla_get_u64(a[OVS_PACKET_ATTR_HASH]);
__skb_set_hash(packet, hash & 0xFFFFFFFFULL,
!!(hash & OVS_PACKET_HASH_SW_BIT),
!!(hash & OVS_PACKET_HASH_L4_BIT));
}
/* Build an sw_flow for sending this packet. */
flow = ovs_flow_alloc();
err = PTR_ERR(flow);
if (IS_ERR(flow))
goto err_kfree_skb;
err = ovs_flow_key_extract_userspace(net, a[OVS_PACKET_ATTR_KEY],
packet, &flow->key, log);
if (err)
goto err_flow_free;
err = ovs_nla_copy_actions(net, a[OVS_PACKET_ATTR_ACTIONS],
&flow->key, &acts, log);
if (err)
goto err_flow_free;
rcu_assign_pointer(flow->sf_acts, acts);
packet->priority = flow->key.phy.priority;
packet->mark = flow->key.phy.skb_mark;
rcu_read_lock();
dp = get_dp_rcu(net, ovs_header->dp_ifindex);
err = -ENODEV;
if (!dp)
goto err_unlock;
input_vport = ovs_vport_rcu(dp, flow->key.phy.in_port);
if (!input_vport)
input_vport = ovs_vport_rcu(dp, OVSP_LOCAL);
if (!input_vport)
goto err_unlock;
packet->dev = input_vport->dev;
OVS_CB(packet)->input_vport = input_vport;
sf_acts = rcu_dereference(flow->sf_acts);
local_bh_disable();
err = ovs_execute_actions(dp, packet, sf_acts, &flow->key);
local_bh_enable();
rcu_read_unlock();
ovs_flow_free(flow, false);
return err;
err_unlock:
rcu_read_unlock();
err_flow_free:
ovs_flow_free(flow, false);
err_kfree_skb:
kfree_skb(packet);
err:
return err;
}
static const struct nla_policy packet_policy[OVS_PACKET_ATTR_MAX + 1] = {
[OVS_PACKET_ATTR_PACKET] = { .len = ETH_HLEN },
[OVS_PACKET_ATTR_KEY] = { .type = NLA_NESTED },
[OVS_PACKET_ATTR_ACTIONS] = { .type = NLA_NESTED },
[OVS_PACKET_ATTR_PROBE] = { .type = NLA_FLAG },
[OVS_PACKET_ATTR_MRU] = { .type = NLA_U16 },
[OVS_PACKET_ATTR_HASH] = { .type = NLA_U64 },
};
static const struct genl_small_ops dp_packet_genl_ops[] = {
{ .cmd = OVS_PACKET_CMD_EXECUTE,
.validate = GENL_DONT_VALIDATE_STRICT | GENL_DONT_VALIDATE_DUMP,
.flags = GENL_UNS_ADMIN_PERM, /* Requires CAP_NET_ADMIN privilege. */
.doit = ovs_packet_cmd_execute
}
};
static struct genl_family dp_packet_genl_family __ro_after_init = {
.hdrsize = sizeof(struct ovs_header),
.name = OVS_PACKET_FAMILY,
.version = OVS_PACKET_VERSION,
.maxattr = OVS_PACKET_ATTR_MAX,
.policy = packet_policy,
.netnsok = true,
.parallel_ops = true,
.small_ops = dp_packet_genl_ops,
.n_small_ops = ARRAY_SIZE(dp_packet_genl_ops),
.resv_start_op = OVS_PACKET_CMD_EXECUTE + 1,
.module = THIS_MODULE,
};
static void get_dp_stats(const struct datapath *dp, struct ovs_dp_stats *stats,
struct ovs_dp_megaflow_stats *mega_stats)
{
int i;
memset(mega_stats, 0, sizeof(*mega_stats));
stats->n_flows = ovs_flow_tbl_count(&dp->table);
mega_stats->n_masks = ovs_flow_tbl_num_masks(&dp->table);
stats->n_hit = stats->n_missed = stats->n_lost = 0;
for_each_possible_cpu(i) {
const struct dp_stats_percpu *percpu_stats;
struct dp_stats_percpu local_stats;
unsigned int start;
percpu_stats = per_cpu_ptr(dp->stats_percpu, i);
do {
start = u64_stats_fetch_begin(&percpu_stats->syncp);
local_stats = *percpu_stats;
} while (u64_stats_fetch_retry(&percpu_stats->syncp, start));
stats->n_hit += local_stats.n_hit;
stats->n_missed += local_stats.n_missed;
stats->n_lost += local_stats.n_lost;
mega_stats->n_mask_hit += local_stats.n_mask_hit;
mega_stats->n_cache_hit += local_stats.n_cache_hit;
}
}
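/* Userspace can set OVS_UFID_F_OMIT_* flags to leave the key, mask or
 * actions out of flow replies, which keeps dumps of large flow tables
 * small when only the unique flow identifier is needed.
 */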
static bool should_fill_key(const struct sw_flow_id *sfid, uint32_t ufid_flags)
{
return ovs_identifier_is_ufid(sfid) &&
!(ufid_flags & OVS_UFID_F_OMIT_KEY);
}
static bool should_fill_mask(uint32_t ufid_flags)
{
return !(ufid_flags & OVS_UFID_F_OMIT_MASK);
}
static bool should_fill_actions(uint32_t ufid_flags)
{
return !(ufid_flags & OVS_UFID_F_OMIT_ACTIONS);
}
static size_t ovs_flow_cmd_msg_size(const struct sw_flow_actions *acts,
const struct sw_flow_id *sfid,
uint32_t ufid_flags)
{
size_t len = NLMSG_ALIGN(sizeof(struct ovs_header));
/* OVS_FLOW_ATTR_UFID, or unmasked flow key as fallback
* see ovs_nla_put_identifier()
*/
if (sfid && ovs_identifier_is_ufid(sfid))
len += nla_total_size(sfid->ufid_len);
else
len += nla_total_size(ovs_key_attr_size());
/* OVS_FLOW_ATTR_KEY */
if (!sfid || should_fill_key(sfid, ufid_flags))
len += nla_total_size(ovs_key_attr_size());
/* OVS_FLOW_ATTR_MASK */
if (should_fill_mask(ufid_flags))
len += nla_total_size(ovs_key_attr_size());
/* OVS_FLOW_ATTR_ACTIONS */
if (should_fill_actions(ufid_flags))
len += nla_total_size(acts->orig_len);
return len
+ nla_total_size_64bit(sizeof(struct ovs_flow_stats)) /* OVS_FLOW_ATTR_STATS */
+ nla_total_size(1) /* OVS_FLOW_ATTR_TCP_FLAGS */
+ nla_total_size_64bit(8); /* OVS_FLOW_ATTR_USED */
}
/* Called with ovs_mutex or RCU read lock. */
static int ovs_flow_cmd_fill_stats(const struct sw_flow *flow,
struct sk_buff *skb)
{
struct ovs_flow_stats stats;
__be16 tcp_flags;
unsigned long used;
ovs_flow_stats_get(flow, &stats, &used, &tcp_flags);
if (used &&
nla_put_u64_64bit(skb, OVS_FLOW_ATTR_USED, ovs_flow_used_time(used),
OVS_FLOW_ATTR_PAD))
return -EMSGSIZE;
if (stats.n_packets &&
nla_put_64bit(skb, OVS_FLOW_ATTR_STATS,
sizeof(struct ovs_flow_stats), &stats,
OVS_FLOW_ATTR_PAD))
return -EMSGSIZE;
if ((u8)ntohs(tcp_flags) &&
nla_put_u8(skb, OVS_FLOW_ATTR_TCP_FLAGS, (u8)ntohs(tcp_flags)))
return -EMSGSIZE;
return 0;
}
/* Called with ovs_mutex or RCU read lock. */
static int ovs_flow_cmd_fill_actions(const struct sw_flow *flow,
struct sk_buff *skb, int skb_orig_len)
{
struct nlattr *start;
int err;
/* If OVS_FLOW_ATTR_ACTIONS doesn't fit, skip dumping the actions if
* this is the first flow to be dumped into 'skb'. This is unusual for
* Netlink but individual action lists can be longer than
* NLMSG_GOODSIZE and thus entirely undumpable if we didn't do this.
* The userspace caller can always fetch the actions separately if it
* really wants them. (Most userspace callers in fact don't care.)
*
* This can only fail for dump operations because the skb is always
* properly sized for single flows.
*/
start = nla_nest_start_noflag(skb, OVS_FLOW_ATTR_ACTIONS);
if (start) {
const struct sw_flow_actions *sf_acts;
sf_acts = rcu_dereference_ovsl(flow->sf_acts);
err = ovs_nla_put_actions(sf_acts->actions,
sf_acts->actions_len, skb);
if (!err)
nla_nest_end(skb, start);
else {
if (skb_orig_len)
return err;
nla_nest_cancel(skb, start);
}
} else if (skb_orig_len) {
return -EMSGSIZE;
}
return 0;
}
/* Called with ovs_mutex or RCU read lock. */
static int ovs_flow_cmd_fill_info(const struct sw_flow *flow, int dp_ifindex,
struct sk_buff *skb, u32 portid,
u32 seq, u32 flags, u8 cmd, u32 ufid_flags)
{
const int skb_orig_len = skb->len;
struct ovs_header *ovs_header;
int err;
ovs_header = genlmsg_put(skb, portid, seq, &dp_flow_genl_family,
flags, cmd);
if (!ovs_header)
return -EMSGSIZE;
ovs_header->dp_ifindex = dp_ifindex;
err = ovs_nla_put_identifier(flow, skb);
if (err)
goto error;
if (should_fill_key(&flow->id, ufid_flags)) {
err = ovs_nla_put_masked_key(flow, skb);
if (err)
goto error;
}
if (should_fill_mask(ufid_flags)) {
err = ovs_nla_put_mask(flow, skb);
if (err)
goto error;
}
err = ovs_flow_cmd_fill_stats(flow, skb);
if (err)
goto error;
if (should_fill_actions(ufid_flags)) {
err = ovs_flow_cmd_fill_actions(flow, skb, skb_orig_len);
if (err)
goto error;
}
genlmsg_end(skb, ovs_header);
return 0;
error:
genlmsg_cancel(skb, ovs_header);
return err;
}
/* May not be called with RCU read lock. */
static struct sk_buff *ovs_flow_cmd_alloc_info(const struct sw_flow_actions *acts,
const struct sw_flow_id *sfid,
struct genl_info *info,
bool always,
uint32_t ufid_flags)
{
struct sk_buff *skb;
size_t len;
if (!always && !ovs_must_notify(&dp_flow_genl_family, info, 0))
return NULL;
len = ovs_flow_cmd_msg_size(acts, sfid, ufid_flags);
skb = genlmsg_new(len, GFP_KERNEL);
if (!skb)
return ERR_PTR(-ENOMEM);
return skb;
}
/* Called with ovs_mutex. */
static struct sk_buff *ovs_flow_cmd_build_info(const struct sw_flow *flow,
int dp_ifindex,
struct genl_info *info, u8 cmd,
bool always, u32 ufid_flags)
{
struct sk_buff *skb;
int retval;
skb = ovs_flow_cmd_alloc_info(ovsl_dereference(flow->sf_acts),
&flow->id, info, always, ufid_flags);
if (IS_ERR_OR_NULL(skb))
return skb;
retval = ovs_flow_cmd_fill_info(flow, dp_ifindex, skb,
info->snd_portid, info->snd_seq, 0,
cmd, ufid_flags);
if (WARN_ON_ONCE(retval < 0)) {
kfree_skb(skb);
skb = ERR_PTR(retval);
}
return skb;
}
static int ovs_flow_cmd_new(struct sk_buff *skb, struct genl_info *info)
{
struct net *net = sock_net(skb->sk);
struct nlattr **a = info->attrs;
struct ovs_header *ovs_header = genl_info_userhdr(info);
struct sw_flow *flow = NULL, *new_flow;
struct sw_flow_mask mask;
struct sk_buff *reply;
struct datapath *dp;
struct sw_flow_key *key;
struct sw_flow_actions *acts;
struct sw_flow_match match;
u32 ufid_flags = ovs_nla_get_ufid_flags(a[OVS_FLOW_ATTR_UFID_FLAGS]);
int error;
bool log = !a[OVS_FLOW_ATTR_PROBE];
/* Must have key and actions. */
error = -EINVAL;
if (!a[OVS_FLOW_ATTR_KEY]) {
OVS_NLERR(log, "Flow key attr not present in new flow.");
goto error;
}
if (!a[OVS_FLOW_ATTR_ACTIONS]) {
OVS_NLERR(log, "Flow actions attr not present in new flow.");
goto error;
}
/* Most of the time we need to allocate a new flow, do it before
* locking.
*/
new_flow = ovs_flow_alloc();
if (IS_ERR(new_flow)) {
error = PTR_ERR(new_flow);
goto error;
}
/* Extract key. */
key = kzalloc(sizeof(*key), GFP_KERNEL);
if (!key) {
error = -ENOMEM;
goto err_kfree_flow;
}
ovs_match_init(&match, key, false, &mask);
error = ovs_nla_get_match(net, &match, a[OVS_FLOW_ATTR_KEY],
a[OVS_FLOW_ATTR_MASK], log);
if (error)
goto err_kfree_key;
ovs_flow_mask_key(&new_flow->key, key, true, &mask);
/* Extract flow identifier. */
error = ovs_nla_get_identifier(&new_flow->id, a[OVS_FLOW_ATTR_UFID],
key, log);
if (error)
goto err_kfree_key;
/* Validate actions. */
error = ovs_nla_copy_actions(net, a[OVS_FLOW_ATTR_ACTIONS],
&new_flow->key, &acts, log);
if (error) {
OVS_NLERR(log, "Flow actions may not be safe on all matching packets.");
goto err_kfree_key;
}
reply = ovs_flow_cmd_alloc_info(acts, &new_flow->id, info, false,
ufid_flags);
if (IS_ERR(reply)) {
error = PTR_ERR(reply);
goto err_kfree_acts;
}
ovs_lock();
dp = get_dp(net, ovs_header->dp_ifindex);
if (unlikely(!dp)) {
error = -ENODEV;
goto err_unlock_ovs;
}
/* Check if this is a duplicate flow */
if (ovs_identifier_is_ufid(&new_flow->id))
flow = ovs_flow_tbl_lookup_ufid(&dp->table, &new_flow->id);
if (!flow)
flow = ovs_flow_tbl_lookup(&dp->table, key);
if (likely(!flow)) {
rcu_assign_pointer(new_flow->sf_acts, acts);
/* Put flow in bucket. */
error = ovs_flow_tbl_insert(&dp->table, new_flow, &mask);
if (unlikely(error)) {
acts = NULL;
goto err_unlock_ovs;
}
if (unlikely(reply)) {
error = ovs_flow_cmd_fill_info(new_flow,
ovs_header->dp_ifindex,
reply, info->snd_portid,
info->snd_seq, 0,
OVS_FLOW_CMD_NEW,
ufid_flags);
BUG_ON(error < 0);
}
ovs_unlock();
} else {
struct sw_flow_actions *old_acts;
/* Bail out if we're not allowed to modify an existing flow.
* We accept NLM_F_CREATE in place of the intended NLM_F_EXCL
* because Generic Netlink treats the latter as a dump
* request. We also accept NLM_F_EXCL in case that bug ever
* gets fixed.
*/
if (unlikely(info->nlhdr->nlmsg_flags & (NLM_F_CREATE
| NLM_F_EXCL))) {
error = -EEXIST;
goto err_unlock_ovs;
}
/* The flow identifier has to be the same for flow updates.
* Look for any overlapping flow.
*/
if (unlikely(!ovs_flow_cmp(flow, &match))) {
if (ovs_identifier_is_key(&flow->id))
flow = ovs_flow_tbl_lookup_exact(&dp->table,
&match);
else /* UFID matches but key is different */
flow = NULL;
if (!flow) {
error = -ENOENT;
goto err_unlock_ovs;
}
}
/* Update actions. */
old_acts = ovsl_dereference(flow->sf_acts);
rcu_assign_pointer(flow->sf_acts, acts);
if (unlikely(reply)) {
error = ovs_flow_cmd_fill_info(flow,
ovs_header->dp_ifindex,
reply, info->snd_portid,
info->snd_seq, 0,
OVS_FLOW_CMD_NEW,
ufid_flags);
BUG_ON(error < 0);
}
ovs_unlock();
ovs_nla_free_flow_actions_rcu(old_acts);
ovs_flow_free(new_flow, false);
}
if (reply)
ovs_notify(&dp_flow_genl_family, reply, info);
kfree(key);
return 0;
err_unlock_ovs:
ovs_unlock();
kfree_skb(reply);
err_kfree_acts:
ovs_nla_free_flow_actions(acts);
err_kfree_key:
kfree(key);
err_kfree_flow:
ovs_flow_free(new_flow, false);
error:
return error;
}
/* Factor out action copy to avoid "Wframe-larger-than=1024" warning. */
static noinline_for_stack
struct sw_flow_actions *get_flow_actions(struct net *net,
const struct nlattr *a,
const struct sw_flow_key *key,
const struct sw_flow_mask *mask,
bool log)
{
struct sw_flow_actions *acts;
struct sw_flow_key masked_key;
int error;
ovs_flow_mask_key(&masked_key, key, true, mask);
error = ovs_nla_copy_actions(net, a, &masked_key, &acts, log);
if (error) {
OVS_NLERR(log,
"Actions may not be safe on all matching packets");
return ERR_PTR(error);
}
return acts;
}
/* Factor out match-init and action-copy to avoid a
 * "Wframe-larger-than=1024" warning. Because the mask is only
 * used to get the actions, a separate function saves some
 * stack space.
 *
 * If neither the key nor the actions attribute is present, we
 * return 0 directly; in that case the caller will not use the
 * match either. If the actions attribute is present, we try to
 * copy the actions and store them in *acts. Before returning,
 * we reset the match->mask pointer so that we never hand back
 * a match object with a dangling reference to the on-stack
 * mask.
 */
static noinline_for_stack int
ovs_nla_init_match_and_action(struct net *net,
struct sw_flow_match *match,
struct sw_flow_key *key,
struct nlattr **a,
struct sw_flow_actions **acts,
bool log)
{
struct sw_flow_mask mask;
int error = 0;
if (a[OVS_FLOW_ATTR_KEY]) {
ovs_match_init(match, key, true, &mask);
error = ovs_nla_get_match(net, match, a[OVS_FLOW_ATTR_KEY],
a[OVS_FLOW_ATTR_MASK], log);
if (error)
goto error;
}
if (a[OVS_FLOW_ATTR_ACTIONS]) {
if (!a[OVS_FLOW_ATTR_KEY]) {
OVS_NLERR(log,
"Flow key attribute not present in set flow.");
error = -EINVAL;
goto error;
}
*acts = get_flow_actions(net, a[OVS_FLOW_ATTR_ACTIONS], key,
&mask, log);
if (IS_ERR(*acts)) {
error = PTR_ERR(*acts);
goto error;
}
}
/* On success, error is 0. */
error:
match->mask = NULL;
return error;
}
static int ovs_flow_cmd_set(struct sk_buff *skb, struct genl_info *info)
{
struct net *net = sock_net(skb->sk);
struct nlattr **a = info->attrs;
struct ovs_header *ovs_header = genl_info_userhdr(info);
struct sw_flow_key key;
struct sw_flow *flow;
struct sk_buff *reply = NULL;
struct datapath *dp;
struct sw_flow_actions *old_acts = NULL, *acts = NULL;
struct sw_flow_match match;
struct sw_flow_id sfid;
u32 ufid_flags = ovs_nla_get_ufid_flags(a[OVS_FLOW_ATTR_UFID_FLAGS]);
int error = 0;
bool log = !a[OVS_FLOW_ATTR_PROBE];
bool ufid_present;
ufid_present = ovs_nla_get_ufid(&sfid, a[OVS_FLOW_ATTR_UFID], log);
if (!a[OVS_FLOW_ATTR_KEY] && !ufid_present) {
OVS_NLERR(log,
"Flow set message rejected, Key attribute missing.");
return -EINVAL;
}
error = ovs_nla_init_match_and_action(net, &match, &key, a,
&acts, log);
if (error)
goto error;
if (acts) {
/* Can allocate before locking if have acts. */
reply = ovs_flow_cmd_alloc_info(acts, &sfid, info, false,
ufid_flags);
if (IS_ERR(reply)) {
error = PTR_ERR(reply);
goto err_kfree_acts;
}
}
ovs_lock();
dp = get_dp(net, ovs_header->dp_ifindex);
if (unlikely(!dp)) {
error = -ENODEV;
goto err_unlock_ovs;
}
/* Check that the flow exists. */
if (ufid_present)
flow = ovs_flow_tbl_lookup_ufid(&dp->table, &sfid);
else
flow = ovs_flow_tbl_lookup_exact(&dp->table, &match);
if (unlikely(!flow)) {
error = -ENOENT;
goto err_unlock_ovs;
}
/* Update actions, if present. */
if (likely(acts)) {
old_acts = ovsl_dereference(flow->sf_acts);
rcu_assign_pointer(flow->sf_acts, acts);
if (unlikely(reply)) {
error = ovs_flow_cmd_fill_info(flow,
ovs_header->dp_ifindex,
reply, info->snd_portid,
info->snd_seq, 0,
OVS_FLOW_CMD_SET,
ufid_flags);
BUG_ON(error < 0);
}
} else {
/* Could not alloc without acts before locking. */
reply = ovs_flow_cmd_build_info(flow, ovs_header->dp_ifindex,
info, OVS_FLOW_CMD_SET, false,
ufid_flags);
if (IS_ERR(reply)) {
error = PTR_ERR(reply);
goto err_unlock_ovs;
}
}
/* Clear stats. */
if (a[OVS_FLOW_ATTR_CLEAR])
ovs_flow_stats_clear(flow);
ovs_unlock();
if (reply)
ovs_notify(&dp_flow_genl_family, reply, info);
if (old_acts)
ovs_nla_free_flow_actions_rcu(old_acts);
return 0;
err_unlock_ovs:
ovs_unlock();
kfree_skb(reply);
err_kfree_acts:
ovs_nla_free_flow_actions(acts);
error:
return error;
}
static int ovs_flow_cmd_get(struct sk_buff *skb, struct genl_info *info)
{
struct nlattr **a = info->attrs;
struct ovs_header *ovs_header = genl_info_userhdr(info);
struct net *net = sock_net(skb->sk);
struct sw_flow_key key;
struct sk_buff *reply;
struct sw_flow *flow;
struct datapath *dp;
struct sw_flow_match match;
struct sw_flow_id ufid;
u32 ufid_flags = ovs_nla_get_ufid_flags(a[OVS_FLOW_ATTR_UFID_FLAGS]);
int err = 0;
bool log = !a[OVS_FLOW_ATTR_PROBE];
bool ufid_present;
ufid_present = ovs_nla_get_ufid(&ufid, a[OVS_FLOW_ATTR_UFID], log);
if (a[OVS_FLOW_ATTR_KEY]) {
ovs_match_init(&match, &key, true, NULL);
err = ovs_nla_get_match(net, &match, a[OVS_FLOW_ATTR_KEY], NULL,
log);
} else if (!ufid_present) {
OVS_NLERR(log,
"Flow get message rejected, Key attribute missing.");
err = -EINVAL;
}
if (err)
return err;
ovs_lock();
dp = get_dp(sock_net(skb->sk), ovs_header->dp_ifindex);
if (!dp) {
err = -ENODEV;
goto unlock;
}
if (ufid_present)
flow = ovs_flow_tbl_lookup_ufid(&dp->table, &ufid);
else
flow = ovs_flow_tbl_lookup_exact(&dp->table, &match);
if (!flow) {
err = -ENOENT;
goto unlock;
}
reply = ovs_flow_cmd_build_info(flow, ovs_header->dp_ifindex, info,
OVS_FLOW_CMD_GET, true, ufid_flags);
if (IS_ERR(reply)) {
err = PTR_ERR(reply);
goto unlock;
}
ovs_unlock();
return genlmsg_reply(reply, info);
unlock:
ovs_unlock();
return err;
}
static int ovs_flow_cmd_del(struct sk_buff *skb, struct genl_info *info)
{
struct nlattr **a = info->attrs;
struct ovs_header *ovs_header = genl_info_userhdr(info);
struct net *net = sock_net(skb->sk);
struct sw_flow_key key;
struct sk_buff *reply;
struct sw_flow *flow = NULL;
struct datapath *dp;
struct sw_flow_match match;
struct sw_flow_id ufid;
u32 ufid_flags = ovs_nla_get_ufid_flags(a[OVS_FLOW_ATTR_UFID_FLAGS]);
int err;
bool log = !a[OVS_FLOW_ATTR_PROBE];
bool ufid_present;
ufid_present = ovs_nla_get_ufid(&ufid, a[OVS_FLOW_ATTR_UFID], log);
if (a[OVS_FLOW_ATTR_KEY]) {
ovs_match_init(&match, &key, true, NULL);
err = ovs_nla_get_match(net, &match, a[OVS_FLOW_ATTR_KEY],
NULL, log);
if (unlikely(err))
return err;
}
ovs_lock();
dp = get_dp(sock_net(skb->sk), ovs_header->dp_ifindex);
if (unlikely(!dp)) {
err = -ENODEV;
goto unlock;
}
if (unlikely(!a[OVS_FLOW_ATTR_KEY] && !ufid_present)) {
err = ovs_flow_tbl_flush(&dp->table);
goto unlock;
}
if (ufid_present)
flow = ovs_flow_tbl_lookup_ufid(&dp->table, &ufid);
else
flow = ovs_flow_tbl_lookup_exact(&dp->table, &match);
if (unlikely(!flow)) {
err = -ENOENT;
goto unlock;
}
ovs_flow_tbl_remove(&dp->table, flow);
ovs_unlock();
reply = ovs_flow_cmd_alloc_info((const struct sw_flow_actions __force *) flow->sf_acts,
&flow->id, info, false, ufid_flags);
if (likely(reply)) {
if (!IS_ERR(reply)) {
			rcu_read_lock();	/* To keep the RCU checker happy. */
err = ovs_flow_cmd_fill_info(flow, ovs_header->dp_ifindex,
reply, info->snd_portid,
info->snd_seq, 0,
OVS_FLOW_CMD_DEL,
ufid_flags);
rcu_read_unlock();
if (WARN_ON_ONCE(err < 0)) {
kfree_skb(reply);
goto out_free;
}
ovs_notify(&dp_flow_genl_family, reply, info);
} else {
netlink_set_err(sock_net(skb->sk)->genl_sock, 0, 0,
PTR_ERR(reply));
}
}
out_free:
ovs_flow_free(flow, true);
return 0;
unlock:
ovs_unlock();
return err;
}
static int ovs_flow_cmd_dump(struct sk_buff *skb, struct netlink_callback *cb)
{
struct nlattr *a[__OVS_FLOW_ATTR_MAX];
struct ovs_header *ovs_header = genlmsg_data(nlmsg_data(cb->nlh));
struct table_instance *ti;
struct datapath *dp;
u32 ufid_flags;
int err;
err = genlmsg_parse_deprecated(cb->nlh, &dp_flow_genl_family, a,
OVS_FLOW_ATTR_MAX, flow_policy, NULL);
if (err)
return err;
ufid_flags = ovs_nla_get_ufid_flags(a[OVS_FLOW_ATTR_UFID_FLAGS]);
rcu_read_lock();
dp = get_dp_rcu(sock_net(skb->sk), ovs_header->dp_ifindex);
if (!dp) {
rcu_read_unlock();
return -ENODEV;
}
ti = rcu_dereference(dp->table.ti);
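	/* cb->args[0] and cb->args[1] hold the table bucket and the offset
	 * within it, so the dump resumes where the previous pass stopped.
	 */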
for (;;) {
struct sw_flow *flow;
u32 bucket, obj;
bucket = cb->args[0];
obj = cb->args[1];
flow = ovs_flow_tbl_dump_next(ti, &bucket, &obj);
if (!flow)
break;
if (ovs_flow_cmd_fill_info(flow, ovs_header->dp_ifindex, skb,
NETLINK_CB(cb->skb).portid,
cb->nlh->nlmsg_seq, NLM_F_MULTI,
OVS_FLOW_CMD_GET, ufid_flags) < 0)
break;
cb->args[0] = bucket;
cb->args[1] = obj;
}
rcu_read_unlock();
return skb->len;
}
static const struct nla_policy flow_policy[OVS_FLOW_ATTR_MAX + 1] = {
[OVS_FLOW_ATTR_KEY] = { .type = NLA_NESTED },
[OVS_FLOW_ATTR_MASK] = { .type = NLA_NESTED },
[OVS_FLOW_ATTR_ACTIONS] = { .type = NLA_NESTED },
[OVS_FLOW_ATTR_CLEAR] = { .type = NLA_FLAG },
[OVS_FLOW_ATTR_PROBE] = { .type = NLA_FLAG },
[OVS_FLOW_ATTR_UFID] = { .type = NLA_UNSPEC, .len = 1 },
[OVS_FLOW_ATTR_UFID_FLAGS] = { .type = NLA_U32 },
};
static const struct genl_small_ops dp_flow_genl_ops[] = {
{ .cmd = OVS_FLOW_CMD_NEW,
.validate = GENL_DONT_VALIDATE_STRICT | GENL_DONT_VALIDATE_DUMP,
.flags = GENL_UNS_ADMIN_PERM, /* Requires CAP_NET_ADMIN privilege. */
.doit = ovs_flow_cmd_new
},
{ .cmd = OVS_FLOW_CMD_DEL,
.validate = GENL_DONT_VALIDATE_STRICT | GENL_DONT_VALIDATE_DUMP,
.flags = GENL_UNS_ADMIN_PERM, /* Requires CAP_NET_ADMIN privilege. */
.doit = ovs_flow_cmd_del
},
{ .cmd = OVS_FLOW_CMD_GET,
.validate = GENL_DONT_VALIDATE_STRICT | GENL_DONT_VALIDATE_DUMP,
.flags = 0, /* OK for unprivileged users. */
.doit = ovs_flow_cmd_get,
.dumpit = ovs_flow_cmd_dump
},
{ .cmd = OVS_FLOW_CMD_SET,
.validate = GENL_DONT_VALIDATE_STRICT | GENL_DONT_VALIDATE_DUMP,
.flags = GENL_UNS_ADMIN_PERM, /* Requires CAP_NET_ADMIN privilege. */
.doit = ovs_flow_cmd_set,
},
};
static struct genl_family dp_flow_genl_family __ro_after_init = {
.hdrsize = sizeof(struct ovs_header),
.name = OVS_FLOW_FAMILY,
.version = OVS_FLOW_VERSION,
.maxattr = OVS_FLOW_ATTR_MAX,
.policy = flow_policy,
.netnsok = true,
.parallel_ops = true,
.small_ops = dp_flow_genl_ops,
.n_small_ops = ARRAY_SIZE(dp_flow_genl_ops),
.resv_start_op = OVS_FLOW_CMD_SET + 1,
.mcgrps = &ovs_dp_flow_multicast_group,
.n_mcgrps = 1,
.module = THIS_MODULE,
};
static size_t ovs_dp_cmd_msg_size(void)
{
size_t msgsize = NLMSG_ALIGN(sizeof(struct ovs_header));
msgsize += nla_total_size(IFNAMSIZ);
msgsize += nla_total_size_64bit(sizeof(struct ovs_dp_stats));
msgsize += nla_total_size_64bit(sizeof(struct ovs_dp_megaflow_stats));
msgsize += nla_total_size(sizeof(u32)); /* OVS_DP_ATTR_USER_FEATURES */
msgsize += nla_total_size(sizeof(u32)); /* OVS_DP_ATTR_MASKS_CACHE_SIZE */
msgsize += nla_total_size(sizeof(u32) * nr_cpu_ids); /* OVS_DP_ATTR_PER_CPU_PIDS */
return msgsize;
}
/* Called with ovs_mutex. */
static int ovs_dp_cmd_fill_info(struct datapath *dp, struct sk_buff *skb,
u32 portid, u32 seq, u32 flags, u8 cmd)
{
struct ovs_header *ovs_header;
struct ovs_dp_stats dp_stats;
struct ovs_dp_megaflow_stats dp_megaflow_stats;
struct dp_nlsk_pids *pids = ovsl_dereference(dp->upcall_portids);
int err, pids_len;
ovs_header = genlmsg_put(skb, portid, seq, &dp_datapath_genl_family,
flags, cmd);
if (!ovs_header)
goto error;
ovs_header->dp_ifindex = get_dpifindex(dp);
err = nla_put_string(skb, OVS_DP_ATTR_NAME, ovs_dp_name(dp));
if (err)
goto nla_put_failure;
get_dp_stats(dp, &dp_stats, &dp_megaflow_stats);
if (nla_put_64bit(skb, OVS_DP_ATTR_STATS, sizeof(struct ovs_dp_stats),
&dp_stats, OVS_DP_ATTR_PAD))
goto nla_put_failure;
if (nla_put_64bit(skb, OVS_DP_ATTR_MEGAFLOW_STATS,
sizeof(struct ovs_dp_megaflow_stats),
&dp_megaflow_stats, OVS_DP_ATTR_PAD))
goto nla_put_failure;
if (nla_put_u32(skb, OVS_DP_ATTR_USER_FEATURES, dp->user_features))
goto nla_put_failure;
if (nla_put_u32(skb, OVS_DP_ATTR_MASKS_CACHE_SIZE,
ovs_flow_tbl_masks_cache_size(&dp->table)))
goto nla_put_failure;
if (dp->user_features & OVS_DP_F_DISPATCH_UPCALL_PER_CPU && pids) {
pids_len = min(pids->n_pids, nr_cpu_ids) * sizeof(u32);
if (nla_put(skb, OVS_DP_ATTR_PER_CPU_PIDS, pids_len, &pids->pids))
goto nla_put_failure;
}
genlmsg_end(skb, ovs_header);
return 0;
nla_put_failure:
genlmsg_cancel(skb, ovs_header);
error:
return -EMSGSIZE;
}
static struct sk_buff *ovs_dp_cmd_alloc_info(void)
{
return genlmsg_new(ovs_dp_cmd_msg_size(), GFP_KERNEL);
}
/* Called with rcu_read_lock or ovs_mutex. */
static struct datapath *lookup_datapath(struct net *net,
const struct ovs_header *ovs_header,
struct nlattr *a[OVS_DP_ATTR_MAX + 1])
{
struct datapath *dp;
if (!a[OVS_DP_ATTR_NAME])
dp = get_dp(net, ovs_header->dp_ifindex);
else {
struct vport *vport;
vport = ovs_vport_locate(net, nla_data(a[OVS_DP_ATTR_NAME]));
dp = vport && vport->port_no == OVSP_LOCAL ? vport->dp : NULL;
}
return dp ? dp : ERR_PTR(-ENODEV);
}
static void ovs_dp_reset_user_features(struct sk_buff *skb,
struct genl_info *info)
{
struct datapath *dp;
dp = lookup_datapath(sock_net(skb->sk), genl_info_userhdr(info),
info->attrs);
if (IS_ERR(dp))
return;
pr_warn("%s: Dropping previously announced user features\n",
ovs_dp_name(dp));
dp->user_features = 0;
}
static int ovs_dp_set_upcall_portids(struct datapath *dp,
const struct nlattr *ids)
{
struct dp_nlsk_pids *old, *dp_nlsk_pids;
if (!nla_len(ids) || nla_len(ids) % sizeof(u32))
return -EINVAL;
old = ovsl_dereference(dp->upcall_portids);
dp_nlsk_pids = kmalloc(sizeof(*dp_nlsk_pids) + nla_len(ids),
GFP_KERNEL);
if (!dp_nlsk_pids)
return -ENOMEM;
dp_nlsk_pids->n_pids = nla_len(ids) / sizeof(u32);
nla_memcpy(dp_nlsk_pids->pids, ids, nla_len(ids));
rcu_assign_pointer(dp->upcall_portids, dp_nlsk_pids);
kfree_rcu(old, rcu);
return 0;
}
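/* Look up the netlink port ID to which upcalls generated on the given CPU
 * should be sent when per-CPU upcall dispatch is enabled.
 */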
u32 ovs_dp_get_upcall_portid(const struct datapath *dp, uint32_t cpu_id)
{
struct dp_nlsk_pids *dp_nlsk_pids;
dp_nlsk_pids = rcu_dereference(dp->upcall_portids);
if (dp_nlsk_pids) {
if (cpu_id < dp_nlsk_pids->n_pids) {
return dp_nlsk_pids->pids[cpu_id];
} else if (dp_nlsk_pids->n_pids > 0 &&
cpu_id >= dp_nlsk_pids->n_pids) {
			/* The number of netlink PIDs does not match the
			 * number of CPUs as seen by the kernel; log this
			 * and wrap the CPU id onto one of the existing
			 * sockets so that packets are not dropped.
			 */
pr_info_ratelimited("cpu_id mismatch with handler threads");
return dp_nlsk_pids->pids[cpu_id %
dp_nlsk_pids->n_pids];
} else {
return 0;
}
} else {
return 0;
}
}
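/* Apply datapath attribute changes: user features, masks cache size and
 * per-CPU upcall port IDs. Called with ovs_mutex.
 */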
static int ovs_dp_change(struct datapath *dp, struct nlattr *a[])
{
u32 user_features = 0, old_features = dp->user_features;
int err;
if (a[OVS_DP_ATTR_USER_FEATURES]) {
user_features = nla_get_u32(a[OVS_DP_ATTR_USER_FEATURES]);
if (user_features & ~(OVS_DP_F_VPORT_PIDS |
OVS_DP_F_UNALIGNED |
OVS_DP_F_TC_RECIRC_SHARING |
OVS_DP_F_DISPATCH_UPCALL_PER_CPU))
return -EOPNOTSUPP;
#if !IS_ENABLED(CONFIG_NET_TC_SKB_EXT)
if (user_features & OVS_DP_F_TC_RECIRC_SHARING)
return -EOPNOTSUPP;
#endif
}
if (a[OVS_DP_ATTR_MASKS_CACHE_SIZE]) {
int err;
u32 cache_size;
cache_size = nla_get_u32(a[OVS_DP_ATTR_MASKS_CACHE_SIZE]);
err = ovs_flow_tbl_masks_cache_resize(&dp->table, cache_size);
if (err)
return err;
}
dp->user_features = user_features;
if (dp->user_features & OVS_DP_F_DISPATCH_UPCALL_PER_CPU &&
a[OVS_DP_ATTR_PER_CPU_PIDS]) {
/* Upcall Netlink Port IDs have been updated */
err = ovs_dp_set_upcall_portids(dp,
a[OVS_DP_ATTR_PER_CPU_PIDS]);
if (err)
return err;
}
if ((dp->user_features & OVS_DP_F_TC_RECIRC_SHARING) &&
!(old_features & OVS_DP_F_TC_RECIRC_SHARING))
tc_skb_ext_tc_enable();
else if (!(dp->user_features & OVS_DP_F_TC_RECIRC_SHARING) &&
(old_features & OVS_DP_F_TC_RECIRC_SHARING))
tc_skb_ext_tc_disable();
return 0;
}
static int ovs_dp_stats_init(struct datapath *dp)
{
dp->stats_percpu = netdev_alloc_pcpu_stats(struct dp_stats_percpu);
if (!dp->stats_percpu)
return -ENOMEM;
return 0;
}
static int ovs_dp_vport_init(struct datapath *dp)
{
int i;
dp->ports = kmalloc_array(DP_VPORT_HASH_BUCKETS,
sizeof(struct hlist_head),
GFP_KERNEL);
if (!dp->ports)
return -ENOMEM;
for (i = 0; i < DP_VPORT_HASH_BUCKETS; i++)
INIT_HLIST_HEAD(&dp->ports[i]);
return 0;
}
static int ovs_dp_cmd_new(struct sk_buff *skb, struct genl_info *info)
{
struct nlattr **a = info->attrs;
struct vport_parms parms;
struct sk_buff *reply;
struct datapath *dp;
struct vport *vport;
struct ovs_net *ovs_net;
int err;
err = -EINVAL;
if (!a[OVS_DP_ATTR_NAME] || !a[OVS_DP_ATTR_UPCALL_PID])
goto err;
reply = ovs_dp_cmd_alloc_info();
if (!reply)
return -ENOMEM;
err = -ENOMEM;
dp = kzalloc(sizeof(*dp), GFP_KERNEL);
if (dp == NULL)
goto err_destroy_reply;
ovs_dp_set_net(dp, sock_net(skb->sk));
/* Allocate table. */
err = ovs_flow_tbl_init(&dp->table);
if (err)
goto err_destroy_dp;
err = ovs_dp_stats_init(dp);
if (err)
goto err_destroy_table;
err = ovs_dp_vport_init(dp);
if (err)
goto err_destroy_stats;
err = ovs_meters_init(dp);
if (err)
goto err_destroy_ports;
/* Set up our datapath device. */
parms.name = nla_data(a[OVS_DP_ATTR_NAME]);
parms.type = OVS_VPORT_TYPE_INTERNAL;
parms.options = NULL;
parms.dp = dp;
parms.port_no = OVSP_LOCAL;
parms.upcall_portids = a[OVS_DP_ATTR_UPCALL_PID];
parms.desired_ifindex = a[OVS_DP_ATTR_IFINDEX]
? nla_get_s32(a[OVS_DP_ATTR_IFINDEX]) : 0;
/* So far only local changes have been made, now need the lock. */
ovs_lock();
err = ovs_dp_change(dp, a);
if (err)
goto err_unlock_and_destroy_meters;
vport = new_vport(&parms);
if (IS_ERR(vport)) {
err = PTR_ERR(vport);
if (err == -EBUSY)
err = -EEXIST;
if (err == -EEXIST) {
/* An outdated user space instance that does not understand
* the concept of user_features has attempted to create a new
* datapath and is likely to reuse it. Drop all user features.
*/
if (info->genlhdr->version < OVS_DP_VER_FEATURES)
ovs_dp_reset_user_features(skb, info);
}
goto err_destroy_portids;
}
err = ovs_dp_cmd_fill_info(dp, reply, info->snd_portid,
info->snd_seq, 0, OVS_DP_CMD_NEW);
BUG_ON(err < 0);
ovs_net = net_generic(ovs_dp_get_net(dp), ovs_net_id);
list_add_tail_rcu(&dp->list_node, &ovs_net->dps);
ovs_unlock();
ovs_notify(&dp_datapath_genl_family, reply, info);
return 0;
err_destroy_portids:
kfree(rcu_dereference_raw(dp->upcall_portids));
err_unlock_and_destroy_meters:
ovs_unlock();
ovs_meters_exit(dp);
err_destroy_ports:
kfree(dp->ports);
err_destroy_stats:
free_percpu(dp->stats_percpu);
err_destroy_table:
ovs_flow_tbl_destroy(&dp->table);
err_destroy_dp:
kfree(dp);
err_destroy_reply:
kfree_skb(reply);
err:
return err;
}
/* Called with ovs_mutex. */
static void __dp_destroy(struct datapath *dp)
{
struct flow_table *table = &dp->table;
int i;
if (dp->user_features & OVS_DP_F_TC_RECIRC_SHARING)
tc_skb_ext_tc_disable();
for (i = 0; i < DP_VPORT_HASH_BUCKETS; i++) {
struct vport *vport;
struct hlist_node *n;
hlist_for_each_entry_safe(vport, n, &dp->ports[i], dp_hash_node)
if (vport->port_no != OVSP_LOCAL)
ovs_dp_detach_port(vport);
}
list_del_rcu(&dp->list_node);
	/* OVSP_LOCAL is the datapath's internal port. Make sure that all
	 * ports in the datapath are destroyed before the datapath itself
	 * is freed.
	 */
ovs_dp_detach_port(ovs_vport_ovsl(dp, OVSP_LOCAL));
	/* Flush sw_flows from the tables here; the RCU callback only
	 * releases resources such as the dp, ports and tables, which
	 * avoids issues such as RCU usage warnings.
	 */
table_instance_flow_flush(table, ovsl_dereference(table->ti),
ovsl_dereference(table->ufid_ti));
/* RCU destroy the ports, meters and flow tables. */
call_rcu(&dp->rcu, destroy_dp_rcu);
}
static int ovs_dp_cmd_del(struct sk_buff *skb, struct genl_info *info)
{
struct sk_buff *reply;
struct datapath *dp;
int err;
reply = ovs_dp_cmd_alloc_info();
if (!reply)
return -ENOMEM;
ovs_lock();
dp = lookup_datapath(sock_net(skb->sk), genl_info_userhdr(info),
info->attrs);
err = PTR_ERR(dp);
if (IS_ERR(dp))
goto err_unlock_free;
err = ovs_dp_cmd_fill_info(dp, reply, info->snd_portid,
info->snd_seq, 0, OVS_DP_CMD_DEL);
BUG_ON(err < 0);
__dp_destroy(dp);
ovs_unlock();
ovs_notify(&dp_datapath_genl_family, reply, info);
return 0;
err_unlock_free:
ovs_unlock();
kfree_skb(reply);
return err;
}
static int ovs_dp_cmd_set(struct sk_buff *skb, struct genl_info *info)
{
struct sk_buff *reply;
struct datapath *dp;
int err;
reply = ovs_dp_cmd_alloc_info();
if (!reply)
return -ENOMEM;
ovs_lock();
dp = lookup_datapath(sock_net(skb->sk), genl_info_userhdr(info),
info->attrs);
err = PTR_ERR(dp);
if (IS_ERR(dp))
goto err_unlock_free;
err = ovs_dp_change(dp, info->attrs);
if (err)
goto err_unlock_free;
err = ovs_dp_cmd_fill_info(dp, reply, info->snd_portid,
info->snd_seq, 0, OVS_DP_CMD_SET);
BUG_ON(err < 0);
ovs_unlock();
ovs_notify(&dp_datapath_genl_family, reply, info);
return 0;
err_unlock_free:
ovs_unlock();
kfree_skb(reply);
return err;
}
static int ovs_dp_cmd_get(struct sk_buff *skb, struct genl_info *info)
{
struct sk_buff *reply;
struct datapath *dp;
int err;
reply = ovs_dp_cmd_alloc_info();
if (!reply)
return -ENOMEM;
ovs_lock();
dp = lookup_datapath(sock_net(skb->sk), genl_info_userhdr(info),
info->attrs);
if (IS_ERR(dp)) {
err = PTR_ERR(dp);
goto err_unlock_free;
}
err = ovs_dp_cmd_fill_info(dp, reply, info->snd_portid,
info->snd_seq, 0, OVS_DP_CMD_GET);
BUG_ON(err < 0);
ovs_unlock();
return genlmsg_reply(reply, info);
err_unlock_free:
ovs_unlock();
kfree_skb(reply);
return err;
}
static int ovs_dp_cmd_dump(struct sk_buff *skb, struct netlink_callback *cb)
{
struct ovs_net *ovs_net = net_generic(sock_net(skb->sk), ovs_net_id);
struct datapath *dp;
int skip = cb->args[0];
int i = 0;
ovs_lock();
list_for_each_entry(dp, &ovs_net->dps, list_node) {
if (i >= skip &&
ovs_dp_cmd_fill_info(dp, skb, NETLINK_CB(cb->skb).portid,
cb->nlh->nlmsg_seq, NLM_F_MULTI,
OVS_DP_CMD_GET) < 0)
break;
i++;
}
ovs_unlock();
cb->args[0] = i;
return skb->len;
}
static const struct nla_policy datapath_policy[OVS_DP_ATTR_MAX + 1] = {
[OVS_DP_ATTR_NAME] = { .type = NLA_NUL_STRING, .len = IFNAMSIZ - 1 },
[OVS_DP_ATTR_UPCALL_PID] = { .type = NLA_U32 },
[OVS_DP_ATTR_USER_FEATURES] = { .type = NLA_U32 },
[OVS_DP_ATTR_MASKS_CACHE_SIZE] = NLA_POLICY_RANGE(NLA_U32, 0,
PCPU_MIN_UNIT_SIZE / sizeof(struct mask_cache_entry)),
[OVS_DP_ATTR_IFINDEX] = NLA_POLICY_MIN(NLA_S32, 0),
};
static const struct genl_small_ops dp_datapath_genl_ops[] = {
{ .cmd = OVS_DP_CMD_NEW,
.validate = GENL_DONT_VALIDATE_STRICT | GENL_DONT_VALIDATE_DUMP,
.flags = GENL_UNS_ADMIN_PERM, /* Requires CAP_NET_ADMIN privilege. */
.doit = ovs_dp_cmd_new
},
{ .cmd = OVS_DP_CMD_DEL,
.validate = GENL_DONT_VALIDATE_STRICT | GENL_DONT_VALIDATE_DUMP,
.flags = GENL_UNS_ADMIN_PERM, /* Requires CAP_NET_ADMIN privilege. */
.doit = ovs_dp_cmd_del
},
{ .cmd = OVS_DP_CMD_GET,
.validate = GENL_DONT_VALIDATE_STRICT | GENL_DONT_VALIDATE_DUMP,
.flags = 0, /* OK for unprivileged users. */
.doit = ovs_dp_cmd_get,
.dumpit = ovs_dp_cmd_dump
},
{ .cmd = OVS_DP_CMD_SET,
.validate = GENL_DONT_VALIDATE_STRICT | GENL_DONT_VALIDATE_DUMP,
.flags = GENL_UNS_ADMIN_PERM, /* Requires CAP_NET_ADMIN privilege. */
.doit = ovs_dp_cmd_set,
},
};
static struct genl_family dp_datapath_genl_family __ro_after_init = {
.hdrsize = sizeof(struct ovs_header),
.name = OVS_DATAPATH_FAMILY,
.version = OVS_DATAPATH_VERSION,
.maxattr = OVS_DP_ATTR_MAX,
.policy = datapath_policy,
.netnsok = true,
.parallel_ops = true,
.small_ops = dp_datapath_genl_ops,
.n_small_ops = ARRAY_SIZE(dp_datapath_genl_ops),
.resv_start_op = OVS_DP_CMD_SET + 1,
.mcgrps = &ovs_dp_datapath_multicast_group,
.n_mcgrps = 1,
.module = THIS_MODULE,
};
/* Called with ovs_mutex or RCU read lock. */
static int ovs_vport_cmd_fill_info(struct vport *vport, struct sk_buff *skb,
struct net *net, u32 portid, u32 seq,
u32 flags, u8 cmd, gfp_t gfp)
{
struct ovs_header *ovs_header;
struct ovs_vport_stats vport_stats;
int err;
ovs_header = genlmsg_put(skb, portid, seq, &dp_vport_genl_family,
flags, cmd);
if (!ovs_header)
return -EMSGSIZE;
ovs_header->dp_ifindex = get_dpifindex(vport->dp);
if (nla_put_u32(skb, OVS_VPORT_ATTR_PORT_NO, vport->port_no) ||
nla_put_u32(skb, OVS_VPORT_ATTR_TYPE, vport->ops->type) ||
nla_put_string(skb, OVS_VPORT_ATTR_NAME,
ovs_vport_name(vport)) ||
nla_put_u32(skb, OVS_VPORT_ATTR_IFINDEX, vport->dev->ifindex))
goto nla_put_failure;
if (!net_eq(net, dev_net(vport->dev))) {
int id = peernet2id_alloc(net, dev_net(vport->dev), gfp);
if (nla_put_s32(skb, OVS_VPORT_ATTR_NETNSID, id))
goto nla_put_failure;
}
ovs_vport_get_stats(vport, &vport_stats);
if (nla_put_64bit(skb, OVS_VPORT_ATTR_STATS,
sizeof(struct ovs_vport_stats), &vport_stats,
OVS_VPORT_ATTR_PAD))
goto nla_put_failure;
if (ovs_vport_get_upcall_stats(vport, skb))
goto nla_put_failure;
if (ovs_vport_get_upcall_portids(vport, skb))
goto nla_put_failure;
err = ovs_vport_get_options(vport, skb);
if (err == -EMSGSIZE)
goto error;
genlmsg_end(skb, ovs_header);
return 0;
nla_put_failure:
err = -EMSGSIZE;
error:
genlmsg_cancel(skb, ovs_header);
return err;
}
static struct sk_buff *ovs_vport_cmd_alloc_info(void)
{
return nlmsg_new(NLMSG_DEFAULT_SIZE, GFP_KERNEL);
}
/* Called with ovs_mutex, only via ovs_dp_notify_wq(). */
struct sk_buff *ovs_vport_cmd_build_info(struct vport *vport, struct net *net,
u32 portid, u32 seq, u8 cmd)
{
struct sk_buff *skb;
int retval;
skb = nlmsg_new(NLMSG_DEFAULT_SIZE, GFP_KERNEL);
if (!skb)
return ERR_PTR(-ENOMEM);
retval = ovs_vport_cmd_fill_info(vport, skb, net, portid, seq, 0, cmd,
GFP_KERNEL);
BUG_ON(retval < 0);
return skb;
}
/* Called with ovs_mutex or RCU read lock. */
static struct vport *lookup_vport(struct net *net,
const struct ovs_header *ovs_header,
struct nlattr *a[OVS_VPORT_ATTR_MAX + 1])
{
struct datapath *dp;
struct vport *vport;
if (a[OVS_VPORT_ATTR_IFINDEX])
return ERR_PTR(-EOPNOTSUPP);
if (a[OVS_VPORT_ATTR_NAME]) {
vport = ovs_vport_locate(net, nla_data(a[OVS_VPORT_ATTR_NAME]));
if (!vport)
return ERR_PTR(-ENODEV);
if (ovs_header->dp_ifindex &&
ovs_header->dp_ifindex != get_dpifindex(vport->dp))
return ERR_PTR(-ENODEV);
return vport;
} else if (a[OVS_VPORT_ATTR_PORT_NO]) {
u32 port_no = nla_get_u32(a[OVS_VPORT_ATTR_PORT_NO]);
if (port_no >= DP_MAX_PORTS)
return ERR_PTR(-EFBIG);
dp = get_dp(net, ovs_header->dp_ifindex);
if (!dp)
return ERR_PTR(-ENODEV);
vport = ovs_vport_ovsl_rcu(dp, port_no);
if (!vport)
return ERR_PTR(-ENODEV);
return vport;
} else
return ERR_PTR(-EINVAL);
}
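/* Return the largest forwarding headroom required by any device attached
 * to the datapath. Called with ovs_mutex or RCU read lock.
 */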
static unsigned int ovs_get_max_headroom(struct datapath *dp)
{
unsigned int dev_headroom, max_headroom = 0;
struct net_device *dev;
struct vport *vport;
int i;
for (i = 0; i < DP_VPORT_HASH_BUCKETS; i++) {
hlist_for_each_entry_rcu(vport, &dp->ports[i], dp_hash_node,
lockdep_ovsl_is_held()) {
dev = vport->dev;
dev_headroom = netdev_get_fwd_headroom(dev);
if (dev_headroom > max_headroom)
max_headroom = dev_headroom;
}
}
return max_headroom;
}
/* Called with ovs_mutex */
static void ovs_update_headroom(struct datapath *dp, unsigned int new_headroom)
{
struct vport *vport;
int i;
dp->max_headroom = new_headroom;
for (i = 0; i < DP_VPORT_HASH_BUCKETS; i++) {
hlist_for_each_entry_rcu(vport, &dp->ports[i], dp_hash_node,
lockdep_ovsl_is_held())
netdev_set_rx_headroom(vport->dev, new_headroom);
}
}
static int ovs_vport_cmd_new(struct sk_buff *skb, struct genl_info *info)
{
struct nlattr **a = info->attrs;
struct ovs_header *ovs_header = genl_info_userhdr(info);
struct vport_parms parms;
struct sk_buff *reply;
struct vport *vport;
struct datapath *dp;
unsigned int new_headroom;
u32 port_no;
int err;
if (!a[OVS_VPORT_ATTR_NAME] || !a[OVS_VPORT_ATTR_TYPE] ||
!a[OVS_VPORT_ATTR_UPCALL_PID])
return -EINVAL;
parms.type = nla_get_u32(a[OVS_VPORT_ATTR_TYPE]);
if (a[OVS_VPORT_ATTR_IFINDEX] && parms.type != OVS_VPORT_TYPE_INTERNAL)
return -EOPNOTSUPP;
port_no = a[OVS_VPORT_ATTR_PORT_NO]
? nla_get_u32(a[OVS_VPORT_ATTR_PORT_NO]) : 0;
if (port_no >= DP_MAX_PORTS)
return -EFBIG;
reply = ovs_vport_cmd_alloc_info();
if (!reply)
return -ENOMEM;
ovs_lock();
restart:
dp = get_dp(sock_net(skb->sk), ovs_header->dp_ifindex);
err = -ENODEV;
if (!dp)
goto exit_unlock_free;
if (port_no) {
vport = ovs_vport_ovsl(dp, port_no);
err = -EBUSY;
if (vport)
goto exit_unlock_free;
} else {
for (port_no = 1; ; port_no++) {
if (port_no >= DP_MAX_PORTS) {
err = -EFBIG;
goto exit_unlock_free;
}
vport = ovs_vport_ovsl(dp, port_no);
if (!vport)
break;
}
}
parms.name = nla_data(a[OVS_VPORT_ATTR_NAME]);
parms.options = a[OVS_VPORT_ATTR_OPTIONS];
parms.dp = dp;
parms.port_no = port_no;
parms.upcall_portids = a[OVS_VPORT_ATTR_UPCALL_PID];
parms.desired_ifindex = a[OVS_VPORT_ATTR_IFINDEX]
? nla_get_s32(a[OVS_VPORT_ATTR_IFINDEX]) : 0;
vport = new_vport(&parms);
err = PTR_ERR(vport);
if (IS_ERR(vport)) {
if (err == -EAGAIN)
goto restart;
goto exit_unlock_free;
}
err = ovs_vport_cmd_fill_info(vport, reply, genl_info_net(info),
info->snd_portid, info->snd_seq, 0,
OVS_VPORT_CMD_NEW, GFP_KERNEL);
new_headroom = netdev_get_fwd_headroom(vport->dev);
if (new_headroom > dp->max_headroom)
ovs_update_headroom(dp, new_headroom);
else
netdev_set_rx_headroom(vport->dev, dp->max_headroom);
BUG_ON(err < 0);
ovs_unlock();
ovs_notify(&dp_vport_genl_family, reply, info);
return 0;
exit_unlock_free:
ovs_unlock();
kfree_skb(reply);
return err;
}
static int ovs_vport_cmd_set(struct sk_buff *skb, struct genl_info *info)
{
struct nlattr **a = info->attrs;
struct sk_buff *reply;
struct vport *vport;
int err;
reply = ovs_vport_cmd_alloc_info();
if (!reply)
return -ENOMEM;
ovs_lock();
vport = lookup_vport(sock_net(skb->sk), genl_info_userhdr(info), a);
err = PTR_ERR(vport);
if (IS_ERR(vport))
goto exit_unlock_free;
if (a[OVS_VPORT_ATTR_TYPE] &&
nla_get_u32(a[OVS_VPORT_ATTR_TYPE]) != vport->ops->type) {
err = -EINVAL;
goto exit_unlock_free;
}
if (a[OVS_VPORT_ATTR_OPTIONS]) {
err = ovs_vport_set_options(vport, a[OVS_VPORT_ATTR_OPTIONS]);
if (err)
goto exit_unlock_free;
}
if (a[OVS_VPORT_ATTR_UPCALL_PID]) {
struct nlattr *ids = a[OVS_VPORT_ATTR_UPCALL_PID];
err = ovs_vport_set_upcall_portids(vport, ids);
if (err)
goto exit_unlock_free;
}
err = ovs_vport_cmd_fill_info(vport, reply, genl_info_net(info),
info->snd_portid, info->snd_seq, 0,
OVS_VPORT_CMD_SET, GFP_KERNEL);
BUG_ON(err < 0);
ovs_unlock();
ovs_notify(&dp_vport_genl_family, reply, info);
return 0;
exit_unlock_free:
ovs_unlock();
kfree_skb(reply);
return err;
}
static int ovs_vport_cmd_del(struct sk_buff *skb, struct genl_info *info)
{
bool update_headroom = false;
struct nlattr **a = info->attrs;
struct sk_buff *reply;
struct datapath *dp;
struct vport *vport;
unsigned int new_headroom;
int err;
reply = ovs_vport_cmd_alloc_info();
if (!reply)
return -ENOMEM;
ovs_lock();
vport = lookup_vport(sock_net(skb->sk), genl_info_userhdr(info), a);
err = PTR_ERR(vport);
if (IS_ERR(vport))
goto exit_unlock_free;
if (vport->port_no == OVSP_LOCAL) {
err = -EINVAL;
goto exit_unlock_free;
}
err = ovs_vport_cmd_fill_info(vport, reply, genl_info_net(info),
info->snd_portid, info->snd_seq, 0,
OVS_VPORT_CMD_DEL, GFP_KERNEL);
BUG_ON(err < 0);
	/* The vport deletion may trigger a datapath headroom update. */
dp = vport->dp;
if (netdev_get_fwd_headroom(vport->dev) == dp->max_headroom)
update_headroom = true;
netdev_reset_rx_headroom(vport->dev);
ovs_dp_detach_port(vport);
if (update_headroom) {
new_headroom = ovs_get_max_headroom(dp);
if (new_headroom < dp->max_headroom)
ovs_update_headroom(dp, new_headroom);
}
ovs_unlock();
ovs_notify(&dp_vport_genl_family, reply, info);
return 0;
exit_unlock_free:
ovs_unlock();
kfree_skb(reply);
return err;
}
static int ovs_vport_cmd_get(struct sk_buff *skb, struct genl_info *info)
{
struct nlattr **a = info->attrs;
struct ovs_header *ovs_header = genl_info_userhdr(info);
struct sk_buff *reply;
struct vport *vport;
int err;
reply = ovs_vport_cmd_alloc_info();
if (!reply)
return -ENOMEM;
rcu_read_lock();
vport = lookup_vport(sock_net(skb->sk), ovs_header, a);
err = PTR_ERR(vport);
if (IS_ERR(vport))
goto exit_unlock_free;
err = ovs_vport_cmd_fill_info(vport, reply, genl_info_net(info),
info->snd_portid, info->snd_seq, 0,
OVS_VPORT_CMD_GET, GFP_ATOMIC);
BUG_ON(err < 0);
rcu_read_unlock();
return genlmsg_reply(reply, info);
exit_unlock_free:
rcu_read_unlock();
kfree_skb(reply);
return err;
}
static int ovs_vport_cmd_dump(struct sk_buff *skb, struct netlink_callback *cb)
{
struct ovs_header *ovs_header = genlmsg_data(nlmsg_data(cb->nlh));
struct datapath *dp;
int bucket = cb->args[0], skip = cb->args[1];
int i, j = 0;
rcu_read_lock();
dp = get_dp_rcu(sock_net(skb->sk), ovs_header->dp_ifindex);
if (!dp) {
rcu_read_unlock();
return -ENODEV;
}
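	/* cb->args[0] and cb->args[1] hold the hash bucket and the offset
	 * within it, so the dump resumes where the previous pass stopped.
	 */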
for (i = bucket; i < DP_VPORT_HASH_BUCKETS; i++) {
struct vport *vport;
j = 0;
hlist_for_each_entry_rcu(vport, &dp->ports[i], dp_hash_node) {
if (j >= skip &&
ovs_vport_cmd_fill_info(vport, skb,
sock_net(skb->sk),
NETLINK_CB(cb->skb).portid,
cb->nlh->nlmsg_seq,
NLM_F_MULTI,
OVS_VPORT_CMD_GET,
GFP_ATOMIC) < 0)
goto out;
j++;
}
skip = 0;
}
out:
rcu_read_unlock();
cb->args[0] = i;
cb->args[1] = j;
return skb->len;
}
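/* Periodic work that rebalances the flow-mask usage counters of every
 * datapath in this network namespace and then re-arms itself.
 */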
static void ovs_dp_masks_rebalance(struct work_struct *work)
{
struct ovs_net *ovs_net = container_of(work, struct ovs_net,
masks_rebalance.work);
struct datapath *dp;
ovs_lock();
list_for_each_entry(dp, &ovs_net->dps, list_node)
ovs_flow_masks_rebalance(&dp->table);
ovs_unlock();
schedule_delayed_work(&ovs_net->masks_rebalance,
msecs_to_jiffies(DP_MASKS_REBALANCE_INTERVAL));
}
static const struct nla_policy vport_policy[OVS_VPORT_ATTR_MAX + 1] = {
[OVS_VPORT_ATTR_NAME] = { .type = NLA_NUL_STRING, .len = IFNAMSIZ - 1 },
[OVS_VPORT_ATTR_STATS] = { .len = sizeof(struct ovs_vport_stats) },
[OVS_VPORT_ATTR_PORT_NO] = { .type = NLA_U32 },
[OVS_VPORT_ATTR_TYPE] = { .type = NLA_U32 },
[OVS_VPORT_ATTR_UPCALL_PID] = { .type = NLA_UNSPEC },
[OVS_VPORT_ATTR_OPTIONS] = { .type = NLA_NESTED },
[OVS_VPORT_ATTR_IFINDEX] = NLA_POLICY_MIN(NLA_S32, 0),
[OVS_VPORT_ATTR_NETNSID] = { .type = NLA_S32 },
[OVS_VPORT_ATTR_UPCALL_STATS] = { .type = NLA_NESTED },
};
static const struct genl_small_ops dp_vport_genl_ops[] = {
{ .cmd = OVS_VPORT_CMD_NEW,
.validate = GENL_DONT_VALIDATE_STRICT | GENL_DONT_VALIDATE_DUMP,
.flags = GENL_UNS_ADMIN_PERM, /* Requires CAP_NET_ADMIN privilege. */
.doit = ovs_vport_cmd_new
},
{ .cmd = OVS_VPORT_CMD_DEL,
.validate = GENL_DONT_VALIDATE_STRICT | GENL_DONT_VALIDATE_DUMP,
.flags = GENL_UNS_ADMIN_PERM, /* Requires CAP_NET_ADMIN privilege. */
.doit = ovs_vport_cmd_del
},
{ .cmd = OVS_VPORT_CMD_GET,
.validate = GENL_DONT_VALIDATE_STRICT | GENL_DONT_VALIDATE_DUMP,
.flags = 0, /* OK for unprivileged users. */
.doit = ovs_vport_cmd_get,
.dumpit = ovs_vport_cmd_dump
},
{ .cmd = OVS_VPORT_CMD_SET,
.validate = GENL_DONT_VALIDATE_STRICT | GENL_DONT_VALIDATE_DUMP,
.flags = GENL_UNS_ADMIN_PERM, /* Requires CAP_NET_ADMIN privilege. */
.doit = ovs_vport_cmd_set,
},
};
struct genl_family dp_vport_genl_family __ro_after_init = {
.hdrsize = sizeof(struct ovs_header),
.name = OVS_VPORT_FAMILY,
.version = OVS_VPORT_VERSION,
.maxattr = OVS_VPORT_ATTR_MAX,
.policy = vport_policy,
.netnsok = true,
.parallel_ops = true,
.small_ops = dp_vport_genl_ops,
.n_small_ops = ARRAY_SIZE(dp_vport_genl_ops),
.resv_start_op = OVS_VPORT_CMD_SET + 1,
.mcgrps = &ovs_dp_vport_multicast_group,
.n_mcgrps = 1,
.module = THIS_MODULE,
};
static struct genl_family * const dp_genl_families[] = {
&dp_datapath_genl_family,
&dp_vport_genl_family,
&dp_flow_genl_family,
&dp_packet_genl_family,
&dp_meter_genl_family,
#if IS_ENABLED(CONFIG_NETFILTER_CONNCOUNT)
&dp_ct_limit_genl_family,
#endif
};
static void dp_unregister_genl(int n_families)
{
int i;
for (i = 0; i < n_families; i++)
genl_unregister_family(dp_genl_families[i]);
}
static int __init dp_register_genl(void)
{
int err;
int i;
for (i = 0; i < ARRAY_SIZE(dp_genl_families); i++) {
err = genl_register_family(dp_genl_families[i]);
if (err)
goto error;
}
return 0;
error:
dp_unregister_genl(i);
return err;
}
static int __net_init ovs_init_net(struct net *net)
{
struct ovs_net *ovs_net = net_generic(net, ovs_net_id);
int err;
INIT_LIST_HEAD(&ovs_net->dps);
INIT_WORK(&ovs_net->dp_notify_work, ovs_dp_notify_wq);
INIT_DELAYED_WORK(&ovs_net->masks_rebalance, ovs_dp_masks_rebalance);
err = ovs_ct_init(net);
if (err)
return err;
schedule_delayed_work(&ovs_net->masks_rebalance,
msecs_to_jiffies(DP_MASKS_REBALANCE_INTERVAL));
return 0;
}
static void __net_exit list_vports_from_net(struct net *net, struct net *dnet,
struct list_head *head)
{
struct ovs_net *ovs_net = net_generic(net, ovs_net_id);
struct datapath *dp;
list_for_each_entry(dp, &ovs_net->dps, list_node) {
int i;
for (i = 0; i < DP_VPORT_HASH_BUCKETS; i++) {
struct vport *vport;
hlist_for_each_entry(vport, &dp->ports[i], dp_hash_node) {
if (vport->ops->type != OVS_VPORT_TYPE_INTERNAL)
continue;
if (dev_net(vport->dev) == dnet)
list_add(&vport->detach_list, head);
}
}
}
}
static void __net_exit ovs_exit_net(struct net *dnet)
{
struct datapath *dp, *dp_next;
struct ovs_net *ovs_net = net_generic(dnet, ovs_net_id);
struct vport *vport, *vport_next;
struct net *net;
LIST_HEAD(head);
ovs_lock();
ovs_ct_exit(dnet);
list_for_each_entry_safe(dp, dp_next, &ovs_net->dps, list_node)
__dp_destroy(dp);
down_read(&net_rwsem);
for_each_net(net)
list_vports_from_net(net, dnet, &head);
up_read(&net_rwsem);
/* Detach all vports from given namespace. */
list_for_each_entry_safe(vport, vport_next, &head, detach_list) {
list_del(&vport->detach_list);
ovs_dp_detach_port(vport);
}
ovs_unlock();
cancel_delayed_work_sync(&ovs_net->masks_rebalance);
cancel_work_sync(&ovs_net->dp_notify_work);
}
static struct pernet_operations ovs_net_ops = {
.init = ovs_init_net,
.exit = ovs_exit_net,
.id = &ovs_net_id,
.size = sizeof(struct ovs_net),
};
static const char * const ovs_drop_reasons[] = {
#define S(x) (#x),
OVS_DROP_REASONS(S)
#undef S
};
static struct drop_reason_list drop_reason_list_ovs = {
.reasons = ovs_drop_reasons,
.n_reasons = ARRAY_SIZE(ovs_drop_reasons),
};
static int __init dp_init(void)
{
int err;
BUILD_BUG_ON(sizeof(struct ovs_skb_cb) >
sizeof_field(struct sk_buff, cb));
pr_info("Open vSwitch switching datapath\n");
err = action_fifos_init();
if (err)
goto error;
err = ovs_internal_dev_rtnl_link_register();
if (err)
goto error_action_fifos_exit;
err = ovs_flow_init();
if (err)
goto error_unreg_rtnl_link;
err = ovs_vport_init();
if (err)
goto error_flow_exit;
err = register_pernet_device(&ovs_net_ops);
if (err)
goto error_vport_exit;
err = register_netdevice_notifier(&ovs_dp_device_notifier);
if (err)
goto error_netns_exit;
err = ovs_netdev_init();
if (err)
goto error_unreg_notifier;
err = dp_register_genl();
if (err < 0)
goto error_unreg_netdev;
drop_reasons_register_subsys(SKB_DROP_REASON_SUBSYS_OPENVSWITCH,
&drop_reason_list_ovs);
return 0;
error_unreg_netdev:
ovs_netdev_exit();
error_unreg_notifier:
unregister_netdevice_notifier(&ovs_dp_device_notifier);
error_netns_exit:
unregister_pernet_device(&ovs_net_ops);
error_vport_exit:
ovs_vport_exit();
error_flow_exit:
ovs_flow_exit();
error_unreg_rtnl_link:
ovs_internal_dev_rtnl_link_unregister();
error_action_fifos_exit:
action_fifos_exit();
error:
return err;
}
static void dp_cleanup(void)
{
dp_unregister_genl(ARRAY_SIZE(dp_genl_families));
ovs_netdev_exit();
unregister_netdevice_notifier(&ovs_dp_device_notifier);
unregister_pernet_device(&ovs_net_ops);
drop_reasons_unregister_subsys(SKB_DROP_REASON_SUBSYS_OPENVSWITCH);
rcu_barrier();
ovs_vport_exit();
ovs_flow_exit();
ovs_internal_dev_rtnl_link_unregister();
action_fifos_exit();
}
module_init(dp_init);
module_exit(dp_cleanup);
MODULE_DESCRIPTION("Open vSwitch switching datapath");
MODULE_LICENSE("GPL");
MODULE_ALIAS_GENL_FAMILY(OVS_DATAPATH_FAMILY);
MODULE_ALIAS_GENL_FAMILY(OVS_VPORT_FAMILY);
MODULE_ALIAS_GENL_FAMILY(OVS_FLOW_FAMILY);
MODULE_ALIAS_GENL_FAMILY(OVS_PACKET_FAMILY);
MODULE_ALIAS_GENL_FAMILY(OVS_METER_FAMILY);
MODULE_ALIAS_GENL_FAMILY(OVS_CT_LIMIT_FAMILY);
| linux-master | net/openvswitch/datapath.c |
// SPDX-License-Identifier: GPL-2.0-only
/*
* Copyright (c) 2007-2012 Nicira, Inc.
*/
#include <linux/if_vlan.h>
#include <linux/kernel.h>
#include <linux/netdevice.h>
#include <linux/etherdevice.h>
#include <linux/ethtool.h>
#include <linux/skbuff.h>
#include <net/dst.h>
#include <net/xfrm.h>
#include <net/rtnetlink.h>
#include "datapath.h"
#include "vport-internal_dev.h"
#include "vport-netdev.h"
struct internal_dev {
struct vport *vport;
};
static struct vport_ops ovs_internal_vport_ops;
static struct internal_dev *internal_dev_priv(struct net_device *netdev)
{
return netdev_priv(netdev);
}
/* Called with rcu_read_lock_bh. */
static netdev_tx_t
internal_dev_xmit(struct sk_buff *skb, struct net_device *netdev)
{
int len, err;
/* store len value because skb can be freed inside ovs_vport_receive() */
len = skb->len;
rcu_read_lock();
err = ovs_vport_receive(internal_dev_priv(netdev)->vport, skb, NULL);
rcu_read_unlock();
if (likely(!err))
dev_sw_netstats_tx_add(netdev, 1, len);
else
netdev->stats.tx_errors++;
return NETDEV_TX_OK;
}
static int internal_dev_open(struct net_device *netdev)
{
netif_start_queue(netdev);
return 0;
}
static int internal_dev_stop(struct net_device *netdev)
{
netif_stop_queue(netdev);
return 0;
}
static void internal_dev_getinfo(struct net_device *netdev,
struct ethtool_drvinfo *info)
{
strscpy(info->driver, "openvswitch", sizeof(info->driver));
}
static const struct ethtool_ops internal_dev_ethtool_ops = {
.get_drvinfo = internal_dev_getinfo,
.get_link = ethtool_op_get_link,
};
static void internal_dev_destructor(struct net_device *dev)
{
struct vport *vport = ovs_internal_dev_get_vport(dev);
ovs_vport_free(vport);
}
static const struct net_device_ops internal_dev_netdev_ops = {
.ndo_open = internal_dev_open,
.ndo_stop = internal_dev_stop,
.ndo_start_xmit = internal_dev_xmit,
.ndo_set_mac_address = eth_mac_addr,
.ndo_get_stats64 = dev_get_tstats64,
};
static struct rtnl_link_ops internal_dev_link_ops __read_mostly = {
.kind = "openvswitch",
};
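/* Set up the internal device as an Ethernet device with OVS-specific
 * flags, checksum/GSO offload features and a random MAC address.
 */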
static void do_setup(struct net_device *netdev)
{
ether_setup(netdev);
netdev->max_mtu = ETH_MAX_MTU;
netdev->netdev_ops = &internal_dev_netdev_ops;
netdev->priv_flags &= ~IFF_TX_SKB_SHARING;
netdev->priv_flags |= IFF_LIVE_ADDR_CHANGE | IFF_OPENVSWITCH |
IFF_NO_QUEUE;
netdev->needs_free_netdev = true;
netdev->priv_destructor = NULL;
netdev->ethtool_ops = &internal_dev_ethtool_ops;
netdev->rtnl_link_ops = &internal_dev_link_ops;
netdev->features = NETIF_F_LLTX | NETIF_F_SG | NETIF_F_FRAGLIST |
NETIF_F_HIGHDMA | NETIF_F_HW_CSUM |
NETIF_F_GSO_SOFTWARE | NETIF_F_GSO_ENCAP_ALL;
netdev->vlan_features = netdev->features;
netdev->hw_enc_features = netdev->features;
netdev->features |= NETIF_F_HW_VLAN_CTAG_TX | NETIF_F_HW_VLAN_STAG_TX;
netdev->hw_features = netdev->features & ~NETIF_F_LLTX;
eth_hw_addr_random(netdev);
}
static struct vport *internal_dev_create(const struct vport_parms *parms)
{
struct vport *vport;
struct internal_dev *internal_dev;
struct net_device *dev;
int err;
vport = ovs_vport_alloc(0, &ovs_internal_vport_ops, parms);
if (IS_ERR(vport)) {
err = PTR_ERR(vport);
goto error;
}
dev = alloc_netdev(sizeof(struct internal_dev),
parms->name, NET_NAME_USER, do_setup);
vport->dev = dev;
if (!vport->dev) {
err = -ENOMEM;
goto error_free_vport;
}
vport->dev->tstats = netdev_alloc_pcpu_stats(struct pcpu_sw_netstats);
if (!vport->dev->tstats) {
err = -ENOMEM;
goto error_free_netdev;
}
dev_net_set(vport->dev, ovs_dp_get_net(vport->dp));
dev->ifindex = parms->desired_ifindex;
internal_dev = internal_dev_priv(vport->dev);
internal_dev->vport = vport;
/* Restrict bridge port to current netns. */
if (vport->port_no == OVSP_LOCAL)
vport->dev->features |= NETIF_F_NETNS_LOCAL;
rtnl_lock();
err = register_netdevice(vport->dev);
if (err)
goto error_unlock;
vport->dev->priv_destructor = internal_dev_destructor;
dev_set_promiscuity(vport->dev, 1);
rtnl_unlock();
netif_start_queue(vport->dev);
return vport;
error_unlock:
rtnl_unlock();
free_percpu(dev->tstats);
error_free_netdev:
free_netdev(dev);
error_free_vport:
ovs_vport_free(vport);
error:
return ERR_PTR(err);
}
static void internal_dev_destroy(struct vport *vport)
{
netif_stop_queue(vport->dev);
rtnl_lock();
dev_set_promiscuity(vport->dev, -1);
/* unregister_netdevice() waits for an RCU grace period. */
unregister_netdevice(vport->dev);
free_percpu(vport->dev->tstats);
rtnl_unlock();
}
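/* Deliver a packet from the datapath to the internal device, as if it had
 * been received on that device, so the host network stack can process it.
 */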
static int internal_dev_recv(struct sk_buff *skb)
{
struct net_device *netdev = skb->dev;
if (unlikely(!(netdev->flags & IFF_UP))) {
kfree_skb(skb);
netdev->stats.rx_dropped++;
return NETDEV_TX_OK;
}
skb_dst_drop(skb);
nf_reset_ct(skb);
secpath_reset(skb);
skb->pkt_type = PACKET_HOST;
skb->protocol = eth_type_trans(skb, netdev);
skb_postpull_rcsum(skb, eth_hdr(skb), ETH_HLEN);
dev_sw_netstats_rx_add(netdev, skb->len);
netif_rx(skb);
return NETDEV_TX_OK;
}
static struct vport_ops ovs_internal_vport_ops = {
.type = OVS_VPORT_TYPE_INTERNAL,
.create = internal_dev_create,
.destroy = internal_dev_destroy,
.send = internal_dev_recv,
};
int ovs_is_internal_dev(const struct net_device *netdev)
{
return netdev->netdev_ops == &internal_dev_netdev_ops;
}
struct vport *ovs_internal_dev_get_vport(struct net_device *netdev)
{
if (!ovs_is_internal_dev(netdev))
return NULL;
return internal_dev_priv(netdev)->vport;
}
int ovs_internal_dev_rtnl_link_register(void)
{
int err;
err = rtnl_link_register(&internal_dev_link_ops);
if (err < 0)
return err;
err = ovs_vport_ops_register(&ovs_internal_vport_ops);
if (err < 0)
rtnl_link_unregister(&internal_dev_link_ops);
return err;
}
void ovs_internal_dev_rtnl_link_unregister(void)
{
ovs_vport_ops_unregister(&ovs_internal_vport_ops);
rtnl_link_unregister(&internal_dev_link_ops);
}
| linux-master | net/openvswitch/vport-internal_dev.c |
// SPDX-License-Identifier: GPL-2.0-only
/*
* Copyright (c) 2017 Nicira, Inc.
*/
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
#include <linux/if.h>
#include <linux/skbuff.h>
#include <linux/ip.h>
#include <linux/kernel.h>
#include <linux/openvswitch.h>
#include <linux/netlink.h>
#include <linux/rculist.h>
#include <net/netlink.h>
#include <net/genetlink.h>
#include "datapath.h"
#include "meter.h"
static const struct nla_policy meter_policy[OVS_METER_ATTR_MAX + 1] = {
[OVS_METER_ATTR_ID] = { .type = NLA_U32, },
[OVS_METER_ATTR_KBPS] = { .type = NLA_FLAG },
[OVS_METER_ATTR_STATS] = { .len = sizeof(struct ovs_flow_stats) },
[OVS_METER_ATTR_BANDS] = { .type = NLA_NESTED },
[OVS_METER_ATTR_USED] = { .type = NLA_U64 },
[OVS_METER_ATTR_CLEAR] = { .type = NLA_FLAG },
[OVS_METER_ATTR_MAX_METERS] = { .type = NLA_U32 },
[OVS_METER_ATTR_MAX_BANDS] = { .type = NLA_U32 },
};
static const struct nla_policy band_policy[OVS_BAND_ATTR_MAX + 1] = {
[OVS_BAND_ATTR_TYPE] = { .type = NLA_U32, },
[OVS_BAND_ATTR_RATE] = { .type = NLA_U32, },
[OVS_BAND_ATTR_BURST] = { .type = NLA_U32, },
[OVS_BAND_ATTR_STATS] = { .len = sizeof(struct ovs_flow_stats) },
};
static u32 meter_hash(struct dp_meter_instance *ti, u32 id)
{
return id % ti->n_meters;
}
static void ovs_meter_free(struct dp_meter *meter)
{
if (!meter)
return;
kfree_rcu(meter, rcu);
}
/* Call with ovs_mutex or RCU read lock. */
static struct dp_meter *lookup_meter(const struct dp_meter_table *tbl,
u32 meter_id)
{
struct dp_meter_instance *ti = rcu_dereference_ovsl(tbl->ti);
u32 hash = meter_hash(ti, meter_id);
struct dp_meter *meter;
meter = rcu_dereference_ovsl(ti->dp_meters[hash]);
if (meter && likely(meter->id == meter_id))
return meter;
return NULL;
}
static struct dp_meter_instance *dp_meter_instance_alloc(const u32 size)
{
struct dp_meter_instance *ti;
ti = kvzalloc(struct_size(ti, dp_meters, size), GFP_KERNEL);
if (!ti)
return NULL;
ti->n_meters = size;
return ti;
}
static void dp_meter_instance_free(struct dp_meter_instance *ti)
{
kvfree(ti);
}
static void dp_meter_instance_free_rcu(struct rcu_head *rcu)
{
struct dp_meter_instance *ti;
ti = container_of(rcu, struct dp_meter_instance, rcu);
kvfree(ti);
}
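/* Resize the meter instance array to 'size', carrying over the meter
 * pointers that fit in the new array; the old instance is freed after an
 * RCU grace period.
 */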
static int
dp_meter_instance_realloc(struct dp_meter_table *tbl, u32 size)
{
struct dp_meter_instance *ti = rcu_dereference_ovsl(tbl->ti);
int n_meters = min(size, ti->n_meters);
struct dp_meter_instance *new_ti;
int i;
new_ti = dp_meter_instance_alloc(size);
if (!new_ti)
return -ENOMEM;
for (i = 0; i < n_meters; i++)
if (rcu_dereference_ovsl(ti->dp_meters[i]))
new_ti->dp_meters[i] = ti->dp_meters[i];
rcu_assign_pointer(tbl->ti, new_ti);
call_rcu(&ti->rcu, dp_meter_instance_free_rcu);
return 0;
}
static void dp_meter_instance_insert(struct dp_meter_instance *ti,
struct dp_meter *meter)
{
u32 hash;
hash = meter_hash(ti, meter->id);
rcu_assign_pointer(ti->dp_meters[hash], meter);
}
static void dp_meter_instance_remove(struct dp_meter_instance *ti,
struct dp_meter *meter)
{
u32 hash;
hash = meter_hash(ti, meter->id);
RCU_INIT_POINTER(ti->dp_meters[hash], NULL);
}
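/* Insert a meter into the table's current instance array.  The slot for
 * the meter id is expected to be free; the array is doubled once the
 * number of meters reaches its capacity.  Returns -EBUSY if the slot is
 * already taken, -EFBIG when the per-datapath limit is reached, or
 * -ENOMEM if growing the array fails.  Called with ovs_mutex held.
 */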
static int attach_meter(struct dp_meter_table *tbl, struct dp_meter *meter)
{
struct dp_meter_instance *ti = rcu_dereference_ovsl(tbl->ti);
u32 hash = meter_hash(ti, meter->id);
int err;
/* In general, the selected slot should be empty, because
 * OvS uses an id-pool to fetch an available id.
*/
if (unlikely(rcu_dereference_ovsl(ti->dp_meters[hash])))
return -EBUSY;
dp_meter_instance_insert(ti, meter);
/* Updates to tbl->count are serialized by ovs_mutex. */
tbl->count++;
if (tbl->count >= tbl->max_meters_allowed) {
err = -EFBIG;
goto attach_err;
}
if (tbl->count >= ti->n_meters &&
dp_meter_instance_realloc(tbl, ti->n_meters * 2)) {
err = -ENOMEM;
goto attach_err;
}
return 0;
attach_err:
dp_meter_instance_remove(ti, meter);
tbl->count--;
return err;
}
static int detach_meter(struct dp_meter_table *tbl, struct dp_meter *meter)
{
struct dp_meter_instance *ti;
ASSERT_OVSL();
if (!meter)
return 0;
ti = rcu_dereference_ovsl(tbl->ti);
dp_meter_instance_remove(ti, meter);
tbl->count--;
/* Shrink the meter array if necessary. */
if (ti->n_meters > DP_METER_ARRAY_SIZE_MIN &&
tbl->count <= (ti->n_meters / 4)) {
int half_size = ti->n_meters / 2;
int i;
/* To avoid hash collisions, don't relocate entries to other slots.
 * Just make sure no meters are referenced in the upper half of the
 * array that is about to be released.
*/
for (i = half_size; i < ti->n_meters; i++)
if (rcu_dereference_ovsl(ti->dp_meters[i]))
goto out;
if (dp_meter_instance_realloc(tbl, half_size))
goto shrink_err;
}
out:
return 0;
shrink_err:
dp_meter_instance_insert(ti, meter);
tbl->count++;
return -ENOMEM;
}
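/* Allocate a reply skb and start a generic netlink message for 'cmd' in
 * the dp_meter family, copying dp_ifindex from the request.  Returns the
 * skb or an ERR_PTR() on allocation or message-size failure.
 */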
static struct sk_buff *
ovs_meter_cmd_reply_start(struct genl_info *info, u8 cmd,
struct ovs_header **ovs_reply_header)
{
struct sk_buff *skb;
struct ovs_header *ovs_header = genl_info_userhdr(info);
skb = nlmsg_new(NLMSG_DEFAULT_SIZE, GFP_ATOMIC);
if (!skb)
return ERR_PTR(-ENOMEM);
*ovs_reply_header = genlmsg_put(skb, info->snd_portid,
info->snd_seq,
&dp_meter_genl_family, 0, cmd);
if (!*ovs_reply_header) {
nlmsg_free(skb);
return ERR_PTR(-EMSGSIZE);
}
(*ovs_reply_header)->dp_ifindex = ovs_header->dp_ifindex;
return skb;
}
static int ovs_meter_cmd_reply_stats(struct sk_buff *reply, u32 meter_id,
struct dp_meter *meter)
{
struct nlattr *nla;
struct dp_meter_band *band;
u16 i;
if (nla_put_u32(reply, OVS_METER_ATTR_ID, meter_id))
goto error;
if (nla_put(reply, OVS_METER_ATTR_STATS,
sizeof(struct ovs_flow_stats), &meter->stats))
goto error;
if (nla_put_u64_64bit(reply, OVS_METER_ATTR_USED, meter->used,
OVS_METER_ATTR_PAD))
goto error;
nla = nla_nest_start_noflag(reply, OVS_METER_ATTR_BANDS);
if (!nla)
goto error;
band = meter->bands;
for (i = 0; i < meter->n_bands; ++i, ++band) {
struct nlattr *band_nla;
band_nla = nla_nest_start_noflag(reply, OVS_BAND_ATTR_UNSPEC);
if (!band_nla || nla_put(reply, OVS_BAND_ATTR_STATS,
sizeof(struct ovs_flow_stats),
&band->stats))
goto error;
nla_nest_end(reply, band_nla);
}
nla_nest_end(reply, nla);
return 0;
error:
return -EMSGSIZE;
}
static int ovs_meter_cmd_features(struct sk_buff *skb, struct genl_info *info)
{
struct ovs_header *ovs_header = genl_info_userhdr(info);
struct ovs_header *ovs_reply_header;
struct nlattr *nla, *band_nla;
struct sk_buff *reply;
struct datapath *dp;
int err = -EMSGSIZE;
reply = ovs_meter_cmd_reply_start(info, OVS_METER_CMD_FEATURES,
&ovs_reply_header);
if (IS_ERR(reply))
return PTR_ERR(reply);
ovs_lock();
dp = get_dp(sock_net(skb->sk), ovs_header->dp_ifindex);
if (!dp) {
err = -ENODEV;
goto exit_unlock;
}
if (nla_put_u32(reply, OVS_METER_ATTR_MAX_METERS,
dp->meter_tbl.max_meters_allowed))
goto exit_unlock;
ovs_unlock();
if (nla_put_u32(reply, OVS_METER_ATTR_MAX_BANDS, DP_MAX_BANDS))
goto nla_put_failure;
nla = nla_nest_start_noflag(reply, OVS_METER_ATTR_BANDS);
if (!nla)
goto nla_put_failure;
band_nla = nla_nest_start_noflag(reply, OVS_BAND_ATTR_UNSPEC);
if (!band_nla)
goto nla_put_failure;
/* Currently only DROP band type is supported. */
if (nla_put_u32(reply, OVS_BAND_ATTR_TYPE, OVS_METER_BAND_TYPE_DROP))
goto nla_put_failure;
nla_nest_end(reply, band_nla);
nla_nest_end(reply, nla);
genlmsg_end(reply, ovs_reply_header);
return genlmsg_reply(reply, info);
exit_unlock:
ovs_unlock();
nla_put_failure:
nlmsg_free(reply);
return err;
}
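/* Parse the meter netlink attributes and allocate a dp_meter.  Each band
 * must supply a type, a non-zero rate and a burst size; buckets start out
 * full.  Returns the new meter or an ERR_PTR() on failure.
 */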
static struct dp_meter *dp_meter_create(struct nlattr **a)
{
struct nlattr *nla;
int rem;
u16 n_bands = 0;
struct dp_meter *meter;
struct dp_meter_band *band;
int err;
/* Validate attributes, count the bands. */
if (!a[OVS_METER_ATTR_BANDS])
return ERR_PTR(-EINVAL);
nla_for_each_nested(nla, a[OVS_METER_ATTR_BANDS], rem)
if (++n_bands > DP_MAX_BANDS)
return ERR_PTR(-EINVAL);
/* Allocate and set up the meter before locking anything. */
meter = kzalloc(struct_size(meter, bands, n_bands), GFP_KERNEL_ACCOUNT);
if (!meter)
return ERR_PTR(-ENOMEM);
meter->id = nla_get_u32(a[OVS_METER_ATTR_ID]);
meter->used = div_u64(ktime_get_ns(), 1000 * 1000);
meter->kbps = a[OVS_METER_ATTR_KBPS] ? 1 : 0;
meter->keep_stats = !a[OVS_METER_ATTR_CLEAR];
spin_lock_init(&meter->lock);
if (meter->keep_stats && a[OVS_METER_ATTR_STATS]) {
meter->stats = *(struct ovs_flow_stats *)
nla_data(a[OVS_METER_ATTR_STATS]);
}
meter->n_bands = n_bands;
/* Set up meter bands. */
band = meter->bands;
nla_for_each_nested(nla, a[OVS_METER_ATTR_BANDS], rem) {
struct nlattr *attr[OVS_BAND_ATTR_MAX + 1];
u32 band_max_delta_t;
err = nla_parse_deprecated((struct nlattr **)&attr,
OVS_BAND_ATTR_MAX, nla_data(nla),
nla_len(nla), band_policy, NULL);
if (err)
goto exit_free_meter;
if (!attr[OVS_BAND_ATTR_TYPE] ||
!attr[OVS_BAND_ATTR_RATE] ||
!attr[OVS_BAND_ATTR_BURST]) {
err = -EINVAL;
goto exit_free_meter;
}
band->type = nla_get_u32(attr[OVS_BAND_ATTR_TYPE]);
band->rate = nla_get_u32(attr[OVS_BAND_ATTR_RATE]);
if (band->rate == 0) {
err = -EINVAL;
goto exit_free_meter;
}
band->burst_size = nla_get_u32(attr[OVS_BAND_ATTR_BURST]);
/* Figure out max delta_t that is enough to fill any bucket.
* Keep max_delta_t size to the bucket units:
* pkts => 1/1000 packets, kilobits => bits.
*
* Start with a full bucket.
*/
band->bucket = band->burst_size * 1000ULL;
band_max_delta_t = div_u64(band->bucket, band->rate);
if (band_max_delta_t > meter->max_delta_t)
meter->max_delta_t = band_max_delta_t;
band++;
}
return meter;
exit_free_meter:
kfree(meter);
return ERR_PTR(err);
}
static int ovs_meter_cmd_set(struct sk_buff *skb, struct genl_info *info)
{
struct nlattr **a = info->attrs;
struct dp_meter *meter, *old_meter;
struct sk_buff *reply;
struct ovs_header *ovs_reply_header;
struct ovs_header *ovs_header = genl_info_userhdr(info);
struct dp_meter_table *meter_tbl;
struct datapath *dp;
int err;
u32 meter_id;
bool failed;
if (!a[OVS_METER_ATTR_ID])
return -EINVAL;
meter = dp_meter_create(a);
if (IS_ERR(meter))
return PTR_ERR(meter);
reply = ovs_meter_cmd_reply_start(info, OVS_METER_CMD_SET,
&ovs_reply_header);
if (IS_ERR(reply)) {
err = PTR_ERR(reply);
goto exit_free_meter;
}
ovs_lock();
dp = get_dp(sock_net(skb->sk), ovs_header->dp_ifindex);
if (!dp) {
err = -ENODEV;
goto exit_unlock;
}
meter_tbl = &dp->meter_tbl;
meter_id = nla_get_u32(a[OVS_METER_ATTR_ID]);
old_meter = lookup_meter(meter_tbl, meter_id);
err = detach_meter(meter_tbl, old_meter);
if (err)
goto exit_unlock;
err = attach_meter(meter_tbl, meter);
if (err)
goto exit_free_old_meter;
ovs_unlock();
/* Build response with the meter_id and stats from
* the old meter, if any.
*/
failed = nla_put_u32(reply, OVS_METER_ATTR_ID, meter_id);
WARN_ON(failed);
if (old_meter) {
spin_lock_bh(&old_meter->lock);
if (old_meter->keep_stats) {
err = ovs_meter_cmd_reply_stats(reply, meter_id,
old_meter);
WARN_ON(err);
}
spin_unlock_bh(&old_meter->lock);
ovs_meter_free(old_meter);
}
genlmsg_end(reply, ovs_reply_header);
return genlmsg_reply(reply, info);
exit_free_old_meter:
ovs_meter_free(old_meter);
exit_unlock:
ovs_unlock();
nlmsg_free(reply);
exit_free_meter:
kfree(meter);
return err;
}
static int ovs_meter_cmd_get(struct sk_buff *skb, struct genl_info *info)
{
struct ovs_header *ovs_header = genl_info_userhdr(info);
struct ovs_header *ovs_reply_header;
struct nlattr **a = info->attrs;
struct dp_meter *meter;
struct sk_buff *reply;
struct datapath *dp;
u32 meter_id;
int err;
if (!a[OVS_METER_ATTR_ID])
return -EINVAL;
meter_id = nla_get_u32(a[OVS_METER_ATTR_ID]);
reply = ovs_meter_cmd_reply_start(info, OVS_METER_CMD_GET,
&ovs_reply_header);
if (IS_ERR(reply))
return PTR_ERR(reply);
ovs_lock();
dp = get_dp(sock_net(skb->sk), ovs_header->dp_ifindex);
if (!dp) {
err = -ENODEV;
goto exit_unlock;
}
/* Locate meter, copy stats. */
meter = lookup_meter(&dp->meter_tbl, meter_id);
if (!meter) {
err = -ENOENT;
goto exit_unlock;
}
spin_lock_bh(&meter->lock);
err = ovs_meter_cmd_reply_stats(reply, meter_id, meter);
spin_unlock_bh(&meter->lock);
if (err)
goto exit_unlock;
ovs_unlock();
genlmsg_end(reply, ovs_reply_header);
return genlmsg_reply(reply, info);
exit_unlock:
ovs_unlock();
nlmsg_free(reply);
return err;
}
static int ovs_meter_cmd_del(struct sk_buff *skb, struct genl_info *info)
{
struct ovs_header *ovs_header = genl_info_userhdr(info);
struct ovs_header *ovs_reply_header;
struct nlattr **a = info->attrs;
struct dp_meter *old_meter;
struct sk_buff *reply;
struct datapath *dp;
u32 meter_id;
int err;
if (!a[OVS_METER_ATTR_ID])
return -EINVAL;
reply = ovs_meter_cmd_reply_start(info, OVS_METER_CMD_DEL,
&ovs_reply_header);
if (IS_ERR(reply))
return PTR_ERR(reply);
ovs_lock();
dp = get_dp(sock_net(skb->sk), ovs_header->dp_ifindex);
if (!dp) {
err = -ENODEV;
goto exit_unlock;
}
meter_id = nla_get_u32(a[OVS_METER_ATTR_ID]);
old_meter = lookup_meter(&dp->meter_tbl, meter_id);
if (old_meter) {
spin_lock_bh(&old_meter->lock);
err = ovs_meter_cmd_reply_stats(reply, meter_id, old_meter);
WARN_ON(err);
spin_unlock_bh(&old_meter->lock);
err = detach_meter(&dp->meter_tbl, old_meter);
if (err)
goto exit_unlock;
}
ovs_unlock();
ovs_meter_free(old_meter);
genlmsg_end(reply, ovs_reply_header);
return genlmsg_reply(reply, info);
exit_unlock:
ovs_unlock();
nlmsg_free(reply);
return err;
}
/* Meter action execution.
*
 * Return true if the 'meter_id' drop band is triggered. The 'skb' should be
 * dropped by the caller.
*/
bool ovs_meter_execute(struct datapath *dp, struct sk_buff *skb,
struct sw_flow_key *key, u32 meter_id)
{
long long int now_ms = div_u64(ktime_get_ns(), 1000 * 1000);
long long int long_delta_ms;
struct dp_meter_band *band;
struct dp_meter *meter;
int i, band_exceeded_max = -1;
u32 band_exceeded_rate = 0;
u32 delta_ms;
u32 cost;
meter = lookup_meter(&dp->meter_tbl, meter_id);
/* Do not drop the packet when there is no meter. */
if (!meter)
return false;
/* Lock the meter while using it. */
spin_lock(&meter->lock);
long_delta_ms = (now_ms - meter->used); /* ms */
if (long_delta_ms < 0) {
/* This condition means that we have several threads fighting
* for a meter lock, and the one who received the packets a
* bit later wins. Assuming that all racing threads received
* packets at the same time to avoid overflow.
*/
long_delta_ms = 0;
}
/* Make sure delta_ms will not be too large, so that bucket will not
* wrap around below.
*/
delta_ms = (long_delta_ms > (long long int)meter->max_delta_t)
? meter->max_delta_t : (u32)long_delta_ms;
/* Update meter statistics.
*/
meter->used = now_ms;
meter->stats.n_packets += 1;
meter->stats.n_bytes += skb->len;
/* Bucket rate is either in kilobits per second, or in packets per
* second. We maintain the bucket in the units of either bits or
* 1/1000th of a packet, correspondingly.
* Then, when rate is multiplied with milliseconds, we get the
* bucket units:
* msec * kbps = bits, and
* msec * packets/sec = 1/1000 packets.
*
* 'cost' is the number of bucket units in this packet.
*/
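/* Example for a kbps meter: a 1500-byte packet costs 1500 * 8 = 12000
 * bucket units (bits), while a band with rate 1000 (kbps) regains
 * 1000 units per elapsed millisecond, capped at burst_size * 1000.
 */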
cost = (meter->kbps) ? skb->len * 8 : 1000;
/* Update all bands and find the one hit with the highest rate. */
for (i = 0; i < meter->n_bands; ++i) {
long long int max_bucket_size;
band = &meter->bands[i];
max_bucket_size = band->burst_size * 1000LL;
band->bucket += delta_ms * band->rate;
if (band->bucket > max_bucket_size)
band->bucket = max_bucket_size;
if (band->bucket >= cost) {
band->bucket -= cost;
} else if (band->rate > band_exceeded_rate) {
band_exceeded_rate = band->rate;
band_exceeded_max = i;
}
}
if (band_exceeded_max >= 0) {
/* Update band statistics. */
band = &meter->bands[band_exceeded_max];
band->stats.n_packets += 1;
band->stats.n_bytes += skb->len;
/* Drop band triggered, let the caller drop the 'skb'. */
if (band->type == OVS_METER_BAND_TYPE_DROP) {
spin_unlock(&meter->lock);
return true;
}
}
spin_unlock(&meter->lock);
return false;
}
static const struct genl_small_ops dp_meter_genl_ops[] = {
{ .cmd = OVS_METER_CMD_FEATURES,
.validate = GENL_DONT_VALIDATE_STRICT | GENL_DONT_VALIDATE_DUMP,
.flags = 0, /* OK for unprivileged users. */
.doit = ovs_meter_cmd_features
},
{ .cmd = OVS_METER_CMD_SET,
.validate = GENL_DONT_VALIDATE_STRICT | GENL_DONT_VALIDATE_DUMP,
.flags = GENL_UNS_ADMIN_PERM, /* Requires CAP_NET_ADMIN
* privilege.
*/
.doit = ovs_meter_cmd_set,
},
{ .cmd = OVS_METER_CMD_GET,
.validate = GENL_DONT_VALIDATE_STRICT | GENL_DONT_VALIDATE_DUMP,
.flags = 0, /* OK for unprivileged users. */
.doit = ovs_meter_cmd_get,
},
{ .cmd = OVS_METER_CMD_DEL,
.validate = GENL_DONT_VALIDATE_STRICT | GENL_DONT_VALIDATE_DUMP,
.flags = GENL_UNS_ADMIN_PERM, /* Requires CAP_NET_ADMIN
* privilege.
*/
.doit = ovs_meter_cmd_del
},
};
static const struct genl_multicast_group ovs_meter_multicast_group = {
.name = OVS_METER_MCGROUP,
};
struct genl_family dp_meter_genl_family __ro_after_init = {
.hdrsize = sizeof(struct ovs_header),
.name = OVS_METER_FAMILY,
.version = OVS_METER_VERSION,
.maxattr = OVS_METER_ATTR_MAX,
.policy = meter_policy,
.netnsok = true,
.parallel_ops = true,
.small_ops = dp_meter_genl_ops,
.n_small_ops = ARRAY_SIZE(dp_meter_genl_ops),
.resv_start_op = OVS_METER_CMD_GET + 1,
.mcgrps = &ovs_meter_multicast_group,
.n_mcgrps = 1,
.module = THIS_MODULE,
};
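/* Set up the per-datapath meter table: allocate the initial instance
 * array and derive max_meters_allowed from available memory (roughly 3%
 * of physical memory, capped at DP_METER_NUM_MAX).
 */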
int ovs_meters_init(struct datapath *dp)
{
struct dp_meter_table *tbl = &dp->meter_tbl;
struct dp_meter_instance *ti;
unsigned long free_mem_bytes;
ti = dp_meter_instance_alloc(DP_METER_ARRAY_SIZE_MIN);
if (!ti)
return -ENOMEM;
/* Allow meters in a datapath to use ~3.12% of physical memory. */
free_mem_bytes = nr_free_buffer_pages() * (PAGE_SIZE >> 5);
tbl->max_meters_allowed = min(free_mem_bytes / sizeof(struct dp_meter),
DP_METER_NUM_MAX);
if (!tbl->max_meters_allowed)
goto out_err;
rcu_assign_pointer(tbl->ti, ti);
tbl->count = 0;
return 0;
out_err:
dp_meter_instance_free(ti);
return -ENOMEM;
}
void ovs_meters_exit(struct datapath *dp)
{
struct dp_meter_table *tbl = &dp->meter_tbl;
struct dp_meter_instance *ti = rcu_dereference_raw(tbl->ti);
int i;
for (i = 0; i < ti->n_meters; i++)
ovs_meter_free(rcu_dereference_raw(ti->dp_meters[i]));
dp_meter_instance_free(ti);
}
| linux-master | net/openvswitch/meter.c |
// SPDX-License-Identifier: GPL-2.0-only
/*
* Kernel Connection Multiplexor
*
* Copyright (c) 2016 Tom Herbert <[email protected]>
*/
#include <linux/bpf.h>
#include <linux/errno.h>
#include <linux/errqueue.h>
#include <linux/file.h>
#include <linux/filter.h>
#include <linux/in.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/net.h>
#include <linux/netdevice.h>
#include <linux/poll.h>
#include <linux/rculist.h>
#include <linux/skbuff.h>
#include <linux/socket.h>
#include <linux/uaccess.h>
#include <linux/workqueue.h>
#include <linux/syscalls.h>
#include <linux/sched/signal.h>
#include <net/kcm.h>
#include <net/netns/generic.h>
#include <net/sock.h>
#include <uapi/linux/kcm.h>
#include <trace/events/sock.h>
unsigned int kcm_net_id;
static struct kmem_cache *kcm_psockp __read_mostly;
static struct kmem_cache *kcm_muxp __read_mostly;
static struct workqueue_struct *kcm_wq;
static inline struct kcm_sock *kcm_sk(const struct sock *sk)
{
return (struct kcm_sock *)sk;
}
static inline struct kcm_tx_msg *kcm_tx_msg(struct sk_buff *skb)
{
return (struct kcm_tx_msg *)skb->cb;
}
static void report_csk_error(struct sock *csk, int err)
{
csk->sk_err = EPIPE;
sk_error_report(csk);
}
static void kcm_abort_tx_psock(struct kcm_psock *psock, int err,
bool wakeup_kcm)
{
struct sock *csk = psock->sk;
struct kcm_mux *mux = psock->mux;
/* Unrecoverable error in transmit */
spin_lock_bh(&mux->lock);
if (psock->tx_stopped) {
spin_unlock_bh(&mux->lock);
return;
}
psock->tx_stopped = 1;
KCM_STATS_INCR(psock->stats.tx_aborts);
if (!psock->tx_kcm) {
/* Take off psocks_avail list */
list_del(&psock->psock_avail_list);
} else if (wakeup_kcm) {
/* In this case psock is being aborted while outside of
* write_msgs and psock is reserved. Schedule tx_work
* to handle the failure there. Need to commit tx_stopped
* before queuing work.
*/
smp_mb();
queue_work(kcm_wq, &psock->tx_kcm->tx_work);
}
spin_unlock_bh(&mux->lock);
/* Report error on lower socket */
report_csk_error(csk, err);
}
/* RX mux lock held. */
static void kcm_update_rx_mux_stats(struct kcm_mux *mux,
struct kcm_psock *psock)
{
STRP_STATS_ADD(mux->stats.rx_bytes,
psock->strp.stats.bytes -
psock->saved_rx_bytes);
mux->stats.rx_msgs +=
psock->strp.stats.msgs - psock->saved_rx_msgs;
psock->saved_rx_msgs = psock->strp.stats.msgs;
psock->saved_rx_bytes = psock->strp.stats.bytes;
}
static void kcm_update_tx_mux_stats(struct kcm_mux *mux,
struct kcm_psock *psock)
{
KCM_STATS_ADD(mux->stats.tx_bytes,
psock->stats.tx_bytes - psock->saved_tx_bytes);
mux->stats.tx_msgs +=
psock->stats.tx_msgs - psock->saved_tx_msgs;
psock->saved_tx_msgs = psock->stats.tx_msgs;
psock->saved_tx_bytes = psock->stats.tx_bytes;
}
static int kcm_queue_rcv_skb(struct sock *sk, struct sk_buff *skb);
/* KCM is ready to receive messages on its queue -- either the KCM is new or
 * has become unblocked after being blocked on a full socket buffer. Queue any
* pending ready messages on a psock. RX mux lock held.
*/
static void kcm_rcv_ready(struct kcm_sock *kcm)
{
struct kcm_mux *mux = kcm->mux;
struct kcm_psock *psock;
struct sk_buff *skb;
if (unlikely(kcm->rx_wait || kcm->rx_psock || kcm->rx_disabled))
return;
while (unlikely((skb = __skb_dequeue(&mux->rx_hold_queue)))) {
if (kcm_queue_rcv_skb(&kcm->sk, skb)) {
/* Assuming buffer limit has been reached */
skb_queue_head(&mux->rx_hold_queue, skb);
WARN_ON(!sk_rmem_alloc_get(&kcm->sk));
return;
}
}
while (!list_empty(&mux->psocks_ready)) {
psock = list_first_entry(&mux->psocks_ready, struct kcm_psock,
psock_ready_list);
if (kcm_queue_rcv_skb(&kcm->sk, psock->ready_rx_msg)) {
/* Assuming buffer limit has been reached */
WARN_ON(!sk_rmem_alloc_get(&kcm->sk));
return;
}
/* Consumed the ready message on the psock. Schedule rx_work to
* get more messages.
*/
list_del(&psock->psock_ready_list);
psock->ready_rx_msg = NULL;
/* Commit clearing of ready_rx_msg for queuing work */
smp_mb();
strp_unpause(&psock->strp);
strp_check_rcv(&psock->strp);
}
/* Buffer limit is okay now, add to ready list */
list_add_tail(&kcm->wait_rx_list,
&kcm->mux->kcm_rx_waiters);
/* paired with lockless reads in kcm_rfree() */
WRITE_ONCE(kcm->rx_wait, true);
}
static void kcm_rfree(struct sk_buff *skb)
{
struct sock *sk = skb->sk;
struct kcm_sock *kcm = kcm_sk(sk);
struct kcm_mux *mux = kcm->mux;
unsigned int len = skb->truesize;
sk_mem_uncharge(sk, len);
atomic_sub(len, &sk->sk_rmem_alloc);
/* For reading rx_wait and rx_psock without holding lock */
smp_mb__after_atomic();
if (!READ_ONCE(kcm->rx_wait) && !READ_ONCE(kcm->rx_psock) &&
sk_rmem_alloc_get(sk) < sk->sk_rcvlowat) {
spin_lock_bh(&mux->rx_lock);
kcm_rcv_ready(kcm);
spin_unlock_bh(&mux->rx_lock);
}
}
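/* Charge an skb against the KCM socket's receive memory and queue it on
 * sk_receive_queue, waking any reader.  Returns -ENOMEM when the receive
 * buffer is full or -ENOBUFS when the memory cannot be scheduled.
 */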
static int kcm_queue_rcv_skb(struct sock *sk, struct sk_buff *skb)
{
struct sk_buff_head *list = &sk->sk_receive_queue;
if (atomic_read(&sk->sk_rmem_alloc) >= sk->sk_rcvbuf)
return -ENOMEM;
if (!sk_rmem_schedule(sk, skb, skb->truesize))
return -ENOBUFS;
skb->dev = NULL;
skb_orphan(skb);
skb->sk = sk;
skb->destructor = kcm_rfree;
atomic_add(skb->truesize, &sk->sk_rmem_alloc);
sk_mem_charge(sk, skb->truesize);
skb_queue_tail(list, skb);
if (!sock_flag(sk, SOCK_DEAD))
sk->sk_data_ready(sk);
return 0;
}
/* Requeue received messages for a kcm socket to other kcm sockets. This is
 * called when a kcm socket has had receive disabled.
* RX mux lock held.
*/
static void requeue_rx_msgs(struct kcm_mux *mux, struct sk_buff_head *head)
{
struct sk_buff *skb;
struct kcm_sock *kcm;
while ((skb = skb_dequeue(head))) {
/* Reset destructor to avoid calling kcm_rcv_ready */
skb->destructor = sock_rfree;
skb_orphan(skb);
try_again:
if (list_empty(&mux->kcm_rx_waiters)) {
skb_queue_tail(&mux->rx_hold_queue, skb);
continue;
}
kcm = list_first_entry(&mux->kcm_rx_waiters,
struct kcm_sock, wait_rx_list);
if (kcm_queue_rcv_skb(&kcm->sk, skb)) {
/* Should mean socket buffer full */
list_del(&kcm->wait_rx_list);
/* paired with lockless reads in kcm_rfree() */
WRITE_ONCE(kcm->rx_wait, false);
/* Commit rx_wait to read in kcm_free */
smp_wmb();
goto try_again;
}
}
}
/* Lower sock lock held */
static struct kcm_sock *reserve_rx_kcm(struct kcm_psock *psock,
struct sk_buff *head)
{
struct kcm_mux *mux = psock->mux;
struct kcm_sock *kcm;
WARN_ON(psock->ready_rx_msg);
if (psock->rx_kcm)
return psock->rx_kcm;
spin_lock_bh(&mux->rx_lock);
if (psock->rx_kcm) {
spin_unlock_bh(&mux->rx_lock);
return psock->rx_kcm;
}
kcm_update_rx_mux_stats(mux, psock);
if (list_empty(&mux->kcm_rx_waiters)) {
psock->ready_rx_msg = head;
strp_pause(&psock->strp);
list_add_tail(&psock->psock_ready_list,
&mux->psocks_ready);
spin_unlock_bh(&mux->rx_lock);
return NULL;
}
kcm = list_first_entry(&mux->kcm_rx_waiters,
struct kcm_sock, wait_rx_list);
list_del(&kcm->wait_rx_list);
/* paired with lockless reads in kcm_rfree() */
WRITE_ONCE(kcm->rx_wait, false);
psock->rx_kcm = kcm;
/* paired with lockless reads in kcm_rfree() */
WRITE_ONCE(kcm->rx_psock, psock);
spin_unlock_bh(&mux->rx_lock);
return kcm;
}
static void kcm_done(struct kcm_sock *kcm);
static void kcm_done_work(struct work_struct *w)
{
kcm_done(container_of(w, struct kcm_sock, done_work));
}
/* Lower sock held */
static void unreserve_rx_kcm(struct kcm_psock *psock,
bool rcv_ready)
{
struct kcm_sock *kcm = psock->rx_kcm;
struct kcm_mux *mux = psock->mux;
if (!kcm)
return;
spin_lock_bh(&mux->rx_lock);
psock->rx_kcm = NULL;
/* paired with lockless reads in kcm_rfree() */
WRITE_ONCE(kcm->rx_psock, NULL);
/* Commit kcm->rx_psock before sk_rmem_alloc_get to sync with
* kcm_rfree
*/
smp_mb();
if (unlikely(kcm->done)) {
spin_unlock_bh(&mux->rx_lock);
/* Need to run kcm_done in a task since we need to acquire
* callback locks which may already be held here.
*/
INIT_WORK(&kcm->done_work, kcm_done_work);
schedule_work(&kcm->done_work);
return;
}
if (unlikely(kcm->rx_disabled)) {
requeue_rx_msgs(mux, &kcm->sk.sk_receive_queue);
} else if (rcv_ready || unlikely(!sk_rmem_alloc_get(&kcm->sk))) {
/* Check for the degenerate race with rx_wait where all
* data was dequeued (accounted for in kcm_rfree).
*/
kcm_rcv_ready(kcm);
}
spin_unlock_bh(&mux->rx_lock);
}
/* Lower sock lock held */
static void psock_data_ready(struct sock *sk)
{
struct kcm_psock *psock;
trace_sk_data_ready(sk);
read_lock_bh(&sk->sk_callback_lock);
psock = (struct kcm_psock *)sk->sk_user_data;
if (likely(psock))
strp_data_ready(&psock->strp);
read_unlock_bh(&sk->sk_callback_lock);
}
/* Called with lower sock held */
static void kcm_rcv_strparser(struct strparser *strp, struct sk_buff *skb)
{
struct kcm_psock *psock = container_of(strp, struct kcm_psock, strp);
struct kcm_sock *kcm;
try_queue:
kcm = reserve_rx_kcm(psock, skb);
if (!kcm) {
/* Unable to reserve a KCM, message is held in psock and strp
* is paused.
*/
return;
}
if (kcm_queue_rcv_skb(&kcm->sk, skb)) {
/* Should mean socket buffer full */
unreserve_rx_kcm(psock, false);
goto try_queue;
}
}
static int kcm_parse_func_strparser(struct strparser *strp, struct sk_buff *skb)
{
struct kcm_psock *psock = container_of(strp, struct kcm_psock, strp);
struct bpf_prog *prog = psock->bpf_prog;
int res;
res = bpf_prog_run_pin_on_cpu(prog, skb);
return res;
}
static int kcm_read_sock_done(struct strparser *strp, int err)
{
struct kcm_psock *psock = container_of(strp, struct kcm_psock, strp);
unreserve_rx_kcm(psock, true);
return err;
}
static void psock_state_change(struct sock *sk)
{
/* TCP only does an EPOLLIN for a half close. Do an EPOLLHUP here
 * since the application will normally not poll with EPOLLIN
* on the TCP sockets.
*/
report_csk_error(sk, EPIPE);
}
static void psock_write_space(struct sock *sk)
{
struct kcm_psock *psock;
struct kcm_mux *mux;
struct kcm_sock *kcm;
read_lock_bh(&sk->sk_callback_lock);
psock = (struct kcm_psock *)sk->sk_user_data;
if (unlikely(!psock))
goto out;
mux = psock->mux;
spin_lock_bh(&mux->lock);
/* If the psock is reserved, a KCM socket is waiting to send on it. */
kcm = psock->tx_kcm;
if (kcm && !unlikely(kcm->tx_stopped))
queue_work(kcm_wq, &kcm->tx_work);
spin_unlock_bh(&mux->lock);
out:
read_unlock_bh(&sk->sk_callback_lock);
}
static void unreserve_psock(struct kcm_sock *kcm);
/* kcm sock is locked. */
static struct kcm_psock *reserve_psock(struct kcm_sock *kcm)
{
struct kcm_mux *mux = kcm->mux;
struct kcm_psock *psock;
psock = kcm->tx_psock;
smp_rmb(); /* Must read tx_psock before tx_wait */
if (psock) {
WARN_ON(kcm->tx_wait);
if (unlikely(psock->tx_stopped))
unreserve_psock(kcm);
else
return kcm->tx_psock;
}
spin_lock_bh(&mux->lock);
/* Check again under lock to see if a psock was reserved for this
 * kcm while the lock was dropped (see psock_now_avail).
*/
psock = kcm->tx_psock;
if (unlikely(psock)) {
WARN_ON(kcm->tx_wait);
spin_unlock_bh(&mux->lock);
return kcm->tx_psock;
}
if (!list_empty(&mux->psocks_avail)) {
psock = list_first_entry(&mux->psocks_avail,
struct kcm_psock,
psock_avail_list);
list_del(&psock->psock_avail_list);
if (kcm->tx_wait) {
list_del(&kcm->wait_psock_list);
kcm->tx_wait = false;
}
kcm->tx_psock = psock;
psock->tx_kcm = kcm;
KCM_STATS_INCR(psock->stats.reserved);
} else if (!kcm->tx_wait) {
list_add_tail(&kcm->wait_psock_list,
&mux->kcm_tx_waiters);
kcm->tx_wait = true;
}
spin_unlock_bh(&mux->lock);
return psock;
}
/* mux lock held */
static void psock_now_avail(struct kcm_psock *psock)
{
struct kcm_mux *mux = psock->mux;
struct kcm_sock *kcm;
if (list_empty(&mux->kcm_tx_waiters)) {
list_add_tail(&psock->psock_avail_list,
&mux->psocks_avail);
} else {
kcm = list_first_entry(&mux->kcm_tx_waiters,
struct kcm_sock,
wait_psock_list);
list_del(&kcm->wait_psock_list);
kcm->tx_wait = false;
psock->tx_kcm = kcm;
/* Commit before changing tx_psock since that is read in
* reserve_psock before queuing work.
*/
smp_mb();
kcm->tx_psock = psock;
KCM_STATS_INCR(psock->stats.reserved);
queue_work(kcm_wq, &kcm->tx_work);
}
}
/* kcm sock is locked. */
static void unreserve_psock(struct kcm_sock *kcm)
{
struct kcm_psock *psock;
struct kcm_mux *mux = kcm->mux;
spin_lock_bh(&mux->lock);
psock = kcm->tx_psock;
if (WARN_ON(!psock)) {
spin_unlock_bh(&mux->lock);
return;
}
smp_rmb(); /* Read tx_psock before tx_wait */
kcm_update_tx_mux_stats(mux, psock);
WARN_ON(kcm->tx_wait);
kcm->tx_psock = NULL;
psock->tx_kcm = NULL;
KCM_STATS_INCR(psock->stats.unreserved);
if (unlikely(psock->tx_stopped)) {
if (psock->done) {
/* Deferred free */
list_del(&psock->psock_list);
mux->psocks_cnt--;
sock_put(psock->sk);
fput(psock->sk->sk_socket->file);
kmem_cache_free(kcm_psockp, psock);
}
/* Don't put back on available list */
spin_unlock_bh(&mux->lock);
return;
}
psock_now_avail(psock);
spin_unlock_bh(&mux->lock);
}
static void kcm_report_tx_retry(struct kcm_sock *kcm)
{
struct kcm_mux *mux = kcm->mux;
spin_lock_bh(&mux->lock);
KCM_STATS_INCR(mux->stats.tx_retries);
spin_unlock_bh(&mux->lock);
}
/* Write any messages ready on the kcm socket. Called with kcm sock lock
* held. Return bytes actually sent or error.
*/
static int kcm_write_msgs(struct kcm_sock *kcm)
{
unsigned int total_sent = 0;
struct sock *sk = &kcm->sk;
struct kcm_psock *psock;
struct sk_buff *head;
int ret = 0;
kcm->tx_wait_more = false;
psock = kcm->tx_psock;
if (unlikely(psock && psock->tx_stopped)) {
/* A reserved psock was aborted asynchronously. Unreserve
* it and we'll retry the message.
*/
unreserve_psock(kcm);
kcm_report_tx_retry(kcm);
if (skb_queue_empty(&sk->sk_write_queue))
return 0;
kcm_tx_msg(skb_peek(&sk->sk_write_queue))->started_tx = false;
}
retry:
while ((head = skb_peek(&sk->sk_write_queue))) {
struct msghdr msg = {
.msg_flags = MSG_DONTWAIT | MSG_SPLICE_PAGES,
};
struct kcm_tx_msg *txm = kcm_tx_msg(head);
struct sk_buff *skb;
unsigned int msize;
int i;
if (!txm->started_tx) {
psock = reserve_psock(kcm);
if (!psock)
goto out;
skb = head;
txm->frag_offset = 0;
txm->sent = 0;
txm->started_tx = true;
} else {
if (WARN_ON(!psock)) {
ret = -EINVAL;
goto out;
}
skb = txm->frag_skb;
}
if (WARN_ON(!skb_shinfo(skb)->nr_frags)) {
ret = -EINVAL;
goto out;
}
msize = 0;
for (i = 0; i < skb_shinfo(skb)->nr_frags; i++)
msize += skb_shinfo(skb)->frags[i].bv_len;
iov_iter_bvec(&msg.msg_iter, ITER_SOURCE,
skb_shinfo(skb)->frags, skb_shinfo(skb)->nr_frags,
msize);
iov_iter_advance(&msg.msg_iter, txm->frag_offset);
do {
ret = sock_sendmsg(psock->sk->sk_socket, &msg);
if (ret <= 0) {
if (ret == -EAGAIN) {
/* Save state to try again when there's
* write space on the socket
*/
txm->frag_skb = skb;
ret = 0;
goto out;
}
/* Hard failure in sending message, abort this
* psock since it has lost framing
* synchronization and retry sending the
* message from the beginning.
*/
kcm_abort_tx_psock(psock, ret ? -ret : EPIPE,
true);
unreserve_psock(kcm);
psock = NULL;
txm->started_tx = false;
kcm_report_tx_retry(kcm);
ret = 0;
goto retry;
}
txm->sent += ret;
txm->frag_offset += ret;
KCM_STATS_ADD(psock->stats.tx_bytes, ret);
} while (msg.msg_iter.count > 0);
if (skb == head) {
if (skb_has_frag_list(skb)) {
txm->frag_skb = skb_shinfo(skb)->frag_list;
txm->frag_offset = 0;
continue;
}
} else if (skb->next) {
txm->frag_skb = skb->next;
txm->frag_offset = 0;
continue;
}
/* Successfully sent the whole packet, account for it. */
sk->sk_wmem_queued -= txm->sent;
total_sent += txm->sent;
skb_dequeue(&sk->sk_write_queue);
kfree_skb(head);
KCM_STATS_INCR(psock->stats.tx_msgs);
}
out:
if (!head) {
/* Done with all queued messages. */
WARN_ON(!skb_queue_empty(&sk->sk_write_queue));
if (psock)
unreserve_psock(kcm);
}
/* Check if write space is available */
sk->sk_write_space(sk);
return total_sent ? : ret;
}
static void kcm_tx_work(struct work_struct *w)
{
struct kcm_sock *kcm = container_of(w, struct kcm_sock, tx_work);
struct sock *sk = &kcm->sk;
int err;
lock_sock(sk);
/* Primarily for SOCK_DGRAM sockets, also handle asynchronous tx
* aborts
*/
err = kcm_write_msgs(kcm);
if (err < 0) {
/* Hard failure in write, report error on KCM socket */
pr_warn("KCM: Hard failure on kcm_write_msgs %d\n", err);
report_csk_error(&kcm->sk, -err);
goto out;
}
/* Primarily for SOCK_SEQPACKET sockets */
if (likely(sk->sk_socket) &&
test_bit(SOCK_NOSPACE, &sk->sk_socket->flags)) {
clear_bit(SOCK_NOSPACE, &sk->sk_socket->flags);
sk->sk_write_space(sk);
}
out:
release_sock(sk);
}
static void kcm_push(struct kcm_sock *kcm)
{
if (kcm->tx_wait_more)
kcm_write_msgs(kcm);
}
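/* Transmit path: copy or splice user data into the skb chain of the
 * current message.  For SOCK_DGRAM a message ends unless MSG_MORE is set;
 * for SOCK_SEQPACKET it ends on MSG_EOR.  Completed messages are queued
 * on sk_write_queue and written out unless MSG_BATCH defers the flush.
 */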
static int kcm_sendmsg(struct socket *sock, struct msghdr *msg, size_t len)
{
struct sock *sk = sock->sk;
struct kcm_sock *kcm = kcm_sk(sk);
struct sk_buff *skb = NULL, *head = NULL;
size_t copy, copied = 0;
long timeo = sock_sndtimeo(sk, msg->msg_flags & MSG_DONTWAIT);
int eor = (sock->type == SOCK_DGRAM) ?
!(msg->msg_flags & MSG_MORE) : !!(msg->msg_flags & MSG_EOR);
int err = -EPIPE;
lock_sock(sk);
/* Per tcp_sendmsg this should be in poll */
sk_clear_bit(SOCKWQ_ASYNC_NOSPACE, sk);
if (sk->sk_err)
goto out_error;
if (kcm->seq_skb) {
/* Previously opened message */
head = kcm->seq_skb;
skb = kcm_tx_msg(head)->last_skb;
goto start;
}
/* Call the sk_stream functions to manage the sndbuf mem. */
if (!sk_stream_memory_free(sk)) {
kcm_push(kcm);
set_bit(SOCK_NOSPACE, &sk->sk_socket->flags);
err = sk_stream_wait_memory(sk, &timeo);
if (err)
goto out_error;
}
if (msg_data_left(msg)) {
/* New message, alloc head skb */
head = alloc_skb(0, sk->sk_allocation);
while (!head) {
kcm_push(kcm);
err = sk_stream_wait_memory(sk, &timeo);
if (err)
goto out_error;
head = alloc_skb(0, sk->sk_allocation);
}
skb = head;
/* Set ip_summed to CHECKSUM_UNNECESSARY to avoid calling
* csum_and_copy_from_iter from skb_do_copy_data_nocache.
*/
skb->ip_summed = CHECKSUM_UNNECESSARY;
}
start:
while (msg_data_left(msg)) {
bool merge = true;
int i = skb_shinfo(skb)->nr_frags;
struct page_frag *pfrag = sk_page_frag(sk);
if (!sk_page_frag_refill(sk, pfrag))
goto wait_for_memory;
if (!skb_can_coalesce(skb, i, pfrag->page,
pfrag->offset)) {
if (i == MAX_SKB_FRAGS) {
struct sk_buff *tskb;
tskb = alloc_skb(0, sk->sk_allocation);
if (!tskb)
goto wait_for_memory;
if (head == skb)
skb_shinfo(head)->frag_list = tskb;
else
skb->next = tskb;
skb = tskb;
skb->ip_summed = CHECKSUM_UNNECESSARY;
continue;
}
merge = false;
}
if (msg->msg_flags & MSG_SPLICE_PAGES) {
copy = msg_data_left(msg);
if (!sk_wmem_schedule(sk, copy))
goto wait_for_memory;
err = skb_splice_from_iter(skb, &msg->msg_iter, copy,
sk->sk_allocation);
if (err < 0) {
if (err == -EMSGSIZE)
goto wait_for_memory;
goto out_error;
}
copy = err;
skb_shinfo(skb)->flags |= SKBFL_SHARED_FRAG;
sk_wmem_queued_add(sk, copy);
sk_mem_charge(sk, copy);
if (head != skb)
head->truesize += copy;
} else {
copy = min_t(int, msg_data_left(msg),
pfrag->size - pfrag->offset);
if (!sk_wmem_schedule(sk, copy))
goto wait_for_memory;
err = skb_copy_to_page_nocache(sk, &msg->msg_iter, skb,
pfrag->page,
pfrag->offset,
copy);
if (err)
goto out_error;
/* Update the skb. */
if (merge) {
skb_frag_size_add(
&skb_shinfo(skb)->frags[i - 1], copy);
} else {
skb_fill_page_desc(skb, i, pfrag->page,
pfrag->offset, copy);
get_page(pfrag->page);
}
pfrag->offset += copy;
}
copied += copy;
if (head != skb) {
head->len += copy;
head->data_len += copy;
}
continue;
wait_for_memory:
kcm_push(kcm);
err = sk_stream_wait_memory(sk, &timeo);
if (err)
goto out_error;
}
if (eor) {
bool not_busy = skb_queue_empty(&sk->sk_write_queue);
if (head) {
/* Message complete, queue it on send buffer */
__skb_queue_tail(&sk->sk_write_queue, head);
kcm->seq_skb = NULL;
KCM_STATS_INCR(kcm->stats.tx_msgs);
}
if (msg->msg_flags & MSG_BATCH) {
kcm->tx_wait_more = true;
} else if (kcm->tx_wait_more || not_busy) {
err = kcm_write_msgs(kcm);
if (err < 0) {
/* We got a hard error in write_msgs but have
* already queued this message. Report an error
* in the socket, but don't affect return value
* from sendmsg
*/
pr_warn("KCM: Hard failure on kcm_write_msgs\n");
report_csk_error(&kcm->sk, -err);
}
}
} else {
/* Message not complete, save state */
partial_message:
if (head) {
kcm->seq_skb = head;
kcm_tx_msg(head)->last_skb = skb;
}
}
KCM_STATS_ADD(kcm->stats.tx_bytes, copied);
release_sock(sk);
return copied;
out_error:
kcm_push(kcm);
if (sock->type == SOCK_SEQPACKET) {
/* Wrote some bytes before encountering an
* error, return partial success.
*/
if (copied)
goto partial_message;
if (head != kcm->seq_skb)
kfree_skb(head);
} else {
kfree_skb(head);
kcm->seq_skb = NULL;
}
err = sk_stream_error(sk, msg->msg_flags, err);
/* make sure we wake any epoll edge trigger waiter */
if (unlikely(skb_queue_len(&sk->sk_write_queue) == 0 && err == -EAGAIN))
sk->sk_write_space(sk);
release_sock(sk);
return err;
}
static void kcm_splice_eof(struct socket *sock)
{
struct sock *sk = sock->sk;
struct kcm_sock *kcm = kcm_sk(sk);
if (skb_queue_empty_lockless(&sk->sk_write_queue))
return;
lock_sock(sk);
kcm_write_msgs(kcm);
release_sock(sk);
}
static int kcm_recvmsg(struct socket *sock, struct msghdr *msg,
size_t len, int flags)
{
struct sock *sk = sock->sk;
struct kcm_sock *kcm = kcm_sk(sk);
int err = 0;
struct strp_msg *stm;
int copied = 0;
struct sk_buff *skb;
skb = skb_recv_datagram(sk, flags, &err);
if (!skb)
goto out;
/* Okay, have a message on the receive queue */
stm = strp_msg(skb);
if (len > stm->full_len)
len = stm->full_len;
err = skb_copy_datagram_msg(skb, stm->offset, msg, len);
if (err < 0)
goto out;
copied = len;
if (likely(!(flags & MSG_PEEK))) {
KCM_STATS_ADD(kcm->stats.rx_bytes, copied);
if (copied < stm->full_len) {
if (sock->type == SOCK_DGRAM) {
/* Truncated message */
msg->msg_flags |= MSG_TRUNC;
goto msg_finished;
}
stm->offset += copied;
stm->full_len -= copied;
} else {
msg_finished:
/* Finished with message */
msg->msg_flags |= MSG_EOR;
KCM_STATS_INCR(kcm->stats.rx_msgs);
}
}
out:
skb_free_datagram(sk, skb);
return copied ? : err;
}
static ssize_t kcm_splice_read(struct socket *sock, loff_t *ppos,
struct pipe_inode_info *pipe, size_t len,
unsigned int flags)
{
struct sock *sk = sock->sk;
struct kcm_sock *kcm = kcm_sk(sk);
struct strp_msg *stm;
int err = 0;
ssize_t copied;
struct sk_buff *skb;
/* Only support splice for SOCK_SEQPACKET */
skb = skb_recv_datagram(sk, flags, &err);
if (!skb)
goto err_out;
/* Okay, have a message on the receive queue */
stm = strp_msg(skb);
if (len > stm->full_len)
len = stm->full_len;
copied = skb_splice_bits(skb, sk, stm->offset, pipe, len, flags);
if (copied < 0) {
err = copied;
goto err_out;
}
KCM_STATS_ADD(kcm->stats.rx_bytes, copied);
stm->offset += copied;
stm->full_len -= copied;
/* We have no way to return MSG_EOR. If all the bytes have been
* read we still leave the message in the receive socket buffer.
* A subsequent recvmsg needs to be done to return MSG_EOR and
* finish reading the message.
*/
skb_free_datagram(sk, skb);
return copied;
err_out:
skb_free_datagram(sk, skb);
return err;
}
/* kcm sock lock held */
static void kcm_recv_disable(struct kcm_sock *kcm)
{
struct kcm_mux *mux = kcm->mux;
if (kcm->rx_disabled)
return;
spin_lock_bh(&mux->rx_lock);
kcm->rx_disabled = 1;
/* If a psock is reserved we'll do cleanup in unreserve */
if (!kcm->rx_psock) {
if (kcm->rx_wait) {
list_del(&kcm->wait_rx_list);
/* paired with lockless reads in kcm_rfree() */
WRITE_ONCE(kcm->rx_wait, false);
}
requeue_rx_msgs(mux, &kcm->sk.sk_receive_queue);
}
spin_unlock_bh(&mux->rx_lock);
}
/* kcm sock lock held */
static void kcm_recv_enable(struct kcm_sock *kcm)
{
struct kcm_mux *mux = kcm->mux;
if (!kcm->rx_disabled)
return;
spin_lock_bh(&mux->rx_lock);
kcm->rx_disabled = 0;
kcm_rcv_ready(kcm);
spin_unlock_bh(&mux->rx_lock);
}
static int kcm_setsockopt(struct socket *sock, int level, int optname,
sockptr_t optval, unsigned int optlen)
{
struct kcm_sock *kcm = kcm_sk(sock->sk);
int val, valbool;
int err = 0;
if (level != SOL_KCM)
return -ENOPROTOOPT;
if (optlen < sizeof(int))
return -EINVAL;
if (copy_from_sockptr(&val, optval, sizeof(int)))
return -EFAULT;
valbool = val ? 1 : 0;
switch (optname) {
case KCM_RECV_DISABLE:
lock_sock(&kcm->sk);
if (valbool)
kcm_recv_disable(kcm);
else
kcm_recv_enable(kcm);
release_sock(&kcm->sk);
break;
default:
err = -ENOPROTOOPT;
}
return err;
}
static int kcm_getsockopt(struct socket *sock, int level, int optname,
char __user *optval, int __user *optlen)
{
struct kcm_sock *kcm = kcm_sk(sock->sk);
int val, len;
if (level != SOL_KCM)
return -ENOPROTOOPT;
if (get_user(len, optlen))
return -EFAULT;
len = min_t(unsigned int, len, sizeof(int));
if (len < 0)
return -EINVAL;
switch (optname) {
case KCM_RECV_DISABLE:
val = kcm->rx_disabled;
break;
default:
return -ENOPROTOOPT;
}
if (put_user(len, optlen))
return -EFAULT;
if (copy_to_user(optval, &val, len))
return -EFAULT;
return 0;
}
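/* Initialize a KCM socket and bind it to a mux: assign the lowest unused
 * index, add it to the mux's socket list and mark it ready to receive.
 */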
static void init_kcm_sock(struct kcm_sock *kcm, struct kcm_mux *mux)
{
struct kcm_sock *tkcm;
struct list_head *head;
int index = 0;
/* For SOCK_SEQPACKET sock type, datagram_poll checks the sk_state, so
 * we set sk_state; otherwise epoll_wait always returns right away with
* EPOLLHUP
*/
kcm->sk.sk_state = TCP_ESTABLISHED;
/* Add to mux's kcm sockets list */
kcm->mux = mux;
spin_lock_bh(&mux->lock);
head = &mux->kcm_socks;
list_for_each_entry(tkcm, &mux->kcm_socks, kcm_sock_list) {
if (tkcm->index != index)
break;
head = &tkcm->kcm_sock_list;
index++;
}
list_add(&kcm->kcm_sock_list, head);
kcm->index = index;
mux->kcm_socks_cnt++;
spin_unlock_bh(&mux->lock);
INIT_WORK(&kcm->tx_work, kcm_tx_work);
spin_lock_bh(&mux->rx_lock);
kcm_rcv_ready(kcm);
spin_unlock_bh(&mux->rx_lock);
}
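/* Attach a connected TCP socket to the mux as a psock.  The given BPF
 * program becomes the strparser message parser, and the lower socket's
 * callbacks are redirected to the psock handlers.
 */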
static int kcm_attach(struct socket *sock, struct socket *csock,
struct bpf_prog *prog)
{
struct kcm_sock *kcm = kcm_sk(sock->sk);
struct kcm_mux *mux = kcm->mux;
struct sock *csk;
struct kcm_psock *psock = NULL, *tpsock;
struct list_head *head;
int index = 0;
static const struct strp_callbacks cb = {
.rcv_msg = kcm_rcv_strparser,
.parse_msg = kcm_parse_func_strparser,
.read_sock_done = kcm_read_sock_done,
};
int err = 0;
csk = csock->sk;
if (!csk)
return -EINVAL;
lock_sock(csk);
/* Only allow TCP sockets to be attached for now */
if ((csk->sk_family != AF_INET && csk->sk_family != AF_INET6) ||
csk->sk_protocol != IPPROTO_TCP) {
err = -EOPNOTSUPP;
goto out;
}
/* Don't allow listeners or closed sockets */
if (csk->sk_state == TCP_LISTEN || csk->sk_state == TCP_CLOSE) {
err = -EOPNOTSUPP;
goto out;
}
psock = kmem_cache_zalloc(kcm_psockp, GFP_KERNEL);
if (!psock) {
err = -ENOMEM;
goto out;
}
psock->mux = mux;
psock->sk = csk;
psock->bpf_prog = prog;
write_lock_bh(&csk->sk_callback_lock);
/* Check if sk_user_data is already in use by KCM or someone else.
* Must be done under lock to prevent race conditions.
*/
if (csk->sk_user_data) {
write_unlock_bh(&csk->sk_callback_lock);
kmem_cache_free(kcm_psockp, psock);
err = -EALREADY;
goto out;
}
err = strp_init(&psock->strp, csk, &cb);
if (err) {
write_unlock_bh(&csk->sk_callback_lock);
kmem_cache_free(kcm_psockp, psock);
goto out;
}
psock->save_data_ready = csk->sk_data_ready;
psock->save_write_space = csk->sk_write_space;
psock->save_state_change = csk->sk_state_change;
csk->sk_user_data = psock;
csk->sk_data_ready = psock_data_ready;
csk->sk_write_space = psock_write_space;
csk->sk_state_change = psock_state_change;
write_unlock_bh(&csk->sk_callback_lock);
sock_hold(csk);
/* Finished initialization, now add the psock to the MUX. */
spin_lock_bh(&mux->lock);
head = &mux->psocks;
list_for_each_entry(tpsock, &mux->psocks, psock_list) {
if (tpsock->index != index)
break;
head = &tpsock->psock_list;
index++;
}
list_add(&psock->psock_list, head);
psock->index = index;
KCM_STATS_INCR(mux->stats.psock_attach);
mux->psocks_cnt++;
psock_now_avail(psock);
spin_unlock_bh(&mux->lock);
/* Schedule RX work in case there are already bytes queued */
strp_check_rcv(&psock->strp);
out:
release_sock(csk);
return err;
}
static int kcm_attach_ioctl(struct socket *sock, struct kcm_attach *info)
{
struct socket *csock;
struct bpf_prog *prog;
int err;
csock = sockfd_lookup(info->fd, &err);
if (!csock)
return -ENOENT;
prog = bpf_prog_get_type(info->bpf_fd, BPF_PROG_TYPE_SOCKET_FILTER);
if (IS_ERR(prog)) {
err = PTR_ERR(prog);
goto out;
}
err = kcm_attach(sock, csock, prog);
if (err) {
bpf_prog_put(prog);
goto out;
}
/* Keep reference on file also */
return 0;
out:
sockfd_put(csock);
return err;
}
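/* Detach a psock from its lower TCP socket: restore the saved callbacks,
 * stop the strparser and drop any held receive message.  If the psock is
 * still reserved for transmit, final cleanup is deferred to the kcm tx
 * path.
 */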
static void kcm_unattach(struct kcm_psock *psock)
{
struct sock *csk = psock->sk;
struct kcm_mux *mux = psock->mux;
lock_sock(csk);
/* Stop getting callbacks from TCP socket. After this there should
* be no way to reserve a kcm for this psock.
*/
write_lock_bh(&csk->sk_callback_lock);
csk->sk_user_data = NULL;
csk->sk_data_ready = psock->save_data_ready;
csk->sk_write_space = psock->save_write_space;
csk->sk_state_change = psock->save_state_change;
strp_stop(&psock->strp);
if (WARN_ON(psock->rx_kcm)) {
write_unlock_bh(&csk->sk_callback_lock);
release_sock(csk);
return;
}
spin_lock_bh(&mux->rx_lock);
/* Stop receiver activities. After this point psock should not be
* able to get onto ready list either through callbacks or work.
*/
if (psock->ready_rx_msg) {
list_del(&psock->psock_ready_list);
kfree_skb(psock->ready_rx_msg);
psock->ready_rx_msg = NULL;
KCM_STATS_INCR(mux->stats.rx_ready_drops);
}
spin_unlock_bh(&mux->rx_lock);
write_unlock_bh(&csk->sk_callback_lock);
/* Call strp_done without sock lock */
release_sock(csk);
strp_done(&psock->strp);
lock_sock(csk);
bpf_prog_put(psock->bpf_prog);
spin_lock_bh(&mux->lock);
aggregate_psock_stats(&psock->stats, &mux->aggregate_psock_stats);
save_strp_stats(&psock->strp, &mux->aggregate_strp_stats);
KCM_STATS_INCR(mux->stats.psock_unattach);
if (psock->tx_kcm) {
/* psock was reserved. Just mark it finished and we will clean
 * up in the kcm paths; we need the kcm lock, which cannot be
 * acquired here.
*/
KCM_STATS_INCR(mux->stats.psock_unattach_rsvd);
spin_unlock_bh(&mux->lock);
/* We are unattaching a socket that is reserved. Abort the
* socket since we may be out of sync in sending on it. We need
* to do this without the mux lock.
*/
kcm_abort_tx_psock(psock, EPIPE, false);
spin_lock_bh(&mux->lock);
if (!psock->tx_kcm) {
/* psock was unreserved in the window where the mux was unlocked */
goto no_reserved;
}
psock->done = 1;
/* Commit done before queuing work to process it */
smp_mb();
/* Queue tx work to make sure psock->done is handled */
queue_work(kcm_wq, &psock->tx_kcm->tx_work);
spin_unlock_bh(&mux->lock);
} else {
no_reserved:
if (!psock->tx_stopped)
list_del(&psock->psock_avail_list);
list_del(&psock->psock_list);
mux->psocks_cnt--;
spin_unlock_bh(&mux->lock);
sock_put(csk);
fput(csk->sk_socket->file);
kmem_cache_free(kcm_psockp, psock);
}
release_sock(csk);
}
static int kcm_unattach_ioctl(struct socket *sock, struct kcm_unattach *info)
{
struct kcm_sock *kcm = kcm_sk(sock->sk);
struct kcm_mux *mux = kcm->mux;
struct kcm_psock *psock;
struct socket *csock;
struct sock *csk;
int err;
csock = sockfd_lookup(info->fd, &err);
if (!csock)
return -ENOENT;
csk = csock->sk;
if (!csk) {
err = -EINVAL;
goto out;
}
err = -ENOENT;
spin_lock_bh(&mux->lock);
list_for_each_entry(psock, &mux->psocks, psock_list) {
if (psock->sk != csk)
continue;
/* Found the matching psock */
if (psock->unattaching || WARN_ON(psock->done)) {
err = -EALREADY;
break;
}
psock->unattaching = 1;
spin_unlock_bh(&mux->lock);
/* Lower socket lock should already be held */
kcm_unattach(psock);
err = 0;
goto out;
}
spin_unlock_bh(&mux->lock);
out:
sockfd_put(csock);
return err;
}
static struct proto kcm_proto = {
.name = "KCM",
.owner = THIS_MODULE,
.obj_size = sizeof(struct kcm_sock),
};
/* Clone a kcm socket. */
static struct file *kcm_clone(struct socket *osock)
{
struct socket *newsock;
struct sock *newsk;
newsock = sock_alloc();
if (!newsock)
return ERR_PTR(-ENFILE);
newsock->type = osock->type;
newsock->ops = osock->ops;
__module_get(newsock->ops->owner);
newsk = sk_alloc(sock_net(osock->sk), PF_KCM, GFP_KERNEL,
&kcm_proto, false);
if (!newsk) {
sock_release(newsock);
return ERR_PTR(-ENOMEM);
}
sock_init_data(newsock, newsk);
init_kcm_sock(kcm_sk(newsk), kcm_sk(osock->sk)->mux);
return sock_alloc_file(newsock, 0, osock->sk->sk_prot_creator->name);
}
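/* ioctl entry point: SIOCKCMATTACH/SIOCKCMUNATTACH add or remove a lower
 * socket, and SIOCKCMCLONE creates another KCM socket on the same mux and
 * returns its file descriptor.
 */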
static int kcm_ioctl(struct socket *sock, unsigned int cmd, unsigned long arg)
{
int err;
switch (cmd) {
case SIOCKCMATTACH: {
struct kcm_attach info;
if (copy_from_user(&info, (void __user *)arg, sizeof(info)))
return -EFAULT;
err = kcm_attach_ioctl(sock, &info);
break;
}
case SIOCKCMUNATTACH: {
struct kcm_unattach info;
if (copy_from_user(&info, (void __user *)arg, sizeof(info)))
return -EFAULT;
err = kcm_unattach_ioctl(sock, &info);
break;
}
case SIOCKCMCLONE: {
struct kcm_clone info;
struct file *file;
info.fd = get_unused_fd_flags(0);
if (unlikely(info.fd < 0))
return info.fd;
file = kcm_clone(sock);
if (IS_ERR(file)) {
put_unused_fd(info.fd);
return PTR_ERR(file);
}
if (copy_to_user((void __user *)arg, &info,
sizeof(info))) {
put_unused_fd(info.fd);
fput(file);
return -EFAULT;
}
fd_install(info.fd, file);
err = 0;
break;
}
default:
err = -ENOIOCTLCMD;
break;
}
return err;
}
static void free_mux(struct rcu_head *rcu)
{
struct kcm_mux *mux = container_of(rcu,
struct kcm_mux, rcu);
kmem_cache_free(kcm_muxp, mux);
}
static void release_mux(struct kcm_mux *mux)
{
struct kcm_net *knet = mux->knet;
struct kcm_psock *psock, *tmp_psock;
/* Release psocks */
list_for_each_entry_safe(psock, tmp_psock,
&mux->psocks, psock_list) {
if (!WARN_ON(psock->unattaching))
kcm_unattach(psock);
}
if (WARN_ON(mux->psocks_cnt))
return;
__skb_queue_purge(&mux->rx_hold_queue);
mutex_lock(&knet->mutex);
aggregate_mux_stats(&mux->stats, &knet->aggregate_mux_stats);
aggregate_psock_stats(&mux->aggregate_psock_stats,
&knet->aggregate_psock_stats);
aggregate_strp_stats(&mux->aggregate_strp_stats,
&knet->aggregate_strp_stats);
list_del_rcu(&mux->kcm_mux_list);
knet->count--;
mutex_unlock(&knet->mutex);
call_rcu(&mux->rcu, free_mux);
}
static void kcm_done(struct kcm_sock *kcm)
{
struct kcm_mux *mux = kcm->mux;
struct sock *sk = &kcm->sk;
int socks_cnt;
spin_lock_bh(&mux->rx_lock);
if (kcm->rx_psock) {
/* Cleanup in unreserve_rx_kcm */
WARN_ON(kcm->done);
kcm->rx_disabled = 1;
kcm->done = 1;
spin_unlock_bh(&mux->rx_lock);
return;
}
if (kcm->rx_wait) {
list_del(&kcm->wait_rx_list);
/* paired with lockless reads in kcm_rfree() */
WRITE_ONCE(kcm->rx_wait, false);
}
/* Move any pending receive messages to other kcm sockets */
requeue_rx_msgs(mux, &sk->sk_receive_queue);
spin_unlock_bh(&mux->rx_lock);
if (WARN_ON(sk_rmem_alloc_get(sk)))
return;
/* Detach from MUX */
spin_lock_bh(&mux->lock);
list_del(&kcm->kcm_sock_list);
mux->kcm_socks_cnt--;
socks_cnt = mux->kcm_socks_cnt;
spin_unlock_bh(&mux->lock);
if (!socks_cnt) {
/* We are done with the mux now. */
release_mux(mux);
}
WARN_ON(kcm->rx_wait);
sock_put(&kcm->sk);
}
/* Close a KCM socket from its release() handler.
* If this is the last KCM socket on the MUX, destroy the MUX.
*/
static int kcm_release(struct socket *sock)
{
struct sock *sk = sock->sk;
struct kcm_sock *kcm;
struct kcm_mux *mux;
struct kcm_psock *psock;
if (!sk)
return 0;
kcm = kcm_sk(sk);
mux = kcm->mux;
lock_sock(sk);
sock_orphan(sk);
kfree_skb(kcm->seq_skb);
/* Purge queue under lock to avoid race condition with tx_work trying
* to act when queue is nonempty. If tx_work runs after this point
* it will just return.
*/
__skb_queue_purge(&sk->sk_write_queue);
/* Set tx_stopped. This is checked when psock is bound to a kcm and we
* get a writespace callback. This prevents further work being queued
 * from the callback (unbinding the psock occurs after canceling work).
*/
kcm->tx_stopped = 1;
release_sock(sk);
spin_lock_bh(&mux->lock);
if (kcm->tx_wait) {
/* Take off the tx_wait list; after this point there should be no way
* that a psock will be assigned to this kcm.
*/
list_del(&kcm->wait_psock_list);
kcm->tx_wait = false;
}
spin_unlock_bh(&mux->lock);
/* Cancel work. After this point there should be no outside references
* to the kcm socket.
*/
cancel_work_sync(&kcm->tx_work);
lock_sock(sk);
psock = kcm->tx_psock;
if (psock) {
/* A psock was reserved, so we need to kill it since it
* may already have some bytes queued from a message. We
* need to do this after removing kcm from tx_wait list.
*/
kcm_abort_tx_psock(psock, EPIPE, false);
unreserve_psock(kcm);
}
release_sock(sk);
WARN_ON(kcm->tx_wait);
WARN_ON(kcm->tx_psock);
sock->sk = NULL;
kcm_done(kcm);
return 0;
}
static const struct proto_ops kcm_dgram_ops = {
.family = PF_KCM,
.owner = THIS_MODULE,
.release = kcm_release,
.bind = sock_no_bind,
.connect = sock_no_connect,
.socketpair = sock_no_socketpair,
.accept = sock_no_accept,
.getname = sock_no_getname,
.poll = datagram_poll,
.ioctl = kcm_ioctl,
.listen = sock_no_listen,
.shutdown = sock_no_shutdown,
.setsockopt = kcm_setsockopt,
.getsockopt = kcm_getsockopt,
.sendmsg = kcm_sendmsg,
.recvmsg = kcm_recvmsg,
.mmap = sock_no_mmap,
.splice_eof = kcm_splice_eof,
};
static const struct proto_ops kcm_seqpacket_ops = {
.family = PF_KCM,
.owner = THIS_MODULE,
.release = kcm_release,
.bind = sock_no_bind,
.connect = sock_no_connect,
.socketpair = sock_no_socketpair,
.accept = sock_no_accept,
.getname = sock_no_getname,
.poll = datagram_poll,
.ioctl = kcm_ioctl,
.listen = sock_no_listen,
.shutdown = sock_no_shutdown,
.setsockopt = kcm_setsockopt,
.getsockopt = kcm_getsockopt,
.sendmsg = kcm_sendmsg,
.recvmsg = kcm_recvmsg,
.mmap = sock_no_mmap,
.splice_eof = kcm_splice_eof,
.splice_read = kcm_splice_read,
};
/* Create proto operation for kcm sockets */
static int kcm_create(struct net *net, struct socket *sock,
int protocol, int kern)
{
struct kcm_net *knet = net_generic(net, kcm_net_id);
struct sock *sk;
struct kcm_mux *mux;
switch (sock->type) {
case SOCK_DGRAM:
sock->ops = &kcm_dgram_ops;
break;
case SOCK_SEQPACKET:
sock->ops = &kcm_seqpacket_ops;
break;
default:
return -ESOCKTNOSUPPORT;
}
if (protocol != KCMPROTO_CONNECTED)
return -EPROTONOSUPPORT;
sk = sk_alloc(net, PF_KCM, GFP_KERNEL, &kcm_proto, kern);
if (!sk)
return -ENOMEM;
/* Allocate a kcm mux, shared between KCM sockets */
mux = kmem_cache_zalloc(kcm_muxp, GFP_KERNEL);
if (!mux) {
sk_free(sk);
return -ENOMEM;
}
spin_lock_init(&mux->lock);
spin_lock_init(&mux->rx_lock);
INIT_LIST_HEAD(&mux->kcm_socks);
INIT_LIST_HEAD(&mux->kcm_rx_waiters);
INIT_LIST_HEAD(&mux->kcm_tx_waiters);
INIT_LIST_HEAD(&mux->psocks);
INIT_LIST_HEAD(&mux->psocks_ready);
INIT_LIST_HEAD(&mux->psocks_avail);
mux->knet = knet;
/* Add new MUX to list */
mutex_lock(&knet->mutex);
list_add_rcu(&mux->kcm_mux_list, &knet->mux_list);
knet->count++;
mutex_unlock(&knet->mutex);
skb_queue_head_init(&mux->rx_hold_queue);
/* Init KCM socket */
sock_init_data(sock, sk);
init_kcm_sock(kcm_sk(sk), mux);
return 0;
}
static const struct net_proto_family kcm_family_ops = {
.family = PF_KCM,
.create = kcm_create,
.owner = THIS_MODULE,
};
static __net_init int kcm_init_net(struct net *net)
{
struct kcm_net *knet = net_generic(net, kcm_net_id);
INIT_LIST_HEAD_RCU(&knet->mux_list);
mutex_init(&knet->mutex);
return 0;
}
static __net_exit void kcm_exit_net(struct net *net)
{
struct kcm_net *knet = net_generic(net, kcm_net_id);
/* All KCM sockets should be closed at this point, which should mean
* that all multiplexors and psocks have been destroyed.
*/
WARN_ON(!list_empty(&knet->mux_list));
mutex_destroy(&knet->mutex);
}
static struct pernet_operations kcm_net_ops = {
.init = kcm_init_net,
.exit = kcm_exit_net,
.id = &kcm_net_id,
.size = sizeof(struct kcm_net),
};
static int __init kcm_init(void)
{
int err = -ENOMEM;
kcm_muxp = kmem_cache_create("kcm_mux_cache",
sizeof(struct kcm_mux), 0,
SLAB_HWCACHE_ALIGN, NULL);
if (!kcm_muxp)
goto fail;
kcm_psockp = kmem_cache_create("kcm_psock_cache",
sizeof(struct kcm_psock), 0,
SLAB_HWCACHE_ALIGN, NULL);
if (!kcm_psockp)
goto fail;
kcm_wq = create_singlethread_workqueue("kkcmd");
if (!kcm_wq)
goto fail;
err = proto_register(&kcm_proto, 1);
if (err)
goto fail;
err = register_pernet_device(&kcm_net_ops);
if (err)
goto net_ops_fail;
err = sock_register(&kcm_family_ops);
if (err)
goto sock_register_fail;
err = kcm_proc_init();
if (err)
goto proc_init_fail;
return 0;
proc_init_fail:
sock_unregister(PF_KCM);
sock_register_fail:
unregister_pernet_device(&kcm_net_ops);
net_ops_fail:
proto_unregister(&kcm_proto);
fail:
kmem_cache_destroy(kcm_muxp);
kmem_cache_destroy(kcm_psockp);
if (kcm_wq)
destroy_workqueue(kcm_wq);
return err;
}
static void __exit kcm_exit(void)
{
kcm_proc_exit();
sock_unregister(PF_KCM);
unregister_pernet_device(&kcm_net_ops);
proto_unregister(&kcm_proto);
destroy_workqueue(kcm_wq);
kmem_cache_destroy(kcm_muxp);
kmem_cache_destroy(kcm_psockp);
}
module_init(kcm_init);
module_exit(kcm_exit);
MODULE_LICENSE("GPL");
MODULE_ALIAS_NETPROTO(PF_KCM);
| linux-master | net/kcm/kcmsock.c |
// SPDX-License-Identifier: GPL-2.0
#include <linux/in.h>
#include <linux/inet.h>
#include <linux/list.h>
#include <linux/module.h>
#include <linux/net.h>
#include <linux/proc_fs.h>
#include <linux/rculist.h>
#include <linux/seq_file.h>
#include <linux/socket.h>
#include <net/inet_sock.h>
#include <net/kcm.h>
#include <net/net_namespace.h>
#include <net/netns/generic.h>
#include <net/tcp.h>
#ifdef CONFIG_PROC_FS
static struct kcm_mux *kcm_get_first(struct seq_file *seq)
{
struct net *net = seq_file_net(seq);
struct kcm_net *knet = net_generic(net, kcm_net_id);
return list_first_or_null_rcu(&knet->mux_list,
struct kcm_mux, kcm_mux_list);
}
static struct kcm_mux *kcm_get_next(struct kcm_mux *mux)
{
struct kcm_net *knet = mux->knet;
return list_next_or_null_rcu(&knet->mux_list, &mux->kcm_mux_list,
struct kcm_mux, kcm_mux_list);
}
static struct kcm_mux *kcm_get_idx(struct seq_file *seq, loff_t pos)
{
struct net *net = seq_file_net(seq);
struct kcm_net *knet = net_generic(net, kcm_net_id);
struct kcm_mux *m;
list_for_each_entry_rcu(m, &knet->mux_list, kcm_mux_list) {
if (!pos)
return m;
--pos;
}
return NULL;
}
static void *kcm_seq_next(struct seq_file *seq, void *v, loff_t *pos)
{
void *p;
if (v == SEQ_START_TOKEN)
p = kcm_get_first(seq);
else
p = kcm_get_next(v);
++*pos;
return p;
}
static void *kcm_seq_start(struct seq_file *seq, loff_t *pos)
__acquires(rcu)
{
rcu_read_lock();
if (!*pos)
return SEQ_START_TOKEN;
else
return kcm_get_idx(seq, *pos - 1);
}
static void kcm_seq_stop(struct seq_file *seq, void *v)
__releases(rcu)
{
rcu_read_unlock();
}
struct kcm_proc_mux_state {
struct seq_net_private p;
int idx;
};
static void kcm_format_mux_header(struct seq_file *seq)
{
struct net *net = seq_file_net(seq);
struct kcm_net *knet = net_generic(net, kcm_net_id);
seq_printf(seq,
"*** KCM statistics (%d MUX) ****\n",
knet->count);
seq_printf(seq,
"%-14s %-10s %-16s %-10s %-16s %-8s %-8s %-8s %-8s %s",
"Object",
"RX-Msgs",
"RX-Bytes",
"TX-Msgs",
"TX-Bytes",
"Recv-Q",
"Rmem",
"Send-Q",
"Smem",
"Status");
/* XXX: pdsts header stuff here */
seq_puts(seq, "\n");
}
static void kcm_format_sock(struct kcm_sock *kcm, struct seq_file *seq,
int i, int *len)
{
seq_printf(seq,
" kcm-%-7u %-10llu %-16llu %-10llu %-16llu %-8d %-8d %-8d %-8s ",
kcm->index,
kcm->stats.rx_msgs,
kcm->stats.rx_bytes,
kcm->stats.tx_msgs,
kcm->stats.tx_bytes,
kcm->sk.sk_receive_queue.qlen,
sk_rmem_alloc_get(&kcm->sk),
kcm->sk.sk_write_queue.qlen,
"-");
if (kcm->tx_psock)
seq_printf(seq, "Psck-%u ", kcm->tx_psock->index);
if (kcm->tx_wait)
seq_puts(seq, "TxWait ");
if (kcm->tx_wait_more)
seq_puts(seq, "WMore ");
if (kcm->rx_wait)
seq_puts(seq, "RxWait ");
seq_puts(seq, "\n");
}
static void kcm_format_psock(struct kcm_psock *psock, struct seq_file *seq,
int i, int *len)
{
seq_printf(seq,
" psock-%-5u %-10llu %-16llu %-10llu %-16llu %-8d %-8d %-8d %-8d ",
psock->index,
psock->strp.stats.msgs,
psock->strp.stats.bytes,
psock->stats.tx_msgs,
psock->stats.tx_bytes,
psock->sk->sk_receive_queue.qlen,
atomic_read(&psock->sk->sk_rmem_alloc),
psock->sk->sk_write_queue.qlen,
refcount_read(&psock->sk->sk_wmem_alloc));
if (psock->done)
seq_puts(seq, "Done ");
if (psock->tx_stopped)
seq_puts(seq, "TxStop ");
if (psock->strp.stopped)
seq_puts(seq, "RxStop ");
if (psock->tx_kcm)
seq_printf(seq, "Rsvd-%d ", psock->tx_kcm->index);
if (!psock->strp.paused && !psock->ready_rx_msg) {
if (psock->sk->sk_receive_queue.qlen) {
if (psock->strp.need_bytes)
seq_printf(seq, "RxWait=%u ",
psock->strp.need_bytes);
else
seq_printf(seq, "RxWait ");
}
} else {
if (psock->strp.paused)
seq_puts(seq, "RxPause ");
if (psock->ready_rx_msg)
seq_puts(seq, "RdyRx ");
}
seq_puts(seq, "\n");
}
static void
kcm_format_mux(struct kcm_mux *mux, loff_t idx, struct seq_file *seq)
{
int i, len;
struct kcm_sock *kcm;
struct kcm_psock *psock;
/* mux information */
seq_printf(seq,
"%-6s%-8s %-10llu %-16llu %-10llu %-16llu %-8s %-8s %-8s %-8s ",
"mux", "",
mux->stats.rx_msgs,
mux->stats.rx_bytes,
mux->stats.tx_msgs,
mux->stats.tx_bytes,
"-", "-", "-", "-");
seq_printf(seq, "KCMs: %d, Psocks %d\n",
mux->kcm_socks_cnt, mux->psocks_cnt);
/* kcm sock information */
i = 0;
spin_lock_bh(&mux->lock);
list_for_each_entry(kcm, &mux->kcm_socks, kcm_sock_list) {
kcm_format_sock(kcm, seq, i, &len);
i++;
}
i = 0;
list_for_each_entry(psock, &mux->psocks, psock_list) {
kcm_format_psock(psock, seq, i, &len);
i++;
}
spin_unlock_bh(&mux->lock);
}
static int kcm_seq_show(struct seq_file *seq, void *v)
{
struct kcm_proc_mux_state *mux_state;
mux_state = seq->private;
if (v == SEQ_START_TOKEN) {
mux_state->idx = 0;
kcm_format_mux_header(seq);
} else {
kcm_format_mux(v, mux_state->idx, seq);
mux_state->idx++;
}
return 0;
}
static const struct seq_operations kcm_seq_ops = {
.show = kcm_seq_show,
.start = kcm_seq_start,
.next = kcm_seq_next,
.stop = kcm_seq_stop,
};
static int kcm_stats_seq_show(struct seq_file *seq, void *v)
{
struct kcm_psock_stats psock_stats;
struct kcm_mux_stats mux_stats;
struct strp_aggr_stats strp_stats;
struct kcm_mux *mux;
struct kcm_psock *psock;
struct net *net = seq->private;
struct kcm_net *knet = net_generic(net, kcm_net_id);
memset(&mux_stats, 0, sizeof(mux_stats));
memset(&psock_stats, 0, sizeof(psock_stats));
memset(&strp_stats, 0, sizeof(strp_stats));
mutex_lock(&knet->mutex);
aggregate_mux_stats(&knet->aggregate_mux_stats, &mux_stats);
aggregate_psock_stats(&knet->aggregate_psock_stats,
&psock_stats);
aggregate_strp_stats(&knet->aggregate_strp_stats,
&strp_stats);
list_for_each_entry(mux, &knet->mux_list, kcm_mux_list) {
spin_lock_bh(&mux->lock);
aggregate_mux_stats(&mux->stats, &mux_stats);
aggregate_psock_stats(&mux->aggregate_psock_stats,
&psock_stats);
aggregate_strp_stats(&mux->aggregate_strp_stats,
&strp_stats);
list_for_each_entry(psock, &mux->psocks, psock_list) {
aggregate_psock_stats(&psock->stats, &psock_stats);
save_strp_stats(&psock->strp, &strp_stats);
}
spin_unlock_bh(&mux->lock);
}
mutex_unlock(&knet->mutex);
seq_printf(seq,
"%-8s %-10s %-16s %-10s %-16s %-10s %-10s %-10s %-10s %-10s\n",
"MUX",
"RX-Msgs",
"RX-Bytes",
"TX-Msgs",
"TX-Bytes",
"TX-Retries",
"Attach",
"Unattach",
"UnattchRsvd",
"RX-RdyDrops");
seq_printf(seq,
"%-8s %-10llu %-16llu %-10llu %-16llu %-10u %-10u %-10u %-10u %-10u\n",
"",
mux_stats.rx_msgs,
mux_stats.rx_bytes,
mux_stats.tx_msgs,
mux_stats.tx_bytes,
mux_stats.tx_retries,
mux_stats.psock_attach,
mux_stats.psock_unattach_rsvd,
mux_stats.psock_unattach,
mux_stats.rx_ready_drops);
seq_printf(seq,
"%-8s %-10s %-16s %-10s %-16s %-10s %-10s %-10s %-10s %-10s %-10s %-10s %-10s %-10s %-10s %-10s\n",
"Psock",
"RX-Msgs",
"RX-Bytes",
"TX-Msgs",
"TX-Bytes",
"Reserved",
"Unreserved",
"RX-Aborts",
"RX-Intr",
"RX-Unrecov",
"RX-MemFail",
"RX-NeedMor",
"RX-BadLen",
"RX-TooBig",
"RX-Timeout",
"TX-Aborts");
seq_printf(seq,
"%-8s %-10llu %-16llu %-10llu %-16llu %-10llu %-10llu %-10u %-10u %-10u %-10u %-10u %-10u %-10u %-10u %-10u\n",
"",
strp_stats.msgs,
strp_stats.bytes,
psock_stats.tx_msgs,
psock_stats.tx_bytes,
psock_stats.reserved,
psock_stats.unreserved,
strp_stats.aborts,
strp_stats.interrupted,
strp_stats.unrecov_intr,
strp_stats.mem_fail,
strp_stats.need_more_hdr,
strp_stats.bad_hdr_len,
strp_stats.msg_too_big,
strp_stats.msg_timeouts,
psock_stats.tx_aborts);
return 0;
}
static int kcm_proc_init_net(struct net *net)
{
if (!proc_create_net_single("kcm_stats", 0444, net->proc_net,
kcm_stats_seq_show, NULL))
goto out_kcm_stats;
if (!proc_create_net("kcm", 0444, net->proc_net, &kcm_seq_ops,
sizeof(struct kcm_proc_mux_state)))
goto out_kcm;
return 0;
out_kcm:
remove_proc_entry("kcm_stats", net->proc_net);
out_kcm_stats:
return -ENOMEM;
}
static void kcm_proc_exit_net(struct net *net)
{
remove_proc_entry("kcm", net->proc_net);
remove_proc_entry("kcm_stats", net->proc_net);
}
static struct pernet_operations kcm_net_ops = {
.init = kcm_proc_init_net,
.exit = kcm_proc_exit_net,
};
int __init kcm_proc_init(void)
{
return register_pernet_subsys(&kcm_net_ops);
}
void __exit kcm_proc_exit(void)
{
unregister_pernet_subsys(&kcm_net_ops);
}
#endif /* CONFIG_PROC_FS */
| linux-master | net/kcm/kcmproc.c |
// SPDX-License-Identifier: GPL-2.0-only
/*
* IUCV protocol stack for Linux on zSeries
*
* Copyright IBM Corp. 2006, 2009
*
* Author(s): Jennifer Hunt <[email protected]>
* Hendrik Brueckner <[email protected]>
* PM functions:
* Ursula Braun <[email protected]>
*/
#define KMSG_COMPONENT "af_iucv"
#define pr_fmt(fmt) KMSG_COMPONENT ": " fmt
#include <linux/filter.h>
#include <linux/module.h>
#include <linux/netdevice.h>
#include <linux/types.h>
#include <linux/limits.h>
#include <linux/list.h>
#include <linux/errno.h>
#include <linux/kernel.h>
#include <linux/sched/signal.h>
#include <linux/slab.h>
#include <linux/skbuff.h>
#include <linux/init.h>
#include <linux/poll.h>
#include <linux/security.h>
#include <net/sock.h>
#include <asm/ebcdic.h>
#include <asm/cpcmd.h>
#include <linux/kmod.h>
#include <net/iucv/af_iucv.h>
#define VERSION "1.2"
static char iucv_userid[80];
static struct proto iucv_proto = {
.name = "AF_IUCV",
.owner = THIS_MODULE,
.obj_size = sizeof(struct iucv_sock),
};
static struct iucv_interface *pr_iucv;
static struct iucv_handler af_iucv_handler;
/* special AF_IUCV IPRM messages */
static const u8 iprm_shutdown[8] =
{0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x01};
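/* The trailing 0x01 encodes a socket data length of 0xff - 0x01 = 254, i.e.
 * more than the 7 bytes a regular IPRM message may carry as socket data, so
 * receivers can recognize this special shutdown notification (see
 * iucv_msg_length() and iucv_process_message() below).
 */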
#define TRGCLS_SIZE sizeof_field(struct iucv_message, class)
#define __iucv_sock_wait(sk, condition, timeo, ret) \
do { \
DEFINE_WAIT(__wait); \
long __timeo = timeo; \
ret = 0; \
prepare_to_wait(sk_sleep(sk), &__wait, TASK_INTERRUPTIBLE); \
while (!(condition)) { \
if (!__timeo) { \
ret = -EAGAIN; \
break; \
} \
if (signal_pending(current)) { \
ret = sock_intr_errno(__timeo); \
break; \
} \
release_sock(sk); \
__timeo = schedule_timeout(__timeo); \
lock_sock(sk); \
ret = sock_error(sk); \
if (ret) \
break; \
} \
finish_wait(sk_sleep(sk), &__wait); \
} while (0)
#define iucv_sock_wait(sk, condition, timeo) \
({ \
int __ret = 0; \
if (!(condition)) \
__iucv_sock_wait(sk, condition, timeo, __ret); \
__ret; \
})
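/* Typical usage of iucv_sock_wait() (compare iucv_sock_connect() below):
 *
 *	timeo = sock_sndtimeo(sk, flags & O_NONBLOCK);
 *	err = iucv_sock_wait(sk, iucv_sock_in_state(sk, IUCV_CONNECTED,
 *						     IUCV_DISCONN), timeo);
 */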
static struct sock *iucv_accept_dequeue(struct sock *parent,
struct socket *newsock);
static void iucv_sock_kill(struct sock *sk);
static void iucv_sock_close(struct sock *sk);
static void afiucv_hs_callback_txnotify(struct sock *sk, enum iucv_tx_notify);
static struct iucv_sock_list iucv_sk_list = {
.lock = __RW_LOCK_UNLOCKED(iucv_sk_list.lock),
.autobind_name = ATOMIC_INIT(0)
};
static inline void high_nmcpy(unsigned char *dst, char *src)
{
memcpy(dst, src, 8);
}
static inline void low_nmcpy(unsigned char *dst, char *src)
{
memcpy(&dst[8], src, 8);
}
/**
* iucv_msg_length() - Returns the length of an iucv message.
* @msg: Pointer to struct iucv_message, MUST NOT be NULL
*
 * The function returns the length of the specified iucv message @msg, both
 * for data stored in a buffer and for data stored in the parameter list
 * (PRMDATA).
*
* For IUCV_IPRMDATA, AF_IUCV uses the following convention to transport socket
* data:
* PRMDATA[0..6] socket data (max 7 bytes);
* PRMDATA[7] socket data length value (len is 0xff - PRMDATA[7])
*
* The socket data length is computed by subtracting the socket data length
* value from 0xFF.
 * If the socket data length is greater than 7, then PRMDATA can be used for
 * special notifications (see iucv_sock_shutdown); in that case, the
 * function returns 8.
*
* Use this function to allocate socket buffers to store iucv message data.
*/
static inline size_t iucv_msg_length(struct iucv_message *msg)
{
size_t datalen;
if (msg->flags & IUCV_IPRMDATA) {
datalen = 0xff - msg->rmmsg[7];
return (datalen < 8) ? datalen : 8;
}
return msg->length;
}
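/* Example of the IPRM length convention above: PRMDATA[7] == 0xfc encodes
 * 0xff - 0xfc = 3 bytes of socket data, while any value below 0xf8 encodes a
 * length greater than 7 and is treated as a special notification (the length
 * returned by iucv_msg_length() is capped to 8).
 */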
/**
* iucv_sock_in_state() - check for specific states
* @sk: sock structure
* @state: first iucv sk state
* @state2: second iucv sk state
*
 * Returns true if the socket is in either the first or the second state.
*/
static int iucv_sock_in_state(struct sock *sk, int state, int state2)
{
return (sk->sk_state == state || sk->sk_state == state2);
}
/**
* iucv_below_msglim() - function to check if messages can be sent
* @sk: sock structure
*
* Returns true if the send queue length is lower than the message limit.
* Always returns true if the socket is not connected (no iucv path for
* checking the message limit).
*/
static inline int iucv_below_msglim(struct sock *sk)
{
struct iucv_sock *iucv = iucv_sk(sk);
if (sk->sk_state != IUCV_CONNECTED)
return 1;
if (iucv->transport == AF_IUCV_TRANS_IUCV)
return (atomic_read(&iucv->skbs_in_xmit) < iucv->path->msglim);
else
return ((atomic_read(&iucv->msg_sent) < iucv->msglimit_peer) &&
(atomic_read(&iucv->pendings) <= 0));
}
/*
* iucv_sock_wake_msglim() - Wake up thread waiting on msg limit
*/
static void iucv_sock_wake_msglim(struct sock *sk)
{
struct socket_wq *wq;
rcu_read_lock();
wq = rcu_dereference(sk->sk_wq);
if (skwq_has_sleeper(wq))
wake_up_interruptible_all(&wq->wait);
sk_wake_async(sk, SOCK_WAKE_SPACE, POLL_OUT);
rcu_read_unlock();
}
/*
* afiucv_hs_send() - send a message through HiperSockets transport
*/
static int afiucv_hs_send(struct iucv_message *imsg, struct sock *sock,
struct sk_buff *skb, u8 flags)
{
struct iucv_sock *iucv = iucv_sk(sock);
struct af_iucv_trans_hdr *phs_hdr;
int err, confirm_recv = 0;
phs_hdr = skb_push(skb, sizeof(*phs_hdr));
memset(phs_hdr, 0, sizeof(*phs_hdr));
skb_reset_network_header(skb);
phs_hdr->magic = ETH_P_AF_IUCV;
phs_hdr->version = 1;
phs_hdr->flags = flags;
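/* the window field is used for flow control: a SYN advertises the local
 * msglimit to the peer, while any other frame may piggyback the number of
 * messages received since the last update (the peer subtracts it from
 * msg_sent in afiucv_hs_callback_win()).
 */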
if (flags == AF_IUCV_FLAG_SYN)
phs_hdr->window = iucv->msglimit;
else if ((flags == AF_IUCV_FLAG_WIN) || !flags) {
confirm_recv = atomic_read(&iucv->msg_recv);
phs_hdr->window = confirm_recv;
if (confirm_recv)
phs_hdr->flags = phs_hdr->flags | AF_IUCV_FLAG_WIN;
}
memcpy(phs_hdr->destUserID, iucv->dst_user_id, 8);
memcpy(phs_hdr->destAppName, iucv->dst_name, 8);
memcpy(phs_hdr->srcUserID, iucv->src_user_id, 8);
memcpy(phs_hdr->srcAppName, iucv->src_name, 8);
ASCEBC(phs_hdr->destUserID, sizeof(phs_hdr->destUserID));
ASCEBC(phs_hdr->destAppName, sizeof(phs_hdr->destAppName));
ASCEBC(phs_hdr->srcUserID, sizeof(phs_hdr->srcUserID));
ASCEBC(phs_hdr->srcAppName, sizeof(phs_hdr->srcAppName));
if (imsg)
memcpy(&phs_hdr->iucv_hdr, imsg, sizeof(struct iucv_message));
skb->dev = iucv->hs_dev;
if (!skb->dev) {
err = -ENODEV;
goto err_free;
}
dev_hard_header(skb, skb->dev, ETH_P_AF_IUCV, NULL, NULL, skb->len);
if (!(skb->dev->flags & IFF_UP) || !netif_carrier_ok(skb->dev)) {
err = -ENETDOWN;
goto err_free;
}
if (skb->len > skb->dev->mtu) {
if (sock->sk_type == SOCK_SEQPACKET) {
err = -EMSGSIZE;
goto err_free;
}
err = pskb_trim(skb, skb->dev->mtu);
if (err)
goto err_free;
}
skb->protocol = cpu_to_be16(ETH_P_AF_IUCV);
atomic_inc(&iucv->skbs_in_xmit);
err = dev_queue_xmit(skb);
if (net_xmit_eval(err)) {
atomic_dec(&iucv->skbs_in_xmit);
} else {
atomic_sub(confirm_recv, &iucv->msg_recv);
WARN_ON(atomic_read(&iucv->msg_recv) < 0);
}
return net_xmit_eval(err);
err_free:
kfree_skb(skb);
return err;
}
static struct sock *__iucv_get_sock_by_name(char *nm)
{
struct sock *sk;
sk_for_each(sk, &iucv_sk_list.head)
if (!memcmp(&iucv_sk(sk)->src_name, nm, 8))
return sk;
return NULL;
}
static void iucv_sock_destruct(struct sock *sk)
{
skb_queue_purge(&sk->sk_receive_queue);
skb_queue_purge(&sk->sk_error_queue);
if (!sock_flag(sk, SOCK_DEAD)) {
pr_err("Attempt to release alive iucv socket %p\n", sk);
return;
}
WARN_ON(atomic_read(&sk->sk_rmem_alloc));
WARN_ON(refcount_read(&sk->sk_wmem_alloc));
WARN_ON(sk->sk_wmem_queued);
WARN_ON(sk->sk_forward_alloc);
}
/* Cleanup Listen */
static void iucv_sock_cleanup_listen(struct sock *parent)
{
struct sock *sk;
/* Close non-accepted connections */
while ((sk = iucv_accept_dequeue(parent, NULL))) {
iucv_sock_close(sk);
iucv_sock_kill(sk);
}
parent->sk_state = IUCV_CLOSED;
}
static void iucv_sock_link(struct iucv_sock_list *l, struct sock *sk)
{
write_lock_bh(&l->lock);
sk_add_node(sk, &l->head);
write_unlock_bh(&l->lock);
}
static void iucv_sock_unlink(struct iucv_sock_list *l, struct sock *sk)
{
write_lock_bh(&l->lock);
sk_del_node_init(sk);
write_unlock_bh(&l->lock);
}
/* Kill socket (only if zapped and orphaned) */
static void iucv_sock_kill(struct sock *sk)
{
if (!sock_flag(sk, SOCK_ZAPPED) || sk->sk_socket)
return;
iucv_sock_unlink(&iucv_sk_list, sk);
sock_set_flag(sk, SOCK_DEAD);
sock_put(sk);
}
/* Terminate an IUCV path */
static void iucv_sever_path(struct sock *sk, int with_user_data)
{
unsigned char user_data[16];
struct iucv_sock *iucv = iucv_sk(sk);
struct iucv_path *path = iucv->path;
if (iucv->path) {
iucv->path = NULL;
if (with_user_data) {
low_nmcpy(user_data, iucv->src_name);
high_nmcpy(user_data, iucv->dst_name);
ASCEBC(user_data, sizeof(user_data));
pr_iucv->path_sever(path, user_data);
} else
pr_iucv->path_sever(path, NULL);
iucv_path_free(path);
}
}
/* Send controlling flags through an IUCV socket for HIPER transport */
static int iucv_send_ctrl(struct sock *sk, u8 flags)
{
struct iucv_sock *iucv = iucv_sk(sk);
int err = 0;
int blen;
struct sk_buff *skb;
u8 shutdown = 0;
blen = sizeof(struct af_iucv_trans_hdr) +
LL_RESERVED_SPACE(iucv->hs_dev);
if (sk->sk_shutdown & SEND_SHUTDOWN) {
/* controlling flags should be sent anyway */
shutdown = sk->sk_shutdown;
sk->sk_shutdown &= RCV_SHUTDOWN;
}
skb = sock_alloc_send_skb(sk, blen, 1, &err);
if (skb) {
skb_reserve(skb, blen);
err = afiucv_hs_send(NULL, sk, skb, flags);
}
if (shutdown)
sk->sk_shutdown = shutdown;
return err;
}
/* Close an IUCV socket */
static void iucv_sock_close(struct sock *sk)
{
struct iucv_sock *iucv = iucv_sk(sk);
unsigned long timeo;
int err = 0;
lock_sock(sk);
switch (sk->sk_state) {
case IUCV_LISTEN:
iucv_sock_cleanup_listen(sk);
break;
case IUCV_CONNECTED:
if (iucv->transport == AF_IUCV_TRANS_HIPER) {
err = iucv_send_ctrl(sk, AF_IUCV_FLAG_FIN);
sk->sk_state = IUCV_DISCONN;
sk->sk_state_change(sk);
}
fallthrough;
case IUCV_DISCONN:
sk->sk_state = IUCV_CLOSING;
sk->sk_state_change(sk);
if (!err && atomic_read(&iucv->skbs_in_xmit) > 0) {
if (sock_flag(sk, SOCK_LINGER) && sk->sk_lingertime)
timeo = sk->sk_lingertime;
else
timeo = IUCV_DISCONN_TIMEOUT;
iucv_sock_wait(sk,
iucv_sock_in_state(sk, IUCV_CLOSED, 0),
timeo);
}
fallthrough;
case IUCV_CLOSING:
sk->sk_state = IUCV_CLOSED;
sk->sk_state_change(sk);
sk->sk_err = ECONNRESET;
sk->sk_state_change(sk);
skb_queue_purge(&iucv->send_skb_q);
skb_queue_purge(&iucv->backlog_skb_q);
fallthrough;
default:
iucv_sever_path(sk, 1);
}
if (iucv->hs_dev) {
dev_put(iucv->hs_dev);
iucv->hs_dev = NULL;
sk->sk_bound_dev_if = 0;
}
/* mark socket for deletion by iucv_sock_kill() */
sock_set_flag(sk, SOCK_ZAPPED);
release_sock(sk);
}
static void iucv_sock_init(struct sock *sk, struct sock *parent)
{
if (parent) {
sk->sk_type = parent->sk_type;
security_sk_clone(parent, sk);
}
}
static struct sock *iucv_sock_alloc(struct socket *sock, int proto, gfp_t prio, int kern)
{
struct sock *sk;
struct iucv_sock *iucv;
sk = sk_alloc(&init_net, PF_IUCV, prio, &iucv_proto, kern);
if (!sk)
return NULL;
iucv = iucv_sk(sk);
sock_init_data(sock, sk);
INIT_LIST_HEAD(&iucv->accept_q);
spin_lock_init(&iucv->accept_q_lock);
skb_queue_head_init(&iucv->send_skb_q);
INIT_LIST_HEAD(&iucv->message_q.list);
spin_lock_init(&iucv->message_q.lock);
skb_queue_head_init(&iucv->backlog_skb_q);
iucv->send_tag = 0;
atomic_set(&iucv->pendings, 0);
iucv->flags = 0;
iucv->msglimit = 0;
atomic_set(&iucv->skbs_in_xmit, 0);
atomic_set(&iucv->msg_sent, 0);
atomic_set(&iucv->msg_recv, 0);
iucv->path = NULL;
iucv->sk_txnotify = afiucv_hs_callback_txnotify;
memset(&iucv->init, 0, sizeof(iucv->init));
if (pr_iucv)
iucv->transport = AF_IUCV_TRANS_IUCV;
else
iucv->transport = AF_IUCV_TRANS_HIPER;
sk->sk_destruct = iucv_sock_destruct;
sk->sk_sndtimeo = IUCV_CONN_TIMEOUT;
sock_reset_flag(sk, SOCK_ZAPPED);
sk->sk_protocol = proto;
sk->sk_state = IUCV_OPEN;
iucv_sock_link(&iucv_sk_list, sk);
return sk;
}
static void iucv_accept_enqueue(struct sock *parent, struct sock *sk)
{
unsigned long flags;
struct iucv_sock *par = iucv_sk(parent);
sock_hold(sk);
spin_lock_irqsave(&par->accept_q_lock, flags);
list_add_tail(&iucv_sk(sk)->accept_q, &par->accept_q);
spin_unlock_irqrestore(&par->accept_q_lock, flags);
iucv_sk(sk)->parent = parent;
sk_acceptq_added(parent);
}
static void iucv_accept_unlink(struct sock *sk)
{
unsigned long flags;
struct iucv_sock *par = iucv_sk(iucv_sk(sk)->parent);
spin_lock_irqsave(&par->accept_q_lock, flags);
list_del_init(&iucv_sk(sk)->accept_q);
spin_unlock_irqrestore(&par->accept_q_lock, flags);
sk_acceptq_removed(iucv_sk(sk)->parent);
iucv_sk(sk)->parent = NULL;
sock_put(sk);
}
static struct sock *iucv_accept_dequeue(struct sock *parent,
struct socket *newsock)
{
struct iucv_sock *isk, *n;
struct sock *sk;
list_for_each_entry_safe(isk, n, &iucv_sk(parent)->accept_q, accept_q) {
sk = (struct sock *) isk;
lock_sock(sk);
if (sk->sk_state == IUCV_CLOSED) {
iucv_accept_unlink(sk);
release_sock(sk);
continue;
}
if (sk->sk_state == IUCV_CONNECTED ||
sk->sk_state == IUCV_DISCONN ||
!newsock) {
iucv_accept_unlink(sk);
if (newsock)
sock_graft(sk, newsock);
release_sock(sk);
return sk;
}
release_sock(sk);
}
return NULL;
}
static void __iucv_auto_name(struct iucv_sock *iucv)
{
char name[12];
sprintf(name, "%08x", atomic_inc_return(&iucv_sk_list.autobind_name));
while (__iucv_get_sock_by_name(name)) {
sprintf(name, "%08x",
atomic_inc_return(&iucv_sk_list.autobind_name));
}
memcpy(iucv->src_name, name, 8);
}
/* Bind an unbound socket */
static int iucv_sock_bind(struct socket *sock, struct sockaddr *addr,
int addr_len)
{
DECLARE_SOCKADDR(struct sockaddr_iucv *, sa, addr);
char uid[sizeof(sa->siucv_user_id)];
struct sock *sk = sock->sk;
struct iucv_sock *iucv;
int err = 0;
struct net_device *dev;
/* Verify the input sockaddr */
if (addr_len < sizeof(struct sockaddr_iucv) ||
addr->sa_family != AF_IUCV)
return -EINVAL;
lock_sock(sk);
if (sk->sk_state != IUCV_OPEN) {
err = -EBADFD;
goto done;
}
write_lock_bh(&iucv_sk_list.lock);
iucv = iucv_sk(sk);
if (__iucv_get_sock_by_name(sa->siucv_name)) {
err = -EADDRINUSE;
goto done_unlock;
}
if (iucv->path)
goto done_unlock;
/* Bind the socket */
if (pr_iucv)
if (!memcmp(sa->siucv_user_id, iucv_userid, 8))
goto vm_bind; /* VM IUCV transport */
/* try hiper transport */
memcpy(uid, sa->siucv_user_id, sizeof(uid));
ASCEBC(uid, 8);
rcu_read_lock();
for_each_netdev_rcu(&init_net, dev) {
if (!memcmp(dev->perm_addr, uid, 8)) {
memcpy(iucv->src_user_id, sa->siucv_user_id, 8);
/* Check for uninitialized siucv_name */
if (strncmp(sa->siucv_name, " ", 8) == 0)
__iucv_auto_name(iucv);
else
memcpy(iucv->src_name, sa->siucv_name, 8);
sk->sk_bound_dev_if = dev->ifindex;
iucv->hs_dev = dev;
dev_hold(dev);
sk->sk_state = IUCV_BOUND;
iucv->transport = AF_IUCV_TRANS_HIPER;
if (!iucv->msglimit)
iucv->msglimit = IUCV_HIPER_MSGLIM_DEFAULT;
rcu_read_unlock();
goto done_unlock;
}
}
rcu_read_unlock();
vm_bind:
if (pr_iucv) {
/* use local userid for backward compat */
memcpy(iucv->src_name, sa->siucv_name, 8);
memcpy(iucv->src_user_id, iucv_userid, 8);
sk->sk_state = IUCV_BOUND;
iucv->transport = AF_IUCV_TRANS_IUCV;
sk->sk_allocation |= GFP_DMA;
if (!iucv->msglimit)
iucv->msglimit = IUCV_QUEUELEN_DEFAULT;
goto done_unlock;
}
/* found no dev to bind */
err = -ENODEV;
done_unlock:
/* Release the socket list lock */
write_unlock_bh(&iucv_sk_list.lock);
done:
release_sock(sk);
return err;
}
/* Automatically bind an unbound socket */
static int iucv_sock_autobind(struct sock *sk)
{
struct iucv_sock *iucv = iucv_sk(sk);
int err = 0;
if (unlikely(!pr_iucv))
return -EPROTO;
memcpy(iucv->src_user_id, iucv_userid, 8);
iucv->transport = AF_IUCV_TRANS_IUCV;
sk->sk_allocation |= GFP_DMA;
write_lock_bh(&iucv_sk_list.lock);
__iucv_auto_name(iucv);
write_unlock_bh(&iucv_sk_list.lock);
if (!iucv->msglimit)
iucv->msglimit = IUCV_QUEUELEN_DEFAULT;
return err;
}
static int afiucv_path_connect(struct socket *sock, struct sockaddr *addr)
{
DECLARE_SOCKADDR(struct sockaddr_iucv *, sa, addr);
struct sock *sk = sock->sk;
struct iucv_sock *iucv = iucv_sk(sk);
unsigned char user_data[16];
int err;
high_nmcpy(user_data, sa->siucv_name);
low_nmcpy(user_data, iucv->src_name);
ASCEBC(user_data, sizeof(user_data));
/* Create path. */
iucv->path = iucv_path_alloc(iucv->msglimit,
IUCV_IPRMDATA, GFP_KERNEL);
if (!iucv->path) {
err = -ENOMEM;
goto done;
}
err = pr_iucv->path_connect(iucv->path, &af_iucv_handler,
sa->siucv_user_id, NULL, user_data,
sk);
if (err) {
iucv_path_free(iucv->path);
iucv->path = NULL;
switch (err) {
case 0x0b: /* Target communicator is not logged on */
err = -ENETUNREACH;
break;
case 0x0d: /* Max connections for this guest exceeded */
case 0x0e: /* Max connections for target guest exceeded */
err = -EAGAIN;
break;
case 0x0f: /* Missing IUCV authorization */
err = -EACCES;
break;
default:
err = -ECONNREFUSED;
break;
}
}
done:
return err;
}
/* Connect an unconnected socket */
static int iucv_sock_connect(struct socket *sock, struct sockaddr *addr,
int alen, int flags)
{
DECLARE_SOCKADDR(struct sockaddr_iucv *, sa, addr);
struct sock *sk = sock->sk;
struct iucv_sock *iucv = iucv_sk(sk);
int err;
if (alen < sizeof(struct sockaddr_iucv) || addr->sa_family != AF_IUCV)
return -EINVAL;
if (sk->sk_state != IUCV_OPEN && sk->sk_state != IUCV_BOUND)
return -EBADFD;
if (sk->sk_state == IUCV_OPEN &&
iucv->transport == AF_IUCV_TRANS_HIPER)
return -EBADFD; /* explicit bind required */
if (sk->sk_type != SOCK_STREAM && sk->sk_type != SOCK_SEQPACKET)
return -EINVAL;
if (sk->sk_state == IUCV_OPEN) {
err = iucv_sock_autobind(sk);
if (unlikely(err))
return err;
}
lock_sock(sk);
/* Set the destination information */
memcpy(iucv->dst_user_id, sa->siucv_user_id, 8);
memcpy(iucv->dst_name, sa->siucv_name, 8);
if (iucv->transport == AF_IUCV_TRANS_HIPER)
err = iucv_send_ctrl(sock->sk, AF_IUCV_FLAG_SYN);
else
err = afiucv_path_connect(sock, addr);
if (err)
goto done;
if (sk->sk_state != IUCV_CONNECTED)
err = iucv_sock_wait(sk, iucv_sock_in_state(sk, IUCV_CONNECTED,
IUCV_DISCONN),
sock_sndtimeo(sk, flags & O_NONBLOCK));
if (sk->sk_state == IUCV_DISCONN || sk->sk_state == IUCV_CLOSED)
err = -ECONNREFUSED;
if (err && iucv->transport == AF_IUCV_TRANS_IUCV)
iucv_sever_path(sk, 0);
done:
release_sock(sk);
return err;
}
/* Move a socket into listening state. */
static int iucv_sock_listen(struct socket *sock, int backlog)
{
struct sock *sk = sock->sk;
int err;
lock_sock(sk);
err = -EINVAL;
if (sk->sk_state != IUCV_BOUND)
goto done;
if (sock->type != SOCK_STREAM && sock->type != SOCK_SEQPACKET)
goto done;
sk->sk_max_ack_backlog = backlog;
sk->sk_ack_backlog = 0;
sk->sk_state = IUCV_LISTEN;
err = 0;
done:
release_sock(sk);
return err;
}
/* Accept a pending connection */
static int iucv_sock_accept(struct socket *sock, struct socket *newsock,
int flags, bool kern)
{
DECLARE_WAITQUEUE(wait, current);
struct sock *sk = sock->sk, *nsk;
long timeo;
int err = 0;
lock_sock_nested(sk, SINGLE_DEPTH_NESTING);
if (sk->sk_state != IUCV_LISTEN) {
err = -EBADFD;
goto done;
}
timeo = sock_rcvtimeo(sk, flags & O_NONBLOCK);
/* Wait for an incoming connection */
add_wait_queue_exclusive(sk_sleep(sk), &wait);
while (!(nsk = iucv_accept_dequeue(sk, newsock))) {
set_current_state(TASK_INTERRUPTIBLE);
if (!timeo) {
err = -EAGAIN;
break;
}
release_sock(sk);
timeo = schedule_timeout(timeo);
lock_sock_nested(sk, SINGLE_DEPTH_NESTING);
if (sk->sk_state != IUCV_LISTEN) {
err = -EBADFD;
break;
}
if (signal_pending(current)) {
err = sock_intr_errno(timeo);
break;
}
}
set_current_state(TASK_RUNNING);
remove_wait_queue(sk_sleep(sk), &wait);
if (err)
goto done;
newsock->state = SS_CONNECTED;
done:
release_sock(sk);
return err;
}
static int iucv_sock_getname(struct socket *sock, struct sockaddr *addr,
int peer)
{
DECLARE_SOCKADDR(struct sockaddr_iucv *, siucv, addr);
struct sock *sk = sock->sk;
struct iucv_sock *iucv = iucv_sk(sk);
addr->sa_family = AF_IUCV;
if (peer) {
memcpy(siucv->siucv_user_id, iucv->dst_user_id, 8);
memcpy(siucv->siucv_name, iucv->dst_name, 8);
} else {
memcpy(siucv->siucv_user_id, iucv->src_user_id, 8);
memcpy(siucv->siucv_name, iucv->src_name, 8);
}
memset(&siucv->siucv_port, 0, sizeof(siucv->siucv_port));
memset(&siucv->siucv_addr, 0, sizeof(siucv->siucv_addr));
memset(&siucv->siucv_nodeid, 0, sizeof(siucv->siucv_nodeid));
return sizeof(struct sockaddr_iucv);
}
/**
* iucv_send_iprm() - Send socket data in parameter list of an iucv message.
* @path: IUCV path
* @msg: Pointer to a struct iucv_message
* @skb: The socket data to send, skb->len MUST BE <= 7
*
* Send the socket data in the parameter list in the iucv message
* (IUCV_IPRMDATA). The socket data is stored at index 0 to 6 in the parameter
* list and the socket data len at index 7 (last byte).
* See also iucv_msg_length().
*
* Returns the error code from the iucv_message_send() call.
*/
static int iucv_send_iprm(struct iucv_path *path, struct iucv_message *msg,
struct sk_buff *skb)
{
u8 prmdata[8];
memcpy(prmdata, (void *) skb->data, skb->len);
prmdata[7] = 0xff - (u8) skb->len;
return pr_iucv->message_send(path, msg, IUCV_IPRMDATA, 0,
(void *) prmdata, 8);
}
static int iucv_sock_sendmsg(struct socket *sock, struct msghdr *msg,
size_t len)
{
struct sock *sk = sock->sk;
struct iucv_sock *iucv = iucv_sk(sk);
size_t headroom = 0;
size_t linear;
struct sk_buff *skb;
struct iucv_message txmsg = {0};
struct cmsghdr *cmsg;
int cmsg_done;
long timeo;
char user_id[9];
char appl_id[9];
int err;
int noblock = msg->msg_flags & MSG_DONTWAIT;
err = sock_error(sk);
if (err)
return err;
if (msg->msg_flags & MSG_OOB)
return -EOPNOTSUPP;
/* SOCK_SEQPACKET: we do not support segmented records */
if (sk->sk_type == SOCK_SEQPACKET && !(msg->msg_flags & MSG_EOR))
return -EOPNOTSUPP;
lock_sock(sk);
if (sk->sk_shutdown & SEND_SHUTDOWN) {
err = -EPIPE;
goto out;
}
/* Return if the socket is not in connected state */
if (sk->sk_state != IUCV_CONNECTED) {
err = -ENOTCONN;
goto out;
}
/* initialize defaults */
cmsg_done = 0; /* check for duplicate headers */
/* iterate over control messages */
for_each_cmsghdr(cmsg, msg) {
if (!CMSG_OK(msg, cmsg)) {
err = -EINVAL;
goto out;
}
if (cmsg->cmsg_level != SOL_IUCV)
continue;
if (cmsg->cmsg_type & cmsg_done) {
err = -EINVAL;
goto out;
}
cmsg_done |= cmsg->cmsg_type;
switch (cmsg->cmsg_type) {
case SCM_IUCV_TRGCLS:
if (cmsg->cmsg_len != CMSG_LEN(TRGCLS_SIZE)) {
err = -EINVAL;
goto out;
}
/* set iucv message target class */
memcpy(&txmsg.class,
(void *) CMSG_DATA(cmsg), TRGCLS_SIZE);
break;
default:
err = -EINVAL;
goto out;
}
}
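/* Illustrative (hypothetical) userspace snippet for setting the target class
 * via ancillary data; the cmsg macros are the standard socket API ones:
 *
 *	u32 trgcls = 1;
 *	char cbuf[CMSG_SPACE(sizeof(trgcls))];
 *	struct cmsghdr *cm;
 *	msg.msg_control = cbuf;
 *	msg.msg_controllen = sizeof(cbuf);
 *	cm = CMSG_FIRSTHDR(&msg);
 *	cm->cmsg_level = SOL_IUCV;
 *	cm->cmsg_type = SCM_IUCV_TRGCLS;
 *	cm->cmsg_len = CMSG_LEN(sizeof(trgcls));
 *	memcpy(CMSG_DATA(cm), &trgcls, sizeof(trgcls));
 */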
/* allocate one skb for each iucv message:
* this is fine for SOCK_SEQPACKET (unless we want to support
* segmented records using the MSG_EOR flag), but
* for SOCK_STREAM we might want to improve it in future */
if (iucv->transport == AF_IUCV_TRANS_HIPER) {
headroom = sizeof(struct af_iucv_trans_hdr) +
LL_RESERVED_SPACE(iucv->hs_dev);
linear = min(len, PAGE_SIZE - headroom);
} else {
if (len < PAGE_SIZE) {
linear = len;
} else {
/* In nonlinear "classic" iucv skb,
* reserve space for iucv_array
*/
headroom = sizeof(struct iucv_array) *
(MAX_SKB_FRAGS + 1);
linear = PAGE_SIZE - headroom;
}
}
skb = sock_alloc_send_pskb(sk, headroom + linear, len - linear,
noblock, &err, 0);
if (!skb)
goto out;
if (headroom)
skb_reserve(skb, headroom);
skb_put(skb, linear);
skb->len = len;
skb->data_len = len - linear;
err = skb_copy_datagram_from_iter(skb, 0, &msg->msg_iter, len);
if (err)
goto fail;
/* wait while the number of outstanding messages on the iucv path
 * has reached the message limit */
timeo = sock_sndtimeo(sk, noblock);
err = iucv_sock_wait(sk, iucv_below_msglim(sk), timeo);
if (err)
goto fail;
/* return -ECONNRESET if the socket is no longer connected */
if (sk->sk_state != IUCV_CONNECTED) {
err = -ECONNRESET;
goto fail;
}
/* increment and save iucv message tag for msg_completion cbk */
txmsg.tag = iucv->send_tag++;
IUCV_SKB_CB(skb)->tag = txmsg.tag;
if (iucv->transport == AF_IUCV_TRANS_HIPER) {
atomic_inc(&iucv->msg_sent);
err = afiucv_hs_send(&txmsg, sk, skb, 0);
if (err) {
atomic_dec(&iucv->msg_sent);
goto out;
}
} else { /* Classic VM IUCV transport */
skb_queue_tail(&iucv->send_skb_q, skb);
atomic_inc(&iucv->skbs_in_xmit);
if (((iucv->path->flags & IUCV_IPRMDATA) & iucv->flags) &&
skb->len <= 7) {
err = iucv_send_iprm(iucv->path, &txmsg, skb);
/* on success: there is no message_complete callback */
/* for an IPRMDATA msg; remove skb from send queue */
if (err == 0) {
atomic_dec(&iucv->skbs_in_xmit);
skb_unlink(skb, &iucv->send_skb_q);
consume_skb(skb);
}
/* this error should never happen since the */
/* IUCV_IPRMDATA path flag is set... sever path */
if (err == 0x15) {
pr_iucv->path_sever(iucv->path, NULL);
atomic_dec(&iucv->skbs_in_xmit);
skb_unlink(skb, &iucv->send_skb_q);
err = -EPIPE;
goto fail;
}
} else if (skb_is_nonlinear(skb)) {
struct iucv_array *iba = (struct iucv_array *)skb->head;
int i;
/* skip iucv_array lying in the headroom */
iba[0].address = (u32)(addr_t)skb->data;
iba[0].length = (u32)skb_headlen(skb);
for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
iba[i + 1].address =
(u32)(addr_t)skb_frag_address(frag);
iba[i + 1].length = (u32)skb_frag_size(frag);
}
err = pr_iucv->message_send(iucv->path, &txmsg,
IUCV_IPBUFLST, 0,
(void *)iba, skb->len);
} else { /* non-IPRM Linear skb */
err = pr_iucv->message_send(iucv->path, &txmsg,
0, 0, (void *)skb->data, skb->len);
}
if (err) {
if (err == 3) {
user_id[8] = 0;
memcpy(user_id, iucv->dst_user_id, 8);
appl_id[8] = 0;
memcpy(appl_id, iucv->dst_name, 8);
pr_err(
"Application %s on z/VM guest %s exceeds message limit\n",
appl_id, user_id);
err = -EAGAIN;
} else {
err = -EPIPE;
}
atomic_dec(&iucv->skbs_in_xmit);
skb_unlink(skb, &iucv->send_skb_q);
goto fail;
}
}
release_sock(sk);
return len;
fail:
kfree_skb(skb);
out:
release_sock(sk);
return err;
}
static struct sk_buff *alloc_iucv_recv_skb(unsigned long len)
{
size_t headroom, linear;
struct sk_buff *skb;
int err;
if (len < PAGE_SIZE) {
headroom = 0;
linear = len;
} else {
headroom = sizeof(struct iucv_array) * (MAX_SKB_FRAGS + 1);
linear = PAGE_SIZE - headroom;
}
skb = alloc_skb_with_frags(headroom + linear, len - linear,
0, &err, GFP_ATOMIC | GFP_DMA);
WARN_ONCE(!skb,
"alloc of recv iucv skb len=%lu failed with errcode=%d\n",
len, err);
if (skb) {
if (headroom)
skb_reserve(skb, headroom);
skb_put(skb, linear);
skb->len = len;
skb->data_len = len - linear;
}
return skb;
}
/* iucv_process_message() - Receive a single outstanding IUCV message
*
* Locking: must be called with message_q.lock held
*/
static void iucv_process_message(struct sock *sk, struct sk_buff *skb,
struct iucv_path *path,
struct iucv_message *msg)
{
int rc;
unsigned int len;
len = iucv_msg_length(msg);
/* store msg target class in the second 4 bytes of skb ctrl buffer */
/* Note: the first 4 bytes are reserved for msg tag */
IUCV_SKB_CB(skb)->class = msg->class;
/* check for special IPRM messages (e.g. iucv_sock_shutdown) */
if ((msg->flags & IUCV_IPRMDATA) && len > 7) {
if (memcmp(msg->rmmsg, iprm_shutdown, 8) == 0) {
skb->data = NULL;
skb->len = 0;
}
} else {
if (skb_is_nonlinear(skb)) {
struct iucv_array *iba = (struct iucv_array *)skb->head;
int i;
iba[0].address = (u32)(addr_t)skb->data;
iba[0].length = (u32)skb_headlen(skb);
for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
iba[i + 1].address =
(u32)(addr_t)skb_frag_address(frag);
iba[i + 1].length = (u32)skb_frag_size(frag);
}
rc = pr_iucv->message_receive(path, msg,
IUCV_IPBUFLST,
(void *)iba, len, NULL);
} else {
rc = pr_iucv->message_receive(path, msg,
msg->flags & IUCV_IPRMDATA,
skb->data, len, NULL);
}
if (rc) {
kfree_skb(skb);
return;
}
WARN_ON_ONCE(skb->len != len);
}
IUCV_SKB_CB(skb)->offset = 0;
if (sk_filter(sk, skb)) {
atomic_inc(&sk->sk_drops); /* skb rejected by filter */
kfree_skb(skb);
return;
}
if (__sock_queue_rcv_skb(sk, skb)) /* handle rcv queue full */
skb_queue_tail(&iucv_sk(sk)->backlog_skb_q, skb);
}
/* iucv_process_message_q() - Process outstanding IUCV messages
*
* Locking: must be called with message_q.lock held
*/
static void iucv_process_message_q(struct sock *sk)
{
struct iucv_sock *iucv = iucv_sk(sk);
struct sk_buff *skb;
struct sock_msg_q *p, *n;
list_for_each_entry_safe(p, n, &iucv->message_q.list, list) {
skb = alloc_iucv_recv_skb(iucv_msg_length(&p->msg));
if (!skb)
break;
iucv_process_message(sk, skb, p->path, &p->msg);
list_del(&p->list);
kfree(p);
if (!skb_queue_empty(&iucv->backlog_skb_q))
break;
}
}
static int iucv_sock_recvmsg(struct socket *sock, struct msghdr *msg,
size_t len, int flags)
{
struct sock *sk = sock->sk;
struct iucv_sock *iucv = iucv_sk(sk);
unsigned int copied, rlen;
struct sk_buff *skb, *rskb, *cskb;
int err = 0;
u32 offset;
if ((sk->sk_state == IUCV_DISCONN) &&
skb_queue_empty(&iucv->backlog_skb_q) &&
skb_queue_empty(&sk->sk_receive_queue) &&
list_empty(&iucv->message_q.list))
return 0;
if (flags & (MSG_OOB))
return -EOPNOTSUPP;
/* receive/dequeue next skb:
 * the function understands MSG_PEEK and, thus, leaves the skb queued
 * when only peeking at the data */
skb = skb_recv_datagram(sk, flags, &err);
if (!skb) {
if (sk->sk_shutdown & RCV_SHUTDOWN)
return 0;
return err;
}
offset = IUCV_SKB_CB(skb)->offset;
rlen = skb->len - offset; /* real length of skb */
copied = min_t(unsigned int, rlen, len);
if (!rlen)
sk->sk_shutdown = sk->sk_shutdown | RCV_SHUTDOWN;
cskb = skb;
if (skb_copy_datagram_msg(cskb, offset, msg, copied)) {
if (!(flags & MSG_PEEK))
skb_queue_head(&sk->sk_receive_queue, skb);
return -EFAULT;
}
/* SOCK_SEQPACKET: set MSG_TRUNC if recv buf size is too small */
if (sk->sk_type == SOCK_SEQPACKET) {
if (copied < rlen)
msg->msg_flags |= MSG_TRUNC;
/* each iucv message contains a complete record */
msg->msg_flags |= MSG_EOR;
}
/* create control message to store the iucv msg target class:
 * get the trgcls from the control buffer of the skb, because the
 * original iucv message may have been fragmented. */
err = put_cmsg(msg, SOL_IUCV, SCM_IUCV_TRGCLS,
sizeof(IUCV_SKB_CB(skb)->class),
(void *)&IUCV_SKB_CB(skb)->class);
if (err) {
if (!(flags & MSG_PEEK))
skb_queue_head(&sk->sk_receive_queue, skb);
return err;
}
/* Mark read part of skb as used */
if (!(flags & MSG_PEEK)) {
/* SOCK_STREAM: re-queue skb if it contains unreceived data */
if (sk->sk_type == SOCK_STREAM) {
if (copied < rlen) {
IUCV_SKB_CB(skb)->offset = offset + copied;
skb_queue_head(&sk->sk_receive_queue, skb);
goto done;
}
}
consume_skb(skb);
if (iucv->transport == AF_IUCV_TRANS_HIPER) {
atomic_inc(&iucv->msg_recv);
if (atomic_read(&iucv->msg_recv) > iucv->msglimit) {
WARN_ON(1);
iucv_sock_close(sk);
return -EFAULT;
}
}
/* Queue backlog skbs */
spin_lock_bh(&iucv->message_q.lock);
rskb = skb_dequeue(&iucv->backlog_skb_q);
while (rskb) {
IUCV_SKB_CB(rskb)->offset = 0;
if (__sock_queue_rcv_skb(sk, rskb)) {
/* handle rcv queue full */
skb_queue_head(&iucv->backlog_skb_q,
rskb);
break;
}
rskb = skb_dequeue(&iucv->backlog_skb_q);
}
if (skb_queue_empty(&iucv->backlog_skb_q)) {
if (!list_empty(&iucv->message_q.list))
iucv_process_message_q(sk);
if (atomic_read(&iucv->msg_recv) >=
iucv->msglimit / 2) {
err = iucv_send_ctrl(sk, AF_IUCV_FLAG_WIN);
if (err) {
sk->sk_state = IUCV_DISCONN;
sk->sk_state_change(sk);
}
}
}
spin_unlock_bh(&iucv->message_q.lock);
}
done:
/* SOCK_SEQPACKET: return real length if MSG_TRUNC is set */
if (sk->sk_type == SOCK_SEQPACKET && (flags & MSG_TRUNC))
copied = rlen;
return copied;
}
static inline __poll_t iucv_accept_poll(struct sock *parent)
{
struct iucv_sock *isk, *n;
struct sock *sk;
list_for_each_entry_safe(isk, n, &iucv_sk(parent)->accept_q, accept_q) {
sk = (struct sock *) isk;
if (sk->sk_state == IUCV_CONNECTED)
return EPOLLIN | EPOLLRDNORM;
}
return 0;
}
static __poll_t iucv_sock_poll(struct file *file, struct socket *sock,
poll_table *wait)
{
struct sock *sk = sock->sk;
__poll_t mask = 0;
sock_poll_wait(file, sock, wait);
if (sk->sk_state == IUCV_LISTEN)
return iucv_accept_poll(sk);
if (sk->sk_err || !skb_queue_empty(&sk->sk_error_queue))
mask |= EPOLLERR |
(sock_flag(sk, SOCK_SELECT_ERR_QUEUE) ? EPOLLPRI : 0);
if (sk->sk_shutdown & RCV_SHUTDOWN)
mask |= EPOLLRDHUP;
if (sk->sk_shutdown == SHUTDOWN_MASK)
mask |= EPOLLHUP;
if (!skb_queue_empty(&sk->sk_receive_queue) ||
(sk->sk_shutdown & RCV_SHUTDOWN))
mask |= EPOLLIN | EPOLLRDNORM;
if (sk->sk_state == IUCV_CLOSED)
mask |= EPOLLHUP;
if (sk->sk_state == IUCV_DISCONN)
mask |= EPOLLIN;
if (sock_writeable(sk) && iucv_below_msglim(sk))
mask |= EPOLLOUT | EPOLLWRNORM | EPOLLWRBAND;
else
sk_set_bit(SOCKWQ_ASYNC_NOSPACE, sk);
return mask;
}
static int iucv_sock_shutdown(struct socket *sock, int how)
{
struct sock *sk = sock->sk;
struct iucv_sock *iucv = iucv_sk(sk);
struct iucv_message txmsg;
int err = 0;
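/* map SHUT_RD (0), SHUT_WR (1) and SHUT_RDWR (2) onto the shutdown mask
 * bits RCV_SHUTDOWN (1), SEND_SHUTDOWN (2) and SHUTDOWN_MASK (3) by
 * incrementing the value */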
how++;
if ((how & ~SHUTDOWN_MASK) || !how)
return -EINVAL;
lock_sock(sk);
switch (sk->sk_state) {
case IUCV_LISTEN:
case IUCV_DISCONN:
case IUCV_CLOSING:
case IUCV_CLOSED:
err = -ENOTCONN;
goto fail;
default:
break;
}
if ((how == SEND_SHUTDOWN || how == SHUTDOWN_MASK) &&
sk->sk_state == IUCV_CONNECTED) {
if (iucv->transport == AF_IUCV_TRANS_IUCV) {
txmsg.class = 0;
txmsg.tag = 0;
err = pr_iucv->message_send(iucv->path, &txmsg,
IUCV_IPRMDATA, 0, (void *) iprm_shutdown, 8);
if (err) {
switch (err) {
case 1:
err = -ENOTCONN;
break;
case 2:
err = -ECONNRESET;
break;
default:
err = -ENOTCONN;
break;
}
}
} else
iucv_send_ctrl(sk, AF_IUCV_FLAG_SHT);
}
sk->sk_shutdown |= how;
if (how == RCV_SHUTDOWN || how == SHUTDOWN_MASK) {
if ((iucv->transport == AF_IUCV_TRANS_IUCV) &&
iucv->path) {
err = pr_iucv->path_quiesce(iucv->path, NULL);
if (err)
err = -ENOTCONN;
/* skb_queue_purge(&sk->sk_receive_queue); */
}
skb_queue_purge(&sk->sk_receive_queue);
}
/* Wake up anyone sleeping in poll */
sk->sk_state_change(sk);
fail:
release_sock(sk);
return err;
}
static int iucv_sock_release(struct socket *sock)
{
struct sock *sk = sock->sk;
int err = 0;
if (!sk)
return 0;
iucv_sock_close(sk);
sock_orphan(sk);
iucv_sock_kill(sk);
return err;
}
/* getsockopt and setsockopt */
static int iucv_sock_setsockopt(struct socket *sock, int level, int optname,
sockptr_t optval, unsigned int optlen)
{
struct sock *sk = sock->sk;
struct iucv_sock *iucv = iucv_sk(sk);
int val;
int rc;
if (level != SOL_IUCV)
return -ENOPROTOOPT;
if (optlen < sizeof(int))
return -EINVAL;
if (copy_from_sockptr(&val, optval, sizeof(int)))
return -EFAULT;
rc = 0;
lock_sock(sk);
switch (optname) {
case SO_IPRMDATA_MSG:
if (val)
iucv->flags |= IUCV_IPRMDATA;
else
iucv->flags &= ~IUCV_IPRMDATA;
break;
case SO_MSGLIMIT:
switch (sk->sk_state) {
case IUCV_OPEN:
case IUCV_BOUND:
if (val < 1 || val > U16_MAX)
rc = -EINVAL;
else
iucv->msglimit = val;
break;
default:
rc = -EINVAL;
break;
}
break;
default:
rc = -ENOPROTOOPT;
break;
}
release_sock(sk);
return rc;
}
static int iucv_sock_getsockopt(struct socket *sock, int level, int optname,
char __user *optval, int __user *optlen)
{
struct sock *sk = sock->sk;
struct iucv_sock *iucv = iucv_sk(sk);
unsigned int val;
int len;
if (level != SOL_IUCV)
return -ENOPROTOOPT;
if (get_user(len, optlen))
return -EFAULT;
if (len < 0)
return -EINVAL;
len = min_t(unsigned int, len, sizeof(int));
switch (optname) {
case SO_IPRMDATA_MSG:
val = (iucv->flags & IUCV_IPRMDATA) ? 1 : 0;
break;
case SO_MSGLIMIT:
lock_sock(sk);
val = (iucv->path != NULL) ? iucv->path->msglim /* connected */
: iucv->msglimit; /* default */
release_sock(sk);
break;
case SO_MSGSIZE:
if (sk->sk_state == IUCV_OPEN)
return -EBADFD;
val = (iucv->hs_dev) ? iucv->hs_dev->mtu -
sizeof(struct af_iucv_trans_hdr) - ETH_HLEN :
0x7fffffff;
break;
default:
return -ENOPROTOOPT;
}
if (put_user(len, optlen))
return -EFAULT;
if (copy_to_user(optval, &val, len))
return -EFAULT;
return 0;
}
/* Callback wrappers - called from iucv base support */
static int iucv_callback_connreq(struct iucv_path *path,
u8 ipvmid[8], u8 ipuser[16])
{
unsigned char user_data[16];
unsigned char nuser_data[16];
unsigned char src_name[8];
struct sock *sk, *nsk;
struct iucv_sock *iucv, *niucv;
int err;
memcpy(src_name, ipuser, 8);
EBCASC(src_name, 8);
/* Find out if this path belongs to af_iucv. */
read_lock(&iucv_sk_list.lock);
iucv = NULL;
sk = NULL;
sk_for_each(sk, &iucv_sk_list.head)
if (sk->sk_state == IUCV_LISTEN &&
!memcmp(&iucv_sk(sk)->src_name, src_name, 8)) {
/*
* Found a listening socket with
* src_name == ipuser[0-7].
*/
iucv = iucv_sk(sk);
break;
}
read_unlock(&iucv_sk_list.lock);
if (!iucv)
/* No socket found, not one of our paths. */
return -EINVAL;
bh_lock_sock(sk);
/* Check if parent socket is listening */
low_nmcpy(user_data, iucv->src_name);
high_nmcpy(user_data, iucv->dst_name);
ASCEBC(user_data, sizeof(user_data));
if (sk->sk_state != IUCV_LISTEN) {
err = pr_iucv->path_sever(path, user_data);
iucv_path_free(path);
goto fail;
}
/* Check for backlog size */
if (sk_acceptq_is_full(sk)) {
err = pr_iucv->path_sever(path, user_data);
iucv_path_free(path);
goto fail;
}
/* Create the new socket */
nsk = iucv_sock_alloc(NULL, sk->sk_protocol, GFP_ATOMIC, 0);
if (!nsk) {
err = pr_iucv->path_sever(path, user_data);
iucv_path_free(path);
goto fail;
}
niucv = iucv_sk(nsk);
iucv_sock_init(nsk, sk);
niucv->transport = AF_IUCV_TRANS_IUCV;
nsk->sk_allocation |= GFP_DMA;
/* Set the new iucv_sock */
memcpy(niucv->dst_name, ipuser + 8, 8);
EBCASC(niucv->dst_name, 8);
memcpy(niucv->dst_user_id, ipvmid, 8);
memcpy(niucv->src_name, iucv->src_name, 8);
memcpy(niucv->src_user_id, iucv->src_user_id, 8);
niucv->path = path;
/* Call iucv_accept */
high_nmcpy(nuser_data, ipuser + 8);
memcpy(nuser_data + 8, niucv->src_name, 8);
ASCEBC(nuser_data + 8, 8);
/* set message limit for path based on msglimit of accepting socket */
niucv->msglimit = iucv->msglimit;
path->msglim = iucv->msglimit;
err = pr_iucv->path_accept(path, &af_iucv_handler, nuser_data, nsk);
if (err) {
iucv_sever_path(nsk, 1);
iucv_sock_kill(nsk);
goto fail;
}
iucv_accept_enqueue(sk, nsk);
/* Wake up accept */
nsk->sk_state = IUCV_CONNECTED;
sk->sk_data_ready(sk);
err = 0;
fail:
bh_unlock_sock(sk);
return 0;
}
static void iucv_callback_connack(struct iucv_path *path, u8 ipuser[16])
{
struct sock *sk = path->private;
sk->sk_state = IUCV_CONNECTED;
sk->sk_state_change(sk);
}
static void iucv_callback_rx(struct iucv_path *path, struct iucv_message *msg)
{
struct sock *sk = path->private;
struct iucv_sock *iucv = iucv_sk(sk);
struct sk_buff *skb;
struct sock_msg_q *save_msg;
int len;
if (sk->sk_shutdown & RCV_SHUTDOWN) {
pr_iucv->message_reject(path, msg);
return;
}
spin_lock(&iucv->message_q.lock);
if (!list_empty(&iucv->message_q.list) ||
!skb_queue_empty(&iucv->backlog_skb_q))
goto save_message;
len = atomic_read(&sk->sk_rmem_alloc);
len += SKB_TRUESIZE(iucv_msg_length(msg));
if (len > sk->sk_rcvbuf)
goto save_message;
skb = alloc_iucv_recv_skb(iucv_msg_length(msg));
if (!skb)
goto save_message;
iucv_process_message(sk, skb, path, msg);
goto out_unlock;
save_message:
save_msg = kzalloc(sizeof(struct sock_msg_q), GFP_ATOMIC | GFP_DMA);
if (!save_msg)
goto out_unlock;
save_msg->path = path;
save_msg->msg = *msg;
list_add_tail(&save_msg->list, &iucv->message_q.list);
out_unlock:
spin_unlock(&iucv->message_q.lock);
}
static void iucv_callback_txdone(struct iucv_path *path,
struct iucv_message *msg)
{
struct sock *sk = path->private;
struct sk_buff *this = NULL;
struct sk_buff_head *list;
struct sk_buff *list_skb;
struct iucv_sock *iucv;
unsigned long flags;
iucv = iucv_sk(sk);
list = &iucv->send_skb_q;
bh_lock_sock(sk);
spin_lock_irqsave(&list->lock, flags);
skb_queue_walk(list, list_skb) {
if (msg->tag == IUCV_SKB_CB(list_skb)->tag) {
this = list_skb;
break;
}
}
if (this) {
atomic_dec(&iucv->skbs_in_xmit);
__skb_unlink(this, list);
}
spin_unlock_irqrestore(&list->lock, flags);
if (this) {
consume_skb(this);
/* wake up any process waiting for sending */
iucv_sock_wake_msglim(sk);
}
if (sk->sk_state == IUCV_CLOSING) {
if (atomic_read(&iucv->skbs_in_xmit) == 0) {
sk->sk_state = IUCV_CLOSED;
sk->sk_state_change(sk);
}
}
bh_unlock_sock(sk);
}
static void iucv_callback_connrej(struct iucv_path *path, u8 ipuser[16])
{
struct sock *sk = path->private;
if (sk->sk_state == IUCV_CLOSED)
return;
bh_lock_sock(sk);
iucv_sever_path(sk, 1);
sk->sk_state = IUCV_DISCONN;
sk->sk_state_change(sk);
bh_unlock_sock(sk);
}
/* called if the other communication side shuts down its RECV direction;
* in turn, the callback sets SEND_SHUTDOWN to disable sending of data.
*/
static void iucv_callback_shutdown(struct iucv_path *path, u8 ipuser[16])
{
struct sock *sk = path->private;
bh_lock_sock(sk);
if (sk->sk_state != IUCV_CLOSED) {
sk->sk_shutdown |= SEND_SHUTDOWN;
sk->sk_state_change(sk);
}
bh_unlock_sock(sk);
}
static struct iucv_handler af_iucv_handler = {
.path_pending = iucv_callback_connreq,
.path_complete = iucv_callback_connack,
.path_severed = iucv_callback_connrej,
.message_pending = iucv_callback_rx,
.message_complete = iucv_callback_txdone,
.path_quiesced = iucv_callback_shutdown,
};
/***************** HiperSockets transport callbacks ********************/
static void afiucv_swap_src_dest(struct sk_buff *skb)
{
struct af_iucv_trans_hdr *trans_hdr = iucv_trans_hdr(skb);
char tmpID[8];
char tmpName[8];
ASCEBC(trans_hdr->destUserID, sizeof(trans_hdr->destUserID));
ASCEBC(trans_hdr->destAppName, sizeof(trans_hdr->destAppName));
ASCEBC(trans_hdr->srcUserID, sizeof(trans_hdr->srcUserID));
ASCEBC(trans_hdr->srcAppName, sizeof(trans_hdr->srcAppName));
memcpy(tmpID, trans_hdr->srcUserID, 8);
memcpy(tmpName, trans_hdr->srcAppName, 8);
memcpy(trans_hdr->srcUserID, trans_hdr->destUserID, 8);
memcpy(trans_hdr->srcAppName, trans_hdr->destAppName, 8);
memcpy(trans_hdr->destUserID, tmpID, 8);
memcpy(trans_hdr->destAppName, tmpName, 8);
skb_push(skb, ETH_HLEN);
memset(skb->data, 0, ETH_HLEN);
}
/*
* afiucv_hs_callback_syn - react on received SYN
*/
static int afiucv_hs_callback_syn(struct sock *sk, struct sk_buff *skb)
{
struct af_iucv_trans_hdr *trans_hdr = iucv_trans_hdr(skb);
struct sock *nsk;
struct iucv_sock *iucv, *niucv;
int err;
iucv = iucv_sk(sk);
if (!iucv) {
/* no sock - connection refused */
afiucv_swap_src_dest(skb);
trans_hdr->flags = AF_IUCV_FLAG_SYN | AF_IUCV_FLAG_FIN;
err = dev_queue_xmit(skb);
goto out;
}
nsk = iucv_sock_alloc(NULL, sk->sk_protocol, GFP_ATOMIC, 0);
bh_lock_sock(sk);
if ((sk->sk_state != IUCV_LISTEN) ||
sk_acceptq_is_full(sk) ||
!nsk) {
/* error on server socket - connection refused */
afiucv_swap_src_dest(skb);
trans_hdr->flags = AF_IUCV_FLAG_SYN | AF_IUCV_FLAG_FIN;
err = dev_queue_xmit(skb);
iucv_sock_kill(nsk);
bh_unlock_sock(sk);
goto out;
}
niucv = iucv_sk(nsk);
iucv_sock_init(nsk, sk);
niucv->transport = AF_IUCV_TRANS_HIPER;
niucv->msglimit = iucv->msglimit;
if (!trans_hdr->window)
niucv->msglimit_peer = IUCV_HIPER_MSGLIM_DEFAULT;
else
niucv->msglimit_peer = trans_hdr->window;
memcpy(niucv->dst_name, trans_hdr->srcAppName, 8);
memcpy(niucv->dst_user_id, trans_hdr->srcUserID, 8);
memcpy(niucv->src_name, iucv->src_name, 8);
memcpy(niucv->src_user_id, iucv->src_user_id, 8);
nsk->sk_bound_dev_if = sk->sk_bound_dev_if;
niucv->hs_dev = iucv->hs_dev;
dev_hold(niucv->hs_dev);
afiucv_swap_src_dest(skb);
trans_hdr->flags = AF_IUCV_FLAG_SYN | AF_IUCV_FLAG_ACK;
trans_hdr->window = niucv->msglimit;
/* if the receiver acks the xmit, the connection is established */
err = dev_queue_xmit(skb);
if (!err) {
iucv_accept_enqueue(sk, nsk);
nsk->sk_state = IUCV_CONNECTED;
sk->sk_data_ready(sk);
} else
iucv_sock_kill(nsk);
bh_unlock_sock(sk);
out:
return NET_RX_SUCCESS;
}
/*
* afiucv_hs_callback_synack() - react on received SYN-ACK
*/
static int afiucv_hs_callback_synack(struct sock *sk, struct sk_buff *skb)
{
struct iucv_sock *iucv = iucv_sk(sk);
if (!iucv || sk->sk_state != IUCV_BOUND) {
kfree_skb(skb);
return NET_RX_SUCCESS;
}
bh_lock_sock(sk);
iucv->msglimit_peer = iucv_trans_hdr(skb)->window;
sk->sk_state = IUCV_CONNECTED;
sk->sk_state_change(sk);
bh_unlock_sock(sk);
consume_skb(skb);
return NET_RX_SUCCESS;
}
/*
* afiucv_hs_callback_synfin() - react on received SYN_FIN
*/
static int afiucv_hs_callback_synfin(struct sock *sk, struct sk_buff *skb)
{
struct iucv_sock *iucv = iucv_sk(sk);
if (!iucv || sk->sk_state != IUCV_BOUND) {
kfree_skb(skb);
return NET_RX_SUCCESS;
}
bh_lock_sock(sk);
sk->sk_state = IUCV_DISCONN;
sk->sk_state_change(sk);
bh_unlock_sock(sk);
consume_skb(skb);
return NET_RX_SUCCESS;
}
/*
* afiucv_hs_callback_fin() - react on received FIN
*/
static int afiucv_hs_callback_fin(struct sock *sk, struct sk_buff *skb)
{
struct iucv_sock *iucv = iucv_sk(sk);
/* other end of connection closed */
if (!iucv) {
kfree_skb(skb);
return NET_RX_SUCCESS;
}
bh_lock_sock(sk);
if (sk->sk_state == IUCV_CONNECTED) {
sk->sk_state = IUCV_DISCONN;
sk->sk_state_change(sk);
}
bh_unlock_sock(sk);
consume_skb(skb);
return NET_RX_SUCCESS;
}
/*
* afiucv_hs_callback_win() - react on received WIN
*/
static int afiucv_hs_callback_win(struct sock *sk, struct sk_buff *skb)
{
struct iucv_sock *iucv = iucv_sk(sk);
if (!iucv)
return NET_RX_SUCCESS;
if (sk->sk_state != IUCV_CONNECTED)
return NET_RX_SUCCESS;
atomic_sub(iucv_trans_hdr(skb)->window, &iucv->msg_sent);
iucv_sock_wake_msglim(sk);
return NET_RX_SUCCESS;
}
/*
* afiucv_hs_callback_rx() - react on received data
*/
static int afiucv_hs_callback_rx(struct sock *sk, struct sk_buff *skb)
{
struct iucv_sock *iucv = iucv_sk(sk);
if (!iucv) {
kfree_skb(skb);
return NET_RX_SUCCESS;
}
if (sk->sk_state != IUCV_CONNECTED) {
kfree_skb(skb);
return NET_RX_SUCCESS;
}
if (sk->sk_shutdown & RCV_SHUTDOWN) {
kfree_skb(skb);
return NET_RX_SUCCESS;
}
/* write stuff from iucv_msg to skb cb */
skb_pull(skb, sizeof(struct af_iucv_trans_hdr));
skb_reset_transport_header(skb);
skb_reset_network_header(skb);
IUCV_SKB_CB(skb)->offset = 0;
if (sk_filter(sk, skb)) {
atomic_inc(&sk->sk_drops); /* skb rejected by filter */
kfree_skb(skb);
return NET_RX_SUCCESS;
}
spin_lock(&iucv->message_q.lock);
if (skb_queue_empty(&iucv->backlog_skb_q)) {
if (__sock_queue_rcv_skb(sk, skb))
/* handle rcv queue full */
skb_queue_tail(&iucv->backlog_skb_q, skb);
} else
skb_queue_tail(&iucv_sk(sk)->backlog_skb_q, skb);
spin_unlock(&iucv->message_q.lock);
return NET_RX_SUCCESS;
}
/*
* afiucv_hs_rcv() - base function for arriving data through HiperSockets
* transport
* called from netif RX softirq
*/
static int afiucv_hs_rcv(struct sk_buff *skb, struct net_device *dev,
struct packet_type *pt, struct net_device *orig_dev)
{
struct sock *sk;
struct iucv_sock *iucv;
struct af_iucv_trans_hdr *trans_hdr;
int err = NET_RX_SUCCESS;
char nullstring[8];
if (!pskb_may_pull(skb, sizeof(*trans_hdr))) {
kfree_skb(skb);
return NET_RX_SUCCESS;
}
trans_hdr = iucv_trans_hdr(skb);
EBCASC(trans_hdr->destAppName, sizeof(trans_hdr->destAppName));
EBCASC(trans_hdr->destUserID, sizeof(trans_hdr->destUserID));
EBCASC(trans_hdr->srcAppName, sizeof(trans_hdr->srcAppName));
EBCASC(trans_hdr->srcUserID, sizeof(trans_hdr->srcUserID));
memset(nullstring, 0, sizeof(nullstring));
iucv = NULL;
sk = NULL;
read_lock(&iucv_sk_list.lock);
sk_for_each(sk, &iucv_sk_list.head) {
if (trans_hdr->flags == AF_IUCV_FLAG_SYN) {
if ((!memcmp(&iucv_sk(sk)->src_name,
trans_hdr->destAppName, 8)) &&
(!memcmp(&iucv_sk(sk)->src_user_id,
trans_hdr->destUserID, 8)) &&
(!memcmp(&iucv_sk(sk)->dst_name, nullstring, 8)) &&
(!memcmp(&iucv_sk(sk)->dst_user_id,
nullstring, 8))) {
iucv = iucv_sk(sk);
break;
}
} else {
if ((!memcmp(&iucv_sk(sk)->src_name,
trans_hdr->destAppName, 8)) &&
(!memcmp(&iucv_sk(sk)->src_user_id,
trans_hdr->destUserID, 8)) &&
(!memcmp(&iucv_sk(sk)->dst_name,
trans_hdr->srcAppName, 8)) &&
(!memcmp(&iucv_sk(sk)->dst_user_id,
trans_hdr->srcUserID, 8))) {
iucv = iucv_sk(sk);
break;
}
}
}
read_unlock(&iucv_sk_list.lock);
if (!iucv)
sk = NULL;
/* no matching socket was found - open question: how should we respond?
 * 1) send without a sock (no send rc checking)?
 * 2) introduce a default sock to handle these cases:
 *    SYN -> send SYN|ACK in the good case, SYN|FIN in the bad case
 *    data -> send FIN
 *    SYN|ACK, SYN|FIN, FIN -> no action? */
switch (trans_hdr->flags) {
case AF_IUCV_FLAG_SYN:
/* connect request */
err = afiucv_hs_callback_syn(sk, skb);
break;
case (AF_IUCV_FLAG_SYN | AF_IUCV_FLAG_ACK):
/* connect request confirmed */
err = afiucv_hs_callback_synack(sk, skb);
break;
case (AF_IUCV_FLAG_SYN | AF_IUCV_FLAG_FIN):
/* connect request refused */
err = afiucv_hs_callback_synfin(sk, skb);
break;
case (AF_IUCV_FLAG_FIN):
/* close request */
err = afiucv_hs_callback_fin(sk, skb);
break;
case (AF_IUCV_FLAG_WIN):
err = afiucv_hs_callback_win(sk, skb);
if (skb->len == sizeof(struct af_iucv_trans_hdr)) {
consume_skb(skb);
break;
}
fallthrough; /* and receive non-zero length data */
case (AF_IUCV_FLAG_SHT):
/* shutdown request */
fallthrough; /* and receive zero length data */
case 0:
/* plain data frame */
IUCV_SKB_CB(skb)->class = trans_hdr->iucv_hdr.class;
err = afiucv_hs_callback_rx(sk, skb);
break;
default:
kfree_skb(skb);
}
return err;
}
/*
* afiucv_hs_callback_txnotify() - handle send notifications from HiperSockets
* transport
*/
static void afiucv_hs_callback_txnotify(struct sock *sk, enum iucv_tx_notify n)
{
struct iucv_sock *iucv = iucv_sk(sk);
if (sock_flag(sk, SOCK_ZAPPED))
return;
switch (n) {
case TX_NOTIFY_OK:
atomic_dec(&iucv->skbs_in_xmit);
iucv_sock_wake_msglim(sk);
break;
case TX_NOTIFY_PENDING:
atomic_inc(&iucv->pendings);
break;
case TX_NOTIFY_DELAYED_OK:
atomic_dec(&iucv->skbs_in_xmit);
if (atomic_dec_return(&iucv->pendings) <= 0)
iucv_sock_wake_msglim(sk);
break;
default:
atomic_dec(&iucv->skbs_in_xmit);
if (sk->sk_state == IUCV_CONNECTED) {
sk->sk_state = IUCV_DISCONN;
sk->sk_state_change(sk);
}
}
if (sk->sk_state == IUCV_CLOSING) {
if (atomic_read(&iucv->skbs_in_xmit) == 0) {
sk->sk_state = IUCV_CLOSED;
sk->sk_state_change(sk);
}
}
}
/*
* afiucv_netdev_event: handle netdev notifier chain events
*/
static int afiucv_netdev_event(struct notifier_block *this,
unsigned long event, void *ptr)
{
struct net_device *event_dev = netdev_notifier_info_to_dev(ptr);
struct sock *sk;
struct iucv_sock *iucv;
switch (event) {
case NETDEV_REBOOT:
case NETDEV_GOING_DOWN:
sk_for_each(sk, &iucv_sk_list.head) {
iucv = iucv_sk(sk);
if ((iucv->hs_dev == event_dev) &&
(sk->sk_state == IUCV_CONNECTED)) {
if (event == NETDEV_GOING_DOWN)
iucv_send_ctrl(sk, AF_IUCV_FLAG_FIN);
sk->sk_state = IUCV_DISCONN;
sk->sk_state_change(sk);
}
}
break;
case NETDEV_DOWN:
case NETDEV_UNREGISTER:
default:
break;
}
return NOTIFY_DONE;
}
static struct notifier_block afiucv_netdev_notifier = {
.notifier_call = afiucv_netdev_event,
};
static const struct proto_ops iucv_sock_ops = {
.family = PF_IUCV,
.owner = THIS_MODULE,
.release = iucv_sock_release,
.bind = iucv_sock_bind,
.connect = iucv_sock_connect,
.listen = iucv_sock_listen,
.accept = iucv_sock_accept,
.getname = iucv_sock_getname,
.sendmsg = iucv_sock_sendmsg,
.recvmsg = iucv_sock_recvmsg,
.poll = iucv_sock_poll,
.ioctl = sock_no_ioctl,
.mmap = sock_no_mmap,
.socketpair = sock_no_socketpair,
.shutdown = iucv_sock_shutdown,
.setsockopt = iucv_sock_setsockopt,
.getsockopt = iucv_sock_getsockopt,
};
static int iucv_sock_create(struct net *net, struct socket *sock, int protocol,
int kern)
{
struct sock *sk;
if (protocol && protocol != PF_IUCV)
return -EPROTONOSUPPORT;
sock->state = SS_UNCONNECTED;
switch (sock->type) {
case SOCK_STREAM:
case SOCK_SEQPACKET:
/* currently, proto ops can handle both sk types */
sock->ops = &iucv_sock_ops;
break;
default:
return -ESOCKTNOSUPPORT;
}
sk = iucv_sock_alloc(sock, protocol, GFP_KERNEL, kern);
if (!sk)
return -ENOMEM;
iucv_sock_init(sk, NULL);
return 0;
}
static const struct net_proto_family iucv_sock_family_ops = {
.family = AF_IUCV,
.owner = THIS_MODULE,
.create = iucv_sock_create,
};
static struct packet_type iucv_packet_type = {
.type = cpu_to_be16(ETH_P_AF_IUCV),
.func = afiucv_hs_rcv,
};
static int __init afiucv_init(void)
{
int err;
if (MACHINE_IS_VM && IS_ENABLED(CONFIG_IUCV)) {
cpcmd("QUERY USERID", iucv_userid, sizeof(iucv_userid), &err);
if (unlikely(err)) {
WARN_ON(err);
err = -EPROTONOSUPPORT;
goto out;
}
pr_iucv = &iucv_if;
} else {
memset(&iucv_userid, 0, sizeof(iucv_userid));
pr_iucv = NULL;
}
err = proto_register(&iucv_proto, 0);
if (err)
goto out;
err = sock_register(&iucv_sock_family_ops);
if (err)
goto out_proto;
if (pr_iucv) {
err = pr_iucv->iucv_register(&af_iucv_handler, 0);
if (err)
goto out_sock;
}
err = register_netdevice_notifier(&afiucv_netdev_notifier);
if (err)
goto out_notifier;
dev_add_pack(&iucv_packet_type);
return 0;
out_notifier:
if (pr_iucv)
pr_iucv->iucv_unregister(&af_iucv_handler, 0);
out_sock:
sock_unregister(PF_IUCV);
out_proto:
proto_unregister(&iucv_proto);
out:
return err;
}
static void __exit afiucv_exit(void)
{
if (pr_iucv)
pr_iucv->iucv_unregister(&af_iucv_handler, 0);
unregister_netdevice_notifier(&afiucv_netdev_notifier);
dev_remove_pack(&iucv_packet_type);
sock_unregister(PF_IUCV);
proto_unregister(&iucv_proto);
}
module_init(afiucv_init);
module_exit(afiucv_exit);
MODULE_AUTHOR("Jennifer Hunt <[email protected]>");
MODULE_DESCRIPTION("IUCV Sockets ver " VERSION);
MODULE_VERSION(VERSION);
MODULE_LICENSE("GPL");
MODULE_ALIAS_NETPROTO(PF_IUCV);
| linux-master | net/iucv/af_iucv.c |
// SPDX-License-Identifier: GPL-2.0-or-later
/*
* IUCV base infrastructure.
*
* Copyright IBM Corp. 2001, 2009
*
* Author(s):
* Original source:
* Alan Altmark ([email protected]) Sept. 2000
* Xenia Tkatschow ([email protected])
* 2Gb awareness and general cleanup:
* Fritz Elfert ([email protected], [email protected])
* Rewritten for af_iucv:
* Martin Schwidefsky <[email protected]>
* PM functions:
* Ursula Braun ([email protected])
*
* Documentation used:
* The original source
* CP Programming Service, IBM document # SC24-5760
*/
#define KMSG_COMPONENT "iucv"
#define pr_fmt(fmt) KMSG_COMPONENT ": " fmt
#include <linux/kernel_stat.h>
#include <linux/module.h>
#include <linux/moduleparam.h>
#include <linux/spinlock.h>
#include <linux/kernel.h>
#include <linux/slab.h>
#include <linux/init.h>
#include <linux/interrupt.h>
#include <linux/list.h>
#include <linux/errno.h>
#include <linux/err.h>
#include <linux/device.h>
#include <linux/cpu.h>
#include <linux/reboot.h>
#include <net/iucv/iucv.h>
#include <linux/atomic.h>
#include <asm/ebcdic.h>
#include <asm/io.h>
#include <asm/irq.h>
#include <asm/smp.h>
/*
* FLAGS:
* All flags are defined in the field IPFLAGS1 of each function
* and can be found in CP Programming Services.
* IPSRCCLS - Indicates you have specified a source class.
* IPTRGCLS - Indicates you have specified a target class.
* IPFGPID - Indicates you have specified a pathid.
* IPFGMID - Indicates you have specified a message ID.
* IPNORPY - Indicates a one-way message. No reply expected.
* IPALL - Indicates that all paths are affected.
*/
#define IUCV_IPSRCCLS 0x01
#define IUCV_IPTRGCLS 0x01
#define IUCV_IPFGPID 0x02
#define IUCV_IPFGMID 0x04
#define IUCV_IPNORPY 0x10
#define IUCV_IPALL 0x80
static int iucv_bus_match(struct device *dev, struct device_driver *drv)
{
return 0;
}
struct bus_type iucv_bus = {
.name = "iucv",
.match = iucv_bus_match,
};
EXPORT_SYMBOL(iucv_bus);
struct device *iucv_root;
EXPORT_SYMBOL(iucv_root);
static int iucv_available;
/* General IUCV interrupt structure */
struct iucv_irq_data {
u16 ippathid;
u8 ipflags1;
u8 iptype;
u32 res2[9];
};
struct iucv_irq_list {
struct list_head list;
struct iucv_irq_data data;
};
static struct iucv_irq_data *iucv_irq_data[NR_CPUS];
static cpumask_t iucv_buffer_cpumask = { CPU_BITS_NONE };
static cpumask_t iucv_irq_cpumask = { CPU_BITS_NONE };
/*
* Queue of interrupt buffers for delivery via the tasklet
* (fast but can't call smp_call_function).
*/
static LIST_HEAD(iucv_task_queue);
/*
* The tasklet for fast delivery of iucv interrupts.
*/
static void iucv_tasklet_fn(unsigned long);
static DECLARE_TASKLET_OLD(iucv_tasklet, iucv_tasklet_fn);
/*
* Queue of interrupt buffers for delivery via a work queue
* (slower but can call smp_call_function).
*/
static LIST_HEAD(iucv_work_queue);
/*
* The work element to deliver path pending interrupts.
*/
static void iucv_work_fn(struct work_struct *work);
static DECLARE_WORK(iucv_work, iucv_work_fn);
/*
* Spinlock protecting task and work queue.
*/
static DEFINE_SPINLOCK(iucv_queue_lock);
enum iucv_command_codes {
IUCV_QUERY = 0,
IUCV_RETRIEVE_BUFFER = 2,
IUCV_SEND = 4,
IUCV_RECEIVE = 5,
IUCV_REPLY = 6,
IUCV_REJECT = 8,
IUCV_PURGE = 9,
IUCV_ACCEPT = 10,
IUCV_CONNECT = 11,
IUCV_DECLARE_BUFFER = 12,
IUCV_QUIESCE = 13,
IUCV_RESUME = 14,
IUCV_SEVER = 15,
IUCV_SETMASK = 16,
IUCV_SETCONTROLMASK = 17,
};
/*
* Error messages that are used with the iucv_sever function. They get
* converted to EBCDIC.
*/
static char iucv_error_no_listener[16] = "NO LISTENER";
static char iucv_error_no_memory[16] = "NO MEMORY";
static char iucv_error_pathid[16] = "INVALID PATHID";
/*
* iucv_handler_list: List of registered handlers.
*/
static LIST_HEAD(iucv_handler_list);
/*
* iucv_path_table: an array of iucv_path structures.
*/
static struct iucv_path **iucv_path_table;
static unsigned long iucv_max_pathid;
/*
* iucv_table_lock: spinlock protecting iucv_handler_list and iucv_path_table
*/
static DEFINE_SPINLOCK(iucv_table_lock);
/*
* iucv_active_cpu: contains the number of the cpu executing the tasklet
* or the work handler. Needed for iucv_path_sever called from tasklet.
*/
static int iucv_active_cpu = -1;
/*
* Mutex and wait queue for iucv_register/iucv_unregister.
*/
static DEFINE_MUTEX(iucv_register_mutex);
/*
* Counter for number of non-smp capable handlers.
*/
static int iucv_nonsmp_handler;
/*
* IUCV control data structure. Used by iucv_path_accept, iucv_path_connect,
* iucv_path_quiesce and iucv_path_sever.
*/
struct iucv_cmd_control {
u16 ippathid;
u8 ipflags1;
u8 iprcode;
u16 ipmsglim;
u16 res1;
u8 ipvmid[8];
u8 ipuser[16];
u8 iptarget[8];
} __attribute__ ((packed,aligned(8)));
/*
* Data in parameter list iucv structure. Used by iucv_message_send,
* iucv_message_send2way and iucv_message_reply.
*/
struct iucv_cmd_dpl {
u16 ippathid;
u8 ipflags1;
u8 iprcode;
u32 ipmsgid;
u32 iptrgcls;
u8 iprmmsg[8];
u32 ipsrccls;
u32 ipmsgtag;
u32 ipbfadr2;
u32 ipbfln2f;
u32 res;
} __attribute__ ((packed,aligned(8)));
/*
* Data in buffer iucv structure. Used by iucv_message_receive,
* iucv_message_reject, iucv_message_send, iucv_message_send2way
* and iucv_declare_cpu.
*/
struct iucv_cmd_db {
u16 ippathid;
u8 ipflags1;
u8 iprcode;
u32 ipmsgid;
u32 iptrgcls;
u32 ipbfadr1;
u32 ipbfln1f;
u32 ipsrccls;
u32 ipmsgtag;
u32 ipbfadr2;
u32 ipbfln2f;
u32 res;
} __attribute__ ((packed,aligned(8)));
/*
* Purge message iucv structure. Used by iucv_message_purge.
*/
struct iucv_cmd_purge {
u16 ippathid;
u8 ipflags1;
u8 iprcode;
u32 ipmsgid;
u8 ipaudit[3];
u8 res1[5];
u32 res2;
u32 ipsrccls;
u32 ipmsgtag;
u32 res3[3];
} __attribute__ ((packed,aligned(8)));
/*
* Set mask iucv structure. Used by iucv_enable_cpu.
*/
struct iucv_cmd_set_mask {
u8 ipmask;
u8 res1[2];
u8 iprcode;
u32 res2[9];
} __attribute__ ((packed,aligned(8)));
union iucv_param {
struct iucv_cmd_control ctrl;
struct iucv_cmd_dpl dpl;
struct iucv_cmd_db db;
struct iucv_cmd_purge purge;
struct iucv_cmd_set_mask set_mask;
};
/*
* Anchor for per-cpu IUCV command parameter block.
*/
static union iucv_param *iucv_param[NR_CPUS];
static union iucv_param *iucv_param_irq[NR_CPUS];
/**
* __iucv_call_b2f0
* @command: identifier of IUCV call to CP.
* @parm: pointer to a union iucv_param block
*
* Calls CP to execute IUCV commands.
*
* Returns the result of the CP IUCV call.
*/
static inline int __iucv_call_b2f0(int command, union iucv_param *parm)
{
int cc;
asm volatile(
" lgr 0,%[reg0]\n"
" lgr 1,%[reg1]\n"
" .long 0xb2f01000\n"
" ipm %[cc]\n"
" srl %[cc],28\n"
: [cc] "=&d" (cc), "+m" (*parm)
: [reg0] "d" ((unsigned long)command),
[reg1] "d" ((unsigned long)parm)
: "cc", "0", "1");
return cc;
}
static inline int iucv_call_b2f0(int command, union iucv_param *parm)
{
int ccode;
ccode = __iucv_call_b2f0(command, parm);
return ccode == 1 ? parm->ctrl.iprcode : ccode;
}
/*
* iucv_query_maxconn
*
* Determines the maximum number of connections that may be established.
*
* Returns 0 on success (iucv_max_pathid is updated), -ENOMEM if the
* parameter block could not be allocated, or -EPERM if IUCV is not
* available.
*/
static int __iucv_query_maxconn(void *param, unsigned long *max_pathid)
{
unsigned long reg1 = virt_to_phys(param);
int cc;
asm volatile (
" lghi 0,%[cmd]\n"
" lgr 1,%[reg1]\n"
" .long 0xb2f01000\n"
" ipm %[cc]\n"
" srl %[cc],28\n"
" lgr %[reg1],1\n"
: [cc] "=&d" (cc), [reg1] "+&d" (reg1)
: [cmd] "K" (IUCV_QUERY)
: "cc", "0", "1");
*max_pathid = reg1;
return cc;
}
static int iucv_query_maxconn(void)
{
unsigned long max_pathid;
void *param;
int ccode;
param = kzalloc(sizeof(union iucv_param), GFP_KERNEL | GFP_DMA);
if (!param)
return -ENOMEM;
ccode = __iucv_query_maxconn(param, &max_pathid);
if (ccode == 0)
iucv_max_pathid = max_pathid;
kfree(param);
return ccode ? -EPERM : 0;
}
/**
* iucv_allow_cpu
* @data: unused
*
* Allow iucv interrupts on this cpu.
*/
static void iucv_allow_cpu(void *data)
{
int cpu = smp_processor_id();
union iucv_param *parm;
/*
* Enable all iucv interrupts.
* ipmask contains bits for the different interrupts
* 0x80 - Flag to allow nonpriority message pending interrupts
* 0x40 - Flag to allow priority message pending interrupts
* 0x20 - Flag to allow nonpriority message completion interrupts
* 0x10 - Flag to allow priority message completion interrupts
* 0x08 - Flag to allow IUCV control interrupts
*/
parm = iucv_param_irq[cpu];
memset(parm, 0, sizeof(union iucv_param));
parm->set_mask.ipmask = 0xf8;
iucv_call_b2f0(IUCV_SETMASK, parm);
/*
* Enable all iucv control interrupts.
* ipmask contains bits for the different interrupts
* 0x80 - Flag to allow pending connections interrupts
* 0x40 - Flag to allow connection complete interrupts
* 0x20 - Flag to allow connection severed interrupts
* 0x10 - Flag to allow connection quiesced interrupts
* 0x08 - Flag to allow connection resumed interrupts
*/
memset(parm, 0, sizeof(union iucv_param));
parm->set_mask.ipmask = 0xf8;
iucv_call_b2f0(IUCV_SETCONTROLMASK, parm);
/* Set indication that iucv interrupts are allowed for this cpu. */
cpumask_set_cpu(cpu, &iucv_irq_cpumask);
}
/**
* iucv_block_cpu
* @data: unused
*
* Block iucv interrupts on this cpu.
*/
static void iucv_block_cpu(void *data)
{
int cpu = smp_processor_id();
union iucv_param *parm;
/* Disable all iucv interrupts. */
parm = iucv_param_irq[cpu];
memset(parm, 0, sizeof(union iucv_param));
iucv_call_b2f0(IUCV_SETMASK, parm);
/* Clear indication that iucv interrupts are allowed for this cpu. */
cpumask_clear_cpu(cpu, &iucv_irq_cpumask);
}
/**
* iucv_declare_cpu
* @data: unused
*
* Declare an interrupt buffer on this cpu.
*/
static void iucv_declare_cpu(void *data)
{
int cpu = smp_processor_id();
union iucv_param *parm;
int rc;
if (cpumask_test_cpu(cpu, &iucv_buffer_cpumask))
return;
/* Declare interrupt buffer. */
parm = iucv_param_irq[cpu];
memset(parm, 0, sizeof(union iucv_param));
parm->db.ipbfadr1 = virt_to_phys(iucv_irq_data[cpu]);
rc = iucv_call_b2f0(IUCV_DECLARE_BUFFER, parm);
if (rc) {
char *err = "Unknown";
switch (rc) {
case 0x03:
err = "Directory error";
break;
case 0x0a:
err = "Invalid length";
break;
case 0x13:
err = "Buffer already exists";
break;
case 0x3e:
err = "Buffer overlap";
break;
case 0x5c:
err = "Paging or storage error";
break;
}
pr_warn("Defining an interrupt buffer on CPU %i failed with 0x%02x (%s)\n",
cpu, rc, err);
return;
}
/* Set indication that an iucv buffer exists for this cpu. */
cpumask_set_cpu(cpu, &iucv_buffer_cpumask);
if (iucv_nonsmp_handler == 0 || cpumask_empty(&iucv_irq_cpumask))
/* Enable iucv interrupts on this cpu. */
iucv_allow_cpu(NULL);
else
/* Disable iucv interrupts on this cpu. */
iucv_block_cpu(NULL);
}
/**
* iucv_retrieve_cpu
* @data: unused
*
* Retrieve interrupt buffer on this cpu.
*/
static void iucv_retrieve_cpu(void *data)
{
int cpu = smp_processor_id();
union iucv_param *parm;
if (!cpumask_test_cpu(cpu, &iucv_buffer_cpumask))
return;
/* Block iucv interrupts. */
iucv_block_cpu(NULL);
/* Retrieve interrupt buffer. */
parm = iucv_param_irq[cpu];
iucv_call_b2f0(IUCV_RETRIEVE_BUFFER, parm);
/* Clear indication that an iucv buffer exists for this cpu. */
cpumask_clear_cpu(cpu, &iucv_buffer_cpumask);
}
/*
* iucv_setmask_mp
*
* Allow iucv interrupts on all cpus.
*/
static void iucv_setmask_mp(void)
{
int cpu;
cpus_read_lock();
for_each_online_cpu(cpu)
/* Enable all cpus with a declared buffer. */
if (cpumask_test_cpu(cpu, &iucv_buffer_cpumask) &&
!cpumask_test_cpu(cpu, &iucv_irq_cpumask))
smp_call_function_single(cpu, iucv_allow_cpu,
NULL, 1);
cpus_read_unlock();
}
/*
* iucv_setmask_up
*
* Allow iucv interrupts on a single cpu.
*/
static void iucv_setmask_up(void)
{
cpumask_t cpumask;
int cpu;
/* Disable all cpus but the first in iucv_irq_cpumask. */
cpumask_copy(&cpumask, &iucv_irq_cpumask);
cpumask_clear_cpu(cpumask_first(&iucv_irq_cpumask), &cpumask);
for_each_cpu(cpu, &cpumask)
smp_call_function_single(cpu, iucv_block_cpu, NULL, 1);
}
/*
* iucv_enable
*
* This function makes iucv ready for use. It allocates the pathid
* table, declares an iucv interrupt buffer and enables the iucv
* interrupts. Called when the first user has registered an iucv
* handler.
*/
static int iucv_enable(void)
{
size_t alloc_size;
int cpu, rc;
cpus_read_lock();
rc = -ENOMEM;
alloc_size = iucv_max_pathid * sizeof(struct iucv_path *);
iucv_path_table = kzalloc(alloc_size, GFP_KERNEL);
if (!iucv_path_table)
goto out;
/* Declare per cpu buffers. */
rc = -EIO;
for_each_online_cpu(cpu)
smp_call_function_single(cpu, iucv_declare_cpu, NULL, 1);
if (cpumask_empty(&iucv_buffer_cpumask))
/* No cpu could declare an iucv buffer. */
goto out;
cpus_read_unlock();
return 0;
out:
kfree(iucv_path_table);
iucv_path_table = NULL;
cpus_read_unlock();
return rc;
}
/*
* iucv_disable
*
* This function shuts down iucv. It disables iucv interrupts, retrieves
* the iucv interrupt buffer and frees the pathid table. Called after the
* last user has unregistered its iucv handler.
*/
static void iucv_disable(void)
{
cpus_read_lock();
on_each_cpu(iucv_retrieve_cpu, NULL, 1);
kfree(iucv_path_table);
iucv_path_table = NULL;
cpus_read_unlock();
}
static int iucv_cpu_dead(unsigned int cpu)
{
kfree(iucv_param_irq[cpu]);
iucv_param_irq[cpu] = NULL;
kfree(iucv_param[cpu]);
iucv_param[cpu] = NULL;
kfree(iucv_irq_data[cpu]);
iucv_irq_data[cpu] = NULL;
return 0;
}
static int iucv_cpu_prepare(unsigned int cpu)
{
/* Note: GFP_DMA used to get memory below 2G */
iucv_irq_data[cpu] = kmalloc_node(sizeof(struct iucv_irq_data),
GFP_KERNEL|GFP_DMA, cpu_to_node(cpu));
if (!iucv_irq_data[cpu])
goto out_free;
/* Allocate parameter blocks. */
iucv_param[cpu] = kmalloc_node(sizeof(union iucv_param),
GFP_KERNEL|GFP_DMA, cpu_to_node(cpu));
if (!iucv_param[cpu])
goto out_free;
iucv_param_irq[cpu] = kmalloc_node(sizeof(union iucv_param),
GFP_KERNEL|GFP_DMA, cpu_to_node(cpu));
if (!iucv_param_irq[cpu])
goto out_free;
return 0;
out_free:
iucv_cpu_dead(cpu);
return -ENOMEM;
}
static int iucv_cpu_online(unsigned int cpu)
{
if (!iucv_path_table)
return 0;
iucv_declare_cpu(NULL);
return 0;
}
static int iucv_cpu_down_prep(unsigned int cpu)
{
cpumask_t cpumask;
if (!iucv_path_table)
return 0;
cpumask_copy(&cpumask, &iucv_buffer_cpumask);
cpumask_clear_cpu(cpu, &cpumask);
if (cpumask_empty(&cpumask))
/* Can't offline last IUCV enabled cpu. */
return -EINVAL;
iucv_retrieve_cpu(NULL);
if (!cpumask_empty(&iucv_irq_cpumask))
return 0;
smp_call_function_single(cpumask_first(&iucv_buffer_cpumask),
iucv_allow_cpu, NULL, 1);
return 0;
}
/**
* iucv_sever_pathid
* @pathid: path identification number.
* @userdata: 16-bytes of user data.
*
* Sever an iucv path to free up the pathid. Used internally.
*/
static int iucv_sever_pathid(u16 pathid, u8 *userdata)
{
union iucv_param *parm;
parm = iucv_param_irq[smp_processor_id()];
memset(parm, 0, sizeof(union iucv_param));
if (userdata)
memcpy(parm->ctrl.ipuser, userdata, sizeof(parm->ctrl.ipuser));
parm->ctrl.ippathid = pathid;
return iucv_call_b2f0(IUCV_SEVER, parm);
}
/**
* __iucv_cleanup_queue
* @dummy: unused dummy argument
*
* Nop function called via smp_call_function to force work items from
* pending external iucv interrupts to the work queue.
*/
static void __iucv_cleanup_queue(void *dummy)
{
}
/**
* iucv_cleanup_queue
*
* Function called after a path has been severed to find all remaining
* work items for the now stale pathid. The caller needs to hold the
* iucv_table_lock.
*/
static void iucv_cleanup_queue(void)
{
struct iucv_irq_list *p, *n;
/*
* When a path is severed, the pathid can be reused immediately
* on an iucv connect or a connection pending interrupt. Remove
* all entries from the task queue that refer to a stale pathid
* (iucv_path_table[ix] == NULL). Only then do the iucv connect
* or deliver the connection pending interrupt. To get all the
* pending interrupts force them to the work queue by calling
* an empty function on all cpus.
*/
smp_call_function(__iucv_cleanup_queue, NULL, 1);
spin_lock_irq(&iucv_queue_lock);
list_for_each_entry_safe(p, n, &iucv_task_queue, list) {
/* Remove stale work items from the task queue. */
if (iucv_path_table[p->data.ippathid] == NULL) {
list_del(&p->list);
kfree(p);
}
}
spin_unlock_irq(&iucv_queue_lock);
}
/**
* iucv_register:
* @handler: address of iucv handler structure
* @smp: != 0 indicates that the handler can deal with out of order messages
*
* Registers a driver with IUCV.
*
* Returns 0 on success, -ENOSYS if IUCV is not available, -ENOMEM if the
* memory allocation for the pathid table failed, or -EIO if
* IUCV_DECLARE_BUFFER failed on all cpus.
*/
int iucv_register(struct iucv_handler *handler, int smp)
{
int rc;
if (!iucv_available)
return -ENOSYS;
mutex_lock(&iucv_register_mutex);
if (!smp)
iucv_nonsmp_handler++;
if (list_empty(&iucv_handler_list)) {
rc = iucv_enable();
if (rc)
goto out_mutex;
} else if (!smp && iucv_nonsmp_handler == 1)
iucv_setmask_up();
INIT_LIST_HEAD(&handler->paths);
spin_lock_bh(&iucv_table_lock);
list_add_tail(&handler->list, &iucv_handler_list);
spin_unlock_bh(&iucv_table_lock);
rc = 0;
out_mutex:
mutex_unlock(&iucv_register_mutex);
return rc;
}
EXPORT_SYMBOL(iucv_register);
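/*
 * Illustrative sketch (not part of the original file): a minimal handler
 * registration as an IUCV exploiter might perform it. It assumes the
 * callback signatures declared for struct iucv_handler in <net/iucv/iucv.h>;
 * all example_* names are hypothetical.
 */
static int example_path_pending(struct iucv_path *path, u8 *ipvmid, u8 *ipuser)
{
        /*
         * Returning 0 tells the base layer this handler takes the path;
         * a real handler would typically also call iucv_path_accept().
         */
        return 0;
}

static void example_message_pending(struct iucv_path *path,
                                    struct iucv_message *msg)
{
        /* A real handler would receive or reject the message here. */
}

static struct iucv_handler example_handler = {
        .path_pending    = example_path_pending,
        .message_pending = example_message_pending,
};

static int __init example_register(void)
{
        /* smp != 0: the handler can deal with out-of-order messages. */
        return iucv_register(&example_handler, 1);
}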
/**
* iucv_unregister
* @handler: address of iucv handler structure
* @smp: != 0 indicates that the handler can deal with out of order messages
*
* Unregister driver from IUCV.
*/
void iucv_unregister(struct iucv_handler *handler, int smp)
{
struct iucv_path *p, *n;
mutex_lock(&iucv_register_mutex);
spin_lock_bh(&iucv_table_lock);
/* Remove handler from the iucv_handler_list. */
list_del_init(&handler->list);
/* Sever all pathids still referring to the handler. */
list_for_each_entry_safe(p, n, &handler->paths, list) {
iucv_sever_pathid(p->pathid, NULL);
iucv_path_table[p->pathid] = NULL;
list_del(&p->list);
iucv_path_free(p);
}
spin_unlock_bh(&iucv_table_lock);
if (!smp)
iucv_nonsmp_handler--;
if (list_empty(&iucv_handler_list))
iucv_disable();
else if (!smp && iucv_nonsmp_handler == 0)
iucv_setmask_mp();
mutex_unlock(&iucv_register_mutex);
}
EXPORT_SYMBOL(iucv_unregister);
static int iucv_reboot_event(struct notifier_block *this,
unsigned long event, void *ptr)
{
int i;
if (cpumask_empty(&iucv_irq_cpumask))
return NOTIFY_DONE;
cpus_read_lock();
on_each_cpu_mask(&iucv_irq_cpumask, iucv_block_cpu, NULL, 1);
preempt_disable();
for (i = 0; i < iucv_max_pathid; i++) {
if (iucv_path_table[i])
iucv_sever_pathid(i, NULL);
}
preempt_enable();
cpus_read_unlock();
iucv_disable();
return NOTIFY_DONE;
}
static struct notifier_block iucv_reboot_notifier = {
.notifier_call = iucv_reboot_event,
};
/**
* iucv_path_accept
* @path: address of iucv path structure
* @handler: address of iucv handler structure
* @userdata: 16 bytes of data reflected to the communication partner
* @private: private data passed to interrupt handlers for this path
*
* This function is issued after the user received a connection pending
* external interrupt and now wishes to complete the IUCV communication path.
*
* Returns the result of the CP IUCV call.
*/
int iucv_path_accept(struct iucv_path *path, struct iucv_handler *handler,
u8 *userdata, void *private)
{
union iucv_param *parm;
int rc;
local_bh_disable();
if (cpumask_empty(&iucv_buffer_cpumask)) {
rc = -EIO;
goto out;
}
/* Prepare parameter block. */
parm = iucv_param[smp_processor_id()];
memset(parm, 0, sizeof(union iucv_param));
parm->ctrl.ippathid = path->pathid;
parm->ctrl.ipmsglim = path->msglim;
if (userdata)
memcpy(parm->ctrl.ipuser, userdata, sizeof(parm->ctrl.ipuser));
parm->ctrl.ipflags1 = path->flags;
rc = iucv_call_b2f0(IUCV_ACCEPT, parm);
if (!rc) {
path->private = private;
path->msglim = parm->ctrl.ipmsglim;
path->flags = parm->ctrl.ipflags1;
}
out:
local_bh_enable();
return rc;
}
EXPORT_SYMBOL(iucv_path_accept);
/**
* iucv_path_connect
* @path: address of iucv path structure
* @handler: address of iucv handler structure
* @userid: 8-byte user identification
* @system: 8-byte target system identification
* @userdata: 16 bytes of data reflected to the communication partner
* @private: private data passed to interrupt handlers for this path
*
* This function establishes an IUCV path. Although the connect may complete
* successfully, you are not able to use the path until you receive an IUCV
* Connection Complete external interrupt.
*
* Returns the result of the CP IUCV call.
*/
int iucv_path_connect(struct iucv_path *path, struct iucv_handler *handler,
u8 *userid, u8 *system, u8 *userdata,
void *private)
{
union iucv_param *parm;
int rc;
spin_lock_bh(&iucv_table_lock);
iucv_cleanup_queue();
if (cpumask_empty(&iucv_buffer_cpumask)) {
rc = -EIO;
goto out;
}
parm = iucv_param[smp_processor_id()];
memset(parm, 0, sizeof(union iucv_param));
parm->ctrl.ipmsglim = path->msglim;
parm->ctrl.ipflags1 = path->flags;
if (userid) {
memcpy(parm->ctrl.ipvmid, userid, sizeof(parm->ctrl.ipvmid));
ASCEBC(parm->ctrl.ipvmid, sizeof(parm->ctrl.ipvmid));
EBC_TOUPPER(parm->ctrl.ipvmid, sizeof(parm->ctrl.ipvmid));
}
if (system) {
memcpy(parm->ctrl.iptarget, system,
sizeof(parm->ctrl.iptarget));
ASCEBC(parm->ctrl.iptarget, sizeof(parm->ctrl.iptarget));
EBC_TOUPPER(parm->ctrl.iptarget, sizeof(parm->ctrl.iptarget));
}
if (userdata)
memcpy(parm->ctrl.ipuser, userdata, sizeof(parm->ctrl.ipuser));
rc = iucv_call_b2f0(IUCV_CONNECT, parm);
if (!rc) {
if (parm->ctrl.ippathid < iucv_max_pathid) {
path->pathid = parm->ctrl.ippathid;
path->msglim = parm->ctrl.ipmsglim;
path->flags = parm->ctrl.ipflags1;
path->handler = handler;
path->private = private;
list_add_tail(&path->list, &handler->paths);
iucv_path_table[path->pathid] = path;
} else {
iucv_sever_pathid(parm->ctrl.ippathid,
iucv_error_pathid);
rc = -EIO;
}
}
out:
spin_unlock_bh(&iucv_table_lock);
return rc;
}
EXPORT_SYMBOL(iucv_path_connect);
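/*
 * Illustrative sketch (not part of the original file): establishing an
 * outgoing path. It assumes iucv_path_alloc()/iucv_path_free() from
 * <net/iucv/iucv.h> and a previously registered handler (for instance the
 * hypothetical example_handler sketched after iucv_register above). The
 * message limit of 0x10 is an arbitrary choice for the example.
 */
static int example_connect(struct iucv_handler *handler, u8 *userid,
                           u8 *userdata, void *private)
{
        struct iucv_path *path;
        int rc;

        path = iucv_path_alloc(0x10, 0, GFP_KERNEL);
        if (!path)
                return -ENOMEM;

        rc = iucv_path_connect(path, handler, userid, NULL, userdata, private);
        if (rc) {
                iucv_path_free(path);
                return rc;
        }
        /*
         * Even on success the path may only be used after the handler's
         * path_complete() callback has been invoked.
         */
        return 0;
}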
/**
* iucv_path_quiesce:
* @path: address of iucv path structure
* @userdata: 16 bytes of data reflected to the communication partner
*
* This function temporarily suspends incoming messages on an IUCV path.
* You can later reactivate the path by invoking the iucv_resume function.
*
* Returns the result from the CP IUCV call.
*/
int iucv_path_quiesce(struct iucv_path *path, u8 *userdata)
{
union iucv_param *parm;
int rc;
local_bh_disable();
if (cpumask_empty(&iucv_buffer_cpumask)) {
rc = -EIO;
goto out;
}
parm = iucv_param[smp_processor_id()];
memset(parm, 0, sizeof(union iucv_param));
if (userdata)
memcpy(parm->ctrl.ipuser, userdata, sizeof(parm->ctrl.ipuser));
parm->ctrl.ippathid = path->pathid;
rc = iucv_call_b2f0(IUCV_QUIESCE, parm);
out:
local_bh_enable();
return rc;
}
EXPORT_SYMBOL(iucv_path_quiesce);
/**
* iucv_path_resume:
* @path: address of iucv path structure
* @userdata: 16 bytes of data reflected to the communication partner
*
* This function resumes incoming messages on an IUCV path that has
* been stopped with iucv_path_quiesce.
*
* Returns the result from the CP IUCV call.
*/
int iucv_path_resume(struct iucv_path *path, u8 *userdata)
{
union iucv_param *parm;
int rc;
local_bh_disable();
if (cpumask_empty(&iucv_buffer_cpumask)) {
rc = -EIO;
goto out;
}
parm = iucv_param[smp_processor_id()];
memset(parm, 0, sizeof(union iucv_param));
if (userdata)
memcpy(parm->ctrl.ipuser, userdata, sizeof(parm->ctrl.ipuser));
parm->ctrl.ippathid = path->pathid;
rc = iucv_call_b2f0(IUCV_RESUME, parm);
out:
local_bh_enable();
return rc;
}
/**
* iucv_path_sever
* @path: address of iucv path structure
* @userdata: 16 bytes of data reflected to the communication partner
*
* This function terminates an IUCV path.
*
* Returns the result from the CP IUCV call.
*/
int iucv_path_sever(struct iucv_path *path, u8 *userdata)
{
int rc;
preempt_disable();
if (cpumask_empty(&iucv_buffer_cpumask)) {
rc = -EIO;
goto out;
}
if (iucv_active_cpu != smp_processor_id())
spin_lock_bh(&iucv_table_lock);
rc = iucv_sever_pathid(path->pathid, userdata);
iucv_path_table[path->pathid] = NULL;
list_del_init(&path->list);
if (iucv_active_cpu != smp_processor_id())
spin_unlock_bh(&iucv_table_lock);
out:
preempt_enable();
return rc;
}
EXPORT_SYMBOL(iucv_path_sever);
/**
* iucv_message_purge
* @path: address of iucv path structure
* @msg: address of iucv msg structure
* @srccls: source class of message
*
* Cancels a message you have sent.
*
* Returns the result from the CP IUCV call.
*/
int iucv_message_purge(struct iucv_path *path, struct iucv_message *msg,
u32 srccls)
{
union iucv_param *parm;
int rc;
local_bh_disable();
if (cpumask_empty(&iucv_buffer_cpumask)) {
rc = -EIO;
goto out;
}
parm = iucv_param[smp_processor_id()];
memset(parm, 0, sizeof(union iucv_param));
parm->purge.ippathid = path->pathid;
parm->purge.ipmsgid = msg->id;
parm->purge.ipsrccls = srccls;
parm->purge.ipflags1 = IUCV_IPSRCCLS | IUCV_IPFGMID | IUCV_IPFGPID;
rc = iucv_call_b2f0(IUCV_PURGE, parm);
if (!rc) {
msg->audit = (*(u32 *) &parm->purge.ipaudit) >> 8;
msg->tag = parm->purge.ipmsgtag;
}
out:
local_bh_enable();
return rc;
}
EXPORT_SYMBOL(iucv_message_purge);
/**
* iucv_message_receive_iprmdata
* @path: address of iucv path structure
* @msg: address of iucv msg structure
* @flags: how the message is received (IUCV_IPBUFLST)
* @buffer: address of data buffer or address of struct iucv_array
* @size: length of data buffer
* @residual: on return, number of bytes of @buffer not used for the message (may be NULL)
*
* Internal function used by iucv_message_receive and __iucv_message_receive
* to receive RMDATA data stored in struct iucv_message.
*/
static int iucv_message_receive_iprmdata(struct iucv_path *path,
struct iucv_message *msg,
u8 flags, void *buffer,
size_t size, size_t *residual)
{
struct iucv_array *array;
u8 *rmmsg;
size_t copy;
/*
* Message is 8 bytes long and has been stored to the
* message descriptor itself.
*/
if (residual)
*residual = abs(size - 8);
rmmsg = msg->rmmsg;
if (flags & IUCV_IPBUFLST) {
/* Copy to struct iucv_array. */
size = (size < 8) ? size : 8;
for (array = buffer; size > 0; array++) {
copy = min_t(size_t, size, array->length);
memcpy((u8 *)(addr_t) array->address,
rmmsg, copy);
rmmsg += copy;
size -= copy;
}
} else {
/* Copy to direct buffer. */
memcpy(buffer, rmmsg, min_t(size_t, size, 8));
}
return 0;
}
/**
* __iucv_message_receive
* @path: address of iucv path structure
* @msg: address of iucv msg structure
* @flags: how the message is received (IUCV_IPBUFLST)
* @buffer: address of data buffer or address of struct iucv_array
* @size: length of data buffer
* @residual: on return, number of bytes of @buffer not used for the message (may be NULL)
*
* This function receives messages that are being sent to you over
* established paths. This function will deal with RMDATA messages
* embedded in struct iucv_message as well.
*
* Locking: no locking
*
* Returns the result from the CP IUCV call.
*/
int __iucv_message_receive(struct iucv_path *path, struct iucv_message *msg,
u8 flags, void *buffer, size_t size, size_t *residual)
{
union iucv_param *parm;
int rc;
if (msg->flags & IUCV_IPRMDATA)
return iucv_message_receive_iprmdata(path, msg, flags,
buffer, size, residual);
if (cpumask_empty(&iucv_buffer_cpumask))
return -EIO;
parm = iucv_param[smp_processor_id()];
memset(parm, 0, sizeof(union iucv_param));
parm->db.ipbfadr1 = (u32)(addr_t) buffer;
parm->db.ipbfln1f = (u32) size;
parm->db.ipmsgid = msg->id;
parm->db.ippathid = path->pathid;
parm->db.iptrgcls = msg->class;
parm->db.ipflags1 = (flags | IUCV_IPFGPID |
IUCV_IPFGMID | IUCV_IPTRGCLS);
rc = iucv_call_b2f0(IUCV_RECEIVE, parm);
if (!rc || rc == 5) {
msg->flags = parm->db.ipflags1;
if (residual)
*residual = parm->db.ipbfln1f;
}
return rc;
}
EXPORT_SYMBOL(__iucv_message_receive);
/**
* iucv_message_receive
* @path: address of iucv path structure
* @msg: address of iucv msg structure
* @flags: how the message is received (IUCV_IPBUFLST)
* @buffer: address of data buffer or address of struct iucv_array
* @size: length of data buffer
* @residual: on return, number of bytes of @buffer not used for the message (may be NULL)
*
* This function receives messages that are being sent to you over
* established paths. This function will deal with RMDATA messages
* embedded in struct iucv_message as well.
*
* Locking: local_bh_enable/local_bh_disable
*
* Returns the result from the CP IUCV call.
*/
int iucv_message_receive(struct iucv_path *path, struct iucv_message *msg,
u8 flags, void *buffer, size_t size, size_t *residual)
{
int rc;
if (msg->flags & IUCV_IPRMDATA)
return iucv_message_receive_iprmdata(path, msg, flags,
buffer, size, residual);
local_bh_disable();
rc = __iucv_message_receive(path, msg, flags, buffer, size, residual);
local_bh_enable();
return rc;
}
EXPORT_SYMBOL(iucv_message_receive);
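/*
 * Illustrative sketch (not part of the original file): a message_pending()
 * implementation that pulls the announced message into a flat kernel buffer,
 * fleshing out the empty callback sketched after iucv_register above. Buffer
 * handling is deliberately simplified; the function name is hypothetical.
 */
static void example_receive_pending(struct iucv_path *path,
                                    struct iucv_message *msg)
{
        size_t residual;
        void *buf;
        int rc;

        buf = kmalloc(msg->length, GFP_ATOMIC);
        if (!buf) {
                iucv_message_reject(path, msg);
                return;
        }
        /* flags 0: plain receive into a single buffer (no IUCV_IPBUFLST). */
        rc = iucv_message_receive(path, msg, 0, buf, msg->length, &residual);
        if (rc)
                pr_debug("receive failed with rc %d\n", rc);
        kfree(buf);
}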
/**
* iucv_message_reject
* @path: address of iucv path structure
* @msg: address of iucv msg structure
*
* The reject function refuses a specified message. Between the time you
* are notified of a message and the time that you complete the message,
* the message may be rejected.
*
* Returns the result from the CP IUCV call.
*/
int iucv_message_reject(struct iucv_path *path, struct iucv_message *msg)
{
union iucv_param *parm;
int rc;
local_bh_disable();
if (cpumask_empty(&iucv_buffer_cpumask)) {
rc = -EIO;
goto out;
}
parm = iucv_param[smp_processor_id()];
memset(parm, 0, sizeof(union iucv_param));
parm->db.ippathid = path->pathid;
parm->db.ipmsgid = msg->id;
parm->db.iptrgcls = msg->class;
parm->db.ipflags1 = (IUCV_IPTRGCLS | IUCV_IPFGMID | IUCV_IPFGPID);
rc = iucv_call_b2f0(IUCV_REJECT, parm);
out:
local_bh_enable();
return rc;
}
EXPORT_SYMBOL(iucv_message_reject);
/**
* iucv_message_reply
* @path: address of iucv path structure
* @msg: address of iucv msg structure
* @flags: how the reply is sent (IUCV_IPRMDATA, IUCV_IPPRTY, IUCV_IPBUFLST)
* @reply: address of reply data buffer or address of struct iucv_array
* @size: length of reply data buffer
*
* This function responds to the two-way messages that you receive. You
* must completely identify the message to which you wish to reply, i.e.,
* pathid, msgid, and trgcls. Prmmsg signifies the data is moved into
* the parameter list.
*
* Returns the result from the CP IUCV call.
*/
int iucv_message_reply(struct iucv_path *path, struct iucv_message *msg,
u8 flags, void *reply, size_t size)
{
union iucv_param *parm;
int rc;
local_bh_disable();
if (cpumask_empty(&iucv_buffer_cpumask)) {
rc = -EIO;
goto out;
}
parm = iucv_param[smp_processor_id()];
memset(parm, 0, sizeof(union iucv_param));
if (flags & IUCV_IPRMDATA) {
parm->dpl.ippathid = path->pathid;
parm->dpl.ipflags1 = flags;
parm->dpl.ipmsgid = msg->id;
parm->dpl.iptrgcls = msg->class;
memcpy(parm->dpl.iprmmsg, reply, min_t(size_t, size, 8));
} else {
parm->db.ipbfadr1 = (u32)(addr_t) reply;
parm->db.ipbfln1f = (u32) size;
parm->db.ippathid = path->pathid;
parm->db.ipflags1 = flags;
parm->db.ipmsgid = msg->id;
parm->db.iptrgcls = msg->class;
}
rc = iucv_call_b2f0(IUCV_REPLY, parm);
out:
local_bh_enable();
return rc;
}
EXPORT_SYMBOL(iucv_message_reply);
/**
* __iucv_message_send
* @path: address of iucv path structure
* @msg: address of iucv msg structure
* @flags: how the message is sent (IUCV_IPRMDATA, IUCV_IPPRTY, IUCV_IPBUFLST)
* @srccls: source class of message
* @buffer: address of send buffer or address of struct iucv_array
* @size: length of send buffer
*
* This function transmits data to another application. Data to be
* transmitted is in a buffer. This is a one-way message; the receiver
* will not reply to it.
*
* Locking: no locking
*
* Returns the result from the CP IUCV call.
*/
int __iucv_message_send(struct iucv_path *path, struct iucv_message *msg,
u8 flags, u32 srccls, void *buffer, size_t size)
{
union iucv_param *parm;
int rc;
if (cpumask_empty(&iucv_buffer_cpumask)) {
rc = -EIO;
goto out;
}
parm = iucv_param[smp_processor_id()];
memset(parm, 0, sizeof(union iucv_param));
if (flags & IUCV_IPRMDATA) {
/* Message of 8 bytes can be placed into the parameter list. */
parm->dpl.ippathid = path->pathid;
parm->dpl.ipflags1 = flags | IUCV_IPNORPY;
parm->dpl.iptrgcls = msg->class;
parm->dpl.ipsrccls = srccls;
parm->dpl.ipmsgtag = msg->tag;
memcpy(parm->dpl.iprmmsg, buffer, 8);
} else {
parm->db.ipbfadr1 = (u32)(addr_t) buffer;
parm->db.ipbfln1f = (u32) size;
parm->db.ippathid = path->pathid;
parm->db.ipflags1 = flags | IUCV_IPNORPY;
parm->db.iptrgcls = msg->class;
parm->db.ipsrccls = srccls;
parm->db.ipmsgtag = msg->tag;
}
rc = iucv_call_b2f0(IUCV_SEND, parm);
if (!rc)
msg->id = parm->db.ipmsgid;
out:
return rc;
}
EXPORT_SYMBOL(__iucv_message_send);
/**
* iucv_message_send
* @path: address of iucv path structure
* @msg: address of iucv msg structure
* @flags: how the message is sent (IUCV_IPRMDATA, IUCV_IPPRTY, IUCV_IPBUFLST)
* @srccls: source class of message
* @buffer: address of send buffer or address of struct iucv_array
* @size: length of send buffer
*
* This function transmits data to another application. Data to be
* transmitted is in a buffer. This is a one-way message; the receiver
* will not reply to it.
*
* Locking: local_bh_enable/local_bh_disable
*
* Returns the result from the CP IUCV call.
*/
int iucv_message_send(struct iucv_path *path, struct iucv_message *msg,
u8 flags, u32 srccls, void *buffer, size_t size)
{
int rc;
local_bh_disable();
rc = __iucv_message_send(path, msg, flags, srccls, buffer, size);
local_bh_enable();
return rc;
}
EXPORT_SYMBOL(iucv_message_send);
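/*
 * Illustrative sketch (not part of the original file): sending a one-way
 * message over an established path. Using class 0, tag 0 and srccls 0 is an
 * arbitrary assumption for the example.
 */
static int example_send(struct iucv_path *path, void *data, size_t len)
{
        struct iucv_message txmsg;
        int rc;

        memset(&txmsg, 0, sizeof(txmsg));
        /* flags 0: plain buffer send, no parameter-list (IPRMDATA) data. */
        rc = iucv_message_send(path, &txmsg, 0, 0, data, len);
        if (!rc)
                pr_debug("message %u queued\n", txmsg.id);
        return rc;
}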
/**
* iucv_message_send2way
* @path: address of iucv path structure
* @msg: address of iucv msg structure
* @flags: how the message is sent and the reply is received
* (IUCV_IPRMDATA, IUCV_IPBUFLST, IUCV_IPPRTY, IUCV_ANSLST)
* @srccls: source class of message
* @buffer: address of send buffer or address of struct iucv_array
* @size: length of send buffer
* @answer: address of answer buffer or address of struct iucv_array
* @asize: size of reply buffer
* @residual: ignored
*
* This function transmits data to another application. Data to be
* transmitted is in a buffer. The receiver of the send is expected to
* reply to the message and a buffer is provided into which IUCV moves
* the reply to this message.
*
* Returns the result from the CP IUCV call.
*/
int iucv_message_send2way(struct iucv_path *path, struct iucv_message *msg,
u8 flags, u32 srccls, void *buffer, size_t size,
void *answer, size_t asize, size_t *residual)
{
union iucv_param *parm;
int rc;
local_bh_disable();
if (cpumask_empty(&iucv_buffer_cpumask)) {
rc = -EIO;
goto out;
}
parm = iucv_param[smp_processor_id()];
memset(parm, 0, sizeof(union iucv_param));
if (flags & IUCV_IPRMDATA) {
parm->dpl.ippathid = path->pathid;
parm->dpl.ipflags1 = path->flags; /* priority message */
parm->dpl.iptrgcls = msg->class;
parm->dpl.ipsrccls = srccls;
parm->dpl.ipmsgtag = msg->tag;
parm->dpl.ipbfadr2 = (u32)(addr_t) answer;
parm->dpl.ipbfln2f = (u32) asize;
memcpy(parm->dpl.iprmmsg, buffer, 8);
} else {
parm->db.ippathid = path->pathid;
parm->db.ipflags1 = path->flags; /* priority message */
parm->db.iptrgcls = msg->class;
parm->db.ipsrccls = srccls;
parm->db.ipmsgtag = msg->tag;
parm->db.ipbfadr1 = (u32)(addr_t) buffer;
parm->db.ipbfln1f = (u32) size;
parm->db.ipbfadr2 = (u32)(addr_t) answer;
parm->db.ipbfln2f = (u32) asize;
}
rc = iucv_call_b2f0(IUCV_SEND, parm);
if (!rc)
msg->id = parm->db.ipmsgid;
out:
local_bh_enable();
return rc;
}
EXPORT_SYMBOL(iucv_message_send2way);
struct iucv_path_pending {
u16 ippathid;
u8 ipflags1;
u8 iptype;
u16 ipmsglim;
u16 res1;
u8 ipvmid[8];
u8 ipuser[16];
u32 res3;
u8 ippollfg;
u8 res4[3];
} __packed;
/**
* iucv_path_pending
* @data: Pointer to external interrupt buffer
*
* Process connection pending work item. Called from tasklet while holding
* iucv_table_lock.
*/
static void iucv_path_pending(struct iucv_irq_data *data)
{
struct iucv_path_pending *ipp = (void *) data;
struct iucv_handler *handler;
struct iucv_path *path;
char *error;
BUG_ON(iucv_path_table[ipp->ippathid]);
/* New pathid, handler found. Create a new path struct. */
error = iucv_error_no_memory;
path = iucv_path_alloc(ipp->ipmsglim, ipp->ipflags1, GFP_ATOMIC);
if (!path)
goto out_sever;
path->pathid = ipp->ippathid;
iucv_path_table[path->pathid] = path;
EBCASC(ipp->ipvmid, 8);
/* Call registered handler until one is found that wants the path. */
list_for_each_entry(handler, &iucv_handler_list, list) {
if (!handler->path_pending)
continue;
/*
* Add path to handler to allow a call to iucv_path_sever
* inside the path_pending function. If the handler returns
* an error remove the path from the handler again.
*/
list_add(&path->list, &handler->paths);
path->handler = handler;
if (!handler->path_pending(path, ipp->ipvmid, ipp->ipuser))
return;
list_del(&path->list);
path->handler = NULL;
}
/* No handler wanted the path. */
iucv_path_table[path->pathid] = NULL;
iucv_path_free(path);
error = iucv_error_no_listener;
out_sever:
iucv_sever_pathid(ipp->ippathid, error);
}
struct iucv_path_complete {
u16 ippathid;
u8 ipflags1;
u8 iptype;
u16 ipmsglim;
u16 res1;
u8 res2[8];
u8 ipuser[16];
u32 res3;
u8 ippollfg;
u8 res4[3];
} __packed;
/**
* iucv_path_complete
* @data: Pointer to external interrupt buffer
*
* Process connection complete work item. Called from tasklet while holding
* iucv_table_lock.
*/
static void iucv_path_complete(struct iucv_irq_data *data)
{
struct iucv_path_complete *ipc = (void *) data;
struct iucv_path *path = iucv_path_table[ipc->ippathid];
if (path)
path->flags = ipc->ipflags1;
if (path && path->handler && path->handler->path_complete)
path->handler->path_complete(path, ipc->ipuser);
}
struct iucv_path_severed {
u16 ippathid;
u8 res1;
u8 iptype;
u32 res2;
u8 res3[8];
u8 ipuser[16];
u32 res4;
u8 ippollfg;
u8 res5[3];
} __packed;
/**
* iucv_path_severed
* @data: Pointer to external interrupt buffer
*
* Process connection severed work item. Called from tasklet while holding
* iucv_table_lock.
*/
static void iucv_path_severed(struct iucv_irq_data *data)
{
struct iucv_path_severed *ips = (void *) data;
struct iucv_path *path = iucv_path_table[ips->ippathid];
if (!path || !path->handler) /* Already severed */
return;
if (path->handler->path_severed)
path->handler->path_severed(path, ips->ipuser);
else {
iucv_sever_pathid(path->pathid, NULL);
iucv_path_table[path->pathid] = NULL;
list_del(&path->list);
iucv_path_free(path);
}
}
struct iucv_path_quiesced {
u16 ippathid;
u8 res1;
u8 iptype;
u32 res2;
u8 res3[8];
u8 ipuser[16];
u32 res4;
u8 ippollfg;
u8 res5[3];
} __packed;
/**
* iucv_path_quiesced
* @data: Pointer to external interrupt buffer
*
* Process connection quiesced work item. Called from tasklet while holding
* iucv_table_lock.
*/
static void iucv_path_quiesced(struct iucv_irq_data *data)
{
struct iucv_path_quiesced *ipq = (void *) data;
struct iucv_path *path = iucv_path_table[ipq->ippathid];
if (path && path->handler && path->handler->path_quiesced)
path->handler->path_quiesced(path, ipq->ipuser);
}
struct iucv_path_resumed {
u16 ippathid;
u8 res1;
u8 iptype;
u32 res2;
u8 res3[8];
u8 ipuser[16];
u32 res4;
u8 ippollfg;
u8 res5[3];
} __packed;
/**
* iucv_path_resumed
* @data: Pointer to external interrupt buffer
*
* Process connection resumed work item. Called from tasklet while holding
* iucv_table_lock.
*/
static void iucv_path_resumed(struct iucv_irq_data *data)
{
struct iucv_path_resumed *ipr = (void *) data;
struct iucv_path *path = iucv_path_table[ipr->ippathid];
if (path && path->handler && path->handler->path_resumed)
path->handler->path_resumed(path, ipr->ipuser);
}
struct iucv_message_complete {
u16 ippathid;
u8 ipflags1;
u8 iptype;
u32 ipmsgid;
u32 ipaudit;
u8 iprmmsg[8];
u32 ipsrccls;
u32 ipmsgtag;
u32 res;
u32 ipbfln2f;
u8 ippollfg;
u8 res2[3];
} __packed;
/**
* iucv_message_complete
* @data: Pointer to external interrupt buffer
*
* Process message complete work item. Called from tasklet while holding
* iucv_table_lock.
*/
static void iucv_message_complete(struct iucv_irq_data *data)
{
struct iucv_message_complete *imc = (void *) data;
struct iucv_path *path = iucv_path_table[imc->ippathid];
struct iucv_message msg;
if (path && path->handler && path->handler->message_complete) {
msg.flags = imc->ipflags1;
msg.id = imc->ipmsgid;
msg.audit = imc->ipaudit;
memcpy(msg.rmmsg, imc->iprmmsg, 8);
msg.class = imc->ipsrccls;
msg.tag = imc->ipmsgtag;
msg.length = imc->ipbfln2f;
path->handler->message_complete(path, &msg);
}
}
struct iucv_message_pending {
u16 ippathid;
u8 ipflags1;
u8 iptype;
u32 ipmsgid;
u32 iptrgcls;
struct {
union {
u32 iprmmsg1_u32;
u8 iprmmsg1[4];
} ln1msg1;
union {
u32 ipbfln1f;
u8 iprmmsg2[4];
} ln1msg2;
} rmmsg;
u32 res1[3];
u32 ipbfln2f;
u8 ippollfg;
u8 res2[3];
} __packed;
/**
* iucv_message_pending
* @data: Pointer to external interrupt buffer
*
* Process message pending work item. Called from tasklet while holding
* iucv_table_lock.
*/
static void iucv_message_pending(struct iucv_irq_data *data)
{
struct iucv_message_pending *imp = (void *) data;
struct iucv_path *path = iucv_path_table[imp->ippathid];
struct iucv_message msg;
if (path && path->handler && path->handler->message_pending) {
msg.flags = imp->ipflags1;
msg.id = imp->ipmsgid;
msg.class = imp->iptrgcls;
if (imp->ipflags1 & IUCV_IPRMDATA) {
memcpy(msg.rmmsg, &imp->rmmsg, 8);
msg.length = 8;
} else
msg.length = imp->rmmsg.ln1msg2.ipbfln1f;
msg.reply_size = imp->ipbfln2f;
path->handler->message_pending(path, &msg);
}
}
/*
* iucv_tasklet_fn:
*
* This tasklet loops over the queue of irq buffers created by
* iucv_external_interrupt, calls the appropriate action handler
* and then frees the buffer.
*/
static void iucv_tasklet_fn(unsigned long ignored)
{
typedef void iucv_irq_fn(struct iucv_irq_data *);
static iucv_irq_fn *irq_fn[] = {
[0x02] = iucv_path_complete,
[0x03] = iucv_path_severed,
[0x04] = iucv_path_quiesced,
[0x05] = iucv_path_resumed,
[0x06] = iucv_message_complete,
[0x07] = iucv_message_complete,
[0x08] = iucv_message_pending,
[0x09] = iucv_message_pending,
};
LIST_HEAD(task_queue);
struct iucv_irq_list *p, *n;
/* Serialize tasklet, iucv_path_sever and iucv_path_connect. */
if (!spin_trylock(&iucv_table_lock)) {
tasklet_schedule(&iucv_tasklet);
return;
}
iucv_active_cpu = smp_processor_id();
spin_lock_irq(&iucv_queue_lock);
list_splice_init(&iucv_task_queue, &task_queue);
spin_unlock_irq(&iucv_queue_lock);
list_for_each_entry_safe(p, n, &task_queue, list) {
list_del_init(&p->list);
irq_fn[p->data.iptype](&p->data);
kfree(p);
}
iucv_active_cpu = -1;
spin_unlock(&iucv_table_lock);
}
/*
* iucv_work_fn:
*
* This work function loops over the queue of path pending irq blocks
* created by iucv_external_interrupt, calls the appropriate action
* handler and then frees the buffer.
*/
static void iucv_work_fn(struct work_struct *work)
{
LIST_HEAD(work_queue);
struct iucv_irq_list *p, *n;
/* Serialize tasklet, iucv_path_sever and iucv_path_connect. */
spin_lock_bh(&iucv_table_lock);
iucv_active_cpu = smp_processor_id();
spin_lock_irq(&iucv_queue_lock);
list_splice_init(&iucv_work_queue, &work_queue);
spin_unlock_irq(&iucv_queue_lock);
iucv_cleanup_queue();
list_for_each_entry_safe(p, n, &work_queue, list) {
list_del_init(&p->list);
iucv_path_pending(&p->data);
kfree(p);
}
iucv_active_cpu = -1;
spin_unlock_bh(&iucv_table_lock);
}
/*
* iucv_external_interrupt
*
* Handles external interrupts coming in from CP.
* Places the interrupt buffer on a queue and schedules iucv_tasklet_fn().
*/
static void iucv_external_interrupt(struct ext_code ext_code,
unsigned int param32, unsigned long param64)
{
struct iucv_irq_data *p;
struct iucv_irq_list *work;
inc_irq_stat(IRQEXT_IUC);
p = iucv_irq_data[smp_processor_id()];
if (p->ippathid >= iucv_max_pathid) {
WARN_ON(p->ippathid >= iucv_max_pathid);
iucv_sever_pathid(p->ippathid, iucv_error_no_listener);
return;
}
BUG_ON(p->iptype < 0x01 || p->iptype > 0x09);
work = kmalloc(sizeof(struct iucv_irq_list), GFP_ATOMIC);
if (!work) {
pr_warn("iucv_external_interrupt: out of memory\n");
return;
}
memcpy(&work->data, p, sizeof(work->data));
spin_lock(&iucv_queue_lock);
if (p->iptype == 0x01) {
/* Path pending interrupt. */
list_add_tail(&work->list, &iucv_work_queue);
schedule_work(&iucv_work);
} else {
/* The other interrupts. */
list_add_tail(&work->list, &iucv_task_queue);
tasklet_schedule(&iucv_tasklet);
}
spin_unlock(&iucv_queue_lock);
}
struct iucv_interface iucv_if = {
.message_receive = iucv_message_receive,
.__message_receive = __iucv_message_receive,
.message_reply = iucv_message_reply,
.message_reject = iucv_message_reject,
.message_send = iucv_message_send,
.__message_send = __iucv_message_send,
.message_send2way = iucv_message_send2way,
.message_purge = iucv_message_purge,
.path_accept = iucv_path_accept,
.path_connect = iucv_path_connect,
.path_quiesce = iucv_path_quiesce,
.path_resume = iucv_path_resume,
.path_sever = iucv_path_sever,
.iucv_register = iucv_register,
.iucv_unregister = iucv_unregister,
.bus = NULL,
.root = NULL,
};
EXPORT_SYMBOL(iucv_if);
static enum cpuhp_state iucv_online;
/**
* iucv_init
*
* Allocates and initializes various data structures.
*/
static int __init iucv_init(void)
{
int rc;
if (!MACHINE_IS_VM) {
rc = -EPROTONOSUPPORT;
goto out;
}
ctl_set_bit(0, 1);
rc = iucv_query_maxconn();
if (rc)
goto out_ctl;
rc = register_external_irq(EXT_IRQ_IUCV, iucv_external_interrupt);
if (rc)
goto out_ctl;
iucv_root = root_device_register("iucv");
if (IS_ERR(iucv_root)) {
rc = PTR_ERR(iucv_root);
goto out_int;
}
rc = cpuhp_setup_state(CPUHP_NET_IUCV_PREPARE, "net/iucv:prepare",
iucv_cpu_prepare, iucv_cpu_dead);
if (rc)
goto out_dev;
rc = cpuhp_setup_state(CPUHP_AP_ONLINE_DYN, "net/iucv:online",
iucv_cpu_online, iucv_cpu_down_prep);
if (rc < 0)
goto out_prep;
iucv_online = rc;
rc = register_reboot_notifier(&iucv_reboot_notifier);
if (rc)
goto out_remove_hp;
ASCEBC(iucv_error_no_listener, 16);
ASCEBC(iucv_error_no_memory, 16);
ASCEBC(iucv_error_pathid, 16);
iucv_available = 1;
rc = bus_register(&iucv_bus);
if (rc)
goto out_reboot;
iucv_if.root = iucv_root;
iucv_if.bus = &iucv_bus;
return 0;
out_reboot:
unregister_reboot_notifier(&iucv_reboot_notifier);
out_remove_hp:
cpuhp_remove_state(iucv_online);
out_prep:
cpuhp_remove_state(CPUHP_NET_IUCV_PREPARE);
out_dev:
root_device_unregister(iucv_root);
out_int:
unregister_external_irq(EXT_IRQ_IUCV, iucv_external_interrupt);
out_ctl:
ctl_clear_bit(0, 1);
out:
return rc;
}
/**
* iucv_exit
*
* Frees everything allocated from iucv_init.
*/
static void __exit iucv_exit(void)
{
struct iucv_irq_list *p, *n;
spin_lock_irq(&iucv_queue_lock);
list_for_each_entry_safe(p, n, &iucv_task_queue, list)
kfree(p);
list_for_each_entry_safe(p, n, &iucv_work_queue, list)
kfree(p);
spin_unlock_irq(&iucv_queue_lock);
unregister_reboot_notifier(&iucv_reboot_notifier);
cpuhp_remove_state_nocalls(iucv_online);
cpuhp_remove_state(CPUHP_NET_IUCV_PREPARE);
root_device_unregister(iucv_root);
bus_unregister(&iucv_bus);
unregister_external_irq(EXT_IRQ_IUCV, iucv_external_interrupt);
}
subsys_initcall(iucv_init);
module_exit(iucv_exit);
MODULE_AUTHOR("(C) 2001 IBM Corp. by Fritz Elfert ([email protected])");
MODULE_DESCRIPTION("Linux for S/390 IUCV lowlevel driver");
MODULE_LICENSE("GPL");
| linux-master | net/iucv/iucv.c |
// SPDX-License-Identifier: GPL-2.0
#include <linux/delay.h>
#include <linux/err.h>
#include <linux/interrupt.h>
#include <linux/io.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/of.h>
#include <linux/platform_device.h>
#include <linux/seq_file.h>
#include <linux/slab.h>
#include <linux/spmi.h>
/*
* SPMI register addr
*/
#define SPMI_CHANNEL_OFFSET 0x0300
#define SPMI_SLAVE_OFFSET 0x20
#define SPMI_APB_SPMI_CMD_BASE_ADDR 0x0100
#define SPMI_APB_SPMI_WDATA0_BASE_ADDR 0x0104
#define SPMI_APB_SPMI_WDATA1_BASE_ADDR 0x0108
#define SPMI_APB_SPMI_WDATA2_BASE_ADDR 0x010c
#define SPMI_APB_SPMI_WDATA3_BASE_ADDR 0x0110
#define SPMI_APB_SPMI_STATUS_BASE_ADDR 0x0200
#define SPMI_APB_SPMI_RDATA0_BASE_ADDR 0x0204
#define SPMI_APB_SPMI_RDATA1_BASE_ADDR 0x0208
#define SPMI_APB_SPMI_RDATA2_BASE_ADDR 0x020c
#define SPMI_APB_SPMI_RDATA3_BASE_ADDR 0x0210
#define SPMI_PER_DATAREG_BYTE 4
/*
* SPMI cmd register
*/
#define SPMI_APB_SPMI_CMD_EN BIT(31)
#define SPMI_APB_SPMI_CMD_TYPE_OFFSET 24
#define SPMI_APB_SPMI_CMD_LENGTH_OFFSET 20
#define SPMI_APB_SPMI_CMD_SLAVEID_OFFSET 16
#define SPMI_APB_SPMI_CMD_ADDR_OFFSET 0
/* Command Opcodes */
enum spmi_controller_cmd_op_code {
SPMI_CMD_REG_ZERO_WRITE = 0,
SPMI_CMD_REG_WRITE = 1,
SPMI_CMD_REG_READ = 2,
SPMI_CMD_EXT_REG_WRITE = 3,
SPMI_CMD_EXT_REG_READ = 4,
SPMI_CMD_EXT_REG_WRITE_L = 5,
SPMI_CMD_EXT_REG_READ_L = 6,
SPMI_CMD_REG_RESET = 7,
SPMI_CMD_REG_SLEEP = 8,
SPMI_CMD_REG_SHUTDOWN = 9,
SPMI_CMD_REG_WAKEUP = 10,
};
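/*
 * Illustrative sketch (not part of the original file): how a command word is
 * assembled from the fields defined above. It mirrors the packing done later
 * in spmi_read_cmd()/spmi_write_cmd(); the helper name is hypothetical.
 */
static inline u32 example_build_cmd(u8 op_code, u8 sid, u16 addr, size_t bc)
{
        return SPMI_APB_SPMI_CMD_EN |
               (op_code << SPMI_APB_SPMI_CMD_TYPE_OFFSET) |
               ((bc - 1) << SPMI_APB_SPMI_CMD_LENGTH_OFFSET) |
               ((sid & 0xf) << SPMI_APB_SPMI_CMD_SLAVEID_OFFSET) |
               ((addr & 0xffff) << SPMI_APB_SPMI_CMD_ADDR_OFFSET);
}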
/*
* SPMI status register
*/
#define SPMI_APB_TRANS_DONE BIT(0)
#define SPMI_APB_TRANS_FAIL BIT(2)
/* Command register fields */
#define SPMI_CONTROLLER_CMD_MAX_BYTE_COUNT 16
/* Transaction timeout (in microseconds) and maximum bytes per transfer */
#define SPMI_CONTROLLER_TIMEOUT_US 1000
#define SPMI_CONTROLLER_MAX_TRANS_BYTES 16
struct spmi_controller_dev {
struct spmi_controller *controller;
struct device *dev;
void __iomem *base;
spinlock_t lock;
u32 channel;
};
static int spmi_controller_wait_for_done(struct device *dev,
struct spmi_controller_dev *ctrl_dev,
void __iomem *base, u8 sid, u16 addr)
{
u32 timeout = SPMI_CONTROLLER_TIMEOUT_US;
u32 status, offset;
offset = SPMI_APB_SPMI_STATUS_BASE_ADDR;
offset += SPMI_CHANNEL_OFFSET * ctrl_dev->channel + SPMI_SLAVE_OFFSET * sid;
do {
status = readl(base + offset);
if (status & SPMI_APB_TRANS_DONE) {
if (status & SPMI_APB_TRANS_FAIL) {
dev_err(dev, "%s: transaction failed (0x%x)\n",
__func__, status);
return -EIO;
}
dev_dbg(dev, "%s: status 0x%x\n", __func__, status);
return 0;
}
udelay(1);
} while (timeout--);
dev_err(dev, "%s: timeout, status 0x%x\n", __func__, status);
return -ETIMEDOUT;
}
static int spmi_read_cmd(struct spmi_controller *ctrl,
u8 opc, u8 slave_id, u16 slave_addr, u8 *__buf, size_t bc)
{
struct spmi_controller_dev *spmi_controller = dev_get_drvdata(&ctrl->dev);
u32 chnl_ofst = SPMI_CHANNEL_OFFSET * spmi_controller->channel;
unsigned long flags;
u8 *buf = __buf;
u32 cmd, data;
int rc;
u8 op_code, i;
if (bc > SPMI_CONTROLLER_MAX_TRANS_BYTES) {
dev_err(&ctrl->dev,
"spmi_controller supports 1..%d bytes per trans, but:%zu requested\n",
SPMI_CONTROLLER_MAX_TRANS_BYTES, bc);
return -EINVAL;
}
switch (opc) {
case SPMI_CMD_READ:
op_code = SPMI_CMD_REG_READ;
break;
case SPMI_CMD_EXT_READ:
op_code = SPMI_CMD_EXT_REG_READ;
break;
case SPMI_CMD_EXT_READL:
op_code = SPMI_CMD_EXT_REG_READ_L;
break;
default:
dev_err(&ctrl->dev, "invalid read cmd 0x%x\n", opc);
return -EINVAL;
}
cmd = SPMI_APB_SPMI_CMD_EN |
(op_code << SPMI_APB_SPMI_CMD_TYPE_OFFSET) |
((bc - 1) << SPMI_APB_SPMI_CMD_LENGTH_OFFSET) |
((slave_id & 0xf) << SPMI_APB_SPMI_CMD_SLAVEID_OFFSET) | /* slvid */
((slave_addr & 0xffff) << SPMI_APB_SPMI_CMD_ADDR_OFFSET); /* slave_addr */
spin_lock_irqsave(&spmi_controller->lock, flags);
writel(cmd, spmi_controller->base + chnl_ofst + SPMI_APB_SPMI_CMD_BASE_ADDR);
rc = spmi_controller_wait_for_done(&ctrl->dev, spmi_controller,
spmi_controller->base, slave_id, slave_addr);
if (rc)
goto done;
for (i = 0; bc > i * SPMI_PER_DATAREG_BYTE; i++) {
data = readl(spmi_controller->base + chnl_ofst +
SPMI_SLAVE_OFFSET * slave_id +
SPMI_APB_SPMI_RDATA0_BASE_ADDR +
i * SPMI_PER_DATAREG_BYTE);
data = be32_to_cpu((__be32 __force)data);
if ((bc - i * SPMI_PER_DATAREG_BYTE) >> 2) {
memcpy(buf, &data, sizeof(data));
buf += sizeof(data);
} else {
memcpy(buf, &data, bc % SPMI_PER_DATAREG_BYTE);
buf += (bc % SPMI_PER_DATAREG_BYTE);
}
}
done:
spin_unlock_irqrestore(&spmi_controller->lock, flags);
if (rc)
dev_err(&ctrl->dev,
"spmi read wait timeout op:0x%x slave_id:%d slave_addr:0x%x bc:%zu\n",
opc, slave_id, slave_addr, bc);
else
dev_dbg(&ctrl->dev, "%s: id:%d slave_addr:0x%x, read value: %*ph\n",
__func__, slave_id, slave_addr, (int)bc, __buf);
return rc;
}
static int spmi_write_cmd(struct spmi_controller *ctrl,
u8 opc, u8 slave_id, u16 slave_addr, const u8 *__buf, size_t bc)
{
struct spmi_controller_dev *spmi_controller = dev_get_drvdata(&ctrl->dev);
u32 chnl_ofst = SPMI_CHANNEL_OFFSET * spmi_controller->channel;
const u8 *buf = __buf;
unsigned long flags;
u32 cmd, data;
int rc;
u8 op_code, i;
if (bc > SPMI_CONTROLLER_MAX_TRANS_BYTES) {
dev_err(&ctrl->dev,
"spmi_controller supports 1..%d bytes per trans, but:%zu requested\n",
SPMI_CONTROLLER_MAX_TRANS_BYTES, bc);
return -EINVAL;
}
switch (opc) {
case SPMI_CMD_WRITE:
op_code = SPMI_CMD_REG_WRITE;
break;
case SPMI_CMD_EXT_WRITE:
op_code = SPMI_CMD_EXT_REG_WRITE;
break;
case SPMI_CMD_EXT_WRITEL:
op_code = SPMI_CMD_EXT_REG_WRITE_L;
break;
default:
dev_err(&ctrl->dev, "invalid write cmd 0x%x\n", opc);
return -EINVAL;
}
cmd = SPMI_APB_SPMI_CMD_EN |
(op_code << SPMI_APB_SPMI_CMD_TYPE_OFFSET) |
((bc - 1) << SPMI_APB_SPMI_CMD_LENGTH_OFFSET) |
((slave_id & 0xf) << SPMI_APB_SPMI_CMD_SLAVEID_OFFSET) |
((slave_addr & 0xffff) << SPMI_APB_SPMI_CMD_ADDR_OFFSET);
/* Write data to FIFOs */
spin_lock_irqsave(&spmi_controller->lock, flags);
for (i = 0; bc > i * SPMI_PER_DATAREG_BYTE; i++) {
data = 0;
if ((bc - i * SPMI_PER_DATAREG_BYTE) >> 2) {
memcpy(&data, buf, sizeof(data));
buf += sizeof(data);
} else {
memcpy(&data, buf, bc % SPMI_PER_DATAREG_BYTE);
buf += (bc % SPMI_PER_DATAREG_BYTE);
}
writel((u32 __force)cpu_to_be32(data),
spmi_controller->base + chnl_ofst +
SPMI_APB_SPMI_WDATA0_BASE_ADDR +
SPMI_PER_DATAREG_BYTE * i);
}
/* Start the transaction */
writel(cmd, spmi_controller->base + chnl_ofst + SPMI_APB_SPMI_CMD_BASE_ADDR);
rc = spmi_controller_wait_for_done(&ctrl->dev, spmi_controller,
spmi_controller->base, slave_id,
slave_addr);
spin_unlock_irqrestore(&spmi_controller->lock, flags);
if (rc)
dev_err(&ctrl->dev, "spmi write wait timeout op:0x%x slave_id:%d slave_addr:0x%x bc:%zu\n",
opc, slave_id, slave_addr, bc);
else
dev_dbg(&ctrl->dev, "%s: id:%d slave_addr:0x%x, wrote value: %*ph\n",
__func__, slave_id, slave_addr, (int)bc, __buf);
return rc;
}
static int spmi_controller_probe(struct platform_device *pdev)
{
struct spmi_controller_dev *spmi_controller;
struct spmi_controller *ctrl;
struct resource *iores;
int ret;
ctrl = spmi_controller_alloc(&pdev->dev, sizeof(*spmi_controller));
if (!ctrl) {
dev_err(&pdev->dev, "can not allocate spmi_controller data\n");
return -ENOMEM;
}
spmi_controller = spmi_controller_get_drvdata(ctrl);
spmi_controller->controller = ctrl;
iores = platform_get_resource(pdev, IORESOURCE_MEM, 0);
if (!iores) {
dev_err(&pdev->dev, "can not get resource!\n");
ret = -EINVAL;
goto err_put_controller;
}
spmi_controller->base = devm_ioremap(&pdev->dev, iores->start,
resource_size(iores));
if (!spmi_controller->base) {
dev_err(&pdev->dev, "can not remap base addr!\n");
ret = -EADDRNOTAVAIL;
goto err_put_controller;
}
ret = of_property_read_u32(pdev->dev.of_node, "hisilicon,spmi-channel",
&spmi_controller->channel);
if (ret) {
dev_err(&pdev->dev, "can not get channel\n");
ret = -ENODEV;
goto err_put_controller;
}
platform_set_drvdata(pdev, spmi_controller);
dev_set_drvdata(&ctrl->dev, spmi_controller);
spin_lock_init(&spmi_controller->lock);
ctrl->nr = spmi_controller->channel;
ctrl->dev.parent = pdev->dev.parent;
ctrl->dev.of_node = of_node_get(pdev->dev.of_node);
/* Callbacks */
ctrl->read_cmd = spmi_read_cmd;
ctrl->write_cmd = spmi_write_cmd;
ret = spmi_controller_add(ctrl);
if (ret) {
dev_err(&pdev->dev, "spmi_controller_add failed with error %d!\n", ret);
goto err_put_controller;
}
return 0;
err_put_controller:
spmi_controller_put(ctrl);
return ret;
}
static void spmi_del_controller(struct platform_device *pdev)
{
struct spmi_controller *ctrl = platform_get_drvdata(pdev);
spmi_controller_remove(ctrl);
spmi_controller_put(ctrl);
}
static const struct of_device_id spmi_controller_match_table[] = {
{
.compatible = "hisilicon,kirin970-spmi-controller",
},
{}
};
MODULE_DEVICE_TABLE(of, spmi_controller_match_table);
static struct platform_driver spmi_controller_driver = {
.probe = spmi_controller_probe,
.remove_new = spmi_del_controller,
.driver = {
.name = "hisi_spmi_controller",
.of_match_table = spmi_controller_match_table,
},
};
static int __init spmi_controller_init(void)
{
return platform_driver_register(&spmi_controller_driver);
}
postcore_initcall(spmi_controller_init);
static void __exit spmi_controller_exit(void)
{
platform_driver_unregister(&spmi_controller_driver);
}
module_exit(spmi_controller_exit);
MODULE_LICENSE("GPL v2");
MODULE_VERSION("1.0");
MODULE_ALIAS("platform:spmi_controller");
| linux-master | drivers/spmi/hisi-spmi-controller.c |
// SPDX-License-Identifier: GPL-2.0-only
/*
* Copyright (c) 2012-2015, 2017, 2021, The Linux Foundation. All rights reserved.
*/
#include <linux/bitmap.h>
#include <linux/delay.h>
#include <linux/err.h>
#include <linux/interrupt.h>
#include <linux/io.h>
#include <linux/irqchip/chained_irq.h>
#include <linux/irqdomain.h>
#include <linux/irq.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/of.h>
#include <linux/platform_device.h>
#include <linux/slab.h>
#include <linux/spmi.h>
/* PMIC Arbiter configuration registers */
#define PMIC_ARB_VERSION 0x0000
#define PMIC_ARB_VERSION_V2_MIN 0x20010000
#define PMIC_ARB_VERSION_V3_MIN 0x30000000
#define PMIC_ARB_VERSION_V5_MIN 0x50000000
#define PMIC_ARB_VERSION_V7_MIN 0x70000000
#define PMIC_ARB_INT_EN 0x0004
#define PMIC_ARB_FEATURES 0x0004
#define PMIC_ARB_FEATURES_PERIPH_MASK GENMASK(10, 0)
#define PMIC_ARB_FEATURES1 0x0008
/* PMIC Arbiter channel registers offsets */
#define PMIC_ARB_CMD 0x00
#define PMIC_ARB_CONFIG 0x04
#define PMIC_ARB_STATUS 0x08
#define PMIC_ARB_WDATA0 0x10
#define PMIC_ARB_WDATA1 0x14
#define PMIC_ARB_RDATA0 0x18
#define PMIC_ARB_RDATA1 0x1C
/* Mapping Table */
#define SPMI_MAPPING_TABLE_REG(N) (0x0B00 + (4 * (N)))
#define SPMI_MAPPING_BIT_INDEX(X) (((X) >> 18) & 0xF)
#define SPMI_MAPPING_BIT_IS_0_FLAG(X) (((X) >> 17) & 0x1)
#define SPMI_MAPPING_BIT_IS_0_RESULT(X) (((X) >> 9) & 0xFF)
#define SPMI_MAPPING_BIT_IS_1_FLAG(X) (((X) >> 8) & 0x1)
#define SPMI_MAPPING_BIT_IS_1_RESULT(X) (((X) >> 0) & 0xFF)
#define SPMI_MAPPING_TABLE_TREE_DEPTH 16 /* Maximum of 16-bits */
#define PMIC_ARB_MAX_PPID BIT(12) /* PPID is 12bit */
#define PMIC_ARB_APID_VALID BIT(15)
#define PMIC_ARB_CHAN_IS_IRQ_OWNER(reg) ((reg) & BIT(24))
#define INVALID_EE 0xFF
/* Ownership Table */
#define SPMI_OWNERSHIP_PERIPH2OWNER(X) ((X) & 0x7)
/* Channel Status fields */
enum pmic_arb_chnl_status {
PMIC_ARB_STATUS_DONE = BIT(0),
PMIC_ARB_STATUS_FAILURE = BIT(1),
PMIC_ARB_STATUS_DENIED = BIT(2),
PMIC_ARB_STATUS_DROPPED = BIT(3),
};
/* Command register fields */
#define PMIC_ARB_CMD_MAX_BYTE_COUNT 8
/* Command Opcodes */
enum pmic_arb_cmd_op_code {
PMIC_ARB_OP_EXT_WRITEL = 0,
PMIC_ARB_OP_EXT_READL = 1,
PMIC_ARB_OP_EXT_WRITE = 2,
PMIC_ARB_OP_RESET = 3,
PMIC_ARB_OP_SLEEP = 4,
PMIC_ARB_OP_SHUTDOWN = 5,
PMIC_ARB_OP_WAKEUP = 6,
PMIC_ARB_OP_AUTHENTICATE = 7,
PMIC_ARB_OP_MSTR_READ = 8,
PMIC_ARB_OP_MSTR_WRITE = 9,
PMIC_ARB_OP_EXT_READ = 13,
PMIC_ARB_OP_WRITE = 14,
PMIC_ARB_OP_READ = 15,
PMIC_ARB_OP_ZERO_WRITE = 16,
};
/*
* PMIC arbiter version 5 uses different register offsets for read/write vs
* observer channels.
*/
enum pmic_arb_channel {
PMIC_ARB_CHANNEL_RW,
PMIC_ARB_CHANNEL_OBS,
};
/* Maximum number of supported PMIC peripherals */
#define PMIC_ARB_MAX_PERIPHS 512
#define PMIC_ARB_MAX_PERIPHS_V7 1024
#define PMIC_ARB_TIMEOUT_US 1000
#define PMIC_ARB_MAX_TRANS_BYTES (8)
#define PMIC_ARB_APID_MASK 0xFF
#define PMIC_ARB_PPID_MASK 0xFFF
/* interrupt enable bit */
#define SPMI_PIC_ACC_ENABLE_BIT BIT(0)
#define spec_to_hwirq(slave_id, periph_id, irq_id, apid) \
((((slave_id) & 0xF) << 28) | \
(((periph_id) & 0xFF) << 20) | \
(((irq_id) & 0x7) << 16) | \
(((apid) & 0x3FF) << 0))
#define hwirq_to_sid(hwirq) (((hwirq) >> 28) & 0xF)
#define hwirq_to_per(hwirq) (((hwirq) >> 20) & 0xFF)
#define hwirq_to_irq(hwirq) (((hwirq) >> 16) & 0x7)
#define hwirq_to_apid(hwirq) (((hwirq) >> 0) & 0x3FF)
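/*
 * Editor's note: illustrative compile-time check, not part of the original
 * driver. It shows how the hwirq encoding above packs and unpacks, e.g.
 * sid 0x2, periph 0x08, irq 3, apid 42 -> hwirq 0x2083002a.
 */
static inline void pmic_arb_hwirq_encoding_example(void)
{
	BUILD_BUG_ON(spec_to_hwirq(0x2, 0x08, 3, 42) != 0x2083002a);
	BUILD_BUG_ON(hwirq_to_sid(spec_to_hwirq(0x2, 0x08, 3, 42)) != 0x2);
	BUILD_BUG_ON(hwirq_to_apid(spec_to_hwirq(0x2, 0x08, 3, 42)) != 42);
}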
struct pmic_arb_ver_ops;
struct apid_data {
u16 ppid;
u8 write_ee;
u8 irq_ee;
};
/**
* struct spmi_pmic_arb - SPMI PMIC Arbiter object
*
* @rd_base: on v1 "core", on v2 "observer" register base off DT.
* @wr_base: on v1 "core", on v2 "chnls" register base off DT.
* @intr: address of the SPMI interrupt control registers.
* @cnfg: address of the PMIC Arbiter configuration registers.
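 * @core:		on v2 and later: "core" register base; on v1 it is left
 *			unset and the core range is accessed via @rd_base and
 *			@wr_base instead.
 * @core_size:		size of the "core" register address range.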
* @lock: lock to synchronize accesses.
* @channel: execution environment channel to use for accesses.
* @irq: PMIC ARB interrupt.
* @ee: the current Execution Environment
* @bus_instance: on v7: 0 = primary SPMI bus, 1 = secondary SPMI bus
* @min_apid: minimum APID (used for bounding IRQ search)
* @max_apid: maximum APID
* @base_apid: on v7: minimum APID associated with the particular SPMI
* bus instance
* @apid_count: on v5 and v7: number of APIDs associated with the
* particular SPMI bus instance
* @mapping_table: in-memory copy of PPID -> APID mapping table.
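 * @mapping_table_valid: bitmap tracking which @mapping_table entries have
 *			already been read back from hardware.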
* @domain: irq domain object for PMIC IRQ domain
* @spmic: SPMI controller object
* @ver_ops: version dependent operations.
* @ppid_to_apid: in-memory copy of PPID -> APID mapping table.
* @last_apid: Highest value APID in use
* @apid_data: Table of data for all APIDs
* @max_periphs: Number of elements in apid_data[]
*/
struct spmi_pmic_arb {
void __iomem *rd_base;
void __iomem *wr_base;
void __iomem *intr;
void __iomem *cnfg;
void __iomem *core;
resource_size_t core_size;
raw_spinlock_t lock;
u8 channel;
int irq;
u8 ee;
u32 bus_instance;
u16 min_apid;
u16 max_apid;
u16 base_apid;
int apid_count;
u32 *mapping_table;
DECLARE_BITMAP(mapping_table_valid, PMIC_ARB_MAX_PERIPHS);
struct irq_domain *domain;
struct spmi_controller *spmic;
const struct pmic_arb_ver_ops *ver_ops;
u16 *ppid_to_apid;
u16 last_apid;
struct apid_data *apid_data;
int max_periphs;
};
/**
* struct pmic_arb_ver_ops - version dependent functionality.
*
* @ver_str: version string.
* @ppid_to_apid: finds the apid for a given ppid.
* @non_data_cmd: on v1 issues an spmi non-data command.
* on v2 no HW support, returns -EOPNOTSUPP.
* @offset: on v1 offset of per-ee channel.
* on v2 offset of per-ee and per-ppid channel.
* @fmt_cmd: formats a GENI/SPMI command.
* @owner_acc_status: on v1 address of PMIC_ARB_SPMI_PIC_OWNERm_ACC_STATUSn
* on v2 address of SPMI_PIC_OWNERm_ACC_STATUSn.
* @acc_enable: on v1 address of PMIC_ARB_SPMI_PIC_ACC_ENABLEn
* on v2 address of SPMI_PIC_ACC_ENABLEn.
* @irq_status: on v1 address of PMIC_ARB_SPMI_PIC_IRQ_STATUSn
* on v2 address of SPMI_PIC_IRQ_STATUSn.
* @irq_clear: on v1 address of PMIC_ARB_SPMI_PIC_IRQ_CLEARn
* on v2 address of SPMI_PIC_IRQ_CLEARn.
* @apid_map_offset: offset of PMIC_ARB_REG_CHNLn
* @apid_owner: on v2 and later address of SPMI_PERIPHn_2OWNER_TABLE_REG
*/
struct pmic_arb_ver_ops {
const char *ver_str;
int (*ppid_to_apid)(struct spmi_pmic_arb *pmic_arb, u16 ppid);
/* spmi commands (read_cmd, write_cmd, cmd) functionality */
int (*offset)(struct spmi_pmic_arb *pmic_arb, u8 sid, u16 addr,
enum pmic_arb_channel ch_type);
u32 (*fmt_cmd)(u8 opc, u8 sid, u16 addr, u8 bc);
int (*non_data_cmd)(struct spmi_controller *ctrl, u8 opc, u8 sid);
/* Interrupts controller functionality (offset of PIC registers) */
void __iomem *(*owner_acc_status)(struct spmi_pmic_arb *pmic_arb, u8 m,
u16 n);
void __iomem *(*acc_enable)(struct spmi_pmic_arb *pmic_arb, u16 n);
void __iomem *(*irq_status)(struct spmi_pmic_arb *pmic_arb, u16 n);
void __iomem *(*irq_clear)(struct spmi_pmic_arb *pmic_arb, u16 n);
u32 (*apid_map_offset)(u16 n);
void __iomem *(*apid_owner)(struct spmi_pmic_arb *pmic_arb, u16 n);
};
static inline void pmic_arb_base_write(struct spmi_pmic_arb *pmic_arb,
u32 offset, u32 val)
{
writel_relaxed(val, pmic_arb->wr_base + offset);
}
static inline void pmic_arb_set_rd_cmd(struct spmi_pmic_arb *pmic_arb,
u32 offset, u32 val)
{
writel_relaxed(val, pmic_arb->rd_base + offset);
}
/**
 * pmic_arb_read_data: reads pmic-arb's register and copies 1..4 bytes to buf
* @bc: byte count -1. range: 0..3
* @reg: register's address
* @buf: output parameter, length must be bc + 1
*/
static void
pmic_arb_read_data(struct spmi_pmic_arb *pmic_arb, u8 *buf, u32 reg, u8 bc)
{
u32 data = __raw_readl(pmic_arb->rd_base + reg);
memcpy(buf, &data, (bc & 3) + 1);
}
/**
 * pmic_arb_write_data: writes 1..4 bytes from buf to pmic-arb's register
* @bc: byte-count -1. range: 0..3.
* @reg: register's address.
* @buf: buffer to write. length must be bc + 1.
*/
static void pmic_arb_write_data(struct spmi_pmic_arb *pmic_arb, const u8 *buf,
u32 reg, u8 bc)
{
u32 data = 0;
memcpy(&data, buf, (bc & 3) + 1);
__raw_writel(data, pmic_arb->wr_base + reg);
}
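/*
 * Editor's note (illustrative, not from the original source): with these two
 * helpers a 7-byte payload (bc = 6) is transferred as
 *
 *	pmic_arb_write_data(pa, buf,     offset + PMIC_ARB_WDATA0, 3);	 4 bytes
 *	pmic_arb_write_data(pa, buf + 4, offset + PMIC_ARB_WDATA1, 2);	 3 bytes
 *
 * which is exactly the split performed later in pmic_arb_write_cmd_unlocked().
 */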
static int pmic_arb_wait_for_done(struct spmi_controller *ctrl,
void __iomem *base, u8 sid, u16 addr,
enum pmic_arb_channel ch_type)
{
struct spmi_pmic_arb *pmic_arb = spmi_controller_get_drvdata(ctrl);
u32 status = 0;
u32 timeout = PMIC_ARB_TIMEOUT_US;
u32 offset;
int rc;
rc = pmic_arb->ver_ops->offset(pmic_arb, sid, addr, ch_type);
if (rc < 0)
return rc;
offset = rc;
offset += PMIC_ARB_STATUS;
while (timeout--) {
status = readl_relaxed(base + offset);
if (status & PMIC_ARB_STATUS_DONE) {
if (status & PMIC_ARB_STATUS_DENIED) {
dev_err(&ctrl->dev, "%s: %#x %#x: transaction denied (%#x)\n",
__func__, sid, addr, status);
return -EPERM;
}
if (status & PMIC_ARB_STATUS_FAILURE) {
dev_err(&ctrl->dev, "%s: %#x %#x: transaction failed (%#x)\n",
__func__, sid, addr, status);
WARN_ON(1);
return -EIO;
}
if (status & PMIC_ARB_STATUS_DROPPED) {
dev_err(&ctrl->dev, "%s: %#x %#x: transaction dropped (%#x)\n",
__func__, sid, addr, status);
return -EIO;
}
return 0;
}
udelay(1);
}
dev_err(&ctrl->dev, "%s: %#x %#x: timeout, status %#x\n",
__func__, sid, addr, status);
return -ETIMEDOUT;
}
static int
pmic_arb_non_data_cmd_v1(struct spmi_controller *ctrl, u8 opc, u8 sid)
{
struct spmi_pmic_arb *pmic_arb = spmi_controller_get_drvdata(ctrl);
unsigned long flags;
u32 cmd;
int rc;
u32 offset;
rc = pmic_arb->ver_ops->offset(pmic_arb, sid, 0, PMIC_ARB_CHANNEL_RW);
if (rc < 0)
return rc;
offset = rc;
cmd = ((opc | 0x40) << 27) | ((sid & 0xf) << 20);
raw_spin_lock_irqsave(&pmic_arb->lock, flags);
pmic_arb_base_write(pmic_arb, offset + PMIC_ARB_CMD, cmd);
rc = pmic_arb_wait_for_done(ctrl, pmic_arb->wr_base, sid, 0,
PMIC_ARB_CHANNEL_RW);
raw_spin_unlock_irqrestore(&pmic_arb->lock, flags);
return rc;
}
static int
pmic_arb_non_data_cmd_v2(struct spmi_controller *ctrl, u8 opc, u8 sid)
{
return -EOPNOTSUPP;
}
/* Non-data command */
static int pmic_arb_cmd(struct spmi_controller *ctrl, u8 opc, u8 sid)
{
struct spmi_pmic_arb *pmic_arb = spmi_controller_get_drvdata(ctrl);
dev_dbg(&ctrl->dev, "cmd op:0x%x sid:%d\n", opc, sid);
/* Check for valid non-data command */
if (opc < SPMI_CMD_RESET || opc > SPMI_CMD_WAKEUP)
return -EINVAL;
return pmic_arb->ver_ops->non_data_cmd(ctrl, opc, sid);
}
static int pmic_arb_fmt_read_cmd(struct spmi_pmic_arb *pmic_arb, u8 opc, u8 sid,
u16 addr, size_t len, u32 *cmd, u32 *offset)
{
u8 bc = len - 1;
int rc;
rc = pmic_arb->ver_ops->offset(pmic_arb, sid, addr,
PMIC_ARB_CHANNEL_OBS);
if (rc < 0)
return rc;
*offset = rc;
if (bc >= PMIC_ARB_MAX_TRANS_BYTES) {
dev_err(&pmic_arb->spmic->dev, "pmic-arb supports 1..%d bytes per trans, but:%zu requested",
PMIC_ARB_MAX_TRANS_BYTES, len);
return -EINVAL;
}
/* Check the opcode */
if (opc >= 0x60 && opc <= 0x7F)
opc = PMIC_ARB_OP_READ;
else if (opc >= 0x20 && opc <= 0x2F)
opc = PMIC_ARB_OP_EXT_READ;
else if (opc >= 0x38 && opc <= 0x3F)
opc = PMIC_ARB_OP_EXT_READL;
else
return -EINVAL;
*cmd = pmic_arb->ver_ops->fmt_cmd(opc, sid, addr, bc);
return 0;
}
static int pmic_arb_read_cmd_unlocked(struct spmi_controller *ctrl, u32 cmd,
u32 offset, u8 sid, u16 addr, u8 *buf,
size_t len)
{
struct spmi_pmic_arb *pmic_arb = spmi_controller_get_drvdata(ctrl);
u8 bc = len - 1;
int rc;
pmic_arb_set_rd_cmd(pmic_arb, offset + PMIC_ARB_CMD, cmd);
rc = pmic_arb_wait_for_done(ctrl, pmic_arb->rd_base, sid, addr,
PMIC_ARB_CHANNEL_OBS);
if (rc)
return rc;
pmic_arb_read_data(pmic_arb, buf, offset + PMIC_ARB_RDATA0,
min_t(u8, bc, 3));
if (bc > 3)
pmic_arb_read_data(pmic_arb, buf + 4, offset + PMIC_ARB_RDATA1,
bc - 4);
return 0;
}
static int pmic_arb_read_cmd(struct spmi_controller *ctrl, u8 opc, u8 sid,
u16 addr, u8 *buf, size_t len)
{
struct spmi_pmic_arb *pmic_arb = spmi_controller_get_drvdata(ctrl);
unsigned long flags;
u32 cmd, offset;
int rc;
rc = pmic_arb_fmt_read_cmd(pmic_arb, opc, sid, addr, len, &cmd,
&offset);
if (rc)
return rc;
raw_spin_lock_irqsave(&pmic_arb->lock, flags);
rc = pmic_arb_read_cmd_unlocked(ctrl, cmd, offset, sid, addr, buf, len);
raw_spin_unlock_irqrestore(&pmic_arb->lock, flags);
return rc;
}
static int pmic_arb_fmt_write_cmd(struct spmi_pmic_arb *pmic_arb, u8 opc,
u8 sid, u16 addr, size_t len, u32 *cmd,
u32 *offset)
{
u8 bc = len - 1;
int rc;
rc = pmic_arb->ver_ops->offset(pmic_arb, sid, addr,
PMIC_ARB_CHANNEL_RW);
if (rc < 0)
return rc;
*offset = rc;
if (bc >= PMIC_ARB_MAX_TRANS_BYTES) {
dev_err(&pmic_arb->spmic->dev, "pmic-arb supports 1..%d bytes per trans, but:%zu requested",
PMIC_ARB_MAX_TRANS_BYTES, len);
return -EINVAL;
}
/* Check the opcode */
if (opc >= 0x40 && opc <= 0x5F)
opc = PMIC_ARB_OP_WRITE;
else if (opc <= 0x0F)
opc = PMIC_ARB_OP_EXT_WRITE;
else if (opc >= 0x30 && opc <= 0x37)
opc = PMIC_ARB_OP_EXT_WRITEL;
else if (opc >= 0x80)
opc = PMIC_ARB_OP_ZERO_WRITE;
else
return -EINVAL;
*cmd = pmic_arb->ver_ops->fmt_cmd(opc, sid, addr, bc);
return 0;
}
static int pmic_arb_write_cmd_unlocked(struct spmi_controller *ctrl, u32 cmd,
u32 offset, u8 sid, u16 addr,
const u8 *buf, size_t len)
{
struct spmi_pmic_arb *pmic_arb = spmi_controller_get_drvdata(ctrl);
u8 bc = len - 1;
/* Write data to FIFOs */
pmic_arb_write_data(pmic_arb, buf, offset + PMIC_ARB_WDATA0,
min_t(u8, bc, 3));
if (bc > 3)
pmic_arb_write_data(pmic_arb, buf + 4, offset + PMIC_ARB_WDATA1,
bc - 4);
/* Start the transaction */
pmic_arb_base_write(pmic_arb, offset + PMIC_ARB_CMD, cmd);
return pmic_arb_wait_for_done(ctrl, pmic_arb->wr_base, sid, addr,
PMIC_ARB_CHANNEL_RW);
}
static int pmic_arb_write_cmd(struct spmi_controller *ctrl, u8 opc, u8 sid,
u16 addr, const u8 *buf, size_t len)
{
struct spmi_pmic_arb *pmic_arb = spmi_controller_get_drvdata(ctrl);
unsigned long flags;
u32 cmd, offset;
int rc;
rc = pmic_arb_fmt_write_cmd(pmic_arb, opc, sid, addr, len, &cmd,
&offset);
if (rc)
return rc;
raw_spin_lock_irqsave(&pmic_arb->lock, flags);
rc = pmic_arb_write_cmd_unlocked(ctrl, cmd, offset, sid, addr, buf,
len);
raw_spin_unlock_irqrestore(&pmic_arb->lock, flags);
return rc;
}
static int pmic_arb_masked_write(struct spmi_controller *ctrl, u8 sid, u16 addr,
const u8 *buf, const u8 *mask, size_t len)
{
struct spmi_pmic_arb *pmic_arb = spmi_controller_get_drvdata(ctrl);
u32 read_cmd, read_offset, write_cmd, write_offset;
u8 temp[PMIC_ARB_MAX_TRANS_BYTES];
unsigned long flags;
int rc, i;
rc = pmic_arb_fmt_read_cmd(pmic_arb, SPMI_CMD_EXT_READL, sid, addr, len,
&read_cmd, &read_offset);
if (rc)
return rc;
rc = pmic_arb_fmt_write_cmd(pmic_arb, SPMI_CMD_EXT_WRITEL, sid, addr,
len, &write_cmd, &write_offset);
if (rc)
return rc;
raw_spin_lock_irqsave(&pmic_arb->lock, flags);
rc = pmic_arb_read_cmd_unlocked(ctrl, read_cmd, read_offset, sid, addr,
temp, len);
if (rc)
goto done;
for (i = 0; i < len; i++)
temp[i] = (temp[i] & ~mask[i]) | (buf[i] & mask[i]);
rc = pmic_arb_write_cmd_unlocked(ctrl, write_cmd, write_offset, sid,
addr, temp, len);
done:
raw_spin_unlock_irqrestore(&pmic_arb->lock, flags);
return rc;
}
enum qpnpint_regs {
QPNPINT_REG_RT_STS = 0x10,
QPNPINT_REG_SET_TYPE = 0x11,
QPNPINT_REG_POLARITY_HIGH = 0x12,
QPNPINT_REG_POLARITY_LOW = 0x13,
QPNPINT_REG_LATCHED_CLR = 0x14,
QPNPINT_REG_EN_SET = 0x15,
QPNPINT_REG_EN_CLR = 0x16,
QPNPINT_REG_LATCHED_STS = 0x18,
};
struct spmi_pmic_arb_qpnpint_type {
u8 type; /* 1 -> edge */
u8 polarity_high;
u8 polarity_low;
} __packed;
/* Simplified accessor functions for irqchip callbacks */
static void qpnpint_spmi_write(struct irq_data *d, u8 reg, void *buf,
size_t len)
{
struct spmi_pmic_arb *pmic_arb = irq_data_get_irq_chip_data(d);
u8 sid = hwirq_to_sid(d->hwirq);
u8 per = hwirq_to_per(d->hwirq);
if (pmic_arb_write_cmd(pmic_arb->spmic, SPMI_CMD_EXT_WRITEL, sid,
(per << 8) + reg, buf, len))
dev_err_ratelimited(&pmic_arb->spmic->dev, "failed irqchip transaction on %x\n",
d->irq);
}
static void qpnpint_spmi_read(struct irq_data *d, u8 reg, void *buf, size_t len)
{
struct spmi_pmic_arb *pmic_arb = irq_data_get_irq_chip_data(d);
u8 sid = hwirq_to_sid(d->hwirq);
u8 per = hwirq_to_per(d->hwirq);
if (pmic_arb_read_cmd(pmic_arb->spmic, SPMI_CMD_EXT_READL, sid,
(per << 8) + reg, buf, len))
dev_err_ratelimited(&pmic_arb->spmic->dev, "failed irqchip transaction on %x\n",
d->irq);
}
static int qpnpint_spmi_masked_write(struct irq_data *d, u8 reg,
const void *buf, const void *mask,
size_t len)
{
struct spmi_pmic_arb *pmic_arb = irq_data_get_irq_chip_data(d);
u8 sid = hwirq_to_sid(d->hwirq);
u8 per = hwirq_to_per(d->hwirq);
int rc;
rc = pmic_arb_masked_write(pmic_arb->spmic, sid, (per << 8) + reg, buf,
mask, len);
if (rc)
dev_err_ratelimited(&pmic_arb->spmic->dev, "failed irqchip transaction on %x rc=%d\n",
d->irq, rc);
return rc;
}
static void cleanup_irq(struct spmi_pmic_arb *pmic_arb, u16 apid, int id)
{
u16 ppid = pmic_arb->apid_data[apid].ppid;
u8 sid = ppid >> 8;
u8 per = ppid & 0xFF;
u8 irq_mask = BIT(id);
dev_err_ratelimited(&pmic_arb->spmic->dev, "%s apid=%d sid=0x%x per=0x%x irq=%d\n",
__func__, apid, sid, per, id);
writel_relaxed(irq_mask, pmic_arb->ver_ops->irq_clear(pmic_arb, apid));
}
static int periph_interrupt(struct spmi_pmic_arb *pmic_arb, u16 apid)
{
unsigned int irq;
u32 status, id;
int handled = 0;
u8 sid = (pmic_arb->apid_data[apid].ppid >> 8) & 0xF;
u8 per = pmic_arb->apid_data[apid].ppid & 0xFF;
status = readl_relaxed(pmic_arb->ver_ops->irq_status(pmic_arb, apid));
while (status) {
id = ffs(status) - 1;
status &= ~BIT(id);
irq = irq_find_mapping(pmic_arb->domain,
spec_to_hwirq(sid, per, id, apid));
if (irq == 0) {
cleanup_irq(pmic_arb, apid, id);
continue;
}
generic_handle_irq(irq);
handled++;
}
return handled;
}
static void pmic_arb_chained_irq(struct irq_desc *desc)
{
struct spmi_pmic_arb *pmic_arb = irq_desc_get_handler_data(desc);
const struct pmic_arb_ver_ops *ver_ops = pmic_arb->ver_ops;
struct irq_chip *chip = irq_desc_get_chip(desc);
int first = pmic_arb->min_apid;
int last = pmic_arb->max_apid;
/*
* acc_offset will be non-zero for the secondary SPMI bus instance on
* v7 controllers.
*/
int acc_offset = pmic_arb->base_apid >> 5;
u8 ee = pmic_arb->ee;
u32 status, enable, handled = 0;
int i, id, apid;
/* status based dispatch */
bool acc_valid = false;
u32 irq_status = 0;
chained_irq_enter(chip, desc);
for (i = first >> 5; i <= last >> 5; ++i) {
status = readl_relaxed(ver_ops->owner_acc_status(pmic_arb, ee, i - acc_offset));
if (status)
acc_valid = true;
while (status) {
id = ffs(status) - 1;
status &= ~BIT(id);
apid = id + i * 32;
if (apid < first || apid > last) {
WARN_ONCE(true, "spurious spmi irq received for apid=%d\n",
apid);
continue;
}
enable = readl_relaxed(
ver_ops->acc_enable(pmic_arb, apid));
if (enable & SPMI_PIC_ACC_ENABLE_BIT)
if (periph_interrupt(pmic_arb, apid) != 0)
handled++;
}
}
/* ACC_STATUS is empty but IRQ fired check IRQ_STATUS */
if (!acc_valid) {
for (i = first; i <= last; i++) {
/* skip if APPS is not irq owner */
if (pmic_arb->apid_data[i].irq_ee != pmic_arb->ee)
continue;
irq_status = readl_relaxed(
ver_ops->irq_status(pmic_arb, i));
if (irq_status) {
enable = readl_relaxed(
ver_ops->acc_enable(pmic_arb, i));
if (enable & SPMI_PIC_ACC_ENABLE_BIT) {
dev_dbg(&pmic_arb->spmic->dev,
"Dispatching IRQ for apid=%d status=%x\n",
i, irq_status);
if (periph_interrupt(pmic_arb, i) != 0)
handled++;
}
}
}
}
if (handled == 0)
handle_bad_irq(desc);
chained_irq_exit(chip, desc);
}
static void qpnpint_irq_ack(struct irq_data *d)
{
struct spmi_pmic_arb *pmic_arb = irq_data_get_irq_chip_data(d);
u8 irq = hwirq_to_irq(d->hwirq);
u16 apid = hwirq_to_apid(d->hwirq);
u8 data;
writel_relaxed(BIT(irq), pmic_arb->ver_ops->irq_clear(pmic_arb, apid));
data = BIT(irq);
qpnpint_spmi_write(d, QPNPINT_REG_LATCHED_CLR, &data, 1);
}
static void qpnpint_irq_mask(struct irq_data *d)
{
u8 irq = hwirq_to_irq(d->hwirq);
u8 data = BIT(irq);
qpnpint_spmi_write(d, QPNPINT_REG_EN_CLR, &data, 1);
}
static void qpnpint_irq_unmask(struct irq_data *d)
{
struct spmi_pmic_arb *pmic_arb = irq_data_get_irq_chip_data(d);
const struct pmic_arb_ver_ops *ver_ops = pmic_arb->ver_ops;
u8 irq = hwirq_to_irq(d->hwirq);
u16 apid = hwirq_to_apid(d->hwirq);
u8 buf[2];
writel_relaxed(SPMI_PIC_ACC_ENABLE_BIT,
ver_ops->acc_enable(pmic_arb, apid));
qpnpint_spmi_read(d, QPNPINT_REG_EN_SET, &buf[0], 1);
if (!(buf[0] & BIT(irq))) {
/*
* Since the interrupt is currently disabled, write to both the
* LATCHED_CLR and EN_SET registers so that a spurious interrupt
* cannot be triggered when the interrupt is enabled
*/
buf[0] = BIT(irq);
buf[1] = BIT(irq);
qpnpint_spmi_write(d, QPNPINT_REG_LATCHED_CLR, &buf, 2);
}
}
static int qpnpint_irq_set_type(struct irq_data *d, unsigned int flow_type)
{
struct spmi_pmic_arb_qpnpint_type type = {0};
struct spmi_pmic_arb_qpnpint_type mask;
irq_flow_handler_t flow_handler;
u8 irq_bit = BIT(hwirq_to_irq(d->hwirq));
int rc;
if (flow_type & (IRQF_TRIGGER_RISING | IRQF_TRIGGER_FALLING)) {
type.type = irq_bit;
if (flow_type & IRQF_TRIGGER_RISING)
type.polarity_high = irq_bit;
if (flow_type & IRQF_TRIGGER_FALLING)
type.polarity_low = irq_bit;
flow_handler = handle_edge_irq;
} else {
if ((flow_type & (IRQF_TRIGGER_HIGH)) &&
(flow_type & (IRQF_TRIGGER_LOW)))
return -EINVAL;
if (flow_type & IRQF_TRIGGER_HIGH)
type.polarity_high = irq_bit;
else
type.polarity_low = irq_bit;
flow_handler = handle_level_irq;
}
mask.type = irq_bit;
mask.polarity_high = irq_bit;
mask.polarity_low = irq_bit;
rc = qpnpint_spmi_masked_write(d, QPNPINT_REG_SET_TYPE, &type, &mask,
sizeof(type));
irq_set_handler_locked(d, flow_handler);
return rc;
}
static int qpnpint_irq_set_wake(struct irq_data *d, unsigned int on)
{
struct spmi_pmic_arb *pmic_arb = irq_data_get_irq_chip_data(d);
return irq_set_irq_wake(pmic_arb->irq, on);
}
static int qpnpint_get_irqchip_state(struct irq_data *d,
enum irqchip_irq_state which,
bool *state)
{
u8 irq = hwirq_to_irq(d->hwirq);
u8 status = 0;
if (which != IRQCHIP_STATE_LINE_LEVEL)
return -EINVAL;
qpnpint_spmi_read(d, QPNPINT_REG_RT_STS, &status, 1);
*state = !!(status & BIT(irq));
return 0;
}
static int qpnpint_irq_domain_activate(struct irq_domain *domain,
struct irq_data *d, bool reserve)
{
struct spmi_pmic_arb *pmic_arb = irq_data_get_irq_chip_data(d);
u16 periph = hwirq_to_per(d->hwirq);
u16 apid = hwirq_to_apid(d->hwirq);
u16 sid = hwirq_to_sid(d->hwirq);
u16 irq = hwirq_to_irq(d->hwirq);
u8 buf;
if (pmic_arb->apid_data[apid].irq_ee != pmic_arb->ee) {
dev_err(&pmic_arb->spmic->dev, "failed to xlate sid = %#x, periph = %#x, irq = %u: ee=%u but owner=%u\n",
sid, periph, irq, pmic_arb->ee,
pmic_arb->apid_data[apid].irq_ee);
return -ENODEV;
}
buf = BIT(irq);
qpnpint_spmi_write(d, QPNPINT_REG_EN_CLR, &buf, 1);
qpnpint_spmi_write(d, QPNPINT_REG_LATCHED_CLR, &buf, 1);
return 0;
}
static struct irq_chip pmic_arb_irqchip = {
.name = "pmic_arb",
.irq_ack = qpnpint_irq_ack,
.irq_mask = qpnpint_irq_mask,
.irq_unmask = qpnpint_irq_unmask,
.irq_set_type = qpnpint_irq_set_type,
.irq_set_wake = qpnpint_irq_set_wake,
.irq_get_irqchip_state = qpnpint_get_irqchip_state,
.flags = IRQCHIP_MASK_ON_SUSPEND,
};
static int qpnpint_irq_domain_translate(struct irq_domain *d,
struct irq_fwspec *fwspec,
unsigned long *out_hwirq,
unsigned int *out_type)
{
struct spmi_pmic_arb *pmic_arb = d->host_data;
u32 *intspec = fwspec->param;
u16 apid, ppid;
int rc;
dev_dbg(&pmic_arb->spmic->dev, "intspec[0] 0x%1x intspec[1] 0x%02x intspec[2] 0x%02x\n",
intspec[0], intspec[1], intspec[2]);
if (irq_domain_get_of_node(d) != pmic_arb->spmic->dev.of_node)
return -EINVAL;
if (fwspec->param_count != 4)
return -EINVAL;
if (intspec[0] > 0xF || intspec[1] > 0xFF || intspec[2] > 0x7)
return -EINVAL;
ppid = intspec[0] << 8 | intspec[1];
rc = pmic_arb->ver_ops->ppid_to_apid(pmic_arb, ppid);
if (rc < 0) {
dev_err(&pmic_arb->spmic->dev, "failed to xlate sid = %#x, periph = %#x, irq = %u rc = %d\n",
intspec[0], intspec[1], intspec[2], rc);
return rc;
}
apid = rc;
/* Keep track of {max,min}_apid for bounding search during interrupt */
if (apid > pmic_arb->max_apid)
pmic_arb->max_apid = apid;
if (apid < pmic_arb->min_apid)
pmic_arb->min_apid = apid;
*out_hwirq = spec_to_hwirq(intspec[0], intspec[1], intspec[2], apid);
*out_type = intspec[3] & IRQ_TYPE_SENSE_MASK;
dev_dbg(&pmic_arb->spmic->dev, "out_hwirq = %lu\n", *out_hwirq);
return 0;
}
static struct lock_class_key qpnpint_irq_lock_class, qpnpint_irq_request_class;
static void qpnpint_irq_domain_map(struct spmi_pmic_arb *pmic_arb,
struct irq_domain *domain, unsigned int virq,
irq_hw_number_t hwirq, unsigned int type)
{
irq_flow_handler_t handler;
dev_dbg(&pmic_arb->spmic->dev, "virq = %u, hwirq = %lu, type = %u\n",
virq, hwirq, type);
if (type & IRQ_TYPE_EDGE_BOTH)
handler = handle_edge_irq;
else
handler = handle_level_irq;
irq_set_lockdep_class(virq, &qpnpint_irq_lock_class,
&qpnpint_irq_request_class);
irq_domain_set_info(domain, virq, hwirq, &pmic_arb_irqchip, pmic_arb,
handler, NULL, NULL);
}
static int qpnpint_irq_domain_alloc(struct irq_domain *domain,
unsigned int virq, unsigned int nr_irqs,
void *data)
{
struct spmi_pmic_arb *pmic_arb = domain->host_data;
struct irq_fwspec *fwspec = data;
irq_hw_number_t hwirq;
unsigned int type;
int ret, i;
ret = qpnpint_irq_domain_translate(domain, fwspec, &hwirq, &type);
if (ret)
return ret;
for (i = 0; i < nr_irqs; i++)
qpnpint_irq_domain_map(pmic_arb, domain, virq + i, hwirq + i,
type);
return 0;
}
static int pmic_arb_ppid_to_apid_v1(struct spmi_pmic_arb *pmic_arb, u16 ppid)
{
u32 *mapping_table = pmic_arb->mapping_table;
int index = 0, i;
u16 apid_valid;
u16 apid;
u32 data;
apid_valid = pmic_arb->ppid_to_apid[ppid];
if (apid_valid & PMIC_ARB_APID_VALID) {
apid = apid_valid & ~PMIC_ARB_APID_VALID;
return apid;
}
for (i = 0; i < SPMI_MAPPING_TABLE_TREE_DEPTH; ++i) {
if (!test_and_set_bit(index, pmic_arb->mapping_table_valid))
mapping_table[index] = readl_relaxed(pmic_arb->cnfg +
SPMI_MAPPING_TABLE_REG(index));
data = mapping_table[index];
if (ppid & BIT(SPMI_MAPPING_BIT_INDEX(data))) {
if (SPMI_MAPPING_BIT_IS_1_FLAG(data)) {
index = SPMI_MAPPING_BIT_IS_1_RESULT(data);
} else {
apid = SPMI_MAPPING_BIT_IS_1_RESULT(data);
pmic_arb->ppid_to_apid[ppid]
= apid | PMIC_ARB_APID_VALID;
pmic_arb->apid_data[apid].ppid = ppid;
return apid;
}
} else {
if (SPMI_MAPPING_BIT_IS_0_FLAG(data)) {
index = SPMI_MAPPING_BIT_IS_0_RESULT(data);
} else {
apid = SPMI_MAPPING_BIT_IS_0_RESULT(data);
pmic_arb->ppid_to_apid[ppid]
= apid | PMIC_ARB_APID_VALID;
pmic_arb->apid_data[apid].ppid = ppid;
return apid;
}
}
}
return -ENODEV;
}
/* v1 offset per ee */
static int pmic_arb_offset_v1(struct spmi_pmic_arb *pmic_arb, u8 sid, u16 addr,
enum pmic_arb_channel ch_type)
{
return 0x800 + 0x80 * pmic_arb->channel;
}
static u16 pmic_arb_find_apid(struct spmi_pmic_arb *pmic_arb, u16 ppid)
{
struct apid_data *apidd = &pmic_arb->apid_data[pmic_arb->last_apid];
u32 regval, offset;
u16 id, apid;
for (apid = pmic_arb->last_apid; ; apid++, apidd++) {
offset = pmic_arb->ver_ops->apid_map_offset(apid);
if (offset >= pmic_arb->core_size)
break;
regval = readl_relaxed(pmic_arb->ver_ops->apid_owner(pmic_arb,
apid));
apidd->irq_ee = SPMI_OWNERSHIP_PERIPH2OWNER(regval);
apidd->write_ee = apidd->irq_ee;
regval = readl_relaxed(pmic_arb->core + offset);
if (!regval)
continue;
id = (regval >> 8) & PMIC_ARB_PPID_MASK;
pmic_arb->ppid_to_apid[id] = apid | PMIC_ARB_APID_VALID;
apidd->ppid = id;
if (id == ppid) {
apid |= PMIC_ARB_APID_VALID;
break;
}
}
pmic_arb->last_apid = apid & ~PMIC_ARB_APID_VALID;
return apid;
}
static int pmic_arb_ppid_to_apid_v2(struct spmi_pmic_arb *pmic_arb, u16 ppid)
{
u16 apid_valid;
apid_valid = pmic_arb->ppid_to_apid[ppid];
if (!(apid_valid & PMIC_ARB_APID_VALID))
apid_valid = pmic_arb_find_apid(pmic_arb, ppid);
if (!(apid_valid & PMIC_ARB_APID_VALID))
return -ENODEV;
return apid_valid & ~PMIC_ARB_APID_VALID;
}
static int pmic_arb_read_apid_map_v5(struct spmi_pmic_arb *pmic_arb)
{
struct apid_data *apidd;
struct apid_data *prev_apidd;
u16 i, apid, ppid, apid_max;
bool valid, is_irq_ee;
u32 regval, offset;
/*
* In order to allow multiple EEs to write to a single PPID in arbiter
* version 5 and 7, there is more than one APID mapped to each PPID.
* The owner field for each of these mappings specifies the EE which is
* allowed to write to the APID. The owner of the last (highest) APID
* which has the IRQ owner bit set for a given PPID will receive
* interrupts from the PPID.
*
* In arbiter version 7, the APID numbering space is divided between
* the primary bus (0) and secondary bus (1) such that:
* APID = 0 to N-1 are assigned to the primary bus
* APID = N to N+M-1 are assigned to the secondary bus
* where N = number of APIDs supported by the primary bus and
* M = number of APIDs supported by the secondary bus
*/
apidd = &pmic_arb->apid_data[pmic_arb->base_apid];
apid_max = pmic_arb->base_apid + pmic_arb->apid_count;
for (i = pmic_arb->base_apid; i < apid_max; i++, apidd++) {
offset = pmic_arb->ver_ops->apid_map_offset(i);
if (offset >= pmic_arb->core_size)
break;
regval = readl_relaxed(pmic_arb->core + offset);
if (!regval)
continue;
ppid = (regval >> 8) & PMIC_ARB_PPID_MASK;
is_irq_ee = PMIC_ARB_CHAN_IS_IRQ_OWNER(regval);
regval = readl_relaxed(pmic_arb->ver_ops->apid_owner(pmic_arb,
i));
apidd->write_ee = SPMI_OWNERSHIP_PERIPH2OWNER(regval);
apidd->irq_ee = is_irq_ee ? apidd->write_ee : INVALID_EE;
valid = pmic_arb->ppid_to_apid[ppid] & PMIC_ARB_APID_VALID;
apid = pmic_arb->ppid_to_apid[ppid] & ~PMIC_ARB_APID_VALID;
prev_apidd = &pmic_arb->apid_data[apid];
if (!valid || apidd->write_ee == pmic_arb->ee) {
/* First PPID mapping or one for this EE */
pmic_arb->ppid_to_apid[ppid] = i | PMIC_ARB_APID_VALID;
} else if (valid && is_irq_ee &&
prev_apidd->write_ee == pmic_arb->ee) {
/*
* Duplicate PPID mapping after the one for this EE;
* override the irq owner
*/
prev_apidd->irq_ee = apidd->irq_ee;
}
apidd->ppid = ppid;
pmic_arb->last_apid = i;
}
/* Dump the mapping table for debug purposes. */
dev_dbg(&pmic_arb->spmic->dev, "PPID APID Write-EE IRQ-EE\n");
for (ppid = 0; ppid < PMIC_ARB_MAX_PPID; ppid++) {
apid = pmic_arb->ppid_to_apid[ppid];
if (apid & PMIC_ARB_APID_VALID) {
apid &= ~PMIC_ARB_APID_VALID;
apidd = &pmic_arb->apid_data[apid];
dev_dbg(&pmic_arb->spmic->dev, "%#03X %3u %2u %2u\n",
ppid, apid, apidd->write_ee, apidd->irq_ee);
}
}
return 0;
}
static int pmic_arb_ppid_to_apid_v5(struct spmi_pmic_arb *pmic_arb, u16 ppid)
{
if (!(pmic_arb->ppid_to_apid[ppid] & PMIC_ARB_APID_VALID))
return -ENODEV;
return pmic_arb->ppid_to_apid[ppid] & ~PMIC_ARB_APID_VALID;
}
/* v2 offset per ppid and per ee */
static int pmic_arb_offset_v2(struct spmi_pmic_arb *pmic_arb, u8 sid, u16 addr,
enum pmic_arb_channel ch_type)
{
u16 apid;
u16 ppid;
int rc;
ppid = sid << 8 | ((addr >> 8) & 0xFF);
rc = pmic_arb_ppid_to_apid_v2(pmic_arb, ppid);
if (rc < 0)
return rc;
apid = rc;
return 0x1000 * pmic_arb->ee + 0x8000 * apid;
}
/*
* v5 offset per ee and per apid for observer channels and per apid for
* read/write channels.
*/
static int pmic_arb_offset_v5(struct spmi_pmic_arb *pmic_arb, u8 sid, u16 addr,
enum pmic_arb_channel ch_type)
{
u16 apid;
int rc;
u32 offset = 0;
u16 ppid = (sid << 8) | (addr >> 8);
rc = pmic_arb_ppid_to_apid_v5(pmic_arb, ppid);
if (rc < 0)
return rc;
apid = rc;
switch (ch_type) {
case PMIC_ARB_CHANNEL_OBS:
offset = 0x10000 * pmic_arb->ee + 0x80 * apid;
break;
case PMIC_ARB_CHANNEL_RW:
if (pmic_arb->apid_data[apid].write_ee != pmic_arb->ee) {
dev_err(&pmic_arb->spmic->dev, "disallowed SPMI write to sid=%u, addr=0x%04X\n",
sid, addr);
return -EPERM;
}
offset = 0x10000 * apid;
break;
}
return offset;
}
/*
* v7 offset per ee and per apid for observer channels and per apid for
* read/write channels.
*/
static int pmic_arb_offset_v7(struct spmi_pmic_arb *pmic_arb, u8 sid, u16 addr,
enum pmic_arb_channel ch_type)
{
u16 apid;
int rc;
u32 offset = 0;
u16 ppid = (sid << 8) | (addr >> 8);
rc = pmic_arb->ver_ops->ppid_to_apid(pmic_arb, ppid);
if (rc < 0)
return rc;
apid = rc;
switch (ch_type) {
case PMIC_ARB_CHANNEL_OBS:
offset = 0x8000 * pmic_arb->ee + 0x20 * apid;
break;
case PMIC_ARB_CHANNEL_RW:
if (pmic_arb->apid_data[apid].write_ee != pmic_arb->ee) {
dev_err(&pmic_arb->spmic->dev, "disallowed SPMI write to sid=%u, addr=0x%04X\n",
sid, addr);
return -EPERM;
}
offset = 0x1000 * apid;
break;
}
return offset;
}
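/*
 * Editor's note (illustrative, not from the original source): for example,
 * on v7 with ee = 2 and apid = 37 the offsets above work out to
 *
 *	observer channel:   0x8000 * 2 + 0x20 * 37 = 0x104a0
 *	read/write channel:             0x1000 * 37 = 0x25000
 */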
static u32 pmic_arb_fmt_cmd_v1(u8 opc, u8 sid, u16 addr, u8 bc)
{
return (opc << 27) | ((sid & 0xf) << 20) | (addr << 4) | (bc & 0x7);
}
static u32 pmic_arb_fmt_cmd_v2(u8 opc, u8 sid, u16 addr, u8 bc)
{
return (opc << 27) | ((addr & 0xff) << 4) | (bc & 0x7);
}
static void __iomem *
pmic_arb_owner_acc_status_v1(struct spmi_pmic_arb *pmic_arb, u8 m, u16 n)
{
return pmic_arb->intr + 0x20 * m + 0x4 * n;
}
static void __iomem *
pmic_arb_owner_acc_status_v2(struct spmi_pmic_arb *pmic_arb, u8 m, u16 n)
{
return pmic_arb->intr + 0x100000 + 0x1000 * m + 0x4 * n;
}
static void __iomem *
pmic_arb_owner_acc_status_v3(struct spmi_pmic_arb *pmic_arb, u8 m, u16 n)
{
return pmic_arb->intr + 0x200000 + 0x1000 * m + 0x4 * n;
}
static void __iomem *
pmic_arb_owner_acc_status_v5(struct spmi_pmic_arb *pmic_arb, u8 m, u16 n)
{
return pmic_arb->intr + 0x10000 * m + 0x4 * n;
}
static void __iomem *
pmic_arb_owner_acc_status_v7(struct spmi_pmic_arb *pmic_arb, u8 m, u16 n)
{
return pmic_arb->intr + 0x1000 * m + 0x4 * n;
}
static void __iomem *
pmic_arb_acc_enable_v1(struct spmi_pmic_arb *pmic_arb, u16 n)
{
return pmic_arb->intr + 0x200 + 0x4 * n;
}
static void __iomem *
pmic_arb_acc_enable_v2(struct spmi_pmic_arb *pmic_arb, u16 n)
{
return pmic_arb->intr + 0x1000 * n;
}
static void __iomem *
pmic_arb_acc_enable_v5(struct spmi_pmic_arb *pmic_arb, u16 n)
{
return pmic_arb->wr_base + 0x100 + 0x10000 * n;
}
static void __iomem *
pmic_arb_acc_enable_v7(struct spmi_pmic_arb *pmic_arb, u16 n)
{
return pmic_arb->wr_base + 0x100 + 0x1000 * n;
}
static void __iomem *
pmic_arb_irq_status_v1(struct spmi_pmic_arb *pmic_arb, u16 n)
{
return pmic_arb->intr + 0x600 + 0x4 * n;
}
static void __iomem *
pmic_arb_irq_status_v2(struct spmi_pmic_arb *pmic_arb, u16 n)
{
return pmic_arb->intr + 0x4 + 0x1000 * n;
}
static void __iomem *
pmic_arb_irq_status_v5(struct spmi_pmic_arb *pmic_arb, u16 n)
{
return pmic_arb->wr_base + 0x104 + 0x10000 * n;
}
static void __iomem *
pmic_arb_irq_status_v7(struct spmi_pmic_arb *pmic_arb, u16 n)
{
return pmic_arb->wr_base + 0x104 + 0x1000 * n;
}
static void __iomem *
pmic_arb_irq_clear_v1(struct spmi_pmic_arb *pmic_arb, u16 n)
{
return pmic_arb->intr + 0xA00 + 0x4 * n;
}
static void __iomem *
pmic_arb_irq_clear_v2(struct spmi_pmic_arb *pmic_arb, u16 n)
{
return pmic_arb->intr + 0x8 + 0x1000 * n;
}
static void __iomem *
pmic_arb_irq_clear_v5(struct spmi_pmic_arb *pmic_arb, u16 n)
{
return pmic_arb->wr_base + 0x108 + 0x10000 * n;
}
static void __iomem *
pmic_arb_irq_clear_v7(struct spmi_pmic_arb *pmic_arb, u16 n)
{
return pmic_arb->wr_base + 0x108 + 0x1000 * n;
}
static u32 pmic_arb_apid_map_offset_v2(u16 n)
{
return 0x800 + 0x4 * n;
}
static u32 pmic_arb_apid_map_offset_v5(u16 n)
{
return 0x900 + 0x4 * n;
}
static u32 pmic_arb_apid_map_offset_v7(u16 n)
{
return 0x2000 + 0x4 * n;
}
static void __iomem *
pmic_arb_apid_owner_v2(struct spmi_pmic_arb *pmic_arb, u16 n)
{
return pmic_arb->cnfg + 0x700 + 0x4 * n;
}
/*
* For arbiter version 7, APID ownership table registers have independent
* numbering space for each SPMI bus instance, so each is indexed starting from
* 0.
*/
static void __iomem *
pmic_arb_apid_owner_v7(struct spmi_pmic_arb *pmic_arb, u16 n)
{
return pmic_arb->cnfg + 0x4 * (n - pmic_arb->base_apid);
}
static const struct pmic_arb_ver_ops pmic_arb_v1 = {
.ver_str = "v1",
.ppid_to_apid = pmic_arb_ppid_to_apid_v1,
.non_data_cmd = pmic_arb_non_data_cmd_v1,
.offset = pmic_arb_offset_v1,
.fmt_cmd = pmic_arb_fmt_cmd_v1,
.owner_acc_status = pmic_arb_owner_acc_status_v1,
.acc_enable = pmic_arb_acc_enable_v1,
.irq_status = pmic_arb_irq_status_v1,
.irq_clear = pmic_arb_irq_clear_v1,
.apid_map_offset = pmic_arb_apid_map_offset_v2,
.apid_owner = pmic_arb_apid_owner_v2,
};
static const struct pmic_arb_ver_ops pmic_arb_v2 = {
.ver_str = "v2",
.ppid_to_apid = pmic_arb_ppid_to_apid_v2,
.non_data_cmd = pmic_arb_non_data_cmd_v2,
.offset = pmic_arb_offset_v2,
.fmt_cmd = pmic_arb_fmt_cmd_v2,
.owner_acc_status = pmic_arb_owner_acc_status_v2,
.acc_enable = pmic_arb_acc_enable_v2,
.irq_status = pmic_arb_irq_status_v2,
.irq_clear = pmic_arb_irq_clear_v2,
.apid_map_offset = pmic_arb_apid_map_offset_v2,
.apid_owner = pmic_arb_apid_owner_v2,
};
static const struct pmic_arb_ver_ops pmic_arb_v3 = {
.ver_str = "v3",
.ppid_to_apid = pmic_arb_ppid_to_apid_v2,
.non_data_cmd = pmic_arb_non_data_cmd_v2,
.offset = pmic_arb_offset_v2,
.fmt_cmd = pmic_arb_fmt_cmd_v2,
.owner_acc_status = pmic_arb_owner_acc_status_v3,
.acc_enable = pmic_arb_acc_enable_v2,
.irq_status = pmic_arb_irq_status_v2,
.irq_clear = pmic_arb_irq_clear_v2,
.apid_map_offset = pmic_arb_apid_map_offset_v2,
.apid_owner = pmic_arb_apid_owner_v2,
};
static const struct pmic_arb_ver_ops pmic_arb_v5 = {
.ver_str = "v5",
.ppid_to_apid = pmic_arb_ppid_to_apid_v5,
.non_data_cmd = pmic_arb_non_data_cmd_v2,
.offset = pmic_arb_offset_v5,
.fmt_cmd = pmic_arb_fmt_cmd_v2,
.owner_acc_status = pmic_arb_owner_acc_status_v5,
.acc_enable = pmic_arb_acc_enable_v5,
.irq_status = pmic_arb_irq_status_v5,
.irq_clear = pmic_arb_irq_clear_v5,
.apid_map_offset = pmic_arb_apid_map_offset_v5,
.apid_owner = pmic_arb_apid_owner_v2,
};
static const struct pmic_arb_ver_ops pmic_arb_v7 = {
.ver_str = "v7",
.ppid_to_apid = pmic_arb_ppid_to_apid_v5,
.non_data_cmd = pmic_arb_non_data_cmd_v2,
.offset = pmic_arb_offset_v7,
.fmt_cmd = pmic_arb_fmt_cmd_v2,
.owner_acc_status = pmic_arb_owner_acc_status_v7,
.acc_enable = pmic_arb_acc_enable_v7,
.irq_status = pmic_arb_irq_status_v7,
.irq_clear = pmic_arb_irq_clear_v7,
.apid_map_offset = pmic_arb_apid_map_offset_v7,
.apid_owner = pmic_arb_apid_owner_v7,
};
static const struct irq_domain_ops pmic_arb_irq_domain_ops = {
.activate = qpnpint_irq_domain_activate,
.alloc = qpnpint_irq_domain_alloc,
.free = irq_domain_free_irqs_common,
.translate = qpnpint_irq_domain_translate,
};
static int spmi_pmic_arb_probe(struct platform_device *pdev)
{
struct spmi_pmic_arb *pmic_arb;
struct spmi_controller *ctrl;
struct resource *res;
void __iomem *core;
u32 *mapping_table;
u32 channel, ee, hw_ver;
int err;
ctrl = spmi_controller_alloc(&pdev->dev, sizeof(*pmic_arb));
if (!ctrl)
return -ENOMEM;
pmic_arb = spmi_controller_get_drvdata(ctrl);
pmic_arb->spmic = ctrl;
/*
* Please don't replace this with devm_platform_ioremap_resource() or
* devm_ioremap_resource(). These both result in a call to
* devm_request_mem_region() which prevents multiple mappings of this
* register address range. SoCs with PMIC arbiter v7 may define two
* arbiter devices, for the two physical SPMI interfaces, which share
* some register address ranges (i.e. "core", "obsrvr", and "chnls").
* Ensure that both devices probe successfully by calling devm_ioremap()
* which does not result in a devm_request_mem_region() call.
*/
res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "core");
	core = devm_ioremap(&ctrl->dev, res->start, resource_size(res));
	if (!core) {
		/* devm_ioremap() returns NULL on failure, not an ERR_PTR() */
		err = -ENOMEM;
		goto err_put_ctrl;
	}
pmic_arb->core_size = resource_size(res);
pmic_arb->ppid_to_apid = devm_kcalloc(&ctrl->dev, PMIC_ARB_MAX_PPID,
sizeof(*pmic_arb->ppid_to_apid),
GFP_KERNEL);
if (!pmic_arb->ppid_to_apid) {
err = -ENOMEM;
goto err_put_ctrl;
}
hw_ver = readl_relaxed(core + PMIC_ARB_VERSION);
if (hw_ver < PMIC_ARB_VERSION_V2_MIN) {
pmic_arb->ver_ops = &pmic_arb_v1;
pmic_arb->wr_base = core;
pmic_arb->rd_base = core;
} else {
pmic_arb->core = core;
if (hw_ver < PMIC_ARB_VERSION_V3_MIN)
pmic_arb->ver_ops = &pmic_arb_v2;
else if (hw_ver < PMIC_ARB_VERSION_V5_MIN)
pmic_arb->ver_ops = &pmic_arb_v3;
else if (hw_ver < PMIC_ARB_VERSION_V7_MIN)
pmic_arb->ver_ops = &pmic_arb_v5;
else
pmic_arb->ver_ops = &pmic_arb_v7;
res = platform_get_resource_byname(pdev, IORESOURCE_MEM,
"obsrvr");
pmic_arb->rd_base = devm_ioremap(&ctrl->dev, res->start,
resource_size(res));
		if (!pmic_arb->rd_base) {
			err = -ENOMEM;
			goto err_put_ctrl;
		}
res = platform_get_resource_byname(pdev, IORESOURCE_MEM,
"chnls");
pmic_arb->wr_base = devm_ioremap(&ctrl->dev, res->start,
resource_size(res));
		if (!pmic_arb->wr_base) {
			err = -ENOMEM;
			goto err_put_ctrl;
		}
}
pmic_arb->max_periphs = PMIC_ARB_MAX_PERIPHS;
if (hw_ver >= PMIC_ARB_VERSION_V7_MIN) {
pmic_arb->max_periphs = PMIC_ARB_MAX_PERIPHS_V7;
/* Optional property for v7: */
of_property_read_u32(pdev->dev.of_node, "qcom,bus-id",
&pmic_arb->bus_instance);
if (pmic_arb->bus_instance > 1) {
err = -EINVAL;
dev_err(&pdev->dev, "invalid bus instance (%u) specified\n",
pmic_arb->bus_instance);
goto err_put_ctrl;
}
if (pmic_arb->bus_instance == 0) {
pmic_arb->base_apid = 0;
pmic_arb->apid_count =
readl_relaxed(core + PMIC_ARB_FEATURES) &
PMIC_ARB_FEATURES_PERIPH_MASK;
} else {
pmic_arb->base_apid =
readl_relaxed(core + PMIC_ARB_FEATURES) &
PMIC_ARB_FEATURES_PERIPH_MASK;
pmic_arb->apid_count =
readl_relaxed(core + PMIC_ARB_FEATURES1) &
PMIC_ARB_FEATURES_PERIPH_MASK;
}
if (pmic_arb->base_apid + pmic_arb->apid_count > pmic_arb->max_periphs) {
err = -EINVAL;
dev_err(&pdev->dev, "Unsupported APID count %d detected\n",
pmic_arb->base_apid + pmic_arb->apid_count);
goto err_put_ctrl;
}
} else if (hw_ver >= PMIC_ARB_VERSION_V5_MIN) {
pmic_arb->base_apid = 0;
pmic_arb->apid_count = readl_relaxed(core + PMIC_ARB_FEATURES) &
PMIC_ARB_FEATURES_PERIPH_MASK;
if (pmic_arb->apid_count > pmic_arb->max_periphs) {
err = -EINVAL;
dev_err(&pdev->dev, "Unsupported APID count %d detected\n",
pmic_arb->apid_count);
goto err_put_ctrl;
}
}
pmic_arb->apid_data = devm_kcalloc(&ctrl->dev, pmic_arb->max_periphs,
sizeof(*pmic_arb->apid_data),
GFP_KERNEL);
if (!pmic_arb->apid_data) {
err = -ENOMEM;
goto err_put_ctrl;
}
dev_info(&ctrl->dev, "PMIC arbiter version %s (0x%x)\n",
pmic_arb->ver_ops->ver_str, hw_ver);
res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "intr");
pmic_arb->intr = devm_ioremap_resource(&ctrl->dev, res);
if (IS_ERR(pmic_arb->intr)) {
err = PTR_ERR(pmic_arb->intr);
goto err_put_ctrl;
}
res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "cnfg");
pmic_arb->cnfg = devm_ioremap_resource(&ctrl->dev, res);
if (IS_ERR(pmic_arb->cnfg)) {
err = PTR_ERR(pmic_arb->cnfg);
goto err_put_ctrl;
}
pmic_arb->irq = platform_get_irq_byname(pdev, "periph_irq");
if (pmic_arb->irq < 0) {
err = pmic_arb->irq;
goto err_put_ctrl;
}
err = of_property_read_u32(pdev->dev.of_node, "qcom,channel", &channel);
if (err) {
dev_err(&pdev->dev, "channel unspecified.\n");
goto err_put_ctrl;
}
if (channel > 5) {
dev_err(&pdev->dev, "invalid channel (%u) specified.\n",
channel);
err = -EINVAL;
goto err_put_ctrl;
}
pmic_arb->channel = channel;
err = of_property_read_u32(pdev->dev.of_node, "qcom,ee", &ee);
if (err) {
dev_err(&pdev->dev, "EE unspecified.\n");
goto err_put_ctrl;
}
if (ee > 5) {
dev_err(&pdev->dev, "invalid EE (%u) specified\n", ee);
err = -EINVAL;
goto err_put_ctrl;
}
pmic_arb->ee = ee;
mapping_table = devm_kcalloc(&ctrl->dev, pmic_arb->max_periphs,
sizeof(*mapping_table), GFP_KERNEL);
if (!mapping_table) {
err = -ENOMEM;
goto err_put_ctrl;
}
pmic_arb->mapping_table = mapping_table;
	/*
	 * Initialize max_apid/min_apid to the opposite bounds; they are
	 * guaranteed to be updated during IRQ domain translation.
	 */
pmic_arb->max_apid = 0;
pmic_arb->min_apid = pmic_arb->max_periphs - 1;
platform_set_drvdata(pdev, ctrl);
raw_spin_lock_init(&pmic_arb->lock);
ctrl->cmd = pmic_arb_cmd;
ctrl->read_cmd = pmic_arb_read_cmd;
ctrl->write_cmd = pmic_arb_write_cmd;
if (hw_ver >= PMIC_ARB_VERSION_V5_MIN) {
err = pmic_arb_read_apid_map_v5(pmic_arb);
if (err) {
dev_err(&pdev->dev, "could not read APID->PPID mapping table, rc= %d\n",
err);
goto err_put_ctrl;
}
}
dev_dbg(&pdev->dev, "adding irq domain\n");
pmic_arb->domain = irq_domain_add_tree(pdev->dev.of_node,
&pmic_arb_irq_domain_ops, pmic_arb);
if (!pmic_arb->domain) {
dev_err(&pdev->dev, "unable to create irq_domain\n");
err = -ENOMEM;
goto err_put_ctrl;
}
irq_set_chained_handler_and_data(pmic_arb->irq, pmic_arb_chained_irq,
pmic_arb);
err = spmi_controller_add(ctrl);
if (err)
goto err_domain_remove;
return 0;
err_domain_remove:
irq_set_chained_handler_and_data(pmic_arb->irq, NULL, NULL);
irq_domain_remove(pmic_arb->domain);
err_put_ctrl:
spmi_controller_put(ctrl);
return err;
}
static void spmi_pmic_arb_remove(struct platform_device *pdev)
{
struct spmi_controller *ctrl = platform_get_drvdata(pdev);
struct spmi_pmic_arb *pmic_arb = spmi_controller_get_drvdata(ctrl);
spmi_controller_remove(ctrl);
irq_set_chained_handler_and_data(pmic_arb->irq, NULL, NULL);
irq_domain_remove(pmic_arb->domain);
spmi_controller_put(ctrl);
}
static const struct of_device_id spmi_pmic_arb_match_table[] = {
{ .compatible = "qcom,spmi-pmic-arb", },
{},
};
MODULE_DEVICE_TABLE(of, spmi_pmic_arb_match_table);
static struct platform_driver spmi_pmic_arb_driver = {
.probe = spmi_pmic_arb_probe,
.remove_new = spmi_pmic_arb_remove,
.driver = {
.name = "spmi_pmic_arb",
.of_match_table = spmi_pmic_arb_match_table,
},
};
module_platform_driver(spmi_pmic_arb_driver);
MODULE_LICENSE("GPL v2");
MODULE_ALIAS("platform:spmi_pmic_arb");
| linux-master | drivers/spmi/spmi-pmic-arb.c |
// SPDX-License-Identifier: GPL-2.0-only
/*
* Copyright (c) 2012-2015, The Linux Foundation. All rights reserved.
*/
#include <linux/kernel.h>
#include <linux/errno.h>
#include <linux/idr.h>
#include <linux/slab.h>
#include <linux/module.h>
#include <linux/of.h>
#include <linux/of_device.h>
#include <linux/platform_device.h>
#include <linux/spmi.h>
#include <linux/pm_runtime.h>
#include <dt-bindings/spmi/spmi.h>
#define CREATE_TRACE_POINTS
#include <trace/events/spmi.h>
static bool is_registered;
static DEFINE_IDA(ctrl_ida);
static void spmi_dev_release(struct device *dev)
{
struct spmi_device *sdev = to_spmi_device(dev);
kfree(sdev);
}
static const struct device_type spmi_dev_type = {
.release = spmi_dev_release,
};
static void spmi_ctrl_release(struct device *dev)
{
struct spmi_controller *ctrl = to_spmi_controller(dev);
ida_free(&ctrl_ida, ctrl->nr);
kfree(ctrl);
}
static const struct device_type spmi_ctrl_type = {
.release = spmi_ctrl_release,
};
static int spmi_device_match(struct device *dev, struct device_driver *drv)
{
if (of_driver_match_device(dev, drv))
return 1;
if (drv->name)
return strncmp(dev_name(dev), drv->name,
SPMI_NAME_SIZE) == 0;
return 0;
}
/**
* spmi_device_add() - add a device previously constructed via spmi_device_alloc()
* @sdev: spmi_device to be added
*/
int spmi_device_add(struct spmi_device *sdev)
{
struct spmi_controller *ctrl = sdev->ctrl;
int err;
dev_set_name(&sdev->dev, "%d-%02x", ctrl->nr, sdev->usid);
err = device_add(&sdev->dev);
if (err < 0) {
dev_err(&sdev->dev, "Can't add %s, status %d\n",
dev_name(&sdev->dev), err);
goto err_device_add;
}
dev_dbg(&sdev->dev, "device %s registered\n", dev_name(&sdev->dev));
err_device_add:
return err;
}
EXPORT_SYMBOL_GPL(spmi_device_add);
/**
* spmi_device_remove(): remove an SPMI device
* @sdev: spmi_device to be removed
*/
void spmi_device_remove(struct spmi_device *sdev)
{
device_unregister(&sdev->dev);
}
EXPORT_SYMBOL_GPL(spmi_device_remove);
static inline int
spmi_cmd(struct spmi_controller *ctrl, u8 opcode, u8 sid)
{
int ret;
if (!ctrl || !ctrl->cmd || ctrl->dev.type != &spmi_ctrl_type)
return -EINVAL;
ret = ctrl->cmd(ctrl, opcode, sid);
trace_spmi_cmd(opcode, sid, ret);
return ret;
}
static inline int spmi_read_cmd(struct spmi_controller *ctrl, u8 opcode,
u8 sid, u16 addr, u8 *buf, size_t len)
{
int ret;
if (!ctrl || !ctrl->read_cmd || ctrl->dev.type != &spmi_ctrl_type)
return -EINVAL;
trace_spmi_read_begin(opcode, sid, addr);
ret = ctrl->read_cmd(ctrl, opcode, sid, addr, buf, len);
trace_spmi_read_end(opcode, sid, addr, ret, len, buf);
return ret;
}
static inline int spmi_write_cmd(struct spmi_controller *ctrl, u8 opcode,
u8 sid, u16 addr, const u8 *buf, size_t len)
{
int ret;
if (!ctrl || !ctrl->write_cmd || ctrl->dev.type != &spmi_ctrl_type)
return -EINVAL;
trace_spmi_write_begin(opcode, sid, addr, len, buf);
ret = ctrl->write_cmd(ctrl, opcode, sid, addr, buf, len);
trace_spmi_write_end(opcode, sid, addr, ret);
return ret;
}
/**
* spmi_register_read() - register read
* @sdev: SPMI device.
* @addr: slave register address (5-bit address).
* @buf: buffer to be populated with data from the Slave.
*
* Reads 1 byte of data from a Slave device register.
*/
int spmi_register_read(struct spmi_device *sdev, u8 addr, u8 *buf)
{
/* 5-bit register address */
if (addr > 0x1F)
return -EINVAL;
return spmi_read_cmd(sdev->ctrl, SPMI_CMD_READ, sdev->usid, addr,
buf, 1);
}
EXPORT_SYMBOL_GPL(spmi_register_read);
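/*
 * Editor's note: the function below is an illustrative sketch for client
 * drivers, not part of the SPMI core. The register offset 0x05 is purely
 * hypothetical.
 */
static int __maybe_unused spmi_register_read_example(struct spmi_device *sdev)
{
	u8 val;
	int ret;

	/* single-byte read from the 5-bit register space (0x00..0x1f) */
	ret = spmi_register_read(sdev, 0x05, &val);
	if (ret)
		return ret;

	dev_dbg(&sdev->dev, "register 0x05 = %#x\n", val);
	return 0;
}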
/**
* spmi_ext_register_read() - extended register read
* @sdev: SPMI device.
* @addr: slave register address (8-bit address).
* @buf: buffer to be populated with data from the Slave.
 * @len: the requested number of bytes to read (up to 16 bytes).
*
* Reads up to 16 bytes of data from the extended register space on a
* Slave device.
*/
int spmi_ext_register_read(struct spmi_device *sdev, u8 addr, u8 *buf,
size_t len)
{
/* 8-bit register address, up to 16 bytes */
if (len == 0 || len > 16)
return -EINVAL;
return spmi_read_cmd(sdev->ctrl, SPMI_CMD_EXT_READ, sdev->usid, addr,
buf, len);
}
EXPORT_SYMBOL_GPL(spmi_ext_register_read);
/**
* spmi_ext_register_readl() - extended register read long
* @sdev: SPMI device.
* @addr: slave register address (16-bit address).
* @buf: buffer to be populated with data from the Slave.
 * @len: the requested number of bytes to read (up to 8 bytes).
*
* Reads up to 8 bytes of data from the extended register space on a
* Slave device using 16-bit address.
*/
int spmi_ext_register_readl(struct spmi_device *sdev, u16 addr, u8 *buf,
size_t len)
{
/* 16-bit register address, up to 8 bytes */
if (len == 0 || len > 8)
return -EINVAL;
return spmi_read_cmd(sdev->ctrl, SPMI_CMD_EXT_READL, sdev->usid, addr,
buf, len);
}
EXPORT_SYMBOL_GPL(spmi_ext_register_readl);
/**
* spmi_register_write() - register write
* @sdev: SPMI device
* @addr: slave register address (5-bit address).
* @data: buffer containing the data to be transferred to the Slave.
*
* Writes 1 byte of data to a Slave device register.
*/
int spmi_register_write(struct spmi_device *sdev, u8 addr, u8 data)
{
/* 5-bit register address */
if (addr > 0x1F)
return -EINVAL;
return spmi_write_cmd(sdev->ctrl, SPMI_CMD_WRITE, sdev->usid, addr,
&data, 1);
}
EXPORT_SYMBOL_GPL(spmi_register_write);
/**
* spmi_register_zero_write() - register zero write
* @sdev: SPMI device.
* @data: the data to be written to register 0 (7-bits).
*
* Writes data to register 0 of the Slave device.
*/
int spmi_register_zero_write(struct spmi_device *sdev, u8 data)
{
return spmi_write_cmd(sdev->ctrl, SPMI_CMD_ZERO_WRITE, sdev->usid, 0,
&data, 1);
}
EXPORT_SYMBOL_GPL(spmi_register_zero_write);
/**
* spmi_ext_register_write() - extended register write
* @sdev: SPMI device.
* @addr: slave register address (8-bit address).
* @buf: buffer containing the data to be transferred to the Slave.
 * @len: the requested number of bytes to write (up to 16 bytes).
*
* Writes up to 16 bytes of data to the extended register space of a
* Slave device.
*/
int spmi_ext_register_write(struct spmi_device *sdev, u8 addr, const u8 *buf,
size_t len)
{
/* 8-bit register address, up to 16 bytes */
if (len == 0 || len > 16)
return -EINVAL;
return spmi_write_cmd(sdev->ctrl, SPMI_CMD_EXT_WRITE, sdev->usid, addr,
buf, len);
}
EXPORT_SYMBOL_GPL(spmi_ext_register_write);
/**
* spmi_ext_register_writel() - extended register write long
* @sdev: SPMI device.
* @addr: slave register address (16-bit address).
* @buf: buffer containing the data to be transferred to the Slave.
 * @len: the requested number of bytes to write (up to 8 bytes).
*
* Writes up to 8 bytes of data to the extended register space of a
* Slave device using 16-bit address.
*/
int spmi_ext_register_writel(struct spmi_device *sdev, u16 addr, const u8 *buf,
size_t len)
{
/* 4-bit Slave Identifier, 16-bit register address, up to 8 bytes */
if (len == 0 || len > 8)
return -EINVAL;
return spmi_write_cmd(sdev->ctrl, SPMI_CMD_EXT_WRITEL, sdev->usid,
addr, buf, len);
}
EXPORT_SYMBOL_GPL(spmi_ext_register_writel);
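/*
 * Editor's note: illustrative sketch only, not part of the SPMI core. It
 * combines spmi_ext_register_readl() and spmi_ext_register_writel() into a
 * read-modify-write of one byte in the 16-bit address space; the 0x4046
 * register address and the bit being set are hypothetical.
 */
static int __maybe_unused spmi_ext_rmw_example(struct spmi_device *sdev)
{
	u8 val;
	int ret;

	ret = spmi_ext_register_readl(sdev, 0x4046, &val, 1);
	if (ret)
		return ret;

	val |= 0x01;	/* hypothetical enable bit */

	return spmi_ext_register_writel(sdev, 0x4046, &val, 1);
}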
/**
* spmi_command_reset() - sends RESET command to the specified slave
* @sdev: SPMI device.
*
* The Reset command initializes the Slave and forces all registers to
* their reset values. The Slave shall enter the STARTUP state after
* receiving a Reset command.
*/
int spmi_command_reset(struct spmi_device *sdev)
{
return spmi_cmd(sdev->ctrl, SPMI_CMD_RESET, sdev->usid);
}
EXPORT_SYMBOL_GPL(spmi_command_reset);
/**
* spmi_command_sleep() - sends SLEEP command to the specified SPMI device
* @sdev: SPMI device.
*
* The Sleep command causes the Slave to enter the user defined SLEEP state.
*/
int spmi_command_sleep(struct spmi_device *sdev)
{
return spmi_cmd(sdev->ctrl, SPMI_CMD_SLEEP, sdev->usid);
}
EXPORT_SYMBOL_GPL(spmi_command_sleep);
/**
* spmi_command_wakeup() - sends WAKEUP command to the specified SPMI device
* @sdev: SPMI device.
*
* The Wakeup command causes the Slave to move from the SLEEP state to
* the ACTIVE state.
*/
int spmi_command_wakeup(struct spmi_device *sdev)
{
return spmi_cmd(sdev->ctrl, SPMI_CMD_WAKEUP, sdev->usid);
}
EXPORT_SYMBOL_GPL(spmi_command_wakeup);
/**
* spmi_command_shutdown() - sends SHUTDOWN command to the specified SPMI device
* @sdev: SPMI device.
*
* The Shutdown command causes the Slave to enter the SHUTDOWN state.
*/
int spmi_command_shutdown(struct spmi_device *sdev)
{
return spmi_cmd(sdev->ctrl, SPMI_CMD_SHUTDOWN, sdev->usid);
}
EXPORT_SYMBOL_GPL(spmi_command_shutdown);
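/*
 * Editor's note: illustrative sketch only, not part of the SPMI core. It
 * shows the intended pairing of the bus commands documented above for a
 * slave that is parked in SLEEP while unused.
 */
static int __maybe_unused spmi_command_example(struct spmi_device *sdev,
					       bool suspend)
{
	/*
	 * SLEEP parks the slave in its user-defined low-power state;
	 * WAKEUP returns it to ACTIVE (see the kernel-doc above).
	 */
	if (suspend)
		return spmi_command_sleep(sdev);

	return spmi_command_wakeup(sdev);
}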
static int spmi_drv_probe(struct device *dev)
{
const struct spmi_driver *sdrv = to_spmi_driver(dev->driver);
struct spmi_device *sdev = to_spmi_device(dev);
int err;
pm_runtime_get_noresume(dev);
pm_runtime_set_active(dev);
pm_runtime_enable(dev);
err = sdrv->probe(sdev);
if (err)
goto fail_probe;
return 0;
fail_probe:
pm_runtime_disable(dev);
pm_runtime_set_suspended(dev);
pm_runtime_put_noidle(dev);
return err;
}
static void spmi_drv_remove(struct device *dev)
{
const struct spmi_driver *sdrv = to_spmi_driver(dev->driver);
pm_runtime_get_sync(dev);
if (sdrv->remove)
sdrv->remove(to_spmi_device(dev));
pm_runtime_put_noidle(dev);
pm_runtime_disable(dev);
pm_runtime_set_suspended(dev);
pm_runtime_put_noidle(dev);
}
static void spmi_drv_shutdown(struct device *dev)
{
const struct spmi_driver *sdrv = to_spmi_driver(dev->driver);
if (sdrv && sdrv->shutdown)
sdrv->shutdown(to_spmi_device(dev));
}
static int spmi_drv_uevent(const struct device *dev, struct kobj_uevent_env *env)
{
int ret;
ret = of_device_uevent_modalias(dev, env);
if (ret != -ENODEV)
return ret;
return 0;
}
static struct bus_type spmi_bus_type = {
.name = "spmi",
.match = spmi_device_match,
.probe = spmi_drv_probe,
.remove = spmi_drv_remove,
.shutdown = spmi_drv_shutdown,
.uevent = spmi_drv_uevent,
};
/**
* spmi_device_from_of() - get the associated SPMI device from a device node
*
* @np: device node
*
* Returns the struct spmi_device associated with a device node or NULL.
*/
struct spmi_device *spmi_device_from_of(struct device_node *np)
{
struct device *dev = bus_find_device_by_of_node(&spmi_bus_type, np);
if (dev)
return to_spmi_device(dev);
return NULL;
}
EXPORT_SYMBOL_GPL(spmi_device_from_of);
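/*
 * Editor's note: illustrative sketch only, not part of the SPMI core. The
 * "example,pmic" phandle property is hypothetical. The returned device
 * carries a reference taken by bus_find_device_by_of_node(), so the caller
 * is expected to drop it with put_device() when done.
 */
static __maybe_unused struct spmi_device *
spmi_device_from_of_example(struct device *consumer)
{
	struct device_node *np;
	struct spmi_device *sdev;

	np = of_parse_phandle(consumer->of_node, "example,pmic", 0);
	if (!np)
		return NULL;

	sdev = spmi_device_from_of(np);
	of_node_put(np);

	return sdev;
}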
/**
* spmi_device_alloc() - Allocate a new SPMI device
* @ctrl: associated controller
*
* Caller is responsible for either calling spmi_device_add() to add the
 * newly allocated device, or calling spmi_device_put() to discard it.
*/
struct spmi_device *spmi_device_alloc(struct spmi_controller *ctrl)
{
struct spmi_device *sdev;
sdev = kzalloc(sizeof(*sdev), GFP_KERNEL);
if (!sdev)
return NULL;
sdev->ctrl = ctrl;
device_initialize(&sdev->dev);
sdev->dev.parent = &ctrl->dev;
sdev->dev.bus = &spmi_bus_type;
sdev->dev.type = &spmi_dev_type;
return sdev;
}
EXPORT_SYMBOL_GPL(spmi_device_alloc);
/**
* spmi_controller_alloc() - Allocate a new SPMI controller
* @parent: parent device
* @size: size of private data
*
* Caller is responsible for either calling spmi_controller_add() to add the
* newly allocated controller, or calling spmi_controller_put() to discard it.
* The allocated private data region may be accessed via
* spmi_controller_get_drvdata()
*/
struct spmi_controller *spmi_controller_alloc(struct device *parent,
size_t size)
{
struct spmi_controller *ctrl;
int id;
if (WARN_ON(!parent))
return NULL;
ctrl = kzalloc(sizeof(*ctrl) + size, GFP_KERNEL);
if (!ctrl)
return NULL;
device_initialize(&ctrl->dev);
ctrl->dev.type = &spmi_ctrl_type;
ctrl->dev.bus = &spmi_bus_type;
ctrl->dev.parent = parent;
ctrl->dev.of_node = parent->of_node;
spmi_controller_set_drvdata(ctrl, &ctrl[1]);
id = ida_alloc(&ctrl_ida, GFP_KERNEL);
if (id < 0) {
dev_err(parent,
"unable to allocate SPMI controller identifier.\n");
spmi_controller_put(ctrl);
return NULL;
}
ctrl->nr = id;
dev_set_name(&ctrl->dev, "spmi-%d", id);
dev_dbg(&ctrl->dev, "allocated controller 0x%p id %d\n", ctrl, id);
return ctrl;
}
EXPORT_SYMBOL_GPL(spmi_controller_alloc);
static void of_spmi_register_devices(struct spmi_controller *ctrl)
{
struct device_node *node;
int err;
if (!ctrl->dev.of_node)
return;
for_each_available_child_of_node(ctrl->dev.of_node, node) {
struct spmi_device *sdev;
u32 reg[2];
dev_dbg(&ctrl->dev, "adding child %pOF\n", node);
err = of_property_read_u32_array(node, "reg", reg, 2);
if (err) {
dev_err(&ctrl->dev,
"node %pOF err (%d) does not have 'reg' property\n",
node, err);
continue;
}
if (reg[1] != SPMI_USID) {
dev_err(&ctrl->dev,
"node %pOF contains unsupported 'reg' entry\n",
node);
continue;
}
if (reg[0] >= SPMI_MAX_SLAVE_ID) {
dev_err(&ctrl->dev, "invalid usid on node %pOF\n", node);
continue;
}
dev_dbg(&ctrl->dev, "read usid %02x\n", reg[0]);
sdev = spmi_device_alloc(ctrl);
if (!sdev)
continue;
sdev->dev.of_node = node;
sdev->usid = (u8)reg[0];
err = spmi_device_add(sdev);
if (err) {
dev_err(&sdev->dev,
"failure adding device. status %d\n", err);
spmi_device_put(sdev);
}
}
}
/**
* spmi_controller_add() - Add an SPMI controller
* @ctrl: controller to be registered.
*
* Register a controller previously allocated via spmi_controller_alloc() with
* the SPMI core.
*/
int spmi_controller_add(struct spmi_controller *ctrl)
{
int ret;
/* Can't register until after driver model init */
if (WARN_ON(!is_registered))
return -EAGAIN;
ret = device_add(&ctrl->dev);
if (ret)
return ret;
if (IS_ENABLED(CONFIG_OF))
of_spmi_register_devices(ctrl);
dev_dbg(&ctrl->dev, "spmi-%d registered: dev:%p\n",
ctrl->nr, &ctrl->dev);
return 0;
}
EXPORT_SYMBOL_GPL(spmi_controller_add);
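/*
 * Illustrative registration sequence (hypothetical function name): a
 * controller driver allocates, fills in its transfer hooks and then adds
 * the controller, dropping the reference on failure. The MediaTek PMIF
 * driver in drivers/spmi/spmi-mtk-pmif.c follows this pattern in its probe
 * routine.
 */
static int __maybe_unused example_register_controller(struct device *parent)
{
        struct spmi_controller *ctrl;
        int err;

        ctrl = spmi_controller_alloc(parent, 0);        /* no private data in this sketch */
        if (!ctrl)
                return -ENOMEM;

        /* a real driver assigns ctrl->cmd, ctrl->read_cmd and ctrl->write_cmd here */

        err = spmi_controller_add(ctrl);
        if (err)
                spmi_controller_put(ctrl);      /* discard the unregistered controller */
        return err;
}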
/* Remove a device associated with a controller */
static int spmi_ctrl_remove_device(struct device *dev, void *data)
{
struct spmi_device *spmidev = to_spmi_device(dev);
if (dev->type == &spmi_dev_type)
spmi_device_remove(spmidev);
return 0;
}
/**
* spmi_controller_remove() - remove an SPMI controller
* @ctrl: controller to remove
*
* Remove an SPMI controller. Caller is responsible for calling
* spmi_controller_put() to discard the allocated controller.
*/
void spmi_controller_remove(struct spmi_controller *ctrl)
{
if (!ctrl)
return;
device_for_each_child(&ctrl->dev, NULL, spmi_ctrl_remove_device);
device_del(&ctrl->dev);
}
EXPORT_SYMBOL_GPL(spmi_controller_remove);
/**
* __spmi_driver_register() - Register client driver with SPMI core
* @sdrv: client driver to be associated with client-device.
* @owner: module owner
*
* This API will register the client driver with the SPMI framework.
* It is typically called from the driver's module-init function.
*/
int __spmi_driver_register(struct spmi_driver *sdrv, struct module *owner)
{
sdrv->driver.bus = &spmi_bus_type;
sdrv->driver.owner = owner;
return driver_register(&sdrv->driver);
}
EXPORT_SYMBOL_GPL(__spmi_driver_register);
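/*
 * Illustrative client-driver skeleton (hypothetical names). A real module
 * would normally let the module_spmi_driver() helper from <linux/spmi.h>
 * generate the init/exit boilerplate that calls the registration function
 * above.
 */
static int __maybe_unused example_client_probe(struct spmi_device *sdev)
{
        u8 val;

        /* read one byte from register 0 of this Slave as a smoke test */
        return spmi_register_read(sdev, 0, &val);
}

static struct spmi_driver __maybe_unused example_client_driver = {
        .driver = {
                .name = "example-spmi-client",
        },
        .probe = example_client_probe,
};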
static void __exit spmi_exit(void)
{
bus_unregister(&spmi_bus_type);
}
module_exit(spmi_exit);
static int __init spmi_init(void)
{
int ret;
ret = bus_register(&spmi_bus_type);
if (ret)
return ret;
is_registered = true;
return 0;
}
postcore_initcall(spmi_init);
MODULE_LICENSE("GPL v2");
MODULE_DESCRIPTION("SPMI module");
MODULE_ALIAS("platform:spmi");
| linux-master | drivers/spmi/spmi.c |
// SPDX-License-Identifier: GPL-2.0
//
// Copyright (c) 2021 MediaTek Inc.
#include <linux/clk.h>
#include <linux/iopoll.h>
#include <linux/module.h>
#include <linux/of.h>
#include <linux/platform_device.h>
#include <linux/property.h>
#include <linux/spmi.h>
#define SWINF_IDLE 0x00
#define SWINF_WFVLDCLR 0x06
#define GET_SWINF(x) (((x) >> 1) & 0x7)
#define PMIF_CMD_REG_0 0
#define PMIF_CMD_REG 1
#define PMIF_CMD_EXT_REG 2
#define PMIF_CMD_EXT_REG_LONG 3
#define PMIF_DELAY_US 10
#define PMIF_TIMEOUT_US (10 * 1000)
#define PMIF_CHAN_OFFSET 0x5
#define PMIF_MAX_CLKS 3
#define SPMI_OP_ST_BUSY 1
struct ch_reg {
u32 ch_sta;
u32 wdata;
u32 rdata;
u32 ch_send;
u32 ch_rdy;
};
struct pmif_data {
const u32 *regs;
const u32 *spmimst_regs;
u32 soc_chan;
};
struct pmif {
void __iomem *base;
void __iomem *spmimst_base;
struct ch_reg chan;
struct clk_bulk_data clks[PMIF_MAX_CLKS];
size_t nclks;
const struct pmif_data *data;
};
static const char * const pmif_clock_names[] = {
"pmif_sys_ck", "pmif_tmr_ck", "spmimst_clk_mux",
};
enum pmif_regs {
PMIF_INIT_DONE,
PMIF_INF_EN,
PMIF_ARB_EN,
PMIF_CMDISSUE_EN,
PMIF_TIMER_CTRL,
PMIF_SPI_MODE_CTRL,
PMIF_IRQ_EVENT_EN_0,
PMIF_IRQ_FLAG_0,
PMIF_IRQ_CLR_0,
PMIF_IRQ_EVENT_EN_1,
PMIF_IRQ_FLAG_1,
PMIF_IRQ_CLR_1,
PMIF_IRQ_EVENT_EN_2,
PMIF_IRQ_FLAG_2,
PMIF_IRQ_CLR_2,
PMIF_IRQ_EVENT_EN_3,
PMIF_IRQ_FLAG_3,
PMIF_IRQ_CLR_3,
PMIF_IRQ_EVENT_EN_4,
PMIF_IRQ_FLAG_4,
PMIF_IRQ_CLR_4,
PMIF_WDT_EVENT_EN_0,
PMIF_WDT_FLAG_0,
PMIF_WDT_EVENT_EN_1,
PMIF_WDT_FLAG_1,
PMIF_SWINF_0_STA,
PMIF_SWINF_0_WDATA_31_0,
PMIF_SWINF_0_RDATA_31_0,
PMIF_SWINF_0_ACC,
PMIF_SWINF_0_VLD_CLR,
PMIF_SWINF_1_STA,
PMIF_SWINF_1_WDATA_31_0,
PMIF_SWINF_1_RDATA_31_0,
PMIF_SWINF_1_ACC,
PMIF_SWINF_1_VLD_CLR,
PMIF_SWINF_2_STA,
PMIF_SWINF_2_WDATA_31_0,
PMIF_SWINF_2_RDATA_31_0,
PMIF_SWINF_2_ACC,
PMIF_SWINF_2_VLD_CLR,
PMIF_SWINF_3_STA,
PMIF_SWINF_3_WDATA_31_0,
PMIF_SWINF_3_RDATA_31_0,
PMIF_SWINF_3_ACC,
PMIF_SWINF_3_VLD_CLR,
};
static const u32 mt6873_regs[] = {
[PMIF_INIT_DONE] = 0x0000,
[PMIF_INF_EN] = 0x0024,
[PMIF_ARB_EN] = 0x0150,
[PMIF_CMDISSUE_EN] = 0x03B4,
[PMIF_TIMER_CTRL] = 0x03E0,
[PMIF_SPI_MODE_CTRL] = 0x0400,
[PMIF_IRQ_EVENT_EN_0] = 0x0418,
[PMIF_IRQ_FLAG_0] = 0x0420,
[PMIF_IRQ_CLR_0] = 0x0424,
[PMIF_IRQ_EVENT_EN_1] = 0x0428,
[PMIF_IRQ_FLAG_1] = 0x0430,
[PMIF_IRQ_CLR_1] = 0x0434,
[PMIF_IRQ_EVENT_EN_2] = 0x0438,
[PMIF_IRQ_FLAG_2] = 0x0440,
[PMIF_IRQ_CLR_2] = 0x0444,
[PMIF_IRQ_EVENT_EN_3] = 0x0448,
[PMIF_IRQ_FLAG_3] = 0x0450,
[PMIF_IRQ_CLR_3] = 0x0454,
[PMIF_IRQ_EVENT_EN_4] = 0x0458,
[PMIF_IRQ_FLAG_4] = 0x0460,
[PMIF_IRQ_CLR_4] = 0x0464,
[PMIF_WDT_EVENT_EN_0] = 0x046C,
[PMIF_WDT_FLAG_0] = 0x0470,
[PMIF_WDT_EVENT_EN_1] = 0x0474,
[PMIF_WDT_FLAG_1] = 0x0478,
[PMIF_SWINF_0_ACC] = 0x0C00,
[PMIF_SWINF_0_WDATA_31_0] = 0x0C04,
[PMIF_SWINF_0_RDATA_31_0] = 0x0C14,
[PMIF_SWINF_0_VLD_CLR] = 0x0C24,
[PMIF_SWINF_0_STA] = 0x0C28,
[PMIF_SWINF_1_ACC] = 0x0C40,
[PMIF_SWINF_1_WDATA_31_0] = 0x0C44,
[PMIF_SWINF_1_RDATA_31_0] = 0x0C54,
[PMIF_SWINF_1_VLD_CLR] = 0x0C64,
[PMIF_SWINF_1_STA] = 0x0C68,
[PMIF_SWINF_2_ACC] = 0x0C80,
[PMIF_SWINF_2_WDATA_31_0] = 0x0C84,
[PMIF_SWINF_2_RDATA_31_0] = 0x0C94,
[PMIF_SWINF_2_VLD_CLR] = 0x0CA4,
[PMIF_SWINF_2_STA] = 0x0CA8,
[PMIF_SWINF_3_ACC] = 0x0CC0,
[PMIF_SWINF_3_WDATA_31_0] = 0x0CC4,
[PMIF_SWINF_3_RDATA_31_0] = 0x0CD4,
[PMIF_SWINF_3_VLD_CLR] = 0x0CE4,
[PMIF_SWINF_3_STA] = 0x0CE8,
};
static const u32 mt8195_regs[] = {
[PMIF_INIT_DONE] = 0x0000,
[PMIF_INF_EN] = 0x0024,
[PMIF_ARB_EN] = 0x0150,
[PMIF_CMDISSUE_EN] = 0x03B8,
[PMIF_TIMER_CTRL] = 0x03E4,
[PMIF_SPI_MODE_CTRL] = 0x0408,
[PMIF_IRQ_EVENT_EN_0] = 0x0420,
[PMIF_IRQ_FLAG_0] = 0x0428,
[PMIF_IRQ_CLR_0] = 0x042C,
[PMIF_IRQ_EVENT_EN_1] = 0x0430,
[PMIF_IRQ_FLAG_1] = 0x0438,
[PMIF_IRQ_CLR_1] = 0x043C,
[PMIF_IRQ_EVENT_EN_2] = 0x0440,
[PMIF_IRQ_FLAG_2] = 0x0448,
[PMIF_IRQ_CLR_2] = 0x044C,
[PMIF_IRQ_EVENT_EN_3] = 0x0450,
[PMIF_IRQ_FLAG_3] = 0x0458,
[PMIF_IRQ_CLR_3] = 0x045C,
[PMIF_IRQ_EVENT_EN_4] = 0x0460,
[PMIF_IRQ_FLAG_4] = 0x0468,
[PMIF_IRQ_CLR_4] = 0x046C,
[PMIF_WDT_EVENT_EN_0] = 0x0474,
[PMIF_WDT_FLAG_0] = 0x0478,
[PMIF_WDT_EVENT_EN_1] = 0x047C,
[PMIF_WDT_FLAG_1] = 0x0480,
[PMIF_SWINF_0_ACC] = 0x0800,
[PMIF_SWINF_0_WDATA_31_0] = 0x0804,
[PMIF_SWINF_0_RDATA_31_0] = 0x0814,
[PMIF_SWINF_0_VLD_CLR] = 0x0824,
[PMIF_SWINF_0_STA] = 0x0828,
[PMIF_SWINF_1_ACC] = 0x0840,
[PMIF_SWINF_1_WDATA_31_0] = 0x0844,
[PMIF_SWINF_1_RDATA_31_0] = 0x0854,
[PMIF_SWINF_1_VLD_CLR] = 0x0864,
[PMIF_SWINF_1_STA] = 0x0868,
[PMIF_SWINF_2_ACC] = 0x0880,
[PMIF_SWINF_2_WDATA_31_0] = 0x0884,
[PMIF_SWINF_2_RDATA_31_0] = 0x0894,
[PMIF_SWINF_2_VLD_CLR] = 0x08A4,
[PMIF_SWINF_2_STA] = 0x08A8,
[PMIF_SWINF_3_ACC] = 0x08C0,
[PMIF_SWINF_3_WDATA_31_0] = 0x08C4,
[PMIF_SWINF_3_RDATA_31_0] = 0x08D4,
[PMIF_SWINF_3_VLD_CLR] = 0x08E4,
[PMIF_SWINF_3_STA] = 0x08E8,
};
enum spmi_regs {
SPMI_OP_ST_CTRL,
SPMI_GRP_ID_EN,
SPMI_OP_ST_STA,
SPMI_MST_SAMPL,
SPMI_MST_REQ_EN,
SPMI_REC_CTRL,
SPMI_REC0,
SPMI_REC1,
SPMI_REC2,
SPMI_REC3,
SPMI_REC4,
SPMI_MST_DBG,
/* MT8195 spmi regs */
SPMI_MST_RCS_CTRL,
SPMI_SLV_3_0_EINT,
SPMI_SLV_7_4_EINT,
SPMI_SLV_B_8_EINT,
SPMI_SLV_F_C_EINT,
SPMI_REC_CMD_DEC,
SPMI_DEC_DBG,
};
static const u32 mt6873_spmi_regs[] = {
[SPMI_OP_ST_CTRL] = 0x0000,
[SPMI_GRP_ID_EN] = 0x0004,
[SPMI_OP_ST_STA] = 0x0008,
[SPMI_MST_SAMPL] = 0x000c,
[SPMI_MST_REQ_EN] = 0x0010,
[SPMI_REC_CTRL] = 0x0040,
[SPMI_REC0] = 0x0044,
[SPMI_REC1] = 0x0048,
[SPMI_REC2] = 0x004c,
[SPMI_REC3] = 0x0050,
[SPMI_REC4] = 0x0054,
[SPMI_MST_DBG] = 0x00fc,
};
static const u32 mt8195_spmi_regs[] = {
[SPMI_OP_ST_CTRL] = 0x0000,
[SPMI_GRP_ID_EN] = 0x0004,
[SPMI_OP_ST_STA] = 0x0008,
[SPMI_MST_SAMPL] = 0x000C,
[SPMI_MST_REQ_EN] = 0x0010,
[SPMI_MST_RCS_CTRL] = 0x0014,
[SPMI_SLV_3_0_EINT] = 0x0020,
[SPMI_SLV_7_4_EINT] = 0x0024,
[SPMI_SLV_B_8_EINT] = 0x0028,
[SPMI_SLV_F_C_EINT] = 0x002C,
[SPMI_REC_CTRL] = 0x0040,
[SPMI_REC0] = 0x0044,
[SPMI_REC1] = 0x0048,
[SPMI_REC2] = 0x004C,
[SPMI_REC3] = 0x0050,
[SPMI_REC4] = 0x0054,
[SPMI_REC_CMD_DEC] = 0x005C,
[SPMI_DEC_DBG] = 0x00F8,
[SPMI_MST_DBG] = 0x00FC,
};
static u32 pmif_readl(struct pmif *arb, enum pmif_regs reg)
{
return readl(arb->base + arb->data->regs[reg]);
}
static void pmif_writel(struct pmif *arb, u32 val, enum pmif_regs reg)
{
writel(val, arb->base + arb->data->regs[reg]);
}
static void mtk_spmi_writel(struct pmif *arb, u32 val, enum spmi_regs reg)
{
writel(val, arb->spmimst_base + arb->data->spmimst_regs[reg]);
}
static bool pmif_is_fsm_vldclr(struct pmif *arb)
{
u32 reg_rdata;
reg_rdata = pmif_readl(arb, arb->chan.ch_sta);
return GET_SWINF(reg_rdata) == SWINF_WFVLDCLR;
}
static int pmif_arb_cmd(struct spmi_controller *ctrl, u8 opc, u8 sid)
{
struct pmif *arb = spmi_controller_get_drvdata(ctrl);
u32 rdata, cmd;
int ret;
/* Check the opcode */
if (opc < SPMI_CMD_RESET || opc > SPMI_CMD_WAKEUP)
return -EINVAL;
/*
 * SPMI_OP_ST_CTRL takes the sequence-command index (0 = RESET, 1 = SLEEP,
 * 2 = SHUTDOWN, 3 = WAKEUP) in bits 7..4 and the target slave id in
 * bits 3..0.
 */
cmd = opc - SPMI_CMD_RESET;
mtk_spmi_writel(arb, (cmd << 0x4) | sid, SPMI_OP_ST_CTRL);
ret = readl_poll_timeout_atomic(arb->spmimst_base + arb->data->spmimst_regs[SPMI_OP_ST_STA],
rdata, (rdata & SPMI_OP_ST_BUSY) == SPMI_OP_ST_BUSY,
PMIF_DELAY_US, PMIF_TIMEOUT_US);
if (ret < 0)
dev_err(&ctrl->dev, "timeout, err = %d\n", ret);
return ret;
}
static int pmif_spmi_read_cmd(struct spmi_controller *ctrl, u8 opc, u8 sid,
u16 addr, u8 *buf, size_t len)
{
struct pmif *arb = spmi_controller_get_drvdata(ctrl);
struct ch_reg *inf_reg;
int ret;
u32 data, cmd;
/* Check for argument validation. */
if (sid & ~0xf) {
dev_err(&ctrl->dev, "exceed the max slv id\n");
return -EINVAL;
}
if (len > 4) {
dev_err(&ctrl->dev, "pmif supports 1..4 bytes per trans, but:%zu requested", len);
return -EINVAL;
}
if (opc >= 0x60 && opc <= 0x7f)
opc = PMIF_CMD_REG;
else if ((opc >= 0x20 && opc <= 0x2f) || (opc >= 0x38 && opc <= 0x3f))
opc = PMIF_CMD_EXT_REG_LONG;
else
return -EINVAL;
/* Wait for Software Interface FSM state to be IDLE. */
inf_reg = &arb->chan;
ret = readl_poll_timeout_atomic(arb->base + arb->data->regs[inf_reg->ch_sta],
data, GET_SWINF(data) == SWINF_IDLE,
PMIF_DELAY_US, PMIF_TIMEOUT_US);
if (ret < 0) {
/* set channel ready if the data has transferred */
if (pmif_is_fsm_vldclr(arb))
pmif_writel(arb, 1, inf_reg->ch_rdy);
dev_err(&ctrl->dev, "failed to wait for SWINF_IDLE\n");
return ret;
}
/*
 * Send the command: bits 31..30 carry the PMIF command type, bits 27..24
 * the slave id, bits 19..16 the byte count minus one and bits 15..0 the
 * register address. BIT(29) stays clear for reads.
 */
cmd = (opc << 30) | (sid << 24) | ((len - 1) << 16) | addr;
pmif_writel(arb, cmd, inf_reg->ch_send);
/*
* Wait for Software Interface FSM state to be WFVLDCLR,
* read the data and clear the valid flag.
*/
ret = readl_poll_timeout_atomic(arb->base + arb->data->regs[inf_reg->ch_sta],
data, GET_SWINF(data) == SWINF_WFVLDCLR,
PMIF_DELAY_US, PMIF_TIMEOUT_US);
if (ret < 0) {
dev_err(&ctrl->dev, "failed to wait for SWINF_WFVLDCLR\n");
return ret;
}
data = pmif_readl(arb, inf_reg->rdata);
memcpy(buf, &data, len);
pmif_writel(arb, 1, inf_reg->ch_rdy);
return 0;
}
static int pmif_spmi_write_cmd(struct spmi_controller *ctrl, u8 opc, u8 sid,
u16 addr, const u8 *buf, size_t len)
{
struct pmif *arb = spmi_controller_get_drvdata(ctrl);
struct ch_reg *inf_reg;
int ret;
u32 data, cmd;
if (len > 4) {
dev_err(&ctrl->dev, "pmif supports 1..4 bytes per trans, but:%zu requested", len);
return -EINVAL;
}
/* Check the opcode */
if (opc >= 0x40 && opc <= 0x5F)
opc = PMIF_CMD_REG;
else if ((opc <= 0xF) || (opc >= 0x30 && opc <= 0x37))
opc = PMIF_CMD_EXT_REG_LONG;
else if (opc >= 0x80)
opc = PMIF_CMD_REG_0;
else
return -EINVAL;
/* Wait for Software Interface FSM state to be IDLE. */
inf_reg = &arb->chan;
ret = readl_poll_timeout_atomic(arb->base + arb->data->regs[inf_reg->ch_sta],
data, GET_SWINF(data) == SWINF_IDLE,
PMIF_DELAY_US, PMIF_TIMEOUT_US);
if (ret < 0) {
/* set channel ready if the data has transferred */
if (pmif_is_fsm_vldclr(arb))
pmif_writel(arb, 1, inf_reg->ch_rdy);
dev_err(&ctrl->dev, "failed to wait for SWINF_IDLE\n");
return ret;
}
/* Set the write data. */
memcpy(&data, buf, len);
pmif_writel(arb, data, inf_reg->wdata);
/*
 * Send the command: same layout as the read path, with BIT(29) set to
 * mark this as a write access.
 */
cmd = (opc << 30) | BIT(29) | (sid << 24) | ((len - 1) << 16) | addr;
pmif_writel(arb, cmd, inf_reg->ch_send);
return 0;
}
static const struct pmif_data mt6873_pmif_arb = {
.regs = mt6873_regs,
.spmimst_regs = mt6873_spmi_regs,
.soc_chan = 2,
};
static const struct pmif_data mt8195_pmif_arb = {
.regs = mt8195_regs,
.spmimst_regs = mt8195_spmi_regs,
.soc_chan = 2,
};
static int mtk_spmi_probe(struct platform_device *pdev)
{
struct pmif *arb;
struct spmi_controller *ctrl;
int err, i;
u32 chan_offset;
ctrl = spmi_controller_alloc(&pdev->dev, sizeof(*arb));
if (!ctrl)
return -ENOMEM;
arb = spmi_controller_get_drvdata(ctrl);
arb->data = device_get_match_data(&pdev->dev);
if (!arb->data) {
err = -EINVAL;
dev_err(&pdev->dev, "Cannot get drv_data\n");
goto err_put_ctrl;
}
arb->base = devm_platform_ioremap_resource_byname(pdev, "pmif");
if (IS_ERR(arb->base)) {
err = PTR_ERR(arb->base);
goto err_put_ctrl;
}
arb->spmimst_base = devm_platform_ioremap_resource_byname(pdev, "spmimst");
if (IS_ERR(arb->spmimst_base)) {
err = PTR_ERR(arb->spmimst_base);
goto err_put_ctrl;
}
arb->nclks = ARRAY_SIZE(pmif_clock_names);
for (i = 0; i < arb->nclks; i++)
arb->clks[i].id = pmif_clock_names[i];
err = devm_clk_bulk_get(&pdev->dev, arb->nclks, arb->clks);
if (err) {
dev_err(&pdev->dev, "Failed to get clocks: %d\n", err);
goto err_put_ctrl;
}
err = clk_bulk_prepare_enable(arb->nclks, arb->clks);
if (err) {
dev_err(&pdev->dev, "Failed to enable clocks: %d\n", err);
goto err_put_ctrl;
}
ctrl->cmd = pmif_arb_cmd;
ctrl->read_cmd = pmif_spmi_read_cmd;
ctrl->write_cmd = pmif_spmi_write_cmd;
chan_offset = PMIF_CHAN_OFFSET * arb->data->soc_chan;
arb->chan.ch_sta = PMIF_SWINF_0_STA + chan_offset;
arb->chan.wdata = PMIF_SWINF_0_WDATA_31_0 + chan_offset;
arb->chan.rdata = PMIF_SWINF_0_RDATA_31_0 + chan_offset;
arb->chan.ch_send = PMIF_SWINF_0_ACC + chan_offset;
arb->chan.ch_rdy = PMIF_SWINF_0_VLD_CLR + chan_offset;
platform_set_drvdata(pdev, ctrl);
err = spmi_controller_add(ctrl);
if (err)
goto err_domain_remove;
return 0;
err_domain_remove:
clk_bulk_disable_unprepare(arb->nclks, arb->clks);
err_put_ctrl:
spmi_controller_put(ctrl);
return err;
}
static void mtk_spmi_remove(struct platform_device *pdev)
{
struct spmi_controller *ctrl = platform_get_drvdata(pdev);
struct pmif *arb = spmi_controller_get_drvdata(ctrl);
clk_bulk_disable_unprepare(arb->nclks, arb->clks);
spmi_controller_remove(ctrl);
spmi_controller_put(ctrl);
}
static const struct of_device_id mtk_spmi_match_table[] = {
{
.compatible = "mediatek,mt6873-spmi",
.data = &mt6873_pmif_arb,
}, {
.compatible = "mediatek,mt8195-spmi",
.data = &mt8195_pmif_arb,
}, {
/* sentinel */
},
};
MODULE_DEVICE_TABLE(of, mtk_spmi_match_table);
static struct platform_driver mtk_spmi_driver = {
.driver = {
.name = "spmi-mtk",
.of_match_table = mtk_spmi_match_table,
},
.probe = mtk_spmi_probe,
.remove_new = mtk_spmi_remove,
};
module_platform_driver(mtk_spmi_driver);
MODULE_AUTHOR("Hsin-Hsiung Wang <[email protected]>");
MODULE_DESCRIPTION("MediaTek SPMI Driver");
MODULE_LICENSE("GPL");
| linux-master | drivers/spmi/spmi-mtk-pmif.c |
// SPDX-License-Identifier: GPL-2.0-only
/*
* EISA bus support functions for sysfs.
*
* (C) 2002, 2003 Marc Zyngier <[email protected]>
*/
#include <linux/kernel.h>
#include <linux/device.h>
#include <linux/eisa.h>
#include <linux/module.h>
#include <linux/moduleparam.h>
#include <linux/init.h>
#include <linux/slab.h>
#include <linux/ioport.h>
#include <asm/io.h>
#define SLOT_ADDRESS(r,n) (r->bus_base_addr + (0x1000 * n))
#define EISA_DEVINFO(i,s) { .id = { .sig = i }, .name = s }
struct eisa_device_info {
struct eisa_device_id id;
char name[50];
};
#ifdef CONFIG_EISA_NAMES
static struct eisa_device_info __initdata eisa_table[] = {
#include "devlist.h"
};
#define EISA_INFOS (sizeof (eisa_table) / (sizeof (struct eisa_device_info)))
#endif
#define EISA_MAX_FORCED_DEV 16
static int enable_dev[EISA_MAX_FORCED_DEV];
static unsigned int enable_dev_count;
static int disable_dev[EISA_MAX_FORCED_DEV];
static unsigned int disable_dev_count;
static int is_forced_dev(int *forced_tab,
int forced_count,
struct eisa_root_device *root,
struct eisa_device *edev)
{
int i, x;
for (i = 0; i < forced_count; i++) {
x = (root->bus_nr << 8) | edev->slot;
if (forced_tab[i] == x)
return 1;
}
return 0;
}
static void __init eisa_name_device(struct eisa_device *edev)
{
#ifdef CONFIG_EISA_NAMES
int i;
for (i = 0; i < EISA_INFOS; i++) {
if (!strcmp(edev->id.sig, eisa_table[i].id.sig)) {
strscpy(edev->pretty_name,
eisa_table[i].name,
sizeof(edev->pretty_name));
return;
}
}
/* No name was found */
sprintf(edev->pretty_name, "EISA device %.7s", edev->id.sig);
#endif
}
static char __init *decode_eisa_sig(unsigned long addr)
{
static char sig_str[EISA_SIG_LEN];
u8 sig[4];
u16 rev;
int i;
for (i = 0; i < 4; i++) {
#ifdef CONFIG_EISA_VLB_PRIMING
/*
* This ugly stuff is used to wake up VL-bus cards
* (AHA-284x is the only known example), so we can
* read the EISA id.
*
* Thankfully, this only exists on x86...
*/
outb(0x80 + i, addr);
#endif
sig[i] = inb(addr + i);
if (!i && (sig[0] & 0x80))
return NULL;
}
sig_str[0] = ((sig[0] >> 2) & 0x1f) + ('A' - 1);
sig_str[1] = (((sig[0] & 3) << 3) | (sig[1] >> 5)) + ('A' - 1);
sig_str[2] = (sig[1] & 0x1f) + ('A' - 1);
rev = (sig[2] << 8) | sig[3];
sprintf(sig_str + 3, "%04X", rev);
return sig_str;
}
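/*
 * Worked example of the decoding above (illustrative): ID bytes of
 * 0x50 0x6d 0x50 0x92 have bit 7 of sig[0] clear, so the slot is populated.
 * Bits 6..2 of sig[0] give 'T', the low two bits of sig[0] combined with the
 * top three bits of sig[1] give 'C', the low five bits of sig[1] give 'M',
 * and sig[2]/sig[3] form the product/revision word 0x5092 - yielding the
 * signature string "TCM5092" (a 3Com card).
 */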
static int eisa_bus_match(struct device *dev, struct device_driver *drv)
{
struct eisa_device *edev = to_eisa_device(dev);
struct eisa_driver *edrv = to_eisa_driver(drv);
const struct eisa_device_id *eids = edrv->id_table;
if (!eids)
return 0;
while (strlen(eids->sig)) {
if (!strcmp(eids->sig, edev->id.sig) &&
edev->state & EISA_CONFIG_ENABLED) {
edev->id.driver_data = eids->driver_data;
return 1;
}
eids++;
}
return 0;
}
static int eisa_bus_uevent(const struct device *dev, struct kobj_uevent_env *env)
{
const struct eisa_device *edev = to_eisa_device(dev);
add_uevent_var(env, "MODALIAS=" EISA_DEVICE_MODALIAS_FMT, edev->id.sig);
return 0;
}
struct bus_type eisa_bus_type = {
.name = "eisa",
.match = eisa_bus_match,
.uevent = eisa_bus_uevent,
};
EXPORT_SYMBOL(eisa_bus_type);
int eisa_driver_register(struct eisa_driver *edrv)
{
edrv->driver.bus = &eisa_bus_type;
return driver_register(&edrv->driver);
}
EXPORT_SYMBOL(eisa_driver_register);
void eisa_driver_unregister(struct eisa_driver *edrv)
{
driver_unregister(&edrv->driver);
}
EXPORT_SYMBOL(eisa_driver_unregister);
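/*
 * Illustrative driver skeleton (hypothetical names): an EISA driver matches
 * on one or more signature strings and probes through the generic
 * struct device_driver hooks; the empty signature terminates the table,
 * mirroring the strlen(eids->sig) check in eisa_bus_match() above. A real
 * module would pass the structure to eisa_driver_register() from its init
 * function.
 */
static const struct eisa_device_id example_eisa_ids[] = {
        { "TCM5092" },          /* example signature, see decode_eisa_sig() */
        { "" }                  /* terminator */
};

static int example_eisa_probe(struct device *dev)
{
        struct eisa_device *edev = to_eisa_device(dev);

        dev_info(dev, "found EISA card %s in slot %d\n",
                 edev->id.sig, edev->slot);
        return 0;
}

static struct eisa_driver __maybe_unused example_eisa_driver = {
        .id_table = example_eisa_ids,
        .driver = {
                .name = "example-eisa",
                .probe = example_eisa_probe,
        },
};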
static ssize_t signature_show(struct device *dev,
struct device_attribute *attr, char *buf)
{
struct eisa_device *edev = to_eisa_device(dev);
return sprintf(buf, "%s\n", edev->id.sig);
}
static DEVICE_ATTR_RO(signature);
static ssize_t enabled_show(struct device *dev,
struct device_attribute *attr, char *buf)
{
struct eisa_device *edev = to_eisa_device(dev);
return sprintf(buf, "%d\n", edev->state & EISA_CONFIG_ENABLED);
}
static DEVICE_ATTR_RO(enabled);
static ssize_t modalias_show(struct device *dev,
struct device_attribute *attr, char *buf)
{
struct eisa_device *edev = to_eisa_device(dev);
return sprintf(buf, EISA_DEVICE_MODALIAS_FMT "\n", edev->id.sig);
}
static DEVICE_ATTR_RO(modalias);
static int __init eisa_init_device(struct eisa_root_device *root,
struct eisa_device *edev,
int slot)
{
char *sig;
unsigned long sig_addr;
int i;
sig_addr = SLOT_ADDRESS(root, slot) + EISA_VENDOR_ID_OFFSET;
sig = decode_eisa_sig(sig_addr);
if (!sig)
return -1; /* No EISA device here */
memcpy(edev->id.sig, sig, EISA_SIG_LEN);
edev->slot = slot;
edev->state = inb(SLOT_ADDRESS(root, slot) + EISA_CONFIG_OFFSET)
& EISA_CONFIG_ENABLED;
edev->base_addr = SLOT_ADDRESS(root, slot);
edev->dma_mask = root->dma_mask; /* Default DMA mask */
eisa_name_device(edev);
edev->dev.parent = root->dev;
edev->dev.bus = &eisa_bus_type;
edev->dev.dma_mask = &edev->dma_mask;
edev->dev.coherent_dma_mask = edev->dma_mask;
dev_set_name(&edev->dev, "%02X:%02X", root->bus_nr, slot);
for (i = 0; i < EISA_MAX_RESOURCES; i++) {
#ifdef CONFIG_EISA_NAMES
edev->res[i].name = edev->pretty_name;
#else
edev->res[i].name = edev->id.sig;
#endif
}
if (is_forced_dev(enable_dev, enable_dev_count, root, edev))
edev->state = EISA_CONFIG_ENABLED | EISA_CONFIG_FORCED;
if (is_forced_dev(disable_dev, disable_dev_count, root, edev))
edev->state = EISA_CONFIG_FORCED;
return 0;
}
static int __init eisa_register_device(struct eisa_device *edev)
{
int rc = device_register(&edev->dev);
if (rc) {
put_device(&edev->dev);
return rc;
}
rc = device_create_file(&edev->dev, &dev_attr_signature);
if (rc)
goto err_devreg;
rc = device_create_file(&edev->dev, &dev_attr_enabled);
if (rc)
goto err_sig;
rc = device_create_file(&edev->dev, &dev_attr_modalias);
if (rc)
goto err_enab;
return 0;
err_enab:
device_remove_file(&edev->dev, &dev_attr_enabled);
err_sig:
device_remove_file(&edev->dev, &dev_attr_signature);
err_devreg:
device_unregister(&edev->dev);
return rc;
}
static int __init eisa_request_resources(struct eisa_root_device *root,
struct eisa_device *edev,
int slot)
{
int i;
for (i = 0; i < EISA_MAX_RESOURCES; i++) {
/* Don't register a full slot resource for slot 0 (the mainboard),
* since that is very likely to fail. Instead, just grab the EISA
* id registers so something shows up in /proc/ioports.
*/
/* Only one region for mainboard */
if (!slot && i > 0) {
edev->res[i].start = edev->res[i].end = 0;
continue;
}
if (slot) {
edev->res[i].name = NULL;
edev->res[i].start = SLOT_ADDRESS(root, slot)
+ (i * 0x400);
edev->res[i].end = edev->res[i].start + 0xff;
edev->res[i].flags = IORESOURCE_IO;
} else {
edev->res[i].name = NULL;
edev->res[i].start = SLOT_ADDRESS(root, slot)
+ EISA_VENDOR_ID_OFFSET;
edev->res[i].end = edev->res[i].start + 3;
edev->res[i].flags = IORESOURCE_IO | IORESOURCE_BUSY;
}
if (request_resource(root->res, &edev->res[i]))
goto failed;
}
return 0;
failed:
while (--i >= 0)
release_resource(&edev->res[i]);
return -1;
}
static void __init eisa_release_resources(struct eisa_device *edev)
{
int i;
for (i = 0; i < EISA_MAX_RESOURCES; i++)
if (edev->res[i].start || edev->res[i].end)
release_resource(&edev->res[i]);
}
static int __init eisa_probe(struct eisa_root_device *root)
{
int i, c;
struct eisa_device *edev;
char *enabled_str;
dev_info(root->dev, "Probing EISA bus %d\n", root->bus_nr);
/* First try to get hold of slot 0. If there is no device
* here, simply fail, unless root->force_probe is set. */
edev = kzalloc(sizeof(*edev), GFP_KERNEL);
if (!edev)
return -ENOMEM;
if (eisa_request_resources(root, edev, 0)) {
dev_warn(root->dev,
"EISA: Cannot allocate resource for mainboard\n");
kfree(edev);
if (!root->force_probe)
return -EBUSY;
goto force_probe;
}
if (eisa_init_device(root, edev, 0)) {
eisa_release_resources(edev);
kfree(edev);
if (!root->force_probe)
return -ENODEV;
goto force_probe;
}
dev_info(&edev->dev, "EISA: Mainboard %s detected\n", edev->id.sig);
if (eisa_register_device(edev)) {
dev_err(&edev->dev, "EISA: Failed to register %s\n",
edev->id.sig);
eisa_release_resources(edev);
kfree(edev);
}
force_probe:
for (c = 0, i = 1; i <= root->slots; i++) {
edev = kzalloc(sizeof(*edev), GFP_KERNEL);
if (!edev) {
dev_err(root->dev, "EISA: Out of memory for slot %d\n",
i);
continue;
}
if (eisa_request_resources(root, edev, i)) {
dev_warn(root->dev,
"Cannot allocate resource for EISA slot %d\n",
i);
kfree(edev);
continue;
}
if (eisa_init_device(root, edev, i)) {
eisa_release_resources(edev);
kfree(edev);
continue;
}
if (edev->state == (EISA_CONFIG_ENABLED | EISA_CONFIG_FORCED))
enabled_str = " (forced enabled)";
else if (edev->state == EISA_CONFIG_FORCED)
enabled_str = " (forced disabled)";
else if (edev->state == 0)
enabled_str = " (disabled)";
else
enabled_str = "";
dev_info(&edev->dev, "EISA: slot %d: %s detected%s\n", i,
edev->id.sig, enabled_str);
c++;
if (eisa_register_device(edev)) {
dev_err(&edev->dev, "EISA: Failed to register %s\n",
edev->id.sig);
eisa_release_resources(edev);
kfree(edev);
}
}
dev_info(root->dev, "EISA: Detected %d card%s\n", c, c == 1 ? "" : "s");
return 0;
}
static struct resource eisa_root_res = {
.name = "EISA root resource",
.start = 0,
.end = 0xffffffff,
.flags = IORESOURCE_IO,
};
static int eisa_bus_count;
int __init eisa_root_register(struct eisa_root_device *root)
{
int err;
/* Use our own resources to check whether this bus base address has
* already been registered. This prevents, for example, the virtual
* root device from registering after the real one has. */
root->eisa_root_res.name = eisa_root_res.name;
root->eisa_root_res.start = root->res->start;
root->eisa_root_res.end = root->res->end;
root->eisa_root_res.flags = IORESOURCE_BUSY;
err = request_resource(&eisa_root_res, &root->eisa_root_res);
if (err)
return err;
root->bus_nr = eisa_bus_count++;
err = eisa_probe(root);
if (err)
release_resource(&root->eisa_root_res);
return err;
}
static int __init eisa_init(void)
{
int r;
r = bus_register(&eisa_bus_type);
if (r)
return r;
printk(KERN_INFO "EISA bus registered\n");
return 0;
}
module_param_array(enable_dev, int, &enable_dev_count, 0444);
module_param_array(disable_dev, int, &disable_dev_count, 0444);
postcore_initcall(eisa_init);
int EISA_bus; /* for legacy drivers */
EXPORT_SYMBOL(EISA_bus);
| linux-master | drivers/eisa/eisa-bus.c |